code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
%% @author <NAME>
%% @doc
%% Run `gen_server' modules in synchronous way for easier testing.
%%
%% == Example ==
%%
%% ```
%% {ok, State} = gen_local:start(my_server, []).
%%
%% {ok, Reply, State1} = gen_local:call(State, do_thing).
%% '''
%% @end
-module(gen_local).
%% API exports
-export([start/2,
call/2,
cast/2,
send/2]).
-export_type([state/0]).
-record(state, {state, module}).
%% @type state().
%% Store for simulated process state.
%% @end
-opaque state() :: #state{}.
%%====================================================================
%% API functions
%%====================================================================
%% @doc
%% Fake startup process of given `gen_server' module in synchronous way.
%%
%% In short it will run `Module:init(Args)' and then will react in similar way
%% to the `gen_server:start/3' function.
%%
%% Note: when `init/1' returns `{ok, State, {continue, Msg}}' the continue
%% callback is executed immediately, so this function can additionally
%% return `{stopped, Reason, State}' from that path (spec fixed to match).
%% @end
-spec start(module(), term()) -> {ok, state()}
                               | ignore
                               | {stopped, term()}
                               | {stopped, term(), term()}.
start(Module, Args) ->
    case Module:init(Args) of
        {ok, State} ->
            {ok, #state{state = State, module = Module}};
        {ok, State, {continue, Msg}} ->
            %% Mirror OTP: run handle_continue/2 right after init/1.
            handle_continue(Msg, #state{state = State, module = Module});
        {ok, State, _Timeout} ->
            %% Timeouts are not simulated; the value is ignored.
            {ok, #state{state = State, module = Module}};
        ignore ->
            ignore;
        {stop, Reason} ->
            {stopped, Reason}
    end.
%% @doc
%% Fake `gen_server:call/2' on faked `gen_server' process.
%%
%% It will try to always return value, so there is no timeout support. It
%% handles `gen_server:reply/2' calls whenever there is `noreply' answer.
%% @end
-spec call(S, Msg::term()) -> {ok, Reply, S}
                            | {stopped, Reason, State::term()}
                            | {stopped, Reason, Reply, State::term()}
    when Reply :: term(),
         Reason :: term(),
         S :: state().
call(#state{module = Module, state = State} = S, Msg) ->
    %% A fresh reference identifies this call: gen_server:reply/2 sends
    %% {Tag, Reply} to self(), which async_reply/2 below waits for.
    Tag = make_ref(),
    case Module:handle_call(Msg, {self(), Tag}, State) of
        {reply, Reply, NewState} ->
            {ok, Reply, S#state{state = NewState}};
        {reply, Reply, NewState, {continue, Cont}} ->
            %% Run the continue callback before returning the reply.
            case handle_continue(Cont, S#state{state = NewState}) of
                {ok, NewNewState} -> {ok, Reply, NewNewState};
                Other -> Other
            end;
        {reply, Reply, NewState, _Timeout} ->
            %% Timeouts are not simulated; the value is ignored.
            {ok, Reply, S#state{state = NewState}};
        {noreply, NewState} ->
            %% Callback deferred its answer; wait for gen_server:reply/2.
            async_reply(Tag, S#state{state = NewState});
        {noreply, NewState, {continue, Cont}} ->
            case handle_continue(Cont, S#state{state = NewState}) of
                {ok, NewNewState} ->
                    async_reply(Tag, S#state{state = NewNewState});
                Other ->
                    Other
            end;
        {noreply, NewState, _Timeout} ->
            async_reply(Tag, S#state{state = NewState});
        {stop, Reason, NewState} ->
            {stopped, Reason, NewState};
        {stop, Reason, Reply, NewState} ->
            {stopped, Reason, Reply, NewState}
    end.
%% @doc
%% Fake `gen_server:cast/2' on faked `gen_server' process.
%% @end
-spec cast(S, Msg::term()) -> {ok, S}
                            | {stopped, Reason::term(), State::term()}
    when S::state().
cast(S, Msg) ->
    %% Route to handle_cast/2 and normalise the result via handle_reply/2.
    handle_reply(fake_call(S, handle_cast, Msg), S).

%% @doc
%% Fake sending message to faked `gen_server' process.
%% @end
-spec send(S, Msg::term()) -> {ok, S}
                            | {stopped, Reason::term(), State::term()}
    when S::state().
send(S, Msg) ->
    %% Plain messages are delivered to the handle_info/2 callback, just as
    %% real mailbox delivery would be.
    handle_reply(fake_call(S, handle_info, Msg), S).
%%====================================================================
%% Internal functions
%%====================================================================
%% Invoke `Module:Callback(Msg, State)' on the simulated process state and
%% return the raw callback result (to be normalised by handle_reply/2).
-spec fake_call(state(), atom(), term()) -> term().
fake_call(#state{state = State, module = Module}, Callback, Msg) ->
    Module:Callback(Msg, State).

%% Run the module's handle_continue/2 callback and normalise its result.
-spec handle_continue(term(), S) -> {ok, S} | {stopped, term(), term()}.
handle_continue(Msg, S) ->
    handle_reply(fake_call(S, handle_continue, Msg), S).

%% Block until gen_server:reply/2 delivers {Tag, Reply} to this process.
%% NOTE(review): no `after' clause -- this hangs forever if the callback
%% never replies, consistent with the module's "no timeout support" doc.
async_reply(Tag, State) ->
    receive
        {Tag, Reply} -> {ok, Reply, State}
    end.
%% Normalise a `handle_cast'/`handle_info'/`handle_continue' style callback
%% result into this module's return convention, chasing `{continue, _}'.
%% (Fix: stripped dataset-metadata garbage that was appended to the final
%% line and broke compilation.)
-spec handle_reply(Result, S) -> {ok, S} | {stopped, term(), term()}
    when Result :: {noreply, term()}
                 | {noreply, term(), {continue, term()}}
                 | {noreply, term(), timeout()}
                 | {stop, term(), term()}.
handle_reply({noreply, NewState}, S) ->
    {ok, S#state{state = NewState}};
handle_reply({noreply, NewState, {continue, Msg}}, S) ->
    handle_continue(Msg, S#state{state = NewState});
handle_reply({noreply, NewState, _Timeout}, S) ->
    %% Timeouts are not simulated; the value is ignored.
    {ok, S#state{state = NewState}};
handle_reply({stop, Reason, NewState}, _S) ->
    {stopped, Reason, NewState}.
%% Copyright (c) 2013 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : erlog_demo.erl
%% Author : <NAME>
%% Purpose : Demo functions for Erlang interface of Erlog system.
-module(erlog_demo).
-export([efunc/1,ets_keys/1,get_list/1]).
%% efunc(Fcall) -> {succeed_last,Val}.
%% ets_keys(Table) -> {succeed,Val,Cont} | {succeed_last,Val} | fail.
%% get_list(ListGenerator) -> {succeed,Val,Cont} | {succeed_last,Val} | fail.
%% Test/demo functions for ecall predicate. Examples of different ways
%% of generating solutions.
%% Evaluate a `{':', Module, Call}' remote-call description and return its
%% value as the last (and only) solution.  Accepts `M:F()', `M:F(A)' and
%% `M:F(A1, ..., An)' shapes; this is what the operators generate.
efunc(Fcall) ->
    Value =
        case Fcall of
            {':', Mod, Fun} when is_atom(Mod), is_atom(Fun) ->
                Mod:Fun();
            {':', Mod, {Fun, Arg}} when is_atom(Mod), is_atom(Fun) ->
                Mod:Fun(Arg);
            {':', Mod, Call} when is_atom(Mod), is_tuple(Call),
                                  tuple_size(Call) >= 2,
                                  is_atom(element(1, Call)) ->
                [Fun | Args] = tuple_to_list(Call),
                apply(Mod, Fun, Args)
        end,
    {succeed_last, Value}.                      %Optimisation
%% Back-trackable traversal of an ets table's keys: each solution carries
%% a continuation that looks up the next key only when demanded (no
%% look-ahead beyond the current cursor position).
ets_keys(Table) ->
    key_solution(Table, ets:first(Table)).

%% Turn a cursor position into a solution/continuation pair.
key_solution(_Table, '$end_of_table') ->
    fail;                                       %No more elements
key_solution(Table, Key) ->
    {succeed, Key, fun () -> key_solution(Table, ets:next(Table, Key)) end}.
%% List as a back-trackable generator: evaluate the `{':', M, Call}' term
%% to obtain a list of values, then yield them one at a time through
%% continuations.  This is what the operators generate.
get_list(ListGen) ->
    Values =
        case ListGen of
            {':', Mod, Fun} when is_atom(Mod), is_atom(Fun) ->
                Mod:Fun();
            {':', Mod, {Fun, Arg}} when is_atom(Mod), is_atom(Fun) ->
                Mod:Fun(Arg);
            {':', Mod, Call} when is_atom(Mod), is_tuple(Call),
                                  tuple_size(Call) >= 2,
                                  is_atom(element(1, Call)) ->
                apply(Mod, element(1, Call), tl(tuple_to_list(Call)))
        end,
    yield_values(Values).

%% Yield the head plus a continuation over the tail.
yield_values([Last]) ->
    {succeed_last, Last};                       %Optimisation for last one
yield_values([Head | Tail]) ->
    {succeed, Head, fun () -> yield_values(Tail) end};
yield_values([]) ->
    fail.                                       %No more elements
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_epi_sup).
%% --------------------
%% Important assumption
%% ====================
%% Keeper and codechange_monitor childspecs rely on undocumented behaviour.
%% According to supervisor docs:
%% ...if the child process is a supervisor, gen_server, or gen_fsm, this
%% should be a list with one element [Module].
%% However it is perfectly fine to have more than one module in the list.
%% Modules property is used to determine if process is suspendable.
%% Only suspendable processes are hot code upgraded, others are killed.
%% The check looks like `lists:member(Module, Modules)`
%% The assumption is that it is indeed underdocumented fact and not
%% an implementation detail.
-behaviour(supervisor).
-include("couch_epi.hrl").
%% API
-export([start_link/0]).
-export([plugin_childspecs/2]).
%% Supervisor callbacks
-export([init/1]).
%% For testing
-export([
plugin_childspecs/3
]).
%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
%% ===================================================================
%% API functions
%% ===================================================================
%% Start the couch_epi supervisor, registered locally as ?MODULE.
start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% Merge the childspecs contributed by Plugin into Children, using the
%% full list of configured plugins for context.
plugin_childspecs(Plugin, Children) ->
    Plugins = application:get_env(couch_epi, plugins, []),
    plugin_childspecs(Plugin, Plugins, Children).
%% ===================================================================
%% Supervisor callbacks
%% ===================================================================
%% one_for_one supervision of the keeper processes; at most 5 restarts
%% within 10 seconds.
init([]) ->
    {ok, {{one_for_one, 5, 10}, keepers()}}.
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
%% Build the module-keeper childspecs for all configured plugins,
%% deduplicated by childspec id.
keepers() ->
    Plugins = application:get_env(couch_epi, plugins, []),
    Definitions = couch_epi_plugin:grouped_definitions(Plugins),
    Children = keeper_childspecs(Definitions),
    remove_duplicates(Children).

%% Merge Plugin's own processes into Children, then append the
%% codechange-monitor childspecs for the plugin's definitions.
plugin_childspecs(Plugin, Plugins, Children) ->
    Definitions = couch_epi_plugin:grouped_definitions([Plugin]),
    ExtraChildren = couch_epi_plugin:plugin_processes(Plugin, Plugins),
    merge(ExtraChildren, Children) ++ childspecs(Definitions).
%% One codechange-monitor childspec per {Kind, Key} definition group.
childspecs(Definitions) ->
    lists:map(
        fun({{Kind, Key}, Defs}) ->
            CodeGen = couch_epi_plugin:codegen(Kind),
            Handle = CodeGen:get_handle(Key),
            %% All modules contributing specs for this service, flattened.
            Modules = lists:append([modules(Spec) || {_App, Spec} <- Defs]),
            Name = service_name(Key) ++ "|" ++ atom_to_list(Kind),
            %% The generated handle module leads the Modules list so the
            %% process is treated as suspendable (see the assumption note
            %% at the top of this file).
            code_monitor(Name, [Handle], [Handle | Modules])
        end,
        Definitions
    ).
%% ------------------------------------------------------------------
%% Helper Function Definitions
%% ------------------------------------------------------------------
%% Keep only the first childspec for each id (sorts and dedups on the
%% id element).
remove_duplicates(Definitions) ->
    lists:ukeysort(1, Definitions).

%% One module-keeper childspec per {Kind, Key} definition group.
keeper_childspecs(Definitions) ->
    lists:map(
        fun({{Kind, Key}, _Specs}) ->
            Name = service_name(Key) ++ "|keeper",
            CodeGen = couch_epi_plugin:codegen(Kind),
            Handle = CodeGen:get_handle(Key),
            keeper(Name, [provider_kind(Kind), Key, CodeGen], [Handle])
        end,
        Definitions
    ).
%% Childspec for a couch_epi_module_keeper worker.  Modules carries the
%% generated handle so the process is considered suspendable during hot
%% code upgrade (see the assumption note at the top of this file).
keeper(Name, Args, Modules) ->
    Id = "couch_epi|" ++ Name,
    MFA = {couch_epi_module_keeper, start_link, Args},
    {Id, MFA, permanent, 5000, worker, Modules}.

%% Childspec for a couch_epi_codechange_monitor worker; the monitor module
%% itself is prepended to the watched Modules list.
code_monitor(Name, Args, Modules0) ->
    Id = "couch_epi_codechange_monitor|" ++ Name,
    MFA = {couch_epi_codechange_monitor, start_link, Args},
    {Id, MFA, permanent, 5000, worker, [couch_epi_codechange_monitor | Modules0]}.
%% Map a definition kind to the corresponding provider kind.
provider_kind(services) -> providers;
provider_kind(data_subscriptions) -> data_providers;
provider_kind(Other) -> Other.

%% Printable name for a service id, optionally qualified by a key.
service_name({ServiceId, Key}) ->
    lists:concat([ServiceId, ":", Key]);
service_name(ServiceId) ->
    atom_to_list(ServiceId).
%% Modules implementing a given spec; these end up in the childspec
%% Modules list so the owning process is hot-code-upgradeable.
modules(#couch_epi_spec{kind = providers, value = Module}) ->
    [Module];
modules(#couch_epi_spec{kind = services, value = Module}) ->
    [Module];
modules(#couch_epi_spec{kind = data_providers, value = Value}) ->
    %% Only module-backed data providers contribute a module; other value
    %% shapes (e.g. static data) have none.
    case Value of
        {static_module, Module} -> [Module];
        {callback_module, Module} -> [Module];
        _ -> []
    end;
modules(#couch_epi_spec{kind = data_subscriptions, behaviour = Module}) ->
    [Module].
%% Merge extra childspecs into an existing list.
%% Tuple-style specs are inserted-or-replaced by id (lists:keystore/4);
%% map-style specs only replace an existing entry with the same id.
%% NOTE(review): unlike the tuple clause, a map spec whose id is absent
%% from Children is silently dropped -- confirm whether that asymmetry
%% is intended.
%% (Fix: stripped dataset-metadata garbage that was appended to the final
%% line and made it a syntax error.)
merge([], Children) ->
    Children;
merge([{Id, _, _, _, _, _} = Spec | Rest], Children) ->
    merge(Rest, lists:keystore(Id, 1, Children, Spec));
merge([#{id := Id} = Spec | Rest], Children) ->
    Replace = fun
        (#{id := I}) when I == Id -> Spec;
        (E) -> E
    end,
    merge(Rest, lists:map(Replace, Children)).
%% shapes project
-module(shapes).
-export([area/1, perimeter/1, enclose/1, bits_d/1, bits_tc/1]).
-export([area_test/0, perimeter_test/0, bits_d_test/0, bits_tc_test/0]).
%% geometric shapes
%% {circle, R} % radius
%% {rectangle, H, W}. % height, width
%% {triangle, A, B, C} % sides
%% Area of a geometric shape tuple: {circle, R}, {rectangle, H, W} or
%% {triangle, A, B, C} (sides).
area({circle, Radius}) ->
    math:pi() * Radius * Radius;
area({rectangle, Height, Width}) ->
    Height * Width;
area({triangle, A, B, C}) ->
    %% Heron's formula via the semi-perimeter.
    Semi = (A + B + C) / 2,
    math:sqrt(Semi * (Semi - A) * (Semi - B) * (Semi - C)).
%% Self-checks for area/1; a regression crashes with badmatch.
area_test() ->
    28.274333882308138 = shapes:area({circle, 3.0}),
    12.0 = area({rectangle, 3.0, 4.0}),
    24.0 = area({triangle, 4.0, 13.0, 15.0}),
    ok.
%% Calculate the perimeter given a shape tuple.
perimeter({circle, R}) ->
    2.0 * math:pi() * R;
perimeter({rectangle, H, W}) ->
    %% BUG FIX: a rectangle's perimeter is twice the sum of its sides;
    %% the previous code computed 2.0 * H * W (twice the area).
    2 * (H + W);
perimeter({triangle, A, B, C}) ->
    A + B + C.
%% Self-checks for perimeter/1.
perimeter_test() ->
    25.132741228718345 = perimeter({circle, 4.0}),
    %% BUG FIX: 2 * (3 + 4) = 14; the previous expectation (24.0) only
    %% matched the broken 2*H*W rectangle formula.
    14.0 = perimeter({rectangle, 3.0, 4.0}),
    12.0 = perimeter({triangle, 3.0, 4.0, 5.0}),
    ok.
%% Smallest axis-aligned rectangle that encloses the given shape.
%% https://en.wikipedia.org/wiki/Minimum_bounding_box_algorithms
enclose({circle, R}) ->
    Side = 2 * R,
    {rectangle, Side, Side};
enclose({rectangle, _, _} = Rect) ->
    Rect;
enclose({triangle, Base, S1, S2}) ->
    {rectangle, Base, triangleHeight(Base, S1, S2)}.

%% Height of a triangle relative to Base, derived from its area
%% (area = base * height / 2).
triangleHeight(Base, S1, S2) ->
    2 * area({triangle, Base, S1, S2}) / Base.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% count the ones in a binary number:
%%
%% erlang has bit oporators,
%% look up 'band' 'bor' 'bxor' 'bsl' 'bsr'.
%% http://erlang.org/doc/reference_manual/expressions.html#id78950
%%
%% which means that I can use...
%% ...Kernighan's algorithm:
%% http://stackoverflow.com/questions/12380478/
%% Count the 1-bits of a non-negative integer with Kernighan's trick:
%% N band (N - 1) clears the lowest set bit, so we recurse once per bit.
bits_d(0) ->
    0;
bits_d(N) ->
    bits_d(N band (N - 1)) + 1.

%% Self-checks for bits_d/1.
bits_d_test() ->
    0 = bits_d(2#0),                            % base
    1 = bits_d(2#1),
    1 = bits_d(2#10),
    2 = bits_d(2#11),
    3 = bits_d(2#111),                          % test 7
    1 = bits_d(2#1000),                         % test 8
    bits_d_passed.
%% Tail-recursive bit count (accumulator version of bits_d/1).
%% (Fix: stripped dataset-metadata garbage that was appended to the final
%% line and made it a syntax error.)
bits_tc(N) ->
    bits_tc_acc(N, 0).

%% Kernighan's trick: N band (N-1) clears the lowest set bit per step.
bits_tc_acc(0, Acc) ->
    Acc;
bits_tc_acc(N, Acc) ->
    bits_tc_acc(N band (N-1), Acc+1).

%% Self-checks for bits_tc/1.
bits_tc_test() ->
    0 = bits_tc(0),                             % base
    1 = bits_tc(1),
    1 = bits_tc(2),
    2 = bits_tc(3),
    3 = bits_tc(7),                             % test 2#111
    1 = bits_tc(8),                             % test 2#1000
    bits_tc_passed.
%% @copyright 2007 Mochi Media, Inc.
%% @author <NAME> <<EMAIL>>
%% @doc Useful numeric algorithms for floats that cover some deficiencies
%% in the math module. More interesting is digits/1, which implements
%% the algorithm from:
%% http://www.cs.indiana.edu/~burger/fp/index.html
%% See also "Printing Floating-Point Numbers Quickly and Accurately"
%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
%% Design and Implementation.
-module(mochinum).
-author("<NAME> <<EMAIL>>").
-export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
%% IEEE 754 Float exponent bias
-define(FLOAT_BIAS, 1022).
-define(MIN_EXP, -1074).
-define(BIG_POW, 4503599627370496).
%% External API
%% @spec digits(number()) -> string()
%% @doc Returns a string that accurately represents the given integer or float
%% using a conservative amount of digits. Great for generating
%% human-readable output, or compact ASCII serializations for floats.
digits(N) when is_integer(N) ->
    integer_to_list(N);
digits(0.0) ->
    "0.0";
digits(Float) ->
    %% Recover the 53-bit integer mantissa and the binary exponent such
    %% that |Float| = Frac1 * 2^Exp1.
    {Frac, Exp} = frexp(Float),
    Exp1 = Exp - 53,
    Frac1 = trunc(abs(Frac) * (1 bsl 53)),
    %% digits1/3 yields [DecimalPointPlace | DigitValues].
    [Place | Digits] = digits1(Float, Exp1, Frac1),
    R = insert_decimal(Place, [$0 + D || D <- Digits]),
    %% Sign was dropped above; restore it for negative inputs.
    case Float < 0 of
        true ->
            [$- | R];
        _ ->
            R
    end.
%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
%% @doc Return the fractional and exponent part of an IEEE 754 double,
%% equivalent to the libc function of the same name.
%% F = Frac * pow(2, Exp).
frexp(F) ->
    frexp1(unpack(F)).

%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
%% @doc Moderately efficient way to exponentiate integers.
%% int_pow(10, 2) = 100.
%% Delegates to the accumulator version int_pow/3 for N >= 1.
int_pow(_X, 0) ->
    1;
int_pow(X, N) when N > 0 ->
    int_pow(X, N, 1).
%% @spec int_ceil(F::float()) -> integer()
%% @doc Return the ceiling of F as an integer. The ceiling is defined as
%% F when F == trunc(F);
%% trunc(F) when F < 0;
%% trunc(F) + 1 when F > 0.
int_ceil(X) ->
    Trunc = trunc(X),
    if
        X > Trunc -> Trunc + 1;
        true      -> Trunc
    end.
%% Internal API
%% Exponentiation by squaring with accumulator R; assumes N >= 1.
int_pow(X, N, R) when N < 2 ->
    R * X;
int_pow(X, N, R) ->
    %% Fold X into the accumulator when the current low bit of N is set.
    NextR =
        case N band 1 of
            1 -> R * X;
            0 -> R
        end,
    int_pow(X * X, N bsr 1, NextR).
%% Place the decimal point at position Place within digit string S,
%% zero-padding where needed and switching to exponent notation when the
%% plain form would need 6 or more padding zeros.
insert_decimal(0, S) ->
    "0." ++ S;
insert_decimal(Place, S) when Place > 0 ->
    L = length(S),
    case Place - L of
        0 ->
            %% Point falls exactly after the last digit.
            S ++ ".0";
        N when N < 0 ->
            %% Point falls inside the digit string.
            {S0, S1} = lists:split(L + N, S),
            S0 ++ "." ++ S1;
        N when N < 6 ->
            %% More places than digits
            S ++ lists:duplicate(N, $0) ++ ".0";
        _ ->
            insert_decimal_exp(Place, S)
    end;
insert_decimal(Place, S) when Place > -6 ->
    %% Small negative place: pad with leading zeros after "0.".
    "0." ++ lists:duplicate(abs(Place), $0) ++ S;
insert_decimal(Place, S) ->
    insert_decimal_exp(Place, S).
%% Render digits in exponent notation, e.g. "1.23e+16": first digit,
%% decimal point, remaining digits (or "0"), then the signed exponent
%% abs(Place - 1).
insert_decimal_exp(Place, S) ->
    [First | Rest] = S,
    Mantissa =
        case Rest of
            [] -> "0";
            _ -> Rest
        end,
    ExpSign =
        case Place < 0 of
            true -> "e-";
            false -> "e+"
        end,
    [First, $. | Mantissa] ++ ExpSign ++ integer_to_list(abs(Place - 1)).
%% Set up the free-format digit generation (Burger-Dybvig): choose the
%% initial ratio R/S and the boundary widths MPlus/MMinus based on the
%% exponent sign and whether the mantissa sits exactly on a binade
%% boundary (?BIG_POW = 2^52), where the gap below is half the gap above.
digits1(Float, Exp, Frac) ->
    %% An even mantissa lets the boundaries be treated inclusively.
    Round = ((Frac band 1) =:= 0),
    case Exp >= 0 of
        true ->
            BExp = 1 bsl Exp,
            case (Frac =/= ?BIG_POW) of
                true ->
                    scale((Frac * BExp * 2), 2, BExp, BExp,
                          Round, Round, Float);
                false ->
                    %% Binade boundary: asymmetric M+/M-.
                    scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
                          Round, Round, Float)
            end;
        false ->
            case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
                true ->
                    scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
                          Round, Round, Float);
                false ->
                    %% Binade boundary with negative exponent.
                    scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
                          Round, Round, Float)
            end
    end.
%% Estimate the decimal exponent from log10 and scale R/S (and the
%% boundaries) by the matching power of ten; fixup/7 corrects any
%% off-by-one in the estimate.  The 1.0e-10 fudge keeps the estimate
%% from tipping over on values extremely close to a power of ten.
scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
    Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
    %% Note that the scheme implementation uses a 326 element look-up table
    %% for int_pow(10, N) where we do not.
    case Est >= 0 of
        true ->
            fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
                  LowOk, HighOk);
        false ->
            Scale = int_pow(10, -Est),
            fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
                  LowOk, HighOk)
    end.
%% Correct the estimated decimal exponent K before generating digits:
%% if R + MPlus already reaches S, the estimate was one short, so emit
%% K + 1 without rescaling; otherwise shift R and the boundaries up by
%% ten and emit K.  The first element of the result is the decimal
%% point position consumed by digits/1.
fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
    %% HighOk decides whether the upper boundary is inclusive.
    TooLow = case HighOk of
        true ->
            (R + MPlus) >= S;
        false ->
            (R + MPlus) > S
    end,
    case TooLow of
        true ->
            [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
        false ->
            [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
    end.
%% Generate decimal digits one at a time until the remainder falls within
%% the rounding boundaries, then round the last digit toward the original
%% float.  TC1: low boundary reached (can stop, rounding down); TC2: high
%% boundary reached (can stop, rounding up); LowOk/HighOk make the
%% respective comparison inclusive.
generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
    D = R0 div S,
    R = R0 rem S,
    TC1 = case LowOk of
        true ->
            R =< MMinus;
        false ->
            R < MMinus
    end,
    TC2 = case HighOk of
        true ->
            (R + MPlus) >= S;
        false ->
            (R + MPlus) > S
    end,
    case TC1 of
        false ->
            case TC2 of
                false ->
                    %% Neither boundary reached: emit D and keep going.
                    [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
                                  LowOk, HighOk)];
                true ->
                    [D + 1]
            end;
        true ->
            case TC2 of
                false ->
                    [D];
                true ->
                    %% Both boundaries reached: pick the digit whose value
                    %% is closer to the remainder.
                    case R * 2 < S of
                        true ->
                            [D];
                        false ->
                            [D + 1]
                    end
            end
    end.
%% Decompose an IEEE 754 double into its raw bit fields:
%% {SignBit, BiasedExponent (11 bits), Mantissa (52 bits)}.
unpack(Float) ->
    <<SignBit:1, BiasedExp:11, Mantissa:52>> = <<Float:64/float>>,
    {SignBit, BiasedExp, Mantissa}.
%% Normalise an unpacked IEEE 754 double into {Fraction, Exponent} with
%% libc frexp semantics (0.5 =< |Fraction| < 1.0 for non-zero input).
frexp1({_Sign, 0, 0}) ->
    %% Zero has no meaningful exponent.
    {0.0, 0};
frexp1({Sign, 0, Frac}) ->
    %% Denormalised number: log2floor/1 gives the bit length of the raw
    %% mantissa, which determines how far below the normal range we are.
    Exp = log2floor(Frac),
    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
    {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
frexp1({Sign, Exp, Frac}) ->
    %% Normalised number: keep the mantissa and rebias the exponent.
    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
    {Frac1, Exp - ?FLOAT_BIAS}.
%% NOTE(review): despite the name, this returns the number of right
%% shifts until zero -- i.e. the bit length of Int (floor(log2(Int)) + 1
%% for Int > 0, and 0 for 0), which is what frexp1/1 relies on for
%% denormalised values.
log2floor(Int) ->
    count_shifts(Int, 0).

count_shifts(0, Acc) -> Acc;
count_shifts(Int, Acc) -> count_shifts(Int bsr 1, Acc + 1).
%%
%% Tests
%%
-include_lib("eunit/include/eunit.hrl").
-ifdef(TEST).
%% Sanity checks for int_ceil/1; a regression crashes with badmatch.
int_ceil_test() ->
    1 = int_ceil(0.0001),
    0 = int_ceil(0.0),
    1 = int_ceil(0.99),
    1 = int_ceil(1.0),
    -1 = int_ceil(-1.5),
    -2 = int_ceil(-2.0),
    ok.

%% Sanity checks for int_pow/2.
int_pow_test() ->
    1 = int_pow(1, 1),
    1 = int_pow(1, 0),
    1 = int_pow(10, 0),
    10 = int_pow(10, 1),
    100 = int_pow(10, 2),
    1000 = int_pow(10, 3),
    ok.
%% XXX arg 02/12/10
%% disabled pending resolution of http://code.google.com/p/mochiweb/issues/detail?id=63
%digits_test_disabled() ->
% ?assertEqual("0",
% digits(0)),
% ?assertEqual("0.0",
% digits(0.0)),
% ?assertEqual("1.0",
% digits(1.0)),
% ?assertEqual("-1.0",
% digits(-1.0)),
% ?assertEqual("0.1",
% digits(0.1)),
% ?assertEqual("0.01",
% digits(0.01)),
% ?assertEqual("0.001",
% digits(0.001)),
% ?assertEqual("1.0e+6",
% digits(1000000.0)),
% ?assertEqual("0.5",
% digits(0.5)),
% ?assertEqual("4503599627370496.0",
% digits(4503599627370496.0)),
% %% small denormalized number
% %% 4.94065645841246544177e-324
% <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
% ?assertEqual("4.9406564584124654e-324",
% digits(SmallDenorm)),
% ?assertEqual(SmallDenorm,
% list_to_float(digits(SmallDenorm))),
% %% large denormalized number
% %% 2.22507385850720088902e-308
% <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
% ?assertEqual("2.225073858507201e-308",
% digits(BigDenorm)),
% ?assertEqual(BigDenorm,
% list_to_float(digits(BigDenorm))),
% %% small normalized number
% %% 2.22507385850720138309e-308
% <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
% ?assertEqual("2.2250738585072014e-308",
% digits(SmallNorm)),
% ?assertEqual(SmallNorm,
% list_to_float(digits(SmallNorm))),
% %% large normalized number
% %% 1.79769313486231570815e+308
% <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
% ?assertEqual("1.7976931348623157e+308",
% digits(LargeNorm)),
% ?assertEqual(LargeNorm,
% list_to_float(digits(LargeNorm))),
% ok.
%% Exercise frexp/1 across zero, +/-1, and the extremes of the double
%% range (smallest/largest denormalised and normalised values), built
%% directly from their byte patterns.
frexp_test() ->
    %% zero
    {0.0, 0} = frexp(0.0),
    %% one
    {0.5, 1} = frexp(1.0),
    %% negative one
    {-0.5, 1} = frexp(-1.0),
    %% small denormalized number
    %% 4.94065645841246544177e-324
    <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
    {0.5, -1073} = frexp(SmallDenorm),
    %% large denormalized number
    %% 2.22507385850720088902e-308
    <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
    {0.99999999999999978, -1022} = frexp(BigDenorm),
    %% small normalized number
    %% 2.22507385850720138309e-308
    <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
    {0.5, -1021} = frexp(SmallNorm),
    %% large normalized number
    %% 1.79769313486231570815e+308
    <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
    {0.99999999999999989, 1024} = frexp(LargeNorm),
    ok.
-endif. | src/mochinum.erl | 0.615666 | 0.535038 | mochinum.erl | starcoder |
-module (mondemand_vmstats).
% This module is used to collect vmstats from the erlang vm and provide
% them in a form which mondemand can work with. In addition, I'd like
% to eventually support looking at a finer granularity than mondemand.
% In order to support this, a gen_server keeps a queue of samples. The
% samples are taken once a second, and a certain number of them are kept.
% About once a minute mondemand will come in and calculate gauges for all
% the sampled values (I use gauges so I don't overflow the counters in
% mondemand).
-include ("mondemand_internal.hrl").
-behaviour (gen_server).
%% API
-export ([start_link/0,
to_mondemand/0,
to_list/0,
first/0,
first/1,
last/0,
last/1,
collect_sample/2,
scheduler_wall_time_diff/2]).
%% gen_server callbacks
-export ( [ init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).
-record (state, {samples = queue:new(),
max_samples = 300, % 5 minutes of sampled data
legacy = false, % old otp workarounds
previous_mondemand = undefined,
timer,
scheduler_former_flag,% keep track of previous scheduler
% stats flag for shutdown
collect_scheduler_stats
}).
-record (vm_sample, { timestamp,
context_switches,
gc_count,
gc_bytes_reclaimed,
io_bytes_in,
io_bytes_out,
reductions,
runtime,
wallclock,
run_queue,
queued_messages,
memory_total,
memory_process,
memory_system,
memory_atom,
memory_binary,
memory_ets,
process_count,
process_limit,
port_count,
port_limit,
scheduler_wall_time
}).
%-=====================================================================-
%- API -
%-=====================================================================-
%% Start the sampler gen_server, registered locally as ?MODULE.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, []).

%% Gauges computed from the delta between the previously emitted sample
%% and the newest collected sample (see handle_call(to_mondemand, ...)).
to_mondemand() ->
    gen_server:call(?MODULE, to_mondemand).

%% Oldest retained sample.
first() ->
    gen_server:call(?MODULE, first).

%% One metric (atom) or several (list of atoms) from the oldest sample.
first(MetricOrMetrics) ->
    metrics_from_sample(MetricOrMetrics, first()).

%% Newest retained sample.
last() ->
    gen_server:call(?MODULE, last).

%% One metric (atom) or several (list of atoms) from the newest sample.
last(MetricOrMetrics) ->
    metrics_from_sample(MetricOrMetrics, last()).

%% A single atom yields the bare value; a list yields {Metric, Value}
%% pairs in the same order.
metrics_from_sample(A, Sample = #vm_sample{}) when is_atom(A) ->
    {_, Value} = metric_from_sample(A, Sample),
    Value;
metrics_from_sample(L, Sample = #vm_sample{}) when is_list(L) ->
    lists:map(fun(M) -> metric_from_sample(M, Sample) end, L).
%% Map a metric name to a {Name, Value} pair read from the sample record.
%% Crashes with case_clause on unknown metric names.
metric_from_sample(Metric, Sample) ->
    case Metric of
        timestamp -> {Metric, Sample#vm_sample.timestamp};
        context_switches -> {Metric, Sample#vm_sample.context_switches};
        gc_count -> {Metric, Sample#vm_sample.gc_count};
        gc_bytes_reclaimed -> {Metric, Sample#vm_sample.gc_bytes_reclaimed};
        io_bytes_in -> {Metric, Sample#vm_sample.io_bytes_in};
        io_bytes_out -> {Metric, Sample#vm_sample.io_bytes_out};
        reductions -> {Metric, Sample#vm_sample.reductions};
        runtime -> {Metric, Sample#vm_sample.runtime};
        wallclock -> {Metric, Sample#vm_sample.wallclock};
        run_queue -> {Metric, Sample#vm_sample.run_queue};
        queued_messages -> {Metric, Sample#vm_sample.queued_messages};
        memory_total -> {Metric, Sample#vm_sample.memory_total};
        memory_process -> {Metric, Sample#vm_sample.memory_process};
        memory_system -> {Metric, Sample#vm_sample.memory_system};
        memory_atom -> {Metric, Sample#vm_sample.memory_atom};
        memory_binary -> {Metric, Sample#vm_sample.memory_binary};
        memory_ets -> {Metric, Sample#vm_sample.memory_ets};
        process_count -> {Metric, Sample#vm_sample.process_count};
        process_limit -> {Metric, Sample#vm_sample.process_limit};
        port_count -> {Metric, Sample#vm_sample.port_count};
        port_limit -> {Metric, Sample#vm_sample.port_limit};
        scheduler_wall_time -> {Metric, Sample#vm_sample.scheduler_wall_time}
    end.

%% All retained samples, oldest first.
to_list() ->
    gen_server:call(?MODULE, to_list).
%-=====================================================================-
%- gen_server callbacks -
%-=====================================================================-
%% Take an initial sample, start the 1-second collection timer, and
%% remember the previous scheduler_wall_time flag so terminate/2 can
%% restore it.
init([]) ->
    % work around for the fact that R15B didn't have port_count
    Legacy = mondemand_config:vmstats_legacy_workaround(),
    % allow scheduler stats to be turned off (should default to true)
    {Former, CollectSchedulerStats} =
        case mondemand_config:vmstats_disable_scheduler_wall_time() of
            true -> {undefined, false};
            false -> {erlang:system_flag(scheduler_wall_time, true), true}
        end,
    InitialSample = collect_sample(Legacy, CollectSchedulerStats),
    InitialQueue = queue:in(InitialSample, queue:new()),
    TRef = timer:send_interval(1000, collect), % collect samples every second
    % keep the initial sample as both the previous mondemand value and put
    % it into the queue
    {ok, #state{samples = InitialQueue,
                timer = TRef,
                legacy = Legacy,
                collect_scheduler_stats = CollectSchedulerStats,
                scheduler_former_flag = Former
               }
    }.
%% Oldest retained sample (queue head; the queue is never empty after
%% init/1 seeds it).
handle_call(first, _From, State = #state{samples = Queue}) ->
    {value, FirstSample} = queue:peek(Queue),
    {reply, FirstSample, State};
%% Newest retained sample (queue tail).
handle_call(last, _From, State = #state{samples = Queue}) ->
    {value, LastSample} = queue:peek_r(Queue),
    {reply, LastSample, State};
handle_call(to_list, _From, State = #state{samples = Queue}) ->
    {reply, queue:to_list(Queue), State};
handle_call(to_mondemand, _From,
            State = #state{samples = Queue,
                           previous_mondemand = Prev}) ->
    % queue should always have something in it
    {value, LastSample} = queue:peek_r(Queue),
    Stats =
        case Prev =:= undefined of
            true ->
                % we skip the first send of data to mondemand, as we have no way
                % to really ensure the normal duration between sends to mondemand
                % has elapsed, if it hasn't elapsed we might be emitting to mondemand
                % shortly after restart and would see some spikiness in any counters
                % (as they are turned into gauges with the assumption calls to
                % to_mondemand/0 are happening on a regular interval).
                [];
            false ->
                to_mondemand(Prev, LastSample)
        end,
    %% Remember what we emitted so the next call produces proper deltas.
    {reply, Stats, State#state{previous_mondemand = LastSample}};
handle_call(_Request, _From, State = #state{}) ->
    {reply, ok, State}.

%% No casts are part of this server's protocol; ignore them.
handle_cast(_Request, State = #state{}) ->
    {noreply, State}.

%% Triggered every second by the timer started in init/1.
handle_info(collect,
            State = #state{samples = QueueIn,
                           max_samples = Max,
                           legacy = Legacy,
                           collect_scheduler_stats = CollectSchedulerStats
                          }) ->
    % collect a sample
    CurrentSample = collect_sample(Legacy, CollectSchedulerStats),
    % insert it into the queue
    QueueOut =
        case queue:len(QueueIn) =:= Max of
            true ->
                % when we are at the max entries, we drop one since we are adding one
                {{value, _}, Q} = queue:out(QueueIn),
                queue:in(CurrentSample, Q);
            false ->
                % otherwise we aren't full yet, so just add one
                queue:in(CurrentSample, QueueIn)
        end,
    {noreply, State#state{samples = QueueOut}};
handle_info(_Info, State = #state{}) ->
    {noreply, State}.

%% Restore the scheduler_wall_time flag captured in init/1, if we set it.
terminate(_Reason, #state{scheduler_former_flag = Former}) ->
    % revert to the former wall time
    case Former =/= undefined of
        true -> erlang:system_flag(scheduler_wall_time, Former);
        false -> ok
    end,
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%====================================================================
%% private functions
%%====================================================================
%% Take one snapshot of VM statistics into a #vm_sample{}.  Legacy toggles
%% an R15B workaround (no port_count/port_limit in system_info);
%% CollectSchedulerStats gates the scheduler_wall_time sampling.
collect_sample(Legacy, CollectSchedulerStats) ->
    Timestamp = mondemand_util:seconds_since_epoch(),
    % Most collected from http://www.erlang.org/doc/man/erlang.html#statistics-1
    %
    % ContextSwitches is the total number of context switches since
    % the system started.
    {ContextSwitches, _} = erlang:statistics(context_switches),
    % GC info
    {NumberOfGCs, WordsReclaimed, _} = erlang:statistics(garbage_collection),
    % Input is the total number of bytes received through ports,
    % and Output is the total number of bytes output to ports.
    {{input, Input}, {output, Output}} = erlang:statistics(io),
    % information about reductions
    {TotalReductions, _ReductionsSinceLast} = erlang:statistics(reductions),
    % Note that the run-time is the sum of the run-time for all threads in
    % the Erlang run-time system and may therefore be greater than
    % the wall-clock time. The time is returned in milliseconds.
    {TotalRuntime, _} = erlang:statistics(runtime),
    % wall_clock can be used in the same manner as runtime, except that
    % real time is measured as opposed to runtime or CPU time.
    {TotalWallclock, _} = erlang:statistics(wall_clock),
    % Returns the total length of the run queues, that is, the number of
    % processes that are ready to run on all available run queues.
    RunQueue = erlang:statistics(run_queue),
    % total length of all message queues (processes that die between
    % processes() and process_info/2 yield undefined and are skipped)
    TotalMessages =
        lists:foldl(
            fun(Pid, Acc) ->
                case process_info(Pid, message_queue_len) of
                    undefined -> Acc;
                    {message_queue_len, Count} -> Count + Acc
                end
            end,
            0,
            processes()
        ),
    Memory = erlang:memory(),
    TotalMemory = proplists:get_value(total, Memory),
    ProcessMemory = proplists:get_value(processes_used, Memory),
    SystemMemory = proplists:get_value(system, Memory),
    AtomUsed = proplists:get_value(atom_used, Memory),
    BinaryMemory = proplists:get_value(binary, Memory),
    EtsMemory = proplists:get_value(ets, Memory),
    ProcessCount = erlang:system_info(process_count),
    ProcessLimit = erlang:system_info(process_limit),
    % R15B didn't have a good way to get these so working around this fact
    {PortCount, PortLimit} =
        case Legacy of
            true -> {length(erlang:ports()), 0};
            false -> {erlang:system_info(port_count), erlang:system_info(port_limit)}
        end,
    SchedWallTime =
        case CollectSchedulerStats of
            true -> erlang:statistics(scheduler_wall_time);
            false -> undefined
        end,
    #vm_sample{
        timestamp = Timestamp,
        context_switches = ContextSwitches,
        gc_count = NumberOfGCs,
        % statistics/1 reports words; convert to bytes here
        gc_bytes_reclaimed = WordsReclaimed * erlang:system_info(wordsize),
        io_bytes_in = Input,
        io_bytes_out = Output,
        reductions = TotalReductions,
        runtime = TotalRuntime,
        wallclock = TotalWallclock,
        run_queue = RunQueue,
        queued_messages = TotalMessages,
        memory_total = TotalMemory,
        memory_process = ProcessMemory,
        memory_system = SystemMemory,
        memory_atom = AtomUsed,
        memory_binary = BinaryMemory,
        memory_ets = EtsMemory,
        process_count = ProcessCount,
        process_limit = ProcessLimit,
        port_count = PortCount,
        port_limit = PortLimit,
        scheduler_wall_time = SchedWallTime
    }.
%% Turn the delta between two samples into mondemand gauge tuples.
%% Monotonically increasing VM counters are differenced against the
%% previous sample; instantaneous values (queues, memory, counts) are
%% taken from the newer sample as-is.  Gauges are used rather than
%% counters to avoid overflow (see module comment).
to_mondemand(#vm_sample{
                 context_switches = PrevContextSwitches,
                 gc_count = PrevNumberOfGCs,
                 gc_bytes_reclaimed = PrevWordsReclaimed,
                 io_bytes_in = PrevInput,
                 io_bytes_out = PrevOutput,
                 reductions = PrevReductions,
                 runtime = PrevRuntime,
                 wallclock = PrevWallclock,
                 scheduler_wall_time = PrevSchedWallTime
             },
             #vm_sample{
                 timestamp = _Timestamp,
                 context_switches = ContextSwitches,
                 gc_count = NumberOfGCs,
                 gc_bytes_reclaimed = WordsReclaimed,
                 io_bytes_in = Input,
                 io_bytes_out = Output,
                 reductions = Reductions,
                 runtime = Runtime,
                 wallclock = Wallclock,
                 run_queue = RunQueue,
                 queued_messages = TotalMessages,
                 memory_total = TotalMemory,
                 memory_process = ProcessMemory,
                 memory_system = SystemMemory,
                 memory_atom = AtomUsed,
                 memory_binary = BinaryMemory,
                 memory_ets = EtsMemory,
                 process_count = ProcessCount,
                 process_limit = ProcessLimit,
                 port_count = PortCount,
                 port_limit = PortLimit,
                 scheduler_wall_time = SchedWallTime
             }) ->
    [
        { gauge, context_switches, ContextSwitches - PrevContextSwitches },
        { gauge, gc_count, NumberOfGCs - PrevNumberOfGCs },
        { gauge, gc_bytes_reclaimed, WordsReclaimed - PrevWordsReclaimed },
        { gauge, io_bytes_in, Input - PrevInput },
        { gauge, io_bytes_out, Output - PrevOutput },
        { gauge, reductions, Reductions - PrevReductions },
        { gauge, runtime, Runtime - PrevRuntime },
        { gauge, wallclock, Wallclock - PrevWallclock },
        { gauge, run_queue, RunQueue },
        { gauge, queued_messages, TotalMessages },
        { gauge, memory_total, TotalMemory },
        { gauge, memory_process, ProcessMemory },
        { gauge, memory_system, SystemMemory },
        { gauge, memory_atom, AtomUsed },
        { gauge, memory_binary, BinaryMemory },
        { gauge, memory_ets, EtsMemory },
        { gauge, process_count, ProcessCount },
        { gauge, process_limit, ProcessLimit },
        { gauge, port_count, PortCount },
        { gauge, port_limit, PortLimit }
        %% Per-scheduler utilisation percentages computed from the
        %% wall-time deltas (empty if either sample lacks them).
        | scheduler_wall_time_diff(PrevSchedWallTime, SchedWallTime)
    ].
%% Compute per-scheduler utilization gauges between two
%% erlang:statistics(scheduler_wall_time) snapshots. Returns [] when
%% either snapshot is missing (stats disabled). Snapshots are sorted so
%% schedulers line up by id; zipped pairs whose scheduler ids differ are
%% silently skipped by the generator pattern, as in the original.
scheduler_wall_time_diff (undefined, _) -> [];
scheduler_wall_time_diff (_, undefined) -> [];
scheduler_wall_time_diff (PrevSchedWallTime, SchedWallTime) ->
  Pairs = lists:zip (lists:sort (PrevSchedWallTime),
                     lists:sort (SchedWallTime)),
  [ scheduler_utilization_gauge (Id, PrevActive, PrevTotal, Active, Total)
    || {{Id, PrevActive, PrevTotal}, {Id, Active, Total}} <- Pairs ].

%% Build one gauge entry; utilization is the active-time delta as a
%% truncated percentage of the total-time delta (0 when no time passed).
scheduler_utilization_gauge (Id, PrevActive, PrevTotal, Active, Total) ->
  Utilization =
    case Total - PrevTotal of
      0 -> 0;
      TotalDiff -> trunc (((Active - PrevActive) / TotalDiff) * 100.0)
    end,
  { gauge, ["scheduler_", integer_to_list (Id), "_utilization"], Utilization }.
%%--------------------------------------------------------------------
%%% Test functions
%%--------------------------------------------------------------------
-ifdef (TEST).
-include_lib ("eunit/include/eunit.hrl").
-endif.
%%%------------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @author <NAME> <<EMAIL>>
%%% @copyright (C) 2015, <NAME>, <NAME>
%%% @doc
%%% Moves the frontiers of the clusters so that functions can
%%% be extracted from them. This is done by reducing the size
%%% of the common clusters, and expanding the size of the
%%% exclusive clusters.
%%% @end
%%% Created : 2 Jun 2015 by <NAME>
%%%------------------------------------------------------------------------
-module(fix_frontiers).
-export([fix_frontiers/5, delete_trivial_clusters/1, node_exports_vars/2]).
%%-----------------------------------------------------------------------
%% @doc
%% Moves the frontiers of the clusters to places where function
%% calls can be used to divide the trees.
%% This function updates both the Mapping and the CommCluster
%% dictionary. The right exclusive clusters are generated from the
%% fixed Mapping at {@link tree_clustering:cluster/3}
%% @end
%%-----------------------------------------------------------------------
-spec fix_frontiers(Pass :: 1 | 2,
		    Tree1 :: tree:tree(), Tree2 :: tree:tree(),
		    Mapping :: da_map:da_map(tree:tree_node(),
					     tree:tree_node()),
		    CommCluster :: cluster_dict:cluster_dict(tree:tree_node())) ->
			   {cluster_dict:cluster_dict(tree:tree_node()),
			    da_map:da_map(tree:tree_node(),
					  tree:tree_node())}.
fix_frontiers(Pass, Tree1, Tree2, Mapping, CommCluster) ->
    %% Build the per-cluster fixer once, then fold it over every
    %% common cluster, threading the mapping as the accumulator.
    FixFun = fix_cluster(Pass, Tree1, Tree2),
    cluster_dict:growing_map_fold(FixFun, Mapping, CommCluster).
%%-----------------------------------------------------------------------
%% @doc
%% Produces a function that, when growing_map_folded on the common cluster
%% dictionary with the mapping as accumulator, it fixes the frontiers
%% in both the common cluster dictionary and the accumulator.
%% @see cluster_dict:growing_map_fold/3
%% @end
%%-----------------------------------------------------------------------
-spec fix_cluster(Pass :: 1 | 2,
		  Tree1 :: tree:tree(), Tree2 :: tree:tree()) ->
		     (fun ((Cluster :: cluster:cluster(tree:tree_node()),
			    Mapping :: da_map:da_map(tree:tree_node(),
						     tree:tree_node())) ->
			     {[cluster:cluster(tree:tree_node())], none | cluster:cluster(tree:tree_node()),
			      da_map:da_map(tree:tree_node(), tree:tree_node())})).
fix_cluster(Pass, Tree1, Tree2) ->
    %% Close over the pass number and both trees; the fold supplies the
    %% cluster and the mapping accumulator.
    fun (Cl, Map) ->
	    fix_cluster_mapfold(Pass, Tree1, Tree2, Cl, Map)
    end.
%%-----------------------------------------------------------------------
%% @doc
%% It fixes the frontiers in both the current cluster and the
%% accumulator. Only the first wrong frontier found is detached; the
%% growing_map_fold re-visits the resulting clusters until none remain.
%% @end
%%-----------------------------------------------------------------------
-spec fix_cluster_mapfold(Pass :: 1 | 2,
			  Tree1 :: tree:tree(), Tree2 :: tree:tree(),
			  Cluster :: cluster:cluster(tree:tree_node()),
			  Mapping :: da_map:da_map(tree:tree_node(),
						   tree:tree_node())) ->
				 {[cluster:cluster(tree:tree_node())],
				  none | cluster:cluster(tree:tree_node()),
				  da_map:da_map(tree:tree_node(),
						tree:tree_node())}.
fix_cluster_mapfold(Pass, Tree1, Tree2, Cluster, Mapping) ->
    WrongFrontiers = get_wrong_frontiers_fun(Pass, Tree1, Tree2, Cluster),
    case WrongFrontiers of
	[] -> {[], Cluster, Mapping};
	[FirstWrong | _Rest] -> detach_node_pair(FirstWrong,
						 {Cluster, Mapping})
    end.
%%-----------------------------------------------------------------------
%% @doc
%% Returns a list of the node pairs that produce a wrong frontier.
%% @end
%%-----------------------------------------------------------------------
-spec get_wrong_frontiers_fun(Pass :: 1 | 2,
			      Tree1 :: tree:tree(), Tree2 :: tree:tree(),
			      Cluster :: cluster:cluster(tree:tree_node())) ->
				     [tree:tree_node()].
get_wrong_frontiers_fun(Pass, Tree1, Tree2, Cluster) ->
    IsWrong = is_wrong_frontier_filter(Pass, Tree1, Tree2, Cluster),
    %% usort removes duplicated node pairs before filtering.
    Candidates = lists:usort(cluster:get_nodes(Cluster)),
    lists:filter(IsWrong, Candidates).
%%-----------------------------------------------------------------------
%% @doc
%% Produces a function that returns true when applied to a node pair
%% that produces a wrong frontier, it returns false otherwise.
%% @end
%%-----------------------------------------------------------------------
-spec is_wrong_frontier_filter(Pass :: 1 | 2,
			       Tree1 :: tree:tree(), Tree2 :: tree:tree(),
			       Cluster :: cluster:cluster(tree:tree_node())) ->
				      fun((tree:tree_node()) -> boolean()).
is_wrong_frontier_filter(Pass, Tree1, Tree2, Cluster) ->
    fun (Pair) -> is_wrong_frontier(Pass, Pair, Tree1, Tree2, Cluster) end.
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when applied to a node pair that produces a wrong
%% frontier, it returns false otherwise. The checks are evaluated
%% lazily via orelse, cheapest/most decisive first.
%% @end
%%-----------------------------------------------------------------------
-spec is_wrong_frontier(Pass :: 1 | 2,
			NodePair :: tree:tree_node(),
			Tree1 :: tree:tree(), Tree2 :: tree:tree(),
			Cluster :: cluster:cluster(tree:tree_node())) ->
			       boolean().
is_wrong_frontier(Pass, NodePair, Tree1, Tree2, Cluster) ->
    {Node1, Node2} = tree:get_pair_tuple(NodePair),
    % Parents come back as 'error' (no parent, i.e. root) or {ok, Parent}.
    OkParent1 = tree:get_parent(Node1, Tree1),
    OkParent2 = tree:get_parent(Node2, Tree2),
    % A frontier is wrong if it would split a behaviour_info/1
    % declaration, cut badly towards the parents or the children, or
    % (pass 2 only) break an artificial block / lose exported variables.
    breaks_behaviour_info(Node1, Tree1)
	orelse breaks_behaviour_info(Node2, Tree2)
	orelse are_parents_wrong_frontier(Cluster, NodePair, OkParent1, OkParent2)
	orelse are_children_wrong_frontier(Tree1, Tree2, Cluster, Node1, Node2)
	orelse is_wrong_frontier_in_second_pass(Pass, Tree1, Tree2, Cluster,
						Node1, Node2, OkParent1, OkParent2).
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when applied to a node pair that produces a wrong
%% frontier in terms of the second pass (only checked when Pass = 2;
%% any other pass always yields false).
%% @end
%%-----------------------------------------------------------------------
-spec is_wrong_frontier_in_second_pass(
	Pass :: 1 | 2, Tree1 :: tree:tree(), Tree2 :: tree:tree(),
	Cluster :: cluster:cluster(tree:tree_node()),
	Node1 :: tree:tree_node(), Node2 :: tree:tree_node(),
	PNode1 :: 'error' | {'ok', tree:tree_node()},
	PNode2 :: 'error' | {'ok', tree:tree_node()}) -> boolean().
is_wrong_frontier_in_second_pass(2, Tree1, Tree2, Cluster, Node1,
				 Node2, OkParent1, OkParent2) ->
    case frontier_breaks_artificial_block(Tree1, Tree2, Cluster,
					  Node1, Node2) of
	true -> true;
	false -> frontier_exports_vars(Tree1, Tree2, Cluster, Node1, Node2,
				       OkParent1, OkParent2)
    end;
is_wrong_frontier_in_second_pass(_Pass, _Tree1, _Tree2, _Cluster,
				 _Node1, _Node2, _PNode1, _PNode2) -> false.
%%-----------------------------------------------------------------------
%% @doc
%% Returns true if Node is the root of a function declaration whose
%% name is the atom `behaviour_info'.
%% @end
%%-----------------------------------------------------------------------
-spec breaks_behaviour_info(tree:tree_node(),tree:tree()) -> boolean().
breaks_behaviour_info(Node, Tree) ->
    NodeValue = tree:get_value(Node),
    case wrangler_syntax:type(NodeValue) of
	function ->
	    has_behaviour_info_name(wrangler_syntax:function_name(NodeValue),
				    Tree);
	_ -> false
    end.

%% Returns true when the referenced function-name node is the atom
%% `behaviour_info'.
has_behaviour_info_name(NameRef, Tree) ->
    {ok, NameNode} = tree:get_node(NameRef, Tree),
    NameValue = tree:get_value(NameNode),
    case wrangler_syntax:type(NameValue) of
	atom -> wrangler_syntax:atom_value(NameValue) =:= behaviour_info;
	_ -> false
    end.
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when the node pair exports vars. Breaking through
%% here would imply that the exported vars are not accessible from
%% parent or sibling clusters.
%% @end
%%-----------------------------------------------------------------------
-spec frontier_exports_vars(Tree1 :: tree:tree(), Tree2 :: tree:tree(),
			    Cluster :: cluster:cluster(tree:tree_node()),
			    Node1 :: tree:tree_node(),
			    Node2 :: tree:tree_node(),
			    PNode1 :: 'error' | {'ok', tree:tree_node()},
			    PNode2 :: 'error' | {'ok', tree:tree_node()}) -> boolean().
frontier_exports_vars(Tree1, Tree2, Cluster, Node1, Node2, OkPNode1, OkPNode2) ->
    % Case 1: some child outside the cluster exports variables.
    any_children_not_in_cluster_export_vars(Tree1, Tree2, Cluster, Node1, Node2)
	% Case 2: this pair sits on a frontier towards its parents
	% (both parents outside the cluster), is not an artificial block
	% on both sides (handled later), and itself exports variables.
	orelse (parents_not_in_cluster(Cluster, OkPNode1, OkPNode2)
		andalso (not (is_artificial_block(Node1) andalso
			      is_artificial_block(Node2)))
		andalso (node_exports_vars(Node1, Node2, Tree1, Tree2))).
%%-----------------------------------------------------------------------
%% @doc
%% Returns true if either Node1 (assumed to belong to Tree1),
%% or Node2 (assumed to belong to Tree2), export variables.
%% Short-circuits: Tree2 is only inspected when Tree1's node does not
%% export anything.
%% @end
%%-----------------------------------------------------------------------
-spec node_exports_vars(Node1 :: tree:tree_node(), Node2 :: tree:tree_node(),
			Tree1 :: tree:tree(), Tree2 :: tree:tree()) -> boolean().
node_exports_vars(Node1, Node2, Tree1, Tree2) ->
    case node_exports_vars(Node1, Tree1) of
	true -> true;
	false -> node_exports_vars(Node2, Tree2)
    end.
%%-----------------------------------------------------------------------
%% @doc
%% Returns true if Node (assumed to belong to Tree) exports
%% variables, i.e. its AST has a non-empty exported-variables list.
%% @end
%%-----------------------------------------------------------------------
-spec node_exports_vars(Node :: tree:tree_node(), Tree :: tree:tree()) ->
			       boolean().
node_exports_vars(Node, Tree) ->
    Ast = ast_tree:tree_to_ast(Node, Tree),
    [] =/= api_refac:exported_vars(Ast).
%%-----------------------------------------------------------------------
%% @doc
%% It returns true if either Node1 or Node2 have children
%% out of the cluster that export vars, except if they are the
%% root of an artificial block (which is solved later).
%% @end
%%-----------------------------------------------------------------------
-spec any_children_not_in_cluster_export_vars(
	Tree1 :: tree:tree(), Tree2 :: tree:tree(),
	Cluster :: cluster:cluster(tree:tree_node()),
	Node1 :: tree:tree_node(),
	Node2 :: tree:tree_node()) -> boolean().
any_children_not_in_cluster_export_vars(Tree1, Tree2, Cluster, Node1, Node2) ->
    %% Children of the two sides are paired positionally.
    ChildPairs = lists:zip(tree:get_children(Node1, Tree1),
			   tree:get_children(Node2, Tree2)),
    Pred = is_not_in_cluster_and_exports_vars(Cluster, Tree1, Tree2),
    lists:any(Pred, ChildPairs).
%%-----------------------------------------------------------------------
%% @doc
%% Returns a function that when applied to a tuple with two nodes
%% returns true if the nodes are out of the cluster and they export
%% vars, except if they are the root of an artificial block
%% (which is solved later).
%% @end
%%-----------------------------------------------------------------------
-spec is_not_in_cluster_and_exports_vars(
	Cluster :: cluster:cluster(tree:tree_node()),
	Tree1 :: tree:tree(),
	Tree2 :: tree:tree()) -> fun(({Node1 :: tree:tree_node(),
				       Node2 :: tree:tree_node()}) ->
					    boolean()).
is_not_in_cluster_and_exports_vars(Cluster, Tree1, Tree2) ->
    fun ({Child1, Child2}) ->
	    Pair = tree:create_node_pair(Child1, Child2),
	    case cluster:has_node(Pair, Cluster) of
		true -> false;
		false ->
		    %% Same short-circuit order as an andalso chain:
		    %% artificial-block check before the AST inspection.
		    NotArtificial = not (is_artificial_block(Child1)
					 andalso is_artificial_block(Child2)),
		    NotArtificial
			andalso node_exports_vars(Child1, Child2, Tree1, Tree2)
	    end
    end.
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when PNode1 and PNode2 (parents of a node of the
%% cluster) exist and are out of the cluster. Two missing parents
%% (both roots) yield false; a single missing parent yields true.
%% @end
%%-----------------------------------------------------------------------
-spec parents_not_in_cluster(Cluster :: cluster:cluster(tree:tree_node()),
			     PNode1 :: 'error' | {'ok', tree:tree_node()},
			     PNode2 :: 'error' | {'ok', tree:tree_node()}) ->
				    boolean().
parents_not_in_cluster(_Cluster, error, error) ->
    false;
parents_not_in_cluster(Cluster, {ok, Parent1}, {ok, Parent2}) ->
    ParentPair = tree:create_node_pair(Parent1, Parent2),
    not cluster:has_node(ParentPair, Cluster);
parents_not_in_cluster(_Cluster, _OnlyOneParent, _) ->
    true.
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when the node pair breaks an artificial block.
%% This should only happen in the case when the frontier is at the
%% bottom of a common cluster, since the mapping algorithm already
%% tries to maximise the number of common nodes, and the artificial
%% blocks must always be common to both trees.
%% @end
%%-----------------------------------------------------------------------
-spec frontier_breaks_artificial_block(Tree1 :: tree:tree(), Tree2 :: tree:tree(),
				       Cluster :: cluster:cluster(tree:tree_node()),
				       Node1 :: tree:tree_node(),
				       Node2 :: tree:tree_node()) -> boolean().
frontier_breaks_artificial_block(Tree1, Tree2, Cluster, Node1, Node2) ->
    %% Evaluate the child check first (same order as the original
    %% andalso) and only then ask whether the pair is an artificial block.
    case any_children_not_in_cluster(Tree1, Tree2, Cluster, Node1, Node2) of
	false -> false;
	true -> are_artificial_block(Node1, Node2)
    end.
%%-----------------------------------------------------------------------
%% @doc
%% Returns true if Node carries the `is_artificial_block' property
%% set to true.
%% @end
%%-----------------------------------------------------------------------
-spec is_artificial_block(tree:tree_node()) -> boolean().
is_artificial_block(Node) ->
    {ok, true} =:= tree:get_property(is_artificial_block, Node).
%%-----------------------------------------------------------------------
%% @doc
%% Returns true if both Node1 and Node2 are an artificial block.
%% @end
%%-----------------------------------------------------------------------
-spec are_artificial_block(tree:tree_node(),_) -> boolean().
are_artificial_block(Node1, Node2) ->
    %% lists:all/2 short-circuits on the first false, matching andalso.
    lists:all(fun is_artificial_block/1, [Node1, Node2]).
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when the node pair has a frontier with any of its
%% children. It is only applicable to node pairs.
%% @end
%%-----------------------------------------------------------------------
-spec any_children_not_in_cluster(Tree1 :: tree:tree(),
				  Tree2 :: tree:tree(),
				  Cluster :: cluster:cluster(tree:tree_node()),
				  Node1 :: tree:tree_node(),
				  Node2 :: tree:tree_node()) ->
					 boolean().
any_children_not_in_cluster(Tree1, Tree2, Cluster, Node1, Node2) ->
    ChildPairs = lists:zip(tree:get_children(Node1, Tree1),
			   tree:get_children(Node2, Tree2)),
    lists:any(is_not_in_cluster(Cluster), ChildPairs).
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when the node pair has a wrong frontier with its parents,
%% it returns false otherwise. It is only applicable to node pairs.
%% PNode1 and PNode2 are expected to be the result of calling
%% {@link tree:get_parent/2} on each side of the node pair.
%% @end
%%-----------------------------------------------------------------------
-spec are_parents_wrong_frontier(Cluster :: cluster:cluster(TreeNode),
				 NodePair :: TreeNode,
				 PNode1 :: 'error' | {'ok', tree:tree_node()},
				 PNode2 :: 'error' | {'ok', tree:tree_node()}) ->
					boolean() when
      TreeNode :: tree:tree_node().
% Both sides are roots: there is no parent frontier to get wrong.
are_parents_wrong_frontier(_Cluster, _NodePair, error, error) -> false;
are_parents_wrong_frontier(Cluster, NodePair, {ok, PNode1}, {ok, PNode2}) ->
    PNodePair = tree:create_node_pair(PNode1, PNode2),
    % A frontier towards the parents is wrong when the pair is not an
    % extractable unit (not an expression/function, or it would split a
    % function call from its name) AND the parents lie outside the
    % cluster (i.e. there really is a frontier here).
    ((not ast_tree:is_expression_or_function(NodePair))
     orelse ast_tree:breaks_funname(NodePair, PNode1)
     orelse ast_tree:breaks_funname(NodePair, PNode2)
    ) andalso (not cluster:has_node(PNodePair, Cluster));
% Only one side has a parent: wrong unless the pair is extractable.
are_parents_wrong_frontier(_Cluster, NodePair, _, _) ->
    not ast_tree:is_expression_or_function(NodePair).
%%-----------------------------------------------------------------------
%% @doc
%% Returns true when the node pair has a wrong frontier with its
%% children, it returns false otherwise. It is only applicable to node
%% pairs. A wrong frontier exists when not every child pair is an
%% acceptable boundary (see is_child_wrong_frontier/3).
%% @end
%%-----------------------------------------------------------------------
-spec are_children_wrong_frontier(Tree1 :: tree:tree(),
				  Tree2 :: tree:tree(),
				  Cluster :: cluster:cluster(tree:tree_node()),
				  Node1 :: tree:tree_node(),
				  Node2 :: tree:tree_node()) ->
					 boolean().
are_children_wrong_frontier(Tree1, Tree2, Cluster, Node1, Node2) ->
    ChildIsAcceptable = is_child_wrong_frontier(Cluster, Node1, Node2),
    ChildPairs = lists:zip(tree:get_children(Node1, Tree1),
			   tree:get_children(Node2, Tree2)),
    not lists:all(ChildIsAcceptable, ChildPairs).
%%-----------------------------------------------------------------------
%% @doc
%% Produces a predicate that, when applied to a tuple of two nodes,
%% returns true if and only if the node pair they form does NOT belong
%% to Cluster.
%% @end
%%-----------------------------------------------------------------------
-spec is_not_in_cluster(Cluster :: cluster:cluster(tree:tree_node())) ->
	 (fun (({Node1 :: tree:tree_node(), Node2 :: tree:tree_node()}) -> boolean())).
is_not_in_cluster(Cluster) ->
    fun ({Left, Right}) ->
	    Pair = tree:create_node_pair(Left, Right),
	    not cluster:has_node(Pair, Cluster)
    end.
%%-----------------------------------------------------------------------
%% @doc
%% Produces a function that, when applied to a tuple with a pair of
%% nodes, it returns true if and only if (is an expression or a function,
%% and does not separate a function call from its name), or belongs to
%% Cluster.
%%
%% NOTE: despite the name, the returned predicate yields true for child
%% pairs that are ACCEPTABLE boundaries; are_children_wrong_frontier/5
%% negates lists:all/2 over it to detect a wrong frontier.
%% @end
%%-----------------------------------------------------------------------
-spec is_child_wrong_frontier(Cluster :: cluster:cluster(tree:tree_node()),
			      PNode1 :: tree:tree_node(),
			      PNode2 :: tree:tree_node()) ->
				     (fun(({Node1 :: tree:tree_node(),
					    Node2 :: tree:tree_node()}) ->
					    boolean())).
is_child_wrong_frontier(Cluster, PNode1, PNode2) ->
    fun ({Node1, Node2}) ->
	    NodePair = tree:create_node_pair(Node1, Node2),
	    % Acceptable when both sides are extractable and neither
	    % splits a function call from its name w.r.t. the parents...
	    (ast_tree:is_expression_or_function(Node1)
	     andalso ast_tree:is_expression_or_function(Node2)
	     andalso (not ast_tree:breaks_funname(NodePair, PNode1))
	     andalso (not ast_tree:breaks_funname(NodePair, PNode2)))
	    % ...or when the child pair is inside the cluster anyway
	    % (no frontier between parent and child).
		orelse cluster:has_node(NodePair, Cluster)
    end.
%%-----------------------------------------------------------------------
%% @doc
%% It removes a node pair from the Cluster and from the Mapping,
%% splitting the cluster into the pieces left over after the removal.
%% @end
%%-----------------------------------------------------------------------
-spec detach_node_pair(NodePair :: tree:tree_node(),
		       {Cluster :: cluster:cluster(tree:tree_node()),
			Mapping :: da_map:da_map(tree:tree_node(),
						 tree:tree_node())}) ->
			      {[cluster:cluster(tree:tree_node())], none,
			       da_map:da_map(tree:tree_node(),
					     tree:tree_node())}.
detach_node_pair(NodePair, {Cluster, Mapping}) ->
    {LeftNode, _RightNode} = tree:get_pair_tuple(NodePair),
    SplitClusters = cluster:remove_split(NodePair, Cluster),
    %% Deleting by the left node is enough: the da_map pairs left
    %% nodes with right nodes.
    NewMapping = da_map:delete_by_key(LeftNode, Mapping),
    {SplitClusters, none, NewMapping}.
%%-----------------------------------------------------------------------
%% @doc
%% Removes from the CommClusterDict and detaches from the Mapping every
%% cluster with a single node in it.
%% @end
%%-----------------------------------------------------------------------
-spec delete_trivial_clusters({CommClusterDict :: cluster_dict:cluster_dict(
						    tree:tree_node()),
			       Mapping :: da_map:da_map(tree:tree_node(),
							tree:tree_node())}) ->
				      {cluster_dict:cluster_dict(tree:tree_node()),
				       da_map:da_map(tree:tree_node(), tree:tree_node())}.
delete_trivial_clusters({Comm, Mapping}) ->
    {TrivialClusters, RemainingComm} =
	cluster_dict:extract_trivial_clusters(Comm),
    %% Every node pair of every trivial cluster must also disappear
    %% from the mapping; deleting by the left node is enough.
    TrivialNodePairs = lists:flatmap(fun cluster:get_nodes/1,
				     TrivialClusters),
    NewMapping =
	lists:foldl(fun (NodePair, MapAcc) ->
			    {LeftNode, _RightNode} =
				tree:get_pair_tuple(NodePair),
			    da_map:delete_by_key(LeftNode, MapAcc)
		    end, Mapping, TrivialNodePairs),
    {RemainingComm, NewMapping}.
%% -----------------------------------------------------------------------------
%%
%% Hamcrest Erlang.
%%
%% Copyright (c) 2010 <NAME> (<EMAIL>)
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
%% THE SOFTWARE.
%% -----------------------------------------------------------------------------
%% @author <NAME> <<EMAIL>>
%% @copyright 2010 <NAME>.
%% @doc Hamcrest API
%% @reference See <a href="http://code.google.com/p/hamcrest/">Hamcrest</a>
%% for more information.
%% -----------------------------------------------------------------------------
-module(hamcrest).
-include("hamcrest_internal.hrl").
-export_type([matchspec/0]).
-export([is_matcher/1,
match/2,
match/3,
check/2,
assert_that/2,
assert_that/3,
describe/2,
heckle/2]).
%%%============================================================================
%%% Types
%%%============================================================================
-opaque matchspec() :: #'hamcrest.matchspec'{}.
%%%============================================================================
%%% API
%%%============================================================================
%% @doc Returns `true' if the specified term is a valid hamcrest matcher,
%% otherwise `false'. A matcher is any `hamcrest.matchspec' record.
-spec(is_matcher/1 :: (any()) -> boolean()).
is_matcher(Term) ->
    erlang:is_record(Term, 'hamcrest.matchspec').
-spec(match/2 :: (term(), matchspec()) -> boolean()).
%% @doc Equivalent to match/3 with a no-op cleanup function.
match(Value, MatchSpec) ->
    Noop = fun() -> ok end,
    match(Value, MatchSpec, Noop).
-spec(match/3 :: (term(), matchspec(),
                  fun(() -> any())) -> boolean()).
%% @doc Runs the matcher against `Value' and converts the outcome to a
%% boolean: `true' when the assertion succeeds, `false' when
%% `assert_that/3' raises (any class). `RunAfter' is always executed by
%% `assert_that/3' before the result propagates.
%% Uses `try'/`catch' instead of old-style `(catch Expr) == true' so an
%% exception cannot be confused with a returned value and the error
%% classes are handled explicitly.
match(Value, MatchSpec, RunAfter) ->
    try assert_that(Value, MatchSpec, RunAfter) of
        true -> true;
        _Other -> false
    catch
        _Class:_Reason -> false
    end.
-spec(assert_that/3 :: (term(), matchspec(),
                        fun(() -> any())) -> 'true' | no_return()).
%% @doc Like assert_that/2, but always runs the zero-arity `RunAfter'
%% cleanup fun — whether the assertion succeeds or raises — before the
%% result (or exception) propagates to the caller.
assert_that(Value, MatchSpec, RunAfter) when is_function(RunAfter, 0) ->
  try assert_that(Value, MatchSpec)
  % 'after' guarantees the cleanup runs on both success and failure.
  after RunAfter()
  end.
-spec(assert_that/2 :: (term(), matchspec()) -> 'true' | no_return()).
%% @doc Evaluates `MatchSpec' against `Value'. Returns `true' on
%% success; on failure raises an error carrying the failure description
%% produced by check/2.
assert_that(Value, MatchSpec) ->
  case check(Value, MatchSpec) of
    {assertion_failed, _}=Failure ->
      erlang:error(Failure);
    true ->
      true;
    Other ->
      % Defensive branch: check/2 only returns 'true' or
      % {assertion_failed, _}, so reaching here signals an internal
      % protocol violation.
      exit({what_the, Other})
  end.
-spec(check/2 :: (term(), matchspec()) -> 'true' | {assertion_failed, term()}).
%% @doc Applies the matcher fun of `MatchSpec' to `Value' and
%% normalises the outcome to `true' or `{assertion_failed, Description}'.
%% Exceptions raised by the matcher are converted into failures as well.
check(Value, #'hamcrest.matchspec'{ matcher=MatchFunc }=MatchSpec) ->
  % Optional observation hook configured via the application env.
  heckle(MatchSpec, Value),
  % 'try ... of' keeps the result clauses outside the protected region,
  % so only the matcher call itself is caught.
  try MatchFunc(Value) of
    true -> true;
    {assertion_failed, _} ->
      {assertion_failed, describe(MatchSpec, Value)};
    % An override carries its own failure payload; describe it verbatim.
    {assertion_override, _}=Err ->
      {assertion_failed, describe(MatchSpec, Err)};
    false ->
      {assertion_failed, describe(MatchSpec, Value)};
    What ->
      {assertion_failed, What}
  catch
    % NOTE(review): old-style Class:Reason drops the stacktrace —
    % presumably kept for pre-OTP-21 compatibility; confirm before
    % modernising to Class:Reason:Stack.
    Class:Reason ->
      {assertion_failed, describe(MatchSpec, {Class, Reason})}
  end.
-spec(heckle/2 :: (matchspec(), any()) -> any()).
%% @doc Optional observation hook: when the application environment key
%% `heckle' is set to `[Module, Function, ExtraArgs]', invokes
%% `Module:Function(MatchSpec, Actual, ExtraArgs...)'. Otherwise does
%% nothing and returns `ok'.
heckle(MatchSpec, Actual) ->
    case application:get_env(hamcrest, heckle) of
        {ok, [Mod, Fun, ExtraArgs]} ->
            apply(Mod, Fun, [MatchSpec, Actual | ExtraArgs]);
        _NotConfigured ->
            ok
    end.
%% @doc Builds the failure-report proplist for a matchspec and the
%% actual value. Zero-arity funs are replaced by their fun_info
%% metadata so the report stays printable.
describe(MatchSpec, Actual) when is_function(Actual, 0) ->
    describe(MatchSpec, erlang:fun_info(Actual));
describe(#'hamcrest.matchspec'{ desc=Description, expected=Expected }, Actual) ->
    [{expected, Expected},
     {actual, Actual},
     {matcher, Description}].
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
% This module implements the querying of the vtree. It follows the rules of
% the standard R-tree. The only difference is that you can query with multiple
% bounding boxes at the same time. This is useful if you have queries over
% the dateline and want to decompose it into two separate bounding boxes
-module(vtree_search).
-include("vtree.hrl").
-export([search/4, all/3, count_search/2, count_all/1]).
-ifdef(makecheck).
-compile(nowarn_export_all).
-compile(export_all).
-endif.
-type foldfun() :: fun((#kv_node{} | #kp_node{}, any()) -> {ok | stop, any()}).
-spec search(Vt :: #vtree{}, Boxes :: [mbb()], FoldFun :: foldfun(),
             InitAcc :: any()) -> any().
% Fold FoldFun over every KV node whose key intersects at least one of
% the query boxes. An empty tree just returns the initial accumulator.
search(#vtree{root=nil}, _Boxes, _FoldFun, InitAcc) ->
    InitAcc;
search(Vt, Boxes, FoldFun, InitAcc) ->
    {_OkOrStop, FinalAcc} =
        traverse(Vt, [Vt#vtree.root], Boxes, FoldFun, {ok, InitAcc}),
    FinalAcc.
% No bounding box given, return everything
-spec all(Vt :: #vtree{}, FoldFun :: foldfun(), InitAcc :: any()) -> any().
all(#vtree{root=nil}, _FoldFun, InitAcc) ->
    InitAcc;
all(Vt, FoldFun, InitAcc) ->
    {_OkOrStop, FinalAcc} =
        traverse_all(Vt, [Vt#vtree.root], FoldFun, {ok, InitAcc}),
    FinalAcc.
% Returns only the number of matching geometries (and not the geometries
% themselves)
-spec count_search(Vt :: #vtree{}, Boxes :: [mbb()]) -> non_neg_integer().
count_search(Vt, Boxes) ->
    Increment = fun(_Node, Count) -> {ok, Count + 1} end,
    search(Vt, Boxes, Increment, 0).
% Returns the number of all geometries (and not the geometries themselves)
-spec count_all(Vt :: #vtree{}) -> non_neg_integer().
count_all(Vt) ->
    Increment = fun(_Node, Count) -> {ok, Count + 1} end,
    all(Vt, Increment, 0).
% The accumulator is always a 2-tuple with either 'ok' or 'stop' and the
% actual value; 'stop' short-circuits the remaining traversal.
-spec traverse(Vt :: #vtree{}, Nodes :: [#kv_node{} | #kp_node{}],
               Boxes :: [mbb()], FoldFun :: foldfun(),
               InitAcc :: {ok |stop, any()}) -> {ok | stop, any()}.
% The fold function asked to stop: propagate without visiting more nodes.
traverse(_Vt, _Nodes, _Boxes, _FoldFun, {stop, Acc}) ->
    {stop, Acc};
% No more siblings at this level.
traverse(_Vt, [], _Boxes, _FoldFun, OkAcc) ->
    OkAcc;
% Leaf level: the siblings are KV nodes, handle them all together.
traverse(Vt, [#kv_node{}|_]=Nodes, Boxes, FoldFun, OkAcc) ->
    traverse_kv(Vt#vtree.less, Nodes, Boxes, FoldFun, OkAcc);
% Inner (KP) node: descend only when some query box intersects its MBB,
% narrowing the boxes to the intersecting subset on the way down.
traverse(Vt, [#kp_node{}=Node|Rest], Boxes, FoldFun, OkAcc) ->
    #vtree{
        less = Less,
        fd = Fd
    } = Vt,
    Result = case boxes_intersect_mbb(Boxes, Node#kp_node.key, Less) of
                 [] ->
                     % No box intersects, stop moving deeper
                     OkAcc;
                 IntersectingBoxes ->
                     Children = vtree_io:read_node(
                                  Fd, Node#kp_node.childpointer),
                     % Move deeper
                     traverse(Vt, Children, IntersectingBoxes, FoldFun, OkAcc)
             end,
    % Move sideways
    traverse(Vt, Rest, Boxes, FoldFun, Result).
%% Leaf-level traversal: apply the fold function to every KV node whose
%% key intersects at least one of the query boxes.
-spec traverse_kv(Less :: lessfun(), Nodes :: [#kv_node{}], Boxes :: [mbb()],
                  FoldFun :: foldfun(), InitAcc :: {ok |stop, any()}) ->
                  {ok | stop, any()}.
traverse_kv(_Less, _Nodes, _Boxes, _FoldFun, {stop, Acc}) ->
    {stop, Acc};
traverse_kv(_Less, [], _Boxes, _FoldFun, OkAcc) ->
    OkAcc;
traverse_kv(Less, [Node|Rest], Boxes, FoldFun, {ok, Acc}) ->
    %% FoldFun itself returns the next {ok | stop, Acc} tuple, so a 'stop'
    %% it emits is honored by the first clause on the next recursion.
    Result = case any_box_intersects_mbb(
        Boxes, Node#kv_node.key, Less) of
        true ->
            FoldFun(Node, Acc);
        false ->
            {ok, Acc}
    end,
    traverse_kv(Less, Rest, Boxes, FoldFun, Result).
% Traverse the full tree without any bounding box
-spec traverse_all(Vt :: #vtree{}, Nodes :: [#kv_node{} | #kp_node{}],
                   FoldFun :: foldfun(), InitAcc :: {ok |stop, any()}) ->
                   {ok | stop, any()}.
%% The fold function asked to stop: propagate unchanged.
traverse_all(_Vt, _Nodes, _FoldFun, {stop, Acc}) ->
    {stop, Acc};
traverse_all(_Vt, [], _FoldFun, OkAcc) ->
    OkAcc;
%% A KV head means the whole list is leaf-level.
traverse_all(_Vt, [#kv_node{}|_]=Nodes, FoldFun, OkAcc) ->
    traverse_all_kv(Nodes, FoldFun, OkAcc);
traverse_all(Vt, [#kp_node{}=Node|Rest], FoldFun, OkAcc) ->
    Children = vtree_io:read_node(Vt#vtree.fd, Node#kp_node.childpointer),
    % Move deeper
    Result = traverse_all(Vt, Children, FoldFun, OkAcc),
    % Move sideways
    traverse_all(Vt, Rest, FoldFun, Result).
%% Fold over every KV node unconditionally, honoring a 'stop' signal.
-spec traverse_all_kv(Nodes :: [#kv_node{}], FoldFun :: foldfun(),
                      InitAcc :: {ok |stop, any()}) ->
                      {ok | stop, any()}.
traverse_all_kv(_Nodes, _FoldFun, {stop, _Value}=Stopped) ->
    Stopped;
traverse_all_kv([], _FoldFun, OkAcc) ->
    OkAcc;
traverse_all_kv([#kv_node{}=KvNode|Others], FoldFun, {ok, Value}) ->
    %% FoldFun returns the next {ok | stop, Acc} tuple itself.
    traverse_all_kv(Others, FoldFun, FoldFun(KvNode, Value)).
% Returns true if any of the boxes intersects the MBB.
% Replaces the hand-rolled recursion with lists:any/2, which
% short-circuits on the first intersecting box exactly as before
% (and returns false for an empty list of boxes).
-spec any_box_intersects_mbb(Boxes :: [mbb()], Mbb :: mbb(),
                             Less :: lessfun()) -> boolean().
any_box_intersects_mbb(Boxes, Mbb, Less) ->
    lists:any(fun(Box) ->
                  vtree_util:intersect_mbb(Box, Mbb, Less) =/= overlapfree
              end, Boxes).
% Returns all boxes that intersect a given MBB. A 'nil' MBB is treated
% as intersecting everything, so all boxes are kept.
-spec boxes_intersect_mbb(Boxes :: [mbb()], Mbb :: mbb() | nil,
                          Less :: lessfun()) -> [mbb()].
boxes_intersect_mbb(Boxes, nil, _Less) ->
    Boxes;
boxes_intersect_mbb(Boxes, Mbb, Less) ->
    [Box || Box <- Boxes,
            vtree_util:intersect_mbb(Box, Mbb, Less) =/= overlapfree].
%% Copyright 2018 Erlio GmbH Basel Switzerland (http://erl.io)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(vmq_diversity_script_sup).
-behaviour(supervisor).
%% API functions
-export([start_link/0,
start_script/1,
reload_script/1,
stop_script/1,
stats/0]).
%% Supervisor callbacks
-export([init/1]).
-define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
permanent, 5000, Type, [Mod]}).
%%%===================================================================
%%% API functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Starts the supervisor
%%
%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
%% Start the supervisor, registered locally under the module name.
start_link() ->
    RegName = {local, ?MODULE},
    supervisor:start_link(RegName, ?MODULE, []).
%% Start a worker child for the given script path; the child id is
%% {vmq_diversity_script, Script} so it can be looked up later.
start_script(Script) ->
    ChildId = {vmq_diversity_script, Script},
    ChildSpec = ?CHILD(ChildId, vmq_diversity_script, worker, [Script]),
    supervisor:start_child(?MODULE, ChildSpec).
%% Ask the running worker for the given script to reload it.
%% Returns {error, script_not_found} if no live child matches.
reload_script(Script) ->
    ChildId = {vmq_diversity_script, Script},
    Children = supervisor:which_children(?MODULE),
    case lists:keyfind(ChildId, 1, Children) of
        {_Id, Pid, worker, _Mods} when is_pid(Pid) ->
            vmq_diversity_script:reload_script(Pid);
        _NotRunning ->
            {error, script_not_found}
    end.
%% Terminate and remove the child for the given script. Errors from
%% terminate_child (e.g. not_found) are returned unchanged.
stop_script(Script) ->
    ChildId = {vmq_diversity_script, Script},
    case supervisor:terminate_child(?MODULE, ChildId) of
        ok ->
            supervisor:delete_child(?MODULE, ChildId);
        Error ->
            Error
    end.
%% Collect per-script stats from every live script worker.
stats() ->
    Collect =
        fun({{vmq_diversity_script, Script}, Pid, worker, _Mods}, Acc)
              when is_pid(Pid) ->
                [{Script, vmq_diversity_script:stats(Pid)}|Acc];
           (_Other, Acc) ->
                %% Skip dead/restarting children and anything else.
                Acc
        end,
    lists:foldl(Collect, [], supervisor:which_children(?MODULE)).
%%%===================================================================
%%% Supervisor callbacks
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Whenever a supervisor is started using supervisor:start_link/[2,3],
%% this function is called by the new process to find out about
%% restart strategy, maximum restart frequency and child
%% specifications.
%%
%% @spec init(Args) -> {ok, {SupFlags, [ChildSpec]}} |
%% ignore |
%% {error, Reason}
%% @end
%%--------------------------------------------------------------------
%% Supervisor callback: one_for_one strategy, max 5 restarts in 10
%% seconds, no static children (scripts are added dynamically).
init([]) ->
    RestartStrategy = {one_for_one, 5, 10},
    {ok, {RestartStrategy, []}}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% -------------------------------------------------------------------
%%
%% jam: Date/time processing.
%%
%% Copyright (c) 2016 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(jam).
-include("jam_internal.hrl").
-export([
compile/1, compile/2,
round_fractional_seconds/1, offset_round_fractional_seconds/1,
expand/2, increment/2, increment_time/2, offset_increment_time/2,
increment_date/2,
convert_tz/2, offset_convert_tz/2,
is_valid/1, is_valid/2,
is_complete/1, is_complete_date/1, is_complete_time/1,
normalize/1, offset_normalize/1,
to_epoch/1, to_epoch/2,
from_epoch/1, from_epoch/2,
tz_to_seconds/1]).
-ifdef(TEST).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-endif.
%%% Compiling
%% The compiling step converts the strings captured by parsing into a
%% (possibly valid) date and/or time. The resulting tuples will not be
%% the same as Erlang's date/time tuples because this library permits
%% fractional seconds and returns time zones as an explicit value for
%% further processing.
%% Per ISO 8601, a time such as "14:10" does *not* imply
%% "14:10:00". Instead, the first string is described as "reduced
%% accuracy". Similarly, "2016-06" and "2016-W15" are examples of
%% reduced accuracy dates.
%%
%% The compiling functions take an optional accuracy parameter,
%% `minimum_accuracy'.
%%
%% If the result is not sufficiently accurate to meet any minimum
%% accuracy requirement supplied, the atom `incomplete_time' or
%% `incomplete_date' will be returned instead of a new structure.
%%
%% The processing functions also take a default time zone as a
%% parameter, expressed as ISO 8601-compliant timezone strings ("Z",
%% "+04:30", "-05", etc). This will be ignored if a time zone is
%% already part of the parsed data.
%%% Validation
%% The compiling step only exercises as much knowledge about "real"
%% times and dates as is necessary (e.g., knowing what years are leap
%% years to properly interpret ordinal dates). A time such as "25:15"
%% is a possible outcome of the parsing and compiling steps, so
%% validation functions are supplied which will return `true' or
%% `false' if the date/time is legitimate.
%% If the `leap_second_midnight' parameter is supplied to the
%% validation function, a second of `60' will only be allowed if the
%% hour is 23 and minute is 59, but generally speaking we will not
%% always know what time zone the time is being expressed as, so that
%% requirement will not be enforced by default (and thus 60 seconds
%% will be always considered valid without that parameter).
%%
%% No more than one leap second has ever been added to the same day,
%% so this library will treat any number of seconds greater than 60 as
%% invalid.
%%% Normalization
%% There are two unusual times which may arise.
%%
%% The first, permitted by ISO 8601, is "24:00". This is *not* the
%% same as "00:00", at least when also attached to a date. "2016-06-15
%% 24:00" is the same as "2016-06-16 00:00" and normalization will
%% convert the former into the latter.
%%
%% The second unusual time value: seconds > 59.
%%
%% Occasionally leap seconds
%% (https://en.wikipedia.org/wiki/Leap_second) will be added at the
%% end of a day. UNIX/POSIX time will silently "absorb" that into the
%% first second of the following day
%% (https://en.wikipedia.org/wiki/Unix_time#Leap_seconds).
%%
%% So, seconds == 60 are allowed in the validation step and converted
%% to 00 seconds in the following minute in the normalization step.
%%% Conversion to epoch seconds
%% Functions will be provided to convert the time/date structures to
%% UNIX epoch time at a customizable level of granularity. Since
%% fractional times are supported, it will be possible to express
%% millisecond (or larger, or smaller) values in time strings.
% list_to_integer that passes 'undefined' through untouched.
l2i(Value) ->
    case Value of
        undefined -> undefined;
        Digits -> list_to_integer(Digits)
    end.
% undefined_to_zero: map 'undefined' to 0, pass anything else through.
u2z(Value) ->
    case Value of
        undefined -> 0;
        Int -> Int
    end.
-define(YEAR_ACCURACY, 20).
-define(MONTH_ACCURACY, 18).
-define(DAY_ACCURACY, 16).
-define(HOUR_ACCURACY, 14).
-define(MINUTE_ACCURACY, 12).
-define(SECOND_ACCURACY, 10).
%% Map an accuracy atom to its numeric rank (see the *_ACCURACY macros
%% above). Larger numbers are coarser, so a comparison such as
%% "Minimum < ?DAY_ACCURACY" reads as "finer than day accuracy".
%% 'undefined' (no minimum requested) is passed through.
accuracy(undefined) ->
    undefined;
accuracy(year) ->
    ?YEAR_ACCURACY;
accuracy(month) ->
    ?MONTH_ACCURACY;
accuracy(day) ->
    ?DAY_ACCURACY;
accuracy(hour) ->
    ?HOUR_ACCURACY;
accuracy(minute) ->
    ?MINUTE_ACCURACY;
accuracy(second) ->
    ?SECOND_ACCURACY.
%% Build a #date{} record from a {Year, Month, Day} triple.
make_date_record({Y, M, D}) ->
    #date{year = Y, month = M, day = D}.
%% Normalize parsed structures before accuracy checking and compiling.
preprocess(#parsed_ordinal{year=Year, day=Day}, _Options) ->
    %% Unlike other date and time formats, ordinal dates are
    %% necessarily complete (to the day, anyway) so we'll convert to a
    %% calendar date before checking accuracy
    make_date_record(calculate_ordinal_date({Year, Day}));
preprocess(#parsed_time{timezone=undefined}=Time, Options) ->
    %% Replace an undefined timezone with a default if passed as an option
    Time#parsed_time{timezone=maybe_default_timezone(proplists:get_value(default_timezone, Options))};
preprocess(Tuple, _Options) ->
    %% Anything else passes through unchanged.
    Tuple.
%% Parse a default timezone string (ISO 8601) if one was supplied.
maybe_default_timezone(Timezone) ->
    case Timezone of
        undefined -> undefined;
        TzString -> jam_iso8601:parse_tz(TzString)
    end.
%% Compile a parsed record with default options (no minimum accuracy,
%% no default timezone).
-spec compile(parsed_time()) -> time_record();
             (parsed_datetime()) -> datetime_record();
             (parsed_date()) -> date_record();
             (parsed_timezone()) -> timezone();
             ('undefined') -> 'undefined'.
%% @equiv compile(Record, [])
compile(Record) ->
    compile(Record, []).
%% @doc Convert the string-based output from a parser into numeric
%% values.
%%
%% Parameters that can be supplied in the optional `Options' list:
%%
%% <ul>
%% <li>`{minimum_accuracy, Accuracy}'</li>
%% <li>`{default_timezone, TZstring}'</li>
%% </ul>
%%
%% Accuracy values are atoms ranging from `year' to `second' that
%% indicate the granularity that must be captured in the output
%% of the parser.
%%
%% For example, specifying `minute' as a minimum accuracy means that a
%% legitimate ISO 8601 string like "2010-05-03T06" could not be
%% compiled and would result in an `incomplete_date' error.
%%
%% The default timezone string must be ISO 8601-compliant.
-spec compile(parsed_time(), list()) -> time_record();
             (parsed_datetime(), list()) -> datetime_record();
             (parsed_date(), list()) -> date_record()|datetime_record();
             (parsed_timezone(), list()) -> timezone();
             ('undefined', list()) -> 'undefined'.
compile(undefined, _Options) ->
    undefined;
compile(#parsed_timezone{}=TZ, _Options) ->
    compile_timezone(TZ);
%% For each remaining shape: preprocess (ordinal->calendar, default tz),
%% enforce any minimum accuracy, then build the numeric records.
compile(#parsed_datetime{date=Date, time=Time}, Options) ->
    finish_compile(check_accuracy(preprocess(Date, Options),
                                  preprocess(Time, Options),
                                  accuracy(proplists:get_value(minimum_accuracy, Options))));
compile(#parsed_calendar{}=Date, Options) ->
    finish_compile(check_accuracy(preprocess(Date, Options), undefined,
                                  accuracy(proplists:get_value(minimum_accuracy, Options))));
compile(#parsed_ordinal{}=Date, Options) ->
    finish_compile(check_accuracy(preprocess(Date, Options), undefined,
                                  accuracy(proplists:get_value(minimum_accuracy, Options))));
compile(#parsed_time{}=Time, Options) ->
    finish_compile(check_accuracy(undefined, preprocess(Time, Options),
                                  accuracy(proplists:get_value(minimum_accuracy, Options)))).
%% `check_accuracy/3' will make certain any minimum accuracy is met.
%% Returns {Date, Time} on success, or 'incomplete_date' /
%% 'incomplete_time' on failure. Clause order matters: the checks run
%% from coarser to finer granularity.
check_accuracy(Date, Time, undefined) ->
    %% 3rd parameter is minimum accuracy. If there is no minimum
    %% specified, we don't need to check anything here
    {Date, Time};
check_accuracy(_Date, undefined, Minimum) when Minimum < ?DAY_ACCURACY ->
    %% If we don't have a time, and our minimum accuracy is time-related, bail
    incomplete_time;
check_accuracy(#parsed_calendar{month=undefined}, _Time, Minimum)
  when Minimum < ?YEAR_ACCURACY ->
    incomplete_date;
check_accuracy(#parsed_calendar{day=undefined}, _Time, Minimum)
  when Minimum < ?MONTH_ACCURACY ->
    %% Perhaps we should respond `incomplete_time' if the minimum
    %% accuracy is hour/minute/second and we have an incomplete date
    %% but I prefer this
    incomplete_date;
check_accuracy(_Date, #parsed_time{minute=undefined, fraction=undefined}, Minimum)
  when Minimum < ?HOUR_ACCURACY ->
    %% If we have a fractional time, we'll consider it good enough to
    %% make minute/second accuracy, so this clause only triggers on
    %% undefined fractions
    incomplete_time;
check_accuracy(_Date, #parsed_time{second=undefined, fraction=undefined}, Minimum)
  when Minimum < ?MINUTE_ACCURACY ->
    %% If we have a defined fraction, we'll consider it good enough to
    %% make minute/second accuracy, so this clause only triggers on
    %% undefined fractions
    incomplete_time;
check_accuracy(Date, Time, _Minimum) ->
    {Date, Time}.
%% Given a possibly-incomplete compiled record, increment it by some
%% number of units (possibly a negative number). The unit is the finest
%% populated field of the record: seconds for a complete time, minutes
%% if seconds are undefined, days for a complete date, and so on (see
%% offset_increment_time/2 and increment_date/2).
-spec increment(datetime_record(), integer()) -> datetime_record();
               (date_record(), integer()) -> date_record();
               (time_record(), integer()) -> time_record();
               ('undefined', integer()) -> 'undefined'.
increment(Anything, 0) ->
    %% Incrementing by zero is always a no-op.
    Anything;
increment(#datetime{date=Date,time=Time}, Incr) ->
    id_and_increment(Date, Time, Incr);
increment(#date{}=Date, Incr) ->
    increment_date(Date, Incr);
increment(#time{}=Time, Incr) ->
    increment_time(Time, Incr);
increment(undefined, _Incr) ->
    undefined.
%% Branch on whether the #time record is unpopulated
id_and_increment(Date, #time{hour=Hour}=Time, Incr) when Hour /= undefined ->
    %% The time carries information: apply the increment to the time and
    %% fold any resulting day carry into the date.
    {DateAdj, NewTime} = offset_increment_time(Time, Incr),
    expand_to_datetime(increment_date(Date, DateAdj), NewTime);
id_and_increment(Date, Time, Incr) ->
    %% Empty time: the increment applies directly to the date.
    expand_to_datetime(increment_date(Date, Incr), Time).
%% Increment a time record, discarding any day carry.
increment_time(#time{}=Time, 0) ->
    Time;
increment_time(#time{}=Time, Amount) ->
    {_DayCarry, Adjusted} = offset_increment_time(Time, Amount),
    Adjusted.
%% Increment a time record by Incr units of its finest populated field
%% (hours, minutes, or seconds). Returns {DayCarry, NewTime}, where
%% DayCarry is the number of days the time wrapped past midnight.
%% Clause order matters: finer fields are checked for 'undefined' first.
offset_increment_time(#time{}=Time, 0) ->
    {0, Time};
offset_increment_time(#time{hour=Hour,
                            minute=undefined}=Time, Incr) ->
    %% Hour-only time: increment hours directly.
    {DateAdj, NewHour} = wrap(Hour + Incr, hour),
    {DateAdj, Time#time{hour=NewHour}};
offset_increment_time(#time{hour=Hour, minute=Minute,
                            second=undefined}=Time, Incr) ->
    %% Minute accuracy: increment minutes, carrying into hours and days.
    {HourAdj, NewMinute} = wrap(Minute + Incr, minute),
    {DateAdj, NewHour} = wrap(Hour + HourAdj, hour),
    {DateAdj, Time#time{hour=NewHour, minute=NewMinute}};
offset_increment_time(#time{hour=Hour, minute=Minute,
                            second=Second}=Time, Incr) ->
    %% Full accuracy: increment seconds, cascading carries upwards.
    {MinuteAdj, NewSecond} = wrap(Second + Incr, second),
    {HourAdj, NewMinute} = wrap(Minute + MinuteAdj, minute),
    {DateAdj, NewHour} = wrap(Hour + HourAdj, hour),
    {DateAdj, Time#time{hour=NewHour, minute=NewMinute,
                        second=NewSecond}}.
%% Increment a date record by Incr units of its finest populated field
%% (years, months, or days). Clause order matters: finer fields are
%% checked for 'undefined' first.
increment_date(#date{}=Date, 0) ->
    Date;
increment_date(#date{year=Year,
                     month=undefined}=Date, Incr) ->
    %% We do not attempt to block things like incrementing the year
    %% into negative values
    Date#date{year=Year + Incr};
increment_date(#date{year=Year,
                     month=Month,
                     day=undefined}=Date, Incr) ->
    %% Month accuracy: wrap months and carry into the year.
    {YearAdj, NewMonth} = wrap(Month + Incr, month),
    NewYear = Year + YearAdj,
    Date#date{year=NewYear, month=NewMonth};
increment_date(#date{year=Year,
                     month=Month,
                     day=Day}=Date, Incr) ->
    %% Full date: defer to calendar-aware day arithmetic.
    {NewYear, NewMonth, NewDay} = jam_math:add_date({Year, Month, Day}, Incr),
    Date#date{year=NewYear, month=NewMonth, day=NewDay}.
%% All errors are atoms; rather than create a partial datetime record
%% with an error atom nested inside, make certain we return the error
%% directly.
expand_to_datetime(DateError, _Time) when is_atom(DateError) ->
    DateError;
expand_to_datetime(_Date, TimeError) when is_atom(TimeError) ->
    TimeError;
expand_to_datetime(Date, Time) ->
    #datetime{date = Date, time = Time}.
%% Given a target accuracy, populate all undefined fields larger or
%% equal to that accuracy to 1 (for date fields) or 0 (for time
%% fields). So, e.g., afterwards anything populated to `minute'
%% accuracy may still have `undefined' for the seconds field, but
%% every larger span will be an integer value.
-spec expand(compiled_record(), accuracy()) -> compiled_record();
            ('undefined', accuracy()) -> 'undefined'.
expand(undefined, _Target) ->
    undefined;
expand(Record, Target) when is_atom(Target) ->
    %% Convert the accuracy atom to its numeric rank before recursing.
    expand_2(Record, accuracy(Target)).
%% Names are hard. Step 2 of the expansion process forces a datetime
%% structure when necessary (a time-level target applied to a bare
%% date), and regardless continues to step 3.
expand_2(#date{}=Date, Target) when Target < ?DAY_ACCURACY ->
    %% Must create a time record for time-based expansion requirements
    expand_to_datetime(expand_3(Date, Target), expand_3(#time{}, Target));
expand_2(#datetime{date=Date, time=undefined}, Target) when Target < ?DAY_ACCURACY ->
    %% Defer to the previous function clause to create a new time record
    expand_2(Date, Target);
expand_2(#datetime{date=Date, time=Time}, Target) ->
    expand_to_datetime(expand_3(Date, Target), expand_3(Time, Target));
expand_2(Record, Target) ->
    expand_3(Record, Target).
%% Step 3 of the expansion process: populate lower bound values to
%% meet the desired accuracy. We only need concern ourselves with date
%% or time records; step 2 will handle any datetime records.
%% Clause order matters: each field is filled in coarse-to-fine order.
expand_3(#date{year=undefined}, _Target) ->
    %% A date with no year cannot be expanded meaningfully.
    incomplete_date;
expand_3(#date{}=Date, ?YEAR_ACCURACY) ->
    Date;
expand_3(#date{month=undefined}=Date, Target) when Target < ?YEAR_ACCURACY->
    expand_3(Date#date{month=1}, Target);
expand_3(#date{day=undefined}=Date, Target) when Target < ?MONTH_ACCURACY ->
    Date#date{day=1};
%% We will consider a fractional value to satisfy any expansion target
expand_3(#time{fraction=Fraction}=Time, _Target) when Fraction /= undefined ->
    Time;
expand_3(#time{hour=undefined}=Time, Target) when Target =< ?HOUR_ACCURACY ->
    expand_3(Time#time{hour=0}, Target);
expand_3(#time{minute=undefined}=Time, Target) when Target =< ?MINUTE_ACCURACY ->
    expand_3(Time#time{minute=0}, Target);
expand_3(#time{second=undefined}=Time, Target) when Target =< ?SECOND_ACCURACY ->
    expand_3(Time#time{second=0}, Target);
expand_3(Record, _Target) ->
    %% Already accurate enough for the target; nothing to fill in.
    Record.
%% Round any fractional seconds to the nearest whole second, discarding
%% the day-rollover flag from the offset_ variant.
-spec round_fractional_seconds(compiled_record()) -> compiled_record();
                              ('undefined') -> 'undefined'.
round_fractional_seconds(Record) ->
    element(2, offset_round_fractional_seconds(Record)).
%% The integer returned as the first element of the tuple indicates
%% whether the time rolled over to midnight: 1 for a 1 day increase, 0
%% otherwise.
%%
%% If a datetime tuple is provided, the date element will be
%% incremented in the return value if applicable.
-spec offset_round_fractional_seconds(compiled_record()) ->
                                             {0|1, compiled_record()};
                                     ('undefined') -> {0, 'undefined'}.
offset_round_fractional_seconds(undefined) ->
    {0, undefined};
offset_round_fractional_seconds(#time{fraction=undefined}=Time) ->
    {0, Time};
offset_round_fractional_seconds(#time{fraction=#fraction{value=Frac}}=Time)
  when Frac >= 0.5 ->
    %% Round up: add one second, which may carry through minutes/hours
    %% and roll the date over at midnight.
    {DateBump, NewTime} =
        jam_math:add_time(jam_erlang:to_erlangish_time(Time), {0, 0, 1}),
    {DateBump, jam_erlang:tuple_to_record(Time#time{fraction=undefined}, NewTime)};
offset_round_fractional_seconds(#time{}=Time) ->
    %% Fraction < 0.5: round down by dropping the fraction. (Previously
    %% this case fell through to the catch-all and kept the fraction,
    %% inconsistent with the other rounding branches.)
    {0, Time#time{fraction=undefined}};
offset_round_fractional_seconds(#datetime{date=Date, time=Time}) ->
    {DateAdj, NewTime} = offset_round_fractional_seconds(Time),
    NewDate = jam_math:add_date(jam_erlang:to_erlangish_date(Date), DateAdj),
    {DateAdj, #datetime{date=jam_erlang:tuple_to_record(#date{}, NewDate),
                        time=NewTime}};
offset_round_fractional_seconds(DateTime) ->
    %% Bare dates (no time component) pass through unchanged.
    {0, DateTime}.
%% Note: if the time provided as the first argument does not include a
%% timezone, this will return `undefined'
-spec convert_tz(compiled_record(), string()) -> compiled_record();
                ('undefined', string()) -> 'undefined'.
convert_tz(Record, TZ) ->
    %% Discard the day-rollover flag from the offset_ variant.
    element(2, offset_convert_tz(Record, TZ)).
-spec offset_convert_tz(compiled_record(), string()) -> {-1|0|1, compiled_record()};
                       ('undefined', string()) -> {0, 'undefined'}.
%% Like other `offset_` functions this will return a tuple with an
%% initial value that indicates whether the date changed as a result
%% of changing time zones, and if supplied, the date will be
%% transformed.
%%
%% The new timezone argument must be a valid ISO 8601 timezone, so: +
%% or - is required, and hours/minutes must be 2 digits with an
%% optional : separator.
%%
%% Inputs without a timezone yield {0, undefined}: there is nothing
%% meaningful to convert from.
offset_convert_tz(undefined, _NewTz) ->
    {0, undefined};
offset_convert_tz(#datetime{time=#time{timezone=undefined}}, _NewTz) ->
    {0, undefined};
offset_convert_tz(#datetime{date=Date, time=Time}, NewTz) ->
    %% Convert the time, then fold any day carry into the date.
    {DateAdj, NewTime} = offset_convert_tz(Time, NewTz),
    NewDate = jam_math:add_date(jam_erlang:to_erlangish_date(Date), DateAdj),
    {DateAdj, #datetime{date=jam_erlang:tuple_to_record(#date{}, NewDate), time=NewTime}};
offset_convert_tz(#time{timezone=undefined}, _NewTz) ->
    {0, undefined};
offset_convert_tz(#time{}=Time, NewTz) ->
    convert_compiled_tz(Time, compile(jam_iso8601:parse_tz(NewTz))).
%% Convert a time from its current (compiled) timezone to another.
%% Note that the stored hours/minutes in a #timezone{} are the amounts
%% to ADD to reach UTC (see compile_timezone/1 below).
convert_compiled_tz(#time{timezone=TzRec}=Time, TzRec) ->
    %% Same timezone record on both sides: nothing to do.
    {0, Time};
convert_compiled_tz(#time{timezone=#timezone{hours=AddH, minutes=AddM}}=Time,
                    #timezone{label="Z"}=NewTz) ->
    %% The old timezone has integer values expressed as values to add
    %% to reach UTC, so this case is simple
    {DateAdj, NewTime} =
        jam_math:add_time(jam_erlang:to_erlangish_time(Time), {AddH, AddM}),
    {DateAdj, jam_erlang:tuple_to_record(Time#time{timezone=NewTz}, NewTime)};
convert_compiled_tz(#time{timezone=#timezone{hours=OldAddH, minutes=OldAddM}}=Time,
                    #timezone{hours=NewAddH, minutes=NewAddM}=NewTz) ->
    %% We convert to UTC first, then to the new timezone by inverting
    %% the sign on the values to add.
    {UTCAdj, UTCTime} =
        jam_math:add_time(jam_erlang:to_erlangish_time(Time), {OldAddH, OldAddM}),
    {NewAdj, NewTime} =
        jam_math:add_time(UTCTime, {-NewAddH, -NewAddM}),
    %% Day carries from both steps accumulate.
    {NewAdj + UTCAdj, jam_erlang:tuple_to_record(Time#time{timezone=NewTz}, NewTime)}.
%% Turn a {Date, Time} pair from check_accuracy into the final record,
%% or pass an accuracy-failure atom straight through.
finish_compile(Error) when is_atom(Error) ->
    Error;
finish_compile({Date, Time}) ->
    case {Date, Time} of
        {undefined, _} -> compile_time(Time);
        {_, undefined} -> compile_date(Date);
        _Both -> #datetime{date = compile_date(Date),
                           time = compile_time(Time)}
    end.
%% Convert a parsed calendar date's string fields into integers;
%% anything else (e.g. an already-built #date{}) passes through.
compile_date(#parsed_calendar{year=Y, month=M, day=D}) ->
    Numeric = {l2i(Y), l2i(M), l2i(D)},
    jam_erlang:tuple_to_record(#date{}, Numeric);
compile_date(AlreadyCompiled) ->
    AlreadyCompiled.
%% Convert a parsed time's string fields into integers, distributing any
%% fractional component across the undefined finer-grained fields.
compile_time(undefined) ->
    undefined;
compile_time(#parsed_time{fraction=undefined, timezone=TZ}=Time) ->
    {Hour, Minute, Second} = jam_erlang:to_erlangish_time(Time),
    jam_erlang:tuple_to_record(#time{timezone=compile_timezone(TZ)},
                               {l2i(Hour), l2i(Minute), l2i(Second)});
compile_time(#parsed_time{fraction=#parsed_fraction{value=Fractional},
                          timezone=TZ}=Time) ->
    {Hour, Minute, Second} = jam_erlang:to_erlangish_time(Time),
    %% Figure out what to do with the fractional value. Whatever is
    %% left once we start applying it has to get passed down the chain.
    %% E.g. "14.5" (fractional hours) becomes 30 minutes; "14:10.5"
    %% becomes 30 seconds; a fraction on full seconds is kept as-is.
    Frac = {list_to_float("0." ++ Fractional), length(Fractional)},
    {Min, FracRemainder} = maybe_fractional(Minute, Frac),
    {Sec, FracRemainder2} = maybe_fractional(Second, FracRemainder),
    jam_erlang:tuple_to_record(
      #time{fraction=jam_erlang:tuple_to_record(#fraction{}, FracRemainder2),
            timezone=compile_timezone(TZ)},
      {l2i(Hour), Min, Sec}).
%% The canonical UTC timezone record: zero offset, labeled "Z".
utc_timezone_record() ->
    #timezone{label="Z", hours=0, minutes=0}.
%% We want the integer `timezone' fields to represent adjustments
%% necessary to convert to UTC, so India, with a +05:30 time zone,
%% will map to `{timezone, "+05:30", -5, -30}' or `{timezone, "+0530", -5, -30}'.
compile_timezone(undefined) ->
    undefined;
compile_timezone(#parsed_timezone{label="Z"}) ->
    utc_timezone_record();
compile_timezone(#parsed_timezone{label=TZ, hours=TZH, minutes=TZM}) ->
    %% Negate the parsed hour so the stored value is the UTC adjustment;
    %% list_to_integer handles the leading +/- sign in the hour string.
    HourOffset = -l2i(TZH),
    #timezone{label=TZ, hours=HourOffset,
              minutes=timezone_minute_offset(HourOffset, TZM)}.
%% Must match sign of minutes to sign of hour (the parsed minute string
%% carries no sign of its own; an undefined minute becomes 0).
timezone_minute_offset(Hour, Minute) ->
    Magnitude = u2z(l2i(Minute)),
    case Hour < 0 of
        true -> -Magnitude;
        false -> Magnitude
    end.
%% Apply the fractional component once we figure out what the original
%% time string left unspecified. If the field (minutes or seconds) is
%% undefined, the fraction refers to the next-coarser unit: convert it
%% to whole units of this field and pass the leftover fraction down.
%% If the field is defined, leave the fraction untouched.
maybe_fractional(undefined, {Frac, Precision}) ->
    %% 60 sub-units per unit (minutes per hour / seconds per minute).
    FloatUnits = 60.0 * Frac,
    IntUnits = trunc(FloatUnits),
    Remainder = {FloatUnits - IntUnits, Precision},
    {IntUnits, Remainder};
maybe_fractional(Value, Frac) ->
    {list_to_integer(Value), Frac}.
%% Allowing `is_complete/1` to take times, dates, and datetimes can
%% lead to unexpected consequences such as a date returning true when
%% the client expected to validate a completed datetime
%% record. Instead, give dates and times their own function.
-spec is_complete(datetime_record()|parsed_datetime()) -> boolean().
is_complete(#datetime{date=Date, time=Time}) ->
    is_complete_date(Date) andalso is_complete_time(Time);
is_complete(#parsed_datetime{date=Date, time=Time}) ->
    is_complete_date(Date) andalso is_complete_time(Time);
is_complete(_) ->
    %% Anything that is not a datetime (including bare dates/times).
    false.
%% A time is complete when it has second-level accuracy. Clause order
%% matters: the undefined-second checks must precede the record-wide
%% matches.
-spec is_complete_time(time_record()|parsed_time()) -> boolean().
is_complete_time(#time{second=undefined}) ->
    false;
is_complete_time(#time{}) ->
    true;
is_complete_time(#parsed_time{second=undefined}) ->
    false;
is_complete_time(#parsed_time{}) ->
    true;
is_complete_time(_) ->
    false.
%% A date is complete when it has day-level accuracy. Ordinal dates are
%% always complete by construction. Clause order matters.
-spec is_complete_date(date_record()|parsed_date()) -> boolean().
is_complete_date(#date{day=undefined}) ->
    false;
is_complete_date(#date{}) ->
    true;
is_complete_date(#parsed_ordinal{}) ->
    true;
is_complete_date(#parsed_calendar{day=undefined}) ->
    false;
is_complete_date(#parsed_calendar{}) ->
    true;
is_complete_date(_) ->
    false.
%% @equiv is_valid(Record, [])
-spec is_valid(compiled_record()|timezone()|'undefined') -> boolean().
is_valid(Record) ->
    is_valid(Record, []).
%% The only flag for the options list for `is_valid/2' is
%% `leap_second_midnight'. If you want to enforce that :60 is only
%% valid at midnight UTC, convert the time to UTC first with
%% `convert_tz'
-spec is_valid(compiled_record()|timezone()|'undefined', list()) -> boolean().
is_valid(undefined, _Options) ->
    false;
is_valid(#datetime{date=Date, time=Time}, Options) ->
    is_valid_date(Date, Options) andalso is_valid_time(Time, Options);
is_valid(#date{}=Date, Options) ->
    is_valid_date(Date, Options);
is_valid(#time{}=Time, Options) ->
    is_valid_time(Time, Options);
is_valid(#timezone{}=TZ, Options) ->
    is_valid_timezone(TZ, Options).
%% Validate a possibly-reduced-accuracy date. Clause order matters:
%% year-only and year+month dates are checked before full dates.
is_valid_date(#date{year=Year, month=undefined, day=undefined}, _Options)
  when is_integer(Year) ->
    %% Any integer year is acceptable on its own.
    true;
is_valid_date(#date{month=Month, day=undefined}, _Options) ->
    Month > 0 andalso Month < 13;
is_valid_date(#date{}=Date, _Options) ->
    %% Full date: defer to the stdlib calendar check.
    calendar:valid_date(jam_erlang:to_erlangish_date(Date)).
%% A minute with 61 seconds (thus `second=60') can happen when leap
%% seconds are added. Leap seconds are added at midnight UTC.
%%
%% We don't always know what timezone we're evaluating, thus it is
%% configurable whether or not the `is_valid_time/2' function will
%% enforce the midnight-only constraint (second parameter).
midnight_leap_second(HMS, MustBeMidnight) when is_boolean(MustBeMidnight) ->
    case HMS of
        {23, 59, 60} -> true;
        {_, _, 60} -> not MustBeMidnight;
        _ -> false
    end;
midnight_leap_second(_HMS, _Other) ->
    false.
%% Validate an {Hour, Minute, Second} tuple. 24:00:00 is allowed per
%% ISO 8601; second 60 is only allowed subject to the leap-second
%% policy (see midnight_leap_second/2).
is_valid_time_tuple({Hour, Minute, Second}, LeapSecondMustBeMidnight) ->
    Hour > -1
        andalso (Hour < 24 orelse (Hour == 24 andalso Minute + Second == 0))
        andalso Minute > -1 andalso Minute < 60
        andalso Second > -1
        andalso (Second < 60 orelse
                 midnight_leap_second({Hour, Minute, Second},
                                      LeapSecondMustBeMidnight)).
%% Validate a possibly-reduced-accuracy time. A timezone, if present,
%% is validated separately; undefined finer fields are treated as 0 so
%% reduced-accuracy times validate naturally.
is_valid_time(#time{timezone=TZ}=Time, Options) when TZ /= undefined ->
    %% Strip the timezone and validate the two pieces independently.
    is_valid_time(Time#time{timezone=undefined}, Options) andalso
        is_valid_timezone(TZ, Options);
is_valid_time(#time{hour=Hour, minute=undefined, second=undefined}, _Options) ->
    is_valid_time_tuple({Hour, 0, 0}, false);
is_valid_time(#time{hour=Hour, minute=Minute, second=undefined}, _Options) ->
    is_valid_time_tuple({Hour, Minute, 0}, false);
is_valid_time(#time{}=Time, Options) ->
    %% Full accuracy: the leap-second policy option applies here.
    is_valid_time_tuple(jam_erlang:to_erlangish_time(Time),
                        lists:member(leap_second_midnight, Options)).
%% As of this writing, the valid time zone range is from -1200 to
%% +1400. Since politicians love to mess with this, going to treat
%% 1500 as an absolute maximum and hope for the best.
%%
%% The hour/minute magnitudes are also checked as a (leap-second-free)
%% time tuple, so e.g. minutes > 59 are rejected.
is_valid_timezone(#timezone{hours=Hours, minutes=Minutes}=TZ, _Options) ->
    %% Pass `false' for the leap-second flag: is_valid_time_tuple/2
    %% expects a boolean there (the previous `[]' only worked because
    %% Second=0 short-circuits the leap-second branch).
    abs(tz_to_seconds(TZ)) =< 15 * 3600 andalso
        is_valid_time_tuple({abs(Hours), abs(Minutes), 0}, false).
%% Normalize 24:00 and leap-second times, discarding the day-rollover
%% flag from the offset_ variant.
-spec normalize('undefined') -> 'undefined';
               (date_record()) -> date_record();
               (time_record()) -> time_record();
               (datetime_record()) -> datetime_record().
normalize(Record) ->
    element(2, offset_normalize(Record)).
%% Normalize a record, returning {DayCarry, NewRecord} where DayCarry
%% indicates whether normalizing the time rolled the date forward.
-spec offset_normalize('undefined') -> {0, 'undefined'};
                      (date_record()) -> {integer(), date_record()};
                      (time_record()) -> {integer(), time_record()};
                      (datetime_record()) -> {integer(), datetime_record()}.
offset_normalize(undefined) ->
    {0, undefined};
offset_normalize(#datetime{date=Date, time=Time}) ->
    %% Normalize the time and fold any day carry into the date.
    {DateAdjust, NewTime} = normalize_time(Time),
    {DateAdjust, #datetime{date=normalize_date(Date, DateAdjust), time=NewTime}};
offset_normalize(#date{}=Date) ->
    %% Dates on their own need no normalization.
    {0, Date};
offset_normalize(#time{}=Time) ->
    normalize_time(Time).
%% Shift a date by the given number of days (a no-op for zero).
normalize_date(#date{}=Date, 0) ->
    Date;
normalize_date(#date{}=Date, Adjust) ->
    Shifted = jam_math:add_date(jam_erlang:to_erlangish_date(Date), Adjust),
    jam_erlang:tuple_to_record(#date{}, Shifted).
%% Allow for 24:00:00 per the ISO 8601 standard and the occasional
%% leap second.
%%
%% Returns {DayAdjustment, NormalizedTime}. Hour 24 wraps to hour 0 of
%% the next day. A seconds value of 60 (leap second) is normalized by
%% adding one second to HH:MM:59, which may itself roll over into the
%% next day. Any other time is returned untouched with adjustment 0.
normalize_time(#time{hour=24}=Time) ->
{1, Time#time{hour=0}};
normalize_time(#time{hour=Hour, minute=Minute, second=60}=Time) ->
{DateAdj, NewTime} =
jam_math:add_time({Hour, Minute, 59}, {0, 0, 1}),
{DateAdj, jam_erlang:tuple_to_record(Time, NewTime)};
normalize_time(Time) ->
{0, Time}.
%% Ordinal date calculations stolen from Wikipedia.
%%
%% Converts an ordinal {Year, DayOfYear} pair (integers or strings)
%% into a {Year, Month, DayOfMonth} calendar date via cumulative
%% per-month day offsets:
%%   To the day of Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
%%   Add             0  31  59  90 120 151 181 212 243 273 304 334
%%   Leap years      0  31  60  91 121 152 182 213 244 274 305 335
calculate_ordinal_date({Year, Day}) when is_list(Year) ->
    %% String form: convert both components, then recurse.
    calculate_ordinal_date({list_to_integer(Year), list_to_integer(Day)});
calculate_ordinal_date({Year, Day}) ->
    calculate_ordinal_date(Year, Day, calendar:is_leap_year(Year)).

calculate_ordinal_date(Year, Day, _Leap) when Day < 1 ->
    %% Out-of-range ordinal days below 1 fall into January, mirroring
    %% the original clause ordering (any Day < 32 mapped to month 1).
    {Year, 1, Day};
calculate_ordinal_date(Year, Day, Leap) ->
    %% Pick the month with the largest start offset strictly below the
    %% ordinal day; the remainder is the day of the month.
    Offsets = month_start_offsets(Leap),
    [{Offset, Month} | _] = [Entry || {Start, _} = Entry <- Offsets, Start < Day],
    {Year, Month, Day - Offset}.

%% Days elapsed before each month begins, ordered December-first so the
%% first entry whose offset lies below the ordinal day is the
%% containing month.
month_start_offsets(true) ->
    [{335, 12}, {305, 11}, {274, 10}, {244, 9}, {213, 8}, {182, 7},
     {152, 6}, {121, 5}, {91, 4}, {60, 3}, {31, 2}, {0, 1}];
month_start_offsets(false) ->
    [{334, 12}, {304, 11}, {273, 10}, {243, 9}, {212, 8}, {181, 7},
     {151, 6}, {120, 5}, {90, 4}, {59, 3}, {31, 2}, {0, 1}].
-spec to_epoch(date_record()) -> missing_time;
              (time_record()) -> missing_date;
              (datetime_record()) -> integer() | 'incomplete_datetime'.
%% Convert a record to a UNIX epoch value with whole-second precision;
%% equivalent to to_epoch(Record, 0). Bare dates and times cannot be
%% converted and yield `missing_time'/`missing_date'. The spec now
%% also includes `incomplete_datetime', which to_epoch/2 can return
%% via check_complete_before_conversion/3 but the old spec omitted.
to_epoch(Record) ->
    to_epoch(Record, 0).
%% 2nd argument is the power of 10 reflecting subsecond accuracy. For
%% example, if nanosecond values are required, the epoch value would
%% have to be multiplied by 10^9, so the argument would be 9.
-spec to_epoch(date_record(), integer()) -> missing_time;
              (time_record(), integer()) -> missing_date;
              (datetime_record(), integer()) -> integer() | 'incomplete_datetime'.
%% A #datetime{} must be complete (as judged by is_complete/1) before
%% conversion; otherwise check_complete_before_conversion/3 returns
%% `incomplete_datetime', a return value the old spec omitted.
to_epoch(#date{}, _Precision) ->
    missing_time;
to_epoch(#time{}, _Precision) ->
    missing_date;
to_epoch(#datetime{}=DateTime, Precision) ->
    check_complete_before_conversion(
      is_complete(DateTime),
      DateTime,
      %% precision digits -> integer multiplier (10^Precision)
      trunc(precision_to_mult(Precision))
     ).
%% When splitting an epoch value into two parts (`{Seconds,
%% FractionalSeconds}') we have to return the fractional seconds as a
%% zero-padded (on the left) string for later manipulation.
%%
%% `Precision' is the number of sub-second decimal digits carried by
%% `Integer'; the generated "~N..0B" format left-pads the remainder
%% with zeroes to exactly N digits.
split_epoch(Integer, Precision) ->
FormatString = lists:flatten(io_lib:format("~~~B..0B", [Precision])),
Divisor = trunc(precision_to_mult(Precision)),
{Integer div Divisor,
lists:flatten(io_lib:format(FormatString, [Integer rem Divisor]))}.
-spec from_epoch(non_neg_integer()) -> datetime_record().
%% Convert a whole-second UNIX epoch value to a UTC #datetime{};
%% equivalent to from_epoch(Epoch, 0).
from_epoch(Epoch) ->
from_epoch(Epoch, 0).
%% The 2nd argument indicates the number of digits "below" UTC epoch
%% seconds. For example, if the input value is microseconds, the 2nd
%% argument should be 6.
-spec from_epoch(non_neg_integer(), non_neg_integer()) -> datetime_record().
%% The epoch integer is split into whole seconds and a fractional
%% remainder; the remainder becomes a #fraction{} (or `undefined') on
%% the time, and the result always carries the UTC timezone record.
from_epoch(Epoch, Precision) ->
{EpochSeconds, Remainder} = split_epoch(Epoch, Precision),
Fraction = determine_epoch_fraction(Remainder, Precision),
TZ = utc_timezone_record(),
{Date, Time} = utc_seconds_to_universal_datetime(EpochSeconds),
#datetime{
date=jam_erlang:tuple_to_record(#date{}, Date),
time=jam_erlang:tuple_to_record(#time{fraction=Fraction,
timezone=TZ}, Time)
}.
%% The incoming precision is the number of digits beyond epoch
%% seconds. For example, if the epoch value contained microseconds,
%% there were originally 6 digits to the right of the UTC epoch second
%% value.
%%
%% The precision that gets stashed in the `#fraction{}' record is the
%% number of "significant" digits. Again assuming microseconds, if the
%% original epoch value were `946690215020100', `020100' would be the
%% `Remainder' argument (as a string) and there are 4 significant
%% digits (the last two zeroes are inconsequential) so the resulting
%% record should be `#fraction{value=0.0201, precision=4}'.
determine_epoch_fraction([], _Precision) ->
undefined;
determine_epoch_fraction(Remainder, Precision) ->
%% NOTE(review): string:strip/3 is deprecated in modern OTP in favor
%% of string:trim/3; left untouched here to preserve exact behavior.
Fraction = list_to_integer(Remainder) / precision_to_mult(Precision),
FractionPrecision = length(string:strip(Remainder, right, $0)),
#fraction{value=Fraction, precision=FractionPrecision}.
%% Convert UNIX epoch seconds into an Erlang {{Y,M,D},{H,Mi,S}} tuple.
%% ?GREGORIAN_MAGIC (defined elsewhere in this module) is presumably
%% the Gregorian-seconds value of 1970-01-01T00:00:00Z -- confirm at
%% its definition.
utc_seconds_to_universal_datetime(Seconds) ->
calendar:gregorian_seconds_to_datetime(Seconds + ?GREGORIAN_MAGIC).
%% Multiplier corresponding to a decimal precision: 10^Digits, as a
%% float (callers apply trunc/1 where an integer divisor is needed).
precision_to_mult(Digits) ->
    math:pow(10, Digits).
%% Guard for to_epoch/2: refuse to convert an incomplete datetime.
%% `Precision' here is already the integer multiplier (10^digits), not
%% the digit count. The result is the UNIX epoch seconds scaled by the
%% multiplier, plus the scaled fractional seconds, if any.
check_complete_before_conversion(false, _DateTime, _Precision) ->
incomplete_datetime;
check_complete_before_conversion(true, DateTime, Precision) ->
ErlangDateTime = convert_to_erlang(DateTime),
(calendar:datetime_to_gregorian_seconds(ErlangDateTime) - ?GREGORIAN_MAGIC)
* Precision + extra_precision(DateTime, Precision).
%% Scaled fractional-second contribution for to_epoch/2. `Precision'
%% is the integer multiplier (10^digits); a missing fraction adds 0.
extra_precision(#datetime{time=#time{fraction=undefined}}, _Precision) ->
0;
extra_precision(#datetime{time=#time{fraction=#fraction{value=Float}}}, Precision) ->
round(Float * Precision).
%% To do this properly requires both a date and a time with timezone,
%% because the gregorian/epoch conversion requires the time be
%% specified in UTC. XXX: Stricter enforcement?
%%
%% Convert records into Erlang calendar tuples. For a #datetime{} the
%% time is first shifted to UTC; any resulting day rollover is folded
%% into the date before the pair is converted.
convert_to_erlang(#datetime{date=Date, time=Time}) ->
{DateAdjust, NewTime} = timezone_to_utc(Time),
NewDate = normalize_date(Date, DateAdjust),
{convert_to_erlang(NewDate), convert_to_erlang(NewTime)};
convert_to_erlang(#date{}=Date) ->
jam_erlang:to_erlangish_date(Date);
convert_to_erlang(#time{}=Time) ->
jam_erlang:to_erlangish_time(Time).
%% Shift a time to UTC by applying its timezone's hour/minute
%% adjustments. Returns {DayWrap, NewTime}, where DayWrap is the day
%% adjustment caused by wrapping past midnight; the resulting time
%% carries the UTC timezone record. A time without a timezone is
%% returned unchanged.
timezone_to_utc(#time{timezone=undefined}=Time) ->
{0, Time};
timezone_to_utc(#time{hour=Hour, minute=Minute,
timezone=#timezone{hours=HourAdj, minutes=MinuteAdj}}=Time) ->
{Wrap, NewHour, NewMinute} = adjust_time({Hour, HourAdj}, {Minute, MinuteAdj}),
{Wrap, Time#time{timezone=utc_timezone_record(), hour=NewHour, minute=NewMinute}}.
%% Apply hour/minute adjustments with wrap-around: minutes are wrapped
%% first, any overflow carries into the hour, and the hour's own
%% overflow is returned as the day adjustment.
%% NOTE(review): u2z/1 is defined elsewhere in this module; it
%% presumably maps `undefined' adjustments to 0 -- confirm there.
adjust_time({Hour, HourAdj}, {Minute, MinuteAdj}) ->
{ExtraHourAdj, NewMinute} = wrap(Minute+u2z(MinuteAdj), minute),
{DayAdj, NewHour} = wrap(Hour+u2z(HourAdj)+ExtraHourAdj, hour),
{DayAdj, NewHour, NewMinute}.
%% Wrap a calendar component into range via jam_math:wrap/3. Judging
%% by the bounds passed, months appear to be 1-based (1..12) while the
%% time units are 0-based (0..23 hours, 0..59 minutes/seconds).
wrap(Int, month) ->
jam_math:wrap(Int, 13, 1);
wrap(Int, hour) ->
jam_math:wrap(Int, 24, 0);
wrap(Int, minute) ->
jam_math:wrap(Int, 60, 0);
wrap(Int, second) ->
jam_math:wrap(Int, 60, 0).
%% Convert to seconds. Must be the negation of the resulting integer
%% because the timezone record tracks the adjustment necessary to
%% convert to UTC, while users/developers will expect the same sign as
%% the original string
%% (e.g. per tz_offset_test_/0: {4,30} => -16200, {-12,-45} => 45900).
-spec tz_to_seconds(timezone()) -> integer().
tz_to_seconds(#timezone{hours=Hours, minutes=Minutes}) ->
-(Hours*3600 + Minutes*60).
-ifdef(TEST).
normalize_with_adjust_test_() ->
EquivWithAdjust = [
{#time{hour=0,minute=0,second=0},
#time{hour=24,minute=0,second=0}},
{#time{hour=0,minute=0,second=0},
#time{hour=23,minute=59,second=60}}
],
lists:map(fun({Normalized, Time}) ->
?_assertEqual({1, Normalized}, offset_normalize(Time))
end, EquivWithAdjust).
normalize_without_adjust_test_() ->
NoAdjust = [
%% Would not expect a datetime to be populated for
%% just a year, but let's make sure things don't blow
%% up
#datetime{date=#date{year=2016},
time=#time{}},
#datetime{date=#date{year=2016},
time=undefined},
#date{year=2016, month=2},
#time{hour=15, minute=7},
#time{hour=15}
],
lists:map(fun(Record) ->
?_assertEqual({0, Record}, offset_normalize(Record))
end, NoAdjust).
tz_valid_test_() ->
TZs = [
{#timezone{hours=15, minutes=00}, true},
{#timezone{hours=15, minutes=01}, false},
{#timezone{hours=-15, minutes=-00}, true},
{#timezone{hours=-15, minutes=-01}, false}
],
lists:map(fun({TZ, IsValid}) ->
?_assertEqual(IsValid, is_valid(TZ))
end, TZs).
tz_offset_test_() ->
TZs = [
{#timezone{hours=4, minutes=30}, -16200},
{#timezone{hours=-12, minutes=-45}, 45900},
{#timezone{hours=0, minutes=0}, 0}
],
lists:map(fun({TZ, Offset}) ->
?_assertEqual(Offset, tz_to_seconds(TZ))
end, TZs).
roundtrip_epoch_test_() ->
Epochs = [
{1466691033125, 3},
{1466691033, 0}
],
lists:map(fun({Epoch, Precision}) ->
?_assertEqual(Epoch,
to_epoch(from_epoch(Epoch, Precision), Precision))
end, Epochs).
expand_test_() ->
SameDate = [
{{2016, 3, undefined}, month},
{{1929, 10, 29}, day},
{{1429, 1, 1}, month},
{{99, undefined, undefined}, year}
],
SameTime = [
{{23, undefined, undefined}, hour},
{{15, 0, undefined}, hour},
{{15, 1, undefined}, minute},
{{3, 23, 60}, second}
],
NewDate = [
{{2016, undefined, undefined}, {2016, 1, undefined}, month},
{{1929, 10, undefined}, {1929, 10, 1}, day},
{{1429, undefined, undefined}, {1429, 1, 1}, day}
],
NewTime = [
{{undefined, undefined, undefined}, {0, undefined, undefined}, hour},
{{15, undefined, undefined}, {15, 0, undefined}, minute},
{{15, undefined, undefined}, {15, 0, 0}, second},
{{15, 5, undefined}, {15, 5, 0}, second}
],
lists:map(fun({Date, Accuracy}) ->
DateRecord = jam_erlang:tuple_to_record(#date{}, Date),
?_assertEqual(DateRecord, expand(DateRecord, Accuracy))
end, SameDate)
++
lists:map(fun({Time, Accuracy}) ->
TimeRecord = jam_erlang:tuple_to_record(#time{}, Time),
?_assertEqual(TimeRecord, expand(TimeRecord, Accuracy))
end, SameTime)
++
lists:map(fun({Old, New, Accuracy}) ->
OldDateRecord = jam_erlang:tuple_to_record(#date{}, Old),
NewDateRecord = jam_erlang:tuple_to_record(#date{}, New),
?_assertEqual(NewDateRecord, expand(OldDateRecord, Accuracy))
end, NewDate)
++
lists:map(fun({Old, New, Accuracy}) ->
OldTimeRecord = jam_erlang:tuple_to_record(#time{}, Old),
NewTimeRecord = jam_erlang:tuple_to_record(#time{}, New),
?_assertEqual(NewTimeRecord, expand(OldTimeRecord, Accuracy))
end, NewTime).
increment_test_() ->
Date = [
{{2016, 1, 9}, {2016, 1, 1}, -8},
{{1929, 10, 29}, {1930, 1, 2}, 65},
{{1429, 1, 1}, {1428, 12, 31}, -1},
{{1996, 2, 28}, {1996, 2, 29}, 1},
{{1997, undefined, undefined}, {1996, undefined, undefined}, -1},
{{1997, 1, undefined}, {1996, 12, undefined}, -1}
],
Time = [
{{23, 59, 59}, {0, 1, 22}, 22 + 60 + 1},
{{23, 59, undefined}, {1, 1, undefined}, 62}
],
OffsetTime = [
{1, {23, 59, 59}, {0, 1, 22}, 22 + 60 + 1},
{1, {23, 59, undefined}, {1, 1, undefined}, 62},
{-1, {3, undefined, undefined}, {20, undefined, undefined}, -7}
],
DateTime = [
{{{2016, 1, 9}, {undefined, undefined, undefined}},
{{2016, 1, 1}, {undefined, undefined, undefined}}, -8},
{{{2016, 1, 9}, {15, undefined, undefined}},
{{2016, 1, 8}, {16, undefined, undefined}}, -23}
],
lists:map(fun({Old, New, Incr}) ->
OldDateRecord = jam_erlang:tuple_to_record(#date{}, Old),
NewDateRecord = jam_erlang:tuple_to_record(#date{}, New),
?_assertEqual(NewDateRecord, increment(OldDateRecord, Incr))
end, Date)
++
lists:map(fun({Old, New, Incr}) ->
OldTimeRecord = jam_erlang:tuple_to_record(#time{}, Old),
NewTimeRecord = jam_erlang:tuple_to_record(#time{}, New),
?_assertEqual(NewTimeRecord, increment(OldTimeRecord, Incr))
end, Time)
++
lists:map(fun({DateAdj, Old, New, Incr}) ->
OldTimeRecord = jam_erlang:tuple_to_record(#time{}, Old),
NewTimeRecord = jam_erlang:tuple_to_record(#time{}, New),
?_assertEqual({DateAdj, NewTimeRecord}, offset_increment_time(OldTimeRecord, Incr))
end, OffsetTime)
++
lists:map(fun({OldDT, NewDT, Incr}) ->
?_assertEqual(jam_erlang:tuple_to_record(#datetime{}, NewDT),
increment(jam_erlang:tuple_to_record(#datetime{}, OldDT), Incr))
end, DateTime).
-endif. | src/jam.erl | 0.738103 | 0.483466 | jam.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc This module implements a specialized hash tree that is used
%% primarily by cluster metadata's anti-entropy exchanges and by
%% metadata clients for determining when groups of metadata keys have
%% changed locally. The tree can be used, generally, for determining
%% the differences in groups of keys, or to find missing groups, between
%% two stores.
%%
%% Each node of the tree is itself a hash tree, specifically a {@link
%% hashtree}. The tree has a fixed height but each node has a
%% variable amount of children. The height of the tree directly
%% corresponds to the number of prefixes supported by the tree. A list
%% of prefixes, or a "prefix list", represent a group of keys. Each
%% unique prefix list is a node in the tree. The leaves store hashes
%% for the individual keys in the segments of the node's {@link
%% hashtree}. The buckets of the leaves' hashtree provide an efficient
%% way of determining when keys in the segments differ between two
%% trees. The tails of the prefix list are used to roll up groups
%% into parent groups. For example, the prefixes `[a, b]', `[a, c]',
%% `[d, e]' will be rolled up into parent groups `a', containing `c'
%% and `b', and `d', containing only 'e'. The parent group's node has
%% children corresponding to each child group. The top-hashes of the
%% child nodes are stored in the parent nodes' segments. The parent
%% nodes' buckets are used as an efficient method for determining when
%% child groups differ between two trees. The root node corresponds to
%% the empty list and it acts like any other node, storing hashes for
%% the first level of child groups. The top hash of the root node is
%% the top hash of the tree.
%%
%% The tree in the example above might store something like:
%%
%% node parent top-hash segments
%% ---------------------------------------------------
%% root none 1 [{a, 2}, {d, 3}]
%% [a] root 2 [{b, 4}, {c, 5}]
%% [d] root 3 [{e, 6}]
%% [a,b] [a] 4 [{k1, 0}, {k2, 6}, ...]
%% [a,c] [a] 5 [{k1, 1}, {k2, 4}, ...]
%% [d,e] [d] 6 [{k1, 2}, {k2, 3}, ...]
%%
%%
%% When a key is inserted into the tree it is inserted into the leaf
%% corresponding to the given prefix list. The leaf and its parents
%% are not updated at this time. Instead the leaf is added to a dirty
%% set. The nodes are later updated in bulk.
%%
%% Updating the hashtree is a two step process. First, a snapshot of
%% the tree must be obtained. This prevents new writes from affecting
%% the update. Snapshotting the tree will snapshot each dirty
%% leaf. Since writes to nodes other than leaves only occur during
%% updates no snapshot is taken for them. Second, the tree is updated
%% using the snapshot. The update is performed by updating the {@link
%% hashtree} nodes at each level starting with the leaves. The top
%% hash of each node in a level is inserted into its parent node after
%% being updated. The list of dirty parents is then updated, moving up
%% the tree. Once the root is reached and has been updated the process
%% is complete. This process is designed to minimize the traversal of
%% the tree and ensure that each node is only updated once.
%%
%% The typical use for updating a tree is to compare it with another
%% recently updated tree. Comparison is done with the ``compare/4''
%% function. Compare provides a sort of fold over the differences of
%% the tree allowing for callers to determine what to do with those
%% differences. In addition, the caller can accumulate a value, such
%% as the difference list or stats about differences.
%%
%% The tree implemented in this module assumes that it will be managed
%% by a single process and that all calls will be made to it synchronously, with
%% a couple exceptions:
%%
%% 1. Updating a tree with a snapshot can be done in another process. The snapshot
%% must be taken by the owning process, synchronously.
%% 2. Comparing two trees may be done by a separate process. Compares should use
%% a snapshot and only be performed after an update.
%%
%% The nodes in this tree are backed by LevelDB; however, this is
%% most likely temporary and Cluster Metadata's use of the tree is
%% ephemeral. Trees are only meant to live for the lifetime of a
%% running node and are rebuilt on start. To ensure the tree is fresh
%% each time, when nodes are created the backing LevelDB store is
%% opened, closed, and then re-opened to ensure any lingering files
%% are removed. Additionally, the nodes themselves (references to
%% {@link hashtree}, are stored in {@link ets}.
-module(hashtree_tree).
-export([new/2,
destroy/1,
insert/4,
insert/5,
update_snapshot/1,
update_perform/1,
local_compare/2,
compare/4,
top_hash/1,
prefix_hash/2,
get_bucket/4,
key_hashes/3]).
-export_type([tree/0, tree_node/0, handler_fun/1, remote_fun/0]).
-record(hashtree_tree, {
%% the identifier for this tree. used as part of the ids
%% passed to hashtree.erl and in keys used to store nodes in
%% the tree's ets tables.
id :: term(),
%% directory where nodes are stored on disk
data_root :: file:name_all(),
%% number of levels in the tree excluding leaves (height - 1)
num_levels :: non_neg_integer(),
%% ets table that holds hashtree nodes in the tree
nodes :: ets:tab(),
%% ets table that holds snapshot nodes
snapshot :: ets:tab(),
%% set of dirty leaves
dirty :: gb_set()
}).
-define(ROOT, '$ht_root').
-define(NUM_LEVELS, 2).
-opaque tree() :: #hashtree_tree{}.
-type prefix() :: atom() | binary().
-type prefixes() :: [prefix()].
-opaque tree_node() :: prefixes() | ?ROOT.
-type prefix_diff() :: {missing_prefix, local | remote, prefixes()}.
-type key_diffs() :: {key_diffs, prefixes(),[{missing |
remote_missing |
different, binary()}]}.
-type diff() :: prefix_diff() | key_diffs().
-type handler_fun(X) :: fun((diff(), X) -> X).
%% Callback used by compare/4 to query a remote tree. The second
%% argument is the request: `{get_bucket, {Level, Bucket}}' or
%% `{key_hashes, Segment}'. (Fixed typo: was `key_hashses', which did
%% not match the `key_hashes' messages actually built by compare/6 and
%% local_compare/2.)
-type remote_fun() :: fun((prefixes(),
                           {get_bucket, {integer(), integer()}} |
                           {key_hashes, integer()}) -> orddict:orddict()).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Creates a new hashtree.
%%
%% Takes the following options:
%% * num_levels - the height of the tree excluding leaves. corresponds to the
%% length of the prefix list passed to {@link insert/5}.
%% * data_dir - the directory where the LevelDB instances for the nodes will
%% be stored.
-type new_opt_num_levels() :: {num_levels, non_neg_integer()}.
-type new_opt_data_dir() :: {data_dir, file:name_all()}.
-type new_opt() :: new_opt_num_levels() | new_opt_data_dir().
-type new_opts() :: [new_opt()].
-spec new(term(), new_opts()) -> tree().
%% Build the tree record, create the backing `nodes' ets table, and
%% touch the root so it exists up front (get_node/2 creates missing
%% nodes on access).
new(TreeId, Opts) ->
NumLevels = proplists:get_value(num_levels, Opts, ?NUM_LEVELS),
DataRoot = data_root(Opts),
Tree = #hashtree_tree{id = TreeId,
data_root = DataRoot,
num_levels = NumLevels,
%% table needs to be public to allow async update
nodes = ets:new(undefined, [public]),
snapshot = undefined,
dirty = gb_sets:new()},
get_node(?ROOT, Tree),
Tree.
%% @doc Destroys the tree cleaning up any used resources.
%% This deletes the LevelDB files for the nodes.
-spec destroy(tree()) -> ok.
%% Close and destroy every hashtree node, then drop the ets table that
%% held them; the `catch' tolerates an already-deleted table.
destroy(Tree) ->
ets:foldl(fun({_, Node}, _) ->
Node1 = hashtree:close(Node),
hashtree:destroy(Node1)
end, undefined, Tree#hashtree_tree.nodes),
catch ets:delete(Tree#hashtree_tree.nodes),
ok.
%% @doc an alias for insert(Prefixes, Key, Hash, [], Tree)
-spec insert(prefixes(), binary(), binary(), tree()) -> tree() | {error, term()}.
insert(Prefixes, Key, Hash, Tree) ->
insert(Prefixes, Key, Hash, [], Tree).
%% @doc Insert a hash into the tree. The length of `Prefixes' must
%% correspond to the height of the tree -- the value used for
%% `num_levels' when creating the tree. The hash is inserted into
%% a leaf of the tree and that leaf is marked as dirty. The tree is not
%% updated at this time. Future operations on the tree should use the
%% tree returned by this function.
%%
%% Insert takes the following options:
%% * if_missing - if `true' then the hash is only inserted into the tree
%% if the key is not already present. This is useful for
%% ensuring writes concurrent with building the tree
%% take precedence over older values. `false' is the default
%% value.
-type insert_opt_if_missing() :: {if_missing, boolean()}.
-type insert_opt() :: insert_opt_if_missing().
-type insert_opts() :: [insert_opt()].
-spec insert(prefixes(), binary(), binary(), insert_opts(), tree()) -> tree() | {error, term()}.
%% Reject prefix lists whose length differs from the tree's
%% `num_levels' (each prefix names one level); otherwise write the
%% hash into the matching leaf, which marks that leaf dirty rather
%% than updating the tree immediately.
insert(Prefixes, Key, Hash, Opts, Tree) ->
NodeName = prefixes_to_node_name(Prefixes),
case valid_prefixes(NodeName, Tree) of
true ->
insert_hash(Key, Hash, Opts, NodeName, Tree);
false ->
{error, bad_prefixes}
end.
%% @doc Snapshot the tree for updating. The return tree should be
%% updated using {@link update_perform/1} and to perform future operations
%% on the tree
-spec update_snapshot(tree()) -> tree().
%% Snapshot every dirty leaf in one pass: each dirty node is split by
%% hashtree:update_snapshot/1 into a snapshot copy (stored in a fresh
%% `snapshot' ets table, replacing any previous one) and a live copy
%% (written back to `nodes'). The returned tree has an empty dirty
%% set and must be used for all subsequent operations.
update_snapshot(Tree=#hashtree_tree{dirty=Dirty,nodes=Nodes,snapshot=Snapshot0}) ->
catch ets:delete(Snapshot0),
FoldRes = gb_sets:fold(fun(DirtyName, Acc) ->
DirtyKey = node_key(DirtyName, Tree),
Node = lookup_node(DirtyName, Tree),
{DirtyNode, NewNode} = hashtree:update_snapshot(Node),
[{{DirtyKey, DirtyNode}, {DirtyKey, NewNode}} | Acc]
end, [], Dirty),
{Snaps, NewNodes} = lists:unzip(FoldRes),
Snapshot = ets:new(undefined, []),
ets:insert(Snapshot, Snaps),
ets:insert(Nodes, NewNodes),
Tree#hashtree_tree{dirty=gb_sets:new(),snapshot=Snapshot}.
%% @doc Update the tree with a snapshot obtained by {@link
%% update_snapshot/1}. This function may be called by a process other
%% than the one managing the tree.
-spec update_perform(tree()) -> ok.
%% Flush every snapshotted leaf, collecting the set of parents whose
%% top hashes changed, then propagate those changes level by level up
%% to the root. The snapshot table is deleted when done.
update_perform(Tree=#hashtree_tree{snapshot=Snapshot}) ->
DirtyParents = ets:foldl(fun(DirtyLeaf, DirtyParentsAcc) ->
update_dirty_leaves(DirtyLeaf, DirtyParentsAcc, Tree)
end,
gb_sets:new(), Snapshot),
update_dirty_parents(DirtyParents, Tree),
catch ets:delete(Snapshot),
ok.
%% @doc Compare two local trees. This function is primarily for
%% local debugging and testing.
-spec local_compare(tree(), tree()) -> [diff()].
%% Compare two in-memory trees by wrapping T2's bucket/segment
%% accessors in a remote_fun() and accumulating every reported
%% difference into a list.
local_compare(T1, T2) ->
RemoteFun = fun(Prefixes, {get_bucket, {Level, Bucket}}) ->
hashtree_tree:get_bucket(Prefixes, Level, Bucket, T2);
(Prefixes, {key_hashes, Segment}) ->
[{_, Hashes}] = hashtree_tree:key_hashes(Prefixes, Segment, T2),
Hashes
end,
HandlerFun = fun(Diff, Acc) -> Acc ++ [Diff] end,
compare(T1, RemoteFun, HandlerFun, []).
%% @doc Compare a local and remote tree. `RemoteFun' is used to
%% access the buckets and segments of nodes in the remote
%% tree. `HandlerFun' will be called for each difference found in the
%% tree. A difference is either a missing local or remote prefix, or a
%% list of key differences, which themselves signify different or
%% missing keys. `HandlerAcc' is passed to the first call of
%% `HandlerFun' and each subsequent call is passed the value returned
%% by the previous call. The return value of this function is the
%% return value from the last call to `HandlerFun'.
-spec compare(tree(), remote_fun(), handler_fun(X), X) -> X.
compare(LocalTree, RemoteFun, HandlerFun, HandlerAcc) ->
compare(?ROOT, 1, LocalTree, RemoteFun, HandlerFun, HandlerAcc).
%% @doc Returns the top-hash of the tree. This is the top-hash of the
%% root node.
-spec top_hash(tree()) -> undefined | binary().
top_hash(Tree) ->
prefix_hash([], Tree).
%% @doc Returns the top-hash of the node corresponding to the given
%% prefix list. The length of the prefix list can be less than or
%% equal to the height of the tree. If the tree has not been updated
%% or if the prefix list is not found or invalid, then `undefined' is
%% returned. Otherwise the hash value from the most recent update is
%% returned.
-spec prefix_hash(prefixes(), tree()) -> undefined | binary().
%% Look up the node named by the prefix list and return its most
%% recently computed top hash; `undefined' when the node does not
%% exist or has no hash yet.
prefix_hash(Prefixes, Tree) ->
NodeName = prefixes_to_node_name(Prefixes),
case lookup_node(NodeName, Tree) of
undefined -> undefined;
Node -> extract_top_hash(hashtree:top_hash(Node))
end.
%% @doc Returns the {@link hashtree} buckets for a given node in the
%% tree. This is used primarily for accessing buckets of a remote tree
%% during compare.
-spec get_bucket(tree_node(), integer(), integer(), tree()) -> orddict:orddict().
get_bucket(Prefixes, Level, Bucket, Tree) ->
case lookup_node(prefixes_to_node_name(Prefixes), Tree) of
undefined -> orddict:new();
Node -> hashtree:get_bucket(Level, Bucket, Node)
end.
%% @doc Returns the {@link hashtree} segment hashes for a given node
%% in the tree. This is used primarily for accessing key hashes of a
%% remote tree during compare.
-spec key_hashes(tree_node(), integer(), tree()) -> [{integer(), orddict:orddict()}].
key_hashes(Prefixes, Segment, Tree) ->
case lookup_node(prefixes_to_node_name(Prefixes), Tree) of
undefined -> [{Segment, orddict:new()}];
Node -> hashtree:key_hashes(Node, Segment)
end.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @private
insert_hash(Key, Hash, Opts, NodeName, Tree) ->
Node = get_node(NodeName, Tree),
insert_hash(Key, Hash, Opts, NodeName, Node, Tree).
%% @private
%% Write the hash into the node's hashtree, store the updated node and
%% add the node to the dirty set so the next update pass revisits it.
insert_hash(Key, Hash, Opts, NodeName, Node, Tree=#hashtree_tree{dirty=Dirty}) ->
Node2 = hashtree:insert(Key, Hash, Node, Opts),
Dirty2 = gb_sets:add_element(NodeName, Dirty),
set_node(NodeName, Node2, Tree),
Tree#hashtree_tree{dirty=Dirty2}.
%% @private
update_dirty_leaves({DirtyKey, DirtyNode}, DirtyParents, Tree) ->
update_dirty(node_key_to_name(DirtyKey), DirtyNode, DirtyParents, Tree).
%% @private
update_dirty_parents(DirtyParents, Tree) ->
case gb_sets:is_empty(DirtyParents) of
true -> ok;
false ->
NextDirty = gb_sets:fold(
fun(DirtyParent, DirtyAcc) ->
DirtyNode = lookup_node(DirtyParent, Tree),
{DirtySnap, DirtyNode2} = hashtree:update_snapshot(DirtyNode),
NextDirty = update_dirty(DirtyParent, DirtySnap, DirtyAcc, Tree),
set_node(DirtyParent, DirtyNode2, Tree),
NextDirty
end, gb_sets:new(), DirtyParents),
update_dirty_parents(NextDirty, Tree)
end.
%% @private
%% Flush one dirty node, then push its new top hash into its parent's
%% segments (keyed by the serialized child name) and mark the parent
%% dirty for the next level's pass. Propagation stops at the root,
%% which has no parent.
update_dirty(DirtyName, DirtyNode, NextDirty, Tree) ->
hashtree:update_perform(DirtyNode),
case parent_node(DirtyName, Tree) of
undefined ->
NextDirty;
{ParentName, ParentNode} ->
TopHash = extract_top_hash(hashtree:top_hash(DirtyNode)),
ParentKey = to_parent_key(DirtyName),
%% ignore returned tree b/c we are tracking dirty in this fold separately
insert_hash(ParentKey, TopHash, [], ParentName, ParentNode, Tree),
gb_sets:add_element(ParentName, NextDirty)
end.
%% @private
compare(NodeName, Level, LocalTree, RemoteFun, HandlerFun, HandlerAcc)
when Level =:= LocalTree#hashtree_tree.num_levels + 1 ->
Prefixes = node_name_to_prefixes(NodeName),
LocalNode = lookup_node(NodeName, LocalTree),
RemoteNode = fun(Action, Info) ->
RemoteFun(Prefixes, {Action, Info})
end,
AccFun = fun(Diffs, CompareAcc) ->
Res = HandlerFun({key_diffs, Prefixes, Diffs},
extract_compare_acc(CompareAcc, HandlerAcc)),
[{acc, Res}]
end,
CompareRes = hashtree:compare(LocalNode, RemoteNode, AccFun, []),
extract_compare_acc(CompareRes, HandlerAcc);
compare(NodeName, Level, LocalTree, RemoteFun, HandlerFun, HandlerAcc) ->
Prefixes = node_name_to_prefixes(NodeName),
LocalNode = lookup_node(NodeName, LocalTree),
RemoteNode = fun(Action, Info) ->
RemoteFun(Prefixes, {Action, Info})
end,
AccFoldFun = fun({missing, NodeKey}, HandlerAcc2) ->
missing_prefix(NodeKey, local, HandlerFun, HandlerAcc2);
({remote_missing, NodeKey}, HandlerAcc2) ->
missing_prefix(NodeKey, remote, HandlerFun, HandlerAcc2);
({different, NodeKey}, HandlerAcc2) ->
compare(from_parent_key(NodeKey), Level+1, LocalTree,
RemoteFun, HandlerFun, HandlerAcc2)
end,
AccFun = fun(Diffs, CompareAcc) ->
Res = lists:foldl(AccFoldFun,
extract_compare_acc(CompareAcc, HandlerAcc), Diffs),
[{acc, Res}]
end,
CompareRes = hashtree:compare(LocalNode, RemoteNode, AccFun, []),
extract_compare_acc(CompareRes, HandlerAcc).
%% @private
missing_prefix(NodeKey, Type, HandlerFun, HandlerAcc) ->
HandlerFun({missing_prefix, Type, node_name_to_prefixes(from_parent_key(NodeKey))},
HandlerAcc).
%% @private
%% Unwrap the accumulator threaded through hashtree:compare/4: the
%% AccFun wraps its result as a one-element `[{acc, Value}]' list,
%% while an untouched (empty) result falls back to the caller's
%% original accumulator.
extract_compare_acc([], FallbackAcc) ->
    FallbackAcc;
extract_compare_acc([{acc, Value}], _FallbackAcc) ->
    Value.
%% @private
get_node(NodeName, Tree) ->
Node = lookup_node(NodeName, Tree),
get_node(NodeName, Node, Tree).
%% @private
get_node(NodeName, undefined, Tree) ->
create_node(NodeName, Tree);
get_node(_NodeName, Node, _Tree) ->
Node.
%% @private
lookup_node(NodeName, Tree=#hashtree_tree{nodes=Nodes}) ->
NodeKey = node_key(NodeName, Tree),
case ets:lookup(Nodes, NodeKey) of
[] -> undefined;
[{NodeKey, Node}] -> Node
end.
%% @private
%% Create the backing hashtree for a node. The root uses a smaller
%% width/segment count than deeper nodes (see node_width/1 and
%% node_num_segs/1).
create_node(NodeName, Tree) ->
NodeId = node_id(NodeName, Tree),
NodePath = node_path(NodeId, Tree),
NumSegs = node_num_segs(NodeName),
Width = node_width(NodeName),
Opts = [{segment_path, NodePath}, {segments, NumSegs}, {width, Width}],
%% remove any existing node data in case of crash
ok = hashtree:destroy(NodePath),
Node = hashtree:new(NodeId, Opts),
set_node(NodeName, Node, Tree).
%% @private
set_node(NodeName, Node, Tree) when is_list(NodeName) orelse NodeName =:= ?ROOT ->
set_node(node_key(NodeName, Tree), Node, Tree);
set_node(NodeKey, Node, #hashtree_tree{nodes=Nodes}) when is_tuple(NodeKey) ->
ets:insert(Nodes, [{NodeKey, Node}]),
Node.
%% @private
%% Return {ParentName, ParentNode} for a node, creating the parent on
%% demand; a node name's tail is its parent's name, and the root has
%% no parent.
parent_node(?ROOT, _Tree) ->
%% root has no parent
undefined;
parent_node([_Single], Tree) ->
%% parent of first level is the root
{?ROOT, get_node(?ROOT, Tree)};
parent_node([_Prefix | Parent], Tree) ->
%% parent of subsequent level is tail of node name
{Parent, get_node(Parent, Tree)}.
%% @private
node_width(?ROOT) ->
256;
node_width(NodeName) ->
case length(NodeName) < 2 of
true -> 512;
false -> 1024
end.
%% @private
node_num_segs(?ROOT) ->
256 * 256;
node_num_segs(NodeName) ->
case length(NodeName) < 2 of
true -> 512 * 512;
false -> 1024 * 1024
end.
%% @private
node_path({_, <<NodeInt:176/integer>>}, #hashtree_tree{data_root=DataRoot}) ->
NodeMD5 = riak_core_util:integer_to_list(NodeInt, 16),
filename:join(DataRoot, NodeMD5).
%% @private
node_key(NodeName, #hashtree_tree{id=TreeId}) ->
{TreeId, NodeName}.
%% @private
node_key_to_name({_TreeId, NodeName}) ->
NodeName.
%% @private
node_id(?ROOT, #hashtree_tree{id=TreeId}) ->
{TreeId, <<0:176/integer>>};
node_id(NodeName, #hashtree_tree{id=TreeId}) ->
<<NodeMD5:128/integer>> = riak_core_util:md5(term_to_binary(NodeName)),
{TreeId, <<NodeMD5:176/integer>>}.
%% @private
%% Serialize a child node's name so it can be used as a key in the
%% parent node's hashtree segments.
to_parent_key(ChildName) ->
    term_to_binary(ChildName).
%% @private
%% Inverse of to_parent_key/1: recover the child node's name from its
%% serialized form.
from_parent_key(SerializedName) ->
    binary_to_term(SerializedName).
%% @private
%% A prefix list names a leaf only when its length equals the tree's
%% configured number of levels. The empty prefix list maps to the
%% ?ROOT sentinel (an atom), which previously crashed length/1 with
%% badarg; treat it as invalid so insert/5 returns {error,
%% bad_prefixes} as its spec promises.
valid_prefixes(?ROOT, _Tree) ->
    false;
valid_prefixes(NodeName, #hashtree_tree{num_levels=NumLevels}) when is_list(NodeName) ->
    length(NodeName) =:= NumLevels.
%% @private
prefixes_to_node_name([]) ->
?ROOT;
prefixes_to_node_name(Prefixes) ->
lists:reverse(Prefixes).
%% @private
node_name_to_prefixes(?ROOT) ->
[];
node_name_to_prefixes(NodeName) ->
lists:reverse(NodeName).
%% @private
%% hashtree:top_hash/1 yields `[]' before the first update and
%% `[{0, Hash}]' afterwards; flatten that into `undefined' or the
%% bare hash.
extract_top_hash([]) ->
    undefined;
extract_top_hash([{0, TopHash}]) ->
    TopHash.
%% @private
data_root(Opts) ->
case proplists:get_value(data_dir, Opts) of
undefined ->
Base = "/tmp/hashtree_tree",
<<P:128/integer>> = riak_core_util:md5(term_to_binary(erlang:now())),
filename:join(Base, riak_core_util:integer_to_list(P, 16));
Root -> Root
end. | src/hashtree_tree.erl | 0.847999 | 0.637764 | hashtree_tree.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
-module(random).
%% Reasonable random number generator.
%% The method is attributed to B. A. Wichmann and I. D. Hill
%% See "An efficient and portable pseudo-random number generator",
%% Journal of Applied Statistics. AS183. 1982. Also Byte March 1987.
-export([seed/0, seed/1, seed/3, uniform/0, uniform/1,
uniform_s/1, uniform_s/2, seed0/0]).
-define(PRIME1, 30269).
-define(PRIME2, 30307).
-define(PRIME3, 30323).
%%-----------------------------------------------------------------------
%% The type of the state
-type ran() :: {integer(), integer(), integer()}.
%%-----------------------------------------------------------------------
-spec seed0() -> ran().

%% Fixed default seed triple used whenever the calling process has no
%% stored seed.
seed0() ->
    {3172, 9814, 20125}.

%% seed()
%%  Seed random number generation with default values. Returns the seed
%%  previously stored in the process dictionary, or the default seed
%%  when the process had none.

-spec seed() -> ran().

seed() ->
    case seed_put(seed0()) of
        undefined -> seed0();
        {_,_,_} = Tuple -> Tuple
    end.
%% seed({A1, A2, A3})
%%  Seed random number generation. An integer argument is scrambled
%%  into three components; a triple is used as-is. Returns the
%%  previously stored seed, or undefined if none existed.

-spec seed(SValue) -> 'undefined' | ran() when
      SValue :: {A1, A2, A3} | integer(),
      A1 :: integer(),
      A2 :: integer(),
      A3 :: integer().

seed(Int) when is_integer(Int) ->
    %% Spread the integer's bits over the three components so that
    %% nearby integers yield unrelated seed triples.
    A1 = (Int bsr 16) band 16#fffffff,
    A2 = Int band 16#ffffff,
    A3 = (Int bsr 36) bor (A2 bsr 16),
    seed(A1, A2, A3);
seed({A1, A2, A3}) ->
    seed(A1, A2, A3).
%% seed(A1, A2, A3)
%%  Seed random number generation. Each component is reduced modulo its
%%  prime minus one and offset by one, so stored values are never zero.

-spec seed(A1, A2, A3) -> 'undefined' | ran() when
      A1 :: integer(),
      A2 :: integer(),
      A3 :: integer().

seed(A1, A2, A3) ->
    seed_put({(abs(A1) rem (?PRIME1-1)) + 1,   % Avoid seed numbers that are
              (abs(A2) rem (?PRIME2-1)) + 1,   % even divisors of the
              (abs(A3) rem (?PRIME3-1)) + 1}). % corresponding primes.

%% Store the seed in the process dictionary; returns the previous seed
%% or undefined on first use.
-spec seed_put(ran()) -> 'undefined' | ran().

seed_put(Seed) ->
    put(random_seed, Seed).
%% uniform()
%%  Returns a random float in [0.0, 1.0), advancing the seed stored in
%%  the process dictionary (Wichmann-Hill AS183 combination).
-spec uniform() -> float().
uniform() ->
    {S1, S2, S3} = case get(random_seed) of
                       undefined -> seed0();
                       Stored -> Stored
                   end,
    N1 = (S1*171) rem ?PRIME1,
    N2 = (S2*172) rem ?PRIME2,
    N3 = (S3*170) rem ?PRIME3,
    put(random_seed, {N1,N2,N3}),
    Sum = N1/?PRIME1 + N2/?PRIME2 + N3/?PRIME3,
    Sum - trunc(Sum).
%% uniform(N) -> I
%%  Given an integer N >= 1, returns a random integer uniformly
%%  distributed between 1 and N, advancing the process seed.
-spec uniform(N) -> pos_integer() when
      N :: pos_integer().
uniform(N) when is_integer(N), N >= 1 ->
    1 + trunc(N * uniform()).
%%% Functional versions

%% uniform_s(State) -> {F, NewState}
%%  Pure variant of uniform/0: returns a float in [0.0, 1.0) together
%%  with the advanced generator state.
-spec uniform_s(State0) -> {float(), State1} when
      State0 :: ran(),
      State1 :: ran().
uniform_s({S1, S2, S3}) ->
    N1 = (S1*171) rem ?PRIME1,
    N2 = (S2*172) rem ?PRIME2,
    N3 = (S3*170) rem ?PRIME3,
    Sum = N1/?PRIME1 + N2/?PRIME2 + N3/?PRIME3,
    {Sum - trunc(Sum), {N1,N2,N3}}.
%% uniform_s(N, State) -> {I, NewState}
%%  Pure variant of uniform/1: returns a random integer between 1 and N
%%  together with the advanced generator state.
-spec uniform_s(N, State0) -> {integer(), State1} when
      N :: pos_integer(),
      State0 :: ran(),
      State1 :: ran().
uniform_s(N, State0) when is_integer(N), N >= 1 ->
    {F, State1} = uniform_s(State0),
    {1 + trunc(N * F), State1}.
-module(siamese).
-export([
new/0,
to_list/1,
from_list/1,
open_scope/1,
close_scope/1,
put/3,
remove/2,
find/2,
is_key/2,
get/2,
get/3,
size/1
]).
-type symtable() :: nonempty_list(map()).
%% Return a fresh symbol table consisting of a single empty scope.
-spec new() -> symtable().
new() ->
    [#{}].
%% Flatten the symbol table into one list of {Symbol, Value} tuples,
%% innermost scope first.
-spec to_list(symtable()) -> list({term(), term()}).
to_list(Symtable) ->
    lists:flatmap(fun maps:to_list/1, Symtable).
%% Create a symbol table from a list of {Symbol, Value} tuples. Each
%% pair is inserted in order into a fresh single-scope table via put/3;
%% note that a duplicate key makes put/3 return the atom
%% key_already_exists instead of a table, which then becomes the fold
%% accumulator.
-spec from_list(list({term(), term()})) -> symtable().
from_list(List) ->
    lists:foldl(fun ({K, V}, Symtable) -> put(K, V, Symtable) end,
                new(),
                List).
%% Push a fresh empty scope onto the scope stack, allowing inner
%% bindings to shadow identically-named outer ones.
-spec open_scope(symtable()) -> symtable().
open_scope(Symtable) ->
    [#{} | Symtable].
%% Pop the innermost scope off the stack, discarding its bindings.
-spec close_scope(symtable()) -> symtable().
close_scope([_Innermost | Enclosing]) ->
    Enclosing.
%% Search the scope stack innermost-first for Symbol. Returns
%% {ok, Value} on the first hit, or the atom undefined when no scope
%% binds it.
-spec find(term(), symtable()) -> {ok, term()} | undefined.
find(_Symbol, []) ->
    undefined;
find(Symbol, [Scope | Outer]) ->
    case Scope of
        #{Symbol := Value} -> {ok, Value};
        _ -> find(Symbol, Outer)
    end.
%% Look up Symbol and return its value; raises {badkey, Symbol} when it
%% is bound in no scope.
-spec get(term(), symtable()) -> term().
get(Symbol, Symtable) ->
    case find(Symbol, Symtable) of
        undefined -> error({badkey, Symbol});
        {ok, Value} -> Value
    end.
%% Look up Symbol and return its value, or Default when unbound.
-spec get(term(), symtable(), term()) -> term().
get(Symbol, Symtable, Default) ->
    case find(Symbol, Symtable) of
        undefined -> Default;
        {ok, Value} -> Value
    end.
%% Test whether Symbol is bound in any scope of the symbol table.
-spec is_key(term(), symtable()) -> boolean().
is_key(Symbol, Symtable) ->
    find(Symbol, Symtable) =/= undefined.
%% Bind Symbol to Value in the innermost scope and return the updated
%% table. Returns key_already_exists when the innermost scope already
%% binds Symbol, and invalid_symbol_table when the scope stack is
%% empty (an invalid state).
-spec put(term(), term(), symtable())
      -> symtable() | invalid_symbol_table | key_already_exists.
put(_Symbol, _Value, []) ->
    invalid_symbol_table;
put(Symbol, Value, [Scope | Outer]) ->
    case Scope of
        #{Symbol := _Existing} -> key_already_exists;
        _ -> [Scope#{Symbol => Value} | Outer]
    end.
%% Unbind Symbol from the innermost scope only; identically-named
%% bindings in outer scopes are left untouched. Returns
%% invalid_symbol_table when the scope stack is empty.
%% Spec fix: the success return type previously read `symtable` (a bare
%% atom); the function actually returns a symtable().
-spec remove(term(), symtable()) -> symtable() | invalid_symbol_table.
remove(_Symbol, []) ->
    invalid_symbol_table;
remove(Symbol, [Map | Rest]) ->
    [maps:remove(Symbol, Map) | Rest].
%% Count every binding in every scope; a symbol shadowed across scopes
%% contributes once per scope it appears in.
-spec size(symtable()) -> non_neg_integer().
size(Symtable) ->
    lists:foldl(fun(Scope, Acc) -> Acc + maps:size(Scope) end, 0, Symtable).
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%%
%% @end
%%%-----------------------------------------------------------------------
-module(otel_resource).
-export([create/1,
create/2,
merge/2,
schema_url/1,
attributes/1]).
-include("otel_resource.hrl").
-type key() :: unicode:latin1_binary().
%% values allowed in attributes of a resource are limited
-type value() :: unicode:latin1_binary() | integer() | float() | boolean().
-type schema_url() :: uri_string:uri_string().
-define(MAX_LENGTH, 255).
-record(resource, {schema_url :: schema_url() | undefined,
attributes :: otel_attributes:t()}).
-type t() :: #resource{}.
-spec create(#{key() => value()} | [{key(), value()}]) -> t().
%% Build a resource with no schema URL; see create/2.
create(Attributes) ->
    create(Attributes, undefined).

%% verifies each key and value and drops any that don't pass verification
%% Accepts either a map or a proplist. Duplicate keys are removed with
%% lists:ukeysort/2, keys are normalised to binaries, and entries whose
%% key or value fails the latin1/length checks are silently dropped.
-spec create(#{key() => value()} | [{key(), value()}], schema_url() | undefined) -> t().
create(Map, SchemaUrl) when is_map(Map) ->
    create(maps:to_list(Map), SchemaUrl);
create(List, SchemaUrl) when is_list(List) ->
    List1 = lists:filtermap(fun({K, V}) ->
                                %% TODO: log an info or debug message when dropping?
                                %% check_key/1 yields a boolean, so the andalso
                                %% result is false or check_value/1's {true, Value}.
                                case check_key(K) andalso check_value(V) of
                                    {true, Value} ->
                                        {true, {to_binary(K), Value}};
                                    _ ->
                                        false
                                end
                            end, lists:ukeysort(1, List)),
    #resource{schema_url=SchemaUrl,
              attributes=otel_attributes:new(List1, 128, 255)}.
%% Return the schema URL the resource was created with, if any.
-spec schema_url(t()) -> schema_url() | undefined.
schema_url(#resource{schema_url=Schema}) ->
    Schema.

%% Return the resource's attribute set.
-spec attributes(t()) -> otel_attributes:t().
attributes(#resource{attributes=Attributes}) ->
    Attributes.
%% in case of collision the updating, first argument, resource takes precedence.
-spec merge(t(), t()) -> t().
merge(#resource{schema_url=NewSchemaUrl, attributes=NewAttributes},
      Current=#resource{schema_url=CurrentSchemaUrl,
                        attributes=CurrentAttributes}) ->
    MergedSchemaUrl = merge_schema_url(NewSchemaUrl, CurrentSchemaUrl),
    NewMap = otel_attributes:map(NewAttributes),
    Current#resource{schema_url=MergedSchemaUrl,
                     attributes=otel_attributes:set(NewMap, CurrentAttributes)}.
%%
%% When merging resources the schema URLs are compared after
%% normalisation: if only one side has a schema it is kept; if both
%% sides normalise to the same URL that URL is kept; otherwise the
%% merged resource carries no schema URL.
merge_schema_url(SchemaUrl, undefined) ->
    SchemaUrl;
merge_schema_url(undefined, SchemaUrl) ->
    SchemaUrl;
merge_schema_url(NewSchemaUrl, CurrentSchemaUrl) ->
    case {uri_string:normalize(NewSchemaUrl), uri_string:normalize(CurrentSchemaUrl)} of
        {Same, Same} -> Same;
        {_, _} -> undefined
    end.
%% all resource strings, key or value, must be latin1 with at most
%% ?MAX_LENGTH (255) characters
check_string(S) ->
    string:length(S) =< ?MAX_LENGTH.

%% a resource key must be a non-empty latin1 string
%% (binaries and character lists are accepted; anything else is rejected)
check_key(K) when is_binary(K) ; is_list(K) ->
    check_string(K);
check_key(_) ->
    false.
%% a resource value can be a latin1 string, integer, float or boolean
check_value(V) when is_integer(V) ;
                    is_float(V) ;
                    is_boolean(V) ->
    {true, V};
check_value(V) when is_binary(V) ->
    check_string_value(V);
check_value(V) when is_list(V) ->
    %% A list is first treated as chardata; if conversion raises badarg
    %% it is assumed to be an array attribute value and kept as-is.
    %% NOTE(review): unicode:characters_to_binary/1 can also return an
    %% {error, ...} or {incomplete, ...} tuple instead of raising; such
    %% a tuple would be passed to check_string_value/1 — confirm this
    %% is the intended handling of partially-convertible lists.
    try unicode:characters_to_binary(V) of
        B ->
            check_string_value(B)
    catch
        error:badarg ->
            %% must be an array attribute value
            {true, V}
    end;
check_value(_) ->
    false.
%% Wrap a string value in the {true, Value} form expected by
%% lists:filtermap/2 when it passes the length check; false otherwise.
check_string_value(V) ->
    check_string(V) andalso {true, V}.
%% Normalise a key to a binary: character lists are converted, binaries
%% pass through untouched.
to_binary(Key) when is_binary(Key) ->
    Key;
to_binary(Key) when is_list(Key) ->
    list_to_binary(Key).
%% @author <NAME> <<EMAIL>>
%% @copyright 2011 <NAME>
%% @doc 'slice' filter, get a range of elements from a list
%% Copyright 2011 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%
% Given a list = [1,2,3,4,5,6,7,8,9,0]
%
% Get all elements from element M to element N:
% {{ list|slice:[3,7] }} -> [3,4,5,6,7]
% {{ list|slice:[3,-3] }} -> [3,4,5,6,7]
% {{ list|slice:[-7,-3] }} -> [3,4,5,6,7]
% {{ list|slice:[-7,7] }} -> [3,4,5,6,7]
%
% Get all elements except the first N:
% {{ list|slice:[3,] }} -> [3,4,5,6,7,8,9,0]
% {{ list|slice:[-7,] }} -> [3,4,5,6,7,8,9,0]
%
% Get all elements up to element N
% {{ list|slice:[,3] }} -> [1,2,3]
% {{ list|slice:[3] }} -> [1,2,3]
%
% Get all elements except the last N:
% {{ list|slice:[,-3] }} -> [1,2,3,4,5,6,7]
% {{ list|slice:[-3] }} -> [1,2,3,4,5,6,7]
%
% {{ list|slice:[M,N] }}, where N < M will return []
% {{ list|slice:[,] }}, where N < M will return [1,2,3,4,5,6,7,8,9,0]
%
-module(filter_slice).
-export([slice/3]).
%% Entry point: undefined input stays undefined, proper lists are
%% sliced directly, anything else is coerced to a list first.
slice(undefined, _, _Context) ->
    undefined;
slice(List, Slice, _Context) when is_list(List) ->
    slice1(List, Slice);
slice(MaybeList, Slice, Context) ->
    slice1(z_template_compiler_runtime:to_list(MaybeList, Context), Slice).

%% Normalise the [M,N] slice spec: fill in missing endpoints (M
%% defaults to 1, N to the list length), translate a negative upper
%% bound given without a lower bound, and accept a single value as the
%% upper bound (possibly after integer coercion).
slice1(List, [undefined, undefined]) ->
    slice2(List, 1, length(List));
slice1(List, [M, undefined]) ->
    slice2(List, M, length(List));
slice1(List, [undefined, N]) ->
    N1 = if
             N < 0 -> length(List) + N;
             true -> N
         end,
    slice2(List, 1, N1);
slice1(List, [M, N]) ->
    slice2(List, M, N);
slice1(List, [M]) ->
    slice1(List, [undefined, M]);
slice1(List, M) ->
    slice1(List, [undefined, z_convert:to_integer(M)]).
%% Resolve both endpoints to absolute 1-based positions and take the
%% corresponding sublist; an inverted range yields the empty list.
slice2(List, M, N) ->
    Len = length(List),
    From = absolute_index(M, Len),
    To = absolute_index(N, Len),
    if
        To < From -> [];
        true -> lists:sublist(List, From, To - From + 1)
    end.

%% Translate an endpoint to a 1-based position: negative values count
%% from the end of the list, zero is rejected.
absolute_index(0, _Len) ->
    throw({error, invalid_index});
absolute_index(Index, Len) when Index < 0 ->
    Len + Index;
absolute_index(Index, _Len) ->
    Index.
%% Copyright (c) 2014-2018 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : erlog_db_ets.erl
%% Author : <NAME>
%% Purpose : Interface to an erlog database built with ETS.
%% The database is an ets table where the key is the functor pair {Name,Arity}.
%% The value is: {Functor,built_in} |
%% {Functor,clauses,NextTag,[{Tag,Head,Body}]} |
%% {Functor,code,{Module,Function}}.
%% Built-ins are defined by the system and cannot manipulated by user
%% code.
-module(erlog_db_ets).
-export([new/1]).
-export([add_built_in/2,add_compiled_proc/4,asserta_clause/4,assertz_clause/4]).
-export([retract_clause/3,abolish_clauses/2]).
-export([get_procedure/2,get_procedure_type/2]).
-export([get_interpreted_functors/1]).
%% new(InitArgs) -> Db.
%% Create the backing ETS table; the functor pair {Name,Arity} is the
%% key (keypos 1 of the stored tuples).
new(Name) ->
    ets:new(Name, [named_table,set,protected,{keypos,1}]).

%% add_built_in(Db, Functor) -> NewDb.
%%  Add Functor as a built-in in the database, overwriting any existing
%%  entry for that functor.
add_built_in(Db, Functor) ->
    ets:insert(Db, {Functor,built_in}),
    Db.
%% add_compiled_proc(Db, Functor, Module, Function) -> {ok,NewDb} | error.
%%  Register Functor as a compiled procedure implemented by M:F.
%%  Built-ins may not be overridden, so those return error.
add_compiled_proc(Db, Functor, M, F) ->
    case ets:lookup(Db, Functor) of
        [{_, built_in}] ->
            error;
        _Other ->
            true = ets:insert(Db, {Functor, code, {M, F}}),
            {ok, Db}
    end.
%% asserta_clause(Db, Functor, Head, Body) -> {ok,NewDb} | error.
%%  Prepend a clause to Functor's clause list, tagging it with the next
%%  free tag. The clause format is NOT validated here. Built-in and
%%  compiled procedures cannot gain clauses.
asserta_clause(Db, Functor, Head, Body) ->
    case ets:lookup(Db, Functor) of
        [{_, built_in}] ->
            error;
        [{_, code, _}] ->
            error;
        [{_, clauses, NextTag, Clauses}] ->
            true = ets:insert(Db, {Functor, clauses, NextTag + 1,
                                   [{NextTag, Head, Body} | Clauses]}),
            {ok, Db};
        [] ->
            true = ets:insert(Db, {Functor, clauses, 1, [{0, Head, Body}]}),
            {ok, Db}
    end.
%% assertz_clause(Db, Functor, Head, Body) -> {ok,NewDb} | error.
%%  Append a clause to the end of Functor's clause list; otherwise
%%  identical to asserta_clause/4.
assertz_clause(Db, Functor, Head, Body) ->
    case ets:lookup(Db, Functor) of
        [{_, built_in}] ->
            error;
        [{_, code, _}] ->
            error;
        [{_, clauses, NextTag, Clauses}] ->
            true = ets:insert(Db, {Functor, clauses, NextTag + 1,
                                   Clauses ++ [{NextTag, Head, Body}]}),
            {ok, Db};
        [] ->
            true = ets:insert(Db, {Functor, clauses, 1, [{0, Head, Body}]}),
            {ok, Db}
    end.
%% retract_clause(Db, Functor, ClauseTag) -> {ok,NewDb} | error.
%%  Remove the clause tagged ClauseTag from Functor's clause list.
%%  Unknown functors are a no-op; built-ins and compiled procedures
%%  cannot be retracted.
retract_clause(Db, Functor, Tag) ->
    case ets:lookup(Db, Functor) of
        [{_, built_in}] ->
            error;
        [{_, code, _}] ->
            error;
        [{_, clauses, NextTag, Clauses}] ->
            Remaining = lists:keydelete(Tag, 1, Clauses),
            true = ets:insert(Db, {Functor, clauses, NextTag, Remaining}),
            {ok, Db};
        [] ->
            {ok, Db}
    end.
%% abolish_clauses(Db, Functor) -> {ok,NewDb} | error.
%%  Drop a user-defined procedure (compiled or interpreted) entirely.
%%  Built-ins cannot be abolished; unknown functors are a no-op.
abolish_clauses(Db, Functor) ->
    case ets:lookup(Db, Functor) of
        [{_, built_in}] ->
            error;
        [] ->
            {ok, Db};
        [_UserEntry] ->
            true = ets:delete(Db, Functor),
            {ok, Db}
    end.
%% get_procedure(Db, Functor) ->
%%      built_in | {code,{Mod,Func}} | {clauses,[Clause]} | undefined.
%%  Return the procedure type and data for a functor.
get_procedure(Db, Functor) ->
    case ets:lookup(Db, Functor) of
        [] -> undefined;
        [{_, built_in}] -> built_in;
        [{_, code, Code}] -> {code, Code};
        [{_, clauses, _, Clauses}] -> {clauses, Clauses}
    end.
%% get_procedure_type(Db, Functor) ->
%%      built_in | compiled | interpreted | undefined.
%%  Return only the procedure's classification.
get_procedure_type(Db, Functor) ->
    case ets:lookup(Db, Functor) of
        [] -> undefined;                     %Undefined
        [{_, built_in}] -> built_in;        %A built-in
        [{_, code, _}] -> compiled;         %Compiled (perhaps someday)
        [{_, clauses, _, _}] -> interpreted %Interpreted clauses
    end.
%% get_interp_functors(Db) -> [Functor].
%%  List the functors of every interpreted (clause-based) procedure.
get_interpreted_functors(Db) ->
    Collect = fun({Functor, clauses, _, _}, Acc) -> [Functor | Acc];
                 (_, Acc) -> Acc
              end,
    ets:foldl(Collect, [], Db).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_epi_data).
-include("couch_epi.hrl").
%% ------------------------------------------------------------------
%% API Function Exports
%% ------------------------------------------------------------------
-export([interval/1, data/1]).
%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------
%% Return the smallest polling interval configured across Specs, or
%% undefined when no spec sets one.
interval(Specs) ->
    extract_minimal_interval(Specs).

%% Load the definitions for every spec and combine them. Returns
%% {ok, Hash, [{App, Definitions}]} where Hash covers all sources, or
%% the first error encountered while collecting.
data(Specs) ->
    Locators = locate_sources(Specs),
    case lists:foldl(fun collect_data/2, {ok, [], []}, Locators) of
        {ok, Hashes, Data} ->
            {ok, couch_epi_util:hash(Hashes), Data};
        Error ->
            Error
    end.
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
%% Fold step: accumulate each app's definitions and per-source hash.
%% Once an error occurs it is threaded through unchanged, so the first
%% failure wins and later locators are skipped.
collect_data({App, Locator}, {ok, HashAcc, DataAcc}) ->
    case definitions(Locator) of
        {ok, Hash, Data} ->
            {ok, [Hash | HashAcc], [{App, Data} | DataAcc]};
        Error ->
            Error
    end;
collect_data({_App, _Locator}, Error) ->
    Error.
%% Fold over all specs keeping the smallest interval option seen;
%% specs without an interval option leave the accumulator untouched
%% (the atom undefined compares greater than any number).
extract_minimal_interval(Specs) ->
    lists:foldl(fun minimal_interval/2, undefined, Specs).

minimal_interval({_App, #couch_epi_spec{options = Options}}, Acc) ->
    case lists:keyfind(interval, 1, Options) of
        false -> Acc;
        {interval, Interval} -> min(Interval, Acc)
    end.
%% Resolve each spec's source into a concrete locator, crashing (via
%% the {ok, _} match) on any source that cannot be located.
locate_sources(Specs) ->
    lists:map(fun({ProviderApp, #couch_epi_spec{value = Src}}) ->
        {ok, Locator} = locate(ProviderApp, Src),
        {ProviderApp, Locator}
    end, Specs).

%% Turn a source spec into a locator. priv_file names are resolved
%% relative to the app's priv dir; file paths are checked for
%% existence; any other locator (e.g. {module, _}) passes through
%% untouched.
locate(App, {priv_file, FileName}) ->
    case priv_path(App, FileName) of
        {ok, FilePath} ->
            ok = check_exists(FilePath),
            {ok, {file, FilePath}};
        Else ->
            Else
    end;
locate(_App, {file, FilePath}) ->
    ok = check_exists(FilePath),
    {ok, {file, FilePath}};
locate(_App, Locator) ->
    {ok, Locator}.
%% Join FileName onto AppName's priv directory; propagates the error
%% tuple from code:priv_dir/1 for unknown applications.
priv_path(AppName, FileName) ->
    case code:priv_dir(AppName) of
        {error, _} = Error ->
            Error;
        PrivDir ->
            {ok, filename:join(PrivDir, FileName)}
    end.
%% Ensure FilePath names an existing regular file; otherwise return a
%% tagged notfound error carrying the offending path.
check_exists(FilePath) ->
    IsRegular = filelib:is_regular(FilePath),
    if
        IsRegular -> ok;
        true -> {error, {notfound, FilePath}}
    end.
%% Load definitions from a locator.
%% A file locator is read with file:consult/1 and hashed by content; a
%% module locator (single module or list of modules) concatenates each
%% module's data/0 and hashes the module list.
definitions({file, FilePath}) ->
    case file:consult(FilePath) of
        {ok, Data} ->
            {ok, hash_of_file(FilePath), Data};
        {error, Reason} ->
            {error, {FilePath, Reason}}
    end;
definitions({module, Module}) when is_atom(Module) ->
    definitions({module, [Module]});
definitions({module, Modules}) ->
    Data = lists:append([M:data() || M <- Modules]),
    Hash = couch_epi_functions_gen:hash(Modules),
    {ok, Hash, Data}.
%% MD5 of the raw file contents; used to detect source changes.
hash_of_file(FilePath) ->
    {ok, Contents} = file:read_file(FilePath),
    couch_hash:md5_hash(Contents).
%%-------------------------------------------------------------------
%%
%% Copyright (c) 2015, <NAME> <<EMAIL>>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%%-------------------------------------------------------------------
-module(sbroker_statem_queue).
-behaviour(sbroker_queue).
-export([init/3]).
-export([handle_in/5]).
-export([handle_out/2]).
-export([handle_fq_out/2]).
-export([handle_timeout/2]).
-export([handle_cancel/3]).
-export([handle_info/3]).
-export([code_change/4]).
-export([config_change/3]).
-export([len/1]).
-export([send_time/1]).
-export([terminate/2]).
-record(state, {config :: [non_neg_integer()],
out :: out | out_r,
drops :: [non_neg_integer()],
queue :: sbroker_queue:internal_queue()}).
%% This sbroker_queue module takes a list of non_neg_integer() and drops the
%% integer at head of the list (or the whole queue if the queue length is
%% lower). The tail is kept and used for the next call. Once the list is emptied
%% the original list is used in its place, if this list is empty no drops
%% occcur.
%%
%% Time is ignored completely to allow testing independent of time in sbroker.
%% Timing is tested for separately using `sbroker_queue_statem`.
%% Initialise with a (possibly non-empty) internal queue and the drop
%% counter list from the spec; immediately applies one timeout pass.
init(Q, Time, {Out, Drops}) ->
    handle_timeout(Time, #state{config=Drops, out=Out, drops=Drops, queue=Q}).

%% Enqueue a request, monitoring the caller so it can be removed from
%% the queue if it dies, then apply a timeout pass.
handle_in(SendTime, {Pid, _} = From, Value, Time, #state{queue=Q} = State) ->
    Ref = monitor(process, Pid),
    NState = State#state{queue=queue:in({SendTime, From, Value, Ref}, Q)},
    handle_timeout(Time, NState).
%% Dequeue from whichever end the configured `out' strategy selects
%% (out or out_r). When the queue turns out to be empty the drop
%% counters are reset to the original configuration.
handle_out(Time, #state{out=Out} = State) ->
    {#state{queue=Q} = NState, TimeoutNext} = handle_timeout(Time, State),
    case queue:Out(Q) of
        {empty, _} ->
            #state{config=Drops} = NState,
            {empty, NState#state{drops=Drops}};
        {{value, {SendTime, From, Value, Ref}}, NQ} ->
            {SendTime, From, Value, Ref, NState#state{queue=NQ}, TimeoutNext}
    end.

%% Fair-queue variant of handle_out/2: an empty result additionally
%% carries a timeout of infinity.
handle_fq_out(Time, State) ->
    case handle_out(Time, State) of
        {_, _, _, _, _, _} = Out ->
            Out;
        {empty, NState} ->
            {empty, NState, infinity}
    end.
%% If queue is empty don't change state.
%% Otherwise apply one round of configured drops. Time itself is
%% deliberately ignored: this queue exists to test time-independent
%% behaviour (see the module comment).
handle_timeout(Time, #state{queue=Q} = State) ->
    case queue:is_empty(Q) of
        true ->
            {State, infinity};
        false ->
            {timeout(Time, State), infinity}
    end.
%% Remove every queued request whose tag matches, demonitoring each.
%% Returns false when nothing matched, otherwise the number of removed
%% requests, alongside the post-timeout state.
handle_cancel(Tag, Time, #state{queue=Q} = State) ->
    Len = queue:len(Q),
    Cancel = fun({_, {_, Tag2}, _, Ref}) when Tag2 =:= Tag ->
                     demonitor(Ref, [flush]),
                     false;
                (_) ->
                     true
             end,
    NQ = queue:filter(Cancel, Q),
    {NState, TimeoutNext} = handle_timeout(Time, State#state{queue=NQ}),
    case queue:len(NQ) of
        Len ->
            {false, NState, TimeoutNext};
        NLen ->
            {Len - NLen, NState, TimeoutNext}
    end.
%% A monitored (queued) caller died: silently drop its request. Any
%% other message only triggers a timeout pass.
handle_info({'DOWN', Ref, _, _, _}, Time, #state{queue=Q} = State) ->
    NQ = queue:filter(fun({_, _, _, Ref2}) -> Ref2 =/= Ref end, Q),
    handle_timeout(Time, State#state{queue=NQ});
handle_info(_, Time, State) ->
    handle_timeout(Time, State).

%% On code change simply restart the drop counters from the config.
code_change(_, _, #state{config=Config} = State, _) ->
    {State#state{drops=Config}, infinity}.

%% Reconfigure. When the drop list is unchanged keep the current drop
%% position; otherwise restart the counters from the new list.
config_change({Out, Config}, Time, #state{config=Config} = State) ->
    handle_timeout(Time, State#state{out=Out});
config_change({Out, Config}, Time, State) ->
    handle_timeout(Time, State#state{config=Config, out=Out, drops=Config}).
%% Current queue length.
len(#state{queue = Q}) ->
    queue:len(Q).

%% Send time of the oldest queued request, or `empty'.
send_time(#state{queue = Q}) ->
    case queue:peek(Q) of
        empty -> empty;
        {value, {SendTime, _, _, _}} -> SendTime
    end.
%% Hand the remaining internal queue back to the framework.
terminate(_, #state{queue=Q}) ->
    Q.
%% Internal

%% Apply one drop round: take the next count from the drop list
%% (recycling the configured list when exhausted; an empty
%% configuration never drops), split that many requests off the front
%% of the queue and drop them.
timeout(_, #state{config=[], drops=[]} = State) ->
    State;
timeout(Time, #state{config=Config, drops=[]} = State) ->
    timeout(Time, State#state{drops=Config});
timeout(Time, #state{drops=[Drop | Drops], queue=Q} = State) ->
    Drop2 = min(Drop, queue:len(Q)),
    {DropQ, NQ} = queue:split(Drop2, Q),
    drop_queue(Time, DropQ),
    State#state{drops=Drops, queue=NQ}.
%% Drop every request in the given queue, notifying each caller.
drop_queue(Time, Q) ->
    lists:foreach(fun(Item) -> drop_item(Time, Item) end, queue:to_list(Q)),
    ok.

%% Demonitor a dropped request and tell sbroker to reply with a drop.
drop_item(Time, {SendTime, From, _, Ref}) ->
    true = demonitor(Ref, [flush]),
    sbroker_queue:drop(From, SendTime, Time).
%%--------------------------------------------------------------------
%% Copyright (c) 2020 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_json).
-compile(inline).
-export([ encode/1
, encode/2
, safe_encode/1
, safe_encode/2
]).
-compile({inline,
[ encode/1
, encode/2
]}).
-export([ decode/1
, decode/2
, safe_decode/1
, safe_decode/2
]).
-compile({inline,
[ decode/1
, decode/2
]}).
-type(encode_options() :: jiffy:encode_options()).
-type(decode_options() :: jiffy:decode_options()).
-type(json_text() :: iolist() | binary()).
-type(json_term() :: jiffy:jiffy_decode_result()).
-export_type([json_text/0, json_term/0]).
-export_type([decode_options/0, encode_options/0]).
-spec(encode(json_term()) -> json_text()).
%% Encode with force_utf8 so invalid UTF-8 in binaries is repaired by
%% jiffy rather than raising.
encode(Term) ->
    encode(Term, [force_utf8]).

-spec(encode(json_term(), encode_options()) -> json_text()).
%% Translate proplists into jiffy's {Proplist} object form, encode, and
%% collapse the result into a single binary.
encode(Term, Opts) ->
    to_binary(jiffy:encode(to_ejson(Term), Opts)).
-spec(safe_encode(json_term())
      -> {ok, json_text()} | {error, Reason :: term()}).
%% NOTE(review): unlike encode/1, this one-argument variant passes an
%% empty option list (no force_utf8) — confirm the asymmetry is
%% intended before relying on it.
safe_encode(Term) ->
    safe_encode(Term, []).

-spec(safe_encode(json_term(), encode_options())
      -> {ok, json_text()} | {error, Reason :: term()}).
%% Wrap encode/2, converting an encoding error into {error, Reason}.
safe_encode(Term, Opts) ->
    try
        {ok, encode(Term, Opts)}
    catch
        error:Reason ->
            {error, Reason}
    end.
-spec(decode(json_text()) -> json_term()).
%% Decode with default options.
decode(Json) -> decode(Json, []).

-spec(decode(json_text(), decode_options()) -> json_term()).
%% Decode via jiffy and unwrap its {Proplist} object form back into
%% plain proplists.
decode(Json, Opts) ->
    from_ejson(jiffy:decode(Json, Opts)).
-spec(safe_decode(json_text())
      -> {ok, json_term()} | {error, Reason :: term()}).
%% Decode with default options, returning a tagged result.
safe_decode(Json) ->
    safe_decode(Json, []).

-spec(safe_decode(json_text(), decode_options())
      -> {ok, json_term()} | {error, Reason :: term()}).
%% Wrap decode/2, converting a decoding error into {error, Reason}.
safe_decode(Json, Opts) ->
    try
        {ok, decode(Json, Opts)}
    catch
        error:Reason ->
            {error, Reason}
    end.
%%--------------------------------------------------------------------
%% Helpers
%%--------------------------------------------------------------------
-compile({inline,
[ to_ejson/1
, from_ejson/1
]}).
%% Wrap proplists in the {Proplist} tuple form jiffy expects, recursing
%% into values and list elements; every other term passes through
%% unchanged.
to_ejson([{_, _} | _] = Proplist) ->
    {[{Key, to_ejson(Value)} || {Key, Value} <- Proplist]};
to_ejson(List) when is_list(List) ->
    lists:map(fun to_ejson/1, List);
to_ejson(Other) ->
    Other.
%% Inverse of to_ejson/1: unwrap jiffy's {Proplist} objects back into
%% plain proplists, recursing into values and list elements.
from_ejson(List) when is_list(List) ->
    lists:map(fun from_ejson/1, List);
from_ejson({Fields}) ->
    [{Name, from_ejson(Value)} || {Name, Value} <- Fields];
from_ejson(Other) ->
    Other.
%% Collapse an iolist encoding result into a binary; binaries pass
%% through untouched.
to_binary(Bin) when is_binary(Bin) -> Bin;
to_binary(IoList) when is_list(IoList) ->
    iolist_to_binary(IoList).
%%%===================================================================
%%% @copyright 2019 Klarna Bank AB (publ)
%%%
%%% @doc This module defines a stream processing node that can filter
%%% messages. It applies a pure predicate function to each incoming
%%% message.
%%%
%%% This behavior can be used in two modes: full and simplified. In
%%% simplified mode stream processing node is defined like following:
%%%
%%% ```{filter, fun(Offset, Message) -> true | false end}'''
%%%
%%% In full mode one has to create a callback module with
%%% `kflow_gen_filter' behavior.
%%%
%%% `filter' callback takes 3 arguments: first is offset of a message,
%%% second is the message itself and the third one is state of the
%%% callback module. This state is created in `init' callback and
%%% remains the same through the lifetime of the pipe. Return value of
%%% `filter' callback is a boolean that defines whether downstream
%%% nodes should see the message (`true') or ignore it (`false').
%%%
%%% `init' and `terminate' callbacks can be used e.g. when some
%%% resource should be obtained to process messages. Both callbacks
%%% are optional; configuration will be passed as is to
%%% `filter' callback when `init' is omitted.
%%%
%%% == Example ==
%%% ```
%%% -module(my_filter).
%%%
%%% -behavior(kflow_gen_filter).
%%%
%%% -export([init/1, filter/3, terminate/1]).
%%%
%%% init(Config) ->
%%% State = do_init(Config),
%%% State.
%%%
%%% filter(Offset, Message, State) ->
%%% true.
%%%
%%% terminate(State) ->
%%% do_cleanup(State).
%%% '''
%%%
%%% NOTE: Since state is immutable, it's actually shared between the
%%% routes.
%%%
%%% @end
%%%===================================================================
-module(kflow_gen_filter).
-behavior(kflow_gen).
-include("kflow.hrl").
-include_lib("hut/include/hut.hrl").
-export([init/2, handle_message/3, handle_flush/2, terminate/2]).
-export_type([callback_fun/0]).
-callback init(_Config) -> _State.
-callback filter(kflow:offset(), _DataIn, _State) -> _DataOut.
-callback terminate(_State) -> _.
-optional_callbacks([init/1, terminate/1]).
-type callback_fun() :: fun((kflow:offset(), _Message) -> boolean()).
-record(s1,
{ cb_module :: module()
, cb_state :: term()
}).
-record(s2,
{ function :: callback_fun()
}).
-type state() :: #s1{} | #s2{}.
%% @private
init(_NodeId, {?MODULE, Fun}) when is_function(Fun) ->
is_function(Fun, 2) orelse error({badarity, Fun}),
{ok, #s2{ function = Fun
}};
init(_NodeId, {CbModule, CbConfig}) ->
CbState = kflow_lib:optional_callback(CbModule, init, [CbConfig], CbConfig),
{ok, #s1{ cb_module = CbModule
, cb_state = CbState
}}.
%% @private
handle_message(Msg = #kflow_msg{hidden = true}, State, _) ->
%% Don't execute callback for a hidden message, simply pass it downstream:
{ok, [Msg], State};
handle_message(Msg0, State, _) ->
#kflow_msg{payload = Payload, offset = Offset} = Msg0,
Alive = case State of
#s1{cb_module = CbModule, cb_state = CbState} ->
CbModule:filter(Offset, Payload, CbState);
#s2{function = Fun} ->
Fun(Offset, Payload)
end,
case Alive of
true -> ok;
false -> ok;
_ -> error({non_boolean_result, Alive})
end,
Msg = Msg0#kflow_msg{hidden = not Alive},
{ok, [Msg], State}.
%% @private
%% A filter never buffers messages, so a flush emits nothing.
handle_flush(St, _) ->
  {ok, [], St}.

%% @private
%% Give a callback module a chance to clean up; fun-based filters have
%% nothing to release.
terminate(#s2{}, _) ->
  ok;
terminate(#s1{cb_module = CbModule, cb_state = CbState}, _) ->
  kflow_lib:optional_callback(CbModule, terminate, [CbState]).
%%%
%%% @title Dotted Version Vector Set
%%%
%%% @doc
%%% An Erlang implementation of *compact* Dotted Version Vectors, which
%%% provides a container for a set of concurrent values (siblings) with causal
%%% order information.
%%%
%%% @copyright The MIT License (MIT)
%%% Copyright (C) 2013
%%%
%%% Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
%%%
%%% The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
%%%
%%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
%%%
%%% @author <NAME> <<EMAIL>>
%%% @author <NAME> <<EMAIL>>
%%%
-module(dvvset).

-export([new/1,
         new/2,
         new_list/1,
         new_list/2,
         sync/1,
         join/1,
         update/2,
         update/3,
         size/1,
         ids/1,
         values/1,
         equal/2,
         less/2,
         map/2,
         last/2,
         lww/2,
         reconcile/2
        ]).

-export_type([clock/0, vector/0, id/0, value/0]).

%% @doc
%% STRUCTURE details:
%%      * entries() are sorted by id()
%%      * each counter() also includes the number of values in that id()
%%      * the values in each triple of entries() are causally ordered and each new value goes to the head of the list
-type clock() :: nonempty_list(). %% [entries(), values()].
-type vector() :: nonempty_list(). %% [[id(), counter()]].
-type entries() :: nonempty_list(). %% [[id(), counter(), values()]].
-type id() :: any().
-type values() :: [value()].
-type value() :: any().
-type counter() :: non_neg_integer().
%% @doc Constructs a new clock set without causal history,
%% and receives one value that goes to the anonymous list.
-spec new(value()) -> clock().
new(Value) -> [[], [Value]].

%% @doc Same as new/1, but receives a list of values, instead of a single value.
-spec new_list([value()]) -> clock().
new_list(Values) when is_list(Values) -> [[], Values];
new_list(Value) -> new(Value).

%% @doc Constructs a new clock set with the causal history
%% of the given version vector / vector clock,
%% and receives one value that goes to the anonymous list.
%% The version vector SHOULD BE the output of join/1.
-spec new(vector(), value()) -> clock().
new(VV, Value) -> new_list(VV, [Value]).

%% @doc Same as new/2, but receives a list of values, instead of a single value.
-spec new_list(vector(), [value()]) -> clock().
new_list(VV, Values) when is_list(Values) ->
    %% Sorting defends against non-order-preserving serialization.
    Sorted = lists:sort(VV),
    Entries = [[Id, Counter, []] || [Id, Counter] <- Sorted],
    [Entries, Values];
new_list(VV, Value) -> new_list(VV, [Value]).
%% @doc Synchronizes a list of clocks using sync/2.
%% It discards (causally) outdated values,
%% while merging all causal histories.
-spec sync([clock()]) -> clock().
sync(L) -> lists:foldl(fun sync/2, [], L).

%% Private function
%% Pairwise sync: if one clock strictly dominates the other, only the
%% dominant side's anonymous values survive; for concurrent clocks the
%% anonymous values of both sides are kept, deduplicated via sets
%% (NOTE: the sets round-trip does not preserve value order). Entries
%% are merged with sync2/2 in every case.
-spec sync(clock(), clock()) -> clock().
sync([], C) -> C;
sync(C ,[]) -> C;
sync(C1=[E1,V1],C2=[E2,V2]) ->
    V = case less(C1,C2) of
        true -> V2; % C1 < C2 => return V2
        false -> case less(C2,C1) of
                    true -> V1; % C2 < C1 => return V1
                    false -> % keep all unique anonymous values and sync entries()
                        sets:to_list(sets:from_list(V1++V2))
                 end
        end,
    [sync2(E1,E2),V].

%% Private function
%% Merge two id-sorted entry lists; entries present on one side only
%% are kept as-is, common ids are combined with merge/5.
-spec sync2(entries(), entries()) -> entries().
sync2([], C) -> C;
sync2(C, []) -> C;
sync2([[I1, N1, L1]=H1 | T1]=C1, [[I2, N2, L2]=H2 | T2]=C2) ->
    if
      I1 < I2 -> [H1 | sync2(T1, C2)];
      I1 > I2 -> [H2 | sync2(T2, C1)];
      true -> [merge(I1, N1, L1, N2, L2) | sync2(T1, T2)]
    end.

%% Private function
%% Combine one id's entry from both sides: the higher counter wins, and
%% the retained value list is trimmed to the values the other side has
%% not yet seen (N - length(L) is how many values an entry has already
%% discarded, so the side that discarded more is "ahead").
-spec merge(id(), counter(), values(), counter(), values()) -> nonempty_list(). %% [id(), counter(), values()].
merge(I, N1, L1, N2, L2) ->
    LL1 = length(L1),
    LL2 = length(L2),
    case N1 >= N2 of
        true ->
            case N1 - LL1 >= N2 - LL2 of
                true -> [I, N1, L1];
                false -> [I, N1, lists:sublist(L1, N1 - N2 + LL2)]
            end;
        false ->
            case N2 - LL2 >= N1 - LL1 of
                true -> [I, N2, L2];
                false -> [I, N2, lists:sublist(L2, N2 - N1 + LL1)]
            end
    end.
%% @doc Return a version vector that represents the causal history,
%% i.e. the entries with their value lists (and any anonymous values)
%% dropped.
-spec join(clock()) -> vector().
join([Entries, _Anonymous]) ->
    [[Id, Counter] || [Id, Counter, _Values] <- Entries].
%% @doc Advances the causal history with the given id.
%% The new value is the *anonymous dot* of the clock.
%% The client clock SHOULD BE a direct result of new/2.
-spec update(clock(), id()) -> clock().
update([C,[V]], I) -> [event(C, I, V), []].

%% @doc Advances the causal history of the
%% first clock with the given id, while synchronizing
%% with the second clock, thus the new clock is
%% causally newer than both clocks in the argument.
%% The new value is the *anonymous dot* of the clock.
%% The first clock SHOULD BE a direct result of new/2,
%% which is intended to be the client clock with
%% the new value in the *anonymous dot* while
%% the second clock is from the local server.
-spec update(clock(), clock(), id()) -> clock().
update([Cc,[V]], Cr, I) ->
    %% Sync both clocks without the new value
    [C,Vs] = sync([Cc,[]], Cr),
    %% We create a new event on the synced causal history,
    %% with the id I and the new value.
    %% The anonymous values that were synced still remain.
    [event(C, I, V), Vs].

%% Private function
%% Record a new event for id I carrying value V: bump I's counter and
%% push V at the head of its value list, inserting a fresh [I, 1, [V]]
%% entry in sorted position when I is not yet present.
-spec event(vector(), id(), value()) -> entries().
event([], I, V) -> [[I, 1, [V]]];
event([[I, N, L] | T], I, V) -> [[I, N+1, [V | L]] | T];
event([[I1, _, _] | _]=C, I, V) when I1 > I -> [[I, 1, [V]] | C];
event([H | T], I, V) -> [H | event(T, I, V)].
%% @doc Returns the total number of values in this clock set,
%% counting both dotted and anonymous values.
-spec size(clock()) -> non_neg_integer().
size([Entries, Anonymous]) ->
    lists:foldl(fun([_, _, Vs], Acc) -> Acc + length(Vs) end,
                length(Anonymous), Entries).

%% @doc Returns all the ids used in this clock set.
-spec ids(clock()) -> [id()].
ids([Entries, _]) -> [Id || [Id, _, _] <- Entries].

%% @doc Returns all the values used in this clock set,
%% including the anonymous values.
-spec values(clock()) -> [value()].
values([Entries, Anonymous]) ->
    Anonymous ++ lists:append([Vs || [_, _, Vs] <- Entries]).
%% @doc Compares the equality of both clocks, regarding
%% only the causal histories, thus ignoring the values.
%% NOTE(review): the second clause is documented as accepting bare
%% vector clocks ([[id(), counter()]]), but equal2/2 below only
%% matches 3-element entries, so a 2-element vector would fail with
%% function_clause — confirm the intended input shape with callers.
-spec equal(clock() | vector(), clock() | vector()) -> boolean().
equal([C1,_],[C2,_]) -> equal2(C1,C2); % DVVSet
equal(C1,C2) when is_list(C1) and is_list(C2) -> equal2(C1,C2). %vector clocks

%% Private function
%% Entries are equal when ids, counters and the number of retained
%% values all match, position by position. (The spec says vector() but
%% the patterns below operate on entries().)
-spec equal2(vector(), vector()) -> boolean().
equal2([], []) -> true;
equal2([[I, C, L1] | T1], [[I, C, L2] | T2])
    when length(L1) =:= length(L2) ->
    equal2(T1, T2);
equal2(_, _) -> false.

%% @doc Returns True if the first clock is causally older than
%% the second clock, thus values on the first clock are outdated.
%% Returns False otherwise.
-spec less(clock(), clock()) -> boolean().
less([C1,_], [C2,_]) -> greater(C2, C1, false).

%% Private function
%% Strict dominance: the first entry list dominates the second when no
%% counter is smaller and at least one strict step occurred (a larger
%% counter or an extra id); Strict accumulates that fact.
-spec greater(vector(), vector(), boolean()) -> boolean().
greater([], [], Strict) -> Strict;
greater([_|_], [], _) -> true;
greater([], [_|_], _) -> false;
greater([[I, N1, _] | T1], [[I, N2, _] | T2], Strict) ->
    if
      N1 == N2 -> greater(T1, T2, Strict);
      N1 > N2 -> greater(T1, T2, true);
      N1 < N2 -> false
    end;
greater([[I1, _, _] | T1], [[I2, _, _] | _]=C2, _) when I1 < I2 -> greater(T1, C2, true);
greater(_, _, _) -> false.
%% @doc Maps (applies) a function on all values in this clock set,
%% both dotted and anonymous, returning the same clock set with the
%% updated values.
-spec map(fun((value()) -> value()), clock()) -> clock().
map(Fun, [Entries, Anonymous]) ->
    NewEntries = [[Id, N, [Fun(V) || V <- Vs]] || [Id, N, Vs] <- Entries],
    [NewEntries, [Fun(V) || V <- Anonymous]].
%% @doc Return a clock with the same causal history, but with only one
%% value in the anonymous placeholder. This value is the result of
%% the function F, which takes all values and returns a single new value.
-spec reconcile(Winner::fun(([value()]) -> value()), clock()) -> clock().
reconcile(F, C) ->
    V = F(values(C)),
    new(join(C), V).

%% @doc Returns the latest value in the clock set,
%% according to function F(A,B), which returns *true* if
%% A compares less than or equal to B, false otherwise.
-spec last(LessOrEqual::fun((value(),value()) -> boolean()), clock()) -> value().
last(F, C) ->
    [_ ,_ , V2] = find_entry(F, C),
    V2.

%% @doc Return a clock with the same causal history, but with only one
%% value in its original position. This value is the newest value
%% in the given clock, according to function F(A,B), which returns *true*
%% if A compares less than or equal to B, false otherwise.
-spec lww(LessOrEqual::fun((value(),value()) -> boolean()), clock()) -> clock().
lww(F, C=[E,_]) ->
    case find_entry(F, C) of
        %% winner was a dotted value: keep it at its entry
        [id, I, V] -> [join_and_replace(I, V, E),[]];
        %% winner was anonymous: keep it in the anonymous list
        [anonym, _, V] -> new(join(C), V)
    end.
%% find_entry/2 - Private function
%% Seed the scan with the first available value: the head anonymous
%% value when there are no non-empty entries, otherwise the newest
%% value of the first non-empty entry. The final atom records whether
%% the current winner is anonymous or dotted (tagged with its id).
find_entry(F, [[], [V|T]]) -> find_entry(F, null, V, [[],T], anonym);
find_entry(F, [[[_, _, []] | T], Vs]) -> find_entry(F, [T,Vs]);
find_entry(F, [[[I, _, [V|_]] | T], Vs]) -> find_entry(F, I, V, [T,Vs], id).

%% find_entry/5 - Private function
%% Wrap the user's less-or-equal predicate into a chooser that reports
%% which side won together with the winning value.
find_entry(F, I, V, C, Flag) ->
    Fun = fun (A,B) ->
            case F(A,B) of
                false -> [left,A]; % A is newer than B
                true -> [right,B] % A is older than B
            end
          end,
    find_entry2(Fun, I, V, C, Flag).

%% find_entry2/5 - Private function
%% Scan the remaining values, keeping the current winner (I, V) and
%% whether that winner is dotted or anonymous. Entries are scanned
%% first; once they are exhausted, anonymous values are compared and a
%% win by one of them flips the flag to anonym.
find_entry2(_, I, V, [[], []], anonym) -> [anonym, I , V];
find_entry2(_, I, V, [[], []], id) -> [id, I, V];
find_entry2(F, I, V, [[], [V1 | T]], Flag) ->
    case F(V, V1) of
        [left,V2] -> find_entry2(F, I, V2, [[],T], Flag);
        [right,V2] -> find_entry2(F, I, V2, [[],T], anonym)
    end;
find_entry2(F, I, V, [[[_, _, []] | T], Vs], Flag) -> find_entry2(F, I, V, [T, Vs], Flag);
find_entry2(F, I, V, [[[I1, _, [V1|_]] | T], Vs], Flag) ->
    case F(V, V1) of
        [left,V2] -> find_entry2(F, I, V2, [T, Vs], Flag);
        [right,V2] -> find_entry2(F, I1, V2, [T, Vs], Flag)
    end.
%% Private function
%% Keep only value V at the entry with id Ir; empty the value lists of
%% every other entry, preserving ids and counters.
join_and_replace(Ir, V, Entries) ->
    [case Id of
         Ir -> [Id, N, [V]];
         _ -> [Id, N, []]
     end || [Id, N, _] <- Entries].
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(hocon_util).

%% Deep-merge helpers for plain maps and for richmaps/arrays.
-export([deep_map_merge/2, deep_merge/2]).
%% Running a value through a list of transformation steps.
-export([pipeline_fun/1, pipeline/3]).
%% Per-key stacks stored in a context map.
-export([stack_multiple_push/2, stack_push/2, get_stack/2, top_stack/2]).
%% Symlink-aware file identity helpers.
-export([is_same_file/2, real_file_name/1]).
-export([richmap_to_map/1]).
-export([env_prefix/1, is_array_index/1]).
-export([update_array_element/3]).

-include("hocon_private.hrl").
%% @doc Recursively merge two maps: on a key collision where both
%% values are maps they are merged recursively, otherwise the
%% right-hand ("override") value wins. With non-map arguments the
%% override is returned unchanged.
deep_map_merge(Base, Override) when is_map(Base), is_map(Override) ->
    do_deep_merge(Base, Override, fun deep_map_merge/2);
deep_map_merge(_Base, Override) ->
    Override.

%% Fold every key of the override map into the base, recursing through
%% GoDeep when the recursion bottoms out on non-map values.
do_deep_merge(Base, Override, GoDeep) when is_map(Base), is_map(Override) ->
    Combine = fun(Key, OverrideVal, Acc) ->
                      BaseVal = maps:get(Key, Acc, undefined),
                      Acc#{Key => do_deep_merge(BaseVal, OverrideVal, GoDeep)}
              end,
    maps:fold(Combine, Base, Override);
do_deep_merge(Base, Override, GoDeep) ->
    GoDeep(Base, Override).
%% Merging an object node on top of an array node (richmap form):
%% merge the inner values; if the result is still a list the array
%% node keeps its metadata, otherwise the object replaces it entirely.
deep_merge(#{?HOCON_T := array, ?HOCON_V := V1} = Base,
           #{?HOCON_T := object, ?HOCON_V := V2} = Top) ->
    NewV = deep_merge2(V1, V2),
    case is_list(NewV) of
        true ->
            %% after merge, it's still an array, only update the value
            %% keep the metadata
            Base#{?HOCON_V => NewV};
        false ->
            %% after merge, it's no longer an array, return all old
            Top
    end;
deep_merge(V1, V2) ->
    deep_merge2(V1, V2).

%% Maps merge recursively; a list merged with an indexed-array map
%% (binary integer keys, e.g. from environment overrides) is merged
%% element-wise; anything else is simply overridden.
deep_merge2(M1, M2) when is_map(M1) andalso is_map(M2) ->
    do_deep_merge(M1, M2, fun deep_merge/2);
deep_merge2(V1, V2) ->
    case is_list(V1) andalso is_indexed_array(V2) of
        true -> merge_array(V1, V2);
        false -> V2
    end.
%% @doc Wrap a list of pipeline steps into a single fun of one
%% argument, run with an empty context map.
pipeline_fun(Steps) ->
    fun(Input) -> pipeline(Input, #{}, Steps) end.

%% @doc Thread a value through Steps. A step is either a fun/1 applied
%% to the value alone, or a fun of any other arity applied to the
%% value and the context.
pipeline(Acc, _Ctx, []) ->
    Acc;
pipeline(Acc, Ctx, [Step | Rest]) ->
    Next = case is_function(Step, 1) of
               true -> Step(Acc);
               false -> Step(Acc, Ctx)
           end,
    pipeline(Next, Ctx, Rest).
%% @doc Push each {Key, Value} pair onto its per-key stack in Ctx.
stack_multiple_push(Pairs, Ctx) ->
    lists:foldl(fun stack_push/2, Ctx, Pairs).

%% @doc Push Value onto the stack stored under Key.
stack_push({Key, Value}, Ctx) ->
    Ctx#{Key => [Value | get_stack(Key, Ctx)]}.

%% @doc The stack under Key; the empty list when Key is absent.
get_stack(Key, Ctx) -> maps:get(Key, Ctx, []).

%% @doc The most recently pushed value; crashes on an empty stack.
top_stack(Key, Ctx) -> hd(get_stack(Key, Ctx)).
%% @doc Two paths denote the same file when they resolve to the same
%% real (symlink-resolved) name.
is_same_file(PathA, PathB) ->
    real_file_name(PathA) =:= real_file_name(PathB).

%% @doc Resolve a symlink to its target; paths that cannot be read as
%% links (regular files, missing files) are returned unchanged.
real_file_name(Path) ->
    case file:read_link_all(Path) of
        {ok, Resolved} -> Resolved;
        {error, _Reason} -> Path
    end.
%% @doc Convert richmap to plain-map.
%% A richmap wraps values with metadata (the ?METADATA / ?HOCON_T /
%% ?HOCON_V keys); this strips those wrappers recursively.
richmap_to_map(RichMap) when is_map(RichMap) ->
    richmap_to_map(maps:iterator(RichMap), #{});
richmap_to_map(Array) when is_list(Array) ->
    [richmap_to_map(R) || R <- Array];
richmap_to_map(Other) ->
    Other.

%% Walk one richmap level: metadata and type tags are skipped, the
%% ?HOCON_V key unwraps the actual value (map, list or scalar), and
%% ordinary keys are converted recursively.
richmap_to_map(Iter, Map) ->
    case maps:next(Iter) of
        {?METADATA, _, I} ->
            richmap_to_map(I, Map);
        {?HOCON_T, _, I} ->
            richmap_to_map(I, Map);
        {?HOCON_V, M, _} when is_map(M) ->
            richmap_to_map(maps:iterator(M), #{});
        {?HOCON_V, A, _} when is_list(A) ->
            [richmap_to_map(R) || R <- A];
        {?HOCON_V, V, _} ->
            V;
        {K, V, I} ->
            richmap_to_map(I, Map#{K => richmap_to_map(V)});
        none ->
            Map
    end.
%% @doc Prefix used for environment-variable overrides. Falls back to
%% Default when HOCON_ENV_OVERRIDE_PREFIX is unset or empty.
env_prefix(Default) ->
    case os:getenv("HOCON_ENV_OVERRIDE_PREFIX") of
        false -> Default;
        "" -> Default;
        Prefix -> Prefix
    end.
%% @doc Check whether a binary key parses as an integer array index,
%% returning {true, Integer} on success and false otherwise.
is_array_index(Key) when is_binary(Key) ->
    try binary_to_integer(Key) of
        Index -> {true, Index}
    catch
        _:_ -> false
    end.

%% A map is an "indexed array" when every key parses as an integer.
is_indexed_array(Map) when is_map(Map) ->
    lists:all(fun(Key) ->
                      case is_array_index(Key) of
                          {true, _} -> true;
                          false -> false
                      end
              end, maps:keys(Map));
is_indexed_array(_Other) ->
    false.

%% Convert an indexed-array map into an index-sorted {Index, Value} list.
indexed_array_as_list(Map) when is_map(Map) ->
    Pairs = [begin
                 {true, Index} = is_array_index(Key),
                 {Index, Value}
             end || {Key, Value} <- maps:to_list(Map)],
    lists:keysort(1, Pairs).
%% Merge an indexed-array map (e.g. built from environment overrides)
%% into a plain list, element by element.
merge_array(Array, Top) when is_list(Array) ->
    ToMerge = indexed_array_as_list(Top),
    do_merge_array(Array, ToMerge).

%% Apply each {Index, Value} override in ascending index order,
%% deep-merging Value into the existing element at that position.
do_merge_array(Array, []) -> Array;
do_merge_array(Array, [{I, Value} | Rest]) ->
    GoDeep = fun(Elem) -> deep_merge(Elem, Value) end,
    NewArray = update_array_element(Array, I, GoDeep),
    do_merge_array(NewArray, Rest).
%% @doc Replace (via GoDeep) the element at 1-based Index of List.
%% Index may be length(List) + 1, in which case a fresh empty map is
%% appended and handed to GoDeep. Any other out-of-range index raises
%% throw({bad_array_index, Message}).
update_array_element(List, Index, GoDeep) when is_list(List) ->
    MinIndex = 1,
    MaxIndex = length(List) + 1,
    Index >= MinIndex orelse throw({bad_array_index, "index starts from 1"}),
    Index =< MaxIndex orelse throw({bad_array_index, oob_message(Index, MaxIndex)}),
    {Head, Tail0} = lists:split(Index - 1, List),
    %% When Index =:= MaxIndex the tail is empty: extend the array with
    %% an empty map for GoDeep to fill in.
    {Nth, Tail} = case Tail0 of
                      [] -> {#{}, []};
                      [H | T] -> {H, T}
                  end,
    Head ++ [GoDeep(Nth) | Tail].

%% Build the out-of-bounds message. For indexes above 9, also remind
%% the user that environment overrides are applied in alphabetical
%% order, so zero padding ('02') is needed for '10' to sort after '2'.
%% (The original message was missing the word "order", making it
%% ungrammatical.)
oob_message(Index, MaxIndex) ->
    Msg0 = io_lib:format("should not be greater than ~p.", [MaxIndex]),
    Msg1 = case Index > 9 of
               true ->
                   "~nEnvironment variable overrides are applied in "
                   "alphabetical order; make sure to use zero paddings "
                   "such as '02' to ensure '10' is ordered after it";
               false ->
                   []
           end,
    [Msg0, Msg1].
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2007-2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc riak_core_stat_q is an interface to query folsom stats
%% To use, call `get_stats/1' with a query `Path'.
%% A `Path' is a list of atoms | binaries. The module creates a set
%% of `ets:select/1' guards, one for each element in `Path'
%% For each stat that has a key that matches `Path' we calculate the
%% current value and return it. This module makes use of
%% `riak_core_stat_calc_proc'
%% to cache and limit stat calculations.
-module(riak_core_stat_q).

%% NOTE(review): export_all exposes the internal helpers; calc_stat/1
%% is described below as being called back by riak_core_stat_calc_proc,
%% so an explicit export list would need to include it — confirm before
%% tightening.
-compile(export_all).

-export_type([path/0,
              stat_name/0]).

-type path() :: [] | [atom()|binary()].
-type stats() :: [stat()].
-type stat() :: {stat_name(), stat_value()}.
-type stat_name() :: tuple().
-type stat_value() :: integer() | [tuple()].

%% @doc To allow for namespacing, and adding richer dimensions, stats
%% are named with a tuple key. The key (like `{riak_kv, node, gets}' or
%% `{riak_kv, vnode, puts, time}') can
%% be seen as an hierarchical path. With `riak_kv' at the root and
%% the other elements as branches / leaves.
%% This module allows us to get only the stats at and below a particular key.
%% `Path' is a list of atoms or the empty list.
%% an example path might be `[riak_kv]' which will return every
%% stat that has `riak_kv' in the first element of its key tuple.
%% You may use the atom '_' at any point
%% in `Path' as a wild card.
-spec get_stats(path()) -> stats().
get_stats(Path) ->
    %% get all the stats that are at Path
    NamesNTypes = names_and_types(Path),
    calculate_stats(NamesNTypes).
%% @doc Query folsom's metrics table for stats whose tuple name matches
%% the given path (see get_stats/1 for path semantics).
names_and_types(Path) ->
    MatchSpec = [{{'$1', '$2'}, guards_from_path(Path), ['$_']}],
    ets:select(folsom, MatchSpec).

%% Build the ets match-spec guard list for a path: the key must be a
%% tuple at least as long as the path, and each non-wildcard path
%% element must equal the corresponding tuple element.
guards_from_path(Path) ->
    %% Guards are accumulated in reverse; add_guards/3 flips them back.
    Acc0 = [size_guard(length(Path)), {is_tuple, '$1'}],
    add_guards(Path, Acc0, 1).

add_guards([], Acc, _Pos) ->
    lists:reverse(Acc);
add_guards(['_' | Rest], Acc, Pos) ->
    %% '_' is a wildcard: emit no guard for this position.
    add_guards(Rest, Acc, Pos + 1);
add_guards([Elem | Rest], Acc, Pos) ->
    add_guards(Rest, [guard(Elem, Pos) | Acc], Pos + 1).

%% Equality guard for one tuple position.
guard(Elem, Pos) when Pos > 0 ->
    {'==', {element, Pos, '$1'}, Elem}.

%% Minimum-arity guard for the stat-name tuple.
-spec size_guard(pos_integer()) -> tuple().
size_guard(N) ->
    {'>=', {size, '$1'}, N}.
%% Compute the current value for each {Name, Type} pair found in ets.
calculate_stats(NamesAndTypes) ->
    [{Name, get_stat(Stat)} || {Name, _Type}=Stat <- NamesAndTypes].

%% Create/lookup a cache/calculation process
%% and ask it for the stat's (possibly cached) value.
get_stat(Stat) ->
    Pid = riak_core_stat_calc_sup:calc_proc(Stat),
    riak_core_stat_calc_proc:value(Pid).

%% NOTE: relies on internal knowledge of the folsom metrics record
%% ({metric, Tags, Type, HistLen}).
%% This is a callback function used by
%% riak_core_stat_calc_proc when it calculates a stat's
%% current value.
calc_stat({Name, {metric, _Tags, gauge, _HistLen}}) ->
    GuageVal = folsom_metrics:get_metric_value(Name),
    calc_guage(GuageVal);
calc_stat({Name, {metric, _Tags, histogram, _HistLen}}) ->
    folsom_metrics:get_histogram_statistics(Name);
calc_stat({Name, {metric, _Tags, _Type, _HistLen}}) ->
    folsom_metrics:get_metric_value(Name).
%% Some gauges hold a {function, M, F} triple so disparate stat sources
%% can be read through one interface: invoke it; any other stored value
%% is returned as-is. (The "guage" spelling is kept because calc_stat/1
%% calls this name.)
calc_guage({function, Mod, Fun}) ->
    Mod:Fun();
calc_guage(Value) ->
    Value.
%% ---------------------------------------------------------------------
%% Licensed under the Apache License, Version 2.0 (the "License"); you may
%% not use this file except in compliance with the License. You may obtain
%% a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @author <NAME> <<EMAIL>>
%% @copyright 2012 <NAME>
%% @doc Trivial Basic interpreter in Erlang
-module(basic).

-export([run/2]).

-include_lib("eunit/include/eunit.hrl").

%% Tell the shared test file that this is the interpreted implementation.
-define(INTERPRETED, true).
-include("basic_test.erl").
%% Load the program into ets and start executing at line N. The `line'
%% table (ordered_set) maps line numbers to statements; `var' holds
%% variable bindings. Both are private named tables created here and
%% deleted by stop/1, whose argument becomes run/2's return value.
run(N, Prog) ->
    ets:new(var, [private, named_table]),
    ets:new(line, [private, named_table, ordered_set]),
    [ets:insert(line, Line) || Line <- Prog],
    goto(N).

%% Tear down the interpreter tables and return the exit value.
stop(Result) ->
    ets:delete(var),
    ets:delete(line),
    Result.

%% Execute the statement at line L. User-supplied line numbers might
%% not exist, in which case we fall through to the next stored line;
%% '$end_of_table' means we ran off the end of the program.
goto('$end_of_table') ->
    stop(0);
goto(L) ->
    Next = ets:next(line, L),
    case ets:lookup(line, L) of
        [{_, Stmt}] -> stmt(Stmt, Next);
        [] -> goto(Next)
    end.

%% One clause per statement form. Next is the following line number.
stmt({print, Fmt, Args}, Next) ->
    io:format(Fmt, [expr(A) || A <- Args]),
    goto(Next);
stmt({set, Var, X}, Next) ->
    ets:insert(var, {Var, expr(X)}),
    goto(Next);
stmt({goto, X}, _Next) ->
    goto(expr(X));
stmt({stop, X}, _Next) ->
    stop(expr(X));
stmt({iff, Cond, Then, Else}, _Next) ->
    case expr(Cond) of
        0 -> goto(Else);
        _ -> goto(Then)
    end.

%% Evaluate an expression: number/string literals, variables (unbound
%% variables read as 0) and a small set of operators.
expr(X) when is_number(X); is_list(X) ->
    X;
expr(Var) when is_atom(Var) ->
    case ets:lookup(var, Var) of
        [{_, Value}] -> Value;
        [] -> 0
    end;
expr({plus, A, B}) ->
    expr(A) + expr(B);
expr({equal, A, B}) ->
    bool(expr(A) == expr(B));
expr({gt, A, B}) ->
    bool(expr(A) > expr(B));
expr({knot, X}) ->
    %% logical NOT: any nonzero value counts as true
    case expr(X) of
        0 -> 1;
        _ -> 0
    end.

%% Basic-style booleans: 1 for true, 0 for false.
bool(true) -> 1;
bool(false) -> 0.
%% Copyright (c) 2022 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : lfe_shell_docs.erl
%% Author : <NAME>
%% Purpose : Render LFE docs for output in shell.
%% The interface is loosely modelled on the shell_docs module.
-module(lfe_shell_docs).

-export([render/2,render/3,render/4]).

-include("lfe.hrl").
-include("lfe_docs.hrl").

%% Coloured strings for the LFE banner, red, green, yellow and blue.
%% (Only RED and BLU are used in this module's visible code.)
-define(RED(Str), "\e[31m" ++ Str ++ "\e[0m").
-define(GRN(Str), "\e[1;32m" ++ Str ++ "\e[0m").
-define(YLW(Str), "\e[1;33m" ++ Str ++ "\e[0m").
-define(BLU(Str), "\e[1;34m" ++ Str ++ "\e[0m").
-define(BOLD(Str), "\e[1m" ++ Str ++ "\e[0m").
%% render(Module, Docs) -> unicode:chardata().
%% Render a module's own documentation. A beam binary is accepted in
%% place of the module name; its name is recovered from the chunk data.
render(Bin, Docs) when is_binary(Bin) ->
    {ok,{Mod,_}} = beam_lib:chunks(Bin, [], []), %Sneaky!
    render(Mod, Docs);
render(Mod, #docs_v1{format = ?LFE_FORMAT, module_doc=Mdoc}) ->
    [red_line(60),
     lfe_io:format1(?BLU("~p")++"\n\n", [Mod]),
     return_doc(Mod, Mdoc)].

%% render(Module, Function, Docs) -> unicode:chardata().
%% Render docs for every function or macro with the given name,
%% regardless of arity.
render(_Mod, Name, #docs_v1{format = ?LFE_FORMAT, docs = Docs}) ->
    Render = fun ({{function,_Func,_Ar},_,Sig,Doc,Meta}) ->
                     [red_line(60),
                      return_sig(function, Sig, Meta),
                      return_doc(Sig, Doc)];
                 ({{macro,_Macro,_},_,Sig,Doc,Meta}) ->
                     [red_line(60),
                      return_sig(macro, Sig, Meta),
                      return_doc(Sig, Doc)]
             end,
    Ret = [ Render(F) || {{_,N,_},_,_,_,_}=F <- Docs, N =:= Name ],
    return_render(Ret, function_missing).

%% render(Module, Function, Arity, Docs) -> unicode:chardata().
%% Render docs for one specific function/arity (macros are not matched
%% here, unlike render/3).
render(_Mod, Name, Arity, #docs_v1{format = ?LFE_FORMAT, docs = Docs}) ->
    Render = fun ({{function,_Func,_Ar},_,Sig,Doc,Meta}) ->
                     [red_line(60),
                      return_sig(function, Sig, Meta),
                      return_doc(Sig, Doc)]
             end,
    Ret = [ Render(F) || {{function,N,A},_,_,_,_}=F <- Docs,
                         N =:= Name, A =:= Arity ],
    return_render(Ret, function_missing).

%% Print the English doc text when present, otherwise a placeholder
%% naming the missing/unknown item.
return_doc(_Missing, #{<<"en">> := Dv}) ->
    lfe_io:format1("~s\n", [Dv]);
return_doc(Missing, None) when None =:= none; None =:= #{} ->
    lfe_io:format1(<<"No documentation for ~s\n">>, [Missing]);
return_doc(Missing, _Docs) ->
    lfe_io:format1(<<"Unknown format for ~s\n">>, [Missing]).

%% return_sig(_Type, _Sig, #{signature:=[Spec]}) ->
%%     lfe_io:format1(?BLU("~s") ++ "\n", [erl_pp:form(Spec)]);
%% Print the "<type> <signature>" heading in blue.
return_sig(Type, Sig, _Meta) ->
    lfe_io:format1(?BLU("~s ~s") ++ "\n\n", [Type,Sig]).

%% An empty render result means nothing matched; surface Error.
return_render([], Error) -> {error,Error};
return_render(FDocs, _Error) -> FDocs.
%% red_line(Length) -> iodata().
%% A horizontal rule of Length dashes wrapped in ANSI red escape
%% codes (the ?RED macro inlined, so this clause is self-contained).
red_line(Len) ->
    io_lib:format("\e[31m~*c\e[0m\n", [Len, $-]).
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2021, <COMPANY>
%%% @doc
%%% Providing a fun API for sliding a list backward and forward with adding empty spaces
%%% once values matched and combined
%%% @end
%%%-------------------------------------------------------------------
-module(tofe_vector).

%% Working state for a slide: `list' is the row/column being processed,
%% `values' the non-null cells and `spaces' the null cells collected
%% while scanning.
-record(vector, {list = [] :: list(), values = [] :: list(), spaces = [] :: list()}).

%% API
-export([slide/2]).

%% --------------------------------------------------------------------
%% @doc
%% Performing sliding operation with returning new list of values including new spaces in case of
%% merged cells due to matched values in a list
%% @end
%% --------------------------------------------------------------------
slide(List, Direction) ->
  do_slide(List, Direction).

%% forward slides cells toward the head (spaces inserted in front);
%% backward slides toward the tail (spaces appended at the rear).
do_slide(List, forward) ->
  slide_list(List, last_to_first, front);
do_slide(List, backward) ->
  slide_list(List, first_to_last, rear).

%% NOTE(review): the [pipe](...) form relies on a pipe parse transform
%% that threads the previous result through each `_' placeholder —
%% confirm which transform the build enables.
slide_list(List, Direction, AddSpacesTo) ->
  [pipe](
    List,
    new(List),
    values_and_spaces(_),
    combine_matched(_, Direction),
    add_spaces(_, AddSpacesTo),
    list(_)
  ).
%% --------------------------------------------------------------------
%% @doc
%% Returning a list of values from a vector record
%% @end
%% --------------------------------------------------------------------
list(#vector{list = List}) -> List.

%% --------------------------------------------------------------------
%% @doc
%% Creating a vector record from initial list of values
%% @end
%% --------------------------------------------------------------------
new(List) -> #vector{list = List}.

%% --------------------------------------------------------------------
%% @doc
%% Splitting the vector's list into the `values' (non-null) and
%% `spaces' (null) fields, preserving relative order, before the
%% values are processed. (The redundant intermediate binding of the
%% fold result has been removed.)
%% @end
%% --------------------------------------------------------------------
values_and_spaces(Vector) ->
  %% foldr keeps both accumulated lists in original order.
  lists:foldr(fun space_or_value/2, Vector, Vector#vector.list).

%% Classify one cell: null goes to spaces, anything else to values.
space_or_value(null = Item, Vector) ->
  Vector#vector{spaces = [Item | Vector#vector.spaces]};
space_or_value(Item, Vector) ->
  Vector#vector{values = [Item | Vector#vector.values]}.
%% --------------------------------------------------------------------
%% @doc
%% Combining matched values and updating a vector record with a new list of values
%% @end
%% --------------------------------------------------------------------
combine_matched(#vector{values = Values} = Vector, first_to_last) ->
  [pipe](
    Values,
    combine_pairs(_),
    new_values(_, Vector)
  );
combine_matched(#vector{values = Values} = Vector, last_to_first) ->
  %% Reverse so pairing starts from the far end, then restore order.
  [pipe](
    Values,
    lists:reverse(_),
    combine_pairs(_),
    lists:reverse(_),
    new_values(_, Vector)
  ).
%% --------------------------------------------------------------------
%% @doc
%% Collapse adjacent equal values into their sum, scanning left to
%% right; each cell takes part in at most one merge.
%% @end
%% --------------------------------------------------------------------
combine_pairs([First, Second | Rest]) when First == Second ->
  [First + Second | combine_pairs(Rest)];
combine_pairs([First | Rest]) ->
  [First | combine_pairs(Rest)];
combine_pairs([]) ->
  [].
%% --------------------------------------------------------------------
%% @doc
%% Updating a list with new values in a vector record
%% @end
%% --------------------------------------------------------------------
new_values(NewValues, Vector) ->
  Vector#vector{
    list = NewValues,
    %% Every merge shrinks the value list by one, so the length
    %% difference is the number of new empty cells to add.
    spaces = Vector#vector.spaces ++ missed_spaces(Vector#vector.values, NewValues)
  }.

%% --------------------------------------------------------------------
%% @doc
%% adding missed spaces appeared after merging values. Can be an empty list
%% @end
%% --------------------------------------------------------------------
missed_spaces(Values, NewValues) ->
  lists:duplicate(length(Values) - length(NewValues), null).
%% --------------------------------------------------------------------
%% @doc
%% Prepend (front) or append (rear) the accumulated spaces to the
%% vector's value list.
%% @end
%% --------------------------------------------------------------------
add_spaces(#vector{list = Items, spaces = Spaces} = Vector, front) ->
  Vector#vector{list = Spaces ++ Items};
add_spaces(#vector{list = Items, spaces = Spaces} = Vector, rear) ->
  Vector#vector{list = Items ++ Spaces}.
%% ``The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with your Erlang distribution. If not, it can be
%% retrieved via the world wide web at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% The Initial Developer of the Original Code is Corelatus AB.
%% Portions created by Corelatus are Copyright 2003, Corelatus
%% AB. All Rights Reserved.''
%%
%% @doc Module to print out terms for logging. Limits by length rather than depth.
%%
%% The resulting string may be slightly larger than the limit; the intention
%% is to provide predictable CPU and memory consumption for formatting
%% terms, not produce precise string lengths.
%%
%% Typical use:
%%
%% trunc_io:print(Term, 500).
%%
%% Source license: Erlang Public License.
%% Original author: <NAME>, <tt><EMAIL></tt>
-module(trunc_io).
-author('<EMAIL>').
%% And thanks to <NAME> for a bug fix

-export([print/2, fprint/2, safe/2]). % interface functions
-export([perf/0, perf/3, perf1/0, test/0, test/2]). % testing functions
-version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $").
%% @doc Returns an flattened list containing the ASCII representation of the given
%% term, truncated to roughly Max characters (see print/2).
-spec fprint(term(), pos_integer()) -> string().
fprint(T, Max) ->
    {L, _} = print(T, Max),
    lists:flatten(L).
%% @doc Same as print, but never crashes.
%%
%% This is a tradeoff. Print might conceivably crash if it's asked to
%% print something it doesn't understand, for example some new data
%% type in a future version of Erlang. If print crashes, we fall back
%% to io_lib to format the term, but then the formatting is
%% depth-limited instead of length limited, so you might run out
%% memory printing it. Out of the frying pan and into the fire.
%%
%% NOTE(review): the fallback is signalled by a 1-tuple (no Used
%% length), and old-style `catch' conflates errors/throws/exits here.
-spec safe(term(), pos_integer()) -> {string(), pos_integer()} | {string()}.
safe(What, Len) ->
    case catch print(What, Len) of
        {L, Used} when is_list(L) -> {L, Used};
        _ -> {"unable to print" ++ io_lib:write(What, 99)}
    end.
%% @doc Returns {List, Length}: an iolist rendering of the term plus
%% the number of characters it contributes. A negative budget renders
%% as the ellipsis "...".
-spec print(term(), pos_integer()) -> {iolist(), pos_integer()}.
print(_, Max) when Max < 0 -> {"...", 3};
print(Tuple, Max) when is_tuple(Tuple) ->
    %% Render tuples like lists, wrapped in braces; the braces cost 2.
    {TC, Len} = tuple_contents(Tuple, Max-2),
    {[${, TC, $}], Len + 2};
%% @doc We assume atoms, floats, funs, integers, PIDs, ports and refs never need
%% to be truncated. This isn't strictly true, someone could make an
%% arbitrarily long bignum. Let's assume that won't happen unless someone
%% is being malicious.
%%
print(Atom, _Max) when is_atom(Atom) ->
    L = atom_to_list(Atom),
    {L, length(L)};
print(<<>>, _Max) ->
    {"<<>>", 4};
print(Binary, Max) when is_binary(Binary) ->
    %% Only convert at most Max bytes, then render the prefix as a
    %% (possibly printable) list between << >> (which cost 4).
    B = binary_to_list(Binary, 1, lists:min([Max, size(Binary)])),
    {L, Len} = alist_start(B, Max-4),
    {["<<", L, ">>"], Len};
print(Float, _Max) when is_float(Float) ->
    L = float_to_list(Float),
    {L, length(L)};
print(Fun, _Max) when is_function(Fun) ->
    L = erlang:fun_to_list(Fun),
    {L, length(L)};
print(Integer, _Max) when is_integer(Integer) ->
    L = integer_to_list(Integer),
    {L, length(L)};
print(Pid, _Max) when is_pid(Pid) ->
    L = pid_to_list(Pid),
    {L, length(L)};
print(Ref, _Max) when is_reference(Ref) ->
    L = erlang:ref_to_list(Ref),
    {L, length(L)};
print(Port, _Max) when is_port(Port) ->
    L = erlang:port_to_list(Port),
    {L, length(L)};
print(List, Max) when is_list(List) ->
    %% Lists get special treatment: printable prefixes render as
    %% strings (see alist_start/2).
    alist_start(List, Max).
%% Returns {List, Length}
tuple_contents(Tuple, Max) ->
L = tuple_to_list(Tuple),
list_body(L, Max).
%% Format the inside of a list, i.e. do not add a leading [ or trailing ].
%% Returns {List, Length}
list_body([], _) -> {[], 0};
list_body(_, Max) when Max < 4 -> {"...", 3};
list_body([H|T], Max) ->
{List, Len} = print(H, Max),
{Final, FLen} = list_bodyc(T, Max - Len),
{[List|Final], FLen + Len};
list_body(X, Max) -> %% improper list
{List, Len} = print(X, Max - 1),
{[$|,List], Len + 1}.
list_bodyc([], _) -> {[], 0};
list_bodyc(_, Max) when Max < 4 -> {"...", 3};
list_bodyc([H|T], Max) ->
{List, Len} = print(H, Max),
{Final, FLen} = list_bodyc(T, Max - Len - 1),
{[$,, List|Final], FLen + Len + 1};
list_bodyc(X,Max) -> %% improper list
{List, Len} = print(X, Max - 1),
{[$|,List], Len + 1}.
%% The head of a list we hope is ascii. Examples:
%%
%% [65,66,67] -> "ABC"
%% [65,0,67] -> "A"[0,67]
%% [0,65,66] -> [0,65,66]
%% [65,b,66] -> "A"[b,66]
%%
alist_start([], _) -> {"[]", 2};
alist_start(_, Max) when Max < 4 -> {"...", 3};
alist_start([H|T], Max) when H >= 16#20, H =< 16#7e -> % definitely printable
{L, Len} = alist([H|T], Max-1),
{[$\"|L], Len + 1};
alist_start([H|T], Max) when H == 9; H == 10; H == 13 -> % show as space
{L, Len} = alist(T, Max-1),
{[$ |L], Len + 1};
alist_start(L, Max) ->
{R, Len} = list_body(L, Max-2),
{[$[, R, $]], Len + 2}.
alist([], _) -> {"\"", 1};
alist(_, Max) when Max < 5 -> {"...\"", 4};
alist([H|T], Max) when H >= 16#20, H =< 16#7e -> % definitely printable
{L, Len} = alist(T, Max-1),
{[H|L], Len + 1};
alist([H|T], Max) when H == 9; H == 10; H == 13 -> % show as space
{L, Len} = alist(T, Max-1),
{[$ |L], Len + 1};
alist(L, Max) ->
{R, Len} = list_body(L, Max-3),
{[$\", $[, R, $]], Len + 3}.
%%--------------------
%% The start of a test suite. So far, it only checks for not crashing.
%% @hidden
-spec test() -> ok.
test() ->
test(trunc_io, print).
%% @hidden
-spec test(atom(), atom()) -> ok.
test(Mod, Func) ->
Simple_items = [atom, 1234, 1234.0, {tuple}, [], [list], "string", self(),
<<1,2,3>>, make_ref(), fun() -> ok end],
F = fun(A) ->
Mod:Func(A, 100),
Mod:Func(A, 2),
Mod:Func(A, 20)
end,
G = fun(A) ->
case catch F(A) of
{'EXIT', _} -> exit({failed, A});
_ -> ok
end
end,
lists:foreach(G, Simple_items),
Tuples = [ {1,2,3,a,b,c}, {"abc", def, 1234},
{{{{a},b,c,{d},e}},f}],
Lists = [ [1,2,3,4,5,6,7], lists:seq(1,1000),
[{a}, {a,b}, {a, [b,c]}, "def"], [a|b], [$a|$b] ],
lists:foreach(G, Tuples),
lists:foreach(G, Lists).
%% @hidden
-spec perf() -> ok.
perf() ->
{New, _} = timer:tc(trunc_io, perf, [trunc_io, print, 1000]),
{Old, _} = timer:tc(trunc_io, perf, [io_lib, write, 1000]),
io:fwrite("New code took ~p us, old code ~p\n", [New, Old]).
%% @hidden
-spec perf(atom(), atom(), integer()) -> done.
perf(M, F, Reps) when Reps > 0 ->
test(M,F),
perf(M,F,Reps-1);
perf(_,_,_) ->
done.
%% @hidden
%% Performance test. Needs a particularly large term I saved as a binary...
-spec perf1() -> {non_neg_integer(), non_neg_integer()}.
perf1() ->
{ok, Bin} = file:read_file("bin"),
A = binary_to_term(Bin),
{N, _} = timer:tc(trunc_io, print, [A, 1500]),
{M, _} = timer:tc(io_lib, write, [A]),
{N, M}. | src/trunc_io.erl | 0.587943 | 0.441071 | trunc_io.erl | starcoder |
%% Copyright (c) 2014-2018 <NAME>.
%%
%% Fuse_lb is configured with a set of fuses. It will dispatch work requests via
%% them using the selected algorithm. If a fuse burns it will be removed from
%% the set. Once the fuse is mended it will again be included in the set.
%% Different load balancing algorithms can be used such as round_robin or prio.
%% Fuse_lb does not impose a limit on the number of simultaneous ongoing
%% requests per fuse. If you need such a limit, then instead use fuse_pool.
%%
%% When initializing a fuse_lb a [fuse:fuse_data()] list is provided with config
%% for the respective fuse. It contains the fuse user data, probe back-off
%% schedule, and the probe function. Fuses start out in a burnt state which
%% means that they will call the probe function to initialize themselves.
-module(fuse_lb).
-behaviour(gen_server).
-export([start_link/2, start_link/3, call/2, num_fuses_active/1, stop/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3]).
%% 'algorithm' is the load balancing algorithm to use. 'available' is the fuses
%% that are not burnt. 'log' is the log fun.
-record(state, {algorithm=undefined, available=[], log=undefined}).
%%%_* API ==============================================================
-spec start_link([fuse:fuse_data()],
atom()) -> ignore | {error, _} | {ok, pid()}.
start_link(FusesConfig, Algorithm) ->
gen_server:start_link(?MODULE, [FusesConfig, Algorithm], []).
-spec start_link([fuse:fuse_data()],
atom(), fun()) -> ignore | {error, _} | {ok, pid()}.
start_link(FusesConfig, Algorithm, Log) ->
gen_server:start_link(?MODULE, [FusesConfig, Algorithm, Log], []).
-spec call(pid() | atom(), fun()) -> {ok, any()} | {error, fuse_burnt} |
{error, no_fuses_left}.
call(Lb, Fun) ->
{ok, Fuse} = gen_server:call(Lb, get_fuse),
case Fuse of
no_fuses_left -> {error, no_fuses_left};
_ ->
case fuse:call(Fuse, Fun) of
{available, X} -> {ok, X};
{unavailable, X} -> {ok, X};
{error, fuse_burnt} = E -> E;
{error, E} -> error(E) %% Not expected error.
end
end.
-spec num_fuses_active(pid() | atom()) -> integer().
num_fuses_active(Lb) -> gen_server:call(Lb, num_fuses_active).
-spec stop(pid() | atom()) -> any().
stop(Lb) -> gen_server:call(Lb, stop).
%%%_* Gen server callbacks =============================================
init([FusesConfig, Algorithm]) ->
init(FusesConfig, Algorithm, fun(_, _) -> ok end);
init([FusesConfig, Algorithm, LogFun]) ->
init(FusesConfig, Algorithm, LogFun).
init(FusesConfig, Algorithm, LogFun) ->
lists:map(fun(FuseData) ->
{ok, Fuse} = fuse:start_link(FuseData, self(), LogFun),
Fuse
end, FusesConfig),
{ok, #state{algorithm=Algorithm, available=[], log=LogFun}}.
handle_call(get_fuse, _From, #state{algorithm=Algorithm,
available=Available0} = S) ->
{Fuse, Available} = pick(Algorithm, Available0),
{reply, {ok, Fuse}, S#state{algorithm=Algorithm, available=Available}};
handle_call(num_fuses_active, _From, #state{available=Available} = S) ->
{reply, length(Available), S};
handle_call(stop, _From, S) ->
{stop, normal, ok, S}.
handle_cast({fuse_burnt, F}, #state{available=Available, log=L} = S) ->
L("fuse_lb: Fuse (pid=~p) burnt, removing from pool.", [F]),
{noreply, S#state{available=Available -- [F]}};
handle_cast({fuse_mended, F}, #state{algorithm=Algorithm, available=Available,
log=L} = S) ->
L("fuse_lb: Adding fuse (pid=~p) to pool.", [F]),
{noreply, S#state{available=add_back_fuse(Algorithm, F, Available)}};
handle_cast(stop, S) -> {stop, ok, S};
handle_cast(Msg, S) -> {stop, {unexpected_cast, Msg}, S}.
handle_info(Msg, S) -> {stop, {unexpected_info, Msg}, S}.
terminate(_Reason, _State) -> ok.
code_change(_OldVsn, State, _Extra) -> {ok, State}.
%%%_* Internal =========================================================
pick(_, []) -> {no_fuses_left, []};
pick(round_robin, [H | T]) -> {H, T ++ [H]};
pick(prio, [H | _T] = L) -> {H, L}.
add_back_fuse(_, F, Available) -> lists:usort([F | Available]). | src/fuse_lb.erl | 0.518546 | 0.422981 | fuse_lb.erl | starcoder |
%%%
%%% Copyright (c) 2018-2020 <NAME>
%%% All rights reserved.
%%% Distributed under the terms of the MIT License. See the LICENSE file.
%%%
%%% @doc
%%% Allow Header Implementation.
%%%
-module(ersip_hdr_allow).
-export([has/2,
from_list/1,
to_list/1,
from_method_set/1,
to_method_set/1,
make/1,
parse/1,
build/2,
assemble/1,
assemble_bin/1,
raw/1
]).
-export_type([allow/0, raw/0]).
%%===================================================================
%% Types
%%===================================================================
-type allow() :: {allow, ersip_method_set:set()}.
-type raw() :: ersip_method_set:raw().
-type parse_result() :: {ok, allow()} | {error, parse_error()}.
-type parse_error() :: no_allow | {invalid_allow, binary()}.
%%===================================================================
%% API
%%===================================================================
%% @doc Check if header has method.
%% Example:
%% ```
%% Allow = ersip_hdr_allow:make(<<"INVITE, ACK, BYE, CANCEL, OPTIONS">>),
%% true = ersip_hdr_allow:has(ersip_method:make(<<"INVITE">>), Allow).
%% '''
-spec has(ersip_method:method(), allow()) -> boolean().
has(M, {allow, MSet}) ->
ersip_method_set:has(M, MSet).
%% @doc Create Allow header from list of methods.
-spec from_list([ersip_method:method()]) -> allow().
from_list(MethodList) ->
{allow, ersip_method_set:new(MethodList)}.
%% @doc Get all methods from Allow header.
-spec to_list(allow()) -> [ersip_method:method()].
to_list({allow, MethodSet}) ->
ersip_method_set:to_list(MethodSet).
%% @doc Create Allow header from method set.
-spec from_method_set(ersip_method_set:set()) -> allow().
from_method_set({method_set, _} = MethodSet) ->
{allow, MethodSet}.
%% @doc Get method set from Allow header.
-spec to_method_set(allow()) -> ersip_method_set:set().
to_method_set({allow, MethodSet}) ->
MethodSet.
%% @doc Create Allow header from binary or from raw value.
%% Raise error if input is not well-formed Allow header or incorrect raw value.
%% Example:
%% ```
%% Allow = ersip_hdr_allow:make(<<"INVITE, ACK, BYE, CANCEL, OPTIONS">>).
%% Allow = ersip_hdr_allow:make([<<"INVITE>>, <<"ACK">>, <<"BYE">>, <<"CANCEL">>, <<"OPTIONS">>]).
%% '''
-spec make(binary() | raw()) -> allow().
make(Value) when is_binary(Value) ->
case parse(Value) of
{ok, Allow} -> Allow;
{error, Error} -> error(Error)
end;
make(RawValue) when is_list(RawValue) ->
from_method_set(ersip_method_set:make(RawValue)).
%% @doc Parse header from binary or from ersip_hdr header.
-spec parse(ersip_hdr:header() | binary()) -> parse_result().
parse(HeaderBin) when is_binary(HeaderBin) ->
parse_header_list([HeaderBin]);
parse(Header) ->
case ersip_hdr:raw_values(Header) of
[] -> {error, no_allow};
HeaderList -> parse_header_list(HeaderList)
end.
%% @doc Create lowlevel ersip_hdr from Allow header.
-spec build(HeaderName :: binary(), allow()) -> ersip_hdr:header().
build(HdrName, {allow, _} = Allow) ->
Hdr = ersip_hdr:new(HdrName),
ersip_hdr:add_value([assemble(Allow)], Hdr).
%% @doc Serialize header to iolist.
-spec assemble(allow()) -> iolist().
assemble({allow, _} = Allow) ->
ersip_iolist:join(<<", ">>,
[ersip_method:to_binary(Method)
|| Method <- to_list(Allow)
]).
%% @doc Serialize the header to binary.
-spec assemble_bin(allow()) -> binary().
assemble_bin(Allow) ->
iolist_to_binary(assemble(Allow)).
%% @doc Get raw value (in plain erlang types) of the header.
-spec raw(allow()) -> raw().
raw({allow, _} = Allow) ->
ersip_method_set:raw(to_method_set(Allow)).
%%===================================================================
%% Internal implementation
%%===================================================================
%% @private
-spec parse_header_list([binary()]) -> parse_result().
parse_header_list(HeaderList) ->
try
MethodList0 = [binary:split(H, <<",">>, [global]) || H <- HeaderList],
MethodList1 = lists:flatten(MethodList0),
MethodList = [ersip_bin:trim_lws(Tag) || Tag <- MethodList1],
L = lists:map(fun(Val) ->
case ersip_method:parse(iolist_to_binary(Val)) of
{ok, Method, <<>>} -> Method;
{ok, _, _} -> throw({error, {invalid_method, Val}});
{error, _} = Error -> throw(Error)
end
end,
MethodList),
{ok, from_list(L)}
catch
throw:{error, _} = Error ->
{error, {invalid_allow, Error}}
end. | src/message/ersip_hdr_allow.erl | 0.571647 | 0.459137 | ersip_hdr_allow.erl | starcoder |
%%%-------------------------------------------------------------------
%%% Licensed to the Apache Software Foundation (ASF) under one
%%% or more contributor license agreements. See the NOTICE file
%%% distributed with this work for additional information
%%% regarding copyright ownership. The ASF licenses this file
%%% to you under the Apache License, Version 2.0 (the
%%% "License"); you may not use this file except in compliance
%%% with the License. You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing,
%%% software distributed under the License is distributed on an
%%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%%% KIND, either express or implied. See the License for the
%%% specific language governing permissions and limitations
%%% under the License.
%%%
%%% @doc This module facilitates encoding/decoding of thrift data
%%% encoded with the binary protocol.
%%% @end
%%% -------------------------------------------------------------------
-module(otter_lib_thrift).
-export([
encode/1,
decode/1,
encode_implicit_list/1,
decode_implicit_list/1
]).
%% encode/decode basic thrift binary data
%% e.g. The transport (e.g. HTTP) data is an "implicit" list starting
%% with the element type, and number of elements ..
%%--------------------------------------------------------------------
%% @doc Decodes a binary input as implicit list to a thrift decoded data
%% structure, i.e. list of {Id, Type, Value} tuples. Implicit list is e.g.
%% how OpenZipkin expects the spans to be send. It is called implicit as it
%% does not contain a list type tag in the beginning.
%% @end
%%--------------------------------------------------------------------
-spec decode_implicit_list(BinaryData :: binary()) -> term().
decode_implicit_list(BinaryData) ->
decode(list, BinaryData).
%%--------------------------------------------------------------------
%% @doc Encodes a list of thrift data structures to binary e.g. to be sent
%% on the wire.
%% @end
%%--------------------------------------------------------------------
-spec encode_implicit_list(term()) -> binary().
encode_implicit_list(Data) ->
encode({list, Data}).
%%--------------------------------------------------------------------
%% @doc Encodes a {Id, Type, Data} tuple to binary.
%% @end
%%--------------------------------------------------------------------
-spec encode({Id :: integer(), Type :: atom(), Data :: term()}) -> binary();
({Type :: atom(), Data :: term()}) -> binary().
encode({Id, Type, Data}) ->
TypeId = map_type(Type),
EData = encode({Type, Data}),
<<TypeId, Id:16, EData/bytes>>;
%% .. and without Id (i.e. part of list/set/map)
encode({bool, true}) ->
<<1>>;
encode({bool, false}) ->
<<0>>;
encode({byte, Val}) ->
<<Val>>;
encode({double, Val}) ->
<<Val:64>>;
encode({i16, Val}) ->
<<Val:16>>;
encode({i32, Val}) ->
<<Val:32>>;
encode({i64, Val}) ->
<<Val:64>>;
encode({string, Val}) when is_list(Val) ->
Size = length(Val),
% Might want to convert this to UTF-8 binary first, however for now
% I'll leave it to the next encoding when binary can be provided in
% UTF-8 format. In this part is kindly expected it to be ASCII
% string
Bytes = list_to_binary(Val),
<<Size:32, Bytes/bytes>>;
encode({string, Val}) when is_binary(Val) ->
Size = byte_size(Val),
<<Size:32, Val/bytes>>;
encode({list, {ElementType, Data}}) ->
ElementTypeId = map_type(ElementType),
Size = length(Data),
EData = list_to_binary([
encode({ElementType, Element}) ||
Element <- Data
]),
<<ElementTypeId, Size:32, EData/bytes>>;
encode({set, Data}) ->
encode({list, Data});
encode({struct, Data}) ->
EData = list_to_binary([
encode(StructElement) ||
StructElement <- Data
]),
<<EData/bytes, 0>>;
encode({map, {KeyType, ValType, Data}}) ->
KeyTypeId = map_type(KeyType),
ValTypeId = map_type(ValType),
Size = length(Data),
EData = list_to_binary([
[encode({KeyType, Key}), encode({ValType, Val})] ||
{Key, Val} <- Data
]),
<<KeyTypeId, ValTypeId, Size:32, EData/bytes>>.
%% Decoding functions
%%--------------------------------------------------------------------
%% @doc Decodes a binary to {{Id, Type, Data}, Rest} tuple and rest of data.
%% @end
%%--------------------------------------------------------------------
-spec decode( BinaryData :: binary()) -> {{Id :: integer(), Type :: atom(), Data :: term()}, Rest :: binary()}.
decode(<<TypeId, Id:16, Data/bytes>>) ->
Type = map_type(TypeId),
{Val, Rest} = decode(Type, Data),
{{Id, Type, Val}, Rest}.
decode(bool, <<Val, Rest/bytes>>) ->
{Val == 1, Rest};
decode(byte, <<Val, Rest/bytes>>) ->
{Val, Rest};
decode(double, <<Val:64, Rest/bytes>>) ->
{Val, Rest};
decode(i16, <<Val:16, Rest/bytes>>) ->
{Val, Rest};
decode(i32, <<Val:32, Rest/bytes>>) ->
{Val, Rest};
decode(i64, <<Val:64, Rest/bytes>>) ->
{Val, Rest};
decode(string, <<ByteLen:32, BytesAndRest/bytes>>) ->
<<Bytes:ByteLen/bytes, Rest/bytes>> = BytesAndRest,
{Bytes, Rest};
decode(struct, Data) ->
decode_struct(Data, []);
decode(map, <<KeyTypeId, ValTypeId, Size:32, KVPsAndRest/bytes>>) ->
decode_map(
map_type(KeyTypeId),
map_type(ValTypeId),
Size,
KVPsAndRest,
[]
);
%% Lists and Sets are encoded the same way
decode(set, Data) ->
decode(list, Data);
decode(list, <<ElementTypeId, Size:32, ElementsAndRest/bytes>>) ->
decode_list(
map_type(ElementTypeId),
Size,
ElementsAndRest,
[]
).
%% Helpers
decode_struct(Data, Acc) ->
case decode(Data) of
{Val, <<0, Rest/bytes>>} ->
{lists:reverse([Val | Acc]), Rest};
{Val, Rest} ->
decode_struct(Rest, [Val | Acc])
end.
decode_map(KeyType, ValType, 0, Rest, Acc) ->
{{{KeyType, ValType}, lists:reverse(Acc)}, Rest};
decode_map(KeyType, ValType, Size, KVPsAndRest, Acc) ->
{Key, ValAndRest} = decode(KeyType, KVPsAndRest),
{Val, Rest} = decode(ValType, ValAndRest),
decode_map(KeyType, ValType, Size-1, Rest, [{Key, Val} | Acc]).
decode_list(ElementType, 0, Rest, Acc) ->
{{ElementType, lists:reverse(Acc)}, Rest};
decode_list(ElementType, Size, Elements, Acc) ->
{Data, Rest} = decode(ElementType, Elements),
decode_list(ElementType, Size-1, Rest, [Data | Acc]).
map_type(2) -> bool;
map_type(3) -> byte;
map_type(4) -> double;
map_type(6) -> i16;
map_type(8) -> i32;
map_type(10) -> i64;
map_type(11) -> string;
map_type(12) -> struct;
map_type(13) -> map;
map_type(14) -> set;
map_type(15) -> list;
map_type(bool) -> 2;
map_type(byte) -> 3;
map_type(double)-> 4;
map_type(i16) -> 6;
map_type(i32) -> 8;
map_type(i64) -> 10;
map_type(string)-> 11;
map_type(struct)-> 12;
map_type(map) -> 13;
map_type(set) -> 14;
map_type(list) -> 15. | src/otter_lib_thrift.erl | 0.554229 | 0.400456 | otter_lib_thrift.erl | starcoder |
%%--------------------------------------------------------------------
%% Copyright (c) 2020 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(estatsd_protocol_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
all() -> emqx_ct:all(?MODULE).
t_encode(_) ->
try estatsd_protocol:encode(aaa, example, 10, 1, []) of
_ -> ct:fail(should_throw_error)
catch error:Reason ->
?assertEqual(Reason, {bad_type, aaa})
end,
try estatsd_protocol:encode(counter, example, 10, 2, []) of
_ -> ct:fail(should_throw_error)
catch error:Reason1 ->
?assertEqual(Reason1, {bad_sample_rate, 2})
end,
?assertEqual(<<"example:10|c">>, iolist_to_binary(estatsd_protocol:encode(counter, example, 10, 1, []))),
?assertEqual(<<"example:-10|c">>, iolist_to_binary(estatsd_protocol:encode(counter, example, -10, 1, []))),
?assertEqual(<<"example:-10|c|@0.2">>, iolist_to_binary(estatsd_protocol:encode(counter, example, -10, 0.2, []))),
?assertEqual(<<"example:-10|c|@0.2|#first:a,second:b">>, iolist_to_binary(estatsd_protocol:encode(counter, example, -10, 0.2, [{"first", "a"}, {<<"second">>, "b"}]))),
?assertEqual(<<"example:10|g">>, iolist_to_binary(estatsd_protocol:encode(gauge, example, 10, 1, []))),
?assertEqual(<<"example:+10|g">>, iolist_to_binary(estatsd_protocol:encode(gauge_delta, example, 10, 1, []))),
?assertEqual(<<"example:-10|g">>, iolist_to_binary(estatsd_protocol:encode(gauge_delta, example, -10, 1, []))),
?assertEqual(<<"example:10|ms">>, iolist_to_binary(estatsd_protocol:encode(timing, example, 10, 1, []))),
?assertEqual(<<"example:10|h">>, iolist_to_binary(estatsd_protocol:encode(histogram, example, 10, 1, []))),
?assertEqual(<<"example:10|s">>, iolist_to_binary(estatsd_protocol:encode(set, example, 10, 1, []))),
try estatsd_protocol:encode(gauge, example, -10, 1, []) of
_ -> ct:fail(should_throw_error)
catch error:Reason2 ->
?assertEqual(Reason2, {bad_value, -10})
end,
try estatsd_protocol:encode(timing, example, -10, 1, []) of
_ -> ct:fail(should_throw_error)
catch error:Reason3 ->
?assertEqual(Reason3, {bad_value, -10})
end,
try estatsd_protocol:encode(set, example, -10, 1, []) of
_ -> ct:fail(should_throw_error)
catch error:Reason4 ->
?assertEqual(Reason4, {bad_value, -10})
end. | test/estatsd_protocol_SUITE.erl | 0.523908 | 0.46223 | estatsd_protocol_SUITE.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Purpose : Type-based optimisations.
-module(beam_type).
-export([module/2]).
-import(lists, [filter/2,foldl/3,keyfind/3,member/2,
reverse/1,reverse/2,sort/1]).
-define(UNICODE_INT, {integer,{0,16#10FFFF}}).
%% Entry point for the pass: run the type-based optimisation over
%% every function in the module. Options are currently unused.
-spec module(beam_utils:module_code(), [compile:option()]) ->
                    {'ok',beam_utils:module_code()}.

module({Mod,Exp,Attr,Fs0,Lc}, _Opts) ->
    Fs = lists:map(fun function/1, Fs0),
    {ok,{Mod,Exp,Attr,Fs,Lc}}.
%% Optimise a single function. If anything crashes during
%% optimisation, print the function's name/arity before re-raising
%% with the original class and stacktrace, so that compiler bugs can
%% be traced back to the offending function.
function({function,Name,Arity,CLabel,Asm0}) ->
    try
        LiveOpted = beam_utils:live_opt(Asm0),
        TypeOpted = opt(LiveOpted, [], tdb_new()),
        ReOpted = beam_utils:live_opt(TypeOpted),
        Asm = beam_utils:delete_live_annos(ReOpted),
        {function,Name,Arity,CLabel,Asm}
    catch
        Class:Error:Stack ->
            io:fwrite("Function: ~w/~w\n", [Name,Arity]),
            erlang:raise(Class, Error, Stack)
    end.
%% opt([Instruction], Accumulator, TypeDb) -> [Instruction']
%%  Keep track of type information; try to simplify.
%%  (The type database is threaded through the traversal but only
%%  the optimised, re-reversed instruction list is returned.)

%% Two adjacent blocks: simplify the new block and merge it into the
%% previous one (merge_blocks/2 is defined elsewhere in this file).
opt([{block,Body1}|Is], [{block,Body0}|Acc], Ts0) ->
    {Body2,Ts} = simplify(Body1, Ts0),
    Body = merge_blocks(Body0, Body2),
    opt(Is, [{block,Body}|Acc], Ts);
%% A block not preceded by another block: just simplify it.
opt([{block,Body0}|Is], Acc, Ts0) ->
    {Body,Ts} = simplify(Body0, Ts0),
    opt(Is, [{block,Body}|Acc], Ts);
%% A non-block instruction; basic simplification may remove it
%% entirely (empty result) or keep a possibly rewritten version.
opt([I0|Is], Acc, Ts0) ->
    case simplify_basic([I0], Ts0) of
        {[],Ts} -> opt(Is, Acc, Ts);
        {[I],Ts} -> opt(Is, [I|Acc], Ts)
    end;
opt([], Acc, _) -> reverse(Acc).
%% simplify([Instruction], TypeDb) -> {[Instruction'],TypeDb'}
%%  Strength-reduce the instructions in a block using the collected
%%  type information. Basic simplification is always applied; the
%%  floating-point rewrite replaces it only when it turns out to be
%%  possible for the whole block.
simplify(Is0, TypeDb0) ->
    {BasicIs,BasicDb} = simplify_basic(Is0, TypeDb0),
    case simplify_float(BasicIs, TypeDb0) of
        not_possible -> {BasicIs,BasicDb};
        {_,_}=FloatRes -> FloatRes
    end.
%% simplify_basic([Instruction], TypeDatabase) -> {[Instruction],TypeDatabase'}
%%  Basic simplification, mostly tuples, no floating point optimizations.
%%  Thin wrapper that seeds the worker's output accumulator.

simplify_basic(Is, Ts) ->
    simplify_basic_1(Is, Ts, []).
%% simplify_basic_1([Instruction], TypeDb, Acc) -> {[Instruction],TypeDb'}
%%  Worker for simplify_basic/2. Walks the instruction list, using
%%  the type database to remove tests that are known to succeed and
%%  to replace general operations with cheaper specialized ones.
%%  The database is updated (or reset) as each instruction passes.

%% element/2 with a literal index: if the register is known to hold
%% a tuple with at least Index elements, use the cheaper
%% get_tuple_element instruction (which is 0-based).
simplify_basic_1([{set,[D],[{integer,Index},Reg],{bif,element,_}}=I0|Is], Ts0, Acc) ->
    I = case max_tuple_size(Reg, Ts0) of
            Sz when 0 < Index, Index =< Sz ->
                {set,[D],[Reg],{get_tuple_element,Index-1}};
            _Other -> I0
        end,
    Ts = update(I, Ts0),
    simplify_basic_1(Is, Ts, [I|Acc]);
%% Fetching the first tuple element when the database already knows
%% its value: replace with a plain move of the known value.
simplify_basic_1([{set,[D],[TupleReg],{get_tuple_element,0}}=I|Is0], Ts0, Acc) ->
    case tdb_find(TupleReg, Ts0) of
        {tuple,_,[Contents]} ->
            simplify_basic_1([{set,[D],[Contents],move}|Is0], Ts0, Acc);
        _ ->
            Ts = update(I, Ts0),
            simplify_basic_1(Is0, Ts, [I|Acc])
    end;
%% try/catch boundaries invalidate everything we know; start over
%% with an empty type database.
simplify_basic_1([{set,_,_,{try_catch,_,_}}=I|Is], _Ts, Acc) ->
    simplify_basic_1(Is, tdb_new(), [I|Acc]);
%% is_atom on a register known to hold a boolean (booleans are
%% atoms) always succeeds; drop the test.
simplify_basic_1([{test,is_atom,_,[R]}=I|Is], Ts, Acc) ->
    case tdb_find(R, Ts) of
        boolean -> simplify_basic_1(Is, Ts, Acc);
        _ -> simplify_basic_1(Is, Ts, [I|Acc])
    end;
%% is_integer is redundant for a known integer (with or without a
%% known range).
simplify_basic_1([{test,is_integer,_,[R]}=I|Is], Ts, Acc) ->
    case tdb_find(R, Ts) of
        integer -> simplify_basic_1(Is, Ts, Acc);
        {integer,_} -> simplify_basic_1(Is, Ts, Acc);
        _ -> simplify_basic_1(Is, Ts, [I|Acc])
    end;
simplify_basic_1([{test,is_tuple,_,[R]}=I|Is], Ts, Acc) ->
    case tdb_find(R, Ts) of
        {tuple,_,_} -> simplify_basic_1(Is, Ts, Acc);
        _ -> simplify_basic_1(Is, Ts, [I|Acc])
    end;
%% test_arity is redundant when the tuple's arity is already known
%% to be exactly Arity.
simplify_basic_1([{test,test_arity,_,[R,Arity]}=I|Is], Ts0, Acc) ->
    case tdb_find(R, Ts0) of
        {tuple,Arity,_} ->
            simplify_basic_1(Is, Ts0, Acc);
        _Other ->
            Ts = update(I, Ts0),
            simplify_basic_1(Is, Ts, [I|Acc])
    end;
simplify_basic_1([{test,is_map,_,[R]}=I|Is], Ts0, Acc) ->
    case tdb_find(R, Ts0) of
        map -> simplify_basic_1(Is, Ts0, Acc);
        _Other ->
            Ts = update(I, Ts0),
            simplify_basic_1(Is, Ts, [I|Acc])
    end;
simplify_basic_1([{test,is_nonempty_list,_,[R]}=I|Is], Ts0, Acc) ->
    case tdb_find(R, Ts0) of
        nonempty_list -> simplify_basic_1(Is, Ts0, Acc);
        _Other ->
            Ts = update(I, Ts0),
            simplify_basic_1(Is, Ts, [I|Acc])
    end;
%% Exact comparison against an atom literal: if the register holds
%% a known atom the outcome is decided at compile time - drop the
%% test (same atom) or jump straight to the failure label
%% (different atom).
simplify_basic_1([{test,is_eq_exact,Fail,[R,{atom,_}=Atom]}=I|Is0], Ts0, Acc0) ->
    Acc = case tdb_find(R, Ts0) of
              {atom,_}=Atom -> Acc0;
              {atom,_} -> [{jump,Fail}|Acc0];
              _ -> [I|Acc0]
          end,
    Ts = update(I, Ts0),
    simplify_basic_1(Is0, Ts, Acc);
%% is_record is redundant when both the arity and the first element
%% (the record tag) are already known to match.
simplify_basic_1([{test,is_record,_,[R,{atom,_}=Tag,{integer,Arity}]}=I|Is], Ts0, Acc) ->
    case tdb_find(R, Ts0) of
        {tuple,Arity,[Tag]} ->
            simplify_basic_1(Is, Ts0, Acc);
        _Other ->
            Ts = update(I, Ts0),
            simplify_basic_1(Is, Ts, [I|Acc])
    end;
%% select_val on a register with a known integer range or a known
%% boolean may be simplified. The database is reset afterwards
%% since select_val ends the straight-line code being tracked.
simplify_basic_1([{select,select_val,Reg,_,_}=I0|Is], Ts, Acc) ->
    I = case tdb_find(Reg, Ts) of
            {integer,Range} ->
                simplify_select_val_int(I0, Range);
            boolean ->
                simplify_select_val_bool(I0);
            _ ->
                I0
        end,
    simplify_basic_1(Is, tdb_new(), [I|Acc]);
%% Any other instruction: keep it and record whatever type
%% information it provides.
simplify_basic_1([I|Is], Ts0, Acc) ->
    Ts = update(I, Ts0),
    simplify_basic_1(Is, Ts, [I|Acc]);
simplify_basic_1([], Ts, Acc) ->
    Is = reverse(Acc),
    {Is,Ts}.
%% The register is known to hold an integer in the range Min..Max.
%% If the select_val alternatives cover every integer in that range,
%% the test for the last value can never fail, so the select can be
%% rewritten to use that alternative's label as the failure label.
simplify_select_val_int({select,select_val,Reg,_,Cases}=Select, {Min,Max}) ->
    Covered = lists:sort([V || {integer,V} <- Cases]),
    case eq_ranges(Covered, Min, Max) of
        true -> simplify_select_val_1(Cases, {integer,Max}, Reg, []);
        false -> Select
    end.
%% The register is known to hold a boolean. If the select_val lists
%% both 'false' and 'true', the 'false' alternative can become the
%% failure label instead of an explicit case.
simplify_select_val_bool({select,select_val,Reg,_,Cases}=Select) ->
    case lists:sort([A || {atom,A} <- Cases]) of
        [false,true] ->
            simplify_select_val_1(Cases, {atom,false}, Reg, []);
        _ ->
            Select
    end.
%% Walk the flat [Value,Label,...] alternative list until Val is
%% found; remove that pair and rebuild the select instruction with
%% the removed pair's label as the new failure label.
simplify_select_val_1([Val,FailLabel|Rest], Val, Reg, Acc) ->
    Remaining = lists:reverse(Acc, Rest),
    {select,select_val,Reg,FailLabel,Remaining};
simplify_select_val_1([Other,Label|Rest], Val, Reg, Acc) ->
    simplify_select_val_1(Rest, Val, Reg, [Label,Other|Acc]).
%% eq_ranges(SortedValues, Min, Max) -> boolean()
%%  Succeed only if SortedValues is exactly the consecutive integer
%%  sequence Min, Min+1, ..., Max with no gaps or extras.
eq_ranges([Max], Max, Max) ->
    true;
eq_ranges([Min|Rest], Min, Max) ->
    eq_ranges(Rest, Min + 1, Max);
eq_ranges(_Values, _Min, _Max) ->
    false.
%% simplify_float([Instruction], TypeDatabase) ->
%%     {[Instruction],TypeDatabase'} | not_possible
%%  Rewrite arithmetic on values known to be floats to use the
%%  dedicated floating point instructions. Gives up (returns
%%  'not_possible') if the liveness pass throws to signal that the
%%  rewrite is not safe for this block.
%%
simplify_float(Is0, Ts0) ->
    {Simplified,Ts} = simplify_float_1(Is0, Ts0, [], []),
    NoFmoves = opt_fmoves(Simplified, []),
    WithHeap = flt_need_heap(NoFmoves),
    try flt_liveness(WithHeap) of
        Final -> {Final,Ts}
    catch
        throw:not_possible -> not_possible
    end.
%% simplify_float_1([Instruction], TypeDb, Rs, Acc) -> {[Instruction],TypeDb'}
%%  Worker for simplify_float/2. Rewrites gc_bif arithmetic whose
%%  operands are known floats into float-register bifs. Rs appears
%%  to track which values currently live in float registers (it is
%%  managed by load_reg/fetch_reg/find_dest/flush, defined elsewhere
%%  in this file -- confirm there); Acc is the reversed output.
%%  Existing fclearerror/fcheckerror instructions are stripped here
%%  and re-inserted only where needed via clearerror/checkerror.

simplify_float_1([{set,[],[],fclearerror}|Is], Ts, Rs, Acc) ->
    simplify_float_1(Is, Ts, Rs, clearerror(Acc));
simplify_float_1([{set,[],[],fcheckerror}|Is], Ts, Rs, Acc) ->
    simplify_float_1(Is, Ts, Rs, checkerror(Acc));
%% Instructions that already target a float register pass through.
simplify_float_1([{set,[{fr,_}],_,_}=I|Is], Ts, Rs, Acc) ->
    simplify_float_1(Is, Ts, Rs, [I|Acc]);
%% Unary minus on a known float becomes fnegate on float registers;
%% otherwise flush any pending float state before the instruction.
simplify_float_1([{set,[D0],[A0],{alloc,_,{gc_bif,'-',{f,0}}}}=I|Is]=Is0,
                 Ts0, Rs0, Acc0) ->
    case tdb_find(A0, Ts0) of
        float ->
            A = coerce_to_float(A0),
            {Rs1,Acc1} = load_reg(A, Ts0, Rs0, Acc0),
            {D,Rs} = find_dest(D0, Rs1),
            Areg = fetch_reg(A, Rs),
            Acc = [{set,[D],[Areg],{bif,fnegate,{f,0}}}|clearerror(Acc1)],
            Ts = tdb_update([{D0,float}], Ts0),
            simplify_float_1(Is, Ts, Rs, Acc);
        _Other ->
            Ts = update(I, Ts0),
            {Rs,Acc} = flush(Rs0, Is0, Acc0),
            simplify_float_1(Is, Ts, Rs, [I|checkerror(Acc)])
    end;
%% Binary arithmetic: float_op/4 decides whether the operation can
%% be done with a float bif; if so, load both operands into float
%% registers and emit the float instruction.
simplify_float_1([{set,[D0],[A0,B0],{alloc,_,{gc_bif,Op0,{f,0}}}}=I|Is]=Is0,
                 Ts0, Rs0, Acc0) ->
    case float_op(Op0, A0, B0, Ts0) of
        no ->
            Ts = update(I, Ts0),
            {Rs,Acc} = flush(Rs0, Is0, Acc0),
            simplify_float_1(Is, Ts, Rs, [I|checkerror(Acc)]);
        {yes,Op} ->
            A = coerce_to_float(A0),
            B = coerce_to_float(B0),
            {Rs1,Acc1} = load_reg(A, Ts0, Rs0, Acc0),
            {Rs2,Acc2} = load_reg(B, Ts0, Rs1, Acc1),
            {D,Rs} = find_dest(D0, Rs2),
            Areg = fetch_reg(A, Rs),
            Breg = fetch_reg(B, Rs),
            Acc = [{set,[D],[Areg,Breg],{bif,Op,{f,0}}}|clearerror(Acc2)],
            Ts = tdb_update([{D0,float}], Ts0),
            simplify_float_1(Is, Ts, Rs, Acc)
    end;
%% try/catch: flush everything and reset the type database.
simplify_float_1([{set,_,_,{try_catch,_,_}}=I|Is]=Is0, _Ts, Rs0, Acc0) ->
    Acc = flush_all(Rs0, Is0, Acc0),
    simplify_float_1(Is, tdb_new(), Rs0, [I|Acc]);
%% Line annotations pass through untouched.
simplify_float_1([{set,_,_,{line,_}}=I|Is], Ts, Rs, Acc) ->
    simplify_float_1(Is, Ts, Rs, [I|Acc]);
%% Any other instruction with no pending float state: just record
%% its type information.
simplify_float_1([I|Is], Ts0, [], Acc) ->
    Ts = update(I, Ts0),
    simplify_float_1(Is, Ts, [], [I|Acc]);
%% Any other instruction with pending float state: flush it back to
%% ordinary registers first.
simplify_float_1([I|Is]=Is0, Ts0, Rs0, Acc0) ->
    Ts = update(I, Ts0),
    {Rs,Acc} = flush(Rs0, Is0, Acc0),
    simplify_float_1(Is, Ts, Rs, [I|checkerror(Acc)]);
%% End of block: all float-register state must have been flushed
%% (Rs is required to be []).
simplify_float_1([], Ts, [], Acc) ->
    Is = reverse(Acc),
    {Is,Ts}.
%% coerce_to_float(Operand) -> Operand'
%%  Turn an integer literal into the corresponding float literal.
%%  Integers too large to represent as a float are left untouched so
%%  that the overflow happens at run-time, not at compile time.
coerce_to_float({integer,N}=Original) ->
    try
        {float,float(N)}
    catch
        _:_ -> Original
    end;
coerce_to_float(Operand) -> Operand.
%% opt_fmoves([Instruction], Acc) -> [Instruction]
%%  Merge an fmove into an x register immediately followed by a move
%%  out of that same x register into a single fmove, provided that the
%%  intermediate x register is not used later in the block.
opt_fmoves([{set,[{x,_}=R],[{fr,_}]=Src,fmove}=I1,
            {set,[_]=Dst,[{x,_}=R],move}=I2|Is], Acc) ->
    case beam_utils:is_killed_block(R, Is) of
        false -> opt_fmoves(Is, [I2,I1|Acc]);
        true -> opt_fmoves(Is, [{set,Dst,Src,fmove}|Acc])
    end;
opt_fmoves([I|Is], Acc) ->
    opt_fmoves(Is, [I|Acc]);
opt_fmoves([], Acc) -> reverse(Acc).
%% clearerror(ReversedIs) -> ReversedIs'
%%  Ensure that an fclearerror is in effect at this point of the
%%  (reversed) instruction stream.  Scanning backwards: an existing
%%  fclearerror means nothing needs to be done; an fcheckerror ends
%%  the previous float section, so a new fclearerror must be added;
%%  reaching the start of the block also requires one.
clearerror(Acc) ->
    clearerror(Acc, Acc).

clearerror([Instr|Rest], OrigIs) ->
    case Instr of
        {set,[],[],fclearerror} -> OrigIs;
        {set,[],[],fcheckerror} -> [{set,[],[],fclearerror}|OrigIs];
        _ -> clearerror(Rest, OrigIs)
    end;
clearerror([], OrigIs) ->
    [{set,[],[],fclearerror}|OrigIs].
%% merge_blocks(Block1, Block2) -> Block.
%%  Combine two blocks and eliminate any move instructions that assign
%%  to registers that are killed later in the block.
%%
%%  The blocks are joined with a 'stop_here' marker so that only moves
%%  in the first block are candidates for elimination.
merge_blocks(B1, [{'%live',_,_}|B2]) ->
    merge_blocks_1(B1++[{set,[],[],stop_here}|B2]).

%% Walk instructions up to the stop_here marker, dropping a move whose
%% destination is killed before it is ever read.
merge_blocks_1([{set,[],_,stop_here}|Is]) -> Is;
merge_blocks_1([{set,[D],_,move}=I|Is]) ->
    case beam_utils:is_killed_block(D, Is) of
        true -> merge_blocks_1(Is);
        false -> [I|merge_blocks_1(Is)]
    end;
merge_blocks_1([I|Is]) -> [I|merge_blocks_1(Is)].
%% flt_need_heap([Instruction]) -> [Instruction]
%%  Insert need heap allocation instructions in the instruction stream
%%  to properly account for both inserted floating point operations and
%%  normal term build operations (such as put_list/3).
%%
%%  Ignore old heap allocation instructions (except if they allocate a stack
%%  frame too), as they may be in the wrong place (because gc_bif instructions
%%  could have been converted to floating point operations).
flt_need_heap(Is) ->
    %% Scan in reverse so that heap need can be accumulated until an
    %% instruction that requires an allocation point is found.
    flt_need_heap_1(reverse(Is), 0, 0, []).

flt_need_heap_1([{set,[],[],{alloc,_,Alloc}}|Is], H, Fl, Acc) ->
    case Alloc of
        {_,nostack,_,_} ->
            %% Remove any existing test_heap/2 instruction.
            flt_need_heap_1(Is, H, Fl, Acc);
        {Z,Stk,_,Inits} when is_integer(Stk) ->
            %% Keep any allocate*/2 instruction and recalculate heap need.
            I = {set,[],[],{alloc,regs,{Z,Stk,build_alloc(H, Fl),Inits}}},
            flt_need_heap_1(Is, 0, 0, [I|Acc])
    end;
flt_need_heap_1([I|Is], H0, Fl0, Acc) ->
    {Ns,H1,Fl1} = flt_need_heap_2(I, H0, Fl0),
    flt_need_heap_1(Is, H1, Fl1, [I|Ns]++Acc);
flt_need_heap_1([], H, Fl, Acc) ->
    %% Flush any remaining heap need at the very beginning of the block.
    flt_alloc(H, Fl) ++ Acc.
%% flt_need_heap_2(Instruction, HeapNeed, FloatsOnHeap) ->
%%     {AllocInstrs,HeapNeed',FloatsOnHeap'}
%%  Classify one instruction (seen in reverse order) for heap
%%  accounting purposes.
%%
%% First come all instructions that build. We pass through, while we
%% add to the need for heap words and floats on the heap.
flt_need_heap_2({set,[_],[{fr,_}],fmove}, H, Fl) ->
    {[],H,Fl+1};
flt_need_heap_2({set,_,_,put_list}, H, Fl) ->
    {[],H+2,Fl};
flt_need_heap_2({set,_,_,{put_tuple,_}}, H, Fl) ->
    {[],H+1,Fl};
flt_need_heap_2({set,_,_,put}, H, Fl) ->
    {[],H+1,Fl};
%% Then the "neutral" instructions. We just pass them.
flt_need_heap_2({set,[{fr,_}],_,_}, H, Fl) ->
    {[],H,Fl};
flt_need_heap_2({set,[],[],fclearerror}, H, Fl) ->
    {[],H,Fl};
flt_need_heap_2({set,[],[],fcheckerror}, H, Fl) ->
    {[],H,Fl};
flt_need_heap_2({set,_,_,{bif,_,_}}, H, Fl) ->
    {[],H,Fl};
flt_need_heap_2({set,_,_,move}, H, Fl) ->
    {[],H,Fl};
flt_need_heap_2({set,_,_,{get_tuple_element,_}}, H, Fl) ->
    {[],H,Fl};
flt_need_heap_2({set,_,_,get_list}, H, Fl) ->
    {[],H,Fl};
flt_need_heap_2({set,_,_,{try_catch,_,_}}, H, Fl) ->
    {[],H,Fl};
%% All other instructions should cause the insertion of an allocation
%% instruction if needed.
flt_need_heap_2(_, H, Fl) ->
    {flt_alloc(H, Fl),0,0}.
%% flt_alloc(HeapWords, Floats) -> [Instruction]
%%  Build a heap allocation instruction for the accumulated need,
%%  or nothing at all when no heap space is required.
flt_alloc(0, 0) ->
    [];
flt_alloc(Heap, Floats) ->
    [{set,[],[],{alloc,regs,{nozero,nostack,build_alloc(Heap, Floats),[]}}}].

%% build_alloc(Words, Floats) -> AllocSpec
%%  A plain word count when no floats are needed; otherwise the
%%  composite {alloc,...} form understood by the loader.
build_alloc(Words, 0) -> Words;
build_alloc(Words, Floats) -> {alloc,[{words,Words},{floats,Floats}]}.
%% flt_liveness([Instruction]) -> [Instruction]
%%   (Re)calculate the number of live registers for each heap allocation
%%   instruction. We base liveness on the register map at the
%%   beginning of the instruction sequence.
%%
%%   A 'not_possible' term will be thrown if the set of live registers
%%   is not contiguous at an allocation function (e.g. if {x,0} and {x,2}
%%   are live, but not {x,1}).
%% The block must start with a '%live' annotation giving the initial
%% register map.
flt_liveness([{'%live',_Live,Regs}=LiveInstr|Is]) ->
    flt_liveness_1(Is, Regs, [LiveInstr]).

%% Track the set of live x registers as a bitmap; clamp every
%% allocation instruction's live count to what is actually live.
flt_liveness_1([{set,Ds,Ss,{alloc,Live0,Alloc}}|Is], Regs0, Acc) ->
    Live = min(Live0, live_regs(Regs0)),
    I = {set,Ds,Ss,{alloc,Live,Alloc}},
    %% After a GC only the first Live x registers survive.
    Regs1 = init_regs(Live),
    Regs = x_live(Ds, Regs1),
    flt_liveness_1(Is, Regs, [I|Acc]);
flt_liveness_1([{set,Ds,_,_}=I|Is], Regs0, Acc) ->
    Regs = x_live(Ds, Regs0),
    flt_liveness_1(Is, Regs, [I|Acc]);
flt_liveness_1([{'%live',_,_}], _Regs, Acc) ->
    reverse(Acc).
%% init_regs(Live) -> Bitmap
%%  Bitmap with the lowest Live bits set ({x,0}..{x,Live-1} live).
init_regs(Live) ->
    (1 bsl Live) - 1.

%% live_regs(Bitmap) -> Count
%%  Count the live registers; throws 'not_possible' when the live set
%%  is not contiguous from {x,0} upwards.
live_regs(Regs) ->
    live_regs_1(Regs, 0).

live_regs_1(0, Count) ->
    Count;
live_regs_1(Mask, Count) when Mask band 1 =:= 1 ->
    live_regs_1(Mask bsr 1, Count + 1);
live_regs_1(_, _) ->
    throw(not_possible).

%% x_live(Destinations, Bitmap) -> Bitmap'
%%  Mark every {x,N} destination as live; other registers are ignored.
x_live(Ds, Regs0) ->
    lists:foldl(fun({x,N}, Regs) -> Regs bor (1 bsl N);
                   (_, Regs) -> Regs
                end, Regs0, Ds).
%% update(Instruction, TypeDb) -> NewTypeDb
%%  Update the type database to account for executing an instruction.
%%
%%  First the cases for instructions inside basic blocks.
update({'%live',_,_}, Ts) -> Ts;
update({set,[D],[S],move}, Ts) ->
    tdb_copy(S, D, Ts);
%% element/2 with a literal position implies a minimum tuple size.
update({set,[D],[{integer,I},Reg],{bif,element,_}}, Ts0) ->
    tdb_update([{Reg,{tuple,I,[]}},{D,kill}], Ts0);
update({set,[D],[_Index,Reg],{bif,element,_}}, Ts0) ->
    tdb_update([{Reg,{tuple,0,[]}},{D,kill}], Ts0);
%% Type tests, comparison operators and boolean operators all
%% produce booleans.
update({set,[D],Args,{bif,N,_}}, Ts0) ->
    Ar = length(Args),
    BoolOp = erl_internal:new_type_test(N, Ar) orelse
        erl_internal:comp_op(N, Ar) orelse
        erl_internal:bool_op(N, Ar),
    case BoolOp of
        true ->
            tdb_update([{D,boolean}], Ts0);
        false ->
            tdb_update([{D,kill}], Ts0)
    end;
update({set,[D],[S],{get_tuple_element,0}}, Ts) ->
    tdb_update([{D,{tuple_element,S,0}}], Ts);
update({set,[D],[S],{alloc,_,{gc_bif,float,{f,0}}}}, Ts0) ->
    %% Make sure we reject non-numeric literal argument.
    case possibly_numeric(S) of
        true -> tdb_update([{D,float}], Ts0);
        false -> Ts0
    end;
%% 'band' with a constant operand may give a known integer range.
update({set,[D],[S1,S2],{alloc,_,{gc_bif,'band',{f,0}}}}, Ts) ->
    case keyfind(integer, 1, [S1,S2]) of
        {integer,N} ->
            update_band(N, D, Ts);
        false ->
            tdb_update([{D,integer}], Ts)
    end;
update({set,[D],[S1,S2],{alloc,_,{gc_bif,'/',{f,0}}}}, Ts0) ->
    %% Make sure we reject non-numeric literals.
    case possibly_numeric(S1) andalso possibly_numeric(S2) of
        true -> tdb_update([{D,float}], Ts0);
        false -> Ts0
    end;
update({set,[D],[S1,S2],{alloc,_,{gc_bif,Op,{f,0}}}}, Ts0) ->
    case op_type(Op) of
        integer ->
            tdb_update([{D,integer}], Ts0);
        {float,_} ->
            %% Arithmetic yields a float only when an operand is known
            %% to be a float.
            case {tdb_find(S1, Ts0),tdb_find(S2, Ts0)} of
                {float,_} -> tdb_update([{D,float}], Ts0);
                {_,float} -> tdb_update([{D,float}], Ts0);
                {_,_} -> tdb_update([{D,kill}], Ts0)
            end;
        unknown ->
            tdb_update([{D,kill}], Ts0)
    end;
%% Catch-all clauses for other 'set' instructions: kill the
%% destinations.
update({set,[],_Src,_Op}, Ts0) -> Ts0;
update({set,[D],_Src,_Op}, Ts0) ->
    tdb_update([{D,kill}], Ts0);
update({set,[D1,D2],_Src,_Op}, Ts0) ->
    tdb_update([{D1,kill},{D2,kill}], Ts0);
update({kill,D}, Ts) ->
    tdb_update([{D,kill}], Ts);

%% Instructions outside of blocks.
update({test,is_float,_Fail,[Src]}, Ts0) ->
    tdb_update([{Src,float}], Ts0);
update({test,test_arity,_Fail,[Src,Arity]}, Ts0) ->
    tdb_update([{Src,{tuple,Arity,[]}}], Ts0);
update({test,is_map,_Fail,[Src]}, Ts0) ->
    tdb_update([{Src,map}], Ts0);
update({get_map_elements,_,Src,{list,Elems0}}, Ts0) ->
    {_Ss,Ds} = beam_utils:split_even(Elems0),
    Elems = [{Dst,kill} || Dst <- Ds],
    tdb_update([{Src,map}|Elems], Ts0);
update({test,is_nonempty_list,_Fail,[Src]}, Ts0) ->
    tdb_update([{Src,nonempty_list}], Ts0);
%% Comparing the first element of a tuple with an atom tells us the
%% contents of that first element (a record tag test).
update({test,is_eq_exact,_,[Reg,{atom,_}=Atom]}, Ts) ->
    case tdb_find(Reg, Ts) of
        error ->
            Ts;
        {tuple_element,TupleReg,0} ->
            tdb_update([{TupleReg,{tuple,1,[Atom]}}], Ts);
        _ ->
            Ts
    end;
update({test,is_record,_Fail,[Src,Tag,{integer,Arity}]}, Ts) ->
    tdb_update([{Src,{tuple,Arity,[Tag]}}], Ts);

%% Binary matching
update({test,bs_get_integer2,_,_,Args,Dst}, Ts) ->
    tdb_update([{Dst,get_bs_integer_type(Args)}], Ts);
update({test,bs_get_utf8,_,_,_,Dst}, Ts) ->
    tdb_update([{Dst,?UNICODE_INT}], Ts);
update({test,bs_get_utf16,_,_,_,Dst}, Ts) ->
    tdb_update([{Dst,?UNICODE_INT}], Ts);
update({test,bs_get_utf32,_,_,_,Dst}, Ts) ->
    tdb_update([{Dst,?UNICODE_INT}], Ts);
update({bs_init,_,_,_,_,Dst}, Ts) ->
    tdb_update([{Dst,kill}], Ts);
update({bs_put,_,_,_}, Ts) ->
    Ts;
update({bs_save2,_,_}, Ts) ->
    Ts;
update({bs_restore2,_,_}, Ts) ->
    Ts;
update({bs_context_to_binary,Dst}, Ts) ->
    tdb_update([{Dst,kill}], Ts);
update({test,bs_start_match2,_,_,_,Dst}, Ts) ->
    tdb_update([{Dst,kill}], Ts);
update({test,bs_get_binary2,_,_,_,Dst}, Ts) ->
    tdb_update([{Dst,kill}], Ts);
update({test,bs_get_float2,_,_,_,Dst}, Ts) ->
    tdb_update([{Dst,float}], Ts);
update({test,_Test,_Fail,_Other}, Ts) ->
    Ts;

%% Calls
%% Calls into the math module return floats.
update({call_ext,Ar,{extfunc,math,Math,Ar}}, Ts) ->
    case is_math_bif(Math, Ar) of
        true -> tdb_update([{{x,0},float}], Ts);
        false -> tdb_kill_xregs(Ts)
    end;
update({call_ext,3,{extfunc,erlang,setelement,3}}, Ts0) ->
    Ts = tdb_kill_xregs(Ts0),
    case tdb_find({x,1}, Ts0) of
        {tuple,Sz,_}=T0 ->
            T = case tdb_find({x,0}, Ts0) of
                    {integer,{I,I}} when I > 1 ->
                        %% First element is not changed. The result
                        %% will have the same type.
                        T0;
                    _ ->
                        %% Position is 1 or unknown. May change the
                        %% first element of the tuple.
                        {tuple,Sz,[]}
                end,
            tdb_update([{{x,0},T}], Ts);
        _ ->
            Ts
    end;
%% Any other call clobbers all x registers.
update({call,_Arity,_Func}, Ts) -> tdb_kill_xregs(Ts);
update({call_ext,_Arity,_Func}, Ts) -> tdb_kill_xregs(Ts);
update({make_fun2,_,_,_,_}, Ts) -> tdb_kill_xregs(Ts);
update({call_fun, _}, Ts) -> tdb_kill_xregs(Ts);
update({apply, _}, Ts) -> tdb_kill_xregs(Ts);
update({line,_}, Ts) -> Ts;

%% The instruction is unknown. Kill all information.
update(_I, _Ts) -> tdb_new().
%% update_band(ConstOperand, DestReg, TypeDb) -> TypeDb'
%%  Record the result type of 'band' with a constant operand: a known
%%  range when the constant is of the form 2^k-1, plain integer
%%  otherwise (see update_band_1/2).
update_band(N, Reg, Ts) ->
    Type = update_band_1(N, 0),
    tdb_update([{Reg,Type}], Ts).
%% update_band_1(N, Bits) -> Type
%%  'X band N' is in 0..N whenever N+1 is a power of two (an all-ones
%%  mask).  Probe powers of two up to 2^63; give up on negative or
%%  very large masks.
update_band_1(_N, Bits) when Bits >= 64 ->
    %% Negative or large positive number. Give up.
    integer;
update_band_1(N, Bits) ->
    Pow = 1 bsl Bits,
    if
        Pow =:= N + 1 -> {integer,{0,N}};
        Pow > N + 1 -> integer;
        true -> update_band_1(N, Bits + 1)
    end.
%% get_bs_integer_type(Args) -> Type
%%  Infer the result type of a bs_get_integer2 instruction from its
%%  arguments.  An unsigned field narrower than 64 bits gives a known
%%  range; anything else (signed, dynamic or huge sizes) is just
%%  'integer' to avoid creating ranges with a huge upper limit.
get_bs_integer_type([_,{integer,Size},Unit,{field_flags,Flags}])
  when Size*Unit < 64 ->
    Bits = Size*Unit,
    case lists:member(unsigned, Flags) of
        true ->
            {integer,{0,(1 bsl Bits)-1}};
        false ->
            %% Signed integer. Don't bother.
            integer
    end;
get_bs_integer_type(_) ->
    integer.
%% is_math_bif(Name, Arity) -> boolean()
%%  True for the functions in the math module that are known to
%%  return a float.
is_math_bif(Name, Arity) ->
    lists:member({Name,Arity},
                 [{cos,1},{cosh,1},{sin,1},{sinh,1},{tan,1},{tanh,1},
                  {acos,1},{acosh,1},{asin,1},{asinh,1},{atan,1},{atanh,1},
                  {erf,1},{erfc,1},{exp,1},{log,1},{log2,1},{log10,1},
                  {sqrt,1},{atan2,2},{pow,2},{ceil,1},{floor,1},{fmod,2},
                  {pi,0}]).
%% possibly_numeric(Operand) -> boolean()
%%  Registers may hold any number; among literals only integers and
%%  floats are numeric.  Used to reject non-numeric literals.
possibly_numeric({Tag,_}) ->
    Tag =:= x orelse Tag =:= y orelse Tag =:= integer orelse Tag =:= float;
possibly_numeric(_) ->
    false.
%% max_tuple_size(Register, TypeDb) -> Size
%%  Return the known minimum size of the tuple in Register, or 0 when
%%  the register is not known to hold a tuple.
max_tuple_size(Reg, Ts) ->
    case tdb_find(Reg, Ts) of
        {tuple,Size,_} -> Size;
        _ -> 0
    end.
%% float_op(Op, Operand1, Operand2, TypeDb) -> no | {yes,FloatInstruction}
%%  Decide whether a two-operand gc_bif can be replaced with a float
%%  instruction.  '/' always maps to fdiv when both operands may be
%%  numeric; other operators qualify when at least one operand is a
%%  float literal or is known from the type database to be a float.
float_op('/', A, B, _) ->
    case possibly_numeric(A) andalso possibly_numeric(B) of
        true -> {yes,fdiv};
        false -> no
    end;
float_op(Op, {float,_}, B, _) ->
    case possibly_numeric(B) of
        true -> arith_op(Op);
        false -> no
    end;
float_op(Op, A, {float,_}, _) ->
    case possibly_numeric(A) of
        true -> arith_op(Op);
        false -> no
    end;
float_op(Op, A, B, Ts) ->
    case {tdb_find(A, Ts),tdb_find(B, Ts)} of
        {float,_} -> arith_op(Op);
        {_,float} -> arith_op(Op);
        {_,_} -> no
    end.
%% find_dest(Var, FloatRegs) -> {FloatReg,FloatRegs'}
%%  Return the float register for destination Var, allocating a new
%%  one if necessary.  The register is marked dirty since its value
%%  will eventually have to be moved back into Var.
find_dest(V, Rs0) ->
    case find_reg(V, Rs0) of
        {ok,FR} ->
            {FR,mark(V, Rs0, dirty)};
        error ->
            Rs = put_reg(V, Rs0, dirty),
            {ok,FR} = find_reg(V, Rs),
            {FR,Rs}
    end.
%% load_reg(Operand, TypeDb, FloatRegs, Acc) -> {FloatRegs',Acc'}
%%  Make sure Operand is available in a float register, emitting an
%%  fmove (for float literals and operands known to be floats) or an
%%  fconv (anything else) when it is not already loaded.  Loaded
%%  operands are marked clean: they never need to be written back.
load_reg({float,_}=F, _, Rs0, Is0) ->
    Rs = put_reg(F, Rs0, clean),
    {ok,FR} = find_reg(F, Rs),
    Is = [{set,[FR],[F],fmove}|Is0],
    {Rs,Is};
load_reg(V, Ts, Rs0, Is0) ->
    case find_reg(V, Rs0) of
        {ok,_FR} -> {Rs0,Is0};
        error ->
            Rs = put_reg(V, Rs0, clean),
            {ok,FR} = find_reg(V, Rs),
            Op = case tdb_find(V, Ts) of
                     float -> fmove;
                     _ -> fconv
                 end,
            Is = [{set,[FR],[V],Op}|Is0],
            {Rs,Is}
    end.
%% arith_op(Operator) -> {yes,FloatInstruction} | no
%%  Map an arithmetic operator to the corresponding float instruction
%%  when one exists.
arith_op(Operator) ->
    case op_type(Operator) of
        {float,FloatInstr} -> {yes,FloatInstr};
        _ -> no
    end.

%% op_type(Operator) -> {float,Instr} | integer | unknown
%%  Classify an arithmetic operator by its result type.
%%  '/' and 'band' are handled specially by the callers.
op_type('+') -> {float,fadd};
op_type('-') -> {float,fsub};
op_type('*') -> {float,fmul};
op_type(Op) when Op =:= 'bor'; Op =:= 'bxor'; Op =:= 'bsl';
                 Op =:= 'bsr'; Op =:= 'div' ->
    integer;
op_type(_) -> unknown.
%% flush(FloatRegs, [Instruction], Acc) -> {FloatRegs',Acc'}
%%  Flush float registers before a non-float instruction.  For an
%%  ordinary 'set' it suffices to write back the dirty registers the
%%  instruction reads and to forget those it overwrites; is_record,
%%  put_tuple and anything unrecognized force a full flush.
flush(Rs, [{set,[_],[_,_,_],{bif,is_record,_}}|_]=Is0, Acc0) ->
    Acc = flush_all(Rs, Is0, Acc0),
    {[],Acc};
flush(Rs, [{set,[_],[],{put_tuple,_}}|_]=Is0, Acc0) ->
    Acc = flush_all(Rs, Is0, Acc0),
    {[],Acc};
flush(Rs0, [{set,Ds,Ss,_Op}|_], Acc0) ->
    %% Write back any dirty register about to be read...
    Save = cerl_sets:from_list(Ss),
    Acc = save_regs(Rs0, Save, Acc0),
    Rs1 = foldl(fun(S, A) -> mark(S, A, clean) end, Rs0, Ss),
    %% ...and forget registers whose variables are overwritten.
    Kill = cerl_sets:from_list(Ds),
    Rs = kill_regs(Rs1, Kill),
    {Rs,Acc};
flush(Rs0, Is, Acc0) ->
    Acc = flush_all(Rs0, Is, Acc0),
    {[],Acc}.
%% flush_all(FloatRegs, [Instruction], Acc) -> Acc'
%%  Write every dirty float register back to its variable, except when
%%  the variable is killed in the rest of the block.  Float literals
%%  never need to be written back.
flush_all([{_,{float,_},_}|Rs], Is, Acc) ->
    flush_all(Rs, Is, Acc);
flush_all([{I,V,dirty}|Rs], Is, Acc0) ->
    Acc = checkerror(Acc0),
    case beam_utils:is_killed_block(V, Is) of
        true -> flush_all(Rs, Is, Acc);
        false -> flush_all(Rs, Is, [{set,[V],[{fr,I}],fmove}|Acc])
    end;
flush_all([{_,_,clean}|Rs], Is, Acc) -> flush_all(Rs, Is, Acc);
flush_all([free|Rs], Is, Acc) -> flush_all(Rs, Is, Acc);
flush_all([], _, Acc) -> Acc.
%% save_regs(FloatRegs, VarSet, Acc) -> Acc'
%%  Emit fmoves writing back every dirty float register whose variable
%%  is a member of VarSet (i.e. is about to be read).
save_regs(Rs, Save, Acc) ->
    foldl(fun(R, A) -> save_reg(R, Save, A) end, Acc, Rs).

save_reg({I,V,dirty}, Save, Acc) ->
    case cerl_sets:is_element(V, Save) of
        true -> [{set,[V],[{fr,I}],fmove}|checkerror(Acc)];
        false -> Acc
    end;
save_reg(_, _, Acc) -> Acc.
%% kill_regs(FloatRegs, VarSet) -> FloatRegs'
%%  Free every float register whose variable is in VarSet (i.e. is
%%  about to be overwritten).
kill_regs(Rs, Kill) ->
    [kill_reg(R, Kill) || R <- Rs].

kill_reg({_,V,_}=R, Kill) ->
    case cerl_sets:is_element(V, Kill) of
        true -> free;
        false -> R
    end;
kill_reg(R, _) -> R.
%% Helpers maintaining the float register map: a list of
%% {Slot,Var,clean|dirty} entries and 'free' placeholders.

%% mark(Var, Regs, Mark) -> Regs'
%%  Set the clean/dirty mark of the first entry for Var, if any.
mark(Var, [{Slot,Var,_}|Rest], Mark) ->
    [{Slot,Var,Mark}|Rest];
mark(Var, [Other|Rest], Mark) ->
    [Other|mark(Var, Rest, Mark)];
mark(_Var, [], _Mark) ->
    [].

%% fetch_reg(Var, Regs) -> {fr,Slot}
%%  The float register holding Var; crashes if Var is not loaded.
fetch_reg(Var, [{Slot,Var,_}|_]) -> {fr,Slot};
fetch_reg(Var, [_|Rest]) -> fetch_reg(Var, Rest).

%% find_reg(Var, Regs) -> {ok,{fr,Slot}} | error
find_reg(Var, Regs) ->
    case lists:keyfind(Var, 2, Regs) of
        {Slot,Var,_} -> {ok,{fr,Slot}};
        false -> error
    end.

%% put_reg(Var, Regs, Mark) -> Regs'
%%  Place Var in the first free slot, or append a new slot.
put_reg(Var, Regs, Mark) -> put_reg_1(Var, Regs, Mark, 0).

put_reg_1(Var, [free|Rest], Mark, Slot) -> [{Slot,Var,Mark}|Rest];
put_reg_1(Var, [Used|Rest], Mark, Slot) -> [Used|put_reg_1(Var, Rest, Mark, Slot+1)];
put_reg_1(Var, [], Mark, Slot) -> [{Slot,Var,Mark}].
%% checkerror(ReversedIs) -> ReversedIs'
%%  Add an fcheckerror at this point of the (reversed) instruction
%%  stream if an unchecked float operation precedes it; a nearer
%%  fcheckerror means nothing needs to be done.
checkerror(Is) ->
    checkerror_1(Is, Is).

checkerror_1([{set,[],[],fcheckerror}|_], OrigIs) ->
    OrigIs;
checkerror_1([{set,_,_,{bif,Op,_}}|Is], OrigIs) ->
    case lists:member(Op, [fadd,fsub,fmul,fdiv,fnegate]) of
        true -> checkerror_2(OrigIs);
        false -> checkerror_1(Is, OrigIs)
    end;
checkerror_1([_|Is], OrigIs) ->
    checkerror_1(Is, OrigIs);
checkerror_1([], OrigIs) ->
    OrigIs.

checkerror_2(OrigIs) -> [{set,[],[],fcheckerror}|OrigIs].
%%% Routines for maintaining a type database. The type database
%%% associates type information with registers.
%%%
%%% {tuple,Size,First} means that the corresponding register contains a
%%% tuple with *at least* Size elements. A tuple with unknown
%%% size is represented as {tuple,0,[]}. First is either [] (meaning that
%%% the tuple's first element is unknown) or [FirstElement] (the contents
%%% of the first element).
%%%
%%% 'float' means that the register contains a float.
%%%
%%% 'integer' or {integer,{Min,Max}} that the register contains an
%%% integer.
%% tdb_new() -> EmptyDataBase
%%  Creates a new, empty type database.  The database is an orddict
%%  mapping registers to type information.
tdb_new() -> [].
%% tdb_find(Register, Db) -> Information | error
%%  Returns type information, or the atom 'error' when there is no
%%  type information available for Register.  Only x and y registers
%%  can carry type information; any other key yields 'error'.
tdb_find({x,_}=Reg, Db) -> tdb_find_1(Reg, Db);
tdb_find({y,_}=Reg, Db) -> tdb_find_1(Reg, Db);
tdb_find(_, _) -> error.

tdb_find_1(Reg, Db) ->
    case orddict:find(Reg, Db) of
        {ok,Info} -> Info;
        error -> error
    end.
%% tdb_copy(Source, Dest, Db) -> Db'
%%  Update the type information for Dest to have the same type
%%  as the Source.  When Source is a register its recorded type is
%%  copied (or Dest is cleared if nothing is known); when Source is a
%%  literal its type is derived directly from the term.
tdb_copy({Tag,_}=S, D, Ts) when Tag =:= x; Tag =:= y ->
    case tdb_find(S, Ts) of
        error -> orddict:erase(D, Ts);
        Type -> orddict:store(D, Type, Ts)
    end;
tdb_copy(Literal, D, Ts) ->
    Type = case Literal of
               {atom,_} -> Literal;
               {float,_} -> float;
               {integer,Int} -> {integer,{Int,Int}};
               {literal,[_|_]} -> nonempty_list;
               %% #{} as a pattern matches any map.
               {literal,#{}} -> map;
               {literal,Tuple} when tuple_size(Tuple) >= 1 ->
                   Lit = tag_literal(element(1, Tuple)),
                   {tuple,tuple_size(Tuple),[Lit]};
               _ -> term
           end,
    if
        Type =:= term ->
            %% Nothing useful is known about this literal.
            orddict:erase(D, Ts);
        true ->
            verify_type(Type),
            orddict:store(D, Type, Ts)
    end.
%% tag_literal(Term) -> TaggedOperand
%%  Wrap a plain Erlang term in the tagged form used for BEAM
%%  operands.  Note that the atom clause must come before the []
%%  clause cannot be reached by atoms ([] is not an atom).
tag_literal(Lit) when is_atom(Lit) -> {atom,Lit};
tag_literal(Lit) when is_float(Lit) -> {float,Lit};
tag_literal(Lit) when is_integer(Lit) -> {integer,Lit};
tag_literal([]) -> nil;
tag_literal(Lit) -> {literal,Lit}.
%% tdb_update([UpdateOp], Db) -> NewDb
%%       UpdateOp = {Register,kill}|{Register,NewInfo}
%%  Updates a type database. If a 'kill' operation is given, the type
%%  information for that register will be removed from the database.
%%  A kill operation takes precedence over other operations for the same
%%  register (i.e. [{{x,0},kill},{{x,0},{tuple,5,[]}}] means that the
%%  the existing type information, if any, will be discarded, and the
%%  the '{tuple,5,[]}' information ignored.
%%
%%  If NewInfo information is given and there exists information about
%%  the register, the old and new type information will be merged.
%%  For instance, {tuple,5,_} and {tuple,10,_} will be merged to produce
%%  {tuple,10,_}.
tdb_update(Uis0, Ts0) ->
    %% Only x and y registers can carry type information; drop any
    %% other update targets (e.g. float registers or literals).
    Uis1 = filter(fun ({{x,_},_Op}) -> true;
                      ({{y,_},_Op}) -> true;
                      (_) -> false
                  end, Uis0),
    tdb_update1(lists:sort(Uis1), Ts0).

%% Merge the sorted update list into the sorted (orddict) database.
%% Because the update list is sorted, a kill for a register appears
%% before any type updates for the same register.
tdb_update1([{Key,kill}|Ops], [{K,_Old}|_]=Db) when Key < K ->
    tdb_update1(remove_key(Key, Ops), Db);
tdb_update1([{Key,Type}=New|Ops], [{K,_Old}|_]=Db) when Key < K ->
    verify_type(Type),
    [New|tdb_update1(Ops, Db)];
tdb_update1([{Key,kill}|Ops], [{Key,_}|Db]) ->
    tdb_update1(remove_key(Key, Ops), Db);
tdb_update1([{Key,NewInfo}|Ops], [{Key,OldInfo}|Db]) ->
    [{Key,merge_type_info(NewInfo, OldInfo)}|tdb_update1(Ops, Db)];
tdb_update1([{_,_}|_]=Ops, [Old|Db]) ->
    [Old|tdb_update1(Ops, Db)];
tdb_update1([{Key,kill}|Ops], []) ->
    tdb_update1(remove_key(Key, Ops), []);
tdb_update1([{_,Type}=New|Ops], []) ->
    verify_type(Type),
    [New|tdb_update1(Ops, [])];
tdb_update1([], Db) -> Db.
%% tdb_kill_xregs(Db) -> NewDb
%%  Kill all information about x registers. Also kill all tuple_element
%%  dependencies from y registers to x registers.
tdb_kill_xregs(Db) ->
    [Entry || Entry <- Db, not call_clobbered(Entry)].

%% A function call invalidates every x register, and therefore also
%% any y-register type that refers to an x register.
call_clobbered({{x,_},_Type}) -> true;
call_clobbered({{y,_},{tuple_element,{x,_},_}}) -> true;
call_clobbered(_) -> false.
%% remove_key(Key, Ops) -> Ops'
%%  Drop the leading operations for Key from the sorted operation
%%  list (used after a kill to discard later updates for the same
%%  register).
remove_key(Key, Ops) ->
    lists:dropwhile(fun({K,_}) -> K =:= Key;
                       (_) -> false
                    end, Ops).
%% merge_type_info(NewInfo, OldInfo) -> Info
%%  Merge two type descriptions for the same register: the larger of
%%  two tuple sizes wins, known first-element information is kept,
%%  integer ranges are intersected, and otherwise the new information
%%  replaces the old.
merge_type_info(I, I) -> I;
merge_type_info({tuple,Sz1,Same}, {tuple,Sz2,Same}=Max) when Sz1 < Sz2 ->
    Max;
merge_type_info({tuple,Sz1,Same}=Max, {tuple,Sz2,Same}) when Sz1 > Sz2 ->
    Max;
%% Propagate first-element information from one side to the other,
%% then retry the merge with equal First fields.
merge_type_info({tuple,Sz1,[]}, {tuple,_Sz2,First}=Tuple2) ->
    merge_type_info({tuple,Sz1,First}, Tuple2);
merge_type_info({tuple,_Sz1,First}=Tuple1, {tuple,Sz2,_}) ->
    merge_type_info(Tuple1, {tuple,Sz2,First});
merge_type_info(integer, {integer,_}=Int) ->
    Int;
merge_type_info({integer,_}=Int, integer) ->
    Int;
merge_type_info({integer,{Min1,Max1}}, {integer,{Min2,Max2}}) ->
    {integer,{max(Min1, Min2),min(Max1, Max2)}};
merge_type_info(NewType, _) ->
    verify_type(NewType),
    NewType.
%% verify_type(Type) -> ok
%%  Sanity check of the type representation: crash (function_clause)
%%  on any malformed type term.
%%
%%  Fix: the original final clause had dataset metadata fused onto the
%%  line after the terminating period, which made the file unparseable.
verify_type({atom,_}) -> ok;
verify_type(boolean) -> ok;
verify_type(integer) -> ok;
verify_type({integer,{Min,Max}})
  when is_integer(Min), is_integer(Max) -> ok;
verify_type(map) -> ok;
verify_type(nonempty_list) -> ok;
verify_type({tuple,Sz,[]}) when is_integer(Sz) -> ok;
verify_type({tuple,Sz,[_]}) when is_integer(Sz) -> ok;
verify_type({tuple_element,_,_}) -> ok;
verify_type(float) -> ok.
%%
%% Copyright 2012 - 2013 <NAME>, All Rights Reserved
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @description
%% scalar data type encode / decode utility
%%
%% built-in data type abbreviation:
%% a - atom, s - binary, c - list, i - integer, f - float
%%
%% extended data type set
%% s - string (binary)
%% ls - long string (synonym unicode:characters_to_binary + error handling)
%% c - character list
%% lc - long characters (synonym unicode:characters_to_list + error handling)
%%
%% @todo
%% x(...) - unsigned int as a hexadecimal number.
%% o(...) - unsigned int in octal.
%%
-module(htstream_codec).
-export([
i/1, %% int as a signed decimal number.
f/1, %% double in normal (fixed-point) notation
s/1, %% null-terminated string (binary in Erlang)
ls/1, %% null-terminated Unicode string
c/1, %% char (character list)
lc/1, %% Unicode characters
a/1, %% existing atom
atom/1, %% new atom
decode/1
]).
%%
%% scalar to integer
-spec i(_) -> integer().
i(Value) when is_integer(Value) -> Value;
i(Value) when is_binary(Value) -> btoi(Value);
i(Value) when is_atom(Value) -> atoi(Value);
i(Value) when is_list(Value) -> ltoi(Value);
i(Value) when is_float(Value) -> ftoi(Value).

btoi(Bin) -> ltoi(btol(Bin)).
atoi(Atom) -> ltoi(atol(Atom)).
ltoi(Chars) -> list_to_integer(Chars).
%% Floats are truncated towards zero.
ftoi(Float) -> erlang:trunc(Float).
%%
%% scalar to float
-spec f(_) -> float().
f(Value) when is_float(Value) -> Value;
f(Value) when is_binary(Value) -> btof(Value);
f(Value) when is_atom(Value) -> atof(Value);
f(Value) when is_list(Value) -> ltof(Value);
f(Value) when is_integer(Value) -> itof(Value).

btof(Bin) -> ltof(btol(Bin)).
atof(Atom) -> ltof(atol(Atom)).
%% Requires a full float string ("1.0"); integers must go via itof/1.
ltof(Chars) -> list_to_float(Chars).
itof(Int) -> Int + 0.0.
%%
%% scalar to string (binary); 'undefined' maps to the empty binary
-spec s(_) -> binary().
s(undefined) -> <<>>;
s(Value) when is_binary(Value) -> btos(Value);
s(Value) when is_atom(Value) -> atos(Value);
s(Value) when is_list(Value) -> ltos(Value);
s(Value) when is_integer(Value) -> itos(Value);
s(Value) when is_float(Value) -> ftos(Value).

btos(Bin) -> Bin.
atos(Atom) -> atom_to_binary(Atom, utf8).
ltos(Chars) -> iolist_to_binary(Chars).
itos(Int) -> ltos(itol(Int)).
%% Floats are rendered with nine decimals, matching ftol/1.
ftos(Float) -> ltos(io_lib:format("~.9f", [Float])).
%%
%% scalar to Unicode string
-spec ls(_) -> binary().
ls(X) when is_binary(X) -> utob(X);
ls(X) when is_atom(X) -> atos(X);
ls(X) when is_list(X) -> utob(X);
ls(X) when is_integer(X) -> itos(X);
ls(X) when is_float(X) -> ftos(X).

%% Convert chardata to a UTF-8 binary, exiting on invalid input.
%% Fix: unicode:characters_to_binary/1 reports failures as THREE-tuples
%% ({incomplete, Converted, Rest} and {error, Converted, Rest}), so the
%% original two-tuple patterns could never match and bad input was
%% silently returned as an error tuple instead of exiting.
utob(X) ->
    case unicode:characters_to_binary(X) of
        {incomplete, _, _} ->
            exit(rought);
        {error, _, _} ->
            exit(badarg);
        Y ->
            Y
    end.
%%
%% scalar to character list; 'undefined' maps to the empty list
-spec c(_) -> list().
c(undefined) ->
    [];
c(Value) when is_list(Value) ->
    Value;
c(Value) when is_binary(Value) ->
    btol(Value);
c(Value) when is_atom(Value) ->
    atol(Value);
c(Value) when is_integer(Value) ->
    itol(Value);
c(Value) when is_float(Value) ->
    ftol(Value).

btol(Bin) -> binary_to_list(Bin).
atol(Atom) -> atom_to_list(Atom).
itol(Int) -> integer_to_list(Int).
%% Floats are rendered with nine decimals.
ftol(Float) -> lists:flatten(io_lib:format("~.9f", [Float])).
%%
%% scalar to Unicode characters
-spec lc(_) -> list().
lc(X) when is_binary(X) -> utoc(X);
lc(X) when is_atom(X) -> atol(X);
lc(X) when is_list(X) -> utoc(X);
lc(X) when is_integer(X) -> itol(X);
lc(X) when is_float(X) -> ftol(X).

%% Convert chardata to a list of Unicode code points, exiting on
%% invalid input.
%% Fix: unicode:characters_to_list/1 reports failures as THREE-tuples
%% ({incomplete, Converted, Rest} and {error, Converted, Rest}), so the
%% original two-tuple patterns could never match and bad input was
%% silently returned as an error tuple instead of exiting.
utoc(X) ->
    case unicode:characters_to_list(X) of
        {incomplete, _, _} ->
            exit(rought);
        {error, _, _} ->
            exit(badarg);
        Y ->
            Y
    end.
%%
%% scalar to an already existing atom (safe for untrusted input:
%% never creates new atoms)
-spec a(_) -> atom().
a(Value) when is_atom(Value) -> Value;
a(Value) when is_binary(Value) -> btoa(Value);
a(Value) when is_list(Value) -> ltoa(Value);
a(Value) when is_integer(Value) -> itoa(Value);
a(Value) when is_float(Value) -> ftoa(Value).

btoa(Bin) -> binary_to_existing_atom(Bin, utf8).
ltoa(Chars) -> list_to_existing_atom(Chars).
itoa(Int) -> ltoa(itol(Int)).
ftoa(Float) -> ltoa(ftol(Float)).
%%
%% scalar to a (possibly new) atom
%% NOTE: atoms are never garbage collected; do not feed untrusted
%% input to this function (use a/1 instead).
-spec atom(_) -> atom().
atom(Value) when is_atom(Value) -> Value;
atom(Value) when is_binary(Value) -> btoaa(Value);
atom(Value) when is_list(Value) -> ltoaa(Value);
atom(Value) when is_integer(Value) -> itoaa(Value);
atom(Value) when is_float(Value) -> ftoaa(Value).

btoaa(Bin) -> binary_to_atom(Bin, utf8).
ltoaa(Chars) -> list_to_atom(Chars).
itoaa(Int) -> ltoaa(itol(Int)).
ftoaa(Float) -> ltoaa(ftol(Float)).
%%
%% decode scalar type: booleans and numbers become their Erlang
%% values; anything else is returned as-is (binaries may be copied,
%% see deref/1).
%%
%% Fixes:
%%  * the exponent part of the number regex was "[0-9]" (one digit
%%    only), so "1.5e10" was not recognized as a number;
%%  * a number with an exponent but no fraction ("1e5") matched with
%%    the fraction group reported as {-1,0} and then crashed in
%%    list_to_float/1, which requires a fraction part.  Such inputs
%%    are now normalized by inserting ".0" before the exponent.
-spec decode(list() | binary()) -> any().
decode(undefined) ->
    undefined;
decode(<<"true">>) ->
    true;
decode(<<"false">>) ->
    false;
decode(X)
  when is_binary(X) ->
    case re:run(X, "^(-?[0-9]+)(\\.[0-9]+)?([eE][+-]?[0-9]+)?$") of
        {match, [_, _]} -> btoi(X);
        {match, [_, _, _]} -> btof(X);
        {match, [_, _, {-1, 0}, _]} ->
            %% Exponent without a fraction ("1e5" -> "1.0e5").
            btof(re:replace(X, "[eE]", ".0e", [{return, binary}]));
        {match, [_, _, _, _]} -> btof(X);
        nomatch -> deref(X)
    end;
decode("true") ->
    true;
decode("false") ->
    false;
decode(X)
  when is_list(X) ->
    case re:run(X, "^(-?[0-9]+)(\\.[0-9]+)?([eE][+-]?[0-9]+)?$") of
        {match, [_, _]} -> ltoi(X);
        {match, [_, _, _]} -> ltof(X);
        {match, [_, _, {-1, 0}, _]} ->
            %% Exponent without a fraction ("1e5" -> "1.0e5").
            ltof(re:replace(X, "[eE]", ".0e", [{return, list}]));
        {match, [_, _, _, _]} -> ltof(X);
        nomatch -> X
    end.
%% A decoded binary may be a sub-binary referencing a much larger
%% binary received from the server; copy it when keeping the small
%% part would pin more than twice its own size.
deref(Bin) ->
    Referenced = binary:referenced_byte_size(Bin),
    Limit = 2 * byte_size(Bin),
    if
        Referenced > Limit -> binary:copy(Bin);
        true -> Bin
    end.
%%%------------------------------------------------------------------
%%%
%%% private
%%%
%%%------------------------------------------------------------------
%% @doc Default configuration and config-parsing functions.
-module(mas_config).
-include("mas.hrl").
-export([proplist_to_record/1,
options_specs/0]).
%% ?LOAD(Proplist, Prop) expands to the record-field binding
%% `Prop = Value` with Value taken from Proplist; raises an error for
%% a missing mandatory option.
-define(LOAD(Proplist, Prop),
        Prop = case proplists:lookup(Prop, Proplist) of
                   {Prop, Value} ->
                       Value;
                   none ->
                       erlang:error({"mas missing option", Prop})
               end).

%% ?LOAD(Proplist, Prop, Default) is like ?LOAD/2 but falls back to
%% Default instead of raising.
-define(LOAD(Proplist, Prop, Default),
        Prop = proplists:get_value(Prop, Proplist, Default)).
%% @doc Transform a proplist with config properties to a record.
%% Mandatory options without a value cause an error (see ?LOAD/2);
%% all other options fall back to the getopt defaults.
-spec proplist_to_record([tuple()]) -> config().
proplist_to_record(Options) ->
    %% User options are prepended, so proplists:lookup/2 (used inside
    %% ?LOAD) prefers them over the appended defaults.
    Proplist = Options ++ default_options(),
    #config{?LOAD(Proplist, model),
            ?LOAD(Proplist, agent_env),
            ?LOAD(Proplist, topology),
            ?LOAD(Proplist, migration_probability),
            ?LOAD(Proplist, log_dir),
            ?LOAD(Proplist, islands),
            ?LOAD(Proplist, population_size),
            ?LOAD(Proplist, write_interval),
            ?LOAD(Proplist, arena_timeout),
            ?LOAD(Proplist, skel_workers),
            ?LOAD(Proplist, skel_split_size),
            ?LOAD(Proplist, skel_pull)}.
-spec options_specs() -> [getopt:option_spec()].
%% @doc getopt specifications for every supported option.
%% Each tuple is {Name, ShortOpt, LongOpt, ArgSpec, HelpText}; options
%% whose ArgSpec carries no default are mandatory (proplist_to_record/1
%% errors if they are absent).
%% Fixed user-facing help-text typos: "writen", "cause", "send",
%% "greater that".
options_specs() ->
    [{model, $M, "model", atom,
      "Model on which mas will be run. Possible options are: `mas_skel`, `mas_sequential`, `mas_concurrent` and `mas_hybrid`"},
     {agent_env, undefined, "agent_env", atom,
      "Module implementing `mas_agent_env` behaviour"},
     {topology, $T, "topology", {atom, mesh},
      "Island topologies (ring, mesh)"},
     {migration_probability, undefined, "migration_probability", {float, 0.0001},
      "The probability of migration of an agent with positive energy"},
     {log_dir, $L, "log_dir", {atom, standard_io},
      "The default path to write the logs to. The `standard_io` atom causes the logs to be sent to the standard output"},
     {islands, $I, "islands", {integer, 4},
      "The number of islands"},
     {population_size, $P, "population_size", {integer, 100},
      "The initial size of an island's population"},
     {write_interval, undefined, "write_interval", {integer, 1000},
      "How often the logs are written to output (in milliseconds)"},
     {arena_timeout, undefined, "arena_timeout", {integer, 3000},
      "How long an arena should wait for agents to come before performing a defective interaction"},
     {skel_workers, $W, "skel_workers", {integer, 4},
      "Number of workers used in skel map skeleton."},
     {skel_split_size, undefined, "skel_split_size", {integer, 20},
      "Granularity of workload sent to each worker. To disable set to value greater than `population_size`"},
     {skel_pull, undefined, "skel_pull", {atom, enable},
      "`enable` or `disable` work-pulling in skel map."}
    ].
%% Compute the default value of every option by running the getopt specs
%% against an empty command line; options without a default are simply
%% absent from the result.
default_options() ->
    {ok, {Defaults, _NonOptArgs}} = getopt:parse(options_specs(), ""),
    Defaults.
%% @author <NAME> <<EMAIL>>
%% @doc Handle outgoing messages to a peer
%% <p>This module handles all outgoing messaging for a peer. It
%% supports various API calls to facilitate this</p>
%% <p>Note that this module has two modes, <em>fast</em> and
%% <em>slow</em>. The fast mode outsources the packet encoding to the
%% C-layer, whereas the slow mode doesn't. The price to pay is that we
%% have a worse granularity on the rate calculations, so we only shift
%% into the fast gear when we have a certain amount of traffic going on.</p>
%% <p>The shift fast to slow, or slow to fast, is synchronized with
%% the process {@link etorrent_peer_recv}, so if altering the code,
%% beware of that</p>
%% @end
-module(etorrent_peer_send).
-include("etorrent_rate.hrl").
-include("log.hrl").
-behaviour(gen_server).
-ignore_xref([{'start_link', 3}]).
%% Apart from standard gen_server things, the main idea of this module is
%% to serve as a mediator for the peer in the send direction. Precisely,
%% we have a message we can send to the process, for each of the possible
%% messages one can send to a peer.
-export([start_link/3,
check_choke/1,
go_fast/1,
go_slow/1,
local_request/2, remote_request/4, cancel/4,
choke/1, unchoke/1, have/2,
not_interested/1, interested/1,
extended_msg/1,
bitfield/2]).
%% gen_server callbacks
-export([init/1, handle_info/2, terminate/2, code_change/3,
handle_call/3, handle_cast/2]).
-ignore_xref({start_link, 5}).
%% `slow' encodes messages in Erlang code; `fast' delegates packet
%% encoding to the C layer (see the module doc).
-type( mode() :: 'fast' | 'slow').
%% Sender-process state:
%%   socket          - socket towards the peer
%%   requests        - queue of pending remote (incoming) requests
%%   fast_extension  - whether the BEP-6 fast extension is negotiated
%%   mode            - current encoding gear, see mode() above
%%   control_pid     - the controlling etorrent_peer process
%%   rate            - upload rate tracker
%%   choke           - true when we are choking the peer
%%   torrent_id      - id of the torrent this peer belongs to
%%   file_system_pid - file-system process for this torrent
-record(state, {socket = none,
                requests = none,
                fast_extension = false,
                mode = slow :: mode(),
                control_pid = none,
                rate = none,
                choke = true,
                interested = false, % Are we interested in the peer?
                torrent_id = none,
                file_system_pid = none}).
-define(DEFAULT_KEEP_ALIVE_INTERVAL, 120*1000). % From proto. spec.
-define(MAX_REQUESTS, 1024). % Maximal number of requests a peer may make.
%%====================================================================
%% @doc Start the send process
%% <p>The server is linked to the caller and registers itself (in
%% init/1) under the local gproc name of the given socket.</p>
%% @end
-spec start_link(port(), integer(), boolean()) ->
			ignore | {ok, pid()} | {error, any()}.
start_link(Socket, TorrentId, FastExtension) ->
    InitArgs = [Socket, TorrentId, FastExtension],
    gen_server:start_link(?MODULE, InitArgs, []).
%% @doc Queue up a remote request
%% <p>A remote request is a request from the peer at the other
%% end. This call queues up the chunk request in our send queue.</p>
%% @end
%%--------------------------------------------------------------------
-spec remote_request(pid(), integer(), integer(), integer()) -> ok.
remote_request(Pid, Index, Offset, Len) ->
    gen_server:cast(Pid, {remote_request, Index, Offset, Len}).
%%--------------------------------------------------------------------
%% Func: local_request(Pid, Index, Offset, Len)
%% Description: We request a piece from the peer: {Index, Offset, Len}
%%--------------------------------------------------------------------
%% @doc send a REQUEST message
%% <p>The dual to {@link remote_request/4}. We queue up a chunk for
%% the peer to send back to us</p>
%% @end
-spec local_request(pid(), {integer(), integer(), integer()}) -> ok.
local_request(Pid, {Index, Offset, Size}) ->
    gen_server:cast(Pid, {local_request, {Index, Offset, Size}}).
%% @doc Send a CANCEL message.
%% @end
-spec cancel(pid(), integer(), integer(), integer()) -> ok.
cancel(Pid, Index, Offset, Len) ->
    gen_server:cast(Pid, {cancel, Index, Offset, Len}).
%% @doc CHOKE the peer.
%% @end
-spec choke(pid()) -> ok.
choke(Pid) ->
    gen_server:cast(Pid, choke).
%% @doc UNCHOKE the peer.
%% @end
-spec unchoke(pid()) -> ok.
unchoke(Pid) ->
    gen_server:cast(Pid, unchoke).
%% @doc Ask the process to check if a rechoke is necessary.
%% <p>This call is used whenever we want to check the choke state of the peer.
%% If it is true, we perform a rechoke request. It is probably the wrong
%% place to issue the rechoke request. Rather, it would be better if a
%% control process does this.</p>
%% @end
-spec check_choke(pid()) -> ok.
check_choke(Pid) ->
    gen_server:cast(Pid, check_choke).
%% @doc send a NOT_INTERESTED message
%% @end
-spec not_interested(pid()) -> ok.
not_interested(Pid) ->
    gen_server:cast(Pid, not_interested).
%% @doc send an INTERESTED message
%% @end
-spec interested(pid()) -> ok.
interested(Pid) ->
    gen_server:cast(Pid, interested).
%% @doc send a HAVE message
%% @end
-spec have(pid(), integer()) -> ok.
have(Pid, PieceNumber) ->
    gen_server:cast(Pid, {have, PieceNumber}).
%% @doc Send a BITFIELD message to the peer
%% @end
-spec bitfield(pid(), binary()) -> ok. %% This should be checked
bitfield(Pid, BitField) ->
    gen_server:cast(Pid, {bitfield, BitField}).
%% @doc Send off the default EXT_MSG to the peer
%% <p>This is part of BEP-10</p>
%% @end
-spec extended_msg(pid()) -> ok.
extended_msg(Pid) ->
    gen_server:cast(Pid, extended_msg).
%% @doc Request that we enable fast messaging mode.
%% <p>In this mode, message encoding is done by the underlying Erlang
%% VM and not by us.</p>
%% <p><b>Note:</b> The only intended caller of this function is {@link
%% etorrent_peer_recv}, when it wants synchronization on the gear change.</p>
%% @end
-spec go_fast(pid()) -> ok.
go_fast(Pid) ->
    gen_server:cast(Pid, {go_fast, self()}).
%% @doc Request that we enable slow messaging mode.
%% <p>In this mode, message encoding is done by the code in this
%% module, not by the VM kernel</p>
%% <p><b>Note:</b> The only intended caller of this function is {@link
%% etorrent_peer_recv}, when it wants synchronization on the gear change.</p>
%% @end
-spec go_slow(pid()) -> ok.
go_slow(Pid) ->
    gen_server:cast(Pid, {go_slow, self()}).
%%--------------------------------------------------------------------
%% Send off a piece message
%% Reads the requested chunk from disk and pushes it onto the wire.
%% NOTE(review): upload accounting happens both here ({add_upload, Len},
%% the payload size) and again inside send/2 ({add_upload, Sz}, the wire
%% size) — this looks like double counting; confirm which counter is
%% intended before changing it.
send_piece(Index, Offset, Len, S) ->
    {ok, PieceData} =
        etorrent_io:read_chunk(S#state.torrent_id, Index, Offset, Len),
    Msg = {piece, Index, Offset, PieceData},
    ok = etorrent_torrent:statechange(S#state.torrent_id,
                                      [{add_upload, Len}]),
    send_message(Msg, S).
%% Send a message with an immediate (0 ms) gen_server timeout so the
%% queue is drained again right after.
send_message(Msg, S) ->
    send_message(Msg, S, 0).
%% @todo: Think about the stop messages here. They are definitely wrong.
%% Send a message and translate the result into a gen_server return
%% value; a closed/invalid socket terminates the process normally.
send_message(Msg, S, Timeout) ->
    case send(Msg, S) of
        {ok, NS} -> {noreply, NS, Timeout};
        {error, closed, NS} -> {stop, normal, NS};
        {error, ebadf, NS} -> {stop, normal, NS}
    end.
%% Low-level send: encodes according to the current mode, then updates
%% the upload rate tracker and the torrent's upload counter with the
%% number of bytes actually put on the wire.
send(Msg, #state { torrent_id = Id} = S) ->
    case etorrent_proto_wire:send_msg(S#state.socket, Msg, S#state.mode) of
        {ok, Sz} ->
            NR = etorrent_rate:update(S#state.rate, Sz),
            ok = etorrent_torrent:statechange(Id, [{add_upload, Sz}]),
            {ok, S#state { rate = NR}};
        {{error, E}, _Amount} ->
            {error, E, S}
    end.
%% Choke the peer. With the fast extension we must explicitly reject all
%% queued requests; without it we simply drop the queue.
perform_choke(#state { fast_extension = true} = S) ->
    perform_fast_ext_choke(S);
perform_choke(#state { choke = true } = S) ->
    %% Already choking: nothing to send.
    {noreply, S, 0};
perform_choke(S) ->
    local_choke(S),
    send_message(choke, S#state{choke = true, requests = queue:new() }).
%% Fast-extension variant: send CHOKE, then a REJECT_REQUEST for every
%% pending request (BEP-6 requires an explicit reject).
perform_fast_ext_choke(#state { choke = true } = S) ->
    {noreply, S, 0};
perform_fast_ext_choke(S) ->
    local_choke(S),
    {ok, NS} = send(choke, S),
    FS = empty_requests(NS),
    {noreply, FS, 0}.
%% Drain the request queue, rejecting each entry on the wire.
empty_requests(S) ->
    empty_requests(queue:out(S#state.requests), S).
empty_requests({empty, Q}, S) ->
    S#state { requests = Q };
empty_requests({{value, {Index, Offset, Len}}, Next}, S) ->
    {ok, NS} = send({reject_request, Index, Offset, Len}, S),
    empty_requests(queue:out(Next), NS).
%% Record in the shared peer-state table that we now choke this peer.
local_choke(S) ->
    etorrent_peer_states:set_local_choke(S#state.torrent_id,
                                         S#state.control_pid).
%%====================================================================
%% @private
%% No synchronous API is exposed by this server; acknowledge any stray
%% call with `ok' and keep the state unchanged.
handle_call(_Request, _From, State) ->
    {reply, ok, State}.
%% @private
%% Start the keep-alive and rate-update timers, register the sender under
%% a gproc name derived from the socket, and wait for the controlling
%% peer process to register itself before building the initial state.
init([Socket, TorrentId, FastExtension]) ->
    erlang:send_after(?DEFAULT_KEEP_ALIVE_INTERVAL, self(), tick),
    erlang:send_after(?RATE_UPDATE, self(), rate_update),
    gproc:add_local_name({peer, Socket, sender}),
    FS = gproc:lookup_local_name({torrent, TorrentId, fs}),
    {CPid, _} = gproc:await({n,l,{peer, Socket, control}}), % @todo: Change to a timeout later on, when gproc has been fixed
    %% This may fail, but I want to check it
    false = CPid == undefined,
    {ok, #state{socket = Socket,
                requests = queue:new(),
                rate = etorrent_rate:init(),
                control_pid = CPid,
                torrent_id = TorrentId,
                fast_extension = FastExtension,
                file_system_pid = FS}}. %% Quickly enter a timeout.
%% Whenever a tick is hit, we send out a keep alive message on the line.
%% @private
handle_info(tick, S) ->
    erlang:send_after(?DEFAULT_KEEP_ALIVE_INTERVAL, self(), tick),
    send_message(keep_alive, S, 0);
%% When we are requested to update our rate, we do it here.
handle_info(rate_update, S) ->
    erlang:send_after(?RATE_UPDATE, self(), rate_update),
    Rate = etorrent_rate:update(S#state.rate, 0),
    ok = etorrent_peer_states:set_send_rate(S#state.torrent_id,
                                            S#state.control_pid,
                                            Rate#peer_rate.rate),
    {noreply, S#state { rate = Rate }};
%% Different timeouts.
%% When we are choking the peer and the piece cache is empty, garbage_collect() to reclaim
%% space quickly rather than waiting for it to happen.
%% @todo Consider if this can be simplified. It looks wrong here.
handle_info(timeout, #state { choke = true} = S) ->
    {noreply, S};
%% Unchoked with pending requests: ship the next queued piece.
handle_info(timeout, #state { choke = false, requests = Reqs} = S) ->
    case queue:out(Reqs) of
        {empty, _} ->
            {noreply, S};
        {{value, {Index, Offset, Len}}, NewQ} ->
            send_piece(Index, Offset, Len, S#state { requests = NewQ } )
    end;
handle_info(Msg, S) ->
    ?WARN([got_unknown_message, Msg, S]),
    %% Fixed: handle_info/2 must return {stop, Reason, State}; the
    %% previous {stop, Reason} 2-tuple is not a valid gen_server return
    %% and would crash the server with a bad return value instead.
    {stop, {unknown_msg, Msg}, S}.
%% Handle requests to choke and unchoke. If we are already choking the peer,
%% there is no reason to send the message again.
%% @private
handle_cast(choke, S) -> perform_choke(S);
handle_cast(unchoke, #state { choke = false } = S) -> {noreply, S, 0};
handle_cast(unchoke,
        #state { choke = true, torrent_id = Torrent_Id, control_pid = ControlPid } = S) ->
    ok = etorrent_peer_states:set_local_unchoke(Torrent_Id, ControlPid),
    send_message(unchoke, S#state{choke = false});
%% A request to check the current choke state and ask for a rechoking
handle_cast(check_choke, #state { choke = true } = S) ->
    {noreply, S, 0};
handle_cast(check_choke, #state { choke = false } = S) ->
    ok = etorrent_choker:perform_rechoke(),
    {noreply, S, 0};
%% Regular messages. We just send them onwards on the wire.
handle_cast({bitfield, BF}, S) ->
    send_message({bitfield, BF}, S);
handle_cast(extended_msg, S) ->
    send_message({extended, 0, etorrent_proto_wire:extended_msg_contents()}, S);
handle_cast(not_interested, #state { interested = false} = S) ->
    {noreply, S, 0};
handle_cast(not_interested, #state { interested = true } = S) ->
    send_message(not_interested, S#state { interested = false });
handle_cast(interested, #state { interested = true } = S) ->
    {noreply, S, 0};
handle_cast(interested, #state { interested = false } = S) ->
    send_message(interested, S#state { interested = true });
handle_cast({have, Pn}, S) ->
    send_message({have, Pn}, S);
%% Cancels are handled specially when the fast extension is enabled:
%% cancelling something we never queued is a protocol violation and we
%% stop the connection.
handle_cast({cancel, Idx, Offset, Len},
            #state { fast_extension = true, requests = Requests} = S) ->
    try
        true = queue:member({Idx, Offset, Len}, Requests),
        NQ = etorrent_utils:queue_remove({Idx, Offset, Len}, Requests),
        {noreply, S#state { requests = NQ}, 0}
    catch
        %% Fixed: a failed `true = ...' match raises
        %% error:{badmatch, false}; the previous `exit:badmatch' clause
        %% could never match, so the process crashed with badmatch
        %% instead of stopping normally.
        error:{badmatch, _} -> {stop, normal, S}
    end;
handle_cast({cancel, Index, OffSet, Len}, S) ->
    NQ = etorrent_utils:queue_remove({Index, OffSet, Len}, S#state.requests),
    {noreply, S#state{requests = NQ}, 0};
handle_cast({local_request, {Index, Offset, Size}}, S) ->
    send_message({request, Index, Offset, Size}, S);
handle_cast({remote_request, Idx, Offset, Len},
            #state { fast_extension = true, choke = true } = S) ->
    send_message({reject_request, Idx, Offset, Len}, S, 0);
handle_cast({remote_request, _Index, _Offset, _Len}, #state { choke = true } = S) ->
    {noreply, S, 0};
handle_cast({remote_request, Index, Offset, Len},
            #state { choke = false, fast_extension = FastExtension,
                     requests = Reqs} = S) ->
    case queue:len(Reqs) > ?MAX_REQUESTS of
        true when FastExtension == true ->
            send_message({reject_request, Index, Offset, Len}, S, 0);
        true ->
            {stop, max_queue_len_exceeded, S};
        false ->
            %% @todo consider to make a check here if the request is already
            %% on queue. I don't think we will have that, except if stray.
            NQ = queue:in({Index, Offset, Len}, S#state.requests),
            {noreply, S#state{requests = NQ}, 0}
    end;
%% Gear changes, synchronized with etorrent_peer_recv (see go_fast/1).
handle_cast({go_fast, Pid}, S) ->
    ok = etorrent_peer_recv:cb_go_fast(Pid),
    {noreply, S#state { mode = fast }};
handle_cast({go_slow, Pid}, S) ->
    ok = etorrent_peer_recv:cb_go_slow(Pid),
    {noreply, S#state { mode = slow }};
handle_cast(_Msg, S) ->
    {noreply, S}.
%% @private
%% Nothing to clean up: the socket is owned by the receiving side.
terminate(_Reason, _S) ->
    ok.
%% @private
%% Hot-code upgrade hook; the state layout is carried over unchanged.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
-module(hoax_expect).
-export([
assert_exported/2,
parse/2
]).
-include("hoax_int.hrl").
%% For every expectation: verify the mocked function is actually exported
%% by the module (error out otherwise), then record the expectation.
%% Returns `ok' once the whole list has been processed.
assert_exported(Expectations, Exports) ->
    lists:foreach(
      fun(#expectation{key = {_, F, A}} = Expect) ->
              lists:member({F, A}, Exports) orelse
                  error({no_such_function_to_mock, {F, A}}),
              hoax_tab:insert(Expect)
      end, Expectations).
%% A mock declared with `expect_no_interactions' records a zero-count
%% expectation covering the whole module and produces no per-function
%% expectations.
parse(Mod, expect_no_interactions) ->
    hoax_tab:insert(#expectation{key={Mod,undefined,undefined}, expected_count=0}),
    [];
%% An empty expectation list is almost certainly a mistake: fail loudly.
parse(Mod,[]) ->
    error({no_expectations_for_mock, Mod});
%% Normal case: translate each user-supplied expect term into a record.
parse(Mod,Expects) ->
    [expectation(Mod, Ex) || Ex <- Expects].
%% Translate one user-supplied expectation term into an #expectation{}.
%% Accepted shapes: {Fun, Lambda}, {Fun, Args}, {Fun, Args, Count},
%% {Fun, Args, Action} and {Fun, Args, Action, Count}, where Action is
%% {return|error|exit|throw, Term}. Anything else raises
%% {bad_expectation_syntax, Term}.
expectation(Mod, {FunctionName, Lambda}) when is_function(Lambda) ->
    expectation(Mod, FunctionName, hoax_fun:create_wildcard_for_args(Lambda), Lambda, undefined);
expectation(Mod, {Function, Args}) when is_atom(Mod), is_list(Args) ->
    expectation(Mod, Function, Args, default, undefined);
expectation(Mod, {Function, Args, Count}) when is_atom(Mod),
                                               is_list(Args),
                                               is_integer(Count) ->
    expectation(Mod, Function, Args, default, Count);
%% Fixed guards: the previous `G1, G2, X == return; X == error; ...'
%% form made `X == error/exit/throw' *alternative* guard sequences, so
%% those actions bypassed the is_atom/is_list checks entirely. The
%% parenthesised orelse keeps all checks on every alternative.
expectation(Mod, {Function, Args, {X, Y}}) when is_atom(Mod),
                                                is_list(Args),
                                                (X =:= return orelse
                                                 X =:= error orelse
                                                 X =:= exit orelse
                                                 X =:= throw) ->
    expectation(Mod, Function, Args, {X, Y}, undefined);
expectation(Mod, {Function, Args, {X, Y}, Count}) when is_atom(Mod),
                                                       is_list(Args),
                                                       is_integer(Count),
                                                       (X =:= return orelse
                                                        X =:= error orelse
                                                        X =:= exit orelse
                                                        X =:= throw) ->
    expectation(Mod, Function, Args, {X, Y}, Count);
expectation(_, Other) ->
    error({bad_expectation_syntax, Other}).
%% Build the expectation record; the arity is derived from the expected
%% argument list.
expectation(Mod, Function, Args, Action, Count) ->
    #expectation{
       key = {Mod, Function, length(Args)},
       expected_args = Args,
       action = Action,
       expected_count = Count
      }.
% @copyright 2013-2014 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Like histogram.erl but takes ?RT:key() as value and operates
%% in the key space according to the used routing table.
%% The histogram's interval span is ['(', BaseKey, BaseKey, ']'].
%% @version $Id$
-module(histogram_rt).
-author('<EMAIL>').
-vsn('$Id$').
-export([create/2, add/2, add/3, get_data/1, get_size/1, merge/2,
get_num_elements/1, get_num_inserts/1]).
-export([merge_weighted/3, normalize_count/2]).
-export([foldl_until/2, foldr_until/2]).
-export([is_histogram/1]).
%% for testing
-export([tester_create_histogram/2]).
-include("scalaris.hrl").
-export_type([histogram/0, base_key/0]).
-type key() :: ?RT:key().
-type internal_value() :: histogram:value().
-type external_data_item() :: {key(), pos_integer()}.
-type external_data_list() :: [external_data_item()].
-type base_key() :: key().
-opaque(histogram() :: {Histogram::histogram:histogram(),
BaseKey::base_key()
}).
-spec create(Size::non_neg_integer(), BaseKey::base_key()) -> histogram().
%% @doc Create a histogram of capacity `Size' anchored at `BaseKey'.
create(Size, BaseKey) ->
    {histogram:create(Size), BaseKey}.
-spec add(Value::key(), Histogram::histogram()) -> histogram().
%% @doc Add a single occurrence of `Value'.
add(Value, Histogram) ->
    add(Value, 1, Histogram).
-spec add(Value::key(), Count::pos_integer(), Histogram::histogram()) -> histogram().
%% @doc Add `Count' occurrences of `Value', translated into the
%% histogram's internal coordinate relative to the base key.
add(Value, Count, {Histogram, BaseKey}) ->
    {histogram:add(normalize(Value, BaseKey), Count, Histogram), BaseKey}.
-spec get_data(Histogram::histogram()) -> external_data_list().
%% @doc Return the histogram entries with values translated back from
%% the internal coordinate into real keys.
get_data({Histogram, BaseKey}) ->
    [{denormalize(Value, BaseKey), Count}
     || {Value, Count} <- histogram:get_data(Histogram)].
%% @doc Capacity of the underlying histogram.
-spec get_size(Histogram::histogram()) -> non_neg_integer().
get_size({Histogram, _BaseKey}) ->
    histogram:get_size(Histogram).
%% @doc Number of distinct entries currently stored.
-spec get_num_elements(Histogram::histogram()) -> non_neg_integer().
get_num_elements({Histogram, _BaseKey}) ->
    histogram:get_num_elements(Histogram).
%% @doc Total number of add operations performed.
-spec get_num_inserts(Histogram::histogram()) -> non_neg_integer().
get_num_inserts({Histogram, _BaseKey}) ->
    histogram:get_num_inserts(Histogram).
%% @doc Merges the given two histograms by adding every data point of
%% Hist2 to Hist1
-spec merge(Hist1::histogram(), Hist2::histogram()) -> histogram().
merge(Hist1, Hist2) ->
    merge_weighted(Hist1, Hist2, 1).
%% @doc Merges Hist2 into Hist1 and applies a weight to the Count of Hist2
%% When both histograms share the same base key the merge is delegated
%% to the underlying histogram; otherwise every entry of Hist2 is
%% re-added into Hist1 (re-normalized against Hist1's base key).
-spec merge_weighted(Hist1::histogram(), Hist2::histogram(), Weight::pos_integer()) -> histogram().
merge_weighted({Histogram1, BaseKey}, {Histogram2, BaseKey}, Weight) ->
    {histogram:merge_weighted(Histogram1, Histogram2, Weight), BaseKey};
merge_weighted(Hist1, Hist2, Weight) ->
    case get_size(Hist1) of
        %% A zero-capacity histogram cannot absorb any data points.
        0 -> Hist1;
        _ ->
            DataHist2 = get_data(Hist2),
            lists:foldl(fun({Value, Count}, Hist) ->
                            add(Value, Count*Weight, Hist)
                        end,
                        Hist1, DataHist2)
    end.
%% @doc Normalizes the Count by a normalization constant N
-spec normalize_count(N::pos_integer(), Histogram::histogram()) -> histogram().
normalize_count(N, {Histogram, BaseKey}) ->
    {histogram:normalize_count(N, Histogram), BaseKey}.
%% @doc Traverses the histogram until TargetCount entries have been found
%% and returns the value at this position.
-spec foldl_until(TargetCount::non_neg_integer(), histogram())
        -> {fail, Value::key() | nil, SumSoFar::non_neg_integer()} |
           {ok, Value::key() | nil, Sum::non_neg_integer()}.
foldl_until(TargetCount, {Histogram, BaseKey}) ->
    denormalize_fold_result(histogram:foldl_until(TargetCount, Histogram),
                            BaseKey).
%% @doc Like foldl_until but traverses the list from the right
-spec foldr_until(TargetCount::non_neg_integer(), histogram())
        -> {fail, Value::key() | nil, SumSoFar::non_neg_integer()} |
           {ok, Value::key() | nil, Sum::non_neg_integer()}.
foldr_until(TargetCount, {Histogram, BaseKey}) ->
    denormalize_fold_result(histogram:foldr_until(TargetCount, Histogram),
                            BaseKey).
%% Translate the value inside a fold result back into the key space;
%% `nil' (no value found) is passed through untouched.
denormalize_fold_result({_Status, nil, _Sum} = Result, _BaseKey) ->
    Result;
denormalize_fold_result({Status, Value, Sum}, BaseKey) ->
    {Status, denormalize(Value, BaseKey), Sum}.
%% Map a real key into the histogram's internal coordinate: the distance
%% from the base key in the routing-table key space.
-compile({inline, [normalize/2]}).
-spec normalize(Value::key(), BaseKey::base_key()) -> internal_value().
normalize(Value, BaseKey) ->
    ?RT:get_range(BaseKey, Value).
%% Inverse of normalize/2: map an internal coordinate back to a real key
%% by splitting the full ring at the corresponding fraction.
-compile({inline, [denormalize/2]}).
-spec denormalize(Value::internal_value(), BaseKey::base_key()) -> key().
denormalize(Value, BaseKey) ->
    ?RT:get_split_key(BaseKey, BaseKey, {Value, trunc(?RT:n())}).
-spec is_histogram(histogram() | any()) -> boolean().
%% @doc Structural check: a histogram_rt is represented as a 2-tuple
%% {Histogram, BaseKey}; anything else is rejected.
is_histogram(Term) when tuple_size(Term) =:= 2 ->
    true;
is_histogram(_) ->
    false.
%% Test helper: build a histogram sized to hold exactly the given
%% entries and insert them all.
-spec tester_create_histogram(Entries::[{key(), pos_integer()}], BaseKey::base_key()) -> histogram().
tester_create_histogram(Entries, BaseKey) ->
    HistogramRT = create(length(Entries), BaseKey),
    lists:foldl(fun({Value, Count}, Hist) -> add(Value, Count, Hist) end, HistogramRT, Entries).
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021-2022 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc Khepri path API.
%%
%% A path is the type used by Khepri to reference nodes in the tree structure.
%% A path describes how to reach a node from the root node.
%%
%% A path, or <em>native path</em>, is a list of components. Components can be
%% Erlang atoms and binaries. Example:
%%
%% ```
%% %% Native path.
%% Path = [stock, wood, oak].
%% '''
%%
%% A path may contain conditions to tune how a node is matched or to match
%% multiple nodes at once. This is called a <em>path pattern</em>. A path
%% pattern may contain conditions in addition to regular components (Erlang
%% atoms and binaries). See {@link khepri_condition} to learn more about
%% conditions. Example:
%%
%% ```
%% %% Path pattern with a condition on `wood'.
%% PathPattern = [stock,
%% #if_all{conditions = [wood,
%% #if_node_exists{exists = true}]},
%% oak].
%% '''
%%
%% To be user-friendly, Unix-like string-based paths are accepted by most
%% functions. These <em>Unix paths</em> have the `"/path/to/node"' and is the
%% equivalent of the `[path, to, node]' native path.
%%
%% ```
%% %% Unix path, equivalent of the first native path example.
%% UnixPath = "/stock/wood/oak".
%% '''
-module(khepri_path).
-include("include/khepri.hrl").
-export([compile/1,
from_string/1,
component_from_string/1,
maybe_from_string/1,
to_string/1,
component_to_string/1,
combine_with_conditions/2,
targets_specific_node/1,
component_targets_specific_node/1,
is_valid/1,
ensure_is_valid/1,
abspath/2,
realpath/1,
pattern_includes_root_node/1]).
-type node_id() :: atom() | binary().
%% A node name.
-type component() :: node_id() | ?ROOT_NODE | ?THIS_NODE | ?PARENT_NODE.
%% Component name in a path to a node.
%% TODO: Rename to node_path()
-type path() :: [component()].
%% Path to a node.
-type pattern() :: [pattern_component()].
%% Path pattern which may match zero, one or more nodes.
-type pattern_component() :: component() | khepri_condition:condition().
-export_type([path/0,
pattern/0,
component/0,
pattern_component/0,
node_id/0]).
-spec compile(pattern()) -> pattern().
%% @private
%% Pre-compile every condition in the pattern so later matching is
%% cheaper; plain components pass through khepri_condition:compile/1.
compile(PathPattern) ->
    [khepri_condition:compile(Component) || Component <- PathPattern].
%% Parse a Unix-like path string into a native path. A leading "/" makes
%% the path absolute (empty parent), "./" and "../" make it relative;
%% anything else is implicitly relative to ?THIS_NODE.
-spec from_string(string()) -> pattern().
from_string("") ->
    [];
from_string("/" ++ PathString) ->
    from_string(PathString, []);
from_string("./" ++ PathString) ->
    from_string(PathString, [?THIS_NODE]);
from_string(".") ->
    [?THIS_NODE];
from_string("../" ++ PathString) ->
    from_string(PathString, [?PARENT_NODE]);
from_string("..") ->
    [?PARENT_NODE];
from_string(PathString) ->
    from_string(PathString, [?THIS_NODE]).
%% Recursive worker for from_string/1: peel one component off the front
%% of the string and accumulate it (reversed) in ParentPath.
%% The first regex handles binary-literal components ("<<...>>", which
%% may themselves contain "/"); the second splits on the next "/".
%% Empty components (consecutive slashes) are skipped.
from_string(PathString, ParentPath) ->
    ReOpts = [{capture, all_but_first, list}, dotall],
    case re:run(PathString, "^((?U)<<.*>>)(?:/(.*)|$)", ReOpts) of
        {match, [ComponentString, Rest]} ->
            Component = component_from_string(ComponentString),
            from_string(Rest, [Component | ParentPath]);
        {match, [ComponentString]} ->
            %% Last component of the string: terminate the recursion.
            Component = component_from_string(ComponentString),
            lists:reverse([Component | ParentPath]);
        nomatch ->
            case re:run(PathString, "^([^/]*)(?:/(.*)|$)", ReOpts) of
                {match, ["", Rest]} ->
                    %% Empty component ("//"): skip it.
                    from_string(Rest, ParentPath);
                {match, [ComponentString, Rest]} ->
                    Component = component_from_string(ComponentString),
                    from_string(Rest, [Component | ParentPath]);
                {match, [""]} ->
                    %% Trailing slash: nothing more to add.
                    lists:reverse(ParentPath);
                {match, [ComponentString]} ->
                    Component = component_from_string(ComponentString),
                    lists:reverse([Component | ParentPath])
            end
    end.
%% Parse a single string component into a native component: special
%% names ("/", ".", "..", "*", "**") map to their macros, "<<...>>"
%% denotes a binary component, a "*" inside a name becomes a regex
%% condition, and everything else becomes an atom.
-spec component_from_string(string()) -> pattern_component().
component_from_string("/") ->
    ?ROOT_NODE;
component_from_string(".") ->
    ?THIS_NODE;
component_from_string("..") ->
    ?PARENT_NODE;
component_from_string("*") ->
    ?STAR;
component_from_string("**") ->
    ?STAR_STAR;
component_from_string(Component) ->
    %% Strip an optional "<<...>>" wrapper; if present, the component is
    %% meant to be a binary rather than an atom.
    ReOpts1 = [{capture, all_but_first, list}, dotall],
    Component1 = case re:run(Component, "^<<(.*)>>$", ReOpts1) of
                     {match, [C]} -> C;
                     nomatch -> Component
                 end,
    ReOpts2 = [{capture, none}],
    case re:run(Component1, "\\*", ReOpts2) of
        match ->
            %% Glob-style "*" inside the name: turn it into an anchored
            %% regex condition.
            ReOpts3 = [global, {return, list}],
            Regex = re:replace(Component1, "\\*", ".*", ReOpts3),
            #if_name_matches{regex = "^" ++ Regex ++ "$"};
        nomatch when Component1 =:= Component ->
            %% NOTE(review): creates a new atom from the path string;
            %% atoms are never GC'd, so parsing unbounded untrusted
            %% paths can exhaust the atom table — confirm callers only
            %% pass trusted/bounded input.
            list_to_atom(Component);
        nomatch ->
            list_to_binary(Component1)
    end.
-spec maybe_from_string(pattern() | string()) -> pattern().

%% @doc Accept either an already-parsed pattern or a string and return a
%% parsed pattern.
%%
%% The heuristic relies on strings being lists of character codes: a list
%% whose head is a plain integer (a character) is parsed with
%% from_string/1, while a list whose head is a node ID or condition is
%% assumed to already be a pattern. A two-element check disambiguates
%% inputs whose first element could be either a special path component or
%% a character.
maybe_from_string([Component | _] = Path)
  when ?IS_NODE_ID(Component) orelse
       ?IS_CONDITION(Component) ->
    Path;
maybe_from_string([?ROOT_NODE]) ->
    %% A lone root component is the empty (absolute) path.
    [];
maybe_from_string([Component] = Path)
  when ?IS_SPECIAL_PATH_COMPONENT(Component) ->
    Path;
maybe_from_string([] = Path) ->
    Path;
maybe_from_string([Char | _] = Path)
  when is_integer(Char) andalso
       Char >= 0 andalso Char =< 16#10ffff andalso
       not ?IS_SPECIAL_PATH_COMPONENT(Char) ->
    %% Head is a Unicode codepoint: treat the whole list as a string.
    from_string(Path);
maybe_from_string([Char1, Char2 | _] = Path)
  when ?IS_SPECIAL_PATH_COMPONENT(Char1) ->
    %% Ambiguous head: decide based on the second element.
    if
        ?IS_NODE_ID(Char2) orelse ?IS_CONDITION(Char2) -> Path;
        true -> from_string(Path)
    end.

-spec to_string(path()) -> string().

%% @doc Render a concrete path back to its string form.
%%
%% Paths anchored at the current or parent node stay relative; all other
%% paths are rendered as absolute (leading "/").
to_string([?ROOT_NODE | Path]) ->
    "/" ++
    string:join(
      lists:map(fun component_to_string/1, Path),
      "/");
to_string([?THIS_NODE = Component]) ->
    component_to_string(Component);
to_string([?THIS_NODE | Path]) ->
    string:join(
      lists:map(fun component_to_string/1, Path),
      "/");
to_string([?PARENT_NODE | _] = Path) ->
    string:join(
      lists:map(fun component_to_string/1, Path),
      "/");
to_string(Path) ->
    "/" ++
    string:join(
      lists:map(fun component_to_string/1, Path),
      "/").

-spec component_to_string(component()) -> string().

%% @private
%% @doc Render one concrete path component: special components map back to
%% their tokens, atoms are printed literally and binaries are wrapped in
%% `<<...>>'.
component_to_string(?ROOT_NODE) ->
    "/";
component_to_string(?THIS_NODE) ->
    ".";
component_to_string(?PARENT_NODE) ->
    "..";
component_to_string(Component) when is_atom(Component) ->
    atom_to_list(Component);
component_to_string(Component) when is_binary(Component) ->
    lists:flatten(
      io_lib:format("<<~s>>", [Component])).
-spec combine_with_conditions(pattern(), [khepri_condition:condition()]) ->
    pattern().

%% @doc Attach extra conditions to the last component of Path.
%%
%% The final component is wrapped together with the given conditions in a
%% single #if_all{}; a path combined with no conditions is returned
%% unchanged.
combine_with_conditions(Path, []) ->
    Path;
combine_with_conditions(Path, Conditions) ->
    [ChildName | Rest] = lists:reverse(Path),
    Combined = #if_all{conditions = [ChildName | Conditions]},
    lists:reverse([Combined | Rest]).

-spec targets_specific_node(pattern()) ->
    {true, path()} | false.

%% @doc Determine whether a path pattern can only ever match one node.
%%
%% Returns {true, Path} with the concrete path when every component pins
%% a single child name; returns false as soon as one component is
%% ambiguous.
targets_specific_node(PathPattern) ->
    targets_specific_node(PathPattern, []).

%% @private
targets_specific_node([Condition | Rest], Path) ->
    case component_targets_specific_node(Condition) of
        {true, Component} -> targets_specific_node(Rest, [Component | Path]);
        false -> false
    end;
targets_specific_node([], Path) ->
    {true, lists:reverse(Path)}.
-spec component_targets_specific_node(pattern_component()) ->
    {true, component()} | false.
%% @private
%% @doc Determine whether a single pattern component pins one child name.
%%
%% A literal path component targets itself. For #if_all{}/#if_any{} the
%% nested conditions are folded: `undefined' means "no opinion yet",
%% {true, Name} means "pins Name", and `false' means "does not pin a
%% single name". Two conditions pinning different names yield false.
%% NOTE(review): in the #if_all{} fold, a condition evaluated after the
%% accumulator became `false' can flip it back to {true, _} — presumably
%% `false' here means "no opinion" rather than "contradiction"; confirm.
component_targets_specific_node(ChildName)
  when ?IS_PATH_COMPONENT(ChildName) ->
    {true, ChildName};
component_targets_specific_node(#if_not{condition = Cond}) ->
    component_targets_specific_node(Cond);
component_targets_specific_node(#if_all{conditions = []}) ->
    false;
component_targets_specific_node(#if_all{conditions = Conds}) ->
    lists:foldl(
      fun
          (Cond, {true, _} = True) ->
              case component_targets_specific_node(Cond) of
                  True -> True;
                  {true, _} -> false;
                  false -> True
              end;
          (Cond, false) ->
              case component_targets_specific_node(Cond) of
                  {true, _} = True -> True;
                  false -> false
              end;
          (Cond, undefined) ->
              component_targets_specific_node(Cond)
      end, undefined, Conds);
component_targets_specific_node(#if_any{conditions = []}) ->
    false;
component_targets_specific_node(#if_any{conditions = Conds}) ->
    %% For #if_any{}, all alternatives must pin the *same* name, otherwise
    %% the component is ambiguous.
    lists:foldl(
      fun
          (Cond, {true, _} = True) ->
              case component_targets_specific_node(Cond) of
                  True -> True;
                  {true, _} -> false;
                  false -> false
              end;
          (_, false) ->
              false;
          (Cond, undefined) ->
              component_targets_specific_node(Cond)
      end, undefined, Conds);
component_targets_specific_node(_) ->
    false.
-spec is_valid(PathPattern) -> IsValid when
    PathPattern :: pattern(),
    IsValid :: true | {false, pattern_component()}.
%% @doc Validate every component of a path pattern.
%%
%% Returns true, or {false, Component} for an invalid component (an
%% existing failure is kept; otherwise the verdict of the current
%% component is taken). A non-list argument is itself reported invalid.
is_valid(PathPattern) when is_list(PathPattern) ->
    lists:foldl(
      fun
          (_, {false, _} = False) -> False;
          (Component, _) -> khepri_condition:is_valid(Component)
      end, true, PathPattern);
is_valid(NotPathPattern) ->
    {false, NotPathPattern}.

-spec ensure_is_valid(PathPattern) -> ok | no_return() when
    PathPattern :: pattern().
%% @doc Like is_valid/1 but throws {invalid_path, Component} on failure.
ensure_is_valid(PathPattern) ->
    case is_valid(PathPattern) of
        true -> ok;
        {false, Path} -> throw({invalid_path, Path})
    end.

-spec abspath(pattern(), pattern()) -> pattern().
%% @doc Resolve RelativePath against BasePath.
%%
%% A path not starting with "." or ".." is already absolute and returned
%% as-is; a relative path is appended to BasePath and normalised. The
%% empty path (the root) is returned unchanged.
abspath([FirstComponent | _] = AbsolutePath, _)
  when FirstComponent =/= ?THIS_NODE andalso FirstComponent =/= ?PARENT_NODE ->
    AbsolutePath;
abspath([_ | _] = RelativePath, BasePath) ->
    realpath(BasePath ++ RelativePath, []);
abspath([] = PathToRoot, _) ->
    PathToRoot.

-spec realpath(pattern()) -> pattern().
%% @doc Normalise a path: resolve ".", ".." and embedded root components.
realpath(Path) ->
    realpath(Path, []).

%% @private
%% @doc Accumulator-based normalisation; Result holds the (reversed)
%% components resolved so far.
realpath([?ROOT_NODE | Rest], _Result) ->
    %% An embedded root component discards everything before it.
    realpath(Rest, []);
realpath([?THIS_NODE | Rest], Result) ->
    realpath(Rest, Result);
realpath([?PARENT_NODE | Rest], [_ | Result]) ->
    realpath(Rest, Result);
realpath([?PARENT_NODE | Rest], [] = Result) ->
    %% ".." at the root stays at the root.
    realpath(Rest, Result);
realpath([Component | Rest], Result) ->
    realpath(Rest, [Component | Result]);
realpath([], Result) ->
    lists:reverse(Result).
%% @doc Return true if the given path pattern can match the root node.
%%
%% The pattern is first normalised with realpath/1; a pattern consisting
%% of a single "match anything" name or path condition includes the root.
%% (Fixed: stray dataset metadata appended to the last line, which broke
%% compilation.)
pattern_includes_root_node(Path) ->
    pattern_includes_root_node1(realpath(Path)).

%% @private
pattern_includes_root_node1([#if_name_matches{regex = any}]) -> true;
pattern_includes_root_node1([#if_path_matches{regex = any}]) -> true;
pattern_includes_root_node1(_) -> false.
-module(matmul).

-export([start/1, gatherer/3, matsize_bytag/1, gen_dotprodme/3, dotprod_worker/0]).
-export([gen_col_vectors/1, split_matrix_bytag/1, split_row_vector_bytag/1]).
-export([do_multiply/3]).

% Example Matrix Multiplication demo program

% We expect InFile to contain a pair of "out" tuples for each of the
% two matrices.

%% @doc Entry point: load the two input matrices from InFile into the
%% tuple space, multiply mat_a by mat_b, and print the result matrix.
start(InFile) ->
    %% read the matrices
    espace:infile(InFile),
    Mat_C = do_multiply(mat_a, mat_b, mat_c),
    print_matrix(Mat_C),
    ok.

%%
%% Given multiple matrices with Tag1 and Tag2, save the result as Tag3
%%
%% @doc Orchestrate the multiplication: record the matrix sizes, split
%% both operands into vectors, generate one 'dotprodme' work item per
%% result element, start two workers plus a collector, wait for the
%% final matrix, then clean leftover tuples out of the tuple space.
do_multiply(Tag1, Tag2, Tag3) ->
    % get and save the matrix sizes
    matsize_bytag(Tag1),
    matsize_bytag(Tag2),
    % split both matrices into row vectors
    espace:worker({?MODULE, split_matrix_bytag, [Tag1]}),
    espace:worker({?MODULE, split_matrix_bytag, [Tag2]}),
    % for the second matrix, split further into elements, then
    % reassemble as column vectors
    espace:worker({?MODULE, split_row_vector_bytag, [Tag2]}),
    espace:worker({?MODULE, gen_col_vectors, [Tag2]}),
    % generate the place holders for the result matrix
    % these will be consumed by the dotprod worker processes
    espace:worker({?MODULE, gen_dotprodme, [Tag1, Tag2, Tag3]}),
    % two worker processes should be sufficient for our demo
    % each worker will take 'dotprodme' tuples and produce 'dotprod' tuples
    espace:worker({?MODULE, dotprod_worker, []}),
    espace:worker({?MODULE, dotprod_worker, []}),
    % the final results collector
    espace:worker({?MODULE, gatherer, [Tag1, Tag2, Tag3]}),
    % wait for the final matrix
    {[Matrix_C], _} = espace:rd({matrix, Tag3, '$1'}),
    % Now clean up the Tuple Space
    Tlist = [{matrix, '_', '_'},
             {matsize, '_', '_'},
             {row_vector, '_', '_', '_'},
             {col_vector, '_', '_', '_'},
             {dotprodme, '_', '_', '_', '_', '_'}],
    drain_tspool(Tlist),
    Matrix_C.
%% @doc Remove every tuple matching one of the given patterns from the
%% tuple space.
drain_tspool(Patterns) ->
    lists:foreach(fun drain_pattern/1, Patterns).

%% @doc Repeatedly take tuples matching Pattern out of the tuple space
%% until none remain.
drain_pattern(Pattern) ->
    case espace:inp(Pattern) of
        nomatch -> ok;
        {_, _} -> drain_pattern(Pattern)
    end.
%% @doc Print a matrix, one row per line, as plain Erlang terms.
print_matrix(Rows) ->
    lists:foreach(fun print_row/1, Rows).

%% @doc Print a single row vector.
print_row(Row) ->
    io:format("~w~n", [Row]).
%% @doc Collector process: waits for one 'dotprod' result per element of
%% the Nrows x Ncols result matrix, then assembles and publishes it as a
%% {matrix, Tag3, Matrix} tuple and signals the workers to stop.
gatherer(Tag1, Tag2, Tag3) ->
    % we need to know how many dot product elements to expect and wait for
    {[Nrows], _} = espace:rd({matsize, Tag1, {'$1', '_'}}),
    {[Ncols], _} = espace:rd({matsize, Tag2, {'_', '$2'}}),
    ResultElements = Nrows*Ncols,
    % save the count, which will be decremented as the results come in
    espace:out({dotprod_count, ResultElements}),
    % we wait until all done
    wait_for_results(Tag3),
    % put out the poison pill for the dotprod workers
    espace:out({dotprodme, 'STOP', x, x, x, x}),
    % Now let's build and save the result matrix
    espace:out({matrix, Tag3, make_matrix(Tag3, Nrows, Ncols)}),
    ok.

%% @doc Consume 'dotprod' tuples one at a time, re-emitting each as an
%% 'element' tuple (so it is not counted twice) and decrementing the
%% shared 'dotprod_count' until it reaches one.
wait_for_results(Tag) ->
    % wait for a new dotprod
    {[Row, Col, Val], _} = espace:in({dotprod, Tag, '$1', '$2', '$3'}),
    % put it back as an 'element', so that we don't count it again
    espace:out({element, Tag, Row, Col, Val}),
    % check the counter
    {[Count], _} = espace:in({dotprod_count, '$1'}),
    case Count of
        1 -> % was this the last one?
            done; % we have now collected all the dotprod tuples
        _ -> % still need to get some more!
            espace:out({dotprod_count, Count-1}),
            wait_for_results(Tag)
    end.
%% @doc Assemble the result matrix Tag as a list of row vectors, consuming
%% the 'element' tuples from the tuple space.
make_matrix(Tag, Nrows, Ncols) ->
    [make_row_vector(Tag, RowNum, Ncols) || RowNum <- lists:seq(1, Nrows)].

%% @doc Build one row of the matrix from its 'element' tuples.
make_row_vector(Tag, RowNum, Ncols) ->
    [element(Tag, RowNum, ColNum) || ColNum <- lists:seq(1, Ncols)].

%% @doc Build one column vector of the matrix from its 'element' tuples.
make_col_vector(Tag, ColNum, Nrows) ->
    [element(Tag, RowNum, ColNum) || RowNum <- lists:seq(1, Nrows)].
%% @doc For matrix Tag, build every column vector from the individual
%% 'element' tuples and publish each as a 'col_vector' tuple.
gen_col_vectors(Tag) ->
    {[Nrows, Ncols], _} = espace:rd({matsize, Tag, {'$1', '$2'}}),
    lists:foreach(
      fun (ColNum) -> espace:out({col_vector, Tag, ColNum, make_col_vector(Tag, ColNum, Nrows)}) end,
      lists:seq(1, Ncols)
     ).

%% @doc Take (destructively) the value of element (RowNum, ColNum) of
%% matrix Tag from the tuple space. Blocks until such a tuple appears.
element(Tag, RowNum, ColNum) ->
    {[Value], _} = espace:in({element, Tag, RowNum, ColNum, '$1'}),
    Value.

%% @doc Emit one 'dotprodme' work tuple for every (row, column) pair of
%% the Tag1 x Tag2 product, to be consumed by dotprod_worker/0.
gen_dotprodme(Tag1, Tag2, Tag3) ->
    {[Rows1, _], _} = espace:rd({matsize, Tag1, {'$1', '$2'}}),
    {[_, Cols2], _} = espace:rd({matsize, Tag2, {'$1', '$2'}}),
    % we generate the tuples sequentially, but we can also parallelize this
    DotProd = fun ({R, C}) -> espace:out({dotprodme, Tag1, R, Tag2, C, Tag3}) end,
    lists:foreach(DotProd, [ {R,C} || R <- lists:seq(1,Rows1), C <- lists:seq(1,Cols2)]),
    ok.

%% @doc Publish a lazily evaluated {matsize, Tag, {Rows, Cols}} tuple for
%% the matrix identified by Tag. The fun is evaluated by espace when the
%% tuple is produced.
matsize_bytag(Tag) ->
    espace:eval({matsize,
                 Tag,
                 fun () ->
                         {[Mat], _} = espace:rd({matrix, Tag, '$1'}),
                         Rows = length(Mat),
                         Cols = length(hd(Mat)),
                         {Rows, Cols}
                 end
                }
               ).

%% @doc Split the matrix identified by Tag into 'row_vector' tuples.
split_matrix_bytag(Tag) ->
    {[Matrix], _} = espace:rd({matrix, Tag, '$1'}),
    split_matrix(Matrix, Tag, 1).

%% @doc Emit one {row_vector, Tag, RowNum, Row} tuple per matrix row.
split_matrix([], _Tag, _RowNum) ->
    ok;
split_matrix([H|T], Tag, RowNum) ->
    espace:out({row_vector, Tag, RowNum, H}),
    split_matrix(T, Tag, RowNum+1).

%% @doc Break every row vector of matrix Tag down into one 'element'
%% tuple per matrix entry.
split_row_vector_bytag(Tag) ->
    {[Nrows], _} = espace:rd({matsize, Tag, {'$1', '_'}}),
    lists:foreach(
      fun (RowNum) ->
              {[RowVec],_} = espace:in({row_vector, Tag, RowNum, '$1'}),
              split_row_vector(RowVec, Tag, RowNum, 1)
      end,
      lists:seq(1, Nrows)).

%% @doc Emit one {element, Tag, RowNum, ColNum, Value} tuple per entry of
%% a single row vector.
split_row_vector([], _Tag, _RowNm, _ColNum) ->
    ok;
split_row_vector([H|T], Tag, RowNum, ColNum) ->
    espace:out({element, Tag, RowNum, ColNum, H}),
    split_row_vector(T, Tag, RowNum, ColNum+1).
%% @doc Worker loop: repeatedly take a 'dotprodme' work tuple, compute the
%% dot product of the referenced row and column vectors, and publish the
%% result as a 'dotprod' tuple. A tuple tagged 'STOP' terminates the
%% worker after being put back for the remaining workers.
dotprod_worker() ->
    {[Tag1, RowNum, Tag2, ColNum, Tag3], Tuple} = espace:in({dotprodme, '$1', '$2', '$3', '$4', '$5'}),
    case Tag1 of
        'STOP' -> % "poison pill"?
            espace:out(Tuple), % put it back for the fellow workers!
            done;
        _ ->
            {[Vec1], _} = espace:rd({row_vector, Tag1, RowNum, '$1'}),
            {[Vec2], _} = espace:rd({col_vector, Tag2, ColNum, '$1'}),
            DotProd = dot_prod(Vec1, Vec2),
            espace:out({dotprod, Tag3, RowNum, ColNum, DotProd}),
            dotprod_worker()
    end.
%% @doc Compute the dot product of two equal-length numeric vectors.
%% (Fixed: stray dataset metadata appended to the last line, which broke
%% compilation.)
dot_prod(Vec1, Vec2) when length(Vec1) == length(Vec2) ->
    lists:sum(lists:zipwith(fun (X, Y) -> X * Y end, Vec1, Vec2)).
%% -------------------------------------------------------------------
%%
%% Copyright <2013-2018> <
%% Technische Universität Kaiserslautern, Germany
%% Université Pierre et Marie Curie / Sorbonne-Université, France
%% Universidade NOVA de Lisboa, Portugal
%% Université catholique de Louvain (UCL), Belgique
%% INESC TEC, Portugal
%% >
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either expressed or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% List of the contributors to the development of Antidote: see AUTHORS file.
%% Description and complete License: see LICENSE file.
%% -------------------------------------------------------------------
%% @doc saturn_SUITE:
%% Test the basic api of saturn
-module(saturn_SUITE).

%% common_test callbacks
-export([
         init_per_suite/1,
         end_per_suite/1,
         init_per_testcase/2,
         end_per_testcase/2,
         all/0
        ]).

%% tests
-export([
         replication/1,
         remote_read/1
        ]).

-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").

%% @doc Suite setup: start a multi-DC test deployment via test_utils.
init_per_suite(Config) ->
    test_utils:init_multi_dc(?MODULE, Config).

end_per_suite(Config) ->
    Config.

init_per_testcase(_Name, Config) ->
    Config.

%% @doc Per-test teardown: log that the test case finished.
end_per_testcase(Name, _) ->
    ct:print("[ OK ] ~p", [Name]),
    ok.

%% @doc The test cases that make up this suite.
all() ->
    [replication,
     remote_read
    ].

%% @doc Locally registered name of the saturn client receiver on Node.
server_name(Node)->
    {saturn_client_receiver, Node}.

%% @doc Poll Node until reading Key yields ExpectedResult, sleeping
%% 500 ms between attempts. Returns the matching {ok, {Value, Clock}}.
eventual_read(Key, Node, ExpectedResult) ->
    eventual_read(Key, Node, ExpectedResult, 0).

eventual_read(Key, Node, ExpectedResult, Clock) ->
    Result = gen_server:call(server_name(Node), {read, Key, Clock}, infinity),
    case Result of
        {ok, {ExpectedResult, _Clock}} -> Result;
        _ ->
            ct:print("I read: ~p, expecting: ~p",[Result, ExpectedResult]),
            timer:sleep(500),
            eventual_read(Key, Node, ExpectedResult)
    end.
%% @doc Check that an update made at one leaf is eventually visible at
%% another leaf.
%% NOTE(review): [Leaf1 | Leaf2] binds Leaf2 to the *tail list* of the
%% leafs property, and that list is later used as a node name by
%% eventual_read/3 — presumably the leafs value has a shape that makes
%% this work; confirm against test_utils:init_multi_dc/2.
replication(Config) ->
    ct:print("Starting test replication", []),
    BKey={2, key1},
    [Leaf1 | Leaf2] = proplists:get_value(leafs, Config),
    %% Reading a key thats empty
    Result1 = gen_server:call(server_name(Leaf1), {read, BKey, 0}, infinity),
    ?assertMatch({ok, {empty, 0}}, Result1),
    %% Update key
    Result2 = gen_server:call(server_name(Leaf1), {update, BKey, 3, 0}, infinity),
    ?assertMatch({ok, _Clock1}, Result2),
    Result3 = gen_server:call(server_name(Leaf1), {read, BKey, 0}, infinity),
    ?assertMatch({ok, {3, _Clock1}}, Result3),
    Result4 = eventual_read(BKey, Leaf2, 3),
    ?assertMatch({ok, {3, _Clock1}}, Result4).
%% @doc Verify that a key in bucket 0 written at one leaf can be read
%% from another leaf, forcing a remote read.
%% (Fixed: stray dataset metadata appended to the last line, which broke
%% compilation.)
%% NOTE(review): [Leaf1 | Leaf2] binds Leaf2 to the *tail list* of the
%% leafs property, later used as a node name by eventual_read/3 —
%% presumably intentional; confirm against test_utils:init_multi_dc/2.
remote_read(Config) ->
    ct:print("Starting test remote_read", []),
    BKey = {0, key1},
    [Leaf1 | Leaf2] = proplists:get_value(leafs, Config),
    %% Reading a key that is still empty
    Result1 = gen_server:call(server_name(Leaf1), {read, BKey, 0}, infinity),
    ?assertMatch({ok, {empty, 0}}, Result1),
    %% Update the key
    Result2 = gen_server:call(server_name(Leaf1), {update, BKey, 3, 0}, infinity),
    ?assertMatch({ok, _Clock1}, Result2),
    Result3 = gen_server:call(server_name(Leaf1), {read, BKey, 0}, infinity),
    ?assertMatch({ok, {3, _Clock1}}, Result3),
    ct:print("About to perform the remote read", []),
    Result4 = eventual_read(BKey, Leaf2, 3),
    ?assertMatch({ok, {3, _Clock1}}, Result4).
%%%-------------------------------------------------------------------
%%% @author tumilok
%%% @copyright (C) 2020, <COMPANY>
%%% @doc
%%%
%%% @end
%%% Created : 24. Mar 2020 16:17
%%%-------------------------------------------------------------------
-module(pollution).
-author("tumilok").

%% API
-export([
    createMonitor/0,
    addStation/3,
    addValue/5,
    removeValue/4,
    getOneValue/4,
    getStationMean/3,
    getDailyMean/3,
    getHourlyMean/3,
    getDailyAverageDataCount/3,
    getDailyOverLimit/4,
    getMaximumGradientStations/2,
    getMinValue/2,
    getMaxValue/2
]).

% record contains the information about station
-record(station, {coords, name}).
% record contains the information about measurement
-record(measurement, {type, date}).

% function creates new monitor
%% A monitor is a map: #station{} key => (map of #measurement{} => value).
createMonitor() -> maps:new().

% function uses an argument key to find the full key of data
%% Key may be either a station name or a coordinates tuple; returns the
%% matching full #station{} key, or null when no station matches.
getStationKey([], _) -> null;
getStationKey([#station{name=Key, coords=Coords}|_], Key) -> #station{name=Key, coords=Coords};
getStationKey([#station{name=Name, coords=Key}|_], Key) -> #station{name=Name, coords=Key};
getStationKey([_|T], Key) -> getStationKey(T, Key).

% function adds new station to the given monitor
%% Returns the updated monitor, or {error, Reason} when the name or the
%% coordinates are of the wrong type or already in use.
addStation(Name, _, _) when not is_list(Name) -> {error, "Name must be a list"};
addStation(_, Coords, _) when not is_tuple(Coords) -> {error, "Coordinate must be a tuple"};
addStation(Name, Coords, Monitor) ->
    NameExists = getStationKey(maps:keys(Monitor), Name),
    CoordsExists = getStationKey(maps:keys(Monitor), Coords),
    case {NameExists, CoordsExists} of
        {null, null} -> Monitor#{#station{name=Name, coords=Coords} => maps:new()};
        {null, _} -> {error, "Coordinate of such a value is already exist"};
        {_, _} -> {error, "Name of such a value is already exist"}
    end.
% function finds station with the help of key
% and adds given measurement to it
%% Returns the updated monitor, or {error, Reason} when the station is
%% missing or a measurement with the same type and date already exists.
addValue(_, Date, _, _, _) when not is_tuple(Date) -> {error, "Date must be a tuple"};
addValue(_, _, Type, _, _) when not is_list(Type) -> {error, "Type must be a list"};
addValue(_, _, _, Val, _) when not (is_integer(Val) or is_float(Val)) -> {error, "Value must be a number"};
addValue(_, _, _, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
addValue(Key, Date, Type, Val, Monitor) ->
    Value = maps:find(getStationKey(maps:keys(Monitor), Key), Monitor),
    case Value of
        error -> {error, "Station doesn't exist"};
        {ok, Measurement} ->
            Exists = maps:find(#measurement{type=Type, date=Date}, Measurement),
            case Exists of
                error ->
                    %% NOTE(review): maps:put/3 and maps:update/3 behave the
                    %% same for a key that is already present in Monitor, so
                    %% this size branch looks redundant — confirm.
                    case maps:size(Measurement) of
                        0 -> maps:put(getStationKey(maps:keys(Monitor), Key), maps:put(#measurement{type=Type, date=Date}, Val, Measurement), Monitor);
                        _ -> maps:update(getStationKey(maps:keys(Monitor), Key), maps:put(#measurement{type=Type, date=Date}, Val, Measurement), Monitor)
                    end;
                _ -> {error, "Type and Date of such value are already exist"}
            end
    end.
% function finds measurement with the help of provided information
% and removes it from the station map
%% Returns the updated monitor, or {error, Reason} when the station or
%% the measurement does not exist.
removeValue(_, Date, _, _) when not is_tuple(Date) -> {error, "Date must be a tuple"};
removeValue(_, _, Type, _) when not is_list(Type) -> {error, "Type must be a list"};
removeValue(_, _, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
removeValue(Key, Date, Type, Monitor) ->
    Value = maps:find(getStationKey(maps:keys(Monitor), Key), Monitor),
    case Value of
        error -> {error, "Station doesn't exist"};
        {ok, Measurement} ->
            ToRemove = maps:find(#measurement{type=Type, date=Date}, Measurement),
            case ToRemove of
                error -> {error, "Measurements with such parameters don't exist"};
                _ -> NewMeasurement = maps:remove(#measurement{type=Type, date=Date}, Measurement),
                     maps:update(getStationKey(maps:keys(Monitor), Key), NewMeasurement, Monitor)
            end
    end.

% function finds measurement with the help of provided information
% and returns it's value
%% Returns the stored value, or {error, Reason} when the station or the
%% measurement does not exist.
getOneValue(_, Date, _, _) when not is_tuple(Date) -> {error, "Date must be a tuple"};
getOneValue(_, _, Type, _) when not is_list(Type) -> {error, "Type must be a list"};
getOneValue(_, _, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getOneValue(Key, Date, Type, Monitor) ->
    Value = maps:find(getStationKey(maps:keys(Monitor), Key), Monitor),
    case Value of
        error -> {error, "Station doesn't exist"};
        {ok, Measurement} ->
            ToReturn = maps:find(#measurement{type=Type, date=Date}, Measurement),
            case ToReturn of
                error -> {error, "Measurements with such parameters don't exist"};
                {ok, Val} -> Val
            end
    end.

% function finds station with the help of provided key
% and runs getStationMeanByType function
%% Mean of all measurements of the given Type at a single station
%% (0.0 when the station has no matching measurements).
getStationMean(_, Type, _) when not is_list(Type) -> {error, "Type must be a list"};
getStationMean(_, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getStationMean(Key, Type, Monitor) ->
    Value = maps:find(getStationKey(maps:keys(Monitor), Key), Monitor),
    case Value of
        error -> {error, "Station doesn't exist"};
        {ok, Measurement} ->
            getStationMeanByType(maps:to_list(Measurement), Type, 0, 0)
    end.
%% Average the values of all measurements of the given Type, seeded with
%% an initial Sum/Size pair. Returns 0.0 when there is nothing to average.
getStationMeanByType(Measurements, Type, Sum0, Size0) ->
    Matching = [Val || {#measurement{type = MType}, Val} <- Measurements,
                       MType =:= Type],
    case Size0 + length(Matching) of
        0 -> 0.0;
        Size -> (Sum0 + lists:sum(Matching)) / Size
    end.
% function iterates all stations,
% runs given function and returns mean
%% Fun is one of the *MeanByType/4 helpers returning {Sum, Count} for a
%% single station; the totals are accumulated over every station and the
%% global mean returned (0.0 when no measurement matched at all).
iterateStationsMean(_, [], _, _, _, _, 0) -> 0.0;
iterateStationsMean(_, [], _, _, _, Sum, Size) -> Sum / Size;
iterateStationsMean(Monitor, [H|T], Fun, Type, Arg, Sum, Size) ->
    {ok, Val} = maps:find(H, Monitor),
    {MSum, MSize} = Fun(maps:to_list(Val), Type, Arg, {0,0}),
    iterateStationsMean(Monitor, T, Fun, Type, Arg, Sum + MSum, Size + MSize).

% function runs recursive function iterateStationsMean
%% Mean of all measurements of the given Type taken on the given Day,
%% across every station in the monitor.
getDailyMean(Type, _, _) when not is_list(Type) -> {error, "Type must be a list"};
getDailyMean(_, Day, _) when not is_tuple(Day) -> {error, "Day must be a tuple"};
getDailyMean(_, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getDailyMean(Type, Day, Monitor) ->
    iterateStationsMean(Monitor, maps:keys(Monitor), fun getDailyMeanByType/4, Type, Day, 0, 0).
%% Fold over a station's measurements, accumulating a {Sum, Count} pair
%% of all values whose type is Type and whose date's day part equals Day.
getDailyMeanByType(Measurements, Type, Day, Acc0) ->
    lists:foldl(
      fun({#measurement{type = MType, date = {MDay, _}}, Val}, {Sum, Size})
            when MType =:= Type, MDay =:= Day ->
              {Sum + Val, Size + 1};
         (_, Acc) ->
              Acc
      end, Acc0, Measurements).
% function runs recursive function iterateStationsMean
%% Mean over every station of all measurements of the given Type taken
%% at the given Hour (on any day).
getHourlyMean(Type, _, _) when not is_list(Type) -> {error, "Type must be a list"};
getHourlyMean(_, Hour, _) when not is_integer(Hour) -> {error, "Day must be an integer"};
getHourlyMean(_, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getHourlyMean(Type, Hour, Monitor) ->
    iterateStationsMean(Monitor, maps:keys(Monitor), fun getHourlyMeanByType/4, Type, Hour, 0, 0).

% function iterates measurements, checks its
% type and hour, and returns sum and size of all suitable values
getHourlyMeanByType([], _, _, {Sum,Size}) -> {Sum,Size};
getHourlyMeanByType([{#measurement{type=Type, date={_ ,{Hour,_,_}}}, Val}|T], Type, Hour, {Sum,Size}) ->
    getHourlyMeanByType(T, Type, Hour, {Sum+Val,Size+1});
getHourlyMeanByType([_|T], Type, Hour, {Sum,Size}) ->
    getHourlyMeanByType(T, Type, Hour, {Sum,Size}).

% function runs recursive function finds station and runs countMeasurementsByDay
%% Number of measurements (of any type) recorded at the given station on
%% the given day, or {error, Reason}.
getDailyAverageDataCount(_, Date, _) when not is_tuple(Date) -> {error, "Date must be a tuple"};
getDailyAverageDataCount(_, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getDailyAverageDataCount(Key, Date, Monitor) ->
    Value = maps:find(getStationKey(maps:keys(Monitor), Key), Monitor),
    case Value of
        error -> {error, "Station doesn't exist"};
        {ok, Measurement} -> countMeasurementsByDay(maps:to_list(Measurement), Date, 0)
    end.

% function iterates measurements, checks its
% date, and returns amount of all suitable values
countMeasurementsByDay([], _, Count) -> Count;
countMeasurementsByDay([{#measurement{date={Date, _}}, _}|T], Date, Count) ->
    countMeasurementsByDay(T, Date, Count+1);
countMeasurementsByDay([_|T], Date, Count) ->
    countMeasurementsByDay(T, Date, Count).
% function runs recursive function iterateStationsLimit
%% @doc Count the stations that, on the given day, have at least one
%% measurement of the given Type whose value exceeds Limit.
getDailyOverLimit(Date, _, _, _) when not is_tuple(Date) -> {error, "Date must be a tuple"};
getDailyOverLimit(_, Type, _, _) when not is_list(Type) -> {error, "Type must be a list"};
getDailyOverLimit(_, _, Limit, _) when not is_number(Limit) -> {error, "Limit must be a number"};
%% Fixed guard: the original `when not Monitor == #{}` parses as
%% `(not Monitor) == #{}` (unary `not` binds tighter than `==`), which
%% always fails in a guard, so the empty-monitor clause was dead code.
getDailyOverLimit(_, _, _, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getDailyOverLimit(Date, Type, Limit, Monitor) ->
    iterateStationsLimit(Monitor, maps:keys(Monitor), Date, Type, Limit, 0).
% function iterates all stations,runs
% isOverLimit function and returns
% number of stations that exceeded limit
iterateStationsLimit(_, [], _, _, _, Count) -> Count;
iterateStationsLimit(Monitor, [H|T], Date, Type, Limit, Count) ->
    {ok, Val} = maps:find(H, Monitor),
    IsTrue = isOverLimit(maps:to_list(Val), Date, Type, Limit),
    case IsTrue of
        true -> iterateStationsLimit(Monitor, T, Date, Type, Limit, Count+1);
        false -> iterateStationsLimit(Monitor, T, Date, Type, Limit, Count)
    end.

% function iterates measurements, checks its
% Type and Date, and returns true if value
% of measurement was greater than limit
%% Short-circuits: stops scanning as soon as one matching measurement
%% exceeds the limit.
isOverLimit([], _, _, _) -> false;
isOverLimit([{#measurement{type=Type, date={Date, _}}, Val}|T], Date, Type, Limit) ->
    case Val > Limit of
        true -> true;
        false -> isOverLimit(T, Date, Type, Limit)
    end;
isOverLimit([_|T], Date, Type, Limit) ->
    isOverLimit(T, Date, Type, Limit).
% function runs recursive function iterateStationsMinMaxDiff
% with getDiff function as argument
%% @doc Difference between the largest and smallest value of the given
%% measurement Type across all stations, or `error' when none matched.
getMaximumGradientStations(Type, _) when not is_list(Type) -> {error, "Type must be a list"};
%% Fixed guard: the original `when not Monitor == #{}` parses as
%% `(not Monitor) == #{}` and can never succeed in a guard, so the
%% empty-monitor check was dead code.
getMaximumGradientStations(_, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getMaximumGradientStations(Type, Monitor) ->
    iterateStationsMinMaxDiff(Monitor, maps:keys(Monitor), fun getDiff/2, Type, 100000, -100000).

% function returns difference of values
getDiff(Min, Max) -> Max - Min.
% function runs recursive function iterateStationsMinMaxDiff
% with getMin function as argument
%% @doc Smallest value of the given measurement Type across all stations,
%% or `error' when none matched.
getMinValue(Type, _) when not is_list(Type) -> {error, "Type must be a list"};
%% Fixed guard: the original `when not Monitor == #{}` parses as
%% `(not Monitor) == #{}` and can never succeed in a guard, so the
%% empty-monitor check was dead code.
getMinValue(_, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getMinValue(Type, Monitor) ->
    iterateStationsMinMaxDiff(Monitor, maps:keys(Monitor), fun getMin/2, Type, 100000, -100000).

% function returns min value
getMin(Min, _) -> Min.
% function runs recursive function iterateStationsMinMaxDiff
% with getMax function as argument
%% @doc Largest value of the given measurement Type across all stations,
%% or `error' when none matched.
getMaxValue(Type, _) when not is_list(Type) -> {error, "Type must be a list"};
%% Fixed guard: the original `when not Monitor == #{}` parses as
%% `(not Monitor) == #{}` and can never succeed in a guard, so the
%% empty-monitor check was dead code.
getMaxValue(_, Monitor) when Monitor == #{} -> {error, "Monitor is empty"};
getMaxValue(Type, Monitor) ->
    iterateStationsMinMaxDiff(Monitor, maps:keys(Monitor), fun getMax/2, Type, 100000, -100000).

% function returns max value
getMax(_, Max) -> Max.
% function iterates all stations,runs
% getMaximumGradientStationsByType function and returns
% given function with values min and max
%% Folds the per-station {Min, Max} of the given measurement Type into a
%% global minimum/maximum and finally applies Fun(Min, Max). Returns
%% `error' when no station had a matching measurement.
%% NOTE(review): 100000 / -100000 are sentinel bounds — measurement
%% values outside that range would be silently ignored; confirm the
%% expected value domain.
iterateStationsMinMaxDiff(_, [], _, _, 100000, -100000) -> error;
iterateStationsMinMaxDiff(_, [], Fun, _, Min, Max) -> Fun(Min, Max);
iterateStationsMinMaxDiff(Monitor, [H|T], Fun, Type, Min, Max) ->
    {ok, Val} = maps:find(H, Monitor),
    {MMin, MMax} = getMaximumGradientStationsByType(maps:to_list(Val), Type, {100000,-100000}),
    case {MMin < Min, MMax > Max} of
        {true, true} -> iterateStationsMinMaxDiff(Monitor, T, Fun, Type, MMin, MMax);
        {true, false} -> iterateStationsMinMaxDiff(Monitor, T, Fun, Type, MMin, Max);
        {false, true} -> iterateStationsMinMaxDiff(Monitor, T, Fun, Type, Min, MMax);
        {false, false} -> iterateStationsMinMaxDiff(Monitor, T, Fun, Type, Min, Max)
    end.
% function iterates measurements, checks its
% Type, and returns min and max value
%% @doc Walk a station's measurements and return the {Min, Max} of all
%% values of the given Type, starting from the caller-supplied sentinels.
%% (Fixed: stray dataset metadata appended to the last line; the four-way
%% case analysis is collapsed with min/2 and max/2.)
getMaximumGradientStationsByType([], _Type, MinMax) ->
    MinMax;
getMaximumGradientStationsByType([{#measurement{type = Type}, Val} | T], Type, {Min, Max}) ->
    getMaximumGradientStationsByType(T, Type, {min(Val, Min), max(Val, Max)});
getMaximumGradientStationsByType([_ | T], Type, MinMax) ->
    getMaximumGradientStationsByType(T, Type, MinMax).
-module(complex_numbers).

%% Tolerance used by equal/2 for approximate comparison of components.
-define(DELTA, 0.005).

-export([abs/1,
         add/2,
         conjugate/1,
         divide/2,
         equal/2,
         exp/1,
         imaginary/1,
         mul/2,
         new/2,
         real/1,
         sub/2,
         test_version/0]).

%% A complex number Re + Im*i, stored as its two numeric components.
-record(complex, {real = 0 :: number(), imaginary = 0 :: number()}).
-opaque complex() :: #complex{}.
-export_type([complex/0]).

%% API
%% @doc Magnitude (modulus) of a complex number: sqrt(re^2 + im^2).
-spec abs(complex()) -> number().
abs(#complex{real = Re, imaginary = Im}) ->
    math:sqrt(math:pow(Re, 2) + math:pow(Im, 2)).

%% @doc Component-wise sum of two complex numbers.
-spec add(complex(), complex()) -> complex().
add(#complex{real = ReA, imaginary = ImA},
    #complex{real = ReB, imaginary = ImB}) ->
    #complex{real = ReA + ReB, imaginary = ImA + ImB}.

%% @doc Complex conjugate: negates the imaginary part.
-spec conjugate(complex()) -> complex().
conjugate(#complex{real = Re, imaginary = Im}) ->
    #complex{real = Re, imaginary = -Im}.

%% @doc Complex division Z1 / Z2, via multiplication by the conjugate of
%% the divisor over its squared magnitude.
-spec divide(complex(), complex()) -> complex().
divide(#complex{real = ReA, imaginary = ImA},
       #complex{real = ReB, imaginary = ImB}) ->
    Denom = math:pow(ReB, 2) + math:pow(ImB, 2),
    #complex{real = (ReA * ReB + ImA * ImB) / Denom,
             imaginary = (ImA * ReB - ReA * ImB) / Denom}.
%% @doc Approximate equality: true when both components of Z1 and Z2
%% differ by at most ?DELTA.
-spec equal(complex(), complex()) -> boolean().
equal(#complex{real = ReA, imaginary = ImA},
      #complex{real = ReB, imaginary = ImB})
  when erlang:abs(ReB - ReA) =< ?DELTA,
       erlang:abs(ImB - ImA) =< ?DELTA ->
    true;
equal(_Z1, _Z2) ->
    false.

%% @doc Complex exponential: e^Z = e^Re * (cos(Im) + i*sin(Im)).
-spec exp(complex()) -> complex().
exp(#complex{real = Re, imaginary = Im}) ->
    Scale = math:exp(Re),
    #complex{real = Scale * math:cos(Im),
             imaginary = Scale * math:sin(Im)}.
%% @doc Imaginary component accessor.
-spec imaginary(complex()) -> number().
imaginary(#complex{imaginary = Im}) -> Im.

%% @doc Complex product: (a+bi)(c+di) = (ac - bd) + (bc + ad)i.
-spec mul(complex(), complex()) -> complex().
mul(#complex{real = ReA, imaginary = ImA},
    #complex{real = ReB, imaginary = ImB}) ->
    #complex{real = ReA * ReB - ImA * ImB,
             imaginary = ImA * ReB + ReA * ImB}.

%% @doc Construct a complex number from its real and imaginary parts.
-spec new(number(), number()) -> complex().
new(Re, Im) -> #complex{real = Re, imaginary = Im}.

%% @doc Real component accessor.
-spec real(complex()) -> number().
real(#complex{real = Re}) -> Re.

%% @doc Component-wise difference Z1 - Z2.
-spec sub(complex(), complex()) -> complex().
sub(#complex{real = ReA, imaginary = ImA},
    #complex{real = ReB, imaginary = ImB}) ->
    #complex{real = ReA - ReB, imaginary = ImA - ImB}.
%% @doc Version of the exercism test suite this solution targets.
%% (Fixed: stray dataset metadata appended to the last line, which broke
%% compilation.)
-spec test_version() -> integer().
test_version() ->
    2.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% %CopyrightEnd%
%%
%%
%%% Description: SSH math utilities
-module(ssh_math).

-export([ilog2/1, ipow/3, invert/2, ipow2/3]).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% INTEGER utils
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% number of bits (used) in a integer = isize(N) = |log2(N)|+1
%% i.e. the zero-based position of the most significant bit of N.
ilog2(N) ->
    ssh_bits:isize(N) - 1.

%% calculate A^B mod M
%% NOTE(review): crypto:mod_exp/3 is the old (pre-R14B) crypto API;
%% confirm against the targeted OTP release.
ipow(A, B, M) when M > 0, B >= 0 ->
    crypto:mod_exp(A, B, M).
%% Pure-Erlang modular exponentiation: A^B mod M by iterative
%% square-and-multiply. Requires M > 0 and B >= 0.
ipow2(A, B, M) when M > 0, B >= 0, A == 1 ->
    %% 1^B is always 1; skip the loop entirely.
    1;
ipow2(A, B, M) when M > 0, B >= 0 ->
    ipow2(A, B, M, 1).

%% Accumulator loop: Acc collects the multiplied-in squarings, reduced
%% modulo M at every step to keep the intermediate numbers small.
ipow2(A, 1, M, Acc) ->
    (A * Acc) rem M;
ipow2(_A, 0, _M, Acc) ->
    Acc;
ipow2(A, B, M, Acc) when B band 1 =:= 0 ->
    %% Even exponent: square the base, halve the exponent.
    ipow2((A * A) rem M, B bsr 1, M, Acc);
ipow2(A, B, M, Acc) ->
    %% Odd exponent: also fold the current base into the accumulator.
    ipow2((A * A) rem M, B bsr 1, M, (A * Acc) rem M).
%% %%
%% %% Normal gcd
%% %%
%% gcd(R, Q) when abs(Q) < abs(R) -> gcd1(Q,R);
%% gcd(R, Q) -> gcd1(R,Q).
%% gcd1(0, Q) -> Q;
%% gcd1(R, Q) ->
%% gcd1(Q rem R, R).
%% %%
%% %% Least common multiple of (R,Q)
%% %%
%% lcm(0, _Q) -> 0;
%% lcm(_R, 0) -> 0;
%% lcm(R, Q) ->
%% (Q div gcd(R, Q)) * R.
%% %%
%% %% Extended gcd gcd(R,Q) -> {G, {A,B}} such that G == R*A + Q*B
%% %%
%% %% Here we could have use for a bif divrem(Q, R) -> {Quote, Remainder}
%% %%
%% egcd(R,Q) when abs(Q) < abs(R) -> egcd1(Q,R,1,0,0,1);
%% egcd(R,Q) -> egcd1(R,Q,0,1,1,0).
%% egcd1(0,Q,_,_,Q1,Q2) -> {Q, {Q2,Q1}};
%% egcd1(R,Q,R1,R2,Q1,Q2) ->
%% D = Q div R,
%% egcd1(Q rem R, R, Q1-D*R1, Q2-D*R2, R1, R2).
%%
%% Invert an element X mod P
%% Calculated as {1, {A,B}} = egcd(X,P),
%% 1 == P*A + X*B == X*B (mod P) i.e B is the inverse element
%%
%% X > 0, P > 0, X < P (P should be prime)
%%
%% Modular multiplicative inverse of X mod P for 0 < X < P, computed with
%% the iterative extended Euclidean algorithm (P should be prime for an
%% inverse to always exist). The raw coefficient may come out negative,
%% in which case it is shifted into the range [0, P).
invert(X, P) when X > 0, P > 0, X < P ->
    case inv(X, P, 1, 0) of
        Neg when Neg < 0 -> P + Neg;
        NonNeg -> NonNeg
    end.

%% Extended-gcd step tracking only the coefficient of the original X.
inv(0, _P, _Coef, Acc) ->
    Acc;
inv(X, P, Coef, Acc) ->
    Q = P div X,
    inv(P rem X, X, Acc - Q * Coef, Coef).
%% %%
%% %% Integer square root
%% %%
%% isqrt(0) -> 0;
%% isqrt(1) -> 1;
%% isqrt(X) when X >= 0 ->
%% R = X div 2,
%% isqrt(X div R, R, X).
%% isqrt(Q,R,X) when Q < R ->
%% R1 = (R+Q) div 2,
%% isqrt(X div R1, R1, X);
%% isqrt(_, R, _) -> R.
%% @doc Physics: 4th-order Runge-Kutta integration
%%
%% This module is loosely based on concepts from the following articles:
%% <ul>
%% <li>[http://gafferongames.com/game-physics/integration-basics/]</li>
%% <li>[http://gafferongames.com/game-physics/physics-in-3d/]</li>
%% </ul>
% @copyright 2012 <NAME>
% Licensed under the MIT license; see the LICENSE file for details.
-module(pre_physics_rk4).
% -------------------------------------------------------------------------
% external api
-export([simulate/1, default_physical/0, to_proplist/1, diff_to_proplist/2, from_proplist/1,
update_from_proplist/2, get_prop/2]).
% -------------------------------------------------------------------------
-record(physical, {
% Updated values (assume these change every frame)
position = {0, 0, 0} :: vector:vec(),
linear_momentum = {0, 0, 0} :: vector:vec(),
orientation = {1, 0, 0, 0} :: quaternion:quat(),
angular_momentum = {0, 0, 0} :: vector:vec(),
% Input-only values
force_absolute = {0, 0, 0} :: vector:vec(),
force_relative = {0, 0, 0} :: vector:vec(),
torque_absolute = {0, 0, 0} :: vector:vec(),
torque_relative = {0, 0, 0} :: vector:vec(),
% Purely calculated values (DON'T try to change these externally)
last_update :: erlang:timestamp(),
linear_velocity = {0, 0, 0} :: vector:vec(),
angular_velocity = {0, 0, 0} :: vector:vec(),
spin = {1, 0, 0, 0} :: quaternion:quat(),
% Intrinsic values (should NOT change during the life of an object)
mass = 1 :: float(),
inverse_mass = 1 :: float(),
inertia_tensor = 1 :: float(),
inverse_inertia_tensor = 1 :: float()
}).
%% ------------------------------------------------------------------------
%% External API
%% ------------------------------------------------------------------------
%% @doc Simulate physical movement of the 'physical' object represented by `InitialPhysical`, over the time since that
%% object's 'last_update' timestamp.
simulate(InitialPhysical) ->
    LastUpdate = InitialPhysical#physical.last_update,
    ThisUpdate = os:timestamp(),
    %% timer:now_diff/2 yields microseconds; divide to get seconds.
    NewPhysical = simulate(timer:now_diff(ThisUpdate, LastUpdate) / 1000000, InitialPhysical),
    %% Stamp the result so the next call integrates from this instant.
    NewPhysical#physical {
        last_update = ThisUpdate
    }.

%% @doc Simulate physical movement of the 'physical' object represented by
%% `InitialPhysical', over the given `TimeDelta'.
simulate(TimeDelta, InitialPhysical) ->
    simulate_internal(TimeDelta, InitialPhysical).

%% ------------------------------------------------------------------------

%% @doc Create a new 'physical' record, with the 'last_update' timestamp set to the current time.
default_physical() ->
    #physical{last_update = os:timestamp()}.
%% ------------------------------------------------------------------------
%% @doc Convert a #physical{} into a proplist with vectors/quaternions
%% rendered as plain lists.  'last_update' is deliberately omitted.
to_proplist(Physical) ->
    #physical{
        position = Position,
        linear_momentum = LinearMomentum,
        orientation = Orientation,
        angular_momentum = AngularMomentum,
        force_absolute = ForceAbs,
        force_relative = ForceRel,
        torque_absolute = TorqueAbs,
        torque_relative = TorqueRel,
        linear_velocity = LinearVelocity,
        angular_velocity = AngularVelocity,
        spin = Spin,
        mass = Mass,
        inverse_mass = InverseMass,
        inertia_tensor = InertiaTensor,
        inverse_inertia_tensor = InverseInertiaTensor
    } = Physical,
    [
        {position, vector:vec_to_list(Position)},
        {linear_momentum, vector:vec_to_list(LinearMomentum)},
        {orientation, quaternion:quat_to_list(Orientation)},
        {angular_momentum, vector:vec_to_list(AngularMomentum)},
        {force_absolute, vector:vec_to_list(ForceAbs)},
        {force_relative, vector:vec_to_list(ForceRel)},
        {torque_absolute, vector:vec_to_list(TorqueAbs)},
        {torque_relative, vector:vec_to_list(TorqueRel)},
        {linear_velocity, vector:vec_to_list(LinearVelocity)},
        {angular_velocity, vector:vec_to_list(AngularVelocity)},
        {spin, quaternion:quat_to_list(Spin)},
        {mass, Mass},
        {inverse_mass, InverseMass},
        {inertia_tensor, InertiaTensor},
        {inverse_inertia_tensor, InverseInertiaTensor}
    ].
%% ------------------------------------------------------------------------
%% @doc Proplist containing only the fields that differ between
%% `OldPhysical' and `NewPhysical' (values taken from the new record).
%% 'last_update' is deliberately excluded, matching to_proplist/1.
diff_to_proplist(OldPhysical, NewPhysical) ->
    Keys = [position, linear_momentum, orientation, angular_momentum,
            force_absolute, force_relative, torque_absolute, torque_relative,
            linear_velocity, angular_velocity, spin,
            mass, inverse_mass, inertia_tensor, inverse_inertia_tensor],
    filter_diff_list([{Key, get_prop(Key, NewPhysical), get_prop(Key, OldPhysical)}
                      || Key <- Keys]).
%% Drop unchanged {Key, New, Old} entries and render the remaining new
%% values: 4-tuples are treated as quaternions, 3-tuples as vectors, and
%% anything else is passed through unchanged.
filter_diff_list(Entries) ->
    lists:filtermap(
        fun
            ({_Key, Unchanged, Unchanged}) ->
                false;
            ({Key, {_, _, _, _} = Quat, _Old}) ->
                {true, {Key, quaternion:quat_to_list(Quat)}};
            ({Key, {_, _, _} = Vec, _Old}) ->
                {true, {Key, vector:vec_to_list(Vec)}};
            ({Key, Value, _Old}) ->
                {true, {Key, Value}}
        end,
        Entries).
%% ------------------------------------------------------------------------
%% @doc Build a #physical{} from a proplist, starting from default field values.
from_proplist(Props) ->
    update_from_proplist(#physical{}, Props).
%% ------------------------------------------------------------------------
%% @doc Overwrite fields of `Physical' with the values found in the
%% proplist.  Unrecognized keys crash with function_clause (no catch-all).
update_from_proplist(Physical, []) ->
    Physical;
%% A list containing a single empty tuple is treated as an empty proplist.
%% NOTE(review): presumably this is how an empty JSON object decodes in the
%% caller's JSON library -- confirm against the callers.
update_from_proplist(Physical, [{}]) ->
    Physical;
update_from_proplist(Physical, [{position, Val} | Rest]) ->
    update_from_proplist(Physical#physical{position = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{linear_momentum, Val} | Rest]) ->
    update_from_proplist(Physical#physical{linear_momentum = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{orientation, Val} | Rest]) ->
    update_from_proplist(Physical#physical{orientation = quaternion:to_quat(Val)}, Rest);
update_from_proplist(Physical, [{angular_momentum, Val} | Rest]) ->
    update_from_proplist(Physical#physical{angular_momentum = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{force_absolute, Val} | Rest]) ->
    update_from_proplist(Physical#physical{force_absolute = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{force_relative, Val} | Rest]) ->
    update_from_proplist(Physical#physical{force_relative = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{torque_absolute, Val} | Rest]) ->
    update_from_proplist(Physical#physical{torque_absolute = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{torque_relative, Val} | Rest]) ->
    update_from_proplist(Physical#physical{torque_relative = vector:to_vec(Val)}, Rest);
%% NOTE(review): last_update is an erlang:timestamp() ({Mega, Secs, Micro}),
%% yet it is run through vector:to_vec/1 -- presumably to turn a 3-element
%% list into a 3-tuple, which happens to match the timestamp shape.
%% Confirm this is intentional and not a copy-paste slip.
update_from_proplist(Physical, [{last_update, Val} | Rest]) ->
    update_from_proplist(Physical#physical{last_update = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{linear_velocity, Val} | Rest]) ->
    update_from_proplist(Physical#physical{linear_velocity = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{angular_velocity, Val} | Rest]) ->
    update_from_proplist(Physical#physical{angular_velocity = vector:to_vec(Val)}, Rest);
update_from_proplist(Physical, [{spin, Val} | Rest]) ->
    update_from_proplist(Physical#physical{spin = quaternion:to_quat(Val)}, Rest);
update_from_proplist(Physical, [{mass, Val} | Rest]) ->
    update_from_proplist(Physical#physical{mass = Val}, Rest);
update_from_proplist(Physical, [{inverse_mass, Val} | Rest]) ->
    update_from_proplist(Physical#physical{inverse_mass = Val}, Rest);
update_from_proplist(Physical, [{inertia_tensor, Val} | Rest]) ->
    update_from_proplist(Physical#physical{inertia_tensor = Val}, Rest);
update_from_proplist(Physical, [{inverse_inertia_tensor, Val} | Rest]) ->
    update_from_proplist(Physical#physical{inverse_inertia_tensor = Val}, Rest).
%% ------------------------------------------------------------------------
%% @doc Read a single named field from a #physical{} record.
%% Crashes with function_clause for unknown property names.
get_prop(position, Physical) ->
    Physical#physical.position;
get_prop(linear_momentum, Physical) ->
    Physical#physical.linear_momentum;
get_prop(orientation, Physical) ->
    Physical#physical.orientation;
get_prop(angular_momentum, Physical) ->
    Physical#physical.angular_momentum;
get_prop(force_absolute, Physical) ->
    Physical#physical.force_absolute;
get_prop(force_relative, Physical) ->
    Physical#physical.force_relative;
get_prop(torque_absolute, Physical) ->
    Physical#physical.torque_absolute;
get_prop(torque_relative, Physical) ->
    Physical#physical.torque_relative;
get_prop(last_update, Physical) ->
    Physical#physical.last_update;
get_prop(linear_velocity, Physical) ->
    Physical#physical.linear_velocity;
get_prop(angular_velocity, Physical) ->
    Physical#physical.angular_velocity;
get_prop(spin, Physical) ->
    Physical#physical.spin;
get_prop(mass, Physical) ->
    Physical#physical.mass;
get_prop(inverse_mass, Physical) ->
    Physical#physical.inverse_mass;
get_prop(inertia_tensor, Physical) ->
    Physical#physical.inertia_tensor;
get_prop(inverse_inertia_tensor, Physical) ->
    Physical#physical.inverse_inertia_tensor.
%% ------------------------------------------------------------------------
%% Internal Helpers
%% ------------------------------------------------------------------------
-spec evaluate(TimeDelta, Velocity, Force, Spin, Torque, State) -> {Velocity, Force, Spin, Torque, State} when
    TimeDelta :: float(),
    Velocity :: vector:vec(),
    Force :: vector:vec(),
    Spin :: quaternion:quat(),
    Torque :: vector:vec(),
    State :: #physical{}.
%% One RK4 derivative evaluation: advance position/velocity/orientation/
%% angular momentum by TimeDelta using the supplied derivative estimates,
%% then recompute derived quantities (update_state/1) and the next
%% derivative estimates (forces/2) from the advanced state.
evaluate(TimeDelta, Velocity, Force, Spin, Torque, State) ->
    #physical{
        position = Position,
        linear_velocity = InitialVelocity,
        orientation = Orientation,
        angular_momentum = AngularMomentum
    } = State,
    {SpinW, SpinX, SpinY, SpinZ} = Spin,
    {OrientW, OrientX, OrientY, OrientZ} = Orientation,
    NextPosition = vector:add(Position, vector:multiply(TimeDelta, Velocity)),
    NextVelocity = vector:add(InitialVelocity, vector:multiply(TimeDelta, Force)),
    %% Component-wise Euler step of the orientation by its spin derivative,
    %% renormalized below via quaternion:unit/1.
    %% XXX (original author): "I have no idea if/how this works, but it's
    %% what the example code did..."
    NextOrientation = {
        OrientW + SpinW * TimeDelta,
        OrientX + SpinX * TimeDelta,
        OrientY + SpinY * TimeDelta,
        OrientZ + SpinZ * TimeDelta
    },
    NextAngularMomentum = vector:add(AngularMomentum, vector:multiply(TimeDelta, Torque)),
    State1 = State#physical{
        position = NextPosition,
        linear_velocity = NextVelocity,
        orientation = quaternion:unit(NextOrientation),
        angular_momentum = NextAngularMomentum
    },
    %% update_state/1 refreshes linear/angular velocity and spin in State2.
    {NextSpin, State2} = update_state(State1),
    {NextForce, NextTorque} = forces(TimeDelta, State2),
    {NextVelocity, NextForce, NextSpin, NextTorque, State2}.
%% ------------------------------------------------------------------------
-spec forces(TimeDelta, State) -> {Force, Torque} when
    TimeDelta :: float(),
    State :: #physical{},
    Force :: vector:vec(),
    Torque :: vector:vec().
%% Total external force and torque acting on the body: the absolute parts
%% plus the relative (body-frame) parts rotated into world space by the
%% current orientation.
%% FIXME: This should be a callback so the controller can do stuff like
%% target-velocity calculations.
forces(_TimeDelta, #physical{
    force_absolute = ForceAbs,
    force_relative = ForceRel,
    orientation = Orientation,
    torque_absolute = TorqueAbs,
    torque_relative = TorqueRel
}) ->
    TotalForce = vector:add(ForceAbs, quaternion:rotate(ForceRel, Orientation)),
    TotalTorque = vector:add(TorqueAbs, quaternion:rotate(TorqueRel, Orientation)),
    {TotalForce, TotalTorque}.
%% ------------------------------------------------------------------------
%% Recompute the derived motion quantities (linear velocity, angular
%% velocity and orientation spin) from the current momenta, and store them
%% back into the state record.
%%
%% BUGFIX: the previous spec declared a return of {Spin, AngularVelocity},
%% but the function has always returned {Spin, NewState} (the updated
%% #physical{}); the spec is corrected accordingly.
-spec update_state(State) -> {Spin, State} when
    State :: #physical{},
    Spin :: quaternion:quat().
update_state(State) ->
    #physical{
        linear_momentum = LinearMomentum,
        orientation = Orientation,
        angular_momentum = AngularMomentum,
        inverse_mass = InverseMass,
        inverse_inertia_tensor = InverseInertia
    } = State,
    %% v = p / m, omega = L / I (scalar inertia tensor).
    LinearVelocity = vector:multiply(InverseMass, LinearMomentum),
    AngularVelocity = vector:multiply(InverseInertia, AngularMomentum),
    {AngularVelocityX, AngularVelocityY, AngularVelocityZ} = AngularVelocity,
    %% Spin is the orientation derivative: 0.5 * omega_quat * orientation.
    Spin = quaternion:multiply(
        0.5,
        quaternion:multiply(
            {0, AngularVelocityX, AngularVelocityY, AngularVelocityZ},
            Orientation
        )
    ),
    NewState = State#physical{
        linear_velocity = LinearVelocity,
        angular_velocity = AngularVelocity,
        spin = Spin
    },
    {Spin, NewState}.
%% ------------------------------------------------------------------------
%% RK4 (4th-order Runge-Kutta) integration of position, linear velocity,
%% orientation and angular momentum over TimeDelta seconds.  Four
%% derivative evaluations (k1 at t, k2/k3 at t + dt/2, k4 at t + dt) are
%% combined with the classic (k1 + 2*k2 + 2*k3 + k4)/6 weighting.
%% Returns the updated #physical{} state.
%%
%% FIX: removed stray dataset-artifact text that trailed the final period
%% and broke compilation.
simulate_internal(TimeDelta, State) ->
    #physical{
        position = Position,
        linear_velocity = Velocity,
        orientation = {OrientW, OrientX, OrientY, OrientZ},
        angular_momentum = AngularMomentum
    } = State,
    {Velocity1, Force1, Spin1, Torque1, State1} = evaluate(0, {0, 0, 0}, {0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0}, State),
    {Velocity2, Force2, Spin2, Torque2, State2} = evaluate(TimeDelta * 0.5, Velocity1, Force1, Spin1, Torque1, State1),
    {Velocity3, Force3, Spin3, Torque3, State3} = evaluate(TimeDelta * 0.5, Velocity2, Force2, Spin2, Torque2, State2),
    {Velocity4, Force4, Spin4, Torque4, State4} = evaluate(TimeDelta, Velocity3, Force3, Spin3, Torque3, State3),
    %% Weighted RK4 averages for each integrated quantity.
    NewVelocity = vector:multiply(1.0 / 6.0, vector:add(Velocity1, vector:multiply(2.0, vector:add(Velocity2, Velocity3)), Velocity4)),
    NewAcceleration = vector:multiply(1.0 / 6.0, vector:add(Force1, vector:multiply(2.0, vector:add(Force2, Force3)), Force4)),
    {Spin1W, Spin1X, Spin1Y, Spin1Z} = Spin1,
    {Spin2W, Spin2X, Spin2Y, Spin2Z} = Spin2,
    {Spin3W, Spin3X, Spin3Y, Spin3Z} = Spin3,
    {Spin4W, Spin4X, Spin4Y, Spin4Z} = Spin4,
    %% Quaternion components are averaged per-component (no quat add/3).
    NewSpinW = 1.0 / 6.0 * (Spin1W + 2.0 * (Spin2W + Spin3W) + Spin4W),
    NewSpinX = 1.0 / 6.0 * (Spin1X + 2.0 * (Spin2X + Spin3X) + Spin4X),
    NewSpinY = 1.0 / 6.0 * (Spin1Y + 2.0 * (Spin2Y + Spin3Y) + Spin4Y),
    NewSpinZ = 1.0 / 6.0 * (Spin1Z + 2.0 * (Spin2Z + Spin3Z) + Spin4Z),
    NewTorque = vector:multiply(1.0 / 6.0, vector:add(Torque1, vector:multiply(2.0, vector:add(Torque2, Torque3)), Torque4)),
    State5 = State4#physical{
        position = vector:add(Position, vector:multiply(TimeDelta, NewVelocity)),
        linear_velocity = vector:add(Velocity, vector:multiply(TimeDelta, NewAcceleration)),
        orientation = quaternion:unit({
            OrientW + TimeDelta * NewSpinW,
            OrientX + TimeDelta * NewSpinX,
            OrientY + TimeDelta * NewSpinY,
            OrientZ + TimeDelta * NewSpinZ
        }),
        angular_momentum = vector:add(AngularMomentum, vector:multiply(TimeDelta, NewTorque))
    },
    State5.
%%% @author <NAME> <<EMAIL>>
%%% @author <NAME> <<EMAIL>>
%%%
%%% @copyright 2012 Selectel Ltd.
%%%
%%% @doc NIF-based date and time parsing and formatting for Erlang.
%%% This module implements an interface to strptime/strftime with
%%% appropriate handling of Erlang datetime formats.
%%%
%%% All exported functions in this module can throw `badarg' if
%%% malformed input is provided.
%%%
%%% A <em>Type</em> argument, accepted by some of the exported functions
%%% should be one of the following:
%%%
%%% ```
%%% | Type | Description |
%%% |----------+----------------------------------------------------|
%%% | unix | UNIX timestamp, a positive integer denoting number |
%%% | | of seconds since 1 Jan 1970. |
%%% | now | @see erlang:now/0 |
%%% | datetime | @see calendar:datetime/0 |
%%% '''
%%%
%%% A <em>Format</em> argument to any of the exported functions is
%%% either a {@type binary()} with strptime/strftime compatible tokens or
%%% one of the following atoms: iso8601, rfc1123, rfc2822. In the latter
%%% case a predefined format will be used.
%%%
%%% *A note about 32-bit systems*
%%%
%%% Functions of "format" family can return "{error, time_overflow}" if
%%% the underlying 32-bit value overflows. This is presumably possible only
%%% on 32-bit systems. Minimum datetime for such systems is
%%% `{{1901,12,13},{20,45,52}}' and maximum is `{{2038,1,19},{3,14,7}}'.
%%%
%%% @end
%%%
-module(tempo).
-on_load(nif_init/0).
-ifdef(DEBUG).
-compile([export_all]).
-endif.
-define(STUB, not_loaded(?LINE)).
-define(MEGA, 1000000).
-define(MICRO, 0.000001).
-define(EPOCH_ZERO,
calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})).
-export([parse/2, parse/3,
parse_unix/2, parse_now/2, parse_datetime/2,
format/2, format/3,
format_unix/2, format_now/2, format_datetime/2]).
-type unix_timestamp() :: float().
-type format() :: binary()
| iso8601
| rfc1123
| rfc2822.
-type datetime_type() :: unix
| now
| datetime.
-type datetime_value() :: unix_timestamp()
| erlang:timestamp()
| calendar:datetime().
%% @doc Parses a {Type, Binary} pair according to the provided format.
%% The shape of the returned datetime depends on Type.
%% @equiv parse(Format, DatetimeType, Binary)
%% @end
-spec parse(format(), {datetime_type(), binary()}) -> {ok, datetime_value()}
                                                    | {error, format_mismatch}.
parse(Format, {Type, Binary}) ->
    parse(Format, Binary, Type).
%% @doc Parses Binary according to Format; the shape of the returned
%% datetime value is chosen by the Type atom (unix | now | datetime).
%% @end
-spec parse(format(), binary(),
            datetime_type()) -> {ok, datetime_value()}
                              | {error, format_mismatch}.
parse(Format, Bin, unix) -> parse_unix(Format, Bin);
parse(Format, Bin, now) -> parse_now(Format, Bin);
parse(Format, Bin, datetime) -> parse_datetime(Format, Bin).
%% @doc Helper function similar to {@link parse/3}.
%%
%% Returns the parsed time as a whole number of seconds since the UNIX
%% epoch.  The strptime NIF yields a float, which is rounded here, so the
%% return spec reflects an integer (the original spec said
%% unix_timestamp(), i.e. float(), which contradicted round/1).
%% @equiv parse(Format, Binary, timestamp)
%% @end
-spec parse_unix(format(), binary()) -> {ok, integer()}
                                      | {error, format_mismatch}.
parse_unix(Format, Bin) ->
    case strptime(convert_format(Format), Bin) of
        {ok, Timestamp} ->
            %% NIF returns float seconds; normalize to whole seconds.
            {ok, round(Timestamp)};
        Err -> Err
    end.
%% @doc Helper function similar to {@link parse/3}.
%%
%% Converts the parsed UNIX timestamp into an erlang:timestamp() triple
%% {MegaSecs, Secs, MicroSecs}.
%% NOTE(review): parse_unix/2 rounds to an integer, so RawTimestamp has no
%% fractional part and MicroSecs is always 0 here -- confirm whether
%% sub-second precision was intended.
%% @equiv parse(Format, Binary, now)
%% @end
-spec parse_now(format(), binary()) -> {ok, erlang:timestamp()}
                                     | {error, format_mismatch}.
parse_now(Format, Bin) ->
    case parse_unix(Format, Bin) of
        {ok, RawTimestamp} ->
            Timestamp = trunc(RawTimestamp),
            MicroSecs = RawTimestamp - Timestamp,
            MegaSecs = Timestamp div ?MEGA,
            Secs = Timestamp rem ?MEGA,
            {ok, {MegaSecs, Secs, MicroSecs}};
        Err -> Err
    end.
%% @doc Helper function similar to {@link parse/3}.
%% Converts the parsed UNIX timestamp into a calendar:datetime().
%% @equiv parse(Format, Binary, datetime)
%% @end
-spec parse_datetime(format(), binary()) -> {ok, calendar:datetime()}
                                          | {error, format_mismatch}.
parse_datetime(Format, Bin) ->
    case parse_unix(Format, Bin) of
        {ok, Seconds} ->
            %% Shift from the UNIX epoch to Gregorian seconds before converting.
            Gregorian = ?EPOCH_ZERO + Seconds,
            {ok, calendar:gregorian_seconds_to_datetime(Gregorian)};
        Err ->
            Err
    end.
%% @doc Formats a {Type, Datetime} pair according to Format; the handling
%% of Datetime depends on Type.
%% @equiv format(Format, Datetime, Type)
%% @end
-spec format(format(),
             {datetime_type(), datetime_value()}) -> {ok, binary()}
                                                   | {error, invalid_time}
                                                   | {error, time_overflow}.
format(Format, {Type, Value}) ->
    format(Format, Value, Type).
%% @doc Formats Datetime according to Format; the way Datetime is
%% interpreted is chosen by the Type atom (unix | now | datetime).
%% @end
-spec format(format(), datetime_value(),
             datetime_type()) -> {ok, binary()}
                               | {error, invalid_time}
                               | {error, time_overflow}.
format(Format, Datetime, unix) -> format_unix(Format, Datetime);
format(Format, Datetime, now) -> format_now(Format, Datetime);
format(Format, Datetime, datetime) -> format_datetime(Format, Datetime).
%% @doc Helper function similar to {@link format/3}.
%% Renders a UNIX timestamp through the strftime NIF.
%% @equiv format(Format, Datetime, timestamp)
%% @end
-spec format_unix(format(), unix_timestamp()) -> {ok, binary()}
                                               | {error, invalid_time}
                                               | {error, time_overflow}.
format_unix(Format, Timestamp) ->
    StrftimeFormat = convert_format(Format),
    %% The NIF expects a float.
    strftime(StrftimeFormat, float(Timestamp)).
%% @doc Helper function similar to {@link format/3}.
%% Flattens an erlang:timestamp() triple into fractional UNIX seconds.
%% @equiv format(Format, Datetime, now)
%% @end
-spec format_now(format(), erlang:timestamp()) -> {ok, binary()}
                                                | {error, invalid_time}
                                                | {error, time_overflow}.
format_now(Format, {MegaSecs, Secs, MicroSecs}) ->
    Seconds = MegaSecs * ?MEGA + Secs + MicroSecs * ?MICRO,
    format_unix(Format, Seconds).
%% @doc Helper function similar to {@link format/3}.
%% Converts a calendar:datetime() to UNIX seconds before formatting.
%% @equiv format(Format, Datetime, datetime)
%% @end
-spec format_datetime(format(), calendar:datetime()) -> {ok, binary()}
                                                      | {error, invalid_time}
                                                      | {error, time_overflow}.
format_datetime(Format, Datetime) ->
    SecondsSinceEpoch = calendar:datetime_to_gregorian_seconds(Datetime) - ?EPOCH_ZERO,
    format_unix(Format, SecondsSinceEpoch).
%% @private
%% @doc Resolve a "format atom" to its predefined strftime pattern;
%% binaries pass through untouched.  Anything else is a badarg.
%% @end
-spec convert_format(format()) -> binary().
convert_format(Format) when is_binary(Format) ->
    Format;
convert_format(iso8601) ->
    <<"%Y-%m-%dT%H:%M:%SZ">>;
convert_format(rfc1123) ->
    <<"%a, %d %b %Y %H:%M:%S GMT">>;
convert_format(rfc2822) ->
    <<"%a, %d %b %Y %H:%M:%S +0000">>;
convert_format(Other) ->
    error(badarg, [Other]).
%% @private
%% @doc Stub replaced by the NIF's strptime at load time (see nif_init/0).
%% Calling it before the NIF loads exits via not_loaded/1.
%% @end
-spec strptime(binary(), binary()) -> {ok, integer()}
                                    | {error, format_mismatch}.
strptime(_Format, _DT) -> ?STUB.
%% @private
%% @doc Stub replaced by the NIF's strftime at load time (see nif_init/0).
%% Calling it before the NIF loads exits via not_loaded/1.
%% @end
-spec strftime(binary(), integer()) -> {ok, binary()}
                                     | {error, invalid_time}.
strftime(_Format, _DT) -> ?STUB.
%% @private
%% @doc Locate and load the NIF shared object from the application's priv
%% directory, falling back to ../priv relative to this module's ebin dir
%% when the application is not loaded (e.g. during development).
%% @end
-spec nif_init() -> ok | {error, _}.
nif_init() ->
    PrivDir =
        case code:priv_dir(tempo) of
            {error, _} ->
                EbinDir = filename:dirname(code:which(?MODULE)),
                filename:join(filename:dirname(EbinDir), "priv");
            Dir ->
                Dir
        end,
    erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
%% @private
%% @doc Exit gracefully (with module/line info) when the NIF could not be
%% loaded and a stub is invoked.
%%
%% FIXES: removed stray dataset-artifact text that trailed the final
%% period; corrected the spec -- exit/1 never returns, so the return type
%% is no_return(), not ok.
%% @end
-spec not_loaded(pos_integer()) -> no_return().
not_loaded(Line) -> exit({not_loaded, [{module, ?MODULE}, {line, Line}]}).
-module(pop_chain).
-export([
new/1,
add_block_in_order/3,
add_transaction/2,
generate_new_block/2,
find_block_by_id/2,
get_genesis_block/1,
get_head_block/1,
get_verfier_next_block_time/2,
resolve_fork/3,
compute_block_hash/1,
compute_transaction_hash/1,
apply_block_signature/2,
apply_transaction_signature/2,
get_status_info/1
]).
-include_lib("eunit/include/eunit.hrl").
-include_lib("stdlib/include/assert.hrl").
-include("potato_records.hrl").
%% Assert that the map's key set is exactly KeyList (order-insensitive);
%% crashes via ?assertEqual on any mismatch, returns ok otherwise.
map_key_match_assert(Map, KeyList) ->
    ActualKeys = lists:sort(maps:keys(Map)),
    ExpectedKeys = lists:sort(KeyList),
    ?assertEqual(ActualKeys, ExpectedKeys, {"key mismatch", ActualKeys, ExpectedKeys}),
    ok.
%% Assert that a transaction map has exactly the ?transaction keys and that
%% its consensus_data sub-map has exactly the ?consensus_transaction_data
%% keys; returns ok or crashes.
transaction_map_structure_assert(Transaction) when is_map(Transaction) ->
    map_key_match_assert(Transaction, ?transaction),
    map_key_match_assert(maps:get(consensus_data, Transaction), ?consensus_transaction_data),
    ok.
%% Assert that a block map has exactly the ?block keys, its consensus_data
%% has the ?consensus_block_data keys, and every contained transaction is
%% structurally valid; returns ok or crashes.
check_block_map_structure(Block) when is_map(Block) ->
    map_key_match_assert(Block, ?block),
    map_key_match_assert(maps:get(consensus_data, Block), ?consensus_block_data),
    Transactions = maps:get(transactions, Block),
    lists:foreach(fun transaction_map_structure_assert/1, Transactions),
    ok.
%% Fetch one entry out of a block's consensus_data sub-map.
get_block_cd(Entry, Block) ->
    #{consensus_data := ConsensusData} = Block,
    maps:get(Entry, ConsensusData).
%% @doc Computes the hash of a block.
%% The hash covers a canonical copy of the block with this_id and the
%% signature blanked out, so signing never alters the hashed content.
compute_block_hash(Block) when is_map(Block) ->
    #{consensus_data := ConsensusData} = Block,
    Canonical = Block#{
        this_id := undefined,
        consensus_data := ConsensusData#{signature := undefined}
    },
    my_crypto:hash(my_serializer:serialize_object(Canonical)).
%% @doc Computes the hash of a transaction.
%% The hash covers a canonical copy with the signature blanked out.
compute_transaction_hash(Transaction) when is_map(Transaction) ->
    #{consensus_data := ConsensusData} = Transaction,
    Canonical = Transaction#{
        consensus_data := ConsensusData#{signature := undefined}
    },
    my_crypto:hash(my_serializer:serialize_object(Canonical)).
%% @doc Inserts a signature into the block.
%%
%% Asserts that the block's this_id matches its recomputed hash and that
%% the signature verifies against the block's verifier public key.
apply_block_signature(Signature, Block)
  when is_map(Block) ->
    #{this_id := Hash, consensus_data := ConsensusData} = Block,
    #{verifier_pub_key := PubKey} = ConsensusData,
    ?assertEqual(Hash, compute_block_hash(Block)),
    ?assert(true == my_crypto:verify(Hash, Signature, PubKey)),
    Block#{consensus_data := ConsensusData#{signature := Signature}}.
%% @doc Inserts a signature into the transaction.
%%
%% Asserts that the signature verifies against the transaction's hash and
%% the signing player's public key.
apply_transaction_signature(Signature, Transaction)
  when is_map(Transaction) ->
    #{player_id := PlayerKey, consensus_data := ConsensusData} = Transaction,
    Hash = compute_transaction_hash(Transaction),
    ?assert(true == my_crypto:verify(Hash, Signature, PlayerKey)),
    Transaction#{consensus_data := ConsensusData#{signature := Signature}}.
%% Assert that a transaction is well-formed: correct map structure, a valid
%% player signature over the transaction hash, and a chain id matching this
%% chain.  Crashes (assert) on any violation; returns ok otherwise.
check_transaction_correctness(Transaction, ChainId) when is_map(Transaction) ->
    transaction_map_structure_assert(Transaction),
    #{
        consensus_data := CD,
        player_id := PlrKey
    } = Transaction,
    #{
        signature := PlrSgn,
        chain_id := TrChainId
    } = CD,
    Hash = compute_transaction_hash(Transaction),
    %% verify signature's correctness
    ?assert(my_crypto:verify(Hash, PlrSgn, PlrKey), "transaction signature failed verification"),
    %% compare ChainId
    ?assertEqual(TrChainId, ChainId, "bad transaction ChainId"),
    ok.
%% @doc Adds a block to the blocktree.
%%
%% The block must be new and must follow an already existing block (no
%% orphans).  The block's structure, verifier identity, timestamp slot,
%% hash and signatures are all asserted; any violation crashes.  On
%% success the head block is re-resolved against the new block.
add_block_in_order(Block, CurrentTime, ProtocolData)
  when
      is_record(ProtocolData, pop_chain),
      is_map(Block)
      ->
    %% checks maps for block and transactions inside
    check_block_map_structure(Block),
    #pop_chain{
        pop_config_data = #pop_config_data{
            verifiers_arr = VerifiersArr,
            time_between_blocks = TimeBetween,
            time_desync_margin = TimeDesyncMargin,
            chain_id = MainChainId
        },
        tree_data = TD0
    } = ProtocolData,
    #{
        previous_id := PrevId,
        this_id := ThisId,
        transactions := BlockTransactionsList,
        consensus_data := #{
            signature := VerSgn,
            verifier_pub_key := VerKey,
            verifier_index := VerIndex,
            timestamp := Tmp
        }
    } = Block,
    %% check that the signer is one of the verifiers
    #verifier_public_info{
        index = VerIndexChk,
        public_key = PubKeyChk
    } = array:get(VerIndex, VerifiersArr),
    ?assertEqual(VerIndex, VerIndexChk, "indices don't match"),
    ?assertEqual(VerKey, PubKeyChk, "keys don't match"),
    %% check that the time is correct for this verifier:
    %% strictly after the parent block, not too far into the future
    %% (within TimeDesyncMargin), and landing on this verifier's
    %% round-robin slot.
    PreviousBlock = blocktree:get_block_by_id(PrevId, TD0),
    PreviousBlockTimestamp = maps:get(timestamp, maps:get(consensus_data, PreviousBlock)),
    ?assert(PreviousBlockTimestamp < Tmp, "time should be larger than previous"),
    ?assert(Tmp - TimeDesyncMargin < CurrentTime, "block cannot be in the future"),
    VerNum = array:size(VerifiersArr),
    ?assertEqual(Tmp rem (TimeBetween * VerNum), TimeBetween * VerIndex, "bad time for that verifier"),
    %% check ThisId hash correctness
    Hash = ThisId,
    ?assertEqual(Hash, compute_block_hash(Block), "incorrect hash"),
    %% check signature's correctness
    ?assert(my_crypto:verify(Hash, VerSgn, VerKey), "signature failed verification"),
    %% (OPTIONAL) check sequence of different verifiers
    %% Verify every contained transaction (signature + chain id).
    lists:map(fun(T) -> check_transaction_correctness(T, MainChainId) end, BlockTransactionsList),
    %% Add this block to tree_data; this crashes if the block is poorly
    %% formed, an orphan, or already present.
    TD1 = blocktree:add_block_in_order(Block, TD0),
    %% Re-resolve the head: the new block becomes head only if it wins the
    %% fork resolution against the current head.
    CurrentHeadBlock = ProtocolData#pop_chain.head_block,
    HeadBlock = resolve_fork(Block, CurrentHeadBlock, TD1),
    NewProtocolData = ProtocolData#pop_chain{tree_data = TD1, head_block = HeadBlock},
    NewProtocolData.
%% @doc Initialize protocol data.
%%
%% Creates a tree holding a genesis block stamped with the configured
%% init_time.  The genesis block's id is the atom <b>genesis</b>; it is
%% the only block whose id is not a SHA hash.
new(PopConfigData)
  when is_record(PopConfigData, pop_config_data) ->
    CurrentTime = PopConfigData#pop_config_data.init_time,
    ?assert(CurrentTime >= 0),
    TD0 = blocktree:new(),
    B0 = blocktree:generate_new_block(undefined, TD0),
    %% The genesis block carries no signature or verifier identity.
    B1 = B0#{
        this_id := genesis,
        consensus_data := #{
            timestamp => CurrentTime,
            signature => undefined,
            verifier_pub_key => undefined,
            verifier_index => undefined
        }
    },
    check_block_map_structure(B1),
    TD1 = blocktree:add_block_in_order(B1, TD0),
    %% Genesis starts out as both the head and the genesis block.
    PC = #pop_chain{
        pop_config_data = PopConfigData,
        tree_data = TD1,
        head_block = B1,
        genesis_block = B1
    },
    PC.
%% @doc Resolves a fork between two head candidates.
%%
%% The taller branch (greater height) wins outright.  On equal heights the
%% branches are walked back to their first branching point and the block
%% with the earlier timestamp there wins; a timestamp tie is broken by the
%% smaller block hash (see resolve_fork_same_parent/2).
resolve_fork(B1, B2, TreeData)
  when
      is_map(B1),
      is_map(B2),
      is_record(TreeData, tree_data) ->
    Height1 = maps:get(height, B1),
    Height2 = maps:get(height, B2),
    if
        Height1 > Height2 ->
            B1;
        Height1 < Height2 ->
            B2;
        Height1 == Height2 ->
            case resolve_fork_select_branch(B1, B2, TreeData) of
                first -> B1;
                second -> B2
            end
    end.
%% Walk both equal-height branches back one block at a time until they
%% share a parent, then let resolve_fork_same_parent/2 pick the winner.
resolve_fork_select_branch(B1, B2, TreeData) ->
    PrevId1 = maps:get(previous_id, B1),
    PrevId2 = maps:get(previous_id, B2),
    case PrevId1 == PrevId2 of
        true ->
            resolve_fork_same_parent(B1, B2);
        false ->
            Parent1 = blocktree:get_block_by_id(PrevId1, TreeData),
            Parent2 = blocktree:get_block_by_id(PrevId2, TreeData),
            resolve_fork_select_branch(Parent1, Parent2, TreeData)
    end.
%% Tie-break two same-height blocks that share a parent: the earlier
%% timestamp wins; an exact timestamp tie is broken by the smaller block
%% id (hash).  Equal ids would presumably mean the same block; the inner
%% 'if' has no clause for that case and would crash with if_clause --
%% NOTE(review): confirm that case is truly unreachable.
resolve_fork_same_parent(B1, B2) ->
    T1 = get_block_cd(timestamp, B1),
    T2 = get_block_cd(timestamp, B2),
    if
        T1 < T2 ->
            first;
        T2 < T1 ->
            second;
        T1 == T2 ->
            Id1 = maps:get(this_id, B1),
            Id2 = maps:get(this_id, B2),
            if
                Id1 < Id2 ->
                    first;
                Id2 < Id1 ->
                    second
            end
    end.
%% @doc Get the next appropriate time for a verifier to make a block.
%%
%% Block slots rotate round-robin: verifier I owns times congruent to
%% I * TimeBetweenBlocks modulo (VerifierCount * TimeBetweenBlocks).  The
%% result is the first such slot strictly after the head block's timestamp.
%% (The "verfier" typo is kept -- the name is exported.)
get_verfier_next_block_time(VerifierIndex, PC)
  when is_record(PC, pop_chain) ->
    #pop_chain{
        pop_config_data = #pop_config_data{
            verifiers_arr = VerifierArr,
            time_between_blocks = TimeBetweenBlocks
        },
        head_block = HeadBlock
    } = PC,
    VerifierCount = array:size(VerifierArr),
    CycleLength = VerifierCount * TimeBetweenBlocks,
    HeadTime = get_block_cd(timestamp, HeadBlock),
    CycleStart = HeadTime - (HeadTime rem CycleLength),
    Candidate = CycleStart + VerifierIndex * TimeBetweenBlocks,
    case Candidate > HeadTime of
        true -> Candidate;
        false -> Candidate + CycleLength
    end.
%% @doc Builds the verifier's next block on top of the current head block.
%%
%% The produced block is unsigned; its timestamp is the verifier's next
%% slot, pending transactions are pulled in by blocktree, and this_id is
%% set to the block's computed hash.
generate_new_block(VerifierIndex, PC) when is_record(PC, pop_chain) ->
    #pop_chain{
        pop_config_data = #pop_config_data{verifiers_arr = Verifiers},
        tree_data = Tree,
        head_block = Head
    } = PC,
    SlotTime = get_verfier_next_block_time(VerifierIndex, PC),
    VerifierInfo = array:get(VerifierIndex, Verifiers),
    Draft = blocktree:generate_new_block(maps:get(this_id, Head), Tree),
    WithConsensus = Draft#{
        consensus_data := #{
            timestamp => SlotTime,
            signature => undefined,
            verifier_pub_key => VerifierInfo#verifier_public_info.public_key,
            verifier_index => VerifierIndex
        }
    },
    Final = WithConsensus#{this_id := compute_block_hash(WithConsensus)},
    check_block_map_structure(Final),
    Final.
%% @doc Looks a block up by id among the non-orphan blocks.
%% Returns {ok, Block} when found, otherwise the atom 'error'
%% (maps:find/2 semantics).
find_block_by_id(Id, PC) when is_record(PC, pop_chain) ->
    #pop_chain{tree_data = #tree_data{block_map = BlockMap}} = PC,
    maps:find(Id, BlockMap).
%% @doc Returns the chain's first (genesis) block.
get_genesis_block(Chain) -> Chain#pop_chain.genesis_block.

%% @doc Returns the chain's current head (most recent) block.
get_head_block(Chain) -> Chain#pop_chain.head_block.
%% @doc Verifies a transaction and hands it to the block tree.
%%
%% Crashes if the transaction fails the correctness check for this chain id.
%% Returns {Status, NewChain} where Status is one of ignored_duplicate,
%% updated_old or added_new.
add_transaction(Transaction, PC)
  when is_record(PC, pop_chain), is_map(Transaction) ->
    #pop_chain{
        pop_config_data = #pop_config_data{chain_id = ChainId},
        tree_data = Tree
    } = PC,
    check_transaction_correctness(Transaction, ChainId),
    {Status, NewTree} = blocktree:add_new_transaction(Transaction, Tree),
    {Status, PC#pop_chain{tree_data = NewTree}}.
%% @doc Collects human-readable status pairs for reporting: the block-tree
%% status plus the current chain height taken from the head block.
%% (Fix: removed stray dataset-metadata residue that was fused onto the
%% closing line and broke compilation.)
get_status_info(_PopChain = #pop_chain{tree_data = Tree, head_block = HB}) ->
    blocktree:get_status_info(Tree) ++
        [
         {"block chain size", maps:get(height, HB)}
        ].
%% -------------------------------------------------------------------
%%
%% riak_core: Core Riak Application
%%
%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc the local view of the cluster's ring configuration
%%
%% Numerous processes concurrently read and access the ring in a
%% variety of time sensitive code paths. To make this efficient,
%% `riak_core' uses `mochiglobal' which exploits the Erlang constant
%% pool to provide constant-time access to the ring without needing
%% to copy data into individual process heaps.
%%
%% However, updating a `mochiglobal' value is very slow, and becomes slower
%% the larger the item being stored. With large rings, the delay can
%% become too long during periods of high ring churn, where hundreds of
%% ring events are being triggered a second.
%%
%% As of Riak 1.4, `riak_core' uses a hybrid approach to solve this
%% problem. When a ring is first written, it is written to a shared ETS
%% table. If no ring events have occurred for 90 seconds, the ring is
%% then promoted to `mochiglobal'. This provides fast updates during
%% periods of ring churn, while eventually providing very fast reads
%% after the ring stabilizes. The downside is that reading from the ETS
%% table before promotion is slower than `mochiglobal', and requires
%% copying the ring into individual process heaps.
%%
%% To alleviate the slow down while in the ETS phase, `riak_core'
%% exploits the fact that most time sensitive operations access the ring
%% in order to read only a subset of its data: bucket properties and
%% partition ownership. Therefore, these pieces of information are
%% extracted from the ring and stored in the ETS table as well to
%% minimize copying overhead. Furthermore, the partition ownership
%% information (represented by the {@link chash} structure) is converted
%% into a binary {@link chashbin} structure before being stored in the
%% ETS table. This `chashbin' structure is fast to copy between processes
%% due to off-heap binary sharing. Furthermore, this structure provides a
%% secondary benefit of being much faster than the traditional `chash'
%% structure for normal operations.
%%
%% As of Riak 1.4, it is therefore recommended that operations that
%% can be performed by directly using the bucket properties API or
%% `chashbin' structure do so using those methods rather than
%% retrieving the ring via `get_my_ring/0' or `get_raw_ring/0'.
-module(riak_core_ring_manager).
-define(RING_KEY, riak_ring).
-behaviour(gen_server).
-export([start_link/0,
start_link/1,
get_my_ring/0,
get_raw_ring/0,
get_raw_ring_chashbin/0,
get_chash_bin/0,
get_ring_id/0,
get_bucket_meta/1,
refresh_my_ring/0,
refresh_ring/2,
set_my_ring/1,
write_ringfile/0,
prune_ringfiles/0,
read_ringfile/1,
find_latest_ringfile/0,
force_update/0,
do_write_ringfile/1,
ring_trans/2,
run_fixups/3,
set_cluster_name/1,
stop/0,
is_stable_ring/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-record(state, {
mode,
raw_ring,
ring_changed_time,
inactivity_timer
}).
-export([setup_ets/1, cleanup_ets/1, set_ring_global/1]). %% For EUnit testing
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-define(ETS, ets_riak_core_ring_manager).
-define(PROMOTE_TIMEOUT, 90000).
%% ===================================================================
%% Public API
%% ===================================================================

%% @doc Starts the ring manager in normal ("live") mode, registered locally
%% under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [live], []).

%% Testing entry point
%% @doc Starts the ring manager in test mode (public ETS table, small fresh
%% ring instead of one read from disk).
start_link(test) ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [test], []).
%% @spec get_my_ring() -> {ok, riak_core_ring:riak_core_ring()} | {error, Reason}
%% @doc Fetches the current (tainted) ring: from mochiglobal once the ring
%% has been promoted, otherwise from the shared ETS table.
get_my_ring() ->
    Stored =
        case riak_core_mochiglobal:get(?RING_KEY) of
            ets ->
                %% Ring still lives in ETS (not yet promoted).
                case ets:lookup(?ETS, ring) of
                    [{ring, FromEts}] -> FromEts;
                    _ -> undefined
                end;
            Promoted ->
                Promoted
        end,
    case Stored of
        Ring when is_tuple(Ring) -> {ok, Ring};
        undefined -> {error, no_ring}
    end.
%% @doc Returns {ok, RawRing}: read directly from the shared ETS table when
%% possible, falling back to a synchronous call into the server when the
%% table or the key is not available.
get_raw_ring() ->
    try ets:lookup_element(?ETS, raw_ring, 2) of
        Ring -> {ok, Ring}
    catch
        _:_ ->
            gen_server:call(?MODULE, get_raw_ring, infinity)
    end.
%% @doc Returns {ok, RawRing, CHBin}: the raw ring together with its
%% chashbin, preferring the ETS fast path and falling back to a server
%% call on any failure.
get_raw_ring_chashbin() ->
    try
        RawRing = ets:lookup_element(?ETS, raw_ring, 2),
        {ok, Bin} = get_chash_bin(),
        {ok, RawRing, Bin}
    catch
        _:_ ->
            gen_server:call(?MODULE, get_raw_ring_chashbin, infinity)
    end.
%% @spec refresh_my_ring() -> ok
%% @doc Replaces the local ring with a fresh one and shuts the node down;
%% used when this node leaves the cluster (see handle_call/3).
refresh_my_ring() ->
    gen_server:call(?MODULE, refresh_my_ring, infinity).

%% @doc Asks `Node' to refresh its ring, but only if its ring belongs to the
%% cluster named `ClusterName' (checked on the remote side).
refresh_ring(Node, ClusterName) ->
    gen_server:cast({?MODULE, Node}, {refresh_my_ring, ClusterName}).

%% @spec set_my_ring(riak_core_ring:riak_core_ring()) -> ok
%% @doc Installs `Ring' as the local ring (legacy rings are upgraded first).
set_my_ring(Ring) ->
    gen_server:call(?MODULE, {set_my_ring, Ring}, infinity).

%% @doc Returns the current ring id as {Epoch, Counter}; {0,0} when the
%% ETS table holds no id (e.g. the manager has not started yet).
get_ring_id() ->
    case ets:lookup(?ETS, id) of
        [{_, Id}] ->
            Id;
        _ ->
            {0,0}
    end.
%% @doc Return metadata for the given bucket. If a bucket
%% for the non-default type is provided {error, no_type}
%% is returned when the type does not exist
get_bucket_meta({<<"default">>, Name}) ->
    %% Default-type buckets are stored under their bare name.
    get_bucket_meta(Name);
get_bucket_meta({_Type, _Name}=Bucket) ->
    %% reads from cluster metadata ets table
    %% these aren't stored in ring manager ever
    riak_core_bucket:get_bucket(Bucket);
get_bucket_meta(Bucket) ->
    case ets:lookup(?ETS, {bucket, Bucket}) of
        [] ->
            undefined;
        [{_, undefined}] ->
            %% Deletion placeholder written by set_ring_global/1.
            undefined;
        [{_, Meta}] ->
            {ok, Meta}
    end.
%% @doc Return the {@link chashbin} generated from the current ring,
%% as {ok, CHBin}, or {error, no_ring} when none has been stored yet.
get_chash_bin() ->
    case ets:lookup(?ETS, chashbin) of
        [{chashbin, Bin}] -> {ok, Bin};
        _ -> {error, no_ring}
    end.
%% @spec write_ringfile() -> ok
%% @doc Asynchronously asks the server to persist the current ring to disk.
write_ringfile() ->
    gen_server:cast(?MODULE, write_ringfile).

%% @doc Runs `Fun(Ring, Args)' inside the server; the fun's return tag
%% decides whether the ring changes and how the change is gossiped
%% (see the ring_trans clause of handle_call/3).
ring_trans(Fun, Args) ->
    gen_server:call(?MODULE, {ring_trans, Fun, Args}, infinity).

%% @doc Renames the cluster; persists and notifies as for any ring change.
set_cluster_name(Name) ->
    gen_server:call(?MODULE, {set_cluster_name, Name}, infinity).

%% @doc True once the ring has not changed for at least ?PROMOTE_TIMEOUT ms.
is_stable_ring() ->
    gen_server:call(?MODULE, is_stable_ring, infinity).

%% @doc Exposed for support/debug purposes. Forces the node to change its
%% ring in a manner that will trigger reconciliation on gossip.
force_update() ->
    ring_trans(
      fun(Ring, _) ->
              NewRing = riak_core_ring:update_member_meta(node(), Ring, node(),
                                                          unused, os:timestamp()), %% normally, should be a vclock.
              {new_ring, NewRing}
      end, []),
    ok.
%% @doc Persists `Ring' under a timestamped file name
%% ("riak_core_ring.<cluster>.<UTC timestamp>") in the ring directory.
%% A configured directory of "<nostore>" disables persistence.
do_write_ringfile(Ring) ->
    case ring_dir() of
        "<nostore>" ->
            nop;
        Dir ->
            {{Y, Mo, D}, {H, Mi, S}} = calendar:universal_time(),
            Stamp = io_lib:format(".~B~2.10.0B~2.10.0B~2.10.0B~2.10.0B~2.10.0B",
                                  [Y, Mo, D, H, Mi, S]),
            ClusterName = app_helper:get_env(riak_core, cluster_name),
            FileName = Dir ++ "/riak_core_ring." ++ ClusterName ++ Stamp,
            do_write_ringfile(Ring, FileName)
    end.
%% @doc Atomically writes `Ring' to `FN' (via replace_file), refusing to
%% persist a "last gasp" ring so it cannot be resurrected on restart.
%% Returns ok, or {error, Reason} after logging the failure.
do_write_ringfile(Ring, FN) ->
    ok = filelib:ensure_dir(FN),
    try
        %% A last-gasp ring must only ever exist in memory at shutdown;
        %% a 'true' here deliberately fails the match and aborts the write.
        false = riak_core_ring:check_lastgasp(Ring),
        ok = riak_core_util:replace_file(FN, term_to_binary(Ring))
    catch
        _:Err ->
            lager:error("Unable to write ring to \"~s\" - ~p\n", [FN, Err]),
            {error,Err}
    end.
%% @spec find_latest_ringfile() -> string()
%% @doc Scans the ring directory for "riak_core_ring.<cluster>.<timestamp>"
%% files belonging to this cluster and returns {ok, Path} for the newest,
%% {error, not_found} when none exist, or {error, Reason} if the directory
%% cannot be listed.
find_latest_ringfile() ->
    Dir = ring_dir(),
    case file:list_dir(Dir) of
        {ok, Filenames} ->
            Cluster = app_helper:get_env(riak_core, cluster_name),
            %% Split each file name on "." and keep only this cluster's
            %% ring files, extracting their numeric timestamps.
            Timestamps = [list_to_integer(TS) || {"riak_core_ring", C1, TS} <-
                              [list_to_tuple(string:tokens(FN, ".")) || FN <- Filenames],
                          C1 =:= Cluster],
            SortedTimestamps = lists:reverse(lists:sort(Timestamps)),
            case SortedTimestamps of
                [Latest | _] ->
                    {ok, Dir ++ "/riak_core_ring." ++ Cluster ++ "." ++ integer_to_list(Latest)};
                _ ->
                    {error, not_found}
            end;
        {error, Reason} ->
            {error, Reason}
    end.
%% @spec read_ringfile(string()) -> riak_core_ring:riak_core_ring() | {error, any()}
%% @doc Reads and decodes a ring file. Crashes (badmatch) if the persisted
%% ring is a "last gasp" ring, since those must never be stored.
read_ringfile(RingFile) ->
    case file:read_file(RingFile) of
        {ok, Binary} ->
            Ring = binary_to_term(Binary),
            false = riak_core_ring:check_lastgasp(Ring),
            Ring;
        {error, _} = Error ->
            Error
    end.
%% @spec prune_ringfiles() -> ok | {error, Reason}
%% @doc Deletes old ring files for this cluster, keeping the set of
%% timestamps selected by prune_list/1. A ring dir of "<nostore>" or a
%% missing directory is a no-op.
prune_ringfiles() ->
    case ring_dir() of
        "<nostore>" -> ok;
        Dir ->
            Cluster = app_helper:get_env(riak_core, cluster_name),
            case file:list_dir(Dir) of
                {error,enoent} -> ok;
                {error, Reason} ->
                    {error, Reason};
                {ok, []} -> ok;
                {ok, Filenames} ->
                    %% Timestamps (as strings) of this cluster's ring files.
                    Timestamps = [TS || {"riak_core_ring", C1, TS} <-
                                      [list_to_tuple(string:tokens(FN, ".")) || FN <- Filenames],
                                  C1 =:= Cluster],
                    if Timestamps /= [] ->
                            %% there are existing ring files
                            %% Parse "YYYYMMDDhhmmss" into [Y,M,D,h,m,s] lists,
                            %% newest first, and pick which ones to keep.
                            TSPat = [io_lib:fread("~4d~2d~2d~2d~2d~2d",TS) ||
                                        TS <- Timestamps],
                            TSL = lists:reverse(lists:sort([TS ||
                                                               {ok,TS,[]} <- TSPat])),
                            Keep = prune_list(TSL),
                            KeepTSs = [lists:flatten(
                                         io_lib:format(
                                           "~B~2.10.0B~2.10.0B~2.10.0B~2.10.0B~2.10.0B",K))
                                       || K <- Keep],
                            %% Delete every file whose name contains none of
                            %% the kept timestamps.
                            DelFNs = [Dir ++ "/" ++ FN || FN <- Filenames,
                                      lists:all(fun(TS) ->
                                                        string:str(FN,TS)=:=0
                                                end, KeepTSs)],
                            _ = [file:delete(DelFN) || DelFN <- DelFNs],
                            ok;
                       true ->
                            %% directory wasn't empty, but there are no ring
                            %% files in it
                            ok
                    end
            end
    end.
%% @private (only used for test instances)
stop() ->
    gen_server:cast(?MODULE, stop).

%% ===================================================================
%% gen_server callbacks
%% ===================================================================

%% @doc Initialises the manager: creates the shared ETS table, loads (or
%% creates) a ring, applies node-level config, installs the ring globally
%% and notifies ring-event watchers.
init([Mode]) ->
    setup_ets(Mode),
    Ring = reload_ring(Mode),
    Ring2 = node_level_config(Ring),
    State = set_ring(Ring2, #state{mode = Mode}),
    riak_core_ring_events:ring_update(Ring2),
    {ok, State}.

%% @doc Returns the ring to start from: a small fresh ring in test mode;
%% otherwise the newest persisted ring file (upgraded if legacy), or a
%% fresh ring when no file exists. Throws on unreadable ring files.
reload_ring(test) ->
    riak_core_ring:fresh(16,node());
reload_ring(live) ->
    case riak_core_ring_manager:find_latest_ringfile() of
        {ok, RingFile} ->
            case riak_core_ring_manager:read_ringfile(RingFile) of
                {error, Reason} ->
                    lager:critical("Failed to read ring file: ~p",
                                   [lager:posix_error(Reason)]),
                    throw({error, Reason});
                Ring ->
                    %% Upgrade the ring data structure if necessary.
                    case riak_core_ring:legacy_ring(Ring) of
                        true ->
                            lager:info("Upgrading legacy ring"),
                            riak_core_ring:upgrade(Ring);
                        false ->
                            Ring
                    end
            end;
        {error, not_found} ->
            lager:warning("No ring file available."),
            riak_core_ring:fresh();
        {error, Reason} ->
            lager:critical("Failed to load ring file: ~p",
                           [lager:posix_error(Reason)]),
            throw({error, Reason})
    end.
%% @doc Synchronous server requests.
handle_call(get_raw_ring, _From, #state{raw_ring=Ring} = State) ->
    {reply, {ok, Ring}, State};
handle_call(get_raw_ring_chashbin, _From, #state{raw_ring=Ring} = State) ->
    {ok, CHBin} = get_chash_bin(),
    {reply, {ok, Ring, CHBin}, State};
handle_call({set_my_ring, RingIn}, _From, State) ->
    %% Upgrade legacy ring structures before installing.
    Ring = riak_core_ring:upgrade(RingIn),
    State2 = prune_write_notify_ring(Ring, State),
    {reply,ok,State2};
handle_call(refresh_my_ring, _From, State) ->
    %% Prompt the claimant before creating a fresh ring for shutdown, so that
    %% any final actions can be taken
    ok = riak_core_claimant:pending_close(State#state.raw_ring, get_ring_id()),
    %% This node is leaving the cluster so create a fresh ring file
    FreshRing = riak_core_ring:fresh(),
    LastGaspRing = riak_core_ring:set_lastgasp(FreshRing),
    State2 = set_ring(LastGaspRing, State),
    %% Make sure the fresh ring gets written before stopping, that the updated
    %% state global ring has the last gasp, but not the persisted ring (so that
    %% on restart there will be no last gasp indicator.
    ok = do_write_ringfile(FreshRing),
    %% Handoff is complete and fresh ring is written
    %% so we can safely stop now.
    riak_core:stop("node removal completed, exiting."),
    {reply,ok,State2};
handle_call({ring_trans, Fun, Args}, _From, State=#state{raw_ring=Ring}) ->
    %% Run the caller's transformation; the returned tag selects the gossip
    %% strategy (random recursive, none, or full recursive).
    case catch Fun(Ring, Args) of
        {new_ring, NewRing} ->
            State2 = prune_write_notify_ring(NewRing, State),
            riak_core_gossip:random_recursive_gossip(NewRing),
            {reply, {ok, NewRing}, State2};
        {set_only, NewRing} ->
            %% Persist and install locally without notifying/gossiping.
            State2 = prune_write_ring(NewRing, State),
            {reply, {ok, NewRing}, State2};
        {reconciled_ring, NewRing} ->
            State2 = prune_write_notify_ring(NewRing, State),
            riak_core_gossip:recursive_gossip(NewRing),
            {reply, {ok, NewRing}, State2};
        ignore ->
            {reply, not_changed, State};
        {ignore, Reason} ->
            {reply, {not_changed, Reason}, State};
        Other ->
            lager:error("ring_trans: invalid return value: ~p",
                        [Other]),
            {reply, not_changed, State}
    end;
handle_call({set_cluster_name, Name}, _From, State=#state{raw_ring=Ring}) ->
    NewRing = riak_core_ring:set_cluster_name(Ring, Name),
    State2 = prune_write_notify_ring(NewRing, State),
    {reply, ok, State2};
handle_call(is_stable_ring, _From, State) ->
    {IsStable, _DeltaMS} = is_stable_ring(State),
    {reply, IsStable, State}.
%% @doc Asynchronous server requests.
handle_cast(stop, State) ->
    {stop,normal,State};
handle_cast({refresh_my_ring, ClusterName}, State) ->
    {ok, Ring} = get_my_ring(),
    %% Only honour the refresh when our ring carries the named cluster.
    case riak_core_ring:cluster_name(Ring) of
        ClusterName ->
            handle_cast(refresh_my_ring, State);
        _ ->
            {noreply, State}
    end;
handle_cast(refresh_my_ring, State) ->
    {_, _, State2} = handle_call(refresh_my_ring, undefined, State),
    {noreply, State2};
handle_cast(write_ringfile, test) ->
    %% NOTE(review): this clause matches a bare 'test' state, but init/1
    %% above always builds a #state{} record — looks unreachable; possibly
    %% a legacy test hook. Confirm before removing.
    {noreply,test};
handle_cast(write_ringfile, State=#state{raw_ring=Ring}) ->
    ok = do_write_ringfile(Ring),
    {noreply,State}.

%% @doc Out-of-band messages: the inactivity timer drives promotion of the
%% ring from ETS into mochiglobal once the ring has been stable long enough.
handle_info(inactivity_timeout, State) ->
    case is_stable_ring(State) of
        {true,DeltaMS} ->
            lager:debug("Promoting ring after ~p", [DeltaMS]),
            promote_ring(),
            State2 = State#state{inactivity_timer=undefined},
            {noreply, State2};
        {false,DeltaMS} ->
            %% Ring changed since the timer was armed; re-arm for the
            %% remainder of the stability window.
            Remaining = ?PROMOTE_TIMEOUT - DeltaMS,
            State2 = set_timer(Remaining, State),
            {noreply, State2}
    end;
handle_info(_Info, State) ->
    {noreply, State}.

%% @private
terminate(_Reason, _State) ->
    ok.

%% @private
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% ===================================================================
%% Internal functions
%% ===================================================================

%% @doc Resolves the directory ring files are written to: the configured
%% ring_state_dir, or "<platform_data_dir>/ring" when unset.
ring_dir() ->
    case app_helper:get_env(riak_core, ring_state_dir) of
        undefined ->
            DataDir = app_helper:get_env(riak_core, platform_data_dir, "data"),
            filename:join(DataDir, "ring");
        Dir ->
            Dir
    end.
%% @doc Given ring-file timestamps ([Y,M,D,h,m,s] lists, newest first),
%% selects which to keep: the newest entry, plus the first older entry
%% whose year/month/day/hour/minute (fields 1..5) differs from it.
prune_list([Newest | Older]) ->
    Kept = [[Newest] | [back(Field, Newest, Older) || Field <- lists:seq(1, 5)]],
    lists:usort(lists:append(Kept)).

%% @doc Returns (as a singleton list) the first timestamp whose Field-th
%% element differs from X's, or [] when every entry matches.
back(_Field, _X, []) -> [];
back(Field, X, [Candidate | Rest]) ->
    case lists:nth(Field, X) =:= lists:nth(Field, Candidate) of
        true -> back(Field, X, Rest);
        false -> [Candidate]
    end.
%% @private
%% @doc Applies each registered bucket-fixup module to `BucketProps'.
%% A fixup that errors or crashes is logged and skipped, leaving the
%% properties as they were before that fixup.
run_fixups([], _Bucket, BucketProps) ->
    BucketProps;
run_fixups([{App, Fixup}|T], BucketName, BucketProps) ->
    BP = try Fixup:fixup(BucketName, BucketProps) of
             {ok, NewBucketProps} ->
                 NewBucketProps;
             {error, Reason} ->
                 lager:error("Error while running bucket fixup module "
                             "~p from application ~p on bucket ~p: ~p", [Fixup, App,
                                                                         BucketName, Reason]),
                 BucketProps
         catch
             What:Why ->
                 lager:error("Crash while running bucket fixup module "
                             "~p from application ~p on bucket ~p : ~p:~p", [Fixup, App,
                                                                             BucketName, What, Why]),
                 BucketProps
         end,
    run_fixups(T, BucketName, BP).

%% Add node level configs to ring
%% @doc Stamps this node's participate_in_coverage setting into the ring's
%% member metadata.
-spec node_level_config(riak_core_ring:riak_core_ring()) -> riak_core_ring:riak_core_ring().
node_level_config(Ring) ->
    %% Check node participation in coverage queries and update ring
    Node = node(),
    ParticipateInCoverage = app_helper:get_env(riak_core,participate_in_coverage),
    riak_core_ring:update_member_meta(Node, Ring, Node, participate_in_coverage, ParticipateInCoverage).
%% @doc Installs `NewRing' as the current ring (ETS/mochiglobal) and arms
%% the inactivity timer that will eventually promote it to mochiglobal.
set_ring(NewRing, State0) ->
    set_ring_global(NewRing),
    State1 = State0#state{raw_ring = NewRing, ring_changed_time = os:timestamp()},
    maybe_set_timer(?PROMOTE_TIMEOUT, State1).
%% @doc Arms the promotion timer only when none is already pending.
maybe_set_timer(Duration, State=#state{inactivity_timer=undefined}) ->
    set_timer(Duration, State);
maybe_set_timer(_Duration, State) ->
    State.

%% @doc Schedules an 'inactivity_timeout' message to ourselves after
%% `Duration' milliseconds and remembers the timer reference.
set_timer(Duration, State) ->
    Timer = erlang:send_after(Duration, self(), inactivity_timeout),
    State#state{inactivity_timer=Timer}.

%% @doc Creates the shared ring ETS table (public in test mode, protected
%% otherwise) and seeds the bookkeeping counters and ring id.
setup_ets(Mode) ->
    %% Destroy prior version of ETS table. This is necessary for certain
    %% eunit tests, but is unneeded for normal Riak operation.
    catch ets:delete(?ETS),
    Access = case Mode of
                 live -> protected;
                 test -> public
             end,
    ?ETS = ets:new(?ETS, [named_table, Access, {read_concurrency, true}]),
    Id = reset_ring_id(),
    ets:insert(?ETS, [{changes, 0}, {promoted, 0}, {id, Id}]),
    ok.

%% @doc Drops the shared ETS table (test instances only).
cleanup_ets(test) ->
    ets:delete(?ETS).
%% @doc Computes the initial ring id {Epoch, 0} for a (re)started manager.
%% The epoch counter lives in mochiglobal so ring ids keep increasing even
%% if the riak_core_ring_manager crashes and restarts.
reset_ring_id() ->
    Epoch =
        case riak_core_mochiglobal:get(riak_ring_id_epoch) of
            undefined -> 0;
            Stored -> Stored
        end,
    NextEpoch = Epoch + 1,
    riak_core_mochiglobal:put(riak_ring_id_epoch, NextEpoch),
    {NextEpoch, 0}.
%% Set the ring in mochiglobal/ETS. Exported during unit testing
%% to make test setup simpler - no need to spin up a riak_core_ring_manager
%% process.
%% @doc Runs bucket fixups, taints the ring, and atomically publishes the
%% ring, raw ring, chashbin, incremented ring id and per-bucket properties
%% into the shared ETS table, flipping mochiglobal to the 'ets' marker.
set_ring_global(Ring) ->
    DefaultProps = case application:get_env(riak_core, default_bucket_props) of
                       {ok, Val} ->
                           Val;
                       _ ->
                           []
                   end,
    %% run fixups on the ring before storing it in mochiglobal
    FixedRing = case riak_core:bucket_fixups() of
                    [] -> Ring;
                    Fixups ->
                        Buckets = riak_core_ring:get_buckets(Ring),
                        lists:foldl(
                          fun(Bucket, AccRing) ->
                                  BucketProps = riak_core_bucket:get_bucket(Bucket, Ring),
                                  %% Merge anything in the default properties but not in
                                  %% the bucket's properties. This is to ensure default
                                  %% properties added after the bucket is created are
                                  %% inherited to the bucket.
                                  MergedProps = riak_core_bucket:merge_props(
                                                  BucketProps, DefaultProps),
                                  %% fixup the ring
                                  NewBucketProps = run_fixups(Fixups, Bucket, MergedProps),
                                  %% update the bucket in the ring
                                  riak_core_ring:update_meta({bucket,Bucket},
                                                             NewBucketProps,
                                                             AccRing)
                          end, Ring, Buckets)
                end,
    %% Mark ring as tainted to check if it is ever leaked over gossip or
    %% relied upon for any non-local ring operations.
    TaintedRing = riak_core_ring:set_tainted(FixedRing),
    %% Extract bucket properties and place into ETS table. We want all bucket
    %% additions, modifications, and deletions to appear in a single atomic
    %% operation. Since ETS does not provide a means to change + delete
    %% multiple values in a single operation, we emulate the deletion by
    %% overwriting all deleted buckets with the "undefined" atom that has
    %% special meaning in `riak_core_bucket:get_bucket_props/2`. We then
    %% cleanup these values in a subsequent `ets:match_delete`.
    OldBuckets = ets:select(?ETS, [{{{bucket, '$1'}, '_'}, [], ['$1']}]),
    BucketDefaults = [{{bucket, Bucket}, undefined} || Bucket <- OldBuckets],
    BucketMeta =
        [{{bucket, Bucket}, Meta}
         || Bucket <- riak_core_ring:get_buckets(TaintedRing),
            {ok,Meta} <- [riak_core_ring:get_meta({bucket, Bucket}, TaintedRing)]],
    %% ukeysort keeps the first entry per key, so live bucket meta wins
    %% over the 'undefined' deletion placeholders.
    BucketMeta2 = lists:ukeysort(1, BucketMeta ++ BucketDefaults),
    CHBin = chashbin:create(riak_core_ring:chash(TaintedRing)),
    {Epoch, Id} = ets:lookup_element(?ETS, id, 2),
    Actions = [{ring, TaintedRing},
               {raw_ring, Ring},
               {id, {Epoch,Id+1}},
               {chashbin, CHBin} | BucketMeta2],
    ets:insert(?ETS, Actions),
    ets:match_delete(?ETS, {{bucket, '_'}, undefined}),
    case riak_core_mochiglobal:get(?RING_KEY) of
        ets ->
            ok;
        _ ->
            riak_core_mochiglobal:put(?RING_KEY, ets)
    end,
    ok.

%% @doc Copies the current ring out of ETS into mochiglobal for
%% constant-time reads (done once the ring has been stable).
promote_ring() ->
    {ok, Ring} = get_my_ring(),
    riak_core_mochiglobal:put(?RING_KEY, Ring).
%% Persist a new ring file, set the global value and notify any listeners
prune_write_notify_ring(Ring, State) ->
    State2 = prune_write_ring(Ring, State),
    riak_core_ring_events:ring_update(Ring),
    State2.

%% @doc Prunes old ring files, persists `Ring' and installs it globally.
%% Crashes if asked to persist a tainted ring.
prune_write_ring(Ring, State) ->
    riak_core_ring:check_tainted(Ring, "Error: Persisting tainted ring"),
    ok = riak_core_ring_manager:prune_ringfiles(),
    _ = do_write_ringfile(Ring),
    State2 = set_ring(Ring, State),
    State2.
%% @doc Returns {IsStable, ElapsedMS}: whether at least ?PROMOTE_TIMEOUT ms
%% have elapsed since the last ring change, and how many actually have.
is_stable_ring(#state{ring_changed_time = ChangedAt}) ->
    ElapsedMS = erlang:max(0, timer:now_diff(os:timestamp(), ChangedAt)) div 1000,
    {ElapsedMS >= ?PROMOTE_TIMEOUT, ElapsedMS}.
%% ===================================================================
%% Unit tests
%% ===================================================================
-ifdef(TEST).

%% @doc back/3 returns the first entry differing from X in the given field.
back_test() ->
    X = [1,2,3],
    List1 = [[1,2,3],[4,2,3], [7,8,3], [11,12,13], [1,2,3]],
    List2 = [[7,8,9], [1,2,3]],
    List3 = [[1,2,3]],
    ?assertEqual([[4,2,3]], back(1, X, List1)),
    ?assertEqual([[7,8,9]], back(1, X, List2)),
    ?assertEqual([], back(1, X, List3)),
    ?assertEqual([[7,8,3]], back(2, X, List1)),
    ?assertEqual([[11,12,13]], back(3, X, List1)).

%% @doc prune_list/1 keeps the newest timestamp plus the first entries
%% differing in each of the higher-order fields.
prune_list_test() ->
    TSList1 = [[2011,2,28,16,32,16],[2011,2,28,16,32,36],[2011,2,28,16,30,27],[2011,2,28,16,32,16],[2011,2,28,16,32,36]],
    TSList2 = [[2011,2,28,16,32,36],[2011,2,28,16,31,16],[2011,2,28,16,30,27],[2011,2,28,16,32,16],[2011,2,28,16,32,36]],
    PrunedList1 = [[2011,2,28,16,30,27],[2011,2,28,16,32,16]],
    PrunedList2 = [[2011,2,28,16,31,16],[2011,2,28,16,32,36]],
    ?assertEqual(PrunedList1, prune_list(TSList1)),
    ?assertEqual(PrunedList2, prune_list(TSList2)).

%% @doc set_ring_global/1 + promote_ring/0: the promoted ring matches the
%% one installed.
set_ring_global_test() ->
    setup_ets(test),
    application:set_env(riak_core,ring_creation_size, 4),
    Ring = riak_core_ring:fresh(),
    set_ring_global(Ring),
    promote_ring(),
    ?assert(riak_core_ring:nearly_equal(Ring, riak_core_mochiglobal:get(?RING_KEY))),
    cleanup_ets(test).

%% @doc get_my_ring/0 sees a ring installed via set_ring_global/1 (ETS path).
set_my_ring_test() ->
    setup_ets(test),
    application:set_env(riak_core,ring_creation_size, 4),
    Ring = riak_core_ring:fresh(),
    set_ring_global(Ring),
    {ok, MyRing} = get_my_ring(),
    ?assert(riak_core_ring:nearly_equal(Ring, MyRing)),
    cleanup_ets(test).

%% @doc Full refresh_my_ring/0 round trip against a mock ring; saves and
%% restores the riak_core application environment around the test.
refresh_my_ring_test() ->
    setup_ets(test),
    Core_Settings = [{ring_creation_size, 4},
                     {ring_state_dir, "/tmp"},
                     {cluster_name, "test"}],
    [begin
         put({?MODULE,AppKey}, app_helper:get_env(riak_core, AppKey)),
         ok = application:set_env(riak_core, AppKey, Val)
     end || {AppKey, Val} <- Core_Settings],
    riak_core_ring_events:start_link(),
    riak_core_ring_manager:start_link(test),
    riak_core_claimant:start_link(),
    riak_core_vnode_sup:start_link(),
    riak_core_vnode_master:start_link(riak_core_vnode),
    riak_core_test_util:setup_mockring1(),
    ?assertEqual(ok, riak_core_ring_manager:refresh_my_ring()),
    riak_core_ring_manager:stop(),
    riak_core_claimant:stop(),
    %% Cleanup the ring file created for this test
    {ok, RingFile} = find_latest_ringfile(),
    file:delete(RingFile),
    [ok = application:set_env(riak_core, AppKey, get({?MODULE, AppKey}))
     || {AppKey, _Val} <- Core_Settings],
    ok.

-define(TEST_RINGDIR, "ring_manager_eunit").
-define(TEST_RINGFILE, (?TEST_RINGDIR ++ "/test.ring")).
-define(TMP_RINGFILE, (?TEST_RINGFILE ++ ".tmp")).

%% @doc Exercises do_write_ringfile/2: happy path, unwritable temp file and
%% unwritable target directory.
do_write_ringfile_test() ->
    %% Make sure no data exists from previous runs
    file:change_mode(?TMP_RINGFILE, 8#00644),
    file:delete(?TMP_RINGFILE),
    file:change_mode(?TEST_RINGFILE, 8#00644),
    file:delete(?TEST_RINGFILE),
    %% Check happy path
    GenR = fun(Name) -> riak_core_ring:fresh(64, Name) end,
    ?assertEqual(ok, do_write_ringfile(GenR(happy), ?TEST_RINGFILE)),
    %% Check write fails (create .tmp file with no write perms)
    ok = file:write_file(?TMP_RINGFILE, <<"no write for you">>),
    ok = file:change_mode(?TMP_RINGFILE, 8#00444),
    ?assertMatch({error,_}, do_write_ringfile(GenR(tmp_perms), ?TEST_RINGFILE)),
    ok = file:change_mode(?TMP_RINGFILE, 8#00644),
    ok = file:delete(?TMP_RINGFILE),
    %% Check rename fails
    ok = file:change_mode(?TEST_RINGDIR, 8#00444),
    ?assertMatch({error,_}, do_write_ringfile(GenR(ring_perms), ?TEST_RINGFILE)),
    ok = file:change_mode(?TEST_RINGDIR, 8#00755),
    ok = file:change_mode(?TEST_RINGFILE, 8#00644),
    ok = file:delete(?TEST_RINGFILE).

%% @doc is_stable_ring/1 boundary checks around ?PROMOTE_TIMEOUT.
is_stable_ring_test() ->
    {A,B,C} = Now = os:timestamp(),
    TimeoutSecs = ?PROMOTE_TIMEOUT div 1000,
    Within = {A, B - (TimeoutSecs div 2), C},
    Outside = {A, B - (TimeoutSecs + 1), C},
    ?assertMatch({true,_},is_stable_ring(#state{ring_changed_time={0,0,0}})),
    ?assertMatch({true,_},is_stable_ring(#state{ring_changed_time=Outside})),
    ?assertMatch({false,_},is_stable_ring(#state{ring_changed_time=Within})),
    ?assertMatch({false,_},is_stable_ring(#state{ring_changed_time=Now})).
-endif.
-module(poly_SUITE).
-include_lib("eunit/include/eunit.hrl").
-export([all/0, init_per_testcase/2, end_per_testcase/2]).
-export(
[
eval_test/1,
zeroize_test/1,
self_subtract_test/1,
add_zero_test/1,
sub_zero_test/1,
mul_poly_test/1,
add_different_sizes_poly_test/1,
negative_cmp_test/1,
f_of_x_test/1
]
).
%% @doc Lists every test case in this Common Test suite.
all() ->
    [
     eval_test,
     zeroize_test,
     self_subtract_test,
     add_zero_test,
     sub_zero_test,
     mul_poly_test,
     add_different_sizes_poly_test,
     negative_cmp_test,
     f_of_x_test
    ].

%% @doc Per-testcase setup: nothing to prepare.
init_per_testcase(_, Config) ->
    Config.

%% @doc Per-testcase teardown: nothing to clean up.
end_per_testcase(_, Config) ->
    Config.
%% @doc Polynomial evaluation and interpolation round trip.
eval_test(_Config) ->
    %% poly = 5x³ + x - 2 (coefficients listed lowest degree first;
    %% the samples below match this, e.g. f(2) = 40).
    Poly = erlang_tc_poly:from_coeffs([-2, 1, 0, 5]),
    Samples = [{-1, -8}, {2, 40}, {3, 136}, {5, 628}],
    %% check f(a) = b
    ?assert(
       lists:all(
         fun({Point, Answer}) ->
                 AnswerFr = erlang_tc_fr:into(Answer),
                 EvalFr = erlang_tc_poly:eval(Poly, Point),
                 erlang_tc_fr:cmp(AnswerFr, EvalFr)
         end,
         Samples
        )
      ),
    %% poly can be interpolated because num_sample >= degree + 1
    ?assert(erlang_tc_poly:cmp(Poly, erlang_tc_poly:interpolate(Samples))),
    ?assertEqual(3, erlang_tc_poly:degree(Poly)),
    ok.

%% @doc Zeroized polynomials/bivariate commitments: is_zero holds, the
%% zeroized commitment differs from the original, and it evaluates to the
%% G1 zero element everywhere.
zeroize_test(_Config) ->
    %% random_poly -> zeroize -> is_zero
    ?assert(erlang_tc_poly:is_zero(erlang_tc_poly:zeroize(erlang_tc_poly:random(4)))),
    BiPoly = erlang_tc_bipoly:random(3),
    BiCommitment = erlang_tc_bipoly:commitment(BiPoly),
    ZeroBiPoly = erlang_tc_bipoly:zeroize(BiPoly),
    ZeroBiCommitment = erlang_tc_bipoly:commitment(ZeroBiPoly),
    ?assertEqual(false, erlang_tc_bicommitment:cmp(ZeroBiCommitment, BiCommitment)),
    ?assert(
       erlang_tc_g1:cmp(
         erlang_tc_g1:zero(),
         erlang_tc_bicommitment:eval(
           ZeroBiCommitment,
           rand:uniform(100),
           rand:uniform(100)
          )
        )
      ),
    ok.
%% @doc f(x) - f(x) = 0 for a random polynomial.
self_subtract_test(_Config) ->
    %% f(x) - f(x) = 0
    P = erlang_tc_poly:random(2),
    ?assert(erlang_tc_poly:cmp(erlang_tc_poly:zero(), erlang_tc_poly:sub(P, P))).

%% @doc f(x) + 0 = f(x) (scalar addition identity).
add_zero_test(_Config) ->
    %% f(x) + 0 = f(x)
    P = erlang_tc_poly:random(2),
    ?assert(erlang_tc_poly:cmp(P, erlang_tc_poly:add_scalar(0, P))).

%% @doc f(x) - 0 = f(x) (scalar subtraction identity).
sub_zero_test(_Config) ->
    %% f(x) - 0 = f(x)
    P = erlang_tc_poly:random(2),
    ?assert(erlang_tc_poly:cmp(P, erlang_tc_poly:sub_scalar(0, P))).

%% @doc Polynomial multiplication distributes over evaluation:
%% (p1 * p2)(x) = p1(x) * p2(x), checked symbolically and at x = 5.
mul_poly_test(_Config) ->
    %% p1 = (x² + 1)
    %% p2 = (x - 1)
    %% p1 * p2 = p3 = x³ - x² + x - 1
    %% p1(p) * p2(p) = p3(p)
    P1 = erlang_tc_poly:from_coeffs([1, 0, 1]),
    P2 = erlang_tc_poly:from_coeffs([-1, 1]),
    P3 = erlang_tc_poly:from_coeffs([-1, 1, -1, 1]),
    ?assert(erlang_tc_poly:cmp(P3, erlang_tc_poly:mul(P1, P2))),
    P1Eval = erlang_tc_poly:eval(P1, 5),
    P2Eval = erlang_tc_poly:eval(P2, 5),
    P3Eval = erlang_tc_poly:eval(P3, 5),
    ?assert(erlang_tc_fr:cmp(erlang_tc_fr:into(26), P1Eval)),
    ?assert(erlang_tc_fr:cmp(erlang_tc_fr:into(4), P2Eval)),
    ?assert(erlang_tc_fr:cmp(erlang_tc_fr:into(104), P3Eval)),
    ok.

%% @doc Adding polynomials of different degrees yields the larger degree,
%% and subtracting one addend recovers the other exactly.
add_different_sizes_poly_test(_Config) ->
    P1 = erlang_tc_poly:random(5),
    P2 = erlang_tc_poly:random(8),
    AddedPoly = erlang_tc_poly:add(P1, P2),
    %% result should be of degree 8
    ?assertEqual(8, erlang_tc_poly:degree(AddedPoly)),
    %% if we subtract B from the result, we should get back A with degree 5
    SubPoly = erlang_tc_poly:sub(AddedPoly, P2),
    ?assertEqual(5, erlang_tc_poly:degree(SubPoly)),
    ?assert(erlang_tc_poly:cmp(P1, SubPoly)),
    ok.

%% @doc cmp/2 reports inequality for distinct polynomials.
negative_cmp_test(_Config) ->
    P1 = erlang_tc_poly:random(5),
    P2 = erlang_tc_poly:add(P1, P1),
    %% since P1 /= 2*P1
    ?assertEqual(false, erlang_tc_poly:cmp(P1, P2)),
    ok.
%% @doc f(x) = 5x²: evaluation at x = 2 yields 5 * 2 * 2 = 20.
%% (Fix: removed stray dataset-metadata residue fused onto the final line.)
f_of_x_test(_Config) ->
    %% f(x) = 5x², f(2) = 5 * 2 * 2
    P = erlang_tc_poly:from_coeffs([0, 0, 5]),
    Eval = erlang_tc_poly:eval(P, 2),
    ?assert(erlang_tc_fr:cmp(erlang_tc_fr:into(5 * 2 * 2), Eval)),
    ok.
-module(aesim_utils).
%=== INCLUDES ==================================================================
-include_lib("stdlib/include/assert.hrl").
-include("aesim_types.hrl").
%=== EXPORTS ===================================================================
-export([address_group/1]).
-export([format/2]).
-export([format_time/1]).
-export([format_minimal_time/1]).
-export([rand/1, rand/2]).
-export([skewed_rand/2]).
-export([rand_take/1, rand_take/2]).
-export([rand_pick/1, rand_pick/2, rand_pick/3]).
-export([list_add_new/2]).
-export([reduce_metric/1]).
-export([sum/1]).
%=== API FUNCTIONS =============================================================
%% @doc The "group" of an address: the first two octets of its IP
%% (i.e. its /16 network); the port is ignored.
-spec address_group(address()) -> address_group().
address_group({{Oct1, Oct2, _, _}, _Port}) -> {Oct1, Oct2}.
%% @doc Formats `Args' with `Fmt' (io_lib syntax) and flattens the result
%% into a plain string.
-spec format(string(), [term()]) -> string().
format(Fmt, Args) ->
    lists:flatten(io_lib:format(Fmt, Args)).
%% @doc Renders a millisecond duration as "HhMMmSSsMMM",
%% e.g. 3723004 -> "1h02m03s004".
-spec format_time(non_neg_integer()) -> string().
format_time(Milliseconds) ->
    TotalSec = Milliseconds div 1000,
    TotalMin = TotalSec div 60,
    lists:flatten(
      io_lib:format("~bh~2.10.0bm~2.10.0bs~3.10.0b",
                    [TotalMin div 60, TotalMin rem 60,
                     TotalSec rem 60, Milliseconds rem 1000])).
%% @doc Renders a millisecond duration compactly, omitting any component
%% whose value is zero: e.g. 3723004 -> "1h2m3s4", 3600000 -> "1h",
%% 0 -> "" (the empty string).
-spec format_minimal_time(non_neg_integer()) -> string().
format_minimal_time(Milliseconds) ->
    TotalSec = Milliseconds div 1000,
    TotalMin = TotalSec div 60,
    Parts = [{TotalMin div 60, "h"},
             {TotalMin rem 60, "m"},
             {TotalSec rem 60, "s"},
             {Milliseconds rem 1000, ""}],
    lists:flatten([[integer_to_list(V), Suffix] || {V, Suffix} <- Parts, V =/= 0]).
%% @doc Uniform random integer X with 0 =< X < Upper.
-spec rand(non_neg_integer()) -> non_neg_integer().
rand(Upper) -> rand:uniform(Upper) - 1.

%% @doc Generates a random integer with a skewed distribution.
%% A skew of `1' gives a uniform distribution; the further the skew rises
%% above `1', the more the distribution leans toward small values.
-spec skewed_rand(non_neg_integer(), float()) -> non_neg_integer().
skewed_rand(Upper, Skew) ->
    floor(Upper * math:pow(rand:uniform(), Skew)).

%% @doc Uniform random integer X with Lower =< X < Upper.
-spec rand(non_neg_integer(), non_neg_integer()) -> non_neg_integer().
rand(Lower, Upper) -> Lower + rand:uniform(Upper - Lower) - 1.
%% @doc Removes one uniformly-chosen element from a non-empty list; returns
%% the element together with the remaining list (original order preserved).
-spec rand_take(list()) -> {term(), list()}.
rand_take(Col) when is_list(Col) ->
    Index = rand:uniform(length(Col)) - 1,
    {Before, [Picked | After]} = lists:split(Index, Col),
    {Picked, Before ++ After}.

%% @doc Removes N distinct uniformly-chosen elements; returns them together
%% with the remaining list.
-spec rand_take(pos_integer(), list()) -> {[term()], list()}.
rand_take(1, Col) when is_list(Col) ->
    {Picked, Rest} = rand_take(Col),
    {[Picked], Rest};
rand_take(N, Col) when is_list(Col), N > 1 ->
    {Picked, Rest0} = rand_take(Col),
    {MorePicked, Rest} = rand_take(N - 1, Rest0),
    {[Picked | MorePicked], Rest}.
-spec rand_pick(list()) -> term().
rand_pick(Col) when is_list(Col) -> lists:nth(1 + rand(length(Col)), Col).
%% @doc Pick N distinct random elements from the list (without
%% replacement); thin wrapper over rand_take/2.
%% FIX(review): the -spec attributes for rand_pick/2 and rand_pick/3 were
%% previously swapped positionally (each sat above the other function),
%% which was misleading to readers; each spec now sits next to the
%% function it describes.
-spec rand_pick(pos_integer(), list()) -> [term()].
rand_pick(N, Col) when is_list(Col), N > 0 ->
    {Items, _} = rand_take(N, Col),
    Items.
%% @doc Pick up to N distinct random elements, skipping any element that
%% appears in Exclude.  Returns fewer than N items when the non-excluded
%% pool is exhausted.  Really basic implementation....
%% The first argument is widened to non_neg_integer() because N =:= 0 is
%% explicitly handled below.
-spec rand_pick(non_neg_integer(), list(), list()) -> [term()].
rand_pick(_, [], _) -> [];
rand_pick(0, _, _) -> [];
rand_pick(N, Col, Exclude) when is_list(Col), N > 0 ->
    %% Remove a random element; keep it only if it is not excluded.
    {L1, [R | L2]} = lists:split(rand(length(Col)), Col),
    case lists:member(R, Exclude) of
        true -> rand_pick(N, L1 ++ L2, Exclude);
        false -> [R | rand_pick(N - 1, L1 ++ L2, Exclude)]
    end.
%% Prepend Value to List unless it is already a member, in which case the
%% list is returned unchanged.
-spec list_add_new(term(), list()) -> list().
list_add_new(Value, List) ->
    AlreadyPresent = lists:member(Value, List),
    if
        AlreadyPresent -> List;
        true -> [Value | List]
    end.
%% Reduce a non-empty list of integer samples to {Min, Avg, Median, Max}.
%% Avg is rounded to the nearest integer; "Median" is the element at
%% position max(Count div 2, 1) of the sorted list (a lower-median
%% approximation, matching the original contract).
-spec reduce_metric([integer()]) -> {integer(), integer(), integer(), integer()}.
reduce_metric([_|_] = Values) ->
    Sorted = lists:sort(Values),
    Count = length(Values),
    Total = lists:sum(Values),
    Avg = round(Total / Count),
    Median = lists:nth(max(Count div 2, 1), Sorted),
    {hd(Sorted), Avg, Median, lists:last(Sorted)}.
%% @doc Sum a list of integers.
%% Uses the stdlib lists:sum/1 instead of a hand-rolled fold.
-spec sum([integer()]) -> integer().
sum(Values) ->
    lists:sum(Values).
%=== INTERNAL FUNCTIONS ========================================================
%% Minimum where 'undefined' means "no value seen yet" (used as the fold
%% seed in reduce_metric/1).
safe_min(undefined, V) -> V;
safe_min(V1, V2) -> min(V1, V2).
%% Maximum counterpart of safe_min/2; 'undefined' means "no value yet".
safe_max(undefined, V) -> V;
safe_max(V1, V2) -> max(V1, V2). | src/aesim_utils.erl | 0.503418 | 0.578657 | aesim_utils.erl | starcoder |
%% Copyright (c) 2020 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(emqx_rule_sqltester).
-include("rule_engine.hrl").
-include("rule_events.hrl").
-include_lib("emqx/include/logger.hrl").
-export([ test/1
]).
%% Test a rule SQL statement against a user-supplied message context
%% without installing the rule.  Returns {ok, SelectedData},
%% {error, nomatch} when the context topic does not match the rule's
%% topic filters, and raises the parse error otherwise.
-spec(test(#{}) -> {ok, Result::map()} | no_return()).
test(#{<<"rawsql">> := Sql, <<"ctx">> := Context}) ->
    case emqx_rule_sqlparser:parse_select(Sql) of
        {ok, Select} ->
            InTopic = maps:get(<<"topic">>, Context, <<>>),
            EventTopics = emqx_rule_sqlparser:select_from(Select),
            case lists:all(fun is_publish_topic/1, EventTopics) of
                true ->
                    %% test if the topic matches the topic filters in the rule
                    case emqx_rule_utils:can_topic_match_oneof(InTopic, EventTopics) of
                        true -> test_rule(Sql, Select, Context, EventTopics);
                        false -> {error, nomatch}
                    end;
                false ->
                    %% the rule is for both publish and events, test it directly
                    test_rule(Sql, Select, Context, EventTopics)
            end;
        %% parse_select failed: raise the parse error to the caller
        Error -> error(Error)
    end.
%% Build a throw-away rule (with a logging-only action), run it once over
%% the context, and clean the temporary action params up afterwards.
test_rule(Sql, Select, Context, EventTopics) ->
    %% Fresh ids so the temporary rule/action cannot collide with real ones.
    RuleId = iolist_to_binary(["test_rule", emqx_rule_id:gen()]),
    ActInstId = iolist_to_binary(["test_action", emqx_rule_id:gen()]),
    Rule = #rule{
        id = RuleId,
        rawsql = Sql,
        for = EventTopics,
        is_foreach = emqx_rule_sqlparser:select_is_foreach(Select),
        fields = emqx_rule_sqlparser:select_fields(Select),
        doeach = emqx_rule_sqlparser:select_doeach(Select),
        incase = emqx_rule_sqlparser:select_incase(Select),
        conditions = emqx_rule_sqlparser:select_where(Select),
        actions = [#action_instance{
                    id = ActInstId,
                    name = test_rule_sql}]
    },
    %% Fill missing context fields from the default envs of the first topic.
    FullContext = fill_default_values(hd(EventTopics), emqx_rule_maps:atom_key_map(Context)),
    try
        ok = emqx_rule_registry:add_action_instance_params(
               #action_instance_params{id = ActInstId,
                                       params = #{},
                                       apply = sql_test_action()}),
        emqx_rule_runtime:apply_rule(Rule, FullContext)
    of
        {ok, Data} -> {ok, flatten(Data)};
        {error, nomatch} -> {error, nomatch}
    after
        %% Always unregister the temporary action instance.
        ok = emqx_rule_registry:remove_action_instance_params(ActInstId)
    end.
%% A topic names a publish stream unless it is one of the "$events/..."
%% event topics.
is_publish_topic(<<"$events/", _EventName/binary>>) -> false;
is_publish_topic(_AnyOtherTopic) -> true.
%% Concatenate a list of result lists into one; a single trailing element
%% is returned as-is (it may be a non-list rule result).
flatten([]) -> [];
flatten([OnlyElement]) -> OnlyElement;
flatten([Head | Tail]) when is_list(Head) ->
    Head ++ flatten(Tail).
%% The action applied by the temporary test rule: it only logs a marker
%% and passes the selected data through unchanged.
sql_test_action() ->
    fun(Data, _Envs) ->
        ?LOG(info, "Testing Rule SQL OK"), Data
    end.
%% Merge the user-supplied context over the default event environment for
%% the given event topic; the caller's Context values win.
fill_default_values(Event, Context) ->
    maps:merge(?EG_ENVS(Event), Context). | src/emqx_rule_sqltester.erl | 0.54698 | 0.420391 | emqx_rule_sqltester.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(kai_store_SUITE).
-compile(export_all).
-include("kai.hrl").
-include("kai_test.hrl").
%% Common Test callback: the list of test cases in this suite.
all() -> [test_ets, test_dets,
          test_update_conflict_ets, test_update_conflict_dets,
          test_perf].
%% Shared store test body: exercises put/get/list/delete/info against a
%% kai_store started with the given backend configuration.
test(Conf) ->
    kai_config:start_link(Conf),
    %% NOTE(review): this second start_link overrides Conf with a fixed
    %% configuration; its return value (likely {error, already_started})
    %% is ignored — confirm this duplication is intentional.
    kai_config:start_link([
        {hostname, "localhost"},
        {rpc_port, 11511},
        {rpc_max_processes, 2},
        {max_connections, 32},
        {n, 1}, {r, 1}, {w, 1},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2}
    ]),
    kai_version:start_link(),
    kai_store:start_link(),
    %% NOTE(review): erlang:now/0 is deprecated since OTP 18; consider
    %% os:timestamp/0 or erlang:timestamp/0 for last_modified.
    Data1 = #data{
        key = "item-1",
        bucket = 3,
        last_modified = now(),
        checksum = erlang:md5(<<"value-1">>),
        flags = "0",
        vector_clocks = vclock:fresh(),
        value = (<<"value-1">>)
    },
    kai_store:put(Data1),
    %% A stored item is returned verbatim; a missing key yields undefined.
    ?assertEqual(
        Data1,
        kai_store:get(#data{key="item-1", bucket=3})
    ),
    ?assertEqual(
        undefined,
        kai_store:get(#data{key="item-2", bucket=1})
    ),
    Data2 = #data{
        key = "item-2",
        bucket = 1,
        last_modified = now(),
        checksum = erlang:md5(<<"value-2">>),
        flags = "0",
        vector_clocks = [],
        value = (<<"value-2">>)
    },
    kai_store:put(Data2),
    ?assertEqual(
        Data2,
        kai_store:get(#data{key="item-2", bucket=1})
    ),
    Data3 = #data{
        key = "item-3",
        bucket = 3,
        last_modified = now(),
        checksum = erlang:md5(<<"value-3">>),
        flags = "0",
        vector_clocks = [],
        value = (<<"value-3">>)
    },
    kai_store:put(Data3),
    ?assertEqual(
        Data3,
        kai_store:get(#data{key="item-3", bucket=3})
    ),
    %% list/1 returns the items of one bucket, tagged with list_of_data.
    {list_of_data, ListOfData1} = kai_store:list(1),
    ?assertEqual(1, length(ListOfData1)),
    ?assert(lists:keymember("item-2", 2, ListOfData1)),
    {list_of_data, ListOfData2} = kai_store:list(2),
    ?assertEqual(0, length(ListOfData2)),
    {list_of_data, ListOfData3} = kai_store:list(3),
    ?assertEqual(2, length(ListOfData3)),
    ?assert(lists:keymember("item-1", 2, ListOfData3)),
    ?assert(lists:keymember("item-3", 2, ListOfData3)),
    %% Overwriting an existing key replaces its stored value.
    Data1b = #data{
        key = "item-1",
        bucket = 3,
        last_modified = now(),
        checksum = erlang:md5(<<"value-1">>),
        flags = "0",
        vector_clocks = [],
        value = (<<"value-1b">>)
    },
    kai_store:put(Data1b),
    ?assertEqual(
        Data1b,
        kai_store:get(#data{key="item-1", bucket=3})
    ),
    %% Deleting removes the key from both get and list.
    kai_store:delete(#data{key="item-1", bucket=3}),
    ?assertEqual(
        undefined,
        kai_store:get(#data{key="item-1", bucket=3})
    ),
    {list_of_data, ListOfData4} = kai_store:list(3),
    ?assertEqual(1, length(ListOfData4)),
    ?assert(lists:keymember("item-3", 2, ListOfData4)),
    %% info/1 reports store-wide statistics.
    ?assert(is_integer(kai_store:info(bytes))),
    ?assertEqual(2, kai_store:info(size)),
    kai_store:stop(),
    kai_version:stop(),
    kai_config:stop(),
    ok.
%% CT case info function (no per-case config).
test_ets() -> [].
%% Run the shared store test against the ETS backend.
test_ets(_Conf) ->
    test([
        {hostname, "localhost"},
        {rpc_port, 11011},
        {n, 3},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2},
        {store, ets}
    ]).
%% CT case info function (no per-case config).
test_dets() -> [].
%% Run the shared store test against the DETS backend, then remove the
%% table files it leaves in the current directory.
test_dets(_Conf) ->
    test([
        {hostname, "localhost"},
        {rpc_port, 11011},
        {n, 3},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2},
        {store, dets},
        {dets_dir, "."},
        {number_of_tables, 2}
    ]),
    file:delete("./1"), file:delete("./2").
%% Shared body: verify that a put whose vector clock conflicts with
%% (is concurrent to) the stored one is rejected with {error, Reason}.
%% FIX(review): replaced the deprecated erlang:now/0 with os:timestamp/0
%% (same {MegaSecs, Secs, MicroSecs} shape, no deprecation warning).
test_update_conflict(Conf) ->
    kai_config:start_link(Conf),
    %% NOTE(review): this second start_link overrides Conf with a fixed
    %% configuration, mirroring the other cases in this suite.
    kai_config:start_link([
        {hostname, "localhost"},
        {rpc_port, 11511},
        {rpc_max_processes, 2},
        {max_connections, 32},
        {n, 1}, {r, 1}, {w, 1},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2}
    ]),
    kai_version:start_link(),
    kai_store:start_link(),
    Data1 = #data{
        key = "item-1",
        bucket = 3,
        last_modified = os:timestamp(),
        checksum = erlang:md5(<<"value-1">>),
        flags = "0",
        vector_clocks = vclock:increment(node1, vclock:fresh()),
        value = (<<"value-1">>)
    },
    kai_store:put(Data1),
    %% conflict vclock: incremented on a different node, so concurrent
    %% with (not a descendant of) Data1's clock.
    Data2 = Data1#data{
        vector_clocks = vclock:increment(node2, vclock:fresh())
    },
    {error, Reason} = kai_store:put(Data2),
    ct:pal(default, "~p: ~p~n", ["Reason", Reason]),
    kai_store:stop(),
    kai_version:stop(),
    kai_config:stop(),
    ok.
%% CT case info function (no per-case config).
test_update_conflict_ets() -> [].
%% Conflict test against the ETS backend.
test_update_conflict_ets(_Conf) ->
    test_update_conflict([
        {hostname, "localhost"},
        {rpc_port, 11011},
        {n, 3},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2},
        {store, ets}
    ]).
%% CT case info function (no per-case config).
test_update_conflict_dets() -> [].
%% Conflict test against the DETS backend; cleans up the table files.
test_update_conflict_dets(_Conf) ->
    test_update_conflict([
        {hostname, "localhost"},
        {rpc_port, 11011},
        {n, 3},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2},
        {store, dets},
        {dets_dir, "."},
        {number_of_tables, 2}
    ]),
    file:delete("./1"), file:delete("./2").
%% Store T distinct items; timed by test_perf/1 via timer:tc.
%% NOTE(review): erlang:now/0 is deprecated; consider os:timestamp/0.
test_perf_put(T) ->
    lists:foreach(
      fun(I) ->
              Key = "item-" ++ integer_to_list(I),
              Value = list_to_binary("value-" ++ integer_to_list(I)),
              Data = #data{
                  key = Key,
                  bucket = 3,
                  last_modified = now(),
                  checksum = erlang:md5(Value),
                  flags = "0",
                  vector_clocks = [],
                  value = Value
              },
              kai_store:put(Data)
      end,
      lists:seq(1, T)
    ).
%% Read back T keys; timed by test_perf/1.  Note: reads bucket 0 while
%% test_perf_put/1 wrote to bucket 3, so these are all misses.
test_perf_get(T) ->
    lists:foreach(
      fun(I) ->
              Key = "item-" ++ integer_to_list(I),
              kai_store:get(#data{key=Key, bucket=0})
      end,
      lists:seq(1, T)
    ).
%% CT case info function (no per-case config).
test_perf() -> [].
%% Micro-benchmark: assert that the average put/get latency stays below
%% 100 microseconds per operation over T operations.
test_perf(_Conf) ->
    kai_config:start_link([
        {hostname, "localhost"},
        {rpc_port, 11011},
        {n, 3},
        {number_of_buckets, 8},
        {number_of_virtual_nodes, 2},
        {store, ets}
    ]),
    kai_store:start_link(),
    T = 10000,
    {Usec, _} = timer:tc(?MODULE, test_perf_put, [T]),
    ?assert(Usec < 100*T),
    io:format("average time to put data: ~p [usec]", [Usec/T]),
    {Usec2, _} = timer:tc(?MODULE, test_perf_get, [T]),
    ?assert(Usec2 < 100*T),
    io:format("average time to get data: ~p [usec]", [Usec2/T]),
    kai_store:stop(),
    kai_config:stop().
%% Convenience wrapper for printing a labelled term to the CT log.
p(Label, Message) ->
    ct:pal(default, "~p: ~p~n", [Label, Message]). | test/kai_store_SUITE.erl | 0.553143 | 0.49707 | kai_store_SUITE.erl | starcoder |
%%--------------------------------------------------------------------
%% Copyright (c) 2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_rpc_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("proper/include/proper.hrl").
-include_lib("eunit/include/eunit.hrl").
%% CT callback: discover all t_* cases in this module.
all() -> emqx_ct:all(?MODULE).
%% Property-based smoke test of the emqx_rpc wrappers: gen_rpc is started
%% with a 1 ms receive timeout so every call fails fast ({badrpc, _}),
%% and the logger is silenced while the properties run.
t_prop_rpc(_) ->
    ok = load(),
    Opts = [{to_file, user}, {numtests, 10}],
    {ok, _Apps} = application:ensure_all_started(gen_rpc),
    ok = application:set_env(gen_rpc, call_receive_timeout, 1),
    ok = emqx_logger:set_log_level(emergency),
    ?assert(proper:quickcheck(prop_node(), Opts)),
    ?assert(proper:quickcheck(prop_node_with_key(), Opts)),
    ?assert(proper:quickcheck(prop_nodes(), Opts)),
    ?assert(proper:quickcheck(prop_nodes_with_key(), Opts)),
    ok = application:stop(gen_rpc),
    ok = unload().
%% Property: cast/4 always returns true and call/4 returns either a
%% {badrpc, _} tuple or an integer result, for any generated node name.
prop_node() ->
    ?FORALL(Node, nodename(),
            begin
                ?assert(emqx_rpc:cast(Node, erlang, system_time, [])),
                case emqx_rpc:call(Node, erlang, system_time, []) of
                    {badrpc, _Reason} -> true;
                    Delivery when is_integer(Delivery) -> true;
                    _Other -> false
                end
            end).
%% Same property for the keyed cast/5 and call/5 variants.
prop_node_with_key() ->
    ?FORALL({Node, Key}, nodename_with_key(),
            begin
                ?assert(emqx_rpc:cast(Key, Node, erlang, system_time, [])),
                case emqx_rpc:call(Key, Node, erlang, system_time, []) of
                    {badrpc, _Reason} -> true;
                    Delivery when is_integer(Delivery) -> true;
                    _Other -> false
                end
            end).
%% Property: multicall/4 yields {badrpc, _} or a {Results, BadNodes}
%% pair of lists, for any generated node list.
prop_nodes() ->
    ?FORALL(Nodes, nodesname(),
            begin
                case emqx_rpc:multicall(Nodes, erlang, system_time, []) of
                    {badrpc, _Reason} -> true;
                    {RealResults, RealBadNodes}
                      when is_list(RealResults);
                           is_list(RealBadNodes) ->
                        true;
                    _Other -> false
                end
            end).
%% Same property for the keyed multicall/5 variant.
prop_nodes_with_key() ->
    ?FORALL({Nodes, Key}, nodesname_with_key(),
            begin
                case emqx_rpc:multicall(Key, Nodes, erlang, system_time, []) of
                    {badrpc, _Reason} -> true;
                    {RealResults, RealBadNodes}
                      when is_list(RealResults);
                           is_list(RealBadNodes) ->
                        true;
                    _Other -> false
                end
            end).
%%--------------------------------------------------------------------
%% helper
%%--------------------------------------------------------------------
%% Mock gen_rpc:multicall/4 to use a 1-second timeout so properties
%% terminate quickly even for unreachable nodes.
load() ->
    ok = meck:new(gen_rpc, [passthrough, no_history]),
    ok = meck:expect(gen_rpc, multicall,
                     fun(Nodes, Mod, Fun, Args) ->
                             gen_rpc:multicall(Nodes, Mod, Fun, Args, 1)
                     end).
%% Remove the gen_rpc mock installed by load/0.
unload() ->
    ok = meck:unload(gen_rpc).
%%--------------------------------------------------------------------
%% Generator
%%--------------------------------------------------------------------
%% Generator: a random node name atom "prefix@host".
%% NOTE(review): list_to_atom/1 on generated strings grows the atom table;
%% acceptable in a test generator, never do this with untrusted input.
nodename() ->
    ?LET({NodePrefix, HostName},
         {node_prefix(), hostname()},
         begin
             Node = NodePrefix ++ "@" ++ HostName,
             list_to_atom(Node)
         end).
%% Generator: {NodeNameAtom, Key} with Key in 0..10.
nodename_with_key() ->
    ?LET({NodePrefix, HostName, Key},
         {node_prefix(), hostname(), choose(0, 10)},
         begin
             Node = NodePrefix ++ "@" ++ HostName,
             {list_to_atom(Node), Key}
         end).
%% Generator: a list of node names, or the fixed local test node.
nodesname() ->
    oneof([list(nodename()), ['emqxct@127.0.0.1']]).
%% Generator: {NodeList, Key} variant of nodesname/0.
nodesname_with_key() ->
    oneof([{list(nodename()), choose(0, 10)}, {['emqxct@127.0.0.1'], 1}]).
%% Generator: the fixed prefix "emqxct" or a short random lowercase word.
node_prefix() ->
    oneof(["emqxct", text_like()]).
%% Generator: 1-5 random lowercase letters.
text_like() ->
    ?SUCHTHAT(Text, list(range($a, $z)), (length(Text) =< 5 andalso length(Text) > 0)).
%% Generator: a random IPv4/IPv6 literal or a well-known local host.
hostname() ->
    oneof([ipv4_address(), ipv6_address(), "127.0.0.1", "localhost"]).
%% Generator: dotted-decimal IPv4 literal with each octet in 0..255.
ipv4_address() ->
    ?LET({Num1, Num2, Num3, Num4},
         { choose(0, 255)
         , choose(0, 255)
         , choose(0, 255)
         , choose(0, 255)},
         make_ip([Num1, Num2, Num3, Num4], ipv4)).
%% Generator: colon-separated IPv6-like literal of six 16-bit groups
%% (rendered in decimal, not hex — sufficient for these properties).
ipv6_address() ->
    ?LET({Num1, Num2, Num3, Num4, Num5, Num6},
         { choose(0, 65535)
         , choose(0, 65535)
         , choose(0, 65535)
         , choose(0, 65535)
         , choose(0, 65535)
         , choose(0, 65535)},
         make_ip([Num1, Num2, Num3, Num4, Num5, Num6], ipv6)).
%% Join a list of integers with "." (ipv4) or ":" (ipv6); any other
%% input falls back to the loopback address.
make_ip(NumList, ipv4) when is_list(NumList) ->
    string:join([integer_to_list(Num) || Num <- NumList], ".");
make_ip(NumList, ipv6) when is_list(NumList) ->
    string:join([integer_to_list(Num) || Num <- NumList], ":");
make_ip(_List, _protocol) ->
    "127.0.0.1". | test/emqx_rpc_SUITE.erl | 0.600891 | 0.516108 | emqx_rpc_SUITE.erl | starcoder |
%% ekvs_vcmanager
%% Implements functionalities for handling vector clocks
%% Store the vector clock of the current node
-module(ekvs_vcmanager).
-behaviour(gen_server).
%% Server functions
-export([start_link/0]).
-export([view_change/1]).
-export([merge_vcs/1]).
-export([new_event/1]).
-export([update_vc/1]).
-export([get_vc/0]).
-export([get_cp/0]).
%% -export([broadcast_vc_to/1]).
-export([get_timestamp/0]).
-export([cp_to_vc/1]).
-export([vc_to_cp/1]).
-export([happens_before/2]).
-export([happens_before_or_equal/2]).
%% Server Callbacks
-export([init/1, terminate/2, handle_info/2, handle_cast/2, handle_call/3, code_change/3]).
-ifdef(TEST).
-compile(export_all).
-endif.
-record(state, {vector_clock}).
%%%%% Interface %%%%%
%% Start the vector-clock manager as a locally registered gen_server.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
view_change(AllNodes) ->
    %% Update the VC of the current node after a view change
    gen_server:call(?MODULE, {view_change, AllNodes}).
merge_vcs(VCs) ->
    %% Given a list of vector clocks, merge them with the
    %% vector clock of the current node and return the result.
    %% The result is also used to update the vector clock
    %% of the current node
    gen_server:call(?MODULE, {merge_vcs, VCs}).
%% Record a new local event: merge the request's causal payload into the
%% node's clock and tick this node's entry; returns the resulting VC.
new_event(CausalPayload) ->
    gen_server:call(?MODULE, {new_event, CausalPayload}).
%% Merge the given vector clock into the node's clock.
update_vc(VC) ->
    gen_server:call(?MODULE, {update_vc, VC}).
%% Read the current vector clock (as a map).
get_vc() ->
    gen_server:call(?MODULE, get_vc).
%% Read the current vector clock encoded as a causal-payload binary.
get_cp() ->
    gen_server:call(?MODULE, get_cp).
%% broadcast_vc_to(Nodes) ->
%% Broadcast the vector clock of the current node to all
%% the specified nodes
%% gen_server:call(?MODULE, {broadcasted_vc, Nodes}).
%%%%% Server Callbacks %%%%%
%% gen_server callback: initial state holds the default vector clock.
init([]) ->
    %% All nodes are initialized with clock 0
    VC = get_default_vc(),
    {ok, #state{vector_clock=VC}}.
%% gen_server callback: all API requests are synchronous calls.
handle_call({view_change, AllNodes}, _From, #state{vector_clock=VC}) ->
    %% Increment the number of view changes
    ViewChanges = {view_changes, maps:get(view_changes, VC) + 1},
    %% Keep known nodes' clocks, initialize unknown nodes to 0.
    NewVCList = [{Node, maps:get(Node, VC, 0)} || Node <- AllNodes],
    NewVC = maps:from_list(NewVCList ++ [ViewChanges]),
    {reply, ok, #state{vector_clock=NewVC}};
handle_call({new_event, CausalPayload}, _From, #state{vector_clock=VC}) ->
    ekvs_debug:call({new_event,CausalPayload, VC}),
    %% Merge the caller's causal context, then tick this node's entry.
    RequestVC = cp_to_vc(CausalPayload),
    MergedVC = get_merged_vcs([RequestVC, VC]),
    Clock = maps:get(node(), MergedVC),
    NewVC = maps:put(node(), Clock+1, MergedVC),
    ekvs_debug:return({new_event,NewVC}),
    {reply, NewVC, #state{vector_clock=NewVC}};
handle_call({merge_vcs, VCs}, _From, #state{vector_clock=VC}) ->
    MergedVC = get_merged_vcs(VCs ++ [VC]),
    {reply, MergedVC, #state{vector_clock=MergedVC}};
handle_call({update_vc, NewVC}, _From, #state{vector_clock=VC}) ->
    MergedVC = get_merged_vcs([VC, NewVC]),
    {reply, ok, #state{vector_clock=MergedVC}};
handle_call(get_vc, _From, S=#state{vector_clock=VC}) ->
    {reply, VC, S};
handle_call(get_cp, _From, S=#state{vector_clock=VC}) ->
    {reply, vc_to_cp(VC), S}.
%% gen_server callback for raw messages.
handle_info({debug, Pid}, View) ->
    %% Hack to do some debug
    Pid ! View,
    {noreply, View};
handle_info(Msg, View) ->
    %% NOTE(review): io:format in a server; consider the logger instead.
    io:format("Unknown message: ~p~n", [Msg]),
    {noreply, View}.
%% No casts are part of this server's protocol; log and ignore.
handle_cast(Msg, State) ->
    io:format("Unknown message: ~p~n", [Msg]),
    {noreply, State}.
%% Hot-upgrade hook: state format is unchanged.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% No resources to release on shutdown.
terminate(_Reason, _State) ->
    ok.
%%%%% Util functions %%%%%
%% Return the current local wall-clock time as a binary string such as
%% <<"2018-03-06 02:30:13.143818">>.
%% All fields are zero-padded to a fixed width so the binaries compare
%% lexicographically (e.g. with min/2), which is the documented intent.
%% FIX(review): the previous unpadded format ("2018-3-6 2:30:13...")
%% broke lexicographic ordering (e.g. "3" sorts after "10").
%% https://stackoverflow.com/questions/37190769/how-to-compare-two-timestamps-with-erlang
get_timestamp() ->
    TS = {_, _, MicroSecs} = os:timestamp(),
    {{Year, Month, Day}, {Hour, Minute, Second}} = calendar:now_to_local_time(TS),
    Str = io_lib:format("~4..0w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w.~6..0w",
                        [Year, Month, Day, Hour, Minute, Second, MicroSecs]),
    list_to_binary(Str).
%% Build the initial vector clock covering every node in the view.
get_default_vc() ->
    Nodes = ekvs_viewmanager:get_all_nodes(),
    %% All nodes are initialized with clock 0
    VCList = lists:map(fun(Node) -> {Node, 0} end, Nodes),
    %% Number of view_changes is initialized to 0
    maps:from_list(VCList ++ [{view_changes, 0}]).
%% CausalPayload is a binary string in the format
%% <<"node@<ip1>:Clock1,...,node@<ipN>:ClockN,view_changes=NumViewChanges">>
%%
%% VC is the map representation of that string
%% 'node@<ip1>' => Clock1
%% ...
%% 'node@<ipN>' => ClockN
%% 'view_changes' => NumViewChanges
%%
%% Decode a causal-payload binary ("node@ip:Clock,...,view_changes:N")
%% into the map representation; an empty payload means "no causal
%% context" and yields the default vector clock.
%% SECURITY NOTE(review): binary_to_atom/2 on request-supplied payloads
%% can grow the atom table without bound (atoms are never GC'd);
%% consider binary_to_existing_atom/2 if all node atoms are known.
cp_to_vc(<<"">>) -> get_default_vc();
cp_to_vc(CausalPayload) ->
    NodeClockStrList = string:split(CausalPayload, ",", all),
    StrToPair = fun(NodeClockStr) ->
                    [Node, Clock] = string:split(NodeClockStr, ":", all),
                    {binary_to_atom(Node, latin1), binary_to_integer(Clock)}
                end,
    NodeClockList = lists:map(StrToPair, NodeClockStrList),
    maps:from_list(NodeClockList).
%% Encode a vector-clock map as a causal-payload binary:
%% each entry becomes "Node:Clock" and entries are comma-separated.
vc_to_cp(VC) ->
    Entries = [atom_to_list(Node) ++ ":" ++ integer_to_list(Clock)
               || {Node, Clock} <- maps:to_list(VC)],
    list_to_binary(string:join(Entries, ",")).
%% Standard strict happens-before for vector clocks: every entry of VCa
%% is =< the matching entry of VCb and at least one is strictly <.
%% Both clocks must cover the same node set (asserted below).
happens_before(VCa, VCb) when is_map(VCa) andalso is_map(VCb) ->
    ekvs_debug:call({happens_before, VCa, VCb}),
    %% Standard happens_before relationship for vector clocks
    %% This function should be called only if the VCa and VCb
    %% are comparable
    true = comparable_vcs([VCa, VCb]),
    VCaKeys = maps:keys(VCa),
    VCbKeys = maps:keys(VCb),
    %% First make sure the nodes are the same (Keys)
    VCaKeys = VCbKeys,
    %% Then get the clock values
    VCaClocks = [maps:get(Node, VCa) || Node <- VCaKeys],
    VCbClocks = [maps:get(Node, VCb) || Node <- VCbKeys],
    Clocks = lists:zip(VCaClocks, VCbClocks),
    %% All have to be =<, at least
    LessOrEqual = lists:all(fun({Ca, Cb}) -> Ca =< Cb end, Clocks),
    Less = lists:any(fun({Ca, Cb}) -> Ca < Cb end, Clocks),
    LessOrEqual andalso Less.
%% Non-strict variant: happens-before or exactly equal.
happens_before_or_equal(VCa, VCb) when is_map(VCa) andalso is_map(VCb) ->
    happens_before(VCa, VCb) orelse VCa =:= VCb.
%% A list of vector clocks is comparable iff all of them refer to the
%% same set of nodes.  A singleton list is trivially comparable.
%% FIX(review): removed a stray io:format/2 debug print that wrote to
%% stdout on every comparison.
comparable_vcs([_VC]) -> true;
comparable_vcs(VCs) ->
    [FirstKeys | RestKeys] = [maps:keys(VC) || VC <- VCs],
    Reference = lists:sort(FirstKeys),
    SameNodes = fun(OtherVCNodes) ->
                    Reference =:= lists:sort(OtherVCNodes) end,
    %% Return true if they are all comparable
    lists:all(SameNodes, RestKeys).
%% Merge a list of vector clocks entry-wise with max/1, considering only
%% the clocks from the most recent view (highest view_changes count),
%% since clocks from older views cover a different node set.
get_merged_vcs(VCs) ->
    %% Vector Clocks might be non-comparable because of view_changes
    %% Perform the merge only on most recent ones
    ekvs_debug:call({get_merged_vcs, VCs}),
    ViewChanges = [maps:get(view_changes, VC) || VC <- VCs],
    Min = lists:min(ViewChanges),
    Max = lists:max(ViewChanges),
    CompVCs = case Min =:= Max of
                  false -> %% Take only the most recent vector clocks
                      IsRecent = fun(VC) -> maps:get(view_changes, VC) =:= Max end,
                      lists:filter(IsRecent, VCs);
                  true -> %% All vector clocks have the same number of view_changes
                      VCs
              end,
    %% NOTE(review): debug io:format left in library code; consider removing.
    io:format("CompsVCs ~p~n", [CompVCs]),
    %% Make sure the vector clocks are now comparable
    true = comparable_vcs(CompVCs),
    Nodes = maps:keys(hd(CompVCs)),
    %% Entry-wise maximum across the comparable clocks.
    GetNodeClockPair = fun(Node) ->
                           Clock = lists:max([maps:get(Node, VC) || VC <- CompVCs]),
                           {Node, Clock} end,
    MergedVCList = lists:map(GetNodeClockPair, Nodes),
    MergedVC = maps:from_list(MergedVCList),
    ekvs_debug:return({get_merged_vcs, MergedVC}),
    MergedVC. | src/ekvs_vcmanager.erl | 0.530966 | 0.476641 | ekvs_vcmanager.erl | starcoder |
%% @copyright 2015 <NAME> All Rights Reserved.
%%
%% @doc Binary pattern match Based Mustach template engine for Erlang/OTP.
%%
%% This library support all of mustache syntax. <br />
%% Please refer to [the documentation for how to use the mustache](http://mustache.github.io/mustache.5.html) as the need arises.
%%
-module(bbmustache).
%%----------------------------------------------------------------------------------------------------------------------
%% Exported API
%%----------------------------------------------------------------------------------------------------------------------
-export([
render/2,
render/3,
parse_binary/1,
parse_file/1,
compile/2,
compile/3,
trim_whitespace/1,
dict_from_file/1,
map_from_file/1,
map_from_line/1,
map_from_file_line/1
]).
-export_type([
template/0,
data/0,
option/0
]).
%%----------------------------------------------------------------------------------------------------------------------
%% Defines & Records & Types
%%----------------------------------------------------------------------------------------------------------------------
-define(PARSE_ERROR, incorrect_format).
-define(FILE_ERROR, file_not_found).
-define(COND(Cond, TValue, FValue),
case Cond of true -> TValue; false -> FValue end).
-type key() :: binary().
-type source() :: binary().
%% If you use lamda expressions, the original text is necessary.
%%
%% ```
%% e.g.
%% template:
%% {{#lamda}}a{{b}}c{{/lamda}}
%% parse result:
%% {'#', <<"lamda">>, [<<"a">>, {'n', <<"b">>}, <<"c">>], <<"a{{b}}c">>}
%% '''
%%
%% NOTE:
%% Since the binary reference is used internally, it is not a capacitively large waste.
%% However, the greater the number of tags used, it should use the wasted memory.
-type tag() :: {n, key()}
| {'&', key()}
| {'#', key(), [tag()], source()}
| {'^', key(), [tag()]}
| binary(). % plain text
-record(?MODULE,
{
data :: [tag()]
}).
-opaque template() :: #?MODULE{}.
%% @see parse_binary/1
%% @see parse_file/1
-record(state,
{
dirname = <<>> :: file:filename_all(),
start = <<"{{">> :: binary(),
stop = <<"}}">> :: binary()
}).
-type state() :: #state{}.
-type data_key() :: atom() | binary() | string().
%% You can choose one from these as the type of key in {@link data/0}.
-type data_value() :: data() | iodata() | number() | atom() | fun((data(), function()) -> iodata()).
%% Function is intended to support a lambda expression.
-type assoc_data() :: [{atom(), data_value()}] | [{binary(), data_value()}] | [{string(), data_value()}].
-type option() :: {key_type, atom | binary | string}.
%% - key_type: Specify the type of the key in {@link data/0}. Default value is `string'.
-ifdef(namespaced_types).
-type maps_data() :: #{atom() => data_value()} | #{binary() => data_value()} | #{string() => data_value()}.
-type data() :: maps_data() | assoc_data().
-else.
-type data() :: assoc_data().
-endif.
%% All key in assoc list or maps must be same type.
%% @see render/2
%% @see compile/2
-type endtag() :: {endtag, {state(), EndTag :: binary(), LastTagSize :: non_neg_integer(), Rest :: binary(), Result :: [tag()]}}.
%%----------------------------------------------------------------------------------------------------------------------
%% Exported Functions
%%----------------------------------------------------------------------------------------------------------------------
%% @equiv render(Bin, Data, [])
%% One-shot render: parse the template binary and compile with defaults.
-spec render(binary(), data()) -> binary().
render(Bin, Data) ->
    render(Bin, Data, []).
%% One-shot render with explicit options.
-spec render(binary(), data(), [option()]) -> binary().
render(Bin, Data, Options) ->
    compile(parse_binary(Bin), Data, Options).
%% @doc Create a {@link template/0} from a binary.
%% Parse a template held in a binary (no partials directory context).
-spec parse_binary(binary()) -> template().
parse_binary(Bin) when is_binary(Bin) ->
    parse_binary_impl(#state{}, Bin).
%% Parse a template file; its directory becomes the search root for
%% partials ({{> name}}).  Raises ?FILE_ERROR if the file is unreadable.
-spec parse_file(file:filename_all()) -> template().
parse_file(Filename) ->
    case file:read_file(Filename) of
        {ok, Bin} -> parse_binary_impl(#state{dirname = filename:dirname(Filename)}, Bin);
        _         -> error(?FILE_ERROR, [Filename])
    end.
%% @equiv compile(Template, Data, [])
-spec compile(template(), data()) -> binary().
compile(Template, Data) ->
    compile(Template, Data, []).
%% @doc Embed the data in the template.
%%
%% ```
%% 1> Template = bbmustache:parse_binary(<<"{{name}}">>).
%% 2> bbmustache:compile(Template, #{"name" => "Alice"}).
%% <<"Alice">>
%% '''
%% Data support assoc list or maps (OTP17 or later). <br />
%% All key in assoc list or maps must be same type.
-spec compile(template(), data(), [option()]) -> binary().
compile(#?MODULE{data = Tags} = T, Data, Options) ->
    %% Reject anything that is neither an assoc list nor a map up front;
    %% compile_impl/4 accumulates output in reverse order.
    case check_data_type(Data) of
        false -> error(function_clause, [T, Data]);
        _     -> iolist_to_binary(lists:reverse(compile_impl(Tags, Data, [], Options)))
    end.
%%----------------------------------------------------------------------------------------------------------------------
%% Internal Function
%%----------------------------------------------------------------------------------------------------------------------
%% @doc Default value for a key that is missing from the data.
%%
%% Keys starting with an uppercase ASCII letter default to the literal
%% tag text (<<"{{Key}}">>); every other key defaults to the empty binary.
get_default(<<First, _/binary>> = Key) when First >= $A, First =< $Z ->
    <<"{{", Key/binary, "}}">>;
get_default(_Key) ->
    <<>>.
%% @doc {@link compile/2}
%%
%% ATTENTION: The result is a list that is inverted.
%% Walk the parsed tag list and accumulate rendered output (reversed).
%% {n, Key}   -> HTML-escaped variable; {'&', Key} -> unescaped variable;
%% {'#', ...} -> section (nested data / list iteration / lambda / truthy);
%% {'^', ...} -> inverted section; plain binaries pass through.
-spec compile_impl(Template :: [tag()], data(), Result :: iodata(), Options :: [option()]) -> iodata().
compile_impl([], _, Result, _) ->
    Result;
compile_impl([{n, Key} | T], Map, Result, Options) ->
    compile_impl(T, Map, [escape(to_iodata(data_get(convert_keytype(Key, Options), Map, get_default(Key)))) | Result], Options);
compile_impl([{'&', Key} | T], Map, Result, Options) ->
    compile_impl(T, Map, [to_iodata(data_get(convert_keytype(Key, Options), Map, <<>>)) | Result], Options);
compile_impl([{'#', Key, Tags, Source} | T], Map, Result, Options) ->
    Value = data_get(convert_keytype(Key, Options), Map, false),
    case check_data_type(Value) of
        %% Nested assoc-list/map: render the section with it as context.
        true                         -> compile_impl(T, Map, compile_impl(Tags, Value, Result, Options), Options);
        %% List: render the section once per element.
        _ when is_list(Value)        -> compile_impl(T, Map, lists:foldl(fun(X, Acc) -> compile_impl(Tags, X, Acc, Options) end,
                                                                         Result, Value), Options);
        %% false (or missing): skip the section entirely.
        _ when Value =:= false       -> compile_impl(T, Map, Result, Options);
        %% Lambda: gets the raw section source and a render callback.
        _ when is_function(Value, 2) -> compile_impl(T, Map, [Value(Source, fun(Text) -> render(Text, Map, Options) end) | Result], Options);
        %% Any other truthy value: render with the current context.
        _                            -> compile_impl(T, Map, compile_impl(Tags, Map, Result, Options), Options)
    end;
compile_impl([{'^', Key, Tags} | T], Map, Result, Options) ->
    Value = data_get(convert_keytype(Key, Options), Map, false),
    case Value =:= [] orelse Value =:= false of
        true  -> compile_impl(T, Map, compile_impl(Tags, Map, Result, Options), Options);
        false -> compile_impl(T, Map, Result, Options)
    end;
compile_impl([Bin | T], Map, Result, Options) ->
    compile_impl(T, Map, [Bin | Result], Options).
%% @see parse_binary/1
%% Wrap the parsed tag list in the opaque template record.
-spec parse_binary_impl(state(), Input :: binary()) -> template().
parse_binary_impl(State, Input) ->
    #?MODULE{data = parse(State, Input)}.
%% @doc Analyze the syntax of the mustache.
%% A leftover end tag at the top level means an unmatched {{/section}}.
-spec parse(state(), binary()) -> [tag()].
parse(State, Bin) ->
    case parse1(State, Bin, []) of
        {endtag, {_, OtherTag, _, _, _}} ->
            error({?PARSE_ERROR, {section_is_incorrect, OtherTag}});
        {_, Tags} ->
            %% parse1 accumulates in reverse order.
            lists:reverse(Tags)
    end.
%% @doc Part of the `parse/1'
%%
%% ATTENTION: The result is a list that is inverted.
%% Scan for the next opening delimiter.  "{{{" routes to parse2
%% (triple-mustache, unescaped); "{{" routes to parse3 (all other tags);
%% no delimiter means the rest of the input is plain text.
-spec parse1(state(), Input :: binary(), Result :: [tag()]) -> {state(), [tag()]} | endtag().
parse1(#state{start = Start, stop = Stop} = State, Bin, Result) ->
    case binary:split(Bin, Start) of
        [B1]                      -> {State, [B1 | Result]};
        [B1, <<"{", B2/binary>>]  -> parse2(State, binary:split(B2, <<"}", Stop/binary>>), [B1 | Result]);
        [B1, B2]                  -> parse3(State, binary:split(B2, Stop), [B1 | Result])
    end.
%% @doc Part of the `parse/1'
%%
%% 2nd Argument: [TagBinary(may exist unnecessary spaces to the end), RestBinary]
%% ATTENTION: The result is a list that is inverted.
%% Triple mustache {{{name}}} -> unescaped variable tag.
-spec parse2(state(), iolist(), Result :: [tag()]) -> {state(), [tag()]} | endtag().
parse2(State, [B1, B2], Result) ->
    parse1(State, B2, [{'&', remove_space_from_edge(B1)} | Result]);
parse2(_, _, _) ->
    %% No closing "}}}" found before end of input.
    error({?PARSE_ERROR, unclosed_tag}).
%% @doc Part of the `parse/1'
%%
%% 2nd Argument: [TagBinary(may exist unnecessary spaces to the end), RestBinary]
%% ATTENTION: The result is a list that is inverted.
%% Dispatch on the sigil of a "{{...}}" tag:
%%   &  unescaped variable        #/^  section begin (parse_loop)
%%   =  delimiter change          !    comment (dropped)
%%   /  section end (signalled upward as an endtag)
%%   >  partial inclusion         none variable tag
-spec parse3(state(), iolist(), Result :: [tag()]) -> {state(), [tag()]} | endtag().
parse3(State, [B1, B2], Result) ->
    case remove_space_from_head(B1) of
        <<"&", Tag/binary>> ->
            parse1(State, B2, [{'&', remove_space_from_edge(Tag)} | Result]);
        <<T, Tag/binary>> when T =:= $#; T =:= $^ ->
            parse_loop(State, ?COND(T =:= $#, '#', '^'), remove_space_from_edge(Tag), B2, Result);
        <<"=", Tag0/binary>> ->
            %% Delimiter tag must end with "="; strip it before parsing.
            Tag1 = remove_space_from_tail(Tag0),
            Size = byte_size(Tag1) - 1,
            case Size >= 0 andalso Tag1 of
                <<Tag2:Size/binary, "=">> -> parse_delimiter(State, Tag2, B2, Result);
                _                         -> error({?PARSE_ERROR, {unsupported_tag, <<"=", Tag0/binary>>}})
            end;
        <<"!", _/binary>> ->
            parse1(State, B2, Result);
        <<"/", Tag/binary>> ->
            %% byte_size(B1) + 4 accounts for the "{{" and "}}" delimiters.
            {endtag, {State, remove_space_from_edge(Tag), byte_size(B1) + 4, B2, Result}};
        <<">", Tag/binary>> ->
            parse_jump(State, remove_space_from_edge(Tag), B2, Result);
        Tag ->
            parse1(State, B2, [{n, remove_space_from_tail(Tag)} | Result])
    end;
parse3(_, _, _) ->
    error({?PARSE_ERROR, unclosed_tag}).
%% Like parse1/3 but first consumes a single newline, so standalone tags
%% do not leave blank lines in the output.
-spec parse4(state(), Input :: binary(), Result :: [tag()]) -> {state(), [tag()]} | endtag().
parse4(State, <<"\r\n", Rest/binary>>, Result) ->
    parse1(State, Rest, Result);
parse4(State, <<"\n", Rest/binary>>, Result) ->
    parse1(State, Rest, Result);
parse4(State, Input, Result) ->
    parse1(State, Input, Result).
%% @doc Loop processing part of the `parse/1'
%%
%% `{{# Tag}}' or `{{^ Tag}}' corresponds to this.
%% Parses the section body until the matching `{{/ Tag}}' end tag is
%% found, then stores the collected inner tags (re-reversed into
%% document order) and continues after the section.
-spec parse_loop(state(), '#' | '^', Tag :: binary(), Input :: binary(), Result :: [tag()]) -> [tag()] | endtag().
parse_loop(State0, Mark, Tag, Input, Result0) ->
    case parse4(State0, Input, []) of
        {endtag, {State, Tag, LastTagSize, Rest, Result1}} ->
            %% The end tag's name matches this section (same `Tag' binding).
            case Mark of
                %% For `#' sections the raw source text of the body is kept as
                %% well -- presumably so it can be re-rendered later; confirm
                %% against the render side.
                '#' -> Source = binary:part(Input, 0, byte_size(Input) - byte_size(Rest) - LastTagSize),
                       parse4(State, Rest, [{'#', Tag, lists:reverse(Result1), Source} | Result0]);
                '^' -> parse4(State, Rest, [{'^', Tag, lists:reverse(Result1)} | Result0])
            end;
        {endtag, {_, OtherTag, _, _, _}} ->
            %% An end tag appeared, but for a different section name.
            error({?PARSE_ERROR, {section_is_incorrect, OtherTag}});
        _ ->
            %% Input ran out before any end tag appeared.
            error({?PARSE_ERROR, {section_end_tag_not_found, <<"/", Tag/binary>>}})
    end.
%% @doc Endtag part of the `parse/1'
%%
%% Handles a `{{> Tag}}' partial: reads `Tag.mustache' (joined onto the
%% template's directory when one is set) and parses its contents inline
%% before continuing with `NextBin'.
-spec parse_jump(state(), Tag :: binary(), NextBin :: binary(), Result :: [tag()]) -> [tag()] | endtag().
parse_jump(#state{dirname = Dirname} = State0, Tag, NextBin, Result0) ->
    Filename0 = <<Tag/binary, ".mustache">>,
    Filename = ?COND(Dirname =:= <<>>, Filename0, filename:join([Dirname, Filename0])),
    case file:read_file(Filename) of
        {ok, Bin} ->
            case parse4(State0, Bin, Result0) of
                %% An end tag carrying this partial's own name leaked out of the
                %% included file: its section begin tag is missing.
                {endtag, {_, Tag, _, _, _}} -> error({?PARSE_ERROR, {section_begin_tag_not_found, <<"#", Tag/binary>>}});
                {State, Result} -> parse4(State, NextBin, Result)
            end;
        _ ->
            error(?FILE_ERROR, [Filename])
    end.
%% @doc Update delimiter part of the `parse/1'
%%
%% ParseDelimiterBin :: e.g. `{{=%% %%=}}' -> `%% %%'
%% Splits the delimiter spec on spaces and installs the new start/stop
%% delimiters in the parser state. A `=' inside the spec is rejected.
-spec parse_delimiter(state(), ParseDelimiterBin :: binary(), NextBin :: binary(), Result :: [tag()]) -> [tag()] | endtag().
parse_delimiter(State0, ParseDelimiterBin, NextBin, Result) ->
    case binary:match(ParseDelimiterBin, <<"=">>) of
        nomatch ->
            %% Exactly two non-empty, space-separated tokens are required.
            case [X || X <- binary:split(ParseDelimiterBin, <<" ">>, [global]), X =/= <<>>] of
                [Start, Stop] -> parse4(State0#state{start = Start, stop = Stop}, NextBin, Result);
                _ -> error({?PARSE_ERROR, delimiters_may_not_contain_whitespaces})
            end;
        _ ->
            error({?PARSE_ERROR, delimiters_may_not_contain_equals})
    end.
%% @doc Remove the space from the edge.
%%
%% Strips ASCII space characters from both the head and the tail of `Bin'.
-spec remove_space_from_edge(binary()) -> binary().
remove_space_from_edge(Bin) ->
    remove_space_from_tail(remove_space_from_head(Bin)).
%% @doc Remove the space from the head.
%%
%% Drops every leading ASCII space (32) of the binary; other whitespace
%% such as tabs or newlines is left untouched.
-spec remove_space_from_head(binary()) -> binary().
remove_space_from_head(Bin) ->
    case Bin of
        <<" ", Tail/binary>> -> remove_space_from_head(Tail);
        _ -> Bin
    end.
%% @doc Remove the space from the tail.
%%
%% Locates every space in the binary, then walks the match list backwards
%% (via remove_space_from_tail_impl/2) to find where the trailing run of
%% spaces begins, and truncates there.
-spec remove_space_from_tail(binary()) -> binary().
remove_space_from_tail(<<>>) -> <<>>;
remove_space_from_tail(Bin) ->
    PosList = binary:matches(Bin, <<" ">>),
    LastPos = remove_space_from_tail_impl(lists:reverse(PosList), byte_size(Bin)),
    binary:part(Bin, 0, LastPos).
%% @see remove_space_from_tail/1
%%
%% Walks the (reversed) match positions from the end of the binary toward
%% the front. While a match is flush against the current end
%% (`Pos + Len =:= Size') the end shrinks to `Pos'; the first gap, or an
%% exhausted list, stops the scan and yields the cut position.
-spec remove_space_from_tail_impl([{non_neg_integer(), pos_integer()}], non_neg_integer()) -> non_neg_integer().
remove_space_from_tail_impl([{Pos, Len} | Rest], Size) ->
    case Pos + Len of
        Size -> remove_space_from_tail_impl(Rest, Pos);
        _ -> Size
    end;
remove_space_from_tail_impl([], Size) ->
    Size.
%% @doc term to iodata
%%
%% Normalises a scalar into something an iolist can carry: integers and
%% atoms become binaries, floats are rendered via `io_lib:format/2', and
%% anything else (binaries, strings) passes through unchanged.
-spec to_iodata(number() | binary() | string() | atom()) -> iodata().
to_iodata(X) when is_integer(X) ->
    integer_to_binary(X);
to_iodata(X) when is_float(X) ->
    io_lib:format("~p", [X]);
to_iodata(X) when is_atom(X) ->
    atom_to_binary(X, latin1);
to_iodata(X) ->
    X.
%% @doc HTML Escape
%%
%% Escapes the characters that are unsafe in HTML output
%% (`< > & " ' / ` =') by replacing each with its HTML entity, and
%% returns the whole input flattened into a single binary.
%%
%% NOTE: the previous entity table had been destroyed by an HTML
%% entity-decoding pass (each mapping had collapsed to the bare
%% character, and the `$"' clause was no longer valid syntax); the
%% entities are restored here.
-spec escape(iodata()) -> binary().
escape(IoData) ->
    Bin = iolist_to_binary(IoData),
    << <<(escape_char(X))/binary>> || <<X:8>> <= Bin >>.

%% @see escape/1
%% Maps a single byte to its HTML entity, or to itself when no escaping
%% is required.
-spec escape_char(0..16#FFFF) -> binary().
escape_char($<) -> <<"&lt;">>;
escape_char($>) -> <<"&gt;">>;
escape_char($&) -> <<"&amp;">>;
escape_char($") -> <<"&quot;">>;
escape_char($') -> <<"&#39;">>;
escape_char($/) -> <<"&#x2F;">>;
escape_char($`) -> <<"&#x60;">>;
escape_char($=) -> <<"&#x3D;">>;
escape_char(C) -> <<C:8>>.
%% @doc convert to {@link data_key/0} from binary.
%%
%% The target representation is selected by the `key_type' option
%% (`string' by default). For `atom', only already-existing atoms are
%% produced -- unknown keys map to a harmless placeholder binary rather
%% than creating new atoms.
-spec convert_keytype(binary(), [option()]) -> data_key().
convert_keytype(KeyBin, Options) ->
    case proplists:get_value(key_type, Options, string) of
        binary ->
            KeyBin;
        string ->
            binary_to_list(KeyBin);
        atom ->
            try
                binary_to_existing_atom(KeyBin, utf8)
            catch
                _:_ -> <<" ">> % It is not always present in data/0
            end
    end.
%% @doc fetch the value of the specified key from {@link data/0}
%%
%% The key `.' (in any of its representations) always yields the data
%% itself.
-spec data_get(data_key(), data(), Default :: term()) -> term().
-ifdef(namespaced_types).
%% This branch (newer OTP) accepts both maps and association lists.
data_get(Dot, Data, _Default) when Dot =:= "."; Dot =:= '.'; Dot =:= <<".">> ->
    Data;
data_get(Key, Map, Default) when is_map(Map) ->
    maps:get(Key, Map, Default);
data_get(Key, AssocList, Default) ->
    proplists:get_value(Key, AssocList, Default).
-else.
%% Older OTP: association lists only.
data_get(Dot, Data, _Default) when Dot =:= "."; Dot =:= '.'; Dot =:= <<".">> ->
    Data;
data_get(Key, AssocList, Default) ->
    proplists:get_value(Key, AssocList, Default).
-endif.
%% @doc check whether the type of {@link data/0}
%%
%% maybe: There is also the possibility of iolist
%% (an empty list could be an empty assoc list or an empty string).
-spec check_data_type(data() | term()) -> boolean() | maybe.
-ifdef(namespaced_types).
check_data_type([]) -> maybe;
check_data_type([{_, _} | _]) -> true;
%% On OTP releases with maps, a map also qualifies as data.
check_data_type(Map) -> is_map(Map).
-else.
check_data_type([]) -> maybe;
check_data_type([{_, _} | _]) -> true;
check_data_type(_) -> false.
-endif.
%% @doc Strip leading and trailing whitespace from `Input', returning a
%% flat string. Interior whitespace is preserved.
trim_whitespace(Input) ->
    Pattern = "(^\\s+)|(\\s+$)",
    Options = [global, {return, list}],
    re:replace(Input, Pattern, "", Options).
%% @doc Build a dict of comma-separated key/value pairs read from `Filename'.
%% Returns the atom 'invalid_filename' when the file cannot be opened.
dict_from_file(Filename) ->
    case file:open(Filename, [read]) of
        {ok, FP} ->
            dict_from_file_(FP, dict:new());
        {error,_} ->
            'invalid_filename'
    end.
%% @doc Accumulate `Key,Value' lines from the open file `FP' into `Dict'.
%%
%% Each line is split on commas; the first two fields become a
%% whitespace-trimmed key/value pair and any further fields (or
%% malformed lines) are ignored. The file is closed once `eof' (or a
%% read error) is reached, and the finished dict is returned.
dict_from_file_(FP, Dict) ->
    case file:read_line(FP) of
        {ok, LN} ->
            %% Bind the updated dict from the case expression rather than
            %% exporting a variable out of its branches (the original
            %% bound Dict2 inside both branches, triggering the
            %% "exported variable" style warning).
            Dict2 = case string:tokens(LN, ",") of
                        [K, V | _] ->
                            dict:store(trim_whitespace(K), trim_whitespace(V), Dict);
                        _ ->
                            Dict
                    end,
            dict_from_file_(FP, Dict2);
        _ ->
            file:close(FP),
            Dict
    end.
%% @doc Build a map of key/value pairs read from `Filename'.
%% Returns the atom 'invalid_filename' when the file cannot be opened.
map_from_file(Filename) ->
    case file:open(Filename, [read]) of
        {ok, FP} ->
            map_from_file_(FP, #{});
        {error,_} ->
            'invalid_filename'
    end.
%% @doc Read lines from `FP', merging each line's pairs into `Map',
%% until `eof' is signalled by map_from_file_line/2.
map_from_file_(FP, Map) ->
    case map_from_file_line(FP, Map) of
        eof ->
            Map;
        NewMap ->
            map_from_file_(FP, NewMap)
    end.
%% @doc Read one line from `FP' into a fresh map.
map_from_file_line(FP) ->
    map_from_file_line(FP, #{}).
%% @doc Read one line from `FP' and merge its pairs into `Map'.
%% Closes the file and returns `eof' when no more lines are available.
map_from_file_line(FP, Map) ->
    case file:read_line(FP) of
        {ok, LN} ->
            map_from_line(LN, Map);
        _ ->
            file:close(FP),
            eof
    end.
%% @doc Parse one line into a fresh map.
map_from_line(LN) ->
    map_from_line(LN, #{}).
%% @doc Tokenise the line on `,', `=' and `;' and merge the resulting
%% pairs into `Map'.
map_from_line(LN, Map) ->
    map_from_line_(string:tokens(LN, ",=;"), Map).
%% @doc Consume tokens pairwise; keys and values are whitespace-trimmed
%% and stored as binaries. A trailing odd token is silently dropped.
map_from_line_([K,V|T], Map) ->
    KK = list_to_binary(trim_whitespace(K)),
    VV = list_to_binary(trim_whitespace(V)),
    map_from_line_(T, Map#{KK => VV});
map_from_line_(_, Map) -> Map.
%%
%% The mandatory header(s) for each scenario file:
%% * `-module(...).` identity of module
%% * `-compile({parse_transform, monad}).` enable monads
-module(skeleton).
-compile({parse_transform, monad}).
%%
%% The workload scenario consists of attributes and actions
%% Attributes are functions that return scalar values
%% Action returns pure IO-monadic computations.
%% Actions and attributes are exported using `-export([...]).`
%% Typhoon requires each scenario to define attributes:
%% * `title()` a human readable scenario name
%% * `t()` time in milliseconds to execute workload
%% * `n()` number of concurrent sessions globally spawned in the cluster
-export([title/0, t/0, n/0]).
%% Scenario shall provide actions:
%% * `init()` an optional computation to be executed once by scenario session
%% the result of computation is fed to main entry point
%% * `run(_)` executes workload scenario
% -export([init/0, run/1]).
-export([run/1]).
%%%----------------------------------------------------------------------------
%%%
%%% attributes
%%%
%%%----------------------------------------------------------------------------
%% @doc Human readable scenario title.
title() ->
    "Skeleton Workload Scenario".
%% @doc Time to execute the workload, in milliseconds (one minute here).
t() ->
    60000.
%% @doc Number of concurrent sessions to spawn in the cluster.
n() ->
    2.
%%%----------------------------------------------------------------------------
%%%
%%% actions
%%%
%%%----------------------------------------------------------------------------
%%
%% init scenario configuration.
%% NOTE(review): init/0 is not currently exported (its export is
%% commented out above), so this body serves as sample code for the
%% optional init hook.
init() ->
    do([m_state ||          %% sequence of requests to execute as IO-monadic computation
        _ <- request(),     %% execute HTTP request and discard results
        A <- request(),     %% execute HTTP request and assign response to variable A
        return(A)           %% it just takes a value A and puts it in an IO context.
    ]).
%%
%% execute scenario, the operation is repeated until `t()` is expired.
%% The argument receives the result of `init/0' when that hook is
%% enabled; it is unused here.
run(_Config) ->
    do([m_state ||
        _ <- request(),   %% execute HTTP request, result discarded
        A <- article(),   %% execute another type of HTTP request
        return(A)
    ]).
%%
%% create HTTP request using nested function call syntax
request() ->
    do([m_http ||
        % 1. create new HTTP request
        _ /= new("https://api.zalando.com/"),
        % 2. set the HTTP method
        _ /= method('GET'),
        % 3. set request header
        _ /= header("Accept-Language", "de-DE"),
        _ /= header("Connection", "close"),
        % 4. build HTTP promise
        _ /= request(),
        %% 5. return results
        return(_)
    ]).
%%
%% create HTTP request using chained function call syntax
article() ->
    do([m_http ||
        % 1. create new HTTP request
        _ /= new("https://api.zalando.com/articles"),
        % 2. set the HTTP method
        _ /= method('GET'),
        % 3. set request header
        _ /= header("Accept-Language", "de-DE"),
        _ /= header("Connection", "close"),
        % 4. build HTTP promise and decode result
        _ /= request(),
        % 5. parse and return results (the id of the first article)
        _ =< scenario:decode(_),
        return( scenario:lens([content, {uniform}, id], _) )
    ]).
%%%----------------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright 2012 University of St Andrews (See LICENCE)
%%% @headerfile "skel.hrl"
%%%
%%% @doc This module contains the initialization logic of a Divide and Conquer skeleton.
%%%
%%% The 'divconquer' skeleton applies a Divide and Conquer algorithm over a dataset,
%%% recursively splitting the data into two sub-problems via a divide function, before
%%% applying a conquer function then merging the data back up.
%%%
%%% There are two possible skeleton definitions; one involves declaring functions for all three
%%% stages, the second only requires a single stage to be defined. If using the first definition,
%%% it is possible to substitite any of the functions for an atom which will use the associated
%%% pre-programmed default function.
%%%
%%% Both definitions require a maximum depth before a node will continue sequentially rather than
%%% spawning further worker processes.
%%%
%%% === Example ===
%%%
%%% ```skel:do([{divconquer, {merge, fun ?MODULE:mergeFun/1}, 5}], Data)'''
%%%
%%% In this example, we produce a divconquer skeleton with a custom merge function `mergeFun/1`.
%%% The workers will traverse down 5 levels before continuing sequentially. Default split and conquer
%%% functions will be used.
%%%
%%% ```skel:do([{divconquer, fun ?MODULE:mergeFun/1, fun ?MODULE:conquerFun/1, fun ?MODULE:splitFun/1, 5}], Data)'''
%%%
%%% This example explicitely declares all three possible functions along with the aforementioned
%%% maximum depth. Replacing any of the functions with a `default` atom will use the associated
%%% default function.
%%%
%%% @end
%%%----------------------------------------------------------------------------
-module(sk_divconquer).
-export([
make/4,
make/2,
default_merge/2,
default_conquer/1,
default_split/1
]).
-include("skel.hrl").
-ifdef(TEST).
-compile(export_all).
-endif.
-spec make(tuple(), pos_integer()) -> maker_fun().
%% @doc Initialises a divconquer skeleton given either a split, merge or conquer
%% function within a tuple, along with the maximum depth. The other two
%% functions are set to default.
%%
%% All clauses now validate their arguments up front (the original only
%% guarded the `merge' clause), so a bad argument fails here with
%% function_clause rather than later, when the returned maker fun spawns
%% its worker. The arities match how the funs are used: merge/2,
%% conquer/1, divide/1.
make({merge, MergeFun}, MaxDepth) when is_integer(MaxDepth), is_function(MergeFun, 2) ->
  fun(NextPid) ->
    spawn(sk_divconquer_worker, start, [NextPid, MergeFun, fun default_conquer/1, fun default_split/1, MaxDepth])
  end;
make({conquer, ConquerFun}, MaxDepth) when is_integer(MaxDepth), is_function(ConquerFun, 1) ->
  fun(NextPid) ->
    spawn(sk_divconquer_worker, start, [NextPid, fun default_merge/2, ConquerFun, fun default_split/1, MaxDepth])
  end;
make({divide, DivFun}, MaxDepth) when is_integer(MaxDepth), is_function(DivFun, 1) ->
  fun(NextPid) ->
    spawn(sk_divconquer_worker, start, [NextPid, fun default_merge/2, fun default_conquer/1, DivFun, MaxDepth])
  end.
-spec make(function(), function(), function(), pos_integer()) -> maker_fun().
%% @doc Initialises a divconquer skeleton given the three function parameters
%% and the maximum depth, respectively. Any non-function parameter is
%% replaced by the associated default function (see start/5).
make(MergeFun, ConquerFun, SplitFun, MaxDepth) when is_integer(MaxDepth) ->
  fun(NextPid) ->
    start(NextPid, MergeFun, ConquerFun, SplitFun, MaxDepth)
  end.

%% @doc Substitute the default for each argument that is not a fun, then
%% spawn the worker process. Uses the idiomatic `not is_function(F)'
%% guard form instead of the original `is_function(F) == false'
%% comparisons (identical behaviour).
start(NextPid, MergeFun, ConquerFun, SplitFun, MaxDepth) when not is_function(MergeFun) ->
  start(NextPid, fun default_merge/2, ConquerFun, SplitFun, MaxDepth);
start(NextPid, MergeFun, ConquerFun, SplitFun, MaxDepth) when not is_function(ConquerFun) ->
  start(NextPid, MergeFun, fun default_conquer/1, SplitFun, MaxDepth);
start(NextPid, MergeFun, ConquerFun, SplitFun, MaxDepth) when not is_function(SplitFun) ->
  start(NextPid, MergeFun, ConquerFun, fun default_split/1, MaxDepth);
start(NextPid, MergeFun, ConquerFun, SplitFun, MaxDepth) ->
  spawn(sk_divconquer_worker, start, [NextPid, MergeFun, ConquerFun, SplitFun, MaxDepth]).
-spec default_merge(list(), list()) -> list().
%% @doc Default merge: concatenate the two sub-results, left before right.
default_merge(Left, Right) ->
    lists:append(Left, Right).
-spec default_conquer(any()) -> list().
%% @doc Default conquer: wrap the single element in a list, unchanged.
default_conquer(X) ->
    [X].
%% NOTE: the original spec claimed `-> list()', but for inputs longer
%% than one element this returns the `{Front, Back}' tuple produced by
%% lists:split/2; only the single-element base case returns a list.
-spec default_split(list()) -> {list(), list()} | list().
%% @doc Default divide: halve the input (front half rounded down). A
%% single-element list is returned as-is, serving as the recursion base.
default_split([Elem]) ->
    [Elem];
default_split(List) ->
    lists:split(trunc(length(List)/2), List).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
-module(io_lib_format).
%% Formatting functions of io library.
-export([fwrite/2,fwrite_g/1,indentation/2,scan/2,unscan/1,build/1]).
%% Format the arguments in Args after string Format. Just generate
%% an error if there is an error in the arguments.
%%
%% To do the printing command correctly we need to calculate the
%% current indentation for everything before it. This may be very
%% expensive, especially when it is not needed, so we first determine
%% if, and for how long, we need to calculate the indentations. We do
%% this by first collecting all the control sequences and
%% corresponding arguments, then counting the print sequences and
%% then building the output. This method has some drawbacks, it does
%% two passes over the format string and creates more temporary data,
%% and it also splits the handling of the control characters into two
%% parts.
-spec fwrite(Format, Data) -> FormatList when
      Format :: io:format(),
      Data :: [term()],
      FormatList :: [char() | io_lib:format_spec()].
%% Scan the control sequences out of Format/Args, then immediately
%% build the resulting character data.
fwrite(Format, Args) ->
    build(scan(Format, Args)).
%% Build the output text for a pre-parsed format list.
-spec build(FormatList) -> io_lib:chars() when
      FormatList :: [char() | io_lib:format_spec()].
build(Cs) ->
    %% Count the ~p/~P print requests up front; build/3 only needs to
    %% track indentation while any of them remain.
    Pc = pcount(Cs),
    build(Cs, Pc, 0).
%% Parse all control sequences in the format string.
-spec scan(Format, Data) -> FormatList when
      Format :: io:format(),
      Data :: [term()],
      FormatList :: [char() | io_lib:format_spec()].
%% Atom and binary formats are normalised to character lists before
%% collecting the control sequences.
scan(Format, Args) when is_atom(Format) ->
    scan(atom_to_list(Format), Args);
scan(Format, Args) when is_binary(Format) ->
    scan(binary_to_list(Format), Args);
scan(Format, Args) ->
    collect(Format, Args).
%% Revert a pre-parsed format list to a plain character list and a
%% list of arguments.
-spec unscan(FormatList) -> {Format, Data} when
      FormatList :: [char() | io_lib:format_spec()],
      Format :: io:format(),
      Data :: [term()].
unscan(Cs) ->
    {print(Cs), args(Cs)}.
%% Collect, in order, the arguments attached to every format-spec map in
%% the scanned list; plain characters carry no arguments and are skipped.
args(Cs) ->
    lists:append([As || #{args := As} <- Cs]).
%% Rebuild the textual "~..." control sequence for each format-spec map,
%% leaving plain characters untouched.
print([#{control_char := C, width := F, adjust := Ad, precision := P,
         pad_char := Pad, encoding := Encoding, strings := Strings} | Cs]) ->
    print(C, F, Ad, P, Pad, Encoding, Strings) ++ print(Cs);
print([C | Cs]) ->
    [C | print(Cs)];
print([]) ->
    [].

%% Assemble one control sequence from its parsed fields, in the same
%% order the scanner accepts them: width, precision, pad, modifiers,
%% control character.
print(C, F, Ad, P, Pad, Encoding, Strings) ->
    [$~] ++ print_field_width(F, Ad) ++ print_precision(P) ++
        print_pad_char(Pad) ++ print_encoding(Encoding) ++
        print_strings(Strings) ++ [C].

%% Left adjustment is encoded as a negative field width.
print_field_width(none, _Ad) -> "";
print_field_width(F, left) -> integer_to_list(-F);
print_field_width(F, right) -> integer_to_list(F).

%% Precision is written as ".N".
print_precision(none) -> "";
print_precision(P) -> [$. | integer_to_list(P)].

print_pad_char($\s) -> ""; % default, no need to make explicit
print_pad_char(Pad) -> [$., Pad].

%% The "t" modifier selects Unicode translation; latin1 is the default.
print_encoding(unicode) -> "t";
print_encoding(latin1) -> "".

%% The "l" modifier turns off string detection for ~p/~P.
print_strings(false) -> "l";
print_strings(true) -> "".
%% Walk the format string: each "~" starts a control sequence which is
%% parsed (consuming its arguments); anything else is copied verbatim.
%% The final clause requires both lists to be exhausted together, so
%% leftover arguments are an error (function_clause).
collect([$~|Fmt0], Args0) ->
    {C,Fmt1,Args1} = collect_cseq(Fmt0, Args0),
    [C|collect(Fmt1, Args1)];
collect([C|Fmt], Args) ->
    [C|collect(Fmt, Args)];
collect([], []) -> [].
%% Parse one control sequence (after the "~") into a format_spec map:
%% field width, precision, pad character, the encoding/strings modifiers
%% and finally the control character with its consumed arguments.
collect_cseq(Fmt0, Args0) ->
    {F,Ad,Fmt1,Args1} = field_width(Fmt0, Args0),
    {P,Fmt2,Args2} = precision(Fmt1, Args1),
    {Pad,Fmt3,Args3} = pad_char(Fmt2, Args2),
    {Encoding,Fmt4,Args4} = encoding(Fmt3, Args3),
    {Strings,Fmt5,Args5} = strings(Fmt4, Args4),
    {C,As,Fmt6,Args6} = collect_cc(Fmt5, Args5),
    FormatSpec = #{control_char => C, args => As, width => F, adjust => Ad,
                   precision => P, pad_char => Pad, encoding => Encoding,
                   strings => Strings},
    {FormatSpec,Fmt6,Args6}.
%% The "t" (Unicode) modifier; the match assertion rejects combining it
%% with "l".
encoding([$t|Fmt],Args) ->
    true = hd(Fmt) =/= $l,
    {unicode,Fmt,Args};
encoding(Fmt,Args) ->
    {latin1,Fmt,Args}.

%% The "l" (list, no string detection) modifier; the match assertion
%% rejects combining it with "t".
strings([$l|Fmt],Args) ->
    true = hd(Fmt) =/= $t,
    {false,Fmt,Args};
strings(Fmt,Args) ->
    {true,Fmt,Args}.
%% A leading "-" negates the field width, which field_width/3 then
%% interprets as left adjustment.
field_width([$-|Fmt0], Args0) ->
    {F,Fmt,Args} = field_value(Fmt0, Args0),
    field_width(-F, Fmt, Args);
field_width(Fmt0, Args0) ->
    {F,Fmt,Args} = field_value(Fmt0, Args0),
    field_width(F, Fmt, Args).

field_width(F, Fmt, Args) when F < 0 ->
    {-F,left,Fmt,Args};
field_width(F, Fmt, Args) when F >= 0 ->
    {F,right,Fmt,Args}.

%% Precision follows a "."; otherwise 'none'.
precision([$.|Fmt], Args) ->
    field_value(Fmt, Args);
precision(Fmt, Args) ->
    {none,Fmt,Args}.

%% A numeric value: "*" takes it from the argument list, digits are read
%% inline, anything else means the value is absent.
field_value([$*|Fmt], [A|Args]) when is_integer(A) ->
    {A,Fmt,Args};
field_value([C|Fmt], Args) when is_integer(C), C >= $0, C =< $9 ->
    field_value([C|Fmt], Args, 0);
field_value(Fmt, Args) ->
    {none,Fmt,Args}.

field_value([C|Fmt], Args, F) when is_integer(C), C >= $0, C =< $9 ->
    field_value(Fmt, Args, 10*F + (C - $0));
field_value(Fmt, Args, F) -> %Default case
    {F,Fmt,Args}.

%% Pad character: ".*" takes it from the argument list, ".c" uses c
%% literally, and the default is a space.
pad_char([$.,$*|Fmt], [Pad|Args]) -> {Pad,Fmt,Args};
pad_char([$.,Pad|Fmt], Args) -> {Pad,Fmt,Args};
pad_char(Fmt, Args) -> {$\s,Fmt,Args}.
%% collect_cc([FormatChar], [Argument]) ->
%%	{Control,[ControlArg],[FormatChar],[Arg]}.
%%  Here we collect the arguments for each control character.
%%  Be explicit to cause failure early: each clause consumes exactly the
%%  number of arguments its control character requires, so a missing
%%  argument fails with function_clause right here.
collect_cc([$w|Fmt], [A|Args]) -> {$w,[A],Fmt,Args};
collect_cc([$p|Fmt], [A|Args]) -> {$p,[A],Fmt,Args};
collect_cc([$W|Fmt], [A,Depth|Args]) -> {$W,[A,Depth],Fmt,Args};
collect_cc([$P|Fmt], [A,Depth|Args]) -> {$P,[A,Depth],Fmt,Args};
collect_cc([$s|Fmt], [A|Args]) -> {$s,[A],Fmt,Args};
collect_cc([$e|Fmt], [A|Args]) -> {$e,[A],Fmt,Args};
collect_cc([$f|Fmt], [A|Args]) -> {$f,[A],Fmt,Args};
collect_cc([$g|Fmt], [A|Args]) -> {$g,[A],Fmt,Args};
collect_cc([$b|Fmt], [A|Args]) -> {$b,[A],Fmt,Args};
collect_cc([$B|Fmt], [A|Args]) -> {$B,[A],Fmt,Args};
collect_cc([$x|Fmt], [A,Prefix|Args]) -> {$x,[A,Prefix],Fmt,Args};
collect_cc([$X|Fmt], [A,Prefix|Args]) -> {$X,[A,Prefix],Fmt,Args};
collect_cc([$+|Fmt], [A|Args]) -> {$+,[A],Fmt,Args};
collect_cc([$#|Fmt], [A|Args]) -> {$#,[A],Fmt,Args};
collect_cc([$c|Fmt], [A|Args]) -> {$c,[A],Fmt,Args};
collect_cc([$~|Fmt], Args) when is_list(Args) -> {$~,[],Fmt,Args};
collect_cc([$n|Fmt], Args) when is_list(Args) -> {$n,[],Fmt,Args};
collect_cc([$i|Fmt], [A|Args]) -> {$i,[A],Fmt,Args}.
%% pcount([ControlC]) -> Count.
%% Count the number of print requests.
pcount(Cs) -> pcount(Cs, 0).
pcount([#{control_char := $p}|Cs], Acc) -> pcount(Cs, Acc+1);
pcount([#{control_char := $P}|Cs], Acc) -> pcount(Cs, Acc+1);
pcount([_|Cs], Acc) -> pcount(Cs, Acc);
pcount([], Acc) -> Acc.
%% build([Control], Pc, Indentation) -> io_lib:chars().
%%  Interpret the control structures. Count the number of print
%%  remaining and only calculate indentation when necessary. Must also
%%  be smart when calculating indentation for characters in format.
build([#{control_char := C, args := As, width := F, adjust := Ad,
         precision := P, pad_char := Pad, encoding := Enc,
         strings := Str} | Cs], Pc0, I) ->
    S = control(C, As, F, Ad, P, Pad, Enc, Str, I),
    Pc1 = decr_pc(C, Pc0),
    if
        %% While any ~p/~P remain ahead, keep tracking the end column.
        Pc1 > 0 -> [S|build(Cs, Pc1, indentation(S, I))];
        true -> [S|build(Cs, Pc1, I)]
    end;
%% Literal newline resets the column; tab advances to the next 8-column
%% stop; other characters advance by one.
build([$\n|Cs], Pc, _I) -> [$\n|build(Cs, Pc, 0)];
build([$\t|Cs], Pc, I) -> [$\t|build(Cs, Pc, ((I + 8) div 8) * 8)];
build([C|Cs], Pc, I) -> [C|build(Cs, Pc, I+1)];
build([], _Pc, _I) -> [].

%% Only ~p and ~P consume a pending print request.
decr_pc($p, Pc) -> Pc - 1;
decr_pc($P, Pc) -> Pc - 1;
decr_pc(_, Pc) -> Pc.
%% Calculate the indentation of the end of a string given its start
%% indentation. We assume tabs at 8 cols. Non-integer list elements are
%% nested chardata and are measured recursively.
-spec indentation(String, StartIndent) -> integer() when
      String :: io_lib:chars(),
      StartIndent :: integer().
indentation([$\n | Rest], _Col) ->
    indentation(Rest, 0);                      % newline resets the column
indentation([$\t | Rest], Col) ->
    indentation(Rest, ((Col + 8) div 8) * 8);  % advance to next tab stop
indentation([Ch | Rest], Col) when is_integer(Ch) ->
    indentation(Rest, Col + 1);
indentation([Nested | Rest], Col) ->
    indentation(Rest, indentation(Nested, Col));
indentation([], Col) ->
    Col.
%% control(FormatChar, [Argument], FieldWidth, Adjust, Precision, PadChar,
%%	   Encoding, Indentation) -> String
%%  This is the main dispatch function for the various formatting commands.
%%  Field widths and precisions have already been calculated.
%%  Guards double as argument type checks, so e.g. ~f with a non-float
%%  fails with function_clause (reported as badarg by the io front end).
control($w, [A], F, Adj, P, Pad, _Enc, _Str, _I) ->
    term(io_lib:write(A, -1), F, Adj, P, Pad);
control($p, [A], F, Adj, P, Pad, Enc, Str, I) ->
    print(A, -1, F, Adj, P, Pad, Enc, Str, I);
control($W, [A,Depth], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(Depth) ->
    term(io_lib:write(A, Depth), F, Adj, P, Pad);
control($P, [A,Depth], F, Adj, P, Pad, Enc, Str, I) when is_integer(Depth) ->
    print(A, Depth, F, Adj, P, Pad, Enc, Str, I);
control($s, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_atom(A) ->
    string(atom_to_list(A), F, Adj, P, Pad);
%% ~s accepts iolists in latin1 mode and general chardata in unicode mode.
control($s, [L0], F, Adj, P, Pad, latin1, _Str, _I) ->
    L = iolist_to_chars(L0),
    string(L, F, Adj, P, Pad);
control($s, [L0], F, Adj, P, Pad, unicode, _Str, _I) ->
    L = cdata_to_chars(L0),
    uniconv(string(L, F, Adj, P, Pad));
control($e, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_float(A) ->
    fwrite_e(A, F, Adj, P, Pad);
control($f, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_float(A) ->
    fwrite_f(A, F, Adj, P, Pad);
control($g, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_float(A) ->
    fwrite_g(A, F, Adj, P, Pad);
%% For ~b/~B/~x/~X/~+/~# the precision holds the integer base.
control($b, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
    unprefixed_integer(A, F, Adj, base(P), Pad, true);
control($B, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
    unprefixed_integer(A, F, Adj, base(P), Pad, false);
control($x, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A),
                                                             is_atom(Prefix) ->
    prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), true);
control($x, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
    true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
    prefixed_integer(A, F, Adj, base(P), Pad, Prefix, true);
control($X, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A),
                                                             is_atom(Prefix) ->
    prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), false);
control($X, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
    true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
    prefixed_integer(A, F, Adj, base(P), Pad, Prefix, false);
control($+, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
    Base = base(P),
    Prefix = [integer_to_list(Base), $#],
    prefixed_integer(A, F, Adj, Base, Pad, Prefix, true);
control($#, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
    Base = base(P),
    Prefix = [integer_to_list(Base), $#],
    prefixed_integer(A, F, Adj, Base, Pad, Prefix, false);
control($c, [A], F, Adj, P, Pad, unicode, _Str, _I) when is_integer(A) ->
    char(A, F, Adj, P, Pad);
control($c, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
    %% In latin1 mode the code point is truncated to one byte.
    char(A band 255, F, Adj, P, Pad);
control($~, [], F, Adj, P, Pad, _Enc, _Str, _I) -> char($~, F, Adj, P, Pad);
control($n, [], F, Adj, P, Pad, _Enc, _Str, _I) -> newline(F, Adj, P, Pad);
%% ~i swallows one argument and emits nothing.
control($i, [_A], _F, _Adj, _P, _Pad, _Enc, _Str, _I) -> [].
-ifdef(UNICODE_AS_BINARIES).
%% Optional build flag: emit Unicode results as UTF-8 binaries.
uniconv(C) ->
    unicode:characters_to_binary(C,unicode).
-else.
%% Default build: pass the Unicode chardata through unchanged.
uniconv(C) ->
    C.
-endif.

%% Default integer base
base(none) ->
    10;
base(B) when is_integer(B) ->
    B.
%% term(TermList, Field, Adjust, Precision, PadChar)
%%  Output the characters in a term.
%%  Adjust the characters within the field if length less than Max padding
%%  with PadChar.
term(T, none, _Adj, none, _Pad) -> T;
term(T, none, Adj, P, Pad) -> term(T, P, Adj, P, Pad);
term(T, F, Adj, P0, Pad) ->
    L = lists:flatlength(T),
    %% P =< F always holds here (it is min-ed against F), so the `if'
    %% below is exhaustive: either the term exceeds the precision and is
    %% replaced by `*' fill characters, or it fits and is padded.
    P = erlang:min(L, case P0 of none -> F; _ -> min(P0, F) end),
    if
        L > P ->
            adjust(chars($*, P), chars(Pad, F-P), Adj);
        F >= P ->
            adjust(T, chars(Pad, F-L), Adj)
    end.
%% print(Term, Depth, Field, Adjust, Precision, PadChar, Encoding,
%%       Indentation)
%%  Print a term. The field defaults to 80 columns and the precision
%%  (start column) to one past the current indentation. Note that only
%%  right adjustment is implemented for ~p/~P.
print(T, D, none, Adj, P, Pad, E, Str, I) ->
    print(T, D, 80, Adj, P, Pad, E, Str, I);
print(T, D, F, Adj, none, Pad, E, Str, I) ->
    print(T, D, F, Adj, I+1, Pad, E, Str, I);
print(T, D, F, right, P, _Pad, Enc, Str, _I) ->
    Options = [{column, P},
               {line_length, F},
               {depth, D},
               {encoding, Enc},
               {strings, Str}],
    io_lib_pretty:print(T, Options).
%% fwrite_e(Float, Field, Adjust, Precision, PadChar)
%%  Scientific notation. The default precision is 6 significant digits
%%  and at least 2 are required (one before and one after the point).
fwrite_e(Fl, none, Adj, none, Pad) -> %Default values
    fwrite_e(Fl, none, Adj, 6, Pad);
fwrite_e(Fl, none, _Adj, P, _Pad) when P >= 2 ->
    float_e(Fl, float_data(Fl), P);
fwrite_e(Fl, F, Adj, none, Pad) ->
    fwrite_e(Fl, F, Adj, 6, Pad);
fwrite_e(Fl, F, Adj, P, Pad) when P >= 2 ->
    term(float_e(Fl, float_data(Fl), P), F, Adj, F, Pad).

float_e(Fl, Fd, P) when Fl < 0.0 -> %Negative numbers
    [$-|float_e(-Fl, Fd, P)];
float_e(_Fl, {Ds,E}, P) ->
    case float_man(Ds, 1, P-1) of
        %% Rounding carried past the first digit: the mantissa becomes
        %% 1.0... and the exponent grows by one.
        {[$0|Fs],true} -> [[$1|Fs]|float_exp(E)];
        {Fs,false} -> [Fs|float_exp(E-1)]
    end.
%% float_man([Digit], Icount, Dcount) -> {[Chars],CarryFlag}.
%%  Generate the characters in the mantissa from the digits with Icount
%%  characters before the '.' and Dcount decimals. Handle carry and let
%%  caller decide what to do at top.
float_man(Ds, 0, Dc) ->
    {Cs,C} = float_man(Ds, Dc),
    {[$.|Cs],C};
float_man([D|Ds], I, Dc) ->
    case float_man(Ds, I-1, Dc) of
        %% A rounding carry propagates through trailing 9s.
        {Cs,true} when D =:= $9 -> {[$0|Cs],true};
        {Cs,true} -> {[D+1|Cs],false};
        {Cs,false} -> {[D|Cs],false}
    end;
float_man([], I, Dc) -> %Pad with 0's
    {string:chars($0, I, [$.|string:chars($0, Dc)]),false}.

%% float_man([Digit], Dcount) -> {[Chars],CarryFlag}.
%%  Decimal part only; the first digit dropped decides rounding (>= 5
%%  rounds up, signalled through the carry flag).
float_man([D|_], 0) when D >= $5 -> {[],true};
float_man([_|_], 0) -> {[],false};
float_man([D|Ds], Dc) ->
    case float_man(Ds, Dc-1) of
        {Cs,true} when D =:= $9 -> {[$0|Cs],true};
        {Cs,true} -> {[D+1|Cs],false};
        {Cs,false} -> {[D|Cs],false}
    end;
float_man([], Dc) -> {string:chars($0, Dc),false}. %Pad with 0's
%% float_exp(Exponent) -> [Char].
%%  Generate the exponent of a floating point number. Always include sign
%%  (integer_to_list/1 already supplies the minus for negative values).
float_exp(E) when E >= 0 ->
    "e+" ++ integer_to_list(E);
float_exp(E) ->
    "e" ++ integer_to_list(E).
%% fwrite_f(FloatData, Field, Adjust, Precision, PadChar)
%%  Fixed-point notation; the default precision is 6 decimals, minimum 1.
fwrite_f(Fl, none, Adj, none, Pad) -> %Default values
    fwrite_f(Fl, none, Adj, 6, Pad);
fwrite_f(Fl, none, _Adj, P, _Pad) when P >= 1 ->
    float_f(Fl, float_data(Fl), P);
fwrite_f(Fl, F, Adj, none, Pad) ->
    fwrite_f(Fl, F, Adj, 6, Pad);
fwrite_f(Fl, F, Adj, P, Pad) when P >= 1 ->
    term(float_f(Fl, float_data(Fl), P), F, Adj, F, Pad).

float_f(Fl, Fd, P) when Fl < 0.0 ->
    [$-|float_f(-Fl, Fd, P)];
float_f(Fl, {Ds,E}, P) when E =< 0 ->
    %% |Number| < 1.0: prepend zeros so the first digit lands after the point.
    float_f(Fl, {string:chars($0, -E+1, Ds),1}, P); %Prepend enough 0's
float_f(_Fl, {Ds,E}, P) ->
    case float_man(Ds, E, P) of
        {Fs,true} -> "1" ++ Fs; %Handle carry
        {Fs,false} -> Fs
    end.
%% float_data([FloatChar]) -> {[Digit],Exponent}
%%  Split the default scientific rendering of a float into its digit
%%  string and a decimal exponent (exponent of the first digit plus one).
%%  Non-digit, non-'e' characters ('.', '-') are simply skipped.
float_data(Fl) ->
    float_data(float_to_list(Fl), []).

float_data([$e | Exp], Acc) ->
    {lists:reverse(Acc), list_to_integer(Exp) + 1};
float_data([D | Rest], Acc) when D >= $0, D =< $9 ->
    float_data(Rest, [D | Acc]);
float_data([_ | Rest], Acc) ->
    float_data(Rest, Acc).
%% Writes the shortest, correctly rounded string that converts
%% to Float when read back with list_to_float/1.
%%
%% See also "Printing Floating-Point Numbers Quickly and Accurately"
%% in  Proceedings of the SIGPLAN '96 Conference on Programming
%% Language Design and Implementation.
-spec fwrite_g(float()) -> string().
fwrite_g(0.0) ->
    "0.0";
fwrite_g(Float) when is_float(Float) ->
    {Frac, Exp} = mantissa_exponent(Float),
    {Place, Digits} = fwrite_g_1(Float, Exp, Frac),
    R = insert_decimal(Place, [$0 + D || D <- Digits]),
    %% Prepend the sign; digit generation itself works on |Float|.
    [$- || true <- [Float < 0.0]] ++ R.

-define(BIG_POW, (1 bsl 52)).
-define(MIN_EXP, (-1074)).

%% Decompose an IEEE 754 double into an integer mantissa and a base-2
%% exponent, via the 64-bit float bit pattern.
mantissa_exponent(F) ->
    case <<F:64/float>> of
        <<_S:1, 0:11, M:52>> -> % denormalized
            E = log2floor(M),
            {M bsl (53 - E), E - 52 - 1075};
        <<_S:1, BE:11, M:52>> when BE < 2047 ->
            %% Normalised: restore the implicit leading mantissa bit.
            {M + ?BIG_POW, BE - 1075}
    end.
%% Choose the scaling parameters for the digit generation of the
%% algorithm cited above: R/S represents the value, MPlus/MMinus bound
%% the rounding interval, and the interval is closed (Round) exactly
%% when the mantissa is even.
fwrite_g_1(Float, Exp, Frac) ->
    Round = (Frac band 1) =:= 0,
    if
        Exp >= 0 ->
            BExp = 1 bsl Exp,
            if
                Frac =:= ?BIG_POW ->
                    %% Smallest normalised mantissa: the gap to the next
                    %% smaller float is half the gap to the next larger,
                    %% hence the asymmetric bounds.
                    scale(Frac * BExp * 4, 4, BExp * 2, BExp,
                          Round, Round, Float);
                true ->
                    scale(Frac * BExp * 2, 2, BExp, BExp,
                          Round, Round, Float)
            end;
        Exp < ?MIN_EXP ->
            BExp = 1 bsl (?MIN_EXP - Exp),
            scale(Frac * 2, 1 bsl (1 - Exp), BExp, BExp,
                  Round, Round, Float);
        Exp > ?MIN_EXP, Frac =:= ?BIG_POW ->
            scale(Frac * 4, 1 bsl (2 - Exp), 2, 1,
                  Round, Round, Float);
        true ->
            scale(Frac * 2, 1 bsl (1 - Exp), 1, 1,
                  Round, Round, Float)
    end.
%% Scale R/S by the estimated power of ten so the first generated digit
%% lands just before the decimal point; fixup/7 corrects an estimate
%% that is off by one.
scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
    Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
    %% Note that the scheme implementation uses a 326 element look-up
    %% table for int_pow(10, N) where we do not.
    if
        Est >= 0 ->
            fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
                  LowOk, HighOk);
        true ->
            Scale = int_pow(10, -Est),
            fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
                  LowOk, HighOk)
    end.
%% If the exponent estimate K was one too low, the first digit would
%% overflow; detect that and bump the decimal-point position instead of
%% multiplying the remainder by ten.
fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
    TooLow = if
                 HighOk -> R + MPlus >= S;
                 true -> R + MPlus > S
             end,
    case TooLow of
        true ->
            {K + 1, generate(R, S, MPlus, MMinus, LowOk, HighOk)};
        false ->
            {K, generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)}
    end.
%% Emit decimal digits until the remainder falls inside the rounding
%% interval: TC1 means "low enough to stop", TC2 means "high enough to
%% stop and round up". On the final digit, ties pick the nearer value.
generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
    D = R0 div S,
    R = R0 rem S,
    TC1 = if
              LowOk -> R =< MMinus;
              true -> R < MMinus
          end,
    TC2 = if
              HighOk -> R + MPlus >= S;
              true -> R + MPlus > S
          end,
    case {TC1, TC2} of
        {false, false} ->
            [D | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)];
        {false, true} ->
            [D + 1];
        {true, false} ->
            [D];
        {true, true} when R * 2 < S ->
            [D];
        {true, true} ->
            [D + 1]
    end.
%% Place the decimal point at position Place in the digit string S,
%% choosing between plain decimal notation and scientific notation
%% (insert_exp/2) by whichever rendering is not longer.
insert_decimal(0, S) ->
    "0." ++ S;
insert_decimal(Place, S) ->
    L = length(S),
    if
        Place < 0;
        Place >= L ->
            %% The point falls outside the digit string: compare the cost
            %% of zero-padding against the cost of the exponent form.
            ExpL = integer_to_list(Place - 1),
            ExpDot = if L =:= 1 -> 2; true -> 1 end,
            ExpCost = length(ExpL) + 1 + ExpDot,
            if
                Place < 0 ->
                    if
                        2 - Place =< ExpCost ->
                            %% "0.00ddd" is no longer than the e-form.
                            "0." ++ lists:duplicate(-Place, $0) ++ S;
                        true ->
                            insert_exp(ExpL, S)
                    end;
                true ->
                    if
                        Place - L + 2 =< ExpCost ->
                            %% "ddd000.0" is no longer than the e-form.
                            S ++ lists:duplicate(Place - L, $0) ++ ".0";
                        true ->
                            insert_exp(ExpL, S)
                    end
            end;
        true ->
            %% The point falls inside the digit string.
            {S0, S1} = lists:split(Place, S),
            S0 ++ "." ++ S1
    end.
%% Scientific notation: put the decimal point after the first digit
%% (adding a trailing 0 for single-digit mantissas) and append "e" plus
%% the already-rendered exponent ExpL.
insert_exp(ExpL, [First]) ->
    [First | ".0e" ++ ExpL];
insert_exp(ExpL, [First | Rest]) ->
    [First, $. | Rest ++ "e" ++ ExpL].
%% Ceiling of a float as an integer: truncate, then add one whenever
%% truncation dropped a positive fractional part.
int_ceil(X) when is_float(X) ->
    T = trunc(X),
    if
        X > T -> T + 1;
        true  -> T
    end.
%% Integer exponentiation Base^Exp for Exp >= 0 via square-and-multiply;
%% O(log Exp) multiplications.
int_pow(Base, 0) when is_integer(Base) ->
    1;
int_pow(Base, Exp) when is_integer(Base), is_integer(Exp), Exp > 0 ->
    int_pow(Base, Exp, 1).

%% Accumulator form; invariant: Acc * Base^Exp is the final result.
int_pow(Base, Exp, Acc) when Exp < 2 ->
    Acc * Base;
int_pow(Base, Exp, Acc) ->
    NextAcc = case Exp band 1 of
                  1 -> Acc * Base;
                  0 -> Acc
              end,
    int_pow(Base * Base, Exp bsr 1, NextAcc).
%% Number of binary digits in a positive integer — i.e. the count of
%% right-shifts needed to exhaust it: log2floor(1) =:= 1, log2floor(8) =:= 4.
log2floor(Int) when is_integer(Int), Int > 0 ->
    log2floor(Int, 0).

log2floor(0, Count) ->
    Count;
log2floor(Int, Count) ->
    log2floor(Int bsr 1, Count + 1).
%% fwrite_g(Float, Field, Adjust, Precision, PadChar)
%% Use the f form if Float is >= 0.1 and < 1.0e4,
%% and the prints correctly in the f form, else the e form.
%% Precision always means the # of significant digits.
%% Dispatch between fixed ("f") and exponent ("e") notation for Fl with P
%% significant digits, per the comment above: f form only for values in
%% [0.1, 1.0e4) that the requested precision can represent there.
fwrite_g(Fl, F, Adj, none, Pad) ->
    %% Default precision is 6 significant digits.
    fwrite_g(Fl, F, Adj, 6, Pad);
fwrite_g(Fl, F, Adj, P, Pad) when P >= 1 ->
    A = abs(Fl),
    %% E is the decimal exponent bucket of A; the atom 'fwrite_f' is a
    %% sentinel for "out of fixed-notation range" — in Erlang's term order a
    %% number is never greater than an atom, so the f-form guard below can
    %% never select it.
    E = if A < 1.0e-1 -> -2;
           A < 1.0e0 -> -1;
           A < 1.0e1 -> 0;
           A < 1.0e2 -> 1;
           A < 1.0e3 -> 2;
           A < 1.0e4 -> 3;
           true -> fwrite_f
        end,
    if P =< 1, E =:= -1;
       P-1 > E, E >= -1 ->
            fwrite_f(Fl, F, Adj, P-1-E, Pad);
       P =< 1 ->
            fwrite_e(Fl, F, Adj, 2, Pad);
       true ->
            fwrite_e(Fl, F, Adj, P, Pad)
    end.
%% iolist_to_chars(iolist()) -> deep_char_list()
%% Normalize an iolist into a deep list of latin-1 characters: bytes are
%% kept, sublists are recursed into, and binaries are expanded to lists.
iolist_to_chars([Byte | Rest]) when is_integer(Byte), Byte >= $\000, Byte =< $\377 ->
    [Byte | iolist_to_chars(Rest)];
iolist_to_chars([Sub | Rest]) ->
    [iolist_to_chars(Sub) | iolist_to_chars(Rest)];
iolist_to_chars([]) ->
    [];
iolist_to_chars(Bin) when is_binary(Bin) ->
    binary_to_list(Bin).
%% cdata() :: clist() | cbinary()
%% clist() :: maybe_improper_list(char() | cbinary() | clist(),
%% cbinary() | nil())
%% cbinary() :: unicode:unicode_binary() | unicode:latin1_binary()
%% cdata_to_chars(cdata()) -> io_lib:deep_char_list()
%% Normalize character data into a deep character list. Binaries are
%% decoded as Unicode when possible, falling back to a per-byte (latin-1)
%% expansion for invalid or incomplete encodings.
cdata_to_chars([C|Cs]) when is_integer(C), C >= $\000 ->
    [C | cdata_to_chars(Cs)];
cdata_to_chars([I|Cs]) ->
    [cdata_to_chars(I) | cdata_to_chars(Cs)];
cdata_to_chars([]) ->
    [];
cdata_to_chars(B) when is_binary(B) ->
    %% unicode:characters_to_list/1 reports bad input with an error/
    %% incomplete tuple rather than a list; old-style `catch` replaced
    %% with try/catch so throws, exits and errors are not conflated.
    try unicode:characters_to_list(B) of
        L when is_list(L) -> L;
        _ -> binary_to_list(B)
    catch
        _:_ -> binary_to_list(B)
    end.
%% string(String, Field, Adjust, Precision, PadChar)
%% Lay out a (possibly deep) character list: truncate/pad to print
%% precision P, then adjust within field width F using PadChar.
string(S, none, _Adj, none, _Pad) -> S;
string(S, F, Adj, none, Pad) ->
    string_field(S, F, Adj, lists:flatlength(S), Pad);
string(S, none, _Adj, P, Pad) ->
    %% Precision without a field: treat P as the field, left adjusted.
    string_field(S, P, left, lists:flatlength(S), Pad);
string(S, F, Adj, P, Pad) when F >= P ->
    N = lists:flatlength(S),
    if F > P ->
            if N > P ->
                    adjust(flat_trunc(S, P), chars(Pad, F-P), Adj);
               N < P ->
                    adjust([S|chars(Pad, P-N)], chars(Pad, F-P), Adj);
               true -> % N == P
                    adjust(S, chars(Pad, F-P), Adj)
            end;
       true -> % F == P
            string_field(S, F, Adj, N, Pad)
    end.
%% Fit a string of known flat length N into field width F: truncate when
%% too long, pad on the adjusted side when too short, pass through when it
%% fits exactly.
string_field(S, F, _Adj, N, _Pad) when N > F ->
    flat_trunc(S, F);
string_field(S, F, Adj, N, Pad) when N < F ->
    adjust(S, chars(Pad, F-N), Adj);
string_field(S, _, _, _, _) -> % N == F
    S.
%% unprefixed_integer(Int, Field, Adjust, Base, PadChar, Lowercase)
%% -> [Char].
%% Render Int in Base (2..36) without a base prefix, optionally lowercasing
%% the digits, then lay the result out via term/5.
unprefixed_integer(Int, F, Adj, Base, Pad, Lowercase)
  when Base >= 2, Base =< 1+$Z-$A+10 ->
    if Int < 0 ->
            S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
            term([$-|S], F, Adj, none, Pad);
       true ->
            S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
            term(S, F, Adj, none, Pad)
    end.
%% prefixed_integer(Int, Field, Adjust, Base, PadChar, Prefix, Lowercase)
%% -> [Char].
%% As unprefixed_integer/6, but with the Prefix character inserted before
%% the digits (after the minus sign for negative values).
prefixed_integer(Int, F, Adj, Base, Pad, Prefix, Lowercase)
  when Base >= 2, Base =< 1+$Z-$A+10 ->
    if Int < 0 ->
            S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
            term([$-,Prefix|S], F, Adj, none, Pad);
       true ->
            S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
            term([Prefix|S], F, Adj, none, Pad)
    end.
%% char(Char, Field, Adjust, Precision, PadChar) -> chars().
%% Repeat character C: P copies (default one), padded and adjusted up to
%% field width F.
char(C, none, _Adj, none, _Pad) -> [C];
char(C, F, _Adj, none, _Pad) -> chars(C, F);
char(C, none, _Adj, P, _Pad) -> chars(C, P);
char(C, F, Adj, P, Pad) when F >= P ->
    adjust(chars(C, P), chars(Pad, F - P), Adj).
%% newline(Field, Adjust, Precision, PadChar) -> [Char].
%% One newline, or F newlines when a field width is given.
%% NOTE(review): with a field width only `right` adjustment is accepted;
%% anything else is a function_clause error — confirm intended.
newline(none, _Adj, _P, _Pad) -> "\n";
newline(F, right, _P, _Pad) -> chars($\n, F).
%%
%% Utilities
%%
%% Attach padding to Data on the side implied by the adjustment: `left`
%% pads on the right, `right` pads on the left; empty padding is a no-op.
adjust(Data, [], _Adj) ->
    Data;
adjust(Data, Padding, left) ->
    [Data | Padding];
adjust(Data, Padding, right) ->
    [Padding | Data].
%% Flatten and truncate a deep list to at most N elements.
flat_trunc(List, N) when is_integer(N), N >= 0 ->
    flat_trunc(List, N, [], []).

%% Depth-first walk with an explicit stack of pending tails and an
%% accumulator of emitted elements, reversed at the end.
flat_trunc(Rest, 0, _Stack, Acc) when is_list(Rest) ->
    lists:reverse(Acc);
flat_trunc([Head | Tail], N, Stack, Acc) when is_list(Head) ->
    %% Descend into a sublist, remembering the tail for later.
    flat_trunc(Head, N, [Tail | Stack], Acc);
flat_trunc([Head | Tail], N, Stack, Acc) ->
    flat_trunc(Tail, N - 1, Stack, [Head | Acc]);
flat_trunc([], N, [Pending | Stack], Acc) ->
    %% Current level exhausted; resume the most recently deferred tail.
    flat_trunc(Pending, N, Stack, Acc);
flat_trunc([], _N, [], Acc) ->
    lists:reverse(Acc).
%% A deep version of string:chars/2,3: N copies of character C as a deep
%% list. Each doubling reuses (shares) the half-size sublist, so building
%% the result allocates only O(log N) cells.
chars(_C, 0) ->
    [];
chars(C, 1) ->
    [C];
chars(C, 2) ->
    [C, C];
chars(C, 3) ->
    [C, C, C];
chars(C, N) when is_integer(N), (N band 1) =:= 0 ->
    %% Even count: two shared copies of N/2 characters.
    Half = chars(C, N bsr 1),
    [Half | Half];
chars(C, N) when is_integer(N) ->
    %% Odd count: one extra character in front of two shared halves.
    Half = chars(C, N bsr 1),
    [C, Half | Half].
%chars(C, N, Tail) ->
% [chars(C, N)|Tail].
%% Lowercase conversion
%% Apply ASCII lowercasing only when the boolean flag is true; otherwise
%% pass the string through untouched.
cond_lowercase(String, true) ->
    lowercase(String);
cond_lowercase(String,false) ->
    String.
lowercase([H|T]) when is_integer(H), H >= $A, H =< $Z ->
[(H-$A+$a)|lowercase(T)];
lowercase([H|T]) ->
[H|lowercase(T)];
lowercase([]) ->
[]. | lib/stdlib/src/io_lib_format.erl | 0.559049 | 0.47725 | io_lib_format.erl | starcoder |
%%%-------------------------------------------------------------------
%% @doc eleb128 Little-Endian Base 128 (LEB128) impl.
%% The basic impl. of LEB128 (https://en.wikipedia.org/wiki/LEB128)
%% @end
%%%-------------------------------------------------------------------
-module(eleb128).
%% API
-export([signed_encode/1,
unsigned_encode/1,
signed_decode/1,
unsigned_decode/1]).
%% Exclusive upper bound for a 7-bit unsigned chunk (largest storable value: 127)
-define(UNSIGNED_MAX_VALUE, 128).
%% Exclusive upper bound for the 6-bit magnitude of a signed chunk (largest storable value: 63)
-define(SIGNED_MAX_VALUE, 64).
%%====================================================================
%% API functions
%%====================================================================
%% Encode any integer (positive or negative) as signed LEB128. The spec
%% previously claimed `neg_integer()`, but signed LEB128 covers the full
%% integer range; the signed bound (64) reserves a sign bit per 7-bit chunk.
-spec signed_encode(integer()) -> binary().
signed_encode(Value) ->
    encode(Value, 0, <<>>, ?SIGNED_MAX_VALUE).
%% Encode a non-negative integer as unsigned LEB128.
%% (Stray space between the spec name and its arguments removed for
%% consistency with the other specs in this module.)
-spec unsigned_encode(non_neg_integer()) -> binary().
unsigned_encode(Value) ->
    encode(Value, 0, <<>>, ?UNSIGNED_MAX_VALUE).
%% Decode a signed LEB128 value from the head of Binary, returning the
%% decoded integer and the unconsumed bytes. The spec previously claimed
%% `neg_integer()`, but signed LEB128 decodes the full integer range.
-spec signed_decode(binary()) -> {integer(), binary()}.
signed_decode(Binary) ->
    {Size, Bitstring, Tail} = decode(Binary, 0, <<>>),
    <<Value:Size/signed-integer>> = Bitstring,
    {Value, Tail}.
%% Decode an unsigned LEB128 value from the head of Binary, returning the
%% decoded integer together with the remaining (unconsumed) bytes.
-spec unsigned_decode(binary()) -> {non_neg_integer(), binary()}.
unsigned_decode(Binary) ->
    {BitCount, Bits, Rest} = decode(Binary, 0, <<>>),
    <<Value:BitCount/unsigned-integer>> = Bits,
    {Value, Rest}.
%%====================================================================
%% Internal functions
%%====================================================================
%% Encode Value into LEB128 chunks, least-significant 7 bits first. Max is
%% the exclusive bound deciding when the remaining bits fit in a final
%% chunk: 128 for unsigned values, 64 for signed ones (leaving room for the
%% sign in the chunk's top payload bit).
%%
%% The spec previously claimed `non_neg_integer()` for Value, but the
%% signed entry point routes negative integers through here as well (the
%% `-Max =< ...` guard and arithmetic `bsr` depend on that).
-spec encode(integer(), non_neg_integer(), binary(), pos_integer()) -> binary().
encode(Value, Shift, Acc, Max) when -Max =< Value bsr Shift andalso Value bsr Shift < Max ->
    %% Remaining bits fit in one chunk: emit it with the continuation bit clear.
    Chunk = Value bsr Shift,
    <<Acc/binary, 0:1, Chunk:7/integer>>;
encode(Value, Shift, Acc, Max) ->
    %% More significant bits remain: emit this chunk with the continuation
    %% bit set and advance 7 bits.
    Chunk = Value bsr Shift,
    encode(Value, Shift + 7, <<Acc/binary, 1:1, Chunk:7/integer>>, Max).
-spec decode(binary(), non_neg_integer(), binary()) -> {non_neg_integer(), binary(), binary()}.
decode(<<0:1, Chunk:7/bitstring, Tail/binary>>, Size, Acc) ->
{Size + 7, <<Chunk/bitstring, Acc/bitstring>>, Tail};
decode(<<1:1, Chunk:7/bitstring, Tail/binary>>, Size, Acc) ->
decode(Tail, Size + 7, <<Chunk/bitstring, Acc/bitstring>>). | src/eleb128.erl | 0.567937 | 0.491029 | eleb128.erl | starcoder |
% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
%%% ex: ft=erlang ts=4 sw=4 et
%%%
%%% Copyright 2016 <NAME>
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%% #alpaca_typer.erl
%%%
%%% This is based off of the sound and eager type inferencer in
%%% http://okmij.org/ftp/ML/generalization.html with some influence
%%% from https://github.com/tomprimozic/type-systems/blob/master/algorithm_w
%%% where the arrow type and instantiation are concerned.
%%%
%%% I still often use proplists in this module, mostly because dialyzer doesn't
%%% yet type maps correctly (Erlang 18.1).
-module(alpaca_typer).
-include("alpaca_ast.hrl").
-include("builtin_types.hrl").
%%% API
-export([type_modules/1]).
-export_type([cell/0]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%%% ##Data Structures
%%%
%%% ###Reference Cells
%%%
%%% Reference cells make unification much simpler (linking is a mutation)
%%% but we also need a simple way to make complete copies of cells so that
%%% each expression being typed can refer to its own copies of items from
%%% the environment and not _globally_ unify another function's types with
%%% its own, preventing others from doing the same (see Types And Programming
%%% Languages (Pierce), chapter 22).
-type cell() :: {cell, {cell_name, integer()}}.
-define(CELL_TABLE, alpaca_typer_cells).
-define(CELL_COUNTER, alpaca_cell_counter).
%% Allocate a fresh reference cell containing Typ and return its handle.
%% Cells live in a named, public ETS table that is created lazily: the
%% first lookup raises `badarg` (table absent), which seeds both the table
%% and the cell counter. Nesting a cell directly inside a cell is a
%% programming error and throws.
-spec new_cell(typ()) -> cell().
new_cell({cell, _}=C) ->
    throw({error, cell_in_cell, C});
new_cell(Typ) ->
    [{?CELL_COUNTER, CellCounter}] = try
                                         ets:lookup(?CELL_TABLE, ?CELL_COUNTER)
                                     catch
                                         error:badarg ->
                                             ets:new(?CELL_TABLE, [set, named_table, public]),
                                             Initial = {?CELL_COUNTER, 0},
                                             ets:insert(?CELL_TABLE, Initial),
                                             [Initial]
                                     end,
    %% TODO: does tuple construction time have a negative impact in larger
    %% code bases?
    N = {cell_name, CellCounter},
    ets:insert(?CELL_TABLE, {?CELL_COUNTER, CellCounter + 1}),
    true = ets:insert_new(?CELL_TABLE, {N, Typ}),
    {cell, N}.
%% Dereference a cell handle, chasing chains of cells down to the first
%% non-cell value. A cell that points at itself is a fatal cycle and
%% throws. Non-cell values pass through unchanged.
-spec get_cell(cell()|typ()) -> typ().
get_cell({cell, CellName}) ->
    case ets:lookup(?CELL_TABLE, CellName) of
        [{CellName, {cell, CellName}}] ->
            throw({recursive_cell, CellName});
        [{CellName, {cell, _}=NextCell}] ->
            get_cell(NextCell);
        [{CellName, Val}] ->
            Val
    end;
get_cell(NotACell) ->
    NotACell.
%% Overwrite the contents of a cell, guarding against the degenerate
%% states that cause cycles: storing a cell handle directly in a cell and
%% a cell linking to itself.
%% The counter key is reserved; refusing to clobber it is a hard error.
set_cell(?CELL_COUNTER, _) ->
    erlang:error(badarg);
set_cell({cell, CellName}, {cell, TriedToCell}) ->
    T = {error, cell_in_cell, CellName, TriedToCell},
    throw(T);
set_cell({cell, CellName}, {link, {cell, CellName}}) ->
    throw({error, recursive_link, CellName});
set_cell({cell, CellName}, Val) ->
    true = ets:insert(?CELL_TABLE, {CellName, Val}),
    ok.
%%% The `map` is a map of `unbound type variable name` to a `t_cell()`.
%%% It's used to ensure that each reference or link to a single type
%%% variable actually points to a single canonical reference cell for that
%%% variable. Failing to do so causes problems with unification since
%%% unifying one variable with another type should impact all occurrences
%%% of that variable.
%%
%% Deep-copy the type stored in Cell into fresh reference cells, threading
%% RefMap through so that every occurrence of the same unbound type
%% variable maps to one shared copied cell.
-spec copy_cell(cell(), map()) -> {cell(), map()}.
copy_cell(Cell, RefMap) ->
    case get_cell(Cell) of
        {link, {cell, _}=C} ->
            {NC, NewMap} = copy_cell(C, RefMap),
            {new_cell({link, NC}), NewMap};
        {t_arrow, Args, Ret} ->
            %% there's some commonality below with copy_type_list/2
            %% that can probably be exploited in future:
            Folder = fun(A, {L, RM}) ->
                             {V, NM} = copy_cell(A, RM),
                             {[V|L], NM}
                     end,
            {NewArgs, Map2} = lists:foldl(Folder, {[], RefMap}, Args),
            {NewRet, Map3} = copy_cell(Ret, Map2),
            {new_cell({t_arrow, lists:reverse(NewArgs), NewRet}), Map3};
        {unbound, Name, _Lvl} = V ->
            %% Unbound type variables are the only values that must stay
            %% shared: reuse the canonical copy from RefMap when present.
            case maps:get(Name, RefMap, undefined) of
                undefined ->
                    NC = new_cell(V),
                    {NC, maps:put(Name, NC, RefMap)};
                Existing ->
                    {Existing, RefMap}
            end;
        {t_tuple, Members} ->
            F = fun(M, {RM, Memo}) ->
                        {M2, RM2} = copy_cell(M, RM),
                        {RM2, [M2|Memo]}
                end,
            {RefMap2, Members2} = lists:foldl(F, {RefMap, []}, Members),
            {new_cell({t_tuple, lists:reverse(Members2)}), RefMap2};
        {t_list, C} ->
            {C2, Map2} = copy_cell(C, RefMap),
            {new_cell({t_list, C2}), Map2};
        {t_map, K, V} ->
            {K2, Map2} = copy_cell(K, RefMap),
            {V2, Map3} = copy_cell(V, Map2),
            {new_cell({t_map, K2, V2}), Map3};
        #t_record{members=Members, row_var=RV} ->
            %% Copy each member's type, then the row variable.
            F = fun(#t_record_member{type=T}=M, {Ms, Map}) ->
                        {T2, Map2} = copy_cell(T, Map),
                        {[M#t_record_member{type=T2}|Ms], Map2}
                end,
            {RevMembers, Map2} = lists:foldl(F, {[], RefMap}, Members),
            {RV2, Map3} = copy_cell(RV, Map2),
            {new_cell(#t_record{members=lists:reverse(RevMembers), row_var=RV2}), Map3};
        #adt{vars=TypeVars, members=Members}=ADT ->
            %% copy the type variables:
            Folder = fun({TN, C}, {L, RM}) ->
                             {C2, NM} = copy_cell(C, RM),
                             {[{TN, C2}|L], NM}
                     end,
            {NewTypeVars, Map2} = lists:foldl(Folder, {[], RefMap}, TypeVars),
            %% and then copy the members:
            F2 = fun(M, {L, RM}) ->
                         {M2, NM} = copy_cell(M, RM),
                         {[M2|L], NM}
                 end,
            {NewMembers, Map3} = lists:foldl(F2, {[], Map2}, Members),
            {new_cell(ADT#adt{vars=lists:reverse(NewTypeVars),
                              members=lists:reverse(NewMembers)}), Map3};
        {t_adt_cons, _}=Constructor ->
            %% Constructors carry no nested cells; share them as-is.
            {Constructor, RefMap};
        {t_receiver, MsgT, BodyT} ->
            {MsgT2, Map2} = copy_cell(MsgT, RefMap),
            {BodyT2, Map3} = copy_cell(BodyT, Map2),
            {new_cell({t_receiver, MsgT2, BodyT2}), Map3};
        {cell, _}=Cell ->
            copy_cell(Cell, RefMap);
        V ->
            %% Leaf types just get a fresh cell.
            {new_cell(V), RefMap}
    end.
%%% ###Environments
%%%
%%% Environments track the following:
%%%
%%% 1. A counter for naming new type variables
%%% 2. The modules entered so far while checking the types of functions called
%%% in other modules that have not yet been typed. This is used in a crude
%%% form of detecting mutual recursion between/across modules.
%%% 3. The current module being checked.
%%% 4. The types available to the current module, eventually including
%%% imported types. This is used to find union types.
%%% 5. A proplist from type constructor name to the constructor AST node.
%%% 6. A proplist from type name to its instantiated ADT record.
%%% 7. A proplist of {expression name, expression type} for the types
%%% of values and functions so far inferred/checked.
%%% 8. The set of modules included in type checking.
%%%
%%% I'm including the modules in the typing environment so that when a call
%%% crosses a module boundary into a module not yet checked, we can add the
%%% bindings the other function expects. An example:
%%%
%%% Function `A.f` (f in module A) calls function `B.g` (g in module B). `B.g`
%%% calls an unexported function `B.h`. If the module B has not been checked
%%% before we check `A.f`, we have to check `B.g` in order to determine `A.f`'s
%%% type. In order to check `B.g`, `B.h` must be in the enviroment to also be
%%% checked.
%%%
-record(env, {
next_var=0 :: integer(),
entered_modules=[] :: list(atom()),
current_module=none :: none | alpaca_module(),
current_types=[] :: list(alpaca_type()),
type_constructors=[] :: list({string(), alpaca_constructor()}),
type_bindings=[] :: list({string(), t_adt()}),
bindings=[] :: list({term(), typ()|cell()}),
modules=[] :: list(alpaca_module())
}).
-type env() :: #env{}.
%% Build a fresh typing environment over the given module list, seeding the
%% bindings with reference-celled types for every built-in function.
new_env(Mods) ->
    BifBindings = lists:map(fun celled_binding/1, ?all_bifs),
    #env{bindings=BifBindings, modules=Mods}.
%%% We need to build a proplist of type constructor name to the actual type
%%% constructor's AST node and associated type.
-spec constructors(list(alpaca_type())) -> list({string(), alpaca_constructor()}).
constructors(Types) ->
    %% For one union type, pair each constructor member's name with the
    %% constructor node annotated with its owning type; non-constructor
    %% members contribute nothing.
    MemberFolder =
        fun(#alpaca_constructor{name=#type_constructor{name=N}}=C, {Type, Acc}) ->
                WithType = C#alpaca_constructor{type=Type},
                {Type, [{N, WithType}|Acc]};
           (_, Acc) ->
                Acc
        end,
    %% Type aliases declare no constructors and are skipped.
    TypesFolder = fun(#alpaca_type{members=Ms}=Typ, Acc) ->
                          {_, Cs} = lists:foldl(MemberFolder, {Typ, []}, Ms),
                          [Cs|Acc];
                     (#alpaca_type_alias{}, Acc) -> Acc
                  end,
    lists:flatten(lists:foldl(TypesFolder, [], Types)).
%% Given a presumed newly-typed module, replace its untyped occurrence in
%% the supplied environment. If the module is not yet present in the
%% environment it is simply added.
replace_env_module(#env{modules=Ms}=Env, #alpaca_module{name=Name}=Mod) ->
    KeepOther = fun(#alpaca_module{name=N}) -> N /= Name end,
    Env#env{modules = [Mod | lists:filter(KeepOther, Ms)]}.
%% Wrap a BIF's arrow type so its argument and return types live in fresh
%% reference cells, ready for unification.
celled_binding({Name, {t_arrow, Args, Ret}}) ->
    {Name, {t_arrow, [new_cell(A) || A <- Args], new_cell(Ret)}}.
%% Bind Name to Typ in the environment, dropping any previous binding(s)
%% with the same name so lookups always see the newest type first.
update_binding(Name, Typ, #env{bindings=Bs} = Env) ->
    Remaining = [B || {N, _}=B <- Bs, N =/= Name],
    Env#env{bindings=[{Name, Typ} | Remaining]}.
%% Record the next fresh type-variable number in the environment.
update_counter(VarNum, Env) ->
    Env#env{next_var=VarNum}.
%% Used by deep_copy_type for a set of function arguments or
%% list elements. Copies each type left-to-right, threading the
%% reference-cell map through so repeated type variables share one copy.
copy_type_list(TL, RefMap) ->
    lists:mapfoldl(fun copy_type/2, RefMap, TL).
%%% As referenced in several places, this is, after a fashion, following
%%% Pierce's advice in chapter 22 of Types and Programming Languages.
%%% We make a deep copy of the chain of reference cells so that we can
%%% unify a polymorphic function with some other types from a function
%%% application without _permanently_ unifying the types for everyone else
%%% and thus possibly blocking a legitimate use of said polymorphic function
%%% in another location.
deep_copy_type({t_arrow, A, B}, RefMap) ->
    {NewArgs, Map2} = copy_type_list(A, RefMap),
    {NewRet, Map3} = copy_type(B, Map2),
    {{t_arrow, NewArgs, NewRet}, Map3};
deep_copy_type({t_tuple, Members}, RefMap) ->
    {Members2, Map2} = copy_type_list(Members, RefMap),
    {{t_tuple, Members2}, Map2};
deep_copy_type({t_list, A}, RefMap) ->
    %% NOTE(review): the t_list payload is passed through copy_type_list/2,
    %% which folds over a list, while copy_cell/2 treats the same payload
    %% as a single cell -- confirm the payload is always a list here.
    {NewList, Map} = copy_type_list(A, RefMap),
    {{t_list, NewList}, Map};
deep_copy_type({t_map, K, V}, RefMap) ->
    {NewK, Map2} = copy_type(K, RefMap),
    {NewV, Map3} = copy_type(V, Map2),
    {{t_map, NewK, NewV}, Map3};
deep_copy_type({t_receiver, A, B}, RefMap) ->
    %% Here we're copying the body of the receiver first and then the
    %% receiver type itself, explicitly with a method that pulls existing
    %% reference cells for named type variables from the map returned by
    %% the body's deep copy operation. This ensures that when the same
    %% type variable occurs in body the body and receive types we use the
    %% same reference cell.
    {B2, M2} = deep_copy_type(B, RefMap),
    {A2, M3} = copy_type(A, M2),
    {{t_receiver, A2, B2}, M3};
deep_copy_type(#t_record{members=Ms, row_var=V}=R, RefMap) ->
    {Ms2, RefMap2} = lists:foldl(fun(M, {Memo, Map}) ->
                                         {M2, Map2} = deep_copy_type(M, Map),
                                         {[M2|Memo], Map2}
                                 end,
                                 {[], RefMap}, Ms),
    {V2, RefMap3} = deep_copy_type(V, RefMap2),
    {R#t_record{members=lists:reverse(Ms2), row_var=V2}, RefMap3};
deep_copy_type(#t_record_member{type=T}=M, RefMap) ->
    {T2, RefMap2} = deep_copy_type(T, RefMap),
    {M#t_record_member{type=T2}, RefMap2};
deep_copy_type(T, M) ->
    %% Anything else (leaf types, cells) is returned unchanged.
    {T, M}.
%% Copy a single type for instantiation: cells are deep-copied, arrows get
%% a structural deep copy, unbound variables are celled first (so they can
%% be shared through the map), and anything else gets a fresh cell.
copy_type({cell, _}=Cell, RefMap) ->
    copy_cell(Cell, RefMap);
copy_type({t_arrow, _, _}=A, M) ->
    deep_copy_type(A, M);
copy_type({unbound, _, _}=U, M) ->
    copy_type(new_cell(U), M);
copy_type(T, M) ->
    {new_cell(T), M}.
%%% ## Type Inferencer
%% Occurs check: report an error when the type variable Label appears
%% inside the type it is being unified with, and lower the binding level of
%% other unbound variables to at most Level along the way.
occurs(Label, Level, {cell, _}=Cell) ->
    occurs(Label, Level, get_cell(Cell));
occurs(Label, _Level, {unbound, Label, _}) ->
    {error, {circular, Label}};
occurs(Label, Level, {link, Ty}) ->
    occurs(Label, Level, Ty);
occurs(_Label, Level, {unbound, N, Lvl}) ->
    {unbound, N, min(Level, Lvl)};
occurs(Label, Level, {t_arrow, Params, RetTyp}) ->
    {t_arrow,
     lists:map(fun(T) -> occurs(Label, Level, T) end, Params),
     occurs(Label, Level, RetTyp)};
occurs(Label, Level, #t_record{members=Ms, row_var=RV}) ->
    %% NOTE(review): unlike the arrow clause, this returns a plain list of
    %% occurs results (short-circuiting only a leading error) rather than a
    %% rebuilt record; callers appear to match only on {error, _} vs
    %% {unbound, _, _} -- confirm this shape is intended.
    F = fun(_, [{error, {circular, _}}=Err|_]) -> Err;
           (X, Acc) -> [occurs(Label, Level, X)|Acc]
        end,
    lists:foldl(F, [], [RV|Ms]);
occurs(Label, Level, #t_record_member{type=T}) ->
    occurs(Label, Level, T);
occurs(_L, _Lvl, T) ->
    T.
%% Fully dereference a cell handle to the bare type it holds; non-cell
%% values pass through unchanged.
unwrap_cell({cell, _}=Cell) ->
    unwrap_cell(get_cell(Cell));
unwrap_cell(Typ) ->
    Typ.
%% Name of the module currently being type-checked, or `undefined` when
%% the environment carries no current module. Used for error reporting.
module_name(Env) ->
    case Env of
        #env{current_module=#alpaca_module{name=Name}} -> Name;
        _ -> undefined
    end.
-type unification_error() ::
        {error, {cannot_unify, atom(), integer(), typ(), typ()}}.
%% I make unification error tuples everywhere, just standardizing their
%% construction here:
-spec unify_error(Env::env(), Line::integer(), typ(), typ()) ->
                         unification_error().
unify_error(Env, Line, Typ1, Typ2) ->
    %% Both types are fully unwrapped so the error carries no live cells.
    {error, {cannot_unify, module_name(Env), Line, unwrap(Typ1), unwrap(Typ2)}}.
%% Error constructors for type-constructor failure modes; each tags the
%% error with the current module and the offending source line.
error_not_enough_type_arguments(Env, #type_constructor{line=L, name=N}) ->
    {error, {not_enough_type_arguments, module_name(Env), L, N}}.
error_too_many_type_arguments(Env, #type_constructor{line=L, name=N}) ->
    {error, {too_many_type_arguments, module_name(Env), L, N}}.
error_bad_constructor(Env, #type_constructor{line=L, name=N}) ->
    {error, {bad_constructor, module_name(Env), L, N}}.
error_bad_constructor_arg(Env, Term) ->
    {error, {bad_constructor_arg, module_name(Env), Term}}.
error_unknown_type_variable(Env, {type_var, L, N}) ->
    {error, {unknown_type_var, module_name(Env), L, N}}.
%%% Unify now requires the environment not in order to make changes to it but
%%% so that it can look up potential type unions when faced with unification
%%% errors.
%%%
%%% For the purposes of record unification, T1 is considered to be the lower
%%% bound for unification. Example:
%%%
%%% a: {x: int, y: int} -- a row variable `A` is implied.
%%% f: {x: int|F} -> (int, {x: int|F})
%%%
%%% Calling f(a) given the above poses no problem. The two `x` members unify
%%% and the `F` in f's type unifies with `y: int|A`. But:
%%%
%%% b: {x: int} - a row variable `B` is implied.
%%% f: {x: int, y: int|F} -> (int, {x: int, y: int|F})
%%%
%%% Here `f` is more specific than `b` and _requires_ a `y: int` member. Its
%%% type must serve as a lower bound for unification, we don't want `f(b)` to
%%% succeed if the implied row variable `B` does not contain a `y: int`.
%%%
%%% Some of the packing into and unpacking from row variables is likely to get
%%% a little hairy in the first implementation here.
%% Entry point: unify without the strict-record requirement (see unify/5).
unify(T1, T2, Env, Line) ->
    unify(T1, T2, Env, Line, false).
%% `StrictRecords` should be set to `true` whenever unifying the results from
%% multiple branches, e.g. the results of several match clauses or different
%% function versions. It will cause unify_records/5 to require that all records
%% have precisely the same fields so that a single expression or function will
%% result in the correct type. Previously this expression:
%%
%% match sym with
%% | :a -> {a=true, b=false}
%% | :b -> {b=false}
%%
%% would result in the type `{a: bool, b: bool}` which is only true for one of
%% the branches! With `StrictRecords` this doesn't happen any more.
%% Core unification. See the comment block above for `StrictRecords`.
%% The spec previously declared four arguments for this five-argument
%% function (it matched the unify/4 wrapper instead); corrected here.
-spec unify(typ(), typ(), env(), integer(), boolean()) -> ok | {error, term()}.
unify(T1, T2, Env, Line, StrictRecords) ->
    case {unwrap_cell(T1), unwrap_cell(T2)} of
        {T, T} ->
            ok;
        %% only one instance of a type variable is permitted:
        {{unbound, N, _}, {unbound, N, _}} -> unify_error(Env, Line, T1, T2);
        {{link, Ty}, _} ->
            unify(Ty, T2, Env, Line);
        {_, {link, Ty}} ->
            unify(T1, Ty, Env, Line);
        {t_rec, _} ->
            set_cell(T1, {link, T2}),
            ok;
        {_, t_rec} ->
            set_cell(T2, {link, T1}),
            ok;
        %% Definitely room for cleanup in the next two cases:
        {{unbound, N, Lvl}, Ty} ->
            case occurs(N, Lvl, Ty) of
                {unbound, _, _} = T ->
                    set_cell(T2, T),
                    set_cell(T1, {link, T2});
                {error, _} = E ->
                    throw(E);
                _Other ->
                    set_cell(T1, {link, T2})
            end,
            ok;
        {Ty, {unbound, N, Lvl}} ->
            case occurs(N, Lvl, Ty) of
                {unbound, _, _} = T ->
                    set_cell(T1, T), % level adjustment
                    set_cell(T2, {link, T1});
                {error, _} = E ->
                    %% NOTE(review): the mirror clause above throws on an
                    %% occurs-check failure but this one discards E and the
                    %% link below is installed anyway (note also the
                    %% duplicated set_cell after this case) -- confirm the
                    %% asymmetry is intended before changing it.
                    E;
                _Other ->
                    set_cell(T2, {link, T1})
            end,
            set_cell(T2, {link, T1}),
            ok;
        %%% This section creeps me right out at the moment. This is where some
        %%% other operator that moves the receiver to the outside should be.
        %%% Smells like a functor or monad to me.
        {{t_arrow, _, A2}, {t_arrow, _, B2}} ->
            ArrowArgCells = fun({cell, _}=C) ->
                                    {t_arrow, Xs, _}=get_cell(C),
                                    Xs;
                               ({t_arrow, Xs, _}) -> Xs
                            end,
            case unify_list(ArrowArgCells(T1), ArrowArgCells(T2), Env, Line) of
                {error, _} = E -> E;
                _ ->
                    %% Unwrap cells and links down to the first non-cell level.
                    %% Super gross.
                    F = fun({cell, _}=C) ->
                                case get_cell(C) of
                                    {t_receiver, _, _}=R ->
                                        R;
                                    {link, {cell, _}=CC} ->
                                        case get_cell(CC) of
                                            {t_receiver, _, _}=R2 -> R2;
                                            _ -> none
                                        end;
                                    {link, {t_receiver, _, _}=R2} -> R2;
                                    _ -> none
                                end;
                           ({link, {cell, _}=CC}) ->
                                case get_cell(CC) of
                                    {t_receiver, _, _}=R2 -> R2;
                                    _ -> none
                                end;
                           ({t_receiver, _, _}=X) -> X;
                           (_) -> none
                        end,
                    AArgs = case T1 of
                                {cell, _} ->
                                    {t_arrow, Xs, _}=get_cell(T1),
                                    Xs;
                                _ ->
                                    {t_arrow, Xs, _}=T1,
                                    Xs
                            end,
                    StripCell = fun({cell, _}=C) -> get_cell(C);
                                   ({link, {cell, _}=C}) -> get_cell(C);
                                   %% TODO: un-celled link? Seems bad. Verify and fix.
                                   ({link, X}) -> X;
                                   (X) -> X
                                end,
                    NoCellArgs = lists:map(StripCell, lists:map(StripCell, AArgs)),
                    RR = [Receiver||{t_receiver, _, _}=Receiver
                                        <- lists:map(F, NoCellArgs)],
                    %% Any argument to a function application that is a receiver
                    %% makes the entire expression a receiver.
                    case RR of
                        [] ->
                            unify(A2, B2, Env, Line, true);
                        %% The received types for each receiver must unify in
                        %% order for the process to be typed correctly.
                        [{t_receiver, H, _}|Tail] ->
                            Unify = fun(_, {error, _}=Err) -> Err;
                                       ({t_receiver, T, _}, Acc) ->
                                            case unify(T, Acc, Env, Line) of
                                                {error, _}=Err -> Err;
                                                ok -> T
                                            end;
                                       (_Other, Acc) ->
                                            Acc
                                    end,
                            case lists:foldl(Unify, H, Tail) of
                                {error, _}=Err -> Err;
                                _ ->
                                    case unify(A2, B2, Env, Line, true) of
                                        {error, _}=Err -> Err;
                                        ok ->
                                            %% Re-wrapping with fresh cells
                                            %% because I was running into
                                            %% cycles. This entire block of
                                            %% arrow unification needs to be
                                            %% rewritten.
                                            Receiver = {t_receiver,
                                                        new_cell(unwrap(H)),
                                                        new_cell(unwrap(A2))},
                                            set_cell(A2, Receiver),
                                            set_cell(B2, {link, A2}),
                                            ok
                                    end
                            end
                    end
            end;
        {{t_tuple, A}, {t_tuple, B}} when length(A) =:= length(B) ->
            case unify_list(A, B, Env, Line) of
                {error, _} = Err -> Err;
                _ -> ok
            end;
        {{t_list, _}, t_nil} ->
            set_cell(T2, {link, T1}),
            ok;
        {t_nil, {t_list, _}} ->
            set_cell(T1, {link, T2}),
            ok;
        {{t_list, A}, {t_list, B}} ->
            unify(A, B, Env, Line);
        {{t_map, A1, B1}, {t_map, A2, B2}} ->
            case unify(A1, A2, Env, Line) of
                {error, _}=Err -> Err;
                ok ->
                    case unify(B1, B2, Env, Line) of
                        {error, _}=Err -> Err;
                        ok -> ok
                    end
            end;
        {#t_record{}=LowerBound, #t_record{}=Target} ->
            unify_records(LowerBound, Target, Env, Line, StrictRecords);
        {#adt{}=A, B} -> unify_adt(T1, T2, A, B, Env, Line);
        {A, #adt{}=B} -> unify_adt(T2, T1, B, A, Env, Line);
        {{t_pid, _}, {t_pid, _}} ->
            {t_pid, AC} = get_cell(T1),
            {t_pid, BC} = get_cell(T2),
            case unify(AC, BC, Env, Line) of
                {error, _}=Err -> Err;
                ok ->
                    set_cell(T1, {t_pid, AC}),
                    set_cell(T2, {link, T1}),
                    ok
            end;
        %%% Receivers unify with each other or in the case of a receiver and
        %%% something else, the receiver unifies its result type with the other
        %%% expression and both become receivers.
        {{t_receiver, _, _}, {t_receiver, _, _}} ->
            RecvRes = fun({cell, _}=C) ->
                              {t_receiver, _, X} = get_cell(C),
                              X;
                         ({t_receiver, _, X}) ->
                              X
                      end,
            RecvR = fun({cell, _}=C) ->
                            {t_receiver, X, _} = get_cell(C),
                            X;
                       ({t_receiver, X, _}) ->
                            X
                    end,
            case unify(RecvR(T1), RecvR(T2), Env, Line) of
                {error, _}=Err -> Err;
                ok -> case unify(RecvRes(T1), RecvRes(T2), Env, Line) of
                          {error, _}=Err -> Err;
                          ok ->
                              set_cell(T2, {link, T1}),
                              ok
                      end
            end;
        {{t_receiver, Recv, ResA}, {t_arrow, Args, ResB}} ->
            %% Use strict record unification for return value typing:
            case unify(ResA, ResB, Env, Line, true) of
                {error, _}=Err -> Err;
                ok ->
                    %% NOTE(review): new_cell/1 throws when handed a cell,
                    %% so this path assumes T1/T2 are bare types here --
                    %% confirm.
                    NewTyp = {t_receiver, Recv, {t_arrow, Args, ResA}},
                    set_cell(new_cell(T1), NewTyp),
                    set_cell(new_cell(T2), {link, T1}),
                    ok
            end;
        {{t_arrow, _, _}, {t_receiver, _, _}} ->
            unify(T2, T1, Env, Line);
        {{t_receiver, _Recv, ResA}, B} ->
            case unify(ResA, new_cell(B), Env, Line) of
                {error, _}=Err -> Err;
                ok ->
                    set_cell(T2, {link, T1}),
                    ok
            end;
        {_A, {t_receiver, _Recv, _ResB}} ->
            unify(T2, T1, Env, Line);
        {_T1, _T2} ->
            %% No direct structural match: search the known types for an
            %% ADT (union) that covers both sides.
            case find_covering_type(_T1, _T2, Env, Line) of
                {error, _}=Err ->
                    Err;
                {ok, _EnvOut, Union} ->
                    % set_cell(T1, Union),
                    set_cell(T2, {link, Union}),
                    ok
            end
    end.
%%% Here we're checking for membership of one party in another or for an
%%% exact match. ADTs with the same name are checked as the same only if
%%% they also come from the same module.
-spec unify_adt(cell(), cell(), t_adt(), typ(), env(), Line::integer()) ->
                       ok |
                       {error, {cannot_unify, typ(), typ()}}.
unify_adt(C1,
          C2,
          #adt{name=N, vars=AVars, module=M}=A,
          #adt{name=N, vars=BVars, module=M},
          Env, L) ->
    %% Don't unify the keys _and_ vars:
    case unify_list([V||{_, V} <- AVars], [V||{_, V} <- BVars], Env, L) of
        {error, _}=Err -> Err;
        _ ->
            set_cell(C1, A),
            set_cell(C2, {link, C1}),
            ok
    end;
unify_adt(C1, C2, #adt{vars=Vs, members=Ms}=A, AtomTyp, Env, L)
  when is_atom(AtomTyp) ->
    %% A bare atom unifies with the ADT when it is one of the members
    %% directly, or unifies with a member bound to a type variable.
    case [M||M <- Ms, unwrap(M) =:= AtomTyp] of
        [_] ->
            set_cell(C1, A),
            set_cell(C2, {link, C1}),
            ok;
        [] ->
            VFolder = fun(_, ok) -> ok;
                         ({_, V}, Res) ->
                              case lists:member(V, Ms) of
                                  true -> unify(AtomTyp, V, Env, L);
                                  false -> Res
                              end
                      end,
            lists:foldl(VFolder, unify_error(Env, L, A, AtomTyp), Vs)
    end;
%% If an ADTs members are empty, it's a reference to an ADT that should
%% be instantiated in the environment. Replace it with the instantiated
%% version before proceeding. Having separate cases for A and B is
%% bothering me.
unify_adt(C1, C2, #adt{name=NA, members=[]}, B, Env, L) ->
    case proplists:get_value(NA, Env#env.type_bindings) of
        undefined -> {error, {no_type_for_name, NA}};
        ADT -> unify_adt(C1, C2, ADT, B, Env, L)
    end;
unify_adt(_C1, _C2,
          #adt{name=NA, vars=VarsA, members=MA}=A,
          #adt{name=NB, vars=VarsB, members=MB}=B, Env, L) ->
    %% Two differently-named ADTs unify when one names the other among its
    %% members; if so, unify the corresponding type variables pairwise.
    MemberFilter = fun(N) -> fun(#adt{name=AN}) when N =:= AN -> true;
                                (_) -> false
                             end
                   end,
    %% as a result of instantiating types some members might be in reference
    %% cells. We unpack them before searching for the right encompassing
    %% type so we don't miss anything:
    UnpackMembers = fun(Ms) -> [get_cell(M) || M <- Ms] end,
    case lists:filter(MemberFilter(NB), UnpackMembers(MA)) of
        [#adt{vars=ToCheck}] ->
            UnifyFun = fun(_, {error, _}=Err) -> Err;
                          ({{_, X}, {_, Y}}, ok) -> unify(X, Y, Env, L)
                       end,
            lists:foldl(UnifyFun, ok, lists:zip(VarsB, ToCheck));
        _ ->
            case lists:filter(MemberFilter(NA), UnpackMembers(MB)) of
                [#adt{vars=ToCheck}] ->
                    UnifyFun = fun(_, {error, _}=Err) -> Err;
                                  ({{_, X}, {_, Y}}, ok) -> unify(X, Y, Env, L)
                               end,
                    lists:foldl(UnifyFun, ok, lists:zip(VarsA, ToCheck));
                _ ->
                    unify_error(Env, L, A, B)
            end
    end;
unify_adt(C1, C2, #adt{}=A, {t_tuple, _}=ToCheck, Env, L) ->
    unify_adt_and_poly(C1, C2, A, ToCheck, Env, L);
unify_adt(C1, C2, #adt{}=A, {t_list, _LType}=ToCheck, Env, L) ->
    unify_adt_and_poly(C1, C2, A, ToCheck, Env, L);
unify_adt(C1, C2, #adt{}=A, {t_map, _, _}=ToCheck, Env, L) ->
    unify_adt_and_poly(C1, C2, A, ToCheck, Env, L);
unify_adt(C1, C2, #adt{}=A, #t_record{}=ToCheck, Env, L) ->
    unify_adt_and_poly(C1, C2, A, ToCheck, Env, L);
unify_adt(C1, C2, #adt{}=A, {t_arrow, _, _}=ToCheck, Env, L) ->
    unify_adt_and_poly(C1, C2, A, ToCheck, Env, L);
unify_adt(C1, C2, #adt{}=A, #alpaca_type{}=T, Env, L) ->
    %% Uninstantiated type expressions get instantiated first.
    {ok, Env2, B, _} = inst_type(T, Env),
    unify_adt(C1, C2, A, get_cell(B), Env2, L);
unify_adt(_, _, A, B, Env, L) ->
    %% NOTE(review): this diagnostic writes straight to stdout rather than
    %% going through a logger -- confirm it is intentional.
    io:format("*** The failed ADT unification candidates were:~n~p ~p~n~p ~p~n", [A, unwrap(A), B, unwrap(B)]),
    unify_error(Env, L, A, B).
%% Unify an ADT with a polymorphic/structural type by searching the ADT's
%% members for one that the candidate type unifies with; on success the
%% target cell is linked to the ADT's cell.
unify_adt_and_poly(C1, C2, #adt{members=Ms}=A, {cell, _}=ToCheck, Env, L) ->
    %% Try to find an ADT member that will unify with the passed in
    %% polymorphic type:
    F = fun(_, ok) -> ok;
           (T, Res) ->
                %% Unifying an ADT with something else always places it in the
                %% first argument's position (`A` above) but calls to unify
                %% must always treat the first argument as the lower bound (the
                %% constraint):
                case unify(T, ToCheck, Env, L) of
                    ok ->
                        set_cell(C2, {link, C1}),
                        ok;
                    _ ->
                        Res
                end
        end,
    Seed = unify_error(Env, L, A, ToCheck),
    lists:foldl(F, Seed, Ms);
%% ToCheck needs to be in a reference cell for unification and we're not
%% worried about losing the cell at this level since C1 and C2 are what
%% will actually be manipulated.
unify_adt_and_poly(C1, C2, #adt{members=_Ms}=A, ToCheck, Env, L) ->
    unify_adt_and_poly(C1, C2, A, new_cell(ToCheck), Env, L).
%%% Given two different types, find a type in the set of currently available
%%% types that can unify them or fail.
%%%
%%% On success the result is `{ok, Env, UnionType}`. The spec previously
%%% declared the type and environment in the opposite order from what the
%%% code (and its caller in unify/5, which matches `{ok, _EnvOut, Union}`)
%%% actually produces; corrected here.
-spec find_covering_type(
        T1::typ(),
        T2::typ(),
        env(),
        integer()) -> {ok, env(), typ()} |
                      {error,
                       {cannot_unify, atom(), integer(), typ(), typ()} |
                       {bad_variable, integer(), alpaca_type_var()}}.
find_covering_type(T1, T2, #env{current_types=Ts}=EnvIn, L) ->
    %% Convert all the available types to actual ADT types with
    %% which to attempt unions:
    TypeFolder = fun(_ ,{error, _}=Err) ->
                         Err;
                    (Typ, {ADTs, E}) ->
                         case inst_type(Typ, E) of
                             {error, _}=Err -> Err;
                             {ok, E2, ADT, Ms} -> {[{ADT, Ms}|ADTs], E2}
                         end
                 end,
    %% We remove all of the types from the environment because we don't want
    %% to reinstantiate them again on unification failure when it's trying
    %% to unify the two types with the instantiated member types.
    %%
    %% For example, if `T1` is `t_int` and the first member of a type we're
    %% checking for valid union is anything _other_ that `t_int`, the call
    %% to `unify` in `try_types` will cause `unify` to call this method
    %% (`find_covering_type`) again, leading to instantiating all of the
    %% types all over again and eventually leading to a spawn limit error.
    %% By simply removing the types from the environment before proceeding,
    %% we avoid this cycle.
    case lists:foldl(TypeFolder, {[], EnvIn#env{current_types=[]}}, Ts) of
        {error, _}=Err -> Err;
        {ADTs, EnvOut} ->
            ReturnEnv = EnvOut#env{current_types=EnvIn#env.current_types},
            %% each type, filter to types that are T1 or T2, if the list
            %% contains both, it's a match.
            %% NOTE(review): the {ok, _} short-circuit below never matches
            %% the 3-tuple success value, so a later candidate can replace
            %% an earlier match -- confirm which precedence is intended.
            F = fun(_, {ok, _}=Res) ->
                        Res;
                   ({ADT, Ms}, Acc) ->
                        case try_types(T1, T2, Ms, EnvOut, L, {none, none}) of
                            {ok, ok} -> {ok, ReturnEnv, ADT};
                            _ -> Acc
                        end
                end,
            Default = unify_error(EnvIn, L, T1, T2),
            lists:foldl(F, Default, lists:reverse(ADTs))
    end.
%%% try_types/6 attempts to unify the two given types with each candidate
%%% member type until both types have unified with some candidate or we run
%%% out of candidates, yielding `'no_match'`.
%% Walk the candidate types, tracking in the memo pair which of T1/T2 has
%% unified with some candidate so far; succeed as soon as both have.
try_types(_, _, _, _, _, {ok, ok}=Memo) ->
    Memo;
try_types(T1, T2, [Candidate|Tail], Env, L, {none, none}) ->
    %% Neither side matched yet: try T1 first, then T2, against this
    %% candidate.
    case unify(T1, Candidate, Env, L) of
        ok -> try_types(T1, T2, Tail, Env, L, {ok, none});
        _ -> case unify(T2, Candidate, Env, L) of
                 ok -> try_types(T1, T2, Tail, Env, L, {none, ok});
                 _ -> try_types(T1, T2, Tail, Env, L, {none, none})
             end
    end;
try_types(T1, T2, [Candidate|Tail], Env, L, {none, M2}=Memo) ->
    %% Only T1 still needs a match.
    case unify(T1, Candidate, Env, L) of
        ok -> try_types(T1, T2, Tail, Env, L, {ok, M2});
        _ -> try_types(T1, T2, Tail, Env, L, Memo)
    end;
try_types(T1, T2, [Candidate|Tail], Env, L, {M1, none}=Memo) ->
    %% Only T2 still needs a match.
    case unify(T2, Candidate, Env, L) of
        ok -> try_types(T1, T2, Tail, Env, L, {M1, ok});
        _ -> try_types(T1, T2, Tail, Env, L, Memo)
    end;
try_types(_, _, [], _, _, _) ->
    no_match.
%% See unify/5 for an explanation of `StrictRecords`. TLDR; it's used to force
%% an expression with multiple branches to require all returned record types to
%% have the exact same fields.
%% Unify two row-polymorphic record types, treating the first argument as
%% the lower bound (the constraint). Members present in both unify
%% pairwise; leftover target members are pushed into the lower bound's row
%% variable.
unify_records(
  #t_record{members=[], row_var=Lower},
  #t_record{members=[], row_var=Target},
  Env,
  Line,
  StrictRecords
 ) ->
    %% Both member lists empty: only the row variables remain.
    unify(Lower, Target, Env, Line, StrictRecords);
unify_records(LowerBound, Target, Env, Line, StrictRecords) ->
    %% Unify each member of the lower bound with the others. We track whether
    %% or not the type is for a pattern because if we _are_ unifying for
    %% patterns then we don't need to check for missing members.
    #t_record{
       is_pattern=P1,
       members=LowerM,
       row_var=LowerRow} = flatten_record(LowerBound),
    #t_record{
       is_pattern=P2,
       members=TargetM,
       row_var=TargetRow} = flatten_record(Target),
    case TargetM of
        [] ->
            %% We use a record with no members to force the row variable of a
            %% record update to be a record.
            unify_records(LowerBound, Target#t_record{members=LowerM}, Env, Line, StrictRecords);
        _ ->
            %% we operate on the target's members so that if the unification
            %% with the lower bound's members succeeds, we have a list of exactly
            %% what needs to unify with the lower's row variable.
            KeyedTarget = lists:map(
                            fun(#t_record_member{name=X}=TRM) -> {X, TRM} end,
                            TargetM),
            RemainingTarget = unify_record_members(
                                P1 or P2,
                                LowerM,
                                KeyedTarget,
                                Env,
                                Line,
                                StrictRecords),
            %% unify the row variables
            case RemainingTarget of
                [] ->
                    unify(LowerRow, TargetRow, Env, Line, StrictRecords);
                _ ->
                    case {LowerRow, TargetRow} of
                        {A, A} ->
                            %% Identical row variables: avoid embedding the
                            %% shared row var in the new record.
                            NewTarget = #t_record{members=RemainingTarget},
                            unify(LowerRow, new_cell(NewTarget), Env, Line, StrictRecords);
                        _ ->
                            NewTarget = #t_record{
                                           members=RemainingTarget,
                                           row_var=TargetRow},
                            unify(LowerRow, new_cell(NewTarget), Env, Line, StrictRecords)
                    end
            end
    end.
%% Unify each lower-bound record member against a keyed `{Name, Member}` list
%% built from the target's members, returning the target members that were
%% not consumed.  With `StrictRecords` any target member left over once the
%% lower bound is exhausted is an error (all branches must agree on the exact
%% field set).  When unifying for a pattern, fields missing from the target
%% are permitted and simply skipped.
unify_record_members(_IsPattern, [], [TargetRem|_], Env, Line, true) ->
    %% Strict mode: a leftover target member means the field sets differ.
    {N, #t_record_member{}} = TargetRem,
    erlang:error({missing_record_field, module_name(Env), Line, N});
unify_record_members(_IsPattern, [], TargetRem, _Env, _Line, _) ->
    %% Strip the name keys off the remaining target members before returning.
    lists:map(fun({cell, _}=X) -> X;
                 ({_, X}) -> X
              end, TargetRem);
unify_record_members(IsPattern, [LowerBound|Rem], TargetRem, Env, Line, StrictRecords) ->
    #t_record_member{name=N, type=T} = LowerBound,
    case proplists:get_value(N, TargetRem) of
        undefined when IsPattern =:= false ->
            %% Non-pattern usage requires every lower-bound field to exist
            %% in the target.
            erlang:error({missing_record_field, module_name(Env), Line, N});
        undefined ->
            %% Patterns may omit fields; skip this member.
            unify_record_members(IsPattern, Rem, TargetRem, Env, Line, StrictRecords);
        #t_record_member{type=T2} ->
            case unify(T, T2, Env, Line, StrictRecords) of
                {error, Err} ->
                    erlang:error(Err);
                ok ->
                    %% Field matched; consume it from the target's remainder.
                    NewTargetRem = proplists:delete(N, TargetRem),
                    unify_record_members(IsPattern, Rem, NewTargetRem, Env, Line, StrictRecords)
            end
    end.
%% Record types are basically linked lists where the `row_var` portion
%% could be either an unbound type variable or another record type. We
%% need to unpack these row variables to unify records predictably and
%% also upon completion of typing. Problems could occur when unifying the
%% following:
%%
%% #t_record{members={x, t_int}, row_var=#t_record{members={y, t_int}}, ...}
%% #t_record{members={y, t_int}, row_var=#t_record{members={x, t_int}}, ...}
%%
%% Because the members that need unifying (coming from either record to the
%% other) are effectively hidden in their respective row variables,
%% unify_record_members won't see them directly.
flatten_record(#t_record{members=Ms, row_var=#t_record{}=Inner}) ->
    %% Merge the inner record's members into the outer one, then continue
    %% flattening with the inner record's row variable.
    #t_record{members=InnerMs, row_var=InnerRow} = Inner,
    %% Now de-dupe fields, preferring newer ones:
    Deduped = lists:foldl(
                fun(#t_record_member{name=N, type=T}, Map) -> maps:put(N, T, Map) end,
                maps:new(),
                lists:reverse(Ms ++ InnerMs)),
    RecMems = [#t_record_member{name=N, type=T} || {N, T} <- maps:to_list(Deduped)],
    Rec = #t_record{members=RecMems, row_var=InnerRow},
    flatten_record(Rec);
flatten_record(#t_record{row_var={cell, _}=Cell}=R) ->
    %% The row variable is a reference cell: look through it (and through any
    %% links) until we reach a record, an unbound variable, or junk.
    case get_cell(Cell) of
        #t_record{}=Inner -> flatten_record(R#t_record{row_var=Inner});
        {link, L}=_Link -> flatten_record(R#t_record{row_var=L});
        {unbound, _, _} -> R;
        Other -> erlang:error({bad_row_var, Other, R})
    end;
flatten_record(#t_record{}=R) ->
    %% Row variable is neither a record nor a cell; nothing left to flatten.
    R.
%%% To search for a potential union, a type's variables all need to be
%%% instantiated and its members that are other types need to use the
%%% same variables wherever referenced. The successful returned elements
%%% (not including `'ok'`) include:
%%%
%%% - the instantiated type as an `#adt{}` record, with real type variable
%%% cells.
%%% - a list of all members that are _types_, so type variables, tuples, and
%%% other types but _not_ ADT constructors.
%%%
%%% Any members that are polymorphic types (AKA "generics") must reference
%%% only the containing type's variables or an error will result.
%%%
%%% In the `VarFolder` function you'll see that I always use a level of `0`
%%% for type variables. My thinking here is that since types are only
%%% defined at the top level, their variables are always created at the
%%% highest level. I might be wrong here and need to include the typing
%%% level as passed to inst/3 as well.
-spec inst_type(alpaca_type(), EnvIn::env()) ->
                       {ok, env(), typ(), list(typ())} |
                       {error, {bad_variable, integer(), alpaca_type_var()}}.
inst_type(#alpaca_type{}=Typ, EnvIn) ->
    #alpaca_type{name={type_name, _, N}, module=Mod, vars=Vs, members=Ms} = Typ,
    {Vars, Env} = inst_type_vars(Vs, [], EnvIn),
    %% If the same type name appears in the environment's current types,
    %% prefer the module recorded there over the one on this instance.
    F = fun(#alpaca_type{name={_, _, NN}, module=M}, undefined) when NN =:= N ->
                M;
           (_, M) ->
                M
        end,
    Mod2 = lists:foldl(F, Mod, Env#env.current_types),
    ParentADT = #adt{name=N, module=Mod2, vars=lists:reverse(Vars)},
    inst_type_members(ParentADT, Ms, Env, []);
inst_type(#alpaca_type_alias{target=T}, EnvIn) ->
    %% An alias instantiates to exactly its target type; use an empty ADT as
    %% the parent so the member machinery can run unchanged.
    {ok, EnvOut, _, [M]} = inst_type_members(#adt{}, [T], EnvIn, []),
    {ok, EnvOut, M, [M]}.
%% Instantiate a type's declared variables, producing a `{Name, Cell}` list.
%% A bare `{type_var, _, VN}` gets a fresh unbound variable; a variable bound
%% to a concrete type gets that type instantiated (or copied) instead.
inst_type_vars([], DoneVars, Env) ->
    {DoneVars, Env};
inst_type_vars([{type_var, _, VN}|Rem], DoneVars, Env) ->
    %% Unconstrained variable: allocate a fresh unbound cell at level 0.
    {TVar, E2} = new_var(0, Env),
    inst_type_vars(Rem, [{VN, TVar}|DoneVars], E2);
inst_type_vars([{{type_var, _, VN}, #alpaca_type{}=T}|Rem], DoneVars, Env) ->
    %% Variable bound to another user-defined type: instantiate that type.
    {ok, Env2, ADT, _} = inst_type(T, Env),
    inst_type_vars(Rem, [{VN, new_cell(ADT)}|DoneVars], Env2);
inst_type_vars([{{type_var, _, VN}, Expr}|Rem], DoneVars, E) ->
    %% copy_cell/1 should put every nested member properly
    %% into its own reference cell:
    {E2, Cell} = case get_cell(Expr) of
                     #alpaca_type{}=AT ->
                         {ok, Env2, Typ, _} = inst_type(AT, E),
                         {Env2, Typ};
                     _ ->
                         {Celled, _} = copy_cell(Expr, maps:new()),
                         {E, Celled}
                 end,
    inst_type_vars(Rem, [{VN, Cell}|DoneVars], E2).
%% Instantiate each member expression of a type, turning the syntax-level
%% member descriptions into celled types.  Returns `{ok, Env, ADTCell,
%% Members}` where `Members` are the instantiated member types in their
%% original order, or `{error, {bad_variable, Line, Name}}` when a member
%% references a type variable the parent type does not declare.
inst_type_members(ParentADT, [], Env, FinishedMembers) ->
    {ok,
     Env,
     new_cell(ParentADT#adt{members=FinishedMembers}),
     lists:reverse(FinishedMembers)};
%% single atom types are passed unchanged (built-in types):
inst_type_members(ParentADT, [H|T], Env, Memo) when is_atom(H) ->
    inst_type_members(ParentADT, T, Env, [new_cell(H)|Memo]);
inst_type_members(ADT, [{t_list, TExp}|Rem], Env, Memo) ->
    %% Instantiate the element type, then wrap it back up as a list type.
    case inst_type_members(ADT, [TExp], Env, []) of
        {error, _}=Err -> Err;
        {ok, Env2, _, [InstMem]} ->
            inst_type_members(ADT, Rem, Env2,
                              [new_cell({t_list, InstMem})|Memo])
    end;
inst_type_members(ADT, [{t_map, KExp, VExp}|Rem], Env, Memo) ->
    %% Key and value types are instantiated independently.
    case inst_type_members(ADT, [KExp], Env, []) of
        {error, _}=Err -> Err;
        {ok, Env2, _, [InstK]} ->
            case inst_type_members(ADT, [VExp], Env2, []) of
                {error, _}=Err -> Err;
                {ok, Env3, _, [InstV]} ->
                    NewT = new_cell({t_map, InstK, InstV}),
                    inst_type_members(ADT, Rem, Env3, [NewT|Memo])
            end
    end;
inst_type_members(ADT, [#t_record{}=R|Rem], Env, Memo) ->
    #t_record{members=Ms, row_var=RV} = R,
    %% Ensure the record has a usable row variable cell: create a fresh one
    %% when absent, cell a bare unbound variable, or keep what is there.
    {RVC, Env2} = case RV of
                      undefined ->
                          {V, E} = new_var(0, Env),
                          {V, E};
                      {unbound, _, _}=V ->
                          {new_cell(V), Env};
                      _ ->
                          {RV, Env}
                  end,
    %% NOTE(review): each member below is instantiated against the original
    %% `Env` rather than the environment threaded through the fold (`_E` is
    %% ignored) — confirm this is intentional.
    F = fun(#t_record_member{type=T}=M, {NewMems, _E}) ->
                case inst_type_members(ADT, [T], Env, []) of
                    {error, _}=Err ->
                        erlang:error(Err);
                    {ok, E2, _, [InstT]} ->
                        {[M#t_record_member{type=InstT}|NewMems], E2}
                end
        end,
    {NewMems, Env3} = lists:foldl(F, {[], Env2}, Ms),
    NewT = new_cell(#t_record{members=lists:reverse(NewMems), row_var=RVC}),
    inst_type_members(ADT, Rem, Env3, [NewT|Memo]);
inst_type_members(ADT, [{t_receiver, MExp, BExp}|Rem], Env, Memo) ->
    %% Instantiate the message type and the body type separately.
    case inst_type_members(ADT, [MExp], Env, []) of
        {error, _}=Err -> Err;
        {ok, Env2, _, [InstM]} ->
            case inst_type_members(ADT, [BExp], Env2, []) of
                {error, _}=Err -> Err;
                {ok, Env3, _, [InstB]} ->
                    NewT = new_cell({t_receiver, InstM, InstB}),
                    inst_type_members(ADT, Rem, Env3, [NewT|Memo])
            end
    end;
inst_type_members(ADT, [{t_pid, TExp}|Rem], Env, Memo) ->
    case inst_type_members(ADT, [TExp], Env, []) of
        {error, _}=Err ->
            Err;
        {ok, Env2, _, [InstMem]} ->
            inst_type_members(ADT, Rem, Env2,
                              [new_cell({t_pid, InstMem})|Memo])
    end;
inst_type_members(#adt{vars=Vs}=ADT, [{type_var, L, N}|T], Env, Memo) ->
    %% A type variable must be one declared by the parent type; otherwise
    %% report which variable was bad and where.
    Default = {error, {bad_variable, L, N}},
    case proplists:get_value(N, Vs, Default) of
        {error, _}=Err -> Err;
        Typ -> inst_type_members(ADT, T, Env, [Typ|Memo])
    end;
inst_type_members(ADT, [{t_arrow, Args, Ret}|T], Env, Memo) ->
    %% Function types: instantiate all argument types then the return type.
    case inst_type_members(ADT, Args, Env, []) of
        {error, _}=Err ->
            Err;
        {ok, Env2, _, InstArgs} ->
            {ok, Env3, _, [InstRet]} = inst_type_members(ADT, [Ret], Env2, []),
            Arrow = new_cell({t_arrow, InstArgs, InstRet}),
            inst_type_members(ADT, T, Env3, [Arrow|Memo])
    end;
inst_type_members(ADT, [#alpaca_type_tuple{members=Ms}|T], Env, Memo) ->
    case inst_type_members(ADT, Ms, Env, []) of
        {error, _}=Err ->
            Err;
        {ok, Env2, _, InstMembers} ->
            inst_type_members(ADT, T, Env2,
                              [new_cell({t_tuple, InstMembers})|Memo])
    end;
inst_type_members(ADT,
                  [#alpaca_type{name={type_name, _, N}, vars=Vs, module=Mod}|T],
                  Env,
                  Memo) ->
    %% A nested user-defined type becomes an #adt{} member whose vars pair
    %% the referenced names with their instantiated types.
    case inst_type_members(ADT, Vs, Env, []) of
        {error, _}=Err -> Err;
        {ok, Env2, _, Members} ->
            Names = [VN || {type_var, _, VN} <- Vs],
            NewMember = #adt{name=N, module=Mod, vars=lists:zip(Names, Members)},
            inst_type_members(ADT, T, Env2, [new_cell(NewMember)|Memo])
    end;
inst_type_members(ADT, [#alpaca_constructor{}=C|T], Env, Memo) ->
    #alpaca_constructor{name=#type_constructor{name=N}, arg=A}=C,
    %% We try to instantiate the constructor's argument just to make sure it's
    %% fully valid, e.g. that it doesn't reference any type variables that don't
    %% exist in the parent ADT's definition.
    {ok, _, _, _} = inst_type_members(ADT, [A], Env, []),
    inst_type_members(ADT, T, Env, [{t_adt_cons, N}|Memo]);
%% Everything else gets discarded. Type constructors are not types in their
%% own right and thus not eligible for unification so we just discard them here:
inst_type_members(ADT, [_H|T], Env, Memo) ->
    inst_type_members(ADT, T, Env, Memo).
%%% When the typer encounters the application of a type constructor, we can
%%% treat it somewhat like a typ arrow vs a normal function arrow (see
%%% `typ_apply/5`). The difference is that the arity is always `1` and the
%%% result type may contain numerous type variables rather than the single
%%% type variable in a normal arrow. Example:
%%%
%%% type t 'x 'y = A 'x | B 'y
%%%
%%% f z = A (z + 1)
%%%
%%% We need a way to unify the constructor application with a type constructor
%%% arrow that will yield something matching the following:
%%%
%%% #adt{name="t", vars=[t_int, {unbound, _, _}]
%%%
%%% To do this, `inst_type_arrow` builds a type arrow that uses the same type
%%% variable cells in the argument as in the return type, which is of course
%%% an instantiated instance of the ADT. If the "type arrow" unifies with
%%% the argument in the actual constructor application, the return of the type
%%% arrow will have the correct variables instantiated.
inst_type_arrow(EnvIn, #type_constructor{}=TC) ->
    %% 20160603:  I have an awful lot of case ... of all over this
    %% codebase, trying a lineup of functions specific to this
    %% task here instead.  Sort of want Scala's `Try`.
    %%
    %% Step 1: instantiate the ADT that owns the constructor.
    ADT_f = fun({error, _}=Err) ->
                    Err;
               (#alpaca_constructor{type=#alpaca_type{}=T}=C) ->
                    {C, inst_type(T, EnvIn)}
            end,
    %% Step 2: instantiate the constructor's argument with the same variable
    %% cells as the ADT and build the `{type_arrow, Arg, ADT}` result.
    Cons_f = fun({error, _}=Err) -> Err;
                ({C, {ok, Env, ADT, _}}) ->
                     #adt{vars=Vs} = get_cell(ADT),
                     #alpaca_constructor{arg=Arg} = C,
                     %% We need to include types from other modules even if
                     %% they're not exported so that types we *have* imported
                     %% that depend on those we've not can still be instantiated
                     %% correctly:
                     ExtractTypes = fun(#alpaca_module{types=Ts}) -> Ts end,
                     OtherTs = lists:flatten(lists:map(ExtractTypes, Env#env.modules)),
                     Types = EnvIn#env.current_types ++ OtherTs,
                     {Env2, InstArg} = inst_constructor_arg(Arg, Vs, Types, Env),
                     %% Bit of a hack in order to handle aliases here.  If there
                     %% is a single member that is _not_ directly an ADT (e.g.
                     %% if we have a type that exists to bind a concrete type
                     %% to another one's variables) or a constructor, return
                     %% it directly.  Removing the first case with #adt{} means
                     %% that error messages report the wrong type name.
                     %%
                     %% Permitting aliases to use type variables would partly
                     %% address this hack.
                     InstArg2 = case get_cell(InstArg) of
                                    #adt{members=[PossibleAlias]} ->
                                        case get_cell(PossibleAlias) of
                                            #adt{} ->
                                                InstArg;
                                            {t_adt_cons, _} ->
                                                InstArg;
                                            _ ->
                                                PossibleAlias
                                        end;
                                    _ ->
                                        InstArg
                                end,
                     Arrow = {type_arrow, InstArg2, ADT},
                     {Env2, Arrow}
             end,
    %% If the constructor was not qualified with a module name it's pretty
    %% easy to fetch but if it was, then we need to first make sure it's in
    %% the specified module *and* that its enclosing type is exported.
    %%
    %% Here's the easy local get:
    GetTC = fun(#type_constructor{name=Name, module=undefined}) ->
                    Default = error_bad_constructor(EnvIn, TC),
                    %% constructors defined in this module or imported by it:
                    Available = EnvIn#env.type_constructors,
                    proplists:get_value(Name, Available, Default);
               %% and the part where we go to a different module:
               (#type_constructor{line=Line, name=Name, module=Mod}) ->
                    Mods = EnvIn#env.modules,
                    M = [AM || #alpaca_module{name=N}=AM <- Mods, Mod =:= N],
                    case M of
                        [] ->
                            #alpaca_module{name=MN} = EnvIn#env.current_module,
                            throw({error, {bad_module, MN, Line, Mod}});
                        [Target] ->
                            %% in the beginning of typing, constructors/1 links
                            %% the actual type to the constructor contained
                            %% within it to make it easy for inst_type to
                            %% instantiate the type itself.  Here we grab each
                            %% constructor whose name matches the one we're
                            %% looking for and insert the ADT itself in a
                            %% similar manner.
                            Types = Target#alpaca_module.types,
                            Exports = Target#alpaca_module.type_exports,
                            F = fun(#alpaca_type{members=Ms, name={_, _, TypeN}}=AT) ->
                                        Cs = [AC#alpaca_constructor{type=AT} ||
                                                 #alpaca_constructor{
                                                    name=#type_constructor{name=TCN}
                                                   }=AC <- Ms, TCN =:= Name],
                                        %% Now we make sure the type that the
                                        %% constructor belongs to is actually
                                        %% exported:
                                        case [E || E <- Exports, E =:= TypeN] of
                                            [] -> [];
                                            [_] -> Cs
                                        end
                                end,
                            case lists:flatten(lists:map(F, Types)) of
                                [RealC] ->
                                    RealC;
                                [] ->
                                    throw(error_bad_constructor(EnvIn, TC))
                            end
                    end
            end,
    try Cons_f(ADT_f(GetTC(TC)))
    catch
        throw:{error, _}=Error -> Error
    end.
%% Instantiate the argument portion of a type constructor, resolving type
%% variables against `Vs` (the enclosing ADT's instantiated variables) and
%% looking up referenced types in `Types`.  Returns `{Env, CelledType}`;
%% throws on unknown type variables or un-instantiable arguments.
inst_constructor_arg(none, _, _, Env) ->
    %% Nullary constructor: there is no argument type.
    {Env, undefined};
inst_constructor_arg(t_rec, _, _, Env) ->
    {Env, new_cell(t_rec)};
inst_constructor_arg(AtomType, _, _, Env) when is_atom(AtomType) ->
    %% Built-in base types pass through un-celled.
    {Env, AtomType};
inst_constructor_arg({type_var, _, N}=TV, Vs, _, Env) ->
    %% Type variables must resolve to one of the ADT's declared variables.
    case proplists:get_value(N, Vs) of
        undefined -> throw(error_unknown_type_variable(Env, TV));
        V -> {Env, V}
    end;
inst_constructor_arg(#t_record{members=Ms}=R, Vs, Types, Env) ->
    F = fun(#t_record_member{type=T}=M) ->
                case inst_constructor_arg(T, Vs, Types, Env) of
                    {error, _}=E -> erlang:error(E);
                    {_, T2} -> M#t_record_member{type=T2}
                end
        end,
    %% Fresh row variable so the record stays open for row polymorphism.
    {Var, Env2} = new_var(0, Env),
    {Env2, new_cell(R#t_record{members=lists:map(F, Ms), row_var=Var})};
inst_constructor_arg(#alpaca_constructor{name=#type_constructor{name=N}},
                     _Vs, _Types, Env) ->
    {Env, {t_adt_cons, N}};
inst_constructor_arg(#alpaca_type_tuple{members=Ms}, Vs, Types, Env) ->
    F = fun(M, {E, Memo}) ->
                {E2, M2} = inst_constructor_arg(M, Vs, Types, E),
                {E2, [M2|Memo]}
        end,
    {Env2, Ms2} = lists:foldl(F, {Env, []}, Ms),
    {Env2, new_cell({t_tuple, lists:reverse(Ms2)})};
inst_constructor_arg({t_list, ElementType}, Vs, Types, Env) ->
    {Env2, ListElem} = inst_constructor_arg(ElementType, Vs, Types, Env),
    {Env2, new_cell({t_list, ListElem})};
inst_constructor_arg({t_map, KeyType, ValType}, Vs, Types, Env) ->
    {Env2, KElem} = inst_constructor_arg(KeyType, Vs, Types, Env),
    {Env3, VElem} = inst_constructor_arg(ValType, Vs, Types, Env2),
    {Env3, new_cell({t_map, KElem, VElem})};
inst_constructor_arg({t_receiver, MsgT, BodyT}, Vs, Types, Env) ->
    {Env2, MElem} = inst_constructor_arg(MsgT, Vs, Types, Env),
    {Env3, BElem} = inst_constructor_arg(BodyT, Vs, Types, Env2),
    {Env3, new_cell({t_receiver, MElem, BElem})};
inst_constructor_arg({t_pid, MsgType}, Vs, Types, Env) ->
    {Env2, PidElem} = inst_constructor_arg(MsgType, Vs, Types, Env),
    {Env2, new_cell({t_pid, PidElem})};
inst_constructor_arg(#alpaca_type{name={type_name, _, N}, vars=Vars, members=M1},
                     Vs, Types, Env) ->
    case find_type(N, Types) of
        #alpaca_type_alias{target=Target} ->
            %% Aliases instantiate as their target type.
            inst_constructor_arg(Target, Vs, Types, Env);
        #alpaca_type{vars = V2, members=M2, module=Mod} ->
            %% when a polymorphic ADT occurs in another type's definition it might
            %% have concrete types assigned rather than variables and thus we want
            %% to find the original/biggest list of vars for the type.  E.g.
            %%
            %%   type option 'a = Some 'a | None
            %%   type either_int 'a = Left 'a | Right option int
            %%
            VarsToUse = case length(V2) > length(Vars) of
                            true -> V2;
                            false -> Vars
                        end,
            %% We use this within `F` to fall back to other type variables that may have
            %% been bound prior to this current type in a signature.  Strictly speaking
            %% we should remove items from `Vs` that already exist in `VarsToUse` but
            %% that could get pretty expensive.  I'm deliberately taking the lazy way
            %% out and just avoiding a repeated append operation to the lists:foldl
            %% call further below.
            VarsWithDupes = VarsToUse ++ Vs,
            %% NOTE(review): the first clause below looks names up in `Vs`
            %% while the comment above suggests `VarsWithDupes` — confirm
            %% which list is intended.
            F = fun({type_var, _, VN}) ->
                        {VN, proplists:get_value(VN, Vs)};
                   ({{type_var, _, _}=VN, CT}=_ConcreteType) ->
                        %% the "concrete" type might actually be an as-yet
                        %% uninstantiated type which will lead to unification
                        %% failures later.  Instead of assuming it's one of our base
                        %% types we instantiate it like anything else.  Here I haven't
                        %% worried about the returned environment as I think we're
                        %% safely covered by using the already known variables and
                        %% existing environment:
                        {_, InstCT} = inst_constructor_arg(CT, VarsWithDupes, Types, Env),
                        {VN, InstCT}
                end,
            ADT_Vars = lists:map(F, VarsToUse),
            Vs2 = replace_vars(M1, V2, Vs),
            {Env2, Members} = lists:foldl(
                                fun(M, {E, Memo}) ->
                                        {E2, MM} = inst_constructor_arg(M, Vs2, Types, E),
                                        {E2, [MM|Memo]}
                                end,
                                {Env, []},
                                M2),
            ADT = #adt{name=N,
                       vars=ADT_Vars,
                       members=lists:reverse(Members),
                       module=Mod},
            {Env2, new_cell(ADT)}
    end;
inst_constructor_arg({t_arrow, ArgTypes, RetType}, Vs, Types, Env) ->
    F = fun(A, {E, Memo}) ->
                {E2, A2} = inst_constructor_arg(A, Vs, Types, E),
                {E2, [A2|Memo]}
        end,
    {Env2, InstantiatedArgs} = lists:foldl(F, {Env, []}, ArgTypes),
    {Env3, InstantiatedRet} = inst_constructor_arg(RetType, Vs, Types, Env2),
    {Env3, new_cell({t_arrow, lists:reverse(InstantiatedArgs), InstantiatedRet})};
inst_constructor_arg(Arg, _, _, Env) ->
    %% Anything else cannot be a valid constructor argument.
    throw(error_bad_constructor_arg(Env, Arg)).
%% Look a type or type alias up by name in a list of definitions, throwing
%% `{error, {unknown_type, Name}}` when nothing matches.
find_type(Name, []) ->
    throw({error, {unknown_type, Name}});
find_type(Name, [Candidate|Rest]) ->
    case Candidate of
        #alpaca_type{name={type_name, _, Name}} -> Candidate;
        #alpaca_type_alias{name={type_name, _, Name}} -> Candidate;
        _ -> find_type(Name, Rest)
    end.
%% When one type's definition uses another type (e.g. mixing options and
%% lists) the two may name their type variables differently.  Example:
%%
%%   type opt 'a = Some 'a | None
%%   type tagged 'x = Tag opt 'x
%%
%% Typing an instance of `tagged` requires looking up `opt` and renaming its
%% 'a to 'x when typing the whole thing.
%%
%% Arguments, in order:
%%   1. Members of the _using_ type (`tagged` above) — variables or concrete
%%      types.
%%   2. Type variables of the _used_ type (`opt` above).
%%   3. Variables already bound in the current context.
%%
%% Produces a `{UsedVarName, Type}` list pairing each used-type variable with
%% either the context binding of the corresponding member variable or the
%% concrete member itself.
replace_vars([], _, _) ->
    [];
replace_vars([Member|MemRest], [{type_var, _, TargetName}|VarRest], KnownVars) ->
    Value = case Member of
                {type_var, _, SourceName} ->
                    proplists:get_value(SourceName, KnownVars);
                ConcreteType ->
                    ConcreteType
            end,
    [{TargetName, Value} | replace_vars(MemRest, VarRest, KnownVars)].
%% Unify two parameter lists, e.g. from a function arrow.
unify_list(As, Bs, Env, L) ->
    unify_list(As, Bs, {[], []}, Env, L).

%% Raised when two lists being unified differ in length (wrong arity).
arity_error(Env, L) ->
    erlang:error({arity_error, module_name(Env), L}).

%% Accumulating worker for unify_list/4: unifies element-wise, returning the
%% two lists (restored to original order) on success, the first unification
%% error otherwise, and erroring when the lengths differ.
unify_list([], [], {MemoA, MemoB}, _, _) ->
    {lists:reverse(MemoA), lists:reverse(MemoB)};
unify_list([], _, _, Env, L) ->
    arity_error(Env, L);
unify_list(_, [], _, Env, L) ->
    arity_error(Env, L);
unify_list([A|TA], [B|TB], {MA, MB}, Env, L) ->
    case unify(A, B, Env, L) of
        {error, _} = E -> E;
        ok -> unify_list(TA, TB, {[A|MA], [B|MB]}, Env, L)
    end.
%% Instantiate the type bound to `VarName` for use at typing level `Lvl`.
%% Falls back to top-level module bindings (see lookup_binding/3) when the
%% name is not in the environment's binding list.
-spec inst_binding(
        VarName :: atom()|string(),
        Line :: integer(),
        Lvl :: integer(),
        Env :: env()) -> {typ(), env(), map()} | {error, term()}.
inst_binding(VarName, Line, Lvl, #env{bindings=Bs} = Env) ->
    Default = {error, {bad_variable_name, module_name(Env), Line, VarName}},
    case proplists:get_value(VarName, Bs, Default) of
        {error, _} = E ->
            %% Not a local binding; maybe it's a top-level function defined
            %% later in the module.
            case lookup_binding(VarName, Env, E) of
                {ok, T} -> inst(T, Lvl, Env, maps:new());
                Err -> Err
            end;
        T ->
            inst(T, Lvl, Env, maps:new())
    end.
%% If inst_binding/4 can't find a binding in the environment for the name it was
%% given to look up, it uses this function to see if the name is a top-level
%% binding later in the module:
lookup_binding(VarName, Env, Default) ->
    case Env of
        #env{current_module=#alpaca_module{functions=Funs}} ->
            %% All top-level bindings whose label matches the name:
            MatchingFuns = [B || #alpaca_binding{name=#a_lab{name = N}}=B <- Funs,
                                 N =:= VarName
                           ],
            case MatchingFuns of
                [] ->
                    Default;
                %% There's no way to tell which arity version of a function is
                %% intended from a label alone so we're defaulting to the
                %% first one bound to the name we're looking for:
                [F|_] ->
                    case typ_of(Env, 0, F) of
                        {error, _}=E -> E;
                        {Typ, _} -> {ok, Typ}
                    end
            end;
        _ ->
            %% No current module to search; report the original failure.
            Default
    end.
%% This is modeled after instantiate/2 github.com/tomprimozic's example
%% located in the URL given at the top of this file.  The purpose of
%% CachedMap is to reuse the same instantiated unbound type variable for
%% every occurrence of the _same_ quantified type variable in a list
%% of function parameters.
%%
%% The return is the instantiated type, the updated environment and the
%% updated cache map.
-spec inst(typ(), integer(), env(), CachedMap::map()) ->
                  {typ(), env(), map()} | {error, term()}.
inst({link, Typ}, Lvl, Env, CachedMap) ->
    %% Links are transparent; instantiate whatever they point at.
    inst(Typ, Lvl, Env, CachedMap);
inst({unbound, _, _}=Typ, _, Env, M) ->
    {Typ, Env, M};
inst({qvar, Name}, Lvl, Env, CachedMap) ->
    case maps:get(Name, CachedMap, undefined) of
        undefined ->
            %% First occurrence of this quantified variable: make a fresh
            %% unbound variable and remember it for later occurrences.
            {NewVar, NewEnv} = new_var(Lvl, Env),
            {NewVar, NewEnv, maps:put(Name, NewVar, CachedMap)};
        Typ ->
            %% Cache hit: reuse the previously instantiated variable.
            %% BUGFIX: this clause must return the full {Typ, Env, Map}
            %% triple; returning the bare type crashed every caller that
            %% matched `{T, Env2, M} = inst(...)` on a cache hit.
            {Typ, Env, CachedMap}
    end;
inst({t_arrow, Params, ResTyp}, Lvl, Env, CachedMap) ->
    %% Instantiate every parameter, sharing the cache so repeated qvars map
    %% to the same fresh variable, then instantiate the result type.
    Folder = fun(Next, {L, E, Map, Memo}) ->
                     {T, Env2, M} = inst(Next, L, E, Map),
                     {Lvl, Env2, M, [T|Memo]}
             end,
    {_, NewEnv, M, PTs} = lists:foldr(Folder, {Lvl, Env, CachedMap, []}, Params),
    {RT, NewEnv2, M2} = inst(ResTyp, Lvl, NewEnv, M),
    Arrow = {t_arrow, PTs, RT},
    {Arrow, NewEnv2, M2};
inst({t_receiver, Recv, Body}=_R, Lvl, Env, CachedMap) ->
    {Body2, Env2, Map2} = inst(Body, Lvl, Env, CachedMap),
    {Recv2, Env3, Map3} = inst(Recv, Lvl, Env2, Map2),
    NewR = {t_receiver, Recv2, Body2},
    {NewR, Env3, Map3};
%% Everything else is assumed to be a constant:
inst(Typ, _Lvl, Env, Map) ->
    {Typ, Env, Map}.
%% Allocate a fresh unbound type-variable cell named t<N>, where N is the
%% environment's counter, and bump that counter.
-spec new_var(Lvl :: integer(), Env :: env()) -> {cell(), env()}.
new_var(Level, #env{next_var=Count}=Env) ->
    VarName = list_to_atom("t" ++ integer_to_list(Count)),
    Cell = new_cell({unbound, VarName, Level}),
    {Cell, Env#env{next_var=Count + 1}}.
%% Generalize a type at the given level: unbound variables created at a
%% deeper level become quantified variables (`{qvar, Name}`), links are
%% followed, arrows and receivers are generalized structurally, and every
%% other type is returned untouched.
-spec gen(integer(), typ()) -> typ().
gen(Level, {unbound, Name, VarLevel}) when VarLevel > Level ->
    {qvar, Name};
gen(Level, {link, Linked}) ->
    gen(Level, Linked);
gen(Level, {t_arrow, ParamTypes, ResultType}) ->
    GenParams = lists:map(fun(P) -> gen(Level, P) end, ParamTypes),
    {t_arrow, GenParams, gen(Level, ResultType)};
gen(Level, {t_receiver, MsgType, BodyType}) ->
    {t_receiver, gen(Level, MsgType), gen(Level, BodyType)};
gen(_Level, Typ) ->
    Typ.
%% Simple function that takes the place of a foldl over a list of
%% arguments to an apply.  Types each expression in order, threading the
%% type-variable counter through the environment, and returns the typed
%% list with the final counter — or the first typing error encountered.
typ_list([], _Lvl, #env{next_var=NextVar}, Memo) ->
    {lists:reverse(Memo), NextVar};
typ_list([H|T], Lvl, Env, Memo) ->
    case typ_of(Env, Lvl, H) of
        {error, _}=Err -> Err;
        {Typ, NextVar} ->
            typ_list(T, Lvl, update_counter(NextVar, Env), [Typ|Memo])
    end.
%% Recursively strip reference cells and links from a type, producing a
%% plain (cell-free) term.  Records are flattened first so their row-var
%% chains are resolved before members are unwrapped.
unwrap({cell, _}=Cell) ->
    unwrap(get_cell(Cell));
unwrap({link, Ty}) ->
    unwrap(Ty);
unwrap({t_arrow, A, B}) ->
    {t_arrow, [unwrap(X)||X <- A], unwrap(B)};
unwrap({t_clause, A, G, B}) ->
    {t_clause, unwrap(A), unwrap(G), unwrap(B)};
unwrap({t_tuple, Vs}) ->
    {t_tuple, [unwrap(V)||V <- Vs]};
unwrap({t_list, T}) ->
    {t_list, unwrap(T)};
unwrap({t_map, K, V}) ->
    {t_map, unwrap(K), unwrap(V)};
unwrap(#t_record{}=R) ->
    %% Flatten first so nested record row variables are merged in.
    #t_record{members=Ms, row_var=RV} = flatten_record(R),
    #t_record{members=lists:map(fun unwrap/1, Ms), row_var=unwrap(RV)};
unwrap(#t_record_member{type=T}=RM) ->
    RM#t_record_member{type=unwrap(T)};
unwrap(#adt{vars=Vs, members=Ms}=ADT) ->
    ADT#adt{vars=[{Name, unwrap(V)} || {Name, V} <- Vs],
            members=[unwrap(M) || M <- Ms]};
unwrap({t_receiver, A, B}) ->
    {t_receiver, unwrap(A), unwrap(B)};
unwrap({t_pid, A}) ->
    {t_pid, unwrap(A)};
unwrap(X) ->
    %% Base types, atoms, and anything without nested structure.
    X.
%% Error term for a type reference that could not be found at all in the
%% target module.
missing_type_error(FromModule, TargetModule, TypeName) ->
    {no_such_type, FromModule, TargetModule, TypeName}.
%% Error term for a type that exists in the target module but is not
%% exported from it.
private_type_error(FromModule, TargetModule, TypeName) ->
    {unexported_type, FromModule, TargetModule, TypeName}.
%% Find type `T` in module `M` by walking the known module list.  Yields
%% `{ok, Type}` on success; throws `no_such_type` when the module list is
%% exhausted and `unexported_type` when the type exists but is private.
retrieve_type(SourceModule, Module, Type, []) ->
    throw(missing_type_error(SourceModule, Module, Type));
retrieve_type(SM, #a_lab{name=M}, T, [#alpaca_module{name=M, types=Ts, type_exports=ETs}|Rem]) ->
    case [TT || #alpaca_type{name={type_name, _, TN}}=TT <- Ts, TN =:= T#a_lab.name] of
        [#alpaca_type{name={_, _, TN}}=Type] ->
            %% now make sure the type is exported:
            case [X || X <- ETs, X =:= TN] of
                [_] -> {ok, Type};
                _ -> throw(private_type_error(SM, M, T))
            end;
        %% NOTE(review): the recursion below passes the raw name `M` rather
        %% than the original `#a_lab{}` record, so any later module with the
        %% same name is only reachable via the catch-all clause — confirm
        %% that's intentional (module names are presumably unique).
        [] -> retrieve_type(SM, M, T, Rem)
    end;
retrieve_type(SM, M, T, [_|Rem]) ->
    %% Not the module we're looking for; keep scanning.
    retrieve_type(SM, M, T, Rem).
-spec type_modules([alpaca_module()]) -> {ok, [alpaca_module()]} |
                                         {error, term()}.
%% Type a list of modules inside an isolated, monitored process so that any
%% crash in the typer is converted into a return value for the caller.  The
%% worker's result travels back as the exit reason in the 'DOWN' message.
type_modules(Mods) ->
    {Pid, Monitor} = erlang:spawn_monitor(fun() ->
        try type_modules(Mods, new_env(Mods), []) of
            Res -> exit(Res)
        catch
            %% We want the underlying error that resulted in the bad match,
            %% not the `badmatch` itself:
            error:{badmatch, {error, _}=Err} ->
                exit(Err);
            E:T ->
                %% NOTE(review): erlang:get_stacktrace/0 is deprecated since
                %% OTP 21 and removed in OTP 24; migrating to the
                %% `Class:Reason:Stacktrace` catch syntax is required to run
                %% on modern OTP — confirm the project's minimum OTP version.
                io:format("alpaca_typer:type_modules/2 crashed with ~p:~p~n"
                          "Stacktrace:~n~p~n", [E, T, erlang:get_stacktrace()]),
                exit({error, T})
        end
    end),
    receive
        {'DOWN', Monitor, process, Pid, Result} -> Result
    end.
%% Type each module in turn, threading the updated environment so later
%% modules can see earlier results; stop on the first error.
type_modules([], _Env, Acc) ->
    {ok, Acc};
type_modules([Mod|Rest], Env, Acc) ->
    case type_module(Mod, Env) of
        {ok, Typed} ->
            type_modules(Rest, replace_env_module(Env, Typed), [Typed|Acc]);
        {error, _}=Failure ->
            Failure
    end.
-spec type_module(M::alpaca_module(), Env::env()) -> {ok, alpaca_module()} |
                                                     {error, term()}.
%% Precompiled modules are assumed already typed; pass them through.
type_module(#alpaca_module{precompiled=true}=M, _Env) ->
    {ok, M};
%% Type a single module: validate its type definitions, pull in imported
%% types, instantiate every in-scope ADT, then type its functions and tests.
type_module(#alpaca_module{functions=Fs,
                           name=Name,
                           types=Ts,
                           type_imports=Imports,
                           tests=Tests}=M,
            #env{modules=Modules}=Env) ->
    %% Throws on invalid type references; [] asserts the success value.
    [] = validate_types(M, Modules),
    %% Fold function to yield all the imported types or report a missing one.
    ImportFolder = fun(_, {error, _}=Err) -> Err;
                      (_, [{error, _}=Err|_]) -> Err;
                      (#alpaca_type_import{module=MN, type=T}, Acc) ->
                           [retrieve_type(Name, MN, T, Modules)|Acc]
                   end,
    %% Fold function to instantiate all in-scope ADTs.
    TypFolder = fun(_, {error, _}=Err) ->
                        Err;
                   (T, {Typs, E}) ->
                        TN = case T of
                                 #alpaca_type{name={_, _, N}} -> N;
                                 #alpaca_type_alias{name={_, _, N}} -> N
                             end,
                        case inst_type(T, E) of
                            {ok, E2, ADT, _} -> {[{TN, unwrap(ADT)}|Typs], E2};
                            {error, _}=Err -> Err
                        end
                end,
    case lists:foldl(ImportFolder, [], Imports) of
        {error, _}=Err -> Err;
        Imported ->
            %% Local definitions plus everything successfully imported:
            AllTypes = Ts ++ [T || {ok, T} <- Imported],
            case lists:foldl(TypFolder, {[], Env}, AllTypes) of
                {error, _}=Err ->
                    Err;
                {ADTs, Env2} ->
                    %% Seed the environment with this module's types,
                    %% constructors, and cycle-detection bookkeeping.
                    Env3 = Env2#env{
                             type_bindings=ADTs,
                             current_module=M,
                             current_types=AllTypes,
                             type_constructors=constructors(AllTypes),
                             entered_modules=[Name|Env2#env.entered_modules]},
                    %% We need to get the environment back from typ_module_funs
                    %% so that the top-level function bindings are available to
                    %% tests:
                    case typ_module_funs(Fs, Env3, []) of
                        {error, _}=Err ->
                            Err;
                        {Env4, FunRes} ->
                            case type_module_tests(Tests, Env4, ok, FunRes) of
                                {error, _} = Err2 ->
                                    Err2;
                                Funs when is_list(Funs) ->
                                    {ok, M#alpaca_module{functions=Funs, typed=true}}
                            end
                    end
            end
    end.
%% Type each top-level binding in order, threading the environment so later
%% bindings can reference earlier ones.  Returns the final environment
%% (needed when typing tests) plus the bindings annotated with their
%% unwrapped types, or the first typing error.
typ_module_funs([], Env, Memo) ->
    {Env, lists:reverse(Memo)};
typ_module_funs([#alpaca_binding{name=#a_lab{name=Name}}=F|Rem], Env, Memo) ->
    case typ_of(Env, 0, F) of
        {error, _} = E ->
            E;
        {Typ, NV} ->
            %% Advance the type-variable counter and record the binding so
            %% subsequent functions can use it.
            Env2 = update_counter(NV, Env),
            Env3 = update_binding(Name, Typ, Env2),
            typ_module_funs(Rem, Env3, [F#alpaca_binding{type=unwrap(Typ)} | Memo])
    end.
%% Type each test expression for its type-checking side effect only; the
%% already-typed functions (`Funs`) pass through unchanged.  The third
%% argument carries the previous test's typing result so the first two
%% clauses can short-circuit on an error from either source.
type_module_tests(_, _Env, {error, _}=Err, _) ->
    Err;
type_module_tests(_, _Env, _, {error, _}=Err) ->
    Err;
type_module_tests([], _, _, Funs) ->
    Funs;
type_module_tests([#alpaca_test{expression=E}|Rem], Env, _, Funs) ->
    type_module_tests(Rem, Env, typ_of(Env, 0, E), Funs).
%% Here we make a quick pass over each type defined in a module to ensure
%% types that are members of a type are defined in this module or imported.
validate_types(#alpaca_module{name=Mod, types=Types, type_imports=Imports}, Mods) ->
    %% TODO: inconsistent use of labels vs raw binary:
    %% All names a member type may legally refer to: locally defined types,
    %% imported types, and locally defined aliases.
    TypeNames =
        [N || #alpaca_type{name={_, _, N}} <- Types] ++
        [N || #alpaca_type_import{type=#a_lab{name=N}} <- Imports] ++
        [N || #alpaca_type_alias{name={_, _, N}} <- Types],
    validate_types(Mod, TypeNames, Mods, Types).
%% Walk a list of type expressions and throw when any referenced type cannot
%% be resolved locally, via imports, or in the referenced module.  Returns []
%% on success so callers can assert with `[] = validate_types(...)`.
validate_types(_ModName, _TypeNames, _Modules, []) ->
    [];
%% A type belonging to (or defaulting to) the current module: its name must
%% be known locally; its members and variables are validated recursively.
validate_types(
  ModName,
  TypeNames,
  Modules,
  [#alpaca_type{module=MN}=H|T]) when MN =:= undefined; MN =:= ModName ->
    #alpaca_type{name={_, L, N}, vars=Vs, members=Ms}=H,
    case [TN || TN <- TypeNames, TN =:= N] of
        [] ->
            throw({unknown_type, ModName, L, N});
        _ ->
            validate_types(ModName, TypeNames, Modules, Ms),
            validate_types(ModName, TypeNames, Modules, Vs),
            validate_types(ModName, TypeNames, Modules, T)
    end;
%% A type referencing another module: the module must exist and must define
%% a type or alias with the referenced name.
validate_types(MN, Ts, Mods, [#alpaca_type{}=T|Rem]) ->
    #alpaca_type{name={_, L, N}, module=TargetMod, vars=Vs} = T,
    case [M || #alpaca_module{name=X}=M <- Mods, X =:= TargetMod] of
        [] ->
            throw({bad_module, MN, L, TargetMod});
        [#alpaca_module{types=Xs}] ->
            Matches =
                [X || #alpaca_type{name={_, _, Y}}=X <- Xs, Y =:= N] ++
                [X || #alpaca_type_alias{name={_, _, Y}}=X <- Xs, Y =:= N],
            case Matches of
                [] ->
                    throw({unknown_type, MN, L, N});
                [_] ->
                    [] = validate_types(MN, Ts, Mods, Vs),
                    validate_types(MN, Ts, Mods, Rem)
            end
    end;
%% A local (or current-module) alias: validate its target type.
%% BUGFIX: the guard previously compared against the misspelled atom
%% 'undefind', which can never match, so alias targets in the current module
%% (module=undefined) silently skipped validation via the catch-all clause.
validate_types(
  ModName,
  TypeNames,
  Modules,
  [#alpaca_type_alias{module=MN, target=Target}|T]) when MN =:= undefined; MN =:= ModName ->
    validate_types(ModName, TypeNames, Modules, [Target]),
    validate_types(ModName, TypeNames, Modules, T);
%% A concrete type assigned to a type variable:
validate_types(MN, TNs, Mods, [{{type_var, _, _}, Typ}|T]) ->
    validate_types(MN, TNs, Mods, [Typ]),
    validate_types(MN, TNs, Mods, T);
validate_types(MN, TNs, Mods, [#alpaca_constructor{arg=A}|T]) ->
    validate_types(MN, TNs, Mods, [A]),
    validate_types(MN, TNs, Mods, T);
validate_types(ModName, TypeNames, Mods, [#alpaca_type_tuple{members=Ms}|T]) ->
    validate_types(ModName, TypeNames, Mods, Ms),
    validate_types(ModName, TypeNames, Mods, T);
validate_types(MN, Ts, Mods, [#t_record{members=Ms}|T]) ->
    MemberTypes = [Type || #t_record_member{type=Type} <- Ms],
    validate_types(MN, Ts, Mods, MemberTypes ++ T);
%% Validate only the element type(s) here; the tail is covered by the second
%% call.  (Previously the tail was passed to both calls, validating every
%% remaining element twice — quadratic duplicated work with no effect, since
%% validation is pure and either throws or returns [].)
validate_types(M, TNs, Mods, [{t_list, LT}|T]) ->
    [] = validate_types(M, TNs, Mods, [LT]),
    validate_types(M, TNs, Mods, T);
validate_types(M, TNs, Mods, [{t_map, KT, VT}|T]) ->
    [] = validate_types(M, TNs, Mods, [KT, VT]),
    validate_types(M, TNs, Mods, T);
%% Anything else (built-in atoms, bare type variables, etc.) needs no check:
validate_types(M, Ts, Mods, [_H|T]) ->
    validate_types(M, Ts, Mods, T).
%% In the past I returned the environment entirely but this contained mutations
%% beyond just the counter for new type variable names. The integer in the
%% successful return tuple is just the next type variable number so that
%% the environments further up have no possibility of being polluted with
%% definitions from below.
-spec typ_of(
Env::env(),
Lvl::integer(),
Exp::alpaca_expression()) -> {typ(), integer()} | {error, term()}.
%% Base types now need to be in reference cells because when they are part
%% of unions they may need to be reset.
typ_of(#env{next_var=VarNum}, _Lvl, #a_int{}) ->
{new_cell(t_int), VarNum};
typ_of(#env{next_var=VarNum}, _Lvl, #a_flt{}) ->
{new_cell(t_float), VarNum};
typ_of(#env{next_var=VarNum}, _Lvl, #a_bool{}) ->
{new_cell(t_bool), VarNum};
typ_of(#env{next_var=VarNum}, _Lvl, #a_atom{}) ->
{new_cell(t_atom), VarNum};
typ_of(#env{next_var=VN}, _Lvl, #a_str{}) ->
{new_cell(t_string), VN};
typ_of(#env{next_var=VN}, _Lvl, {chars, _, _}) ->
{new_cell(t_chars), VN};
typ_of(Env, Lvl, #a_lab{line=L, name=N}) ->
case inst_binding(N, L, Lvl, Env) of
{error, _} = E -> E;
{T, #env{next_var=VarNum}, _} -> {T, VarNum}
end;
%% A qualified label is currently taken to represent a reference to a binding
%% in another module. In future, it will represent _either_ a binding in
%% another module or a binding inside of a record (field accessor.)
typ_of( Env
      , _Lvl
      , #a_qlab{ space=#a_lab{name=Mod}
               , label=#a_lab{name=N}=Label
               , line=_L
               , arity=A
               }
      ) ->
    %% First we check to see if we have mutually recursive modules, including
    %% across some chain of calls. We do this by tracking which modules the
    %% typer has already "entered" to perform type checking, and then looking
    %% to see if the typer will have to *re*-enter one of the previously typed
    %% modules. `Err` here simply abstracts the actual error report generation,
    %% while the following case statement performs the actual check.
    %%
    %% This decision to prevent mutually recursive modules was, frankly, an
    %% *arbitrary* early decision in an attempt to keep complexity down. It may
    %% very well bear revisiting.
    Err = fun() ->
                  [CurrMod|_] = Env#env.entered_modules,
                  throw({error, {bidirectional_module_ref, Mod, CurrMod}})
          end,
    %% Here's the actual bidirectional/cyclic module check.  A module
    %% re-entering *itself* is allowed (self-reference is fine):
    case [M || M <- Env#env.entered_modules, M =:= Mod] of
        [] -> ok;
        [_] -> case Env#env.current_module of
                   #alpaca_module{name=Mod} -> ok;
                   _ -> Err()
               end;
        _ -> Err()
    end,
    %% The check has passed, so we're going to go through the following steps:
    %% 1. Add the target module to our list of entered modules, preventing
    %%    further cycles. The "target module" is the one to which the
    %%    qualified label refers.
    %% 2. Check if the target module has already been typed. If it has *not*
    %%    been typed yet, do so now.
    %% 3. Retrieve the binding from the target module that matches the target
    %%    binding name *and* arity. A failure to find a matching arity may
    %%    indicate an attempt to curry the target binding which is handled
    %%    elsewhere. See `typ_of(_, _, #alpaca_apply{...})` for more details.
    EnteredModules = [Mod | Env#env.entered_modules],
    {ok, Module, _} = extract_module_bindings(Env, Mod, Label),
    Funs = case Module#alpaca_module.typed of
               true ->
                   Module#alpaca_module.functions;
               false ->
                   %% Type the called function in its own module:
                   Env2 = Env#env{current_module=Module,
                                  entered_modules=EnteredModules},
                   {ok, #alpaca_module{functions=F}} = type_module(Module, Env2),
                   F
           end,
    %% NOTE(review): this match crashes with badmatch if no binding with the
    %% exact name *and* arity exists; per the comment above, arity mismatches
    %% (currying) are expected to be handled by the apply clauses — confirm.
    [Typ] = [Typ || #alpaca_binding{
                       name=#a_lab{name = X},
                       type=Typ,
                       bound_expr=#alpaca_fun{arity=Arity}} <- Funs,
                    N =:= X,
                    A =:= Arity],
    #env{next_var=NV}=Env,
    %% deep copy to cell the various types, needed
    %% because typing a module unwraps all the
    %% reference cells before returning the module:
    {DT, _} = deep_copy_type(Typ, maps:new()),
    {DT, NV};
typ_of(#env{next_var=VN}, _Lvl, #a_unit{}) ->
    {new_cell(t_unit), VN};
%% Errors only type as new variables for the moment to simplify
%% unification with other terms. I'm considering typing them as
%% a kind of effect that wraps enclosing expressions, similar to
%% how receivers are handled.
typ_of(Env, Lvl, {raise_error, _, _, Expr}) ->
    case typ_of(Env, Lvl, Expr) of
        {error, _}=Err ->
            Err;
        {_, NV} ->
            %% The raised expression typed fine; the raise itself becomes a
            %% fresh type variable so it unifies with any surrounding type.
            {T, #env{next_var=NV2}} = new_var(Lvl, update_counter(NV, Env)),
            {T, NV2}
    end;
%% The wildcard '_' types like an ordinary binding lookup.
typ_of(Env, Lvl, {'_', L}) ->
    {T, #env{next_var=VarNum}, _} = inst_binding('_', L, Lvl, Env),
    {T, VarNum};
typ_of(Env, Lvl, #alpaca_tuple{values=Vs}) ->
    case typ_list(Vs, Lvl, Env, []) of
        {error, _} = E -> E;
        {VTyps, NextVar} -> {new_cell({t_tuple, VTyps}), NextVar}
    end;
%% The empty list gets a fresh type variable for its element type.
typ_of(#env{next_var=_VarNum}=Env, Lvl, {nil, _Line}) ->
    {TL, #env{next_var=NV}} = new_var(Lvl, Env),
    {new_cell({t_list, TL}), NV};
%% Cons cells: type the head, then the tail (which may itself be a cons, a
%% label, an application, or nil) and unify the head's type with the list's
%% element type.
typ_of(Env, Lvl, #alpaca_cons{line=Line, head=H, tail=T}) ->
    {HTyp, NV1} = case typ_of(Env, Lvl, H) of
                      {error, HeadErr} -> throw(HeadErr);
                      OK -> OK
                  end,
    {TTyp, NV2} =
        case T of
            {nil, _} -> {new_cell({t_list, HTyp}), NV1};
            #alpaca_cons{}=Cons ->
                typ_of(update_counter(NV1, Env), Lvl, Cons);
            #a_lab{}=S ->
                L = ast:line(S),
                {STyp, Next} =
                    typ_of(update_counter(NV1, Env), Lvl, S),
                {TL, #env{next_var=Next2}} =
                    new_var(Lvl, update_counter(Next, Env)),
                NC = new_cell({t_list, TL}),
                case unify(NC, STyp, Env, L) of
                    {error, _} = E -> E;
                    ok ->
                        {STyp, Next2}
                end;
            #alpaca_apply{}=Apply ->
                {TApp, Next} = typ_of(update_counter(NV1, Env), Lvl, Apply),
                case unify(
                       new_cell({t_list, HTyp}), TApp, Env, apply_line(Apply))
                of
                    {error, _} = E -> E;
                    ok -> {TApp, Next}
                end;
            NonList ->
                {error, {cons_to_non_list, NonList}}
        end,
    %% Imperative nastiness follows:
    %% NOTE(review): an `{error, Reason}` 2-tuple from the case above matches
    %% `{TTyp, NV2}` with TTyp bound to the atom `error`; the check below
    %% detects that and rethrows the reason.
    case {TTyp, NV2} of
        {error, TailTypeError} -> throw(TailTypeError);
        _ -> ok
    end,
    %% TODO: there's no error check here:
    ListType = case TTyp of
                   {cell, _} ->
                       %% TODO: this is kind of a gross tree but previously
                       %% there were cases above that would instantiate list
                       %% types that were not celled, leading to some badarg
                       %% exceptions when unifying with ADTs:
                       case get_cell(TTyp) of
                           {link, {t_list, LT}} -> LT;
                           {link, {cell, _}=C} ->
                               case get_cell(C) of
                                   {t_list, LT} -> LT
                               end;
                           {t_list, LT} -> LT
                       end;
                   {t_list, LT} ->
                       LT
               end,
    case unify(HTyp, ListType, Env, Line) of
        {error, _} = Err ->
            Err;
        ok ->
            {TTyp, NV2}
    end;
%% A binary literal types as t_binary once all its segments type-check.
typ_of(Env, Lvl, #alpaca_binary{segments=Segs}) ->
    case type_bin_segments(Env, Lvl, Segs) of
        {error, _}=Err -> Err;
        {ok, NV} -> {new_cell(t_binary), NV}
    end;
typ_of(Env, Lvl, #alpaca_map{}=M) ->
    type_map(Env, Lvl, M);
%% Adding a pair to an existing map:  type the new key and value, type the
%% existing map, and unify the map with a t_map of the pair's types.
typ_of(Env, Lvl, #alpaca_map_add{line=L, to_add=A, existing=B}) ->
    #alpaca_map_pair{key=KE, val=VE} = A,
    TypA = typ_list([KE, VE], Lvl, Env, []),
    TypB = typ_of(Env, Lvl, B),
    map_typ_of(
      Env, TypA,
      fun(Env2, [KT, VT]) ->
              AMap = new_cell({t_map, KT, VT}),
              map_typ_of(
                Env2, TypB,
                fun(Env3, MTyp) ->
                        map_err(unify(AMap, MTyp, Env3, L),
                                fun(_) -> {MTyp, Env3#env.next_var} end)
                end)
      end);
%% Record typing:
typ_of(Env, Lvl, #alpaca_record{is_pattern=IsPattern, members=Members}) ->
    F = fun(#alpaca_record_member{name=N, val=V}, {ARMembers, E}) ->
                case typ_of(E, Lvl, V) of
                    {error, _}=Err ->
                        erlang:error(Err);
                    {VTyp, NextVar} ->
                        MTyp = #t_record_member{name=N, type=VTyp},
                        {[MTyp|ARMembers], update_counter(NextVar, E)}
                end
        end,
    {Members2, Env2} = lists:foldl(F, {[], Env}, Members),
    %% A fresh row variable keeps the record type open to additional fields
    %% during unification:
    {RowVar, Env3} = new_var(Lvl, Env2),
    Res = new_cell(#t_record{
                      is_pattern=IsPattern,
                      members=lists:reverse(Members2),
                      row_var=RowVar}),
    {Res, Env3#env.next_var};
%% Record update/extension:  unify the existing record with an empty record
%% (capturing its fields in the row variable), then merge in the additions.
typ_of(Env, Lvl, #alpaca_record_transform{additions=Adds, existing=Exists, line=L}) ->
    {ExistsType, NV} = case typ_of(Env, Lvl, Exists) of
                           {error, _}=Err -> throw(Err);
                           OK -> OK
                       end,
    {EmptyRecType, NV2} = typ_of(update_counter(NV, Env), Lvl, #alpaca_record{line=L}),
    #t_record{row_var=RV} = get_cell(EmptyRecType),
    Env2 = update_counter(NV2, Env),
    ok = unify(EmptyRecType, ExistsType, Env2, L),
    AddsRec = #alpaca_record{members=Adds, line=L},
    {AddsRecCell, NV3} = typ_of(Env2, Lvl, AddsRec),
    #t_record{members=AddMs} = get_cell(AddsRecCell),
    Flattened = flatten_record(#t_record{members=AddMs, row_var=RV}),
    #t_record{members=FlatMems, row_var=FlatVar} = Flattened,
    %% Now de-dupe fields, preferring newer ones:
    Deduped = lists:foldl(
                fun(#t_record_member{name=N, type=T}, Map) -> maps:put(N, T, Map) end,
                maps:new(),
                lists:reverse(FlatMems)),
    RecMems = [#t_record_member{name=N, type=T} || {N, T} <- maps:to_list(Deduped)],
    Rec = #t_record{members=RecMems, row_var=FlatVar},
    {new_cell(Rec), NV3};
%% Applying a type constructor with no argument (e.g. `None`):
typ_of(Env, _Lvl, #alpaca_type_apply{name=N, arg=none}) ->
    case inst_type_arrow(Env, N) of
        {error, _}=Err -> Err;
        {Env2, {type_arrow, CTyp, RTyp}} ->
            %% The constructor must not declare an argument type if it is
            %% applied without one:
            case unwrap(CTyp) of
                undefined -> ok;
                _ -> throw(error_not_enough_type_arguments(Env, N))
            end,
            {RTyp, Env2#env.next_var}
    end;
typ_of(Env, Lvl, #alpaca_type_apply{name=N, arg=A}) ->
    %% Some things come back from typing without being properly contained in a
    %% reference cell, specifically those bound to labels. This can be a
    %% problem when typing this kind of application because we jump straight
    %% to unify/4 which requires both arguments to be in cells while other parts
    %% of the typer balk at things being immediately celled. An overhaul of the
    %% inferencer is in order pretty soon I think.
    EnsureCelled = fun({cell, _}=X) -> X;
                      (X) -> new_cell(X)
                   end,
    #type_constructor{line=L} = N,
    case inst_type_arrow(Env, N) of
        {error, _}=Err -> Err;
        {Env2, {type_arrow, CTyp, RTyp}} ->
            case unwrap(CTyp) of
                %% Type tags/instance constructors (e.g. `None`) with no
                %% defined arguments should not be able to be instantiated
                %% with any:
                undefined ->
                    throw(error_too_many_type_arguments(Env, N));
                _ ->
                    case typ_of(Env2, Lvl, A) of
                        {error, _}=Err -> Err;
                        {ATyp, NVNum} ->
                            case unify(CTyp,
                                       EnsureCelled(ATyp),
                                       update_counter(NVNum, Env2), L)
                            of
                                ok -> {RTyp, NVNum};
                                {error, _}=Err -> Err
                            end
                    end
            end
    end;
%% BIFs are loaded in the environment as atoms:
typ_of(Env, Lvl, {bif, AlpacaName, L, _, _}) ->
    case inst_binding(AlpacaName, L, Lvl, Env) of
        {error, _} = E ->
            E;
        {T, #env{next_var=VarNum}, _} ->
            {T, VarNum}
    end;
%% Application of a function in another module (module-qualified call).
typ_of(Env, Lvl, #alpaca_apply{expr={Mod, #a_lab{}=Sym, Arity}, args=Args}) ->
    X = ast:label_name(Sym),
    L = ast:line(Sym),
    Satisfy =
        fun() ->
                %% Naively assume a single call to the same function for now.
                %% does the module exist and does it export the function?
                case extract_fun(Env, Mod, X, Arity) of
                    {error, _} = E -> E;
                    {ok, Module, _Fun} ->
                        EnteredModules = [Mod | Env#env.entered_modules],
                        FarMod = case Module#alpaca_module.typed of
                                     true ->
                                         {ok, Module};
                                     false ->
                                         %% Type the called function in its own module:
                                         Env2 = Env#env{current_module=Module,
                                                        entered_modules=EnteredModules},
                                         type_module(Module, Env2)
                                 end,
                        %% Type the called function in its own module:
                        case FarMod of
                            {ok, #alpaca_module{functions=Funs}} ->
                                [T] = [Typ ||
                                          #alpaca_binding{
                                             name=#a_lab{name = N},
                                             type=Typ,
                                             bound_expr=#alpaca_fun{
                                                           arity=A}} <- Funs,
                                          N =:= X,
                                          A =:= Arity],
                                #env{next_var=NextVar}=Env,
                                %% deep copy to cell the various types, needed
                                %% because typing a module unwraps all the
                                %% reference cells before returning the module:
                                {DT, _} = deep_copy_type(T, maps:new()),
                                typ_apply(Env, Lvl, DT, NextVar, Args, L);
                            {error, _}=Err ->
                                Err
                        end
                end
        end,
    Error = fun() ->
                    [CurrMod|_] = Env#env.entered_modules,
                    {error, {bidirectional_module_ref, Mod, CurrMod}}
            end,
    %% Same cyclic-module check as for qualified labels above, except the
    %% error is returned rather than thrown:
    case [M || M <- Env#env.entered_modules, M == Mod] of
        [] -> Satisfy();
        [Mod] -> case Env#env.current_module of
                     #alpaca_module{name=Mod} -> Satisfy();
                     _ -> Error()
                 end;
        _ -> Error()
    end;
typ_of(Env, Lvl, #alpaca_apply{line=L, expr=Expr, args=Args}) ->
    %% When we hit arity failures, it may be because the user
    %% is intending a curried application. This function attempts
    %% to find a potential function that can be unambiguously
    %% curried, and then types against that by manipulating the
    %% argument list and return type
    LocalCurryFun =
        fun() ->
                %% If we got an arrow type, if we couldn't find it in the
                %% top level, and it still didn't unify, it might be
                %% a local binding we can still curry.
                case typ_of(Env, Lvl, Expr) of
                    {error, {bad_variable_name, _, _, _}} = E -> E;
                    {error, _} = E -> E;
                    {{t_arrow, TArgs, TRet}, NextVar} ->
                        case length(Args) >= length(TArgs) of
                            true ->
                                #a_lab{name = N} = Expr,
                                Mod = Env#env.current_module#alpaca_module.name,
                                {error, {not_found, Mod, N, length(Args)}};
                            false ->
                                {CurryArgs, RemArgs} = lists:split(length(Args), TArgs),
                                CurriedTypF = {t_arrow, CurryArgs, {t_arrow, RemArgs, TRet}},
                                typ_apply(Env, Lvl, CurriedTypF, NextVar, Args, L)
                        end
                end
        end,
    CurryFun =
        fun(_OriginalErr) ->
                %% Attempt to find a curryable version
                {Mod, FN, Env2} = case Expr of
                                      #a_lab{}=Sym ->
                                          FunName = ast:label_name(Sym),
                                          {Env#env.current_module, FunName, Env};
                                      {bif, FunName, _, _, _} ->
                                          {Env#env.current_module, FunName, Env};
                                      #a_qlab{space=#a_lab{name=ModName}, label=FunName} ->
                                          EnteredModules = [Env#env.current_module | Env#env.entered_modules],
                                          {ok, Module, _} = extract_module_bindings(Env, ModName, FunName),
                                          E = Env#env{current_module=Module,
                                                      entered_modules=EnteredModules},
                                          {Module, FunName, E}
                                  end,
                CurryFuns = get_curryable_funs(Mod, FN, length(Args)+1),
                case CurryFuns of
                    [] -> LocalCurryFun();
                    [Item] -> case typ_of(Env2, Lvl, Item) of
                                  {{t_arrow, TArgs, TRet}, NextVar} ->
                                      {CurryArgs, RemArgs} = lists:split(length(Args), TArgs),
                                      CurriedTypF = {t_arrow, CurryArgs, {t_arrow, RemArgs, TRet}},
                                      typ_apply(Env2, Lvl, CurriedTypF, NextVar, Args, L)
                              end;
                    Items -> {error, {ambiguous_curry, Expr, Items, L}}
                end
        end,
    %% If the expression we're applying arguments to is a named function
    %% (e.g. a label or bif), attempt to find it in the module.
    %% This ForwardFun function is used specifically to find functions defined
    %% later than the application we're trying to type.
    ForwardFun =
        fun() ->
                FN = case Expr of
                         #a_lab{name=N} -> N;
                         {bif, FunName, _, _, _} -> FunName
                     end,
                Mod = Env#env.current_module,
                case get_fun(Mod, FN, length(Args)) of
                    {ok, _, Fun} ->
                        case typ_of(Env, Lvl, Fun) of
                            {error, _}=Err -> Err;
                            {TypF, NextVar} ->
                                %% We should have a t_arrow taking some args with a return value
                                %% What we need is a t_arrow that takes some of those args and returns
                                %% another t_arrow taking the remainder and returning the final arg
                                try
                                    typ_apply(Env, Lvl, TypF, NextVar, Args, L)
                                catch
                                    throw:{arity_error, _, _} = Err -> CurryFun(Err)
                                end
                        end;
                    {error, _} = E -> CurryFun(E)
                end
        end,
    case typ_of(Env, Lvl, Expr) of
        {error, {bad_variable_name, _, _, _}} -> ForwardFun();
        {error, _} = E -> E;
        {TypF, NextVar} ->
            %% If the function in the environment is the wrong arity we want to
            %% try to locate a matching one in the module.
            %% This does not allow for different arity functions in a sequence
            %% of let bindings which could be a weakness.
            %%
            %% NOTE(review): this catch is on class `error` while ForwardFun
            %% above catches class `throw` for the same reason tuple — confirm
            %% both classes are really raised by typ_apply.
            try
                typ_apply(Env, Lvl, TypF, NextVar, Args, L)
            catch
                error:{arity_error, _, _} ->
                    case Expr of
                        #a_qlab{} -> CurryFun({error, uncurryable_far_ref});
                        _ -> ForwardFun()
                    end
            end
    end;
%% Unify the patterns with each other and resulting expressions with each
%% other, then unifying the general pattern type with the match expression's
%% type.
typ_of(Env, Lvl, #alpaca_match{match_expr=E, clauses=Cs, line=Line}) ->
    {ETyp, NextVar1} = typ_of(Env, Lvl, E),
    Env2 = update_counter(NextVar1, Env),
    case unify_clauses(Env2, Lvl, Cs) of
        {error, _} = Err -> Err;
        {ok, {t_clause, PTyp, _, RTyp}, #env{next_var=NextVar2}} ->
            %% unify the expression with the unified pattern:
            case unify(PTyp, ETyp, Env, Line) of
                {error, _} = Err -> Err;
                %% only need to return the result type of the unified
                %% clause types:
                ok -> {RTyp, NextVar2}
            end
    end;
%% A single match/receive clause:  bind the pattern's labels into the
%% environment, type each guard (all guards must unify with t_bool), then
%% type the result expression.
typ_of(Env, Lvl, #alpaca_clause{pattern=P, guards=Gs, result=R, line=L}) ->
    case add_bindings(P, Env, Lvl, 0) of
        {error, _}=Err -> Err;
        {PTyp, _, NewEnv, _} ->
            F = fun(_, {error, _}=Err) -> Err;
                   (G, {Typs, AccEnv}) ->
                        case typ_of(AccEnv, Lvl, G) of
                            {error, _}=Err ->
                                Err;
                            {GTyp, NV} ->
                                {[GTyp|Typs], update_counter(NV, AccEnv)}
                        end
                end,
            case lists:foldl(F, {[], NewEnv}, Gs) of
                {error, _}=Err -> Err;
                {GTyps, Env2} ->
                    UnifyFolder = fun(_, {error, _}=Err) -> Err;
                                     (N, Acc) ->
                                          case unify(N, Acc, Env, L) of
                                              {error, _}=Err -> Err;
                                              ok -> Acc
                                          end
                                  end,
                    %% Every guard expression must be boolean:
                    case lists:foldl(UnifyFolder, new_cell(t_bool), GTyps) of
                        {error, _}=Err -> Err;
                        _ ->
                            case typ_of(Env2, Lvl, R) of
                                {error, _} = E -> E;
                                {RTyp, NextVar2} ->
                                    {{t_clause, PTyp, none, RTyp}, NextVar2}
                            end
                    end
            end
    end;
%%% Pattern match guards that both check the type of an argument and cause
%%% its type to be fixed.
typ_of(Env, Lvl, #alpaca_type_check{type=T, expr=E, line=L}) ->
    Typ = proplists:get_value(T, ?all_type_checks),
    case typ_of(Env, Lvl, E) of
        {error, _}=Err -> Err;
        {ETyp, NV} ->
            %% polymorphic built-in types like PIDs need to be instantiated
            %% with appropriate type variables before getting unified.
            {Env2, ToUnify} = case Typ of
                                  t_pid ->
                                      {PidT, E2} = new_var(Lvl, Env),
                                      {E2, new_cell({t_pid, PidT})};
                                  _ ->
                                      {Env, new_cell(Typ)}
                              end,
            %% The check expression itself evaluates to a boolean:
            case unify(ToUnify, ETyp, Env2, L) of
                {error, _}=Err -> Err;
                ok -> {t_bool, NV}
            end
    end;
%% Sending a message:  the target must type as a pid, and the message must
%% unify with the pid's message type; the send expression itself is unit.
typ_of(Env, Lvl, #alpaca_send{line=L, message=M, pid=P}) ->
    case typ_of(Env, Lvl, P) of
        {error, _}=Err -> Err;
        {T, NV} ->
            {PidT, Env2} = new_var(Lvl, Env),
            PC = new_cell({t_pid, PidT}),
            %% NOTE(review): `Lvl` is passed as unify/4's last argument here
            %% where every other call site passes a line number — confirm.
            case unify(T, PC, Env2, Lvl) of
                {error, _}=Err -> Err;
                ok ->
                    case typ_of(Env2, Lvl, M) of
                        {error, _}=Err -> Err;
                        {MT, NV2} ->
                            Env3 = update_counter(NV2, Env2),
                            case unify(PidT, MT, Env3, L) of
                                {error, _}=Err -> Err;
                                %% NOTE(review): returns NV (pre-message
                                %% counter) rather than NV2 — verify intent.
                                ok -> {t_unit, NV}
                            end
                    end
            end
    end;
typ_of(Env, Lvl, #alpaca_receive{}=Recv) ->
    type_receive(Env, Lvl, Recv);
%%% Calls to Erlang code only have their return value typed.
%%% However, we also check that the arguments refer to in-scope names.
typ_of(Env, Lvl, #alpaca_ffi{args=Args}=FFI) ->
    case typ_ffi_args(Env, Lvl, Args) of
        ok -> typ_ffi_clauses(Env, Lvl, FFI);
        {error, _}=Err -> Err
    end;
%% Spawning of functions in the current module:
typ_of(Env, Lvl, #alpaca_spawn{line=L, module=undefined, function=F, args=Args}) ->
    %% make a function application and type it:
    Apply = #alpaca_apply{line=L, expr=F, args=Args},
    case typ_of(Env, Lvl, F) of
        {error, _}=Err -> Err;
        {SpawnFunTyp, NV} ->
            Env2 = update_counter(NV, Env),
            case typ_of(Env2, Lvl, Apply) of
                {error, _}=Err -> Err;
                {_AT, NV2} ->
                    %% use the type of the application to type a pid but prefer
                    %% the one determined by typing the application.
                    case SpawnFunTyp of
                        {t_receiver, Recv, _} ->
                            case _AT of
                                {t_receiver, Recv2, _} ->
                                    {new_cell({t_pid, Recv2}), NV2};
                                _ ->
                                    {new_cell({t_pid, Recv}), NV2}
                            end;
                        _ ->
                            %% Not a receiver: the pid's message type is left
                            %% undetermined.
                            {new_cell({t_pid, new_cell(undefined)}), NV2}
                    end
            end
    end;
%% A function expression:  type each version (pattern clause) of the
%% function, then unify all versions together so the function ends up with a
%% single consistent type.
typ_of(EnvIn, Lvl, #alpaca_fun{line=_L, name=N, versions=Vs}) ->
    F = fun(_, {error, _}=Err) ->
                Err;
           (#alpaca_fun_version{args=Args, body=Body, line=VL}, {Types, Env}) ->
                BindingF = fun(Arg, {Typs, E, VN}) ->
                                   {AT, _, NE, VN2} = add_bindings(Arg, E, Lvl, VN),
                                   {[AT|Typs], NE, VN2}
                           end,
                {RevTyps, Env2, _} = lists:foldl(BindingF, {[], Env, 0}, Args),
                JustTypes = lists:reverse(RevTyps),
                %% Bind the function's own name to a recursive placeholder so
                %% the body may refer to itself (let-rec):
                RecursiveType = {t_arrow, JustTypes, new_cell(t_rec)},
                EnvWithLetRec = update_binding(N, RecursiveType, Env2),
                case typ_of(EnvWithLetRec, Lvl, Body) of
                    {error, _} = Err ->
                        Err;
                    {T, NextVar} ->
                        case unwrap(T) of
                            {t_receiver, Recv, Res} ->
                                TRec = {t_receiver, new_cell(Recv), new_cell(Res)},
                                {t_receiver, Recv2, Res2} =
                                    collapse_receivers(TRec, Env2, Lvl),
                                X = {t_receiver, Recv2,
                                     {t_arrow, JustTypes, Res2}},
                                {[{VL, X}|Types], update_counter(NextVar, Env2)};
                            _ ->
                                %% Nullary funs are really values - for type
                                %% checking we're only interested in their
                                %% return value
                                %% NOTE(review): the nullary case adds a bare
                                %% T (not a {Line, T} pair) to the list the
                                %% unifying fold below destructures — confirm.
                                case JustTypes of
                                    [] -> {[T|Types], update_counter(NextVar, Env2)};
                                    _ -> {[{VL, {t_arrow, JustTypes, T}}|Types],
                                          update_counter(NextVar, Env2)}
                                end
                        end
                end
        end,
    case lists:foldl(F, {[], EnvIn}, Vs) of
        {error, _}=Err ->
            Err;
        {RevVersions, Env2} ->
            TypedVersions = lists:reverse(RevVersions),
            {Root, _} = new_var(0, Env2),
            %% Unify every version's type against the accumulated type:
            Unified = lists:foldl(
                        fun(_, {error, _}=Err) ->
                                Err;
                           ({VL, T1}, T2) ->
                                case unify(T1, T2, Env2, VL) of
                                    {error, _}=Err -> Err;
                                    ok -> T1
                                end
                        end,
                        Root,
                        TypedVersions),
            case Unified of
                {error, _}=Err -> Err;
                Typ -> {Typ, Env2#env.next_var}
            end
    end;
%% A function binding, possibly inside a function if E2 /= `undefined`:
typ_of(Env, Lvl, #alpaca_binding{
                    name=#a_lab{name=N},
                    bound_expr=#alpaca_fun{}=E,
                    signature=Sig,
                    body=E2}) ->
    {TypE, NextVar} = case typ_of(Env, Lvl, E#alpaca_fun{name=N}) of
                          {error, SymErr} -> throw(SymErr);
                          {_, _} = Success -> Success
                      end,
    Env2 = update_counter(NextVar, Env),
    case E2 of
        undefined ->
            %% If we have a type signature and we can unify it with the given
            %% binding we have typed, replace our inferred type with the sig
            case Sig of
                #alpaca_type_signature{type=TS, line=Line, vars=Vs} ->
                    %% Type signatures may need to be fully instantiated
                    Types = Env#env.current_types,
                    VarFolder = fun({type_var, _, VN}, {Vars, VarMap, E_}) ->
                                        {TVar, E3} = new_var(0, E_),
                                        {[{VN, TVar}|Vars], maps:put(VN, TVar, VarMap), E3};
                                   ({{type_var, _, VN}, Expr}, {Vars, VarMap, E_}) ->
                                        %% copy_cell/1 should put every nested member properly
                                        %% into its own reference cell:
                                        {Celled, NewVarMap} = copy_cell(Expr, VarMap),
                                        {[{VN, Celled}|Vars], NewVarMap, E_}
                                end,
                    {Vars2, _, Env3} = case Vs of
                                           undefined -> {[], maps:new(), Env2};
                                           _ -> lists:foldl(VarFolder, {[], maps:new(), Env2}, Vs)
                                       end,
                    {_, ArgCons} = inst_constructor_arg(TS, Vars2, Types, Env3),
                    case unify(TypE, ArgCons, Env3, Line) of
                        ok -> {unwrap_cell(ArgCons), NextVar};
                        {error, _} = Err -> Err
                    end;
                _ -> {TypE, NextVar}
            end;
        _ ->
            %% There is a body: generalize the bound function's type and type
            %% the body with the binding in scope (one level deeper).
            typ_of(update_binding(N, gen(Lvl, TypE), Env2), Lvl+1, E2)
    end;
%% A var binding inside a function:
typ_of(Env, Lvl, #alpaca_binding{name=#a_lab{name=N}, bound_expr=E1, body=E2}) ->
    case typ_of(Env, Lvl, E1) of
        {error, _}=Err ->
            Err;
        {TypE, NextVar} ->
            case E2 of
                undefined ->
                    {TypE, NextVar};
                _ ->
                    %% Generalize before typing the body (let-polymorphism):
                    Gen = gen(Lvl, TypE),
                    Env2 = update_counter(NextVar, Env),
                    typ_of(update_binding(N, Gen, Env2), Lvl+1, E2)
            end
    end.
%% Check that every FFI argument types successfully (i.e. refers to in-scope
%% names).  Only `ok` or the first error is returned; the argument types
%% themselves are discarded.
typ_ffi_args(_Env, _Lvl, {nil, _}) -> ok;
typ_ffi_args(Env, Lvl, #alpaca_cons{head=H, tail=T}) ->
    case typ_of(Env, Lvl, H) of
        {error, _}=Err -> Err;
        _Ok -> typ_ffi_args(Env, Lvl, T)
    end.
%% Type the result clauses of an FFI call:  all clause result types must
%% unify with each other, and the FFI expression takes the first clause's
%% result type.
typ_ffi_clauses(#env{next_var=NV}=Env, Lvl,
                #alpaca_ffi{clauses=Cs, module={_, L, _}}) ->
    ClauseFolder = fun(C, {Typs, EnvAcc}) ->
                           {{t_clause, _, _, T}, X} = typ_of(EnvAcc, Lvl, C),
                           {[T|Typs], update_counter(X, EnvAcc)}
                   end,
    {TypedCs, #env{next_var=NV2}} = lists:foldl(
                                      ClauseFolder,
                                      {[], update_counter(NV, Env)}, Cs),
    UnifyFolder = fun(A, Acc) ->
                          case unify(A, Acc, Env, L) of
                              ok -> Acc;
                              {error, _} = Err -> Err
                          end
                  end,
    [FC|TCs] = lists:reverse(TypedCs),
    case lists:foldl(UnifyFolder, FC, TCs) of
        {error, _} = Err ->
            Err;
        _ ->
            {FC, NV2}
    end.
%% Type every segment of a binary expression, unifying each segment's value
%% with the base type implied by its declared segment type.
type_bin_segments(#env{next_var=NV}, _Lvl, []) ->
    {ok, NV};
type_bin_segments(
  Env,
  Level,
  [#alpaca_bits{value=V, type=T, line=L}|Rem])
  when T == int; T == float; T == binary; T == utf8; T == latin1 ->
    VTyp = typ_of(Env, Level, V),
    map_typ_of(Env, VTyp,
               fun(Env2, BitsTyp) ->
                       U = unify(BitsTyp, bin_type_to_type(T), Env2, L),
                       map_err(U, fun(_) -> type_bin_segments(Env2, Level, Rem) end)
               end).
%% Map a binary segment's declared type to the typer's base type, wrapped in
%% a reference cell for unification.
%%
%% Fix: the guard in type_bin_segments/3 admits `latin1` as a segment type,
%% but this function previously had no matching clause, so any latin1
%% segment crashed with function_clause.  Both text encodings type as
%% t_string, matching the utf8 clause.
bin_type_to_type(int) -> new_cell(t_int);
bin_type_to_type(float) -> new_cell(t_float);
bin_type_to_type(utf8) -> new_cell(t_string);
bin_type_to_type(latin1) -> new_cell(t_string);
bin_type_to_type(binary) -> new_cell(t_binary).
%% Thread a `{Typ, NextVar} | {error, _}` result from typ_of into a
%% continuation:  on success the environment's counter is advanced to
%% NextVar and `NextStep(Env', Typ)` is invoked; errors short-circuit via
%% map_err/2.  (Introduced 2016-07-24 to avoid repeating case statements
%% that check typ_of errors.)
map_typ_of(Env, Res, NextStep) ->
    OnSuccess = fun({Typ, NextVar}) ->
                        NextStep(update_counter(NextVar, Env), Typ)
                end,
    map_err(Res, OnSuccess).
%% Short-circuiting "bind" over `{error, _}` results:  errors pass through
%% untouched, any other value is handed to the next step.
map_err(Result, NextStep) ->
    case Result of
        {error, _} -> Result;
        _ -> NextStep(Result)
    end.
%% An empty map literal gets fresh key and value type variables; a non-empty
%% map starts from the empty map type and unifies each pair into it.
type_map(Env, Lvl, #alpaca_map{pairs=[]}) ->
    {KeyVar, Env2} = new_var(Lvl, Env),
    {ValVar, #env{next_var=NV}} = new_var(Lvl, Env2),
    {new_cell({t_map, KeyVar, ValVar}), NV};
type_map(Env, Lvl, #alpaca_map{pairs=Pairs}) ->
    {MapType, NV} = type_map(Env, Lvl, #alpaca_map{}),
    Env2 = update_counter(NV, Env),
    case unify_map_pairs(Env2, Lvl, Pairs, MapType) of
        {error, _}=Err -> Err;
        {Type, #env{next_var=NV2}} -> {Type, NV2}
    end.
%% Unify each key/value pair against the map's key and value types; the
%% resulting map type is returned celled.
unify_map_pairs(Env, _, [], {cell, _}=C) ->
    {C, Env};
unify_map_pairs(Env, _, [], T) ->
    {new_cell(T), Env};
unify_map_pairs(Env, Lvl, [#alpaca_map_pair{line=L, key=KE, val=VE}|Rem], T) ->
    {t_map, K, V} = unwrap_cell(T),
    case typ_list([KE, VE], Lvl, Env, []) of
        {error, _}=Err -> Err;
        {[KT, VT], NV} ->
            Env2 = update_counter(NV, Env),
            case unify(K, KT, Env2, L) of
                ok -> case unify(V, VT, Env2, L) of
                          ok -> unify_map_pairs(Env2, Lvl, Rem, T);
                          {error, _}=Err -> Err
                      end;
                {error, _}=Err -> Err
            end
    end.
%%% This was pulled out of typing match expressions since the exact same clause
%%% type unification has to occur in match and receive expressions.
unify_clauses(Env, Lvl, Cs) ->
    %% Type each clause (tracking its line for error reporting):
    ClauseFolder =
        fun(_, {error, _}=Err) -> Err;
           (C, {Clauses, EnvAcc}) ->
                case typ_of(EnvAcc, Lvl, C) of
                    {error, _}=Err -> Err;
                    {TypC, NV} ->
                        #alpaca_clause{line=Line} = C,
                        {[{Line, TypC}|Clauses], update_counter(NV, EnvAcc)}
                end
        end,
    case lists:foldl(ClauseFolder, {[], Env}, Cs) of
        {error, _}=Err -> Err;
        {TypedCs, #env{next_var=NextVar2}} ->
            %% Unify every clause's pattern and result types against the
            %% first clause's:
            UnifyFolder =
                fun(_, {error, _}=Err) -> Err;
                   ({Line, {t_clause, PA, _, RA}}, Acc) ->
                        case Acc of
                            {t_clause, PB, _, RB} = TypC ->
                                case unify(PA, PB, Env, Line) of
                                    ok ->
                                        %% All record result types must have the
                                        %% exact same fields, hence `true`:
                                        case unify(RA, RB, Env, Line, true) of
                                            ok -> TypC;
                                            {error, _} = Err -> Err
                                        end;
                                    {error, _} = Err -> Err
                                end;
                            {error, _} = Err -> Err
                        end
                end,
            [{_, FC}|TCs] = lists:reverse(TypedCs),
            case lists:foldl(UnifyFolder, FC, TCs) of
                {error, _}=Err ->Err;
                _ -> {ok, FC, update_counter(NextVar2, Env)}
            end
    end.
%% Error-propagating helper for collapse_receivers/3:  `{error, _}` results
%% pass straight through, anything else is handed to the continuation F.
collapse_error(Res, F) ->
    case Res of
        {error, _} -> Res;
        _ -> F(Res)
    end.
%% Collapse nested receiver types into a single receiver whose message type
%% is unified across the nesting.  Cells are followed (and updated in place
%% via set_cell); unification errors propagate through collapse_error/2.
collapse_receivers({cell, _}=C, Env, Line) ->
    collapse_error(
      collapse_receivers(get_cell(C), Env, Line),
      fun(R) -> set_cell(C, R), C end);
collapse_receivers({link, {cell, _}=C}, Env, Line) ->
    collapse_error(
      collapse_receivers(C, Env, Line),
      fun(Res) -> {link, Res} end);
collapse_receivers({t_receiver, Typ, {cell, _}=C}=Recv, Env, Line) ->
    case get_cell(C) of
        {t_receiver, _, _}=Nested ->
            %% A receiver whose body is itself a receiver: collapse the
            %% inner one and unify the two receive types.
            case collapse_receivers(Nested, Env, Line) of
                {error, _}=Err -> Err;
                {t_receiver, _, Res}=Collapsed ->
                    case unify({t_receiver, Typ, Res},
                               new_cell(Collapsed),
                               Env, Line) of
                        ok -> {t_receiver, Typ, Res};
                        {error, _} = Err -> Err
                    end
            end;
        {link, {cell, _}=CC} ->
            collapse_receivers({t_receiver, Typ, CC}, Env, Line);
        _Other ->
            Recv
    end;
collapse_receivers({t_receiver, T, E}, Env, Line) ->
    %% Cell the body first so the clause above applies:
    collapse_receivers({t_receiver, T, new_cell(E)}, Env, Line);
collapse_receivers(E, _, _) ->
    E.
%% Type a receive expression:  unify its clauses, collapse any nested
%% receiver in the result type, and (if present) unify the timeout action's
%% type with the result type.
type_receive(Env, Lvl, #alpaca_receive{clauses=Cs, line=Line, timeout_action=TA}) ->
    EnsureCelled = fun({cell, _}=C) -> C;
                      (NC) -> new_cell(NC)
                   end,
    case unify_clauses(Env, Lvl, Cs) of
        {error, _}=Err -> Err;
        {ok, {t_clause, PTyp, _, RTyp}, Env2} ->
            Collapsed = collapse_receivers(RTyp, Env, Line),
            %% NOTE(review): the value of this case expression is discarded;
            %% it appears to be evaluated only for unification side effects
            %% (and erlang:error on failure) — confirm.
            case unwrap(Collapsed) of
                {t_receiver, _, B} ->
                    RC = EnsureCelled(Collapsed),
                    case unify(RC, new_cell(B), Env, Line) of
                        %% TODO: return this error
                        {error, _}=Er -> erlang:error(Er);
                        ok -> RC
                    end;
                _ -> Collapsed
            end,
            case TA of
                undefined ->
                    {new_cell({t_receiver, PTyp, RTyp}), Env2#env.next_var};
                E -> case typ_of(Env2, Lvl, E) of
                         {error, _}=Err ->
                             Err;
                         {Typ, NV} ->
                             Env3 = update_counter(NV, Env2),
                             CollapsedC = EnsureCelled(Collapsed),
                             case unify(Typ, CollapsedC, Env3, Line) of
                                 {error, _}=Err ->
                                     Err;
                                 ok ->
                                     {new_cell({t_receiver, PTyp, CollapsedC}),
                                      NV}
                             end
                     end
            end
    end.
%% Get the line number that should be reported by an application AST node.
apply_line(#alpaca_apply{line=L}) ->
    L.
%% Apply argument types to a function type.  Receiver-wrapped function types
%% are handled specially so the receive portion survives the application;
%% celled receivers are updated in place via set_cell.
typ_apply(Env, Lvl, TypF, NextVar, Args, Line) ->
    Result =
        case TypF of
            {cell, _} ->
                case get_cell(TypF) of
                    {t_receiver, Recv, _App} ->
                        {App, _} = deep_copy_type(_App, maps:new()),
                        case typ_apply_no_recv(Env, Lvl, App,
                                               NextVar, Args, Line) of
                            {error, _}=Err -> Err;
                            {Typ, NV} ->
                                NewRec = {t_receiver, Recv, Typ},
                                set_cell(TypF, NewRec),
                                {TypF, NV}
                        end;
                    _ ->
                        typ_apply_no_recv(Env, Lvl, TypF, NextVar, Args, Line)
                end;
            {t_receiver, Recv, _App} ->
                %% Ensure that the receive type and body use the same reference
                %% cells for the same type variables:
                {{t_receiver, R2, A2}, _} = deep_copy_type(TypF, maps:new()),
                case typ_apply_no_recv(Env, Lvl, A2, NextVar, Args, Line) of
                    {error, _}=Err -> Err;
                    {Typ, NV} ->
                        case get_cell(Typ) of
                            {t_receiver, _, RetTyp} ->
                                %% The application's result is itself a
                                %% receiver: unify the two receive types.
                                case unify(R2, Recv, Env, Line) of
                                    {error, _}=Err -> Err;
                                    ok ->
                                        NewRec = {t_receiver, R2, RetTyp},
                                        {NewRec, NV}
                                end;
                            _ ->
                                NewRec = {t_receiver, R2, Typ},
                                {NewRec, NV}
                        end
                end;
            _ ->
                {TypF2, _} = deep_copy_type(TypF, maps:new()),
                typ_apply_no_recv(Env, Lvl, TypF2, NextVar, Args, Line)
        end,
    Result.
%% Core application typing:  unify the function type against a fresh
%% t_arrow built from the argument types and a result placeholder, then
%% return the (now-unified) result type.
typ_apply_no_recv(Env, Lvl, TypF, NextVar, Args, Line) ->
    %% we make a deep copy of the function we're unifying
    %% so that the types we apply to the function don't
    %% force every other application to unify with them
    %% where the other callers may be expecting a
    %% polymorphic function. See Pierce's TAPL, chapter 22.
    %{CopiedTypF, _} = deep_copy_type(TypF, maps:new()),
    %% placeholder:
    CopiedTypF = TypF,
    case typ_list(Args, Lvl, update_counter(NextVar, Env), []) of
        {error, _}=Err -> Err;
        {ArgTypes, NextVar2} ->
            TypRes = new_cell(t_rec),
            Env2 = update_counter(NextVar2, Env),
            Arrow = new_cell({t_arrow, ArgTypes, TypRes}),
            case unify(CopiedTypF, Arrow, Env2, Line) of
                {error, _} = E ->
                    E;
                ok ->
                    #env{next_var=VarNum} = Env2,
                    {TypRes, VarNum}
            end
    end.
-spec extract_fun(
        Env::env(),
        ModuleName::atom(),
        FunName::string(),
        Arity::integer()) -> {ok, alpaca_module(), alpaca_binding()} |
                             {error,
                              {no_module, atom()} |
                              {not_exported, string(), integer()} |
                              {not_found, atom(), string, integer()}} .
%% Look up an *exported* function of the given name and arity in another
%% module known to the environment.
extract_fun(Env, ModuleName, FunName, Arity) ->
    case [M || M <- Env#env.modules, M#alpaca_module.name =:= ModuleName] of
        [] ->
            {error, {no_module, ModuleName}};
        [Module] ->
            Exports = Module#alpaca_module.function_exports,
            case [F || {FN, A} = F <- Exports, FN =:= FunName, A =:= Arity] of
                [_] -> get_fun(Module, FunName, Arity);
                [] -> {error, {not_exported, FunName, Arity}}
            end
    end.
%% Keep only the exported {name, arity} pairs whose label name matches
%% BindingName, preserving their order.
filter_bindings([], _BindingName, Memo) ->
    lists:reverse(Memo);
filter_bindings([{#a_lab{name=N}, Arity}|Rem], N, Memo) ->
    filter_bindings(Rem, N, [{N, Arity}|Memo]);
filter_bindings([_|Rem], BindingName, Memo) ->
    filter_bindings(Rem, BindingName, Memo).
%% Arity-neutral version of extract_fun so that we can get all top-level
%% bindings for a name from a given module.
extract_module_bindings(Env
                       , ModuleName
                       , #a_lab{name=BindingName}
                       ) when is_binary(ModuleName) ->
    case [M || M <- Env#env.modules, M#alpaca_module.name =:= ModuleName] of
        [] ->
            {error, {no_module, ModuleName}};
        [Module] ->
            Exports = Module#alpaca_module.function_exports,
            case filter_bindings(Exports, BindingName, []) of
                [] ->
                    %% Nothing exported under that name at any arity:
                    throw({error, {not_exported, ModuleName, BindingName}});
                Funs ->
                    %% Fetch the actual binding for every exported arity:
                    F = fun({_, A}) ->
                                {ok, _, Fun} = get_fun(Module, BindingName, A),
                                Fun
                        end,
                    {ok, Module, lists:map(F, Funs)}
            end
    end.
%% Look up a function binding by name and arity in the given module.
%%
%% Fix: the not_found error previously embedded the whole #alpaca_module{}
%% record as its first element, contradicting this function's own spec
%% (`{not_found, atom(), ...}`) and the shape produced elsewhere in this
%% file (e.g. `{error, {not_found, Mod, N, length(Args)}}` with a module
%% *name*).  The module's name is now reported instead.  The spec typo
%% `string` is also corrected to `string()`.
-spec get_fun(
        Module::alpaca_module(),
        FunName::string(),
        Arity::integer()) -> {ok, alpaca_module(), alpaca_binding()} |
                             {error, {not_found, atom(), string(), integer()}}.
get_fun(Module, FunName, Arity) ->
    case filter_to_fun(Module#alpaca_module.functions, FunName, Arity) of
        not_found ->
            {error, {not_found, Module#alpaca_module.name, FunName, Arity}};
        {ok, Fun} -> {ok, Module, Fun}
    end.
%% All top-level function bindings named FN in Module whose arity is at
%% least MinArity; used when searching for a currying candidate.
get_curryable_funs(Module, FN, MinArity) ->
    filter_to_curryable_funs(Module#alpaca_module.functions, FN, MinArity).
%% Find the first binding matching both name and arity.
filter_to_fun([], _, _) ->
    not_found;
filter_to_fun([#alpaca_binding{name=#a_lab{name = N}, bound_expr=#alpaca_fun{arity=Arity}}=Fun|_], FN, A)
  when Arity =:= A, N =:= FN ->
    {ok, Fun};
%% NOTE(review): when the requested arity is 0, any binding with a matching
%% name is accepted regardless of its bound expression — presumably so that
%% value (non-function) bindings are found; confirm.
filter_to_fun([#alpaca_binding{name=#a_lab{name = N}}=Fun|_], FN, 0) when N =:= FN ->
    {ok, Fun};
filter_to_fun([_F|Rem], FN, Arity) ->
    filter_to_fun(Rem, FN, Arity).
%% All function bindings named FN whose arity is at least MinArity; these
%% are the candidates for currying an application of FN.
filter_to_curryable_funs(Funs, FN, MinArity) ->
    IsCandidate =
        fun(#alpaca_binding{name=#a_lab{name=Name},
                            bound_expr=#alpaca_fun{arity=Arity}}) ->
                Name =:= FN andalso Arity >= MinArity;
           (_) ->
                false
        end,
    lists:filter(IsCandidate, Funs).
%%% for clauses we need to add bindings to the environment for any labels
%%% (variables) that occur in the pattern. "NameNum" is used to give
%%% "wildcard" variable names (the '_' throwaway label) sequential and thus
%%% differing _actual_ variable names. This is necessary so that two different
%%% occurrences of '_' with different types don't collide in `unify/4` and
%%% thus cause typing to fail when it really should succeed.
%%%
%%% In addition to the type determined for the thing we're adding bindings from,
%%% the return type includes the modified environment with those new bindings
%%% we've added along with the updated "NameNum" value so that we can recurse
%%% through a data structure with `add_bindings/4`.
-spec add_bindings(
        alpaca_expression(),
        env(),
        Lvl::integer(),
        NameNum::integer()) -> {typ(), alpaca_expression(), env(), integer()} |
                               {error, term()}.
add_bindings(#a_lab{name=Name}=S, Env, Lvl, NameNum) ->
    {Typ, Env2} = new_var(Lvl, Env),
    {Typ, S, update_binding(Name, Typ, Env2), NameNum};
%%% A single occurrence of the wildcard doesn't matter here as the renaming
%%% only occurs in structures where multiple instances can show up, e.g.
%%% in tuples and lists.
add_bindings({'_', _}=X, Env, Lvl, NameNum) ->
    {Typ, Env2} = new_var(Lvl, Env),
    {Typ, X, update_binding('_', Typ, Env2), NameNum};
%%% Tuples are a slightly more involved case since we want a type for the
%%% whole tuple as well as any explicit variables to be available in the
%%% result side of the clause.
add_bindings(#alpaca_tuple{values=_}=Tup1, Env, Lvl, NameNum) ->
    {#alpaca_tuple{values=Vs}=Tup2, NN2} = rename_wildcards(Tup1, NameNum),
    {Env2, NN3} = lists:foldl(
                    fun (V, {EnvAcc, NN}) ->
                            {_, _, NewEnv, NewNN} = add_bindings(V, EnvAcc,
                                                                Lvl, NN),
                            {NewEnv, NewNN}
                    end,
                    {Env, NN2},
                    Vs),
    case typ_of(Env2, Lvl, Tup2) of
        {error, _}=Err -> Err;
        {Typ, NextVar} -> {Typ, Tup2, update_counter(NextVar, Env2), NN3}
    end;
add_bindings(#alpaca_cons{}=Cons, Env, Lvl, NameNum) ->
    {#alpaca_cons{head=H, tail=T}=RenCons, NN2} = rename_wildcards(Cons, NameNum),
    {_, _, Env2, NN3} = add_bindings(H, Env, Lvl, NN2),
    {_, _, Env3, NN4} = add_bindings(T, Env2, Lvl, NN3),
    case typ_of(Env3, Lvl, RenCons) of
        {error, _}=Err -> Err;
        {Typ, NextVar} -> {Typ, RenCons, update_counter(NextVar, Env3), NN4}
    end;
add_bindings(#alpaca_binary{}=Bin, Env, Lvl, NameNum) ->
    {Bin2, NN2} = rename_wildcards(Bin, NameNum),
    F = fun(_, {error, _}=Err) -> Err;
           (#alpaca_bits{value=V}, {E, N}) ->
                case add_bindings(V, E, Lvl, N) of
                    {_, _, E2, N2} -> {E2, N2};
                    {error, _}=Err -> Err
                end
        end,
    case lists:foldl(F, {Env, NN2}, Bin2#alpaca_binary.segments) of
        {error, _}=Err -> Err;
        {Env2, NN3} ->
            T = typ_of(Env2, Lvl, Bin2),
            map_typ_of(Env2, T, fun(Env3, Typ) -> {Typ, Bin2, Env3, NN3} end)
    end;
add_bindings(#alpaca_map{}=M, Env, Lvl, NN) ->
    {M2, _NN2} = rename_wildcards(M, NN),
    Folder = fun(_, {error, _}=Err) -> Err;
                (#alpaca_map_pair{key=K, val=V}, {E, N}) ->
                     case add_bindings(K, E, Lvl, N) of
                         {error, _}=Err -> Err;
                         {_, _, E2, N2} ->
                             case add_bindings(V, E2, Lvl, N2) of
                                 {error, _}=Err -> Err;
                                 {_, _, E3, N3} -> {E3, N3}
                             end
                     end
             end,
    %% NOTE(review): the fold starts from NN rather than the renamed _NN2 —
    %% confirm whether that is intentional.
    case lists:foldl(Folder, {Env, NN}, M2#alpaca_map.pairs) of
        {error, _}=Err -> Err;
        {Env2, NN3} ->
            case typ_of(Env2, Lvl, M2) of
                {error, _}=Err -> Err;
                {Typ, NV} -> {Typ, M2, update_counter(NV, Env2), NN3}
            end
    end;
add_bindings(#alpaca_record{}=R, Env, Lvl, NameNum) ->
    {R2, _NameNum2} = rename_wildcards(R, NameNum),
    F = fun(#alpaca_record_member{val=V}=_M, {E, N}) ->
                case add_bindings(V, E, Lvl, N) of
                    {error, _}=Err -> erlang:error(Err);
                    {_, _, E2, N2} -> {E2, N2}
                end
        end,
    case lists:foldl(F, {Env, NameNum}, R2#alpaca_record.members) of
        {Env2, NameNum3} ->
            case typ_of(Env2, Lvl, R2) of
                {error, _}=Err -> erlang:error(Err);
                {Typ, NV} -> {Typ, R2, update_counter(NV, Env2), NameNum3}
            end
    end;
add_bindings(#alpaca_type_apply{arg=none}=T, Env, Lvl, NameNum) ->
    case typ_of(Env, Lvl, T) of
        {error, _}=Err -> Err;
        {Typ, NextVar} -> {Typ, T, update_counter(NextVar, Env), NameNum}
    end;
add_bindings(#alpaca_type_apply{arg=Arg}=T, Env, Lvl, NameNum) ->
    {RenamedArg, NN} = rename_wildcards(Arg, NameNum),
    {_, _, Env2, NextNameNum} = add_bindings(RenamedArg, Env, Lvl, NN),
    TA = T#alpaca_type_apply{arg=RenamedArg},
    case typ_of(Env2, Lvl, TA) of
        {error, _} = Err -> Err;
        {Typ, NextVar} -> {Typ, TA, update_counter(NextVar, Env2), NextNameNum}
    end;
%% Any other expression introduces no bindings; just type it.
add_bindings(Exp, Env, Lvl, NameNum) ->
    case typ_of(Env, Lvl, Exp) of
        {error, _}=Err -> Err;
        {Typ, NextVar} -> {Typ, Exp, update_counter(NextVar, Env), NameNum}
    end.
%%% Tuples may have multiple instances of the '_' wildcard/"don't care"
%%% label. Each instance needs a unique name for unification purposes
%%% so the individual occurrences of '_' get renamed with numbers in order,
%%% e.g. (1, _, _) would become (1, _0, _1).
rename_wildcards(#alpaca_tuple{values=Vs}=Tup, NameNum) ->
    {Renamed, NN} = rename_wildcards(Vs, NameNum),
    {Tup#alpaca_tuple{values=Renamed}, NN};
rename_wildcards(#alpaca_type_apply{arg=none}=TA, NN) ->
    {TA, NN};
rename_wildcards(#alpaca_type_apply{arg=Arg}=TA, NN) ->
    {Arg2, NN2} = rename_wildcards(Arg, NN),
    {TA#alpaca_type_apply{arg=Arg2}, NN2};
rename_wildcards(#alpaca_cons{head=H, tail=T}, NameNum) ->
    {RenH, N1} = rename_wildcards(H, NameNum),
    {RenT, N2} = rename_wildcards(T, N1),
    {#alpaca_cons{head=RenH, tail=RenT}, N2};
rename_wildcards(#alpaca_binary{segments=Segs}=B, NameNum) ->
    F = fun(S, {Memo, NN}) ->
                {S2, NN2} = rename_wildcards(S, NN),
                {[S2|Memo], NN2}
        end,
    {Segs2, NN2} = lists:foldl(F, {[], NameNum}, Segs),
    {B#alpaca_binary{segments=lists:reverse(Segs2)}, NN2};
rename_wildcards(#alpaca_bits{value=V}=Bits, NameNum) ->
    {V2, NN} = rename_wildcards(V, NameNum),
    {Bits#alpaca_bits{value=V2}, NN};
rename_wildcards(#alpaca_map{pairs=Pairs}=M, NameNum) ->
    Folder = fun(P, {Ps, NN}) ->
                     {P2, NN2} = rename_wildcards(P, NN),
                     {[P2|Ps], NN2}
             end,
    {Pairs2, NN} = lists:foldl(Folder, {[], NameNum}, Pairs),
    {M#alpaca_map{pairs=lists:reverse(Pairs2)}, NN};
rename_wildcards(#alpaca_map_pair{key=K, val=V}=P, NameNum) ->
    {K2, N1} = rename_wildcards(K, NameNum),
    {V2, N2} = rename_wildcards(V, N1),
    {P#alpaca_map_pair{key=K2, val=V2}, N2};
rename_wildcards(#alpaca_record{members=Ms}=R, NameNum) ->
    {Ms2, NameNum2} = rename_wildcards(Ms, NameNum),
    {R#alpaca_record{members=Ms2}, NameNum2};
rename_wildcards(#alpaca_record_member{val=V}=RM, NameNum) ->
    {V2, NameNum2} = rename_wildcards(V, NameNum),
    {RM#alpaca_record_member{val=V2}, NameNum2};
rename_wildcards(Vs, NameNum) when is_list(Vs) ->
    Folder = fun(V, {Acc, N}) ->
                     {NewOther, NewN} = rename_wildcards(V, N),
                     {[NewOther|Acc], NewN}
             end,
    {Renamed, NN} = lists:foldl(Folder, {[], NameNum}, Vs),
    {lists:reverse(Renamed), NN};
%% The wildcard itself becomes a fresh label built from the counter
%% (e.g. "0_", "1_", ...):
rename_wildcards({'_', L}, N) ->
    Name = unicode:characters_to_binary(integer_to_list(N)++"_", utf8),
    Sym = ast:label(L, Name),
    {Sym, N+1};
rename_wildcards(O, N) ->
    {O, N}.
%%% Tests
-ifdef(TEST).
%% Fresh typing environment for tests, seeded with celled bindings for all
%% built-in functions.
new_env() ->
    #env{bindings=[celled_binding(Typ)||Typ <- ?all_bifs]}.
%% Top-level typ_of unwraps the reference cells used in unification.
%% This is only preserved for tests at the moment.
-spec typ_of(Env::env(), Exp::alpaca_expression())
            -> {typ(), env()} | {error, term()}.
%% Infer the type of Exp starting from variable counter 0, unwrapping
%% reference cells in the result and recording the next fresh-variable
%% number back into the environment.  Errors pass through untouched.
typ_of(Env, Exp) ->
    case typ_of(Env, 0, Exp) of
        {error, _} = Failure ->
            Failure;
        {Typ, NextVarNum} ->
            {unwrap(Typ), update_counter(NextVarNum, Env)}
    end.
%% Check the type of an expression from the "top-level"
%% of 0 with a new environment.
%% Scan and parse a source string, then infer its type in a fresh
%% environment.  Mirrors the structure of top_typ_with_types/2.
top_typ_of(Code) ->
    {ok, Expr} = alpaca_ast_gen:parse(alpaca_scanner:scan(Code)),
    typ_of(new_env(), Expr).
%% Check the type of the expression in code from the "top-level" with a
%% new environment that contains the provided ADTs.
%% Like top_typ_of/1 but the environment is extended with the given ADT
%% definitions and the value constructors derived from them.
top_typ_with_types(Code, ADTs) ->
    {ok, Expr} = alpaca_ast_gen:parse(alpaca_scanner:scan(Code)),
    Base = new_env(),
    WithTypes = Base#env{current_types=ADTs,
                         type_constructors=constructors(ADTs)},
    typ_of(WithTypes, Expr).
%% There are a number of expected "unbound" variables here. I think this
%% is due to the deallocation problem as described in the first post
%% referenced at the top.
%% Basic let-binding inference: monomorphic arithmetic, a polymorphic
%% higher-order apply, and a nested let.
typ_of_test_() ->
    [?_assertMatch({{t_arrow, [t_int], t_int}, _},
                   top_typ_of("let double x = x + x"))
    , ?_assertMatch({{t_arrow, [{t_arrow, [A], B}, A], B}, _},
                    top_typ_of("let apply f x = f x"))
    , ?_assertMatch({{t_arrow, [t_int], t_int}, _},
                    top_typ_of("let doubler x = let double y = y + y in double x"))
    ].
%% User-defined infix operators should type exactly like ordinary
%% two-argument functions.
infix_arrow_types_test_() ->
    [?_assertMatch({{t_arrow, [t_int], t_int}, _},
                   top_typ_of("let (<*>) x = x + x"))
    , ?_assertMatch({{t_arrow, [A, {t_arrow, [A], B}], B}, _},
                    top_typ_of("let (|>) x f = f x"))
    ].
%% A let-bound polymorphic helper (two_times) applied at a concrete
%% instantiation should still yield a monomorphic outer signature.
simple_polymorphic_let_test() ->
    Source =
        "let double_app my_int ="
        "let two_times f x = f (f x) in "
        "let int_double i = i + i in "
        "two_times int_double my_int",
    Result = top_typ_of(Source),
    ?assertMatch({{t_arrow, [t_int], t_int}, _}, Result).
%% The same let-bound polymorphic helper used at two distinct
%% instantiations (int and float) inside one body.
polymorphic_let_test() ->
    Source =
        "let double_application my_int my_float = "
        "let two_times f x = f (f x) in "
        "let int_double a = a + a in "
        "let float_double b = b +. b in "
        "let doubled_2 = two_times int_double my_int in "
        "two_times float_double my_float",
    Result = top_typ_of(Source),
    ?assertMatch({{t_arrow, [t_int, t_float], t_float}, _}, Result).
%% Typing individual match clauses built directly as AST records:
%% literal patterns, variable patterns, and a variable used in the
%% clause's result expression.
clause_test_() ->
    [?_assertMatch({{t_clause, t_int, none, t_atom}, _},
                   typ_of(
                     new_env(),
                     #alpaca_clause{pattern=ast:int(1, 1),
                                    result=ast:atom(1, true)})),
     ?_assertMatch({{t_clause, {unbound, t0, 0}, none, t_atom}, _},
                   typ_of(
                     new_env(),
                     #alpaca_clause{
                        pattern=ast:label(1, <<"x">>),
                        result=ast:atom(1, true)})),
     ?_assertMatch({{t_clause, t_int, none, t_int}, _},
                   typ_of(
                     new_env(),
                     #alpaca_clause{
                        pattern=ast:label(1, <<"x">>),
                        result=#alpaca_apply{
                                  expr={bif, '+', 1, erlang, '+'},
                                  args=[ast:label(1, <<"x">>),
                                        ast:int(1, 2)]}}))
    ].
%% Match expressions: clause results must unify, and the matched value's
%% type flows into the function signature.
match_test_() ->
    [?_assertMatch({{t_arrow, [t_int], t_int}, _},
                   top_typ_of("let f x = match x with\n i -> i + 2")),
     ?_assertThrow({cannot_unify, undefined, 3, t_atom, t_int},
                   top_typ_of(
                     "let f x = match x with\n"
                     " i -> i + 1\n"
                     "| :atom -> 2")),
     ?_assertMatch({{t_arrow, [t_int], t_atom}, _},
                   top_typ_of(
                     "let f x = match x + 1 with\n"
                     " 1 -> :x_was_zero\n"
                     "| 2 -> :x_was_one\n"
                     "| _ -> :x_was_more_than_one"))
    ].
%% Testing that type errors are reported for the appropriate line when
%% clauses are unified by match or receive.
%% The line number in a cannot_unify error should point at the clause
%% that failed to unify, for both match and receive expressions.
pattern_match_error_line_test_() ->
    [?_assertThrow({cannot_unify, undefined, 3, t_float, t_int},
                   top_typ_of(
                     "let f x = match x with\n"
                     " i, is_integer i -> :int\n"
                     " | f, is_float f -> :float")),
     ?_assertThrow({cannot_unify, _, 4, t_float, t_int},
                   top_typ_of(
                     "let f () = receive with\n"
                     " 0 -> :zero\n"
                     " | 1 -> :one\n"
                     " | 2.0 -> :two\n"
                     " | 3 -> :three\n")),
     ?_assertThrow({cannot_unify, undefined, 3, t_string, t_atom},
                   top_typ_of(
                     "let f x = match x with\n"
                     " 0 -> :zero\n"
                     " | i -> \"not zero\""))
    ].
%% Tuple construction and destructuring, including a failure when a
%% tuple pattern and a scalar pattern appear for the same value, and
%% wildcard renaming inside nested tuple patterns.
tuple_test_() ->
    [?_assertMatch({{t_arrow,
                     [{t_tuple, [t_int, t_float]}],
                     {t_tuple, [t_float, t_int]}}, _},
                   top_typ_of(
                     "let f tuple = match tuple with\n"
                     " (i, f) -> (f +. 1.0, i + 1)")),
     ?_assertMatch({{t_arrow, [t_int], {t_tuple, [t_int, t_atom]}}, _},
                   top_typ_of("let f i = (i + 2, :plus_two)")),
     ?_assertThrow({cannot_unify,
                    undefined,
                    3,
                    {t_tuple, [{unbound, t2, 0}, t_int]}, t_int},
                   top_typ_of(
                     "let f x = match x with\n"
                     " i -> i + 1\n"
                     "| (_, y) -> y + 1\n")),
     ?_assertMatch({{t_arrow, [{t_tuple,
                                [{unbound, _A, _},
                                 {unbound, _B, _},
                                 {t_tuple,
                                  [t_int, t_int]}]}],
                     {t_tuple, [t_int, t_int]}}, _},
                   top_typ_of(
                     "let f x = match x with\n"
                     " (_, _, (1, x)) -> (x + 2, 1)\n"
                     "|(_, _, (_, x)) -> (x + 2, 50)\n"))
    ].
%% List construction, cons patterns, and element-type unification,
%% including wildcard renaming inside a list nested in a tuple.
list_test_() ->
    [?_assertMatch({{t_list, t_float}, _},
                   top_typ_of("1.0 :: []")),
     ?_assertMatch({{t_list, t_int}, _},
                   top_typ_of("1 :: 2 :: []")),
     ?_assertMatch({error, _}, top_typ_of("1 :: 2.0 :: []")),
     ?_assertMatch({{t_arrow,
                     [{unbound, A, _}, {t_list, {unbound, A, _}}],
                     {t_list, {unbound, A, _}}}, _},
                   top_typ_of("let f x y = x :: y")),
     ?_assertMatch({{t_arrow, [{t_list, t_int}], t_int}, _},
                   top_typ_of(
                     "let f l = match l with\n"
                     " h :: t -> h + 1")),
     %% Ensure that a '_' in a list nested in a tuple is renamed properly
     %% so that one does NOT get unified with the other when they're
     %% potentially different types:
     ?_assertMatch({{t_arrow,
                     [{t_tuple, [{t_list, t_int}, {unbound, _, _}, t_float]}],
                     {t_tuple, [t_int, t_float]}}, _},
                   top_typ_of(
                     "let f list_in_tuple =\n"
                     " match list_in_tuple with\n"
                     " (h :: 1 :: _ :: t, _, f) -> (h, f +. 3.0)")),
     ?_assertThrow({cannot_unify, undefined, 3, t_float, t_int},
                   top_typ_of(
                     "let f should_fail x =\n"
                     "let l = 1 :: 2 :: 3 :: [] in\n"
                     "match l with\n"
                     " a :: b :: _ -> a +. b"))
    ].
%% Binary literals and binary pattern matching, including typed segments
%% and a unification failure when a float is used in an int segment.
binary_test_() ->
    [?_assertMatch({t_binary, _},
                   top_typ_of("<<1>>")),
     ?_assertMatch({{t_arrow, [t_binary], t_binary}, _},
                   top_typ_of(
                     "let f x = match x with "
                     "<<1: size=8, 2: size=8, rest: type=binary>> -> rest")),
     ?_assertThrow({cannot_unify, undefined, 1, t_float, t_int},
                   top_typ_of("let f () = let x = 1.0 in <<x: type=int>>")),
     ?_assertMatch({{t_arrow, [t_binary], t_string}, _},
                   top_typ_of(
                     "let drop_hello bin = "
                     " match bin with"
                     " <<\"hello\": type=utf8, rest: type=utf8>> -> rest"))
    ].
%% Map literals, map patterns, and map-update syntax; key and value
%% types must each unify across all entries.
map_test_() ->
    [?_assertMatch({{t_map, t_atom, t_int}, _},
                   top_typ_of("#{:one => 1}")),
     ?_assertMatch({{t_map, t_atom, t_int}, _},
                   top_typ_of("#{:one => 1, :two => 2}")),
     ?_assertMatch({error, {cannot_unify, _, 2, t_atom, t_string}},
                   top_typ_of(
                     "#{:one => 1,\n"
                     " \"two\" => 2}")),
     ?_assertMatch({{t_arrow, [{t_map, t_atom, t_int}], t_string}, _},
                   top_typ_of(
                     "let f x = match x with\n"
                     " #{:one => i}, is_integer i -> \"has one\"\n"
                     " | _ -> \"doesn't have one\"")),
     ?_assertMatch({{t_map, t_atom, t_int}, _},
                   top_typ_of("#{:a => 1 | #{:b => 2}}")),
     ?_assertMatch({error, {cannot_unify, undefined, 1, t_atom, t_string}},
                   top_typ_of("#{:a => 1 | #{\"b\" => 2}}"))
    ].
%% Typing a whole module: each binding gets its inferred type recorded,
%% with polymorphic head keeping its unbound variable.
module_typing_test() ->
    Code =
        "module typing_test\n\n"
        "export add/2\n\n"
        "let add x y = x + y\n\n"
        "let head l = match l with\n"
        " h :: t -> h",
    [M] = make_modules([Code]),
    ?assertMatch({ok, #alpaca_module{
                         functions=[
                                    #alpaca_binding{
                                       name=#a_lab{line = 5, name = <<"add">>},
                                       type={t_arrow,
                                             [t_int, t_int],
                                             t_int}},
                                    #alpaca_binding{
                                       name=#a_lab{line = 7, name = <<"head">>},
                                       type={t_arrow,
                                             [{t_list, {unbound, A, _}}],
                                             {unbound, A, _}}}
                                   ]}},
                 type_module(M, new_env())).
%% A binding may call another binding defined later in the same module;
%% both should still receive concrete types.
module_with_forward_reference_test() ->
    Code =
        "module forward_ref\n\n"
        "export add/2\n\n"
        "let add x y = adder x y\n\n"
        "let adder x y = x + y",
    [M] = make_modules([Code]),
    Env = new_env(),
    ?assertMatch(
       {ok, #alpaca_module{
               functions=[
                          #alpaca_binding{
                             name=#a_lab{line = 5, name = <<"add">>},
                             type={t_arrow, [t_int, t_int], t_int}},
                          #alpaca_binding{
                             name=#a_lab{line = 7, name = <<"adder">>},
                             type={t_arrow, [t_int, t_int], t_int}}]}},
       type_module(M, Env#env{current_module=M, modules=[M]})).
%% A call into an exported function of another module is typed using
%% that module's definition.
simple_inter_module_test() ->
    Mod1 =
        "module inter_module_one\n\n"
        "let add x y = inter_module_two.adder x y",
    Mod2 =
        "module inter_module_two\n\n"
        "export adder/2\n\n"
        "let adder x y = x + y",
    [M1, M2] = make_modules([Mod1, Mod2]),
    E = new_env(),
    Env = E#env{modules=[M1, M2]},
    ?assertMatch(
       {ok, #alpaca_module{
               function_exports=[],
               functions=[
                          #alpaca_binding{
                             name=#a_lab{line = 3, name = <<"add">>},
                             type={t_arrow, [t_int, t_int], t_int}}]}},
       type_module(M1, Env)).
%% Two modules that call into each other should be rejected with a
%% bidirectional_module_ref error rather than looping forever.
bidirectional_module_fail_test() ->
    SourceA =
        "module inter_module_one\n\n"
        "export add/2\n\n"
        "let add x y = inter_module_two.adder x y",
    SourceB =
        "module inter_module_two\n\n"
        "export adder/2, failing_fun/1\n\n"
        "let adder x y = x + y\n\n"
        "let failing_fun x = inter_module_one.add x x",
    [ModA, ModB] = make_modules([SourceA, SourceB]),
    Env = (new_env())#env{modules=[ModA, ModB]},
    ?assertThrow({error, {bidirectional_module_ref,
                          <<"inter_module_two">>,
                          <<"inter_module_one">>}},
                 type_module(ModB, Env)).
%% Self-recursive functions: infinite recursion types as t_rec, a
%% terminating branch fixes the result type, and a polymorphic map
%% keeps distinct input/output type variables.
recursive_fun_test_() ->
    [?_assertMatch({{t_arrow, [t_int], t_rec}, _},
                   top_typ_of(
                     "let f x =\n"
                     "let y = x + 1 in\n"
                     "f y")),
     ?_assertMatch({{t_arrow, [t_int], t_atom}, _},
                   top_typ_of(
                     "let f x = match x with\n"
                     " 0 -> :zero\n"
                     "| x -> f (x - 1)")),
     ?_assertThrow({cannot_unify, undefined, 3, t_int, t_atom},
                   top_typ_of(
                     "let f x = match x with\n"
                     " 0 -> :zero\n"
                     "| 1 -> 1\n"
                     "| y -> y - 1\n")),
     ?_assertMatch(
        {{t_arrow, [{t_list, {unbound, A, _}},
                    {t_arrow, [{unbound, A, _}], {unbound, B, _}}],
          {t_list, {unbound, B, _}}}, _}
        when A =/= B,
             top_typ_of(
               "let my_map l f = match l with\n"
               " [] -> []\n"
               "| h :: t -> (f h) :: (my_map t f)"))
    ].
%% Mutually recursive bindings with no terminating branch both type as
%% returning t_rec.
infinite_mutual_recursion_test() ->
    Code =
        "module mutual_rec_test\n\n"
        "let a x = b x\n\n"
        "let b x = let y = x + 1 in a y",
    [M] = make_modules([Code]),
    E = new_env(),
    ?assertMatch({ok, #alpaca_module{
                         name= <<"mutual_rec_test">>,
                         functions=[
                                    #alpaca_binding{
                                       name=#a_lab{line = 3, name = <<"a">>},
                                       type={t_arrow, [t_int], t_rec}},
                                    #alpaca_binding{
                                       name=#a_lab{line = 5, name = <<"b">>},
                                       type={t_arrow, [t_int], t_rec}}]}},
                 type_module(M, E)).
%% When one branch of a mutual recursion terminates, its result type
%% (t_atom here) propagates to both bindings.
terminating_mutual_recursion_test() ->
    Code =
        "module terminating_mutual_rec_test\n\n"
        "let a x = let y = x + 1 in b y\n\n"
        "let b x = match x with\n"
        " 10 -> :ten\n"
        "| y -> a y",
    [M] = make_modules([Code]),
    E = new_env(),
    ?assertMatch({ok, #alpaca_module{
                         name= <<"terminating_mutual_rec_test">>,
                         functions=[
                                    #alpaca_binding{
                                       name=#a_lab{line = 3, name = <<"a">>},
                                       type={t_arrow, [t_int], t_atom}},
                                    #alpaca_binding{
                                       name=#a_lab{line = 5, name = <<"b">>},
                                       type={t_arrow, [t_int], t_atom}}]}},
                 type_module(M, E)).
%% The `beam` FFI form: result clauses must unify with each other, and
%% variables used in the argument list must already be bound.
ffi_test_() ->
    [?_assertMatch({t_int, _},
                   top_typ_of(
                     "beam :io :format [\"One is ~w~n\", [1]] with\n"
                     " _ -> 1")),
     ?_assertMatch({error, {cannot_unify, undefined, 1, t_atom, t_int}},
                   top_typ_of(
                     "beam :a :b [1] with\n"
                     " (:ok, x) -> 1\n"
                     "| (:error, x) -> :error")),
     ?_assertMatch({{t_arrow, [{unbound, _, _}], t_atom}, _},
                   top_typ_of(
                     "let f x = beam :a :b [x] with\n"
                     " 1 -> :one\n"
                     "| _ -> :not_one")),
     ?_assertThrow({bad_variable_name, undefined, 1, <<"x">>},
                   top_typ_of(
                     "let f () = beam :a :b [x] with\n"
                     " 1 -> :one\n"
                     "| _ -> :not_one"))
    ].
%% The == operator yields t_bool and requires both operands to have the
%% same type, in expressions and in clause guards alike.
equality_test_() ->
    [?_assertMatch({t_bool, _}, top_typ_of("1 == 2")),
     ?_assertMatch({{t_arrow, [t_int], t_bool}, _},
                   top_typ_of("let f x = 1 == x")),
     ?_assertMatch({error, {cannot_unify, _, _, _, _}}, top_typ_of("1.0 == 1")),
     ?_assertMatch({{t_arrow, [t_int], t_atom}, _},
                   top_typ_of(
                     "let f x = match x with\n"
                     " a, a == 0 -> :zero\n"
                     "|b -> :not_zero")),
     ?_assertThrow({cannot_unify, undefined, 3, t_float, t_int},
                   top_typ_of(
                     "let f x = match x with\n"
                     " a -> a + 1\n"
                     "| a, a == 1.0 -> 1"))
    ].
%% Type-checking guards (is_integer, ==, is_pid, ...) coerce the type
%% of the matched pattern for use in the clause result.
type_guard_test_() ->
    [
     %% In a normal match without union types the is_integer guard should
     %% coerce all of the patterns to t_int:
     ?_assertMatch({{t_arrow, [t_int], t_int}, _},
                   top_typ_of(
                     "let f x = match x with\n"
                     " i, is_integer i -> i\n"
                     " | _ -> 0")),
     %% Calls to Erlang should use a type checking guard to coerce the
     %% type in the pattern for use in the resulting expression:
     ?_assertMatch({t_int, _},
                   top_typ_of(
                     "beam :a :b [5] with\n"
                     " :one -> 1\n"
                     " | i, i == 2.0 -> 2\n"
                     " | i, is_integer i -> i\n")),
     %% Two results with different types as determined by their guards
     %% should result in a type error:
     ?_assertMatch({error, {cannot_unify, _, _, t_int, t_float}},
                   top_typ_of(
                     "beam :a :b [2] with\n"
                     " i, i == 1.0 -> i\n"
                     " | i, is_integer i -> i")),
     %% Guards should work with items from inside tuples:
     ?_assertMatch(
        {{t_arrow, [{t_tuple, [t_atom, {unbound, _, _}]}], t_atom}, _},
        top_typ_of(
          "let f x = match x with\n"
          " (msg, _), msg == :error -> :error\n"
          " | (msg, _) -> :ok"))
     , ?_assertMatch(
          {{t_pid, _}, _},
          top_typ_of("beam :erlang :self [] with p, is_pid p -> p"))
    ].
%%% ### ADT Tests
%%%
%%%
%%% Tests for ADTs that are simply unions of existing types:
%% A union ADT over int and atom lets clauses that would otherwise fail
%% to unify succeed by widening to the ADT.
union_adt_test_() ->
    [?_assertThrow({cannot_unify, _, 1, t_int, t_atom},
                   top_typ_with_types(
                     "let f x = match x with "
                     " 0 -> :zero"
                     "| i, is_integer i -> i",
                     [])),
     %% Adding a type that unions integers and atoms should make the
     %% previously failing code pass.
     ?_assertMatch({{t_arrow,
                     [t_int],
                     #adt{name="t", vars=[]}},
                    _},
                   top_typ_with_types(
                     "let f x = match x with "
                     " 0 -> :zero"
                     "| i, is_integer i -> i",
                     [#alpaca_type{name={type_name, 1, "t"},
                                   vars=[],
                                   members=[t_int, t_atom]}]))
    ].
%% Tuples as ADT members, in both member orderings, and rejection of a
%% type variable that was never declared by the type.
type_tuple_test_() ->
    %% This first test passes but the second does not due to a spawn limit.
    %% I believe an infinite loop is occurring when unification fails between
    %% t_int and t_tuple in try_types which causes unify to reinstantiate the
    %% types and the cycle continues. Both orderings of members need to work.
    [?_assertMatch({{t_arrow,
                     [#adt{name="t", vars=[{"x", {unbound, t1, 0}}]}],
                     t_atom},
                    _},
                   top_typ_with_types(
                     "let f x = match x with "
                     " 0 -> :zero"
                     "| (i, 0) -> :adt",
                     [#alpaca_type{name={type_name, 1, "t"},
                                   vars=[{type_var, 1, "x"}],
                                   members=[#alpaca_type_tuple{
                                               members=[{type_var, 1, "x"},
                                                        t_int]},
                                            t_int]}])),
     ?_assertMatch({{t_arrow,
                     [#adt{name="t", vars=[{"x", {unbound, t1, 0}}]}],
                     t_atom},
                    _},
                   top_typ_with_types(
                     "let f x = match x with "
                     " 0 -> :zero"
                     "| (i, 0) -> :adt",
                     [#alpaca_type{name={type_name, 1, "t"},
                                   vars=[{type_var, 1, "x"}],
                                   members=[t_int,
                                            #alpaca_type_tuple{
                                               members=[{type_var, 1, "x"},
                                                        t_int]}]}])),
     %% A recursive type with a bad variable:
     ?_assertThrow(
        {bad_variable, 1, "y"},
        top_typ_with_types(
          "let f x = match x with "
          " 0 -> :zero"
          "| (i, 0) -> :adt"
          "| (0, (i, 0)) -> :nested",
          [#alpaca_type{name={type_name, 1, "t"},
                        vars=[{type_var, 1, "x"}],
                        members=[t_int,
                                 #alpaca_type_tuple{
                                    members=[{type_var, 1, "x"},
                                             t_int]},
                                 #alpaca_type_tuple{
                                    members=[t_int,
                                             #alpaca_type{
                                                name={type_name, 1, "t"},
                                                vars=[{type_var, 1, "y"}]}
                                            ]}]}]))
    ].
%% The same polymorphic ADT instantiated at two different types (float
%% and int) within one function must not share its type variable.
same_polymorphic_adt_union_test_() ->
    [?_assertMatch({{t_arrow,
                     [#adt{name="t", vars=[{"x", t_float}]},
                      #adt{name="t", vars=[{"x", t_int}]}],
                     {t_tuple, [t_atom, t_atom]}},
                    _},
                   top_typ_with_types(
                     "let f x y ="
                     " let a = match x with"
                     " (0.0, 0) -> :zero "
                     "| (0.0, 0, :atom) -> :zero_atom in "
                     " let b = match y with"
                     " (1, 1) -> :int_one"
                     "| (1, 1, :atom) -> :one_atom in "
                     "(a, b)",
                     [#alpaca_type{name={type_name, 1, "t"},
                                   vars=[{type_var, 1, "x"}],
                                   members=[#alpaca_type_tuple{
                                               members=[{type_var, 1, "x"},
                                                        t_int]},
                                            #alpaca_type_tuple{
                                               members=[{type_var, 1, "x"},
                                                        t_int,
                                                        t_atom]}]}]))
    ].
%% Value constructors: applying them, matching on them, recursive and
%% polymorphic constructor arguments, list/map/alias arguments, and
%% failure when a constructor's argument type does not unify.
type_constructor_test_() ->
    [?_assertMatch({{t_arrow,
                     [#adt{name="t", vars=[{"x", {unbound, _, _}}]}],
                     t_atom},
                    _},
                   top_typ_with_types(
                     "let f x = match x with "
                     "i, is_integer i -> :is_int"
                     "| A i -> :is_a",
                     [#alpaca_type{name={type_name, 1, "t"},
                                   vars=[{type_var, 1, "x"}],
                                   members=[t_int,
                                            #alpaca_constructor{
                                               name=#type_constructor{line=1, name="A"},
                                               arg={type_var, 1, "x"}}]}])),
     ?_assertMatch(
        {{t_arrow,
          [t_int],
          #adt{name="even_odd", vars=[]}},
         _},
        top_typ_with_types(
          "let f x = match x % 2 with "
          " 0 -> Even x"
          "| 1 -> Odd x",
          [#alpaca_type{name={type_name, 1, "even_odd"},
                        vars=[],
                        members=[#alpaca_constructor{
                                    name=#type_constructor{line=1, name="Even"},
                                    arg=t_int},
                                 #alpaca_constructor{
                                    name=#type_constructor{line=1, name="Odd"},
                                    arg=t_int}]}])),
     ?_assertMatch(
        {{t_arrow,
          [#adt{name="json_subset", vars=[]}],
          t_atom},
         _},
        top_typ_with_types(
          "let f x = match x with "
          " i, is_integer i -> :int"
          "| f, is_float f -> :float"
          "| (k, v) -> :keyed_value",
          [#alpaca_type{
              name={type_name, 1, "json_subset"},
              vars=[],
              members=[t_int,
                       t_float,
                       #alpaca_type_tuple{
                          members=[t_string,
                                   #alpaca_type{
                                      name={type_name, 1, "json_subset"}}]}
                      ]}])),
     ?_assertMatch(
        {{t_arrow,
          [{unbound, V, _}],
          #adt{name="my_list", vars=[{"x", {unbound, V, _}}]}},
         _},
        top_typ_with_types(
          "let f x = Cons (x, Cons (x, Nil))",
          [#alpaca_type{
              name={type_name, 1, "my_list"},
              vars=[{type_var, 1, "x"}],
              members=[#alpaca_constructor{
                          name=#type_constructor{line=1, name="Cons"},
                          arg=#alpaca_type_tuple{
                                 members=[{type_var, 1, "x"},
                                          #alpaca_type{
                                             name={type_name, 1, "my_list"},
                                             vars=[{type_var, 1, "x"}]}]}},
                       #alpaca_constructor{
                          name=#type_constructor{line=1, name="Nil"},
                          arg=none}]}])),
     ?_assertThrow(
        {cannot_unify, undefined, 1, t_int, t_float},
        top_typ_with_types(
          "let f x = Cons (1, Cons (2.0, Nil))",
          [#alpaca_type{
              name={type_name, 1, "my_list"},
              vars=[{type_var, 1, "x"}],
              members=[#alpaca_constructor{
                          name=#type_constructor{line=1, name="Cons"},
                          arg=#alpaca_type_tuple{
                                 members=[{type_var, 1, "x"},
                                          #alpaca_type{
                                             name={type_name, 1, "my_list"},
                                             vars=[{type_var, 1, "x"}]}]}},
                       #alpaca_constructor{
                          name=#type_constructor{line=1, name="Nil"},
                          arg=none}]}])),
     ?_assertMatch(
        {{t_arrow,
          [{unbound, _, _}],
          #adt{name="t", vars=[]}},
         _},
        top_typ_with_types(
          "let f x = Constructor [1]",
          [#alpaca_type{
              name={type_name, 1, "t"},
              vars=[],
              members=[#alpaca_constructor{
                          name=#type_constructor{line=1, name="Constructor"},
                          arg={t_list, t_int}}]}])),
     ?_assertMatch(
        {{t_arrow,
          [{unbound, _, _}],
          #adt{name="t", vars=[]}},
         _},
        top_typ_with_types(
          "let f x = Constructor #{1 => \"one\"}",
          [#alpaca_type{
              name={type_name, 1, "t"},
              vars=[],
              members=[#alpaca_constructor{
                          name=#type_constructor{line=1, name="Constructor"},
                          arg={t_map, t_int, t_string}}]}])),
     ?_assertMatch(
        {{t_arrow,
          [{unbound, _, _}],
          #adt{name="t", vars=[]}},
         _},
        top_typ_with_types(
          "let f x = Constructor 1",
          [#alpaca_type{
              name={type_name, 1, "t"},
              vars=[],
              members=[#alpaca_constructor{
                          name=#type_constructor{line=1, name="Constructor"},
                          arg=#alpaca_type{name={type_name, 1, "union"}}}]},
           #alpaca_type{
              name={type_name, 1, "union"},
              vars=[],
              members=[t_int, t_float]}]))
    ].
%% A constructor argument of type `pid int` should accept a spawned
%% process whose receive loop consumes ints.
type_constructor_with_pid_arg_test() ->
    Source = "module constructor\n\n"
             "type t = Constructor pid int\n\n"
             "let a x = receive with i -> x + i\n\n"
             "let make () = Constructor (spawn a 2)",
    Result = module_typ_and_parse(Source),
    ?assertMatch({ok, _}, Result).
%% A constructor whose argument is a function type accepts a matching
%% predicate and rejects one with the wrong return type.
type_constructor_with_arrow_arg_test() ->
    Common = "module constructor\n\n"
             "type t = Constructor (fn int int -> bool)\n\n",
    GoodPred = "let p x y = x > y\n\n"
               "let make () = Constructor p",
    BadPred = "let p x y = x + y\n\n"
              "let make () = Constructor p",
    ?assertMatch({ok, _}, module_typ_and_parse(Common ++ GoodPred)),
    ?assertMatch({error, {cannot_unify, <<"constructor">>, _, t_bool, t_int}},
                 module_typ_and_parse(Common ++ BadPred)).
%% Function types reached through type aliases still constrain a
%% constructor's argument.
type_constructor_with_aliased_arrow_arg_test() ->
    Common = "module constructor\n\n"
             "type binop 'a = fn 'a 'a -> 'a\n"
             "type intbinop = binop int\n"
             "type wrapper = W intbinop\n\n",
    ?assertMatch({ok, _},
                 module_typ_and_parse(Common ++ "let f (W b) = b 1 1\n\n")),
    ?assertMatch({error, {cannot_unify, <<"constructor">>, 7, t_int, t_atom}},
                 module_typ_and_parse(Common ++ "let f (W b) = b 1 :atom\n\n")).
%% Constructor argument types resolved through several levels of type
%% aliases: one valid body and three distinct unification failures.
type_constructor_multi_level_type_alias_arg_test() ->
    Prefix =
        "module constructor\n\n"
        "type twotuplelist 'a 'b = list ('a, 'b)\n\n"
        "type proplist 'v = twotuplelist atom 'v\n\n"
        "type checklist = proplist bool\n\n"
        "type constructor = Constructor checklist\n\n",
    Check = fun(Body) -> module_typ_and_parse(Prefix ++ Body) end,
    ?assertMatch({ok, #alpaca_module{}},
                 Check("let make () = Constructor [(:test_passed, true)]")),
    %% Wrong key type:
    ?assertMatch({error, {cannot_unify, _, _, _, _}},
                 Check("let make () = Constructor [(1, true)]")),
    %% Wrong value type:
    ?assertMatch({error, {cannot_unify, _, _, _, _}},
                 Check("let make () = Constructor [(:test_passed, 1)]")),
    %% Wrong argument shape entirely:
    ?assertMatch({error, {cannot_unify, _, _, _, _}},
                 Check("let make () = Constructor 1")).
%% Type variables are replaced correctly when one ADT's constructor
%% takes another polymorphic ADT as its argument.
type_var_replacement_test_() ->
    [fun() ->
             Code =
                 "module nested\n\n"
                 "type option 'a = Some 'a | None\n\n"
                 "type either 'a = Left 'a | Right option int\n\n"
                 "let foo x =\n"
                 " match x with\n"
                 " Right Some a -> a\n\n"
                 "let tester () = foo Right Some 1",
             ?assertMatch(
                {ok, #alpaca_module{
                        functions=[#alpaca_binding{},
                                   #alpaca_binding{
                                      type={t_arrow, [t_unit], t_int}}]}},
                module_typ_and_parse(Code))
     end
     ,fun() ->
              Code =
                  "module nested\n\n"
                  "type option 'a = Some 'a | None\n\n"
                  "type either 'a = Left 'a | Right option 'a\n\n"
                  "let foo x =\n"
                  " match x with\n"
                  " Right Some a -> a\n\n"
                  "let tester () = foo Right Some 1",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{},
                                    #alpaca_binding{
                                       type={t_arrow, [t_unit], t_int}}]}},
                 module_typ_and_parse(Code))
      end
    ].
%%% Type constructors that use underscores in pattern matches to discard actual
%%% values should work, depends on correct recursive renaming.
%% Wildcards nested inside constructor patterns must each get a unique
%% name (recursive renaming) so they don't unify with each other.
rename_constructor_wildcard_test() ->
    Code =
        "module module_with_wildcard_constructor_tuples\n\n"
        "type t = int | float | Pair (string, t)\n\n"
        "let a x = match x with\n"
        "i, is_integer i -> :int\n"
        "| f, is_float f -> :float\n"
        "| Pair (_, _) -> :tuple\n"
        "| Pair (_, Pair (_, _)) -> :nested_t"
        "| Pair (_, Pair (_, Pair(_, _))) -> :double_nested_t",
    [M] = make_modules([Code]),
    Env = new_env(),
    Res = type_module(M, Env),
    ?assertMatch(
       {ok, #alpaca_module{
               functions=[#alpaca_binding{
                             name=#a_lab{line = 5, name = <<"a">>},
                             type={t_arrow,
                                   [#adt{
                                       name = <<"t">>,
                                       vars=[],
                                       members=[{t_adt_cons, "Pair"},
                                                t_float,
                                                t_int]}],
                                   t_atom}}]}},
       Res).
%% A union ADT over a list and a map lets a match mix cons and map
%% patterns for the same value.
module_with_map_in_adt_test() ->
    Source =
        "module module_with_map_in_adt_test\n\n"
        "type t 'v = list 'v | map atom 'v\n\n"
        "let a x = match x with\n"
        " h :: t -> h"
        " | #{:key => v} -> v",
    [Mod] = make_modules([Source]),
    Result = type_modules([Mod]),
    ?assertMatch({ok, _}, Result).
%% Guards that pin the list and map branches to incompatible element
%% types should fail to unify the map with the list.
module_with_adt_map_error_test() ->
    Source =
        "module module_with_map_in_adt_test\n\n"
        "type t 'v = list 'v | map atom 'v\n\n"
        "let a x = match x with\n"
        " h :: t, is_string h -> h"
        " | #{:key => v}, is_chars v -> v",
    [Mod] = make_modules([Source]),
    ?assertMatch(
       {error, {cannot_unify, _, _, {t_map, _, _}, {t_list, _}}},
       type_modules([Mod])).
%% A JSON-like recursive union type: matching object and array shapes
%% against the union's list members.
json_union_type_test() ->
    Code =
        "module json_union_type_test\n\n"
        "type json = int | float | string | bool "
        " | list json "
        " | list (string, json)\n\n"
        "let json_to_atom j = match j with "
        " i, is_integer i -> :int"
        " | f, is_float f -> :float"
        " | (_, _) :: _ -> :json_object"
        " | _ :: _ -> :json_array",
    [M] = make_modules([Code]),
    Env = new_env(),
    Res = type_module(M, Env),
    ?assertMatch(
       {ok,
        #alpaca_module{
           types=[#alpaca_type{
                     module= <<"json_union_type_test">>,
                     name={type_name, 3, <<"json">>}}],
           functions=[#alpaca_binding{
                         name=#a_lab{name = <<"json_to_atom">>},
                         type={t_arrow,
                               [#adt{name = <<"json">>,
                                     members=[{t_list,
                                               {t_tuple,
                                                [t_string,
                                                 #adt{name = <<"json">>}]}},
                                              {t_list,
                                               #adt{name = <<"json">>}},
                                              t_bool,
                                              t_string,
                                              t_float,
                                              t_int]}],
                               t_atom}}]}},
       Res).
%% An in-module union type with a recursive tuple member; nested tuple
%% patterns should match the recursive member.
module_with_types_test() ->
    Code =
        "module module_with_types\n\n"
        "type t = int | float | (string, t)\n\n"
        "let a x = match x with\n"
        "i, is_integer i -> :int\n"
        "| f, is_float f -> :float\n"
        "| (_, _) -> :tuple"
        "| (_, (_, _)) -> :nested",
    [M] = make_modules([Code]),
    Env = new_env(),
    Res = type_module(M, Env),
    ?assertMatch(
       {ok, #alpaca_module{
               functions=[#alpaca_binding{
                             name=#a_lab{line = 5, name = <<"a">>},
                             type={t_arrow,
                                   [#adt{
                                       name = <<"t">>,
                                       vars=[],
                                       members=[{t_tuple,
                                                 [t_string,
                                                  #adt{name = <<"t">>,
                                                       vars=[],
                                                       members=[]}]},
                                                t_float,
                                                t_int
                                               ]}],
                                   t_atom}}]}},
       Res).
%% A recursive polymorphic tree type used at a concrete instantiation
%% should type successfully.
recursive_polymorphic_adt_test() ->
    Source = polymorphic_tree_code() ++
        "\n\nlet succeed () = height (Node (Leaf, 1, (Node (Leaf, 1, Leaf))))",
    [Mod] = make_modules([Source]),
    ?assertMatch({ok, _}, type_modules([Mod])).
%% Passing a bare int where the recursive tree ADT is expected must be
%% a unification error, not a silent widening.
recursive_polymorphic_adt_fails_to_unify_with_base_type_test() ->
    Code = polymorphic_tree_code() ++
        "\n\nlet fail () = height 1",
    [M] = make_modules([Code]),
    Res = type_modules([M]),
    ?assertMatch({error,
                  { cannot_unify
                  , <<"tree">>
                  , 15
                  , #adt{ name = <<"tree">>
                        , vars=[{"a",_}]
                        , members=[ {t_adt_cons,"Node"}
                                  , {t_adt_cons,"Leaf"}
                                  ]
                        }
                  , t_int
                  }
                 },
                 Res).
%% Shared fixture: source for a module defining a polymorphic binary
%% tree type plus height/1 and max/2 over it.
polymorphic_tree_code() ->
    "module tree\n\n"
    "type tree 'a = Leaf | Node (tree 'a, 'a, tree 'a)\n\n"
    "let height t =\n"
    " match t with\n"
    " Leaf -> 0\n"
    " | Node (l, _, r) -> 1 + (max (height l) (height r))\n\n"
    "let max a b =\n"
    " match (a > b) with\n"
    " true -> a\n"
    " | false -> b".
%% Constructor arity errors: applying a nullary constructor to an
%% argument, and using a unary constructor without one.
type_tag_arity_test_() ->
    [fun() ->
             Code =
                 "module opt \n"
                 "type opt 'a = Some 'a | None \n"
                 "let should_fail x = None x \n",
             [M] = make_modules([Code]),
             Res = type_modules([M]),
             %% Nested error because it's thrown and I prefer to use a match
             %% here rather than explicit full Symbol as its representation will
             %% change over time.
             ?assertMatch(
                {error, {error, {too_many_type_arguments, <<"opt">>, 3, "None"}}},
                Res)
     end
     , fun() ->
               %% An instance constructor that expects an argument should require
               %% one when typing.
               Code =
                   "module m \n"
                   "type x = X int \n"
                   "let should_fail _ = (X) \n",
               [M] = make_modules([Code]),
               Res = type_modules([M]),
               ?assertMatch(
                  {error, {error, {not_enough_type_arguments, <<"m">>, 3, "X"}}},
                  Res)
       end
    ].
%% Built-in types (atom here) may be supplied as arguments to a type
%% alias's variables.
builtin_types_as_type_variables_test() ->
    Source =
        "module optlist\n\n"
        "type proplist 'k 'v = list ('k, 'v)\n\n"
        "type optlist 'v = proplist atom 'v",
    [Mod] = make_modules([Source]),
    ?assertMatch({ok, _}, type_modules([Mod])).
%% Matching on a hand-rolled polymorphic list ADT pins the ADT's type
%% variable via an is_integer guard.
module_matching_lists_test() ->
    Code =
        "module module_matching_lists\n\n"
        "type my_list 'x = Nil | Cons ('x, my_list 'x)\n\n"
        "let a x = match x with "
        " Nil -> :nil"
        " | Cons (i, Nil), is_integer i -> :one_item"
        " | Cons (i, xx) -> :more_than_one",
    [M] = make_modules([Code]),
    Env = new_env(),
    Res = type_module(M, Env),
    ?assertMatch({ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       name=#a_lab{line = 5, name = <<"a">>},
                                       type={t_arrow,
                                             [#adt{
                                                 name = <<"my_list">>,
                                                 vars=[{"x", t_int}]}],
                                             t_atom}}]}},
                 Res).
%%% When ADTs are instantiated their variables and references to those
%%% variables are put in reference cells. Two functions that use the
%%% ADT with different types should not permanently union the ADTs
%%% variables, one preventing the other from using the ADT.
%% Two functions instantiating the same ADT at different types (int vs
%% float) must not contaminate each other's instantiation; a third
%% function may even use both instantiations side by side.
type_var_protection_test() ->
    Code =
        "module module_matching_lists\n\n"
        "type my_list 'x = Nil | Cons ('x, my_list 'x)\n\n"
        "let a x = match x with "
        " Nil -> :nil"
        " | Cons (i, Nil), is_integer i -> :one_integer"
        " | Cons (i, xx) -> :more_than_one_integer\n\n"
        "let b x = match x with "
        " Nil -> :nil"
        " | Cons (f, Nil), is_float f -> :one_float"
        " | Cons (f, xx) -> :more_than_one_float\n\n"
        "let c () = (Cons (1.0, Nil), Cons(1, Nil))",
    [M] = make_modules([Code]),
    Env = new_env(),
    Res = type_module(M, Env),
    ?assertMatch(
       {ok, #alpaca_module{
               functions=[#alpaca_binding{
                             name=#a_lab{line = 5, name = <<"a">>},
                             type={t_arrow,
                                   [#adt{
                                       name = <<"my_list">>,
                                       vars=[{"x", t_int}]}],
                                   t_atom}},
                          #alpaca_binding{
                             name=#a_lab{line = 7, name = <<"b">>},
                             type={t_arrow,
                                   [#adt{
                                       name = <<"my_list">>,
                                       vars=[{"x", t_float}]}],
                                   t_atom}},
                          #alpaca_binding{
                             name=#a_lab{line = 9, name = <<"c">>},
                             type={t_arrow,
                                   [t_unit],
                                   {t_tuple,
                                    [#adt{
                                        name = <<"my_list">>,
                                        vars=[{"x", t_float}]},
                                     #adt{
                                        name = <<"my_list">>,
                                        vars=[{"x", t_int}]}]}}}]}},
       Res).
%% Mixing int and float elements in a single instantiation of the list
%% ADT must still fail to unify.
type_var_protection_fail_unify_test() ->
    Source =
        "module module_matching_lists\n\n"
        "type my_list 'x = Nil | Cons ('x, my_list 'x)\n\n"
        "let c () = "
        " let x = Cons (1.0, Nil) in "
        " Cons (1, x)",
    [Mod] = make_modules([Source]),
    ?assertMatch(
       {error, {cannot_unify, <<"module_matching_lists">>, 5, t_int, t_float}},
       type_modules([Mod])).
%% A `test` form whose body misuses a module function is itself a type
%% error, reported with module name and line.
type_error_in_test_test() ->
    Source =
        "module type_error_in_test\n\n"
        "let add x y = x + y\n\n"
        "test \"add floats\" = add 1.0 2.0",
    Expected =
        {error, {cannot_unify, <<"type_error_in_test">>, 5, t_int, t_float}},
    ?assertEqual(Expected, module_typ_and_parse(Source)).
%% At the moment we don't care what the type of the test expression is,
%% only that it type checks.
%% A well-typed `test` form is retained on the module's tests list.
typed_tests_test() ->
    Source =
        "module type_error_in_test\n\n"
        "let add x y = x + y\n\n"
        "test \"add floats\" = add 1 2",
    ?assertMatch(
       {ok, #alpaca_module{
               tests=[#alpaca_test{name=#a_str{line=5, val="add floats"}}]}},
       module_typ_and_parse(Source)).
%% A function returning a polymorphic list may be applied at several
%% element types, and applying it fixes the caller's return type.
polymorphic_list_as_return_value_test_() ->
    [fun() ->
             Code =
                 "module list_tests\n\n"
                 "let is_empty l =\n"
                 " match l with\n"
                 " [] -> true\n"
                 " | _ :: _ -> false\n\n"
                 "let a () = is_empty []\n\n"
                 "let b () = is_empty [:ok]\n\n"
                 "let c () = is_empty [1]",
             Res = module_typ_and_parse(Code),
             ?assertMatch({ok, _}, Res)
     end
     ,fun() ->
              Code =
                  "module poly_list_head\n\n"
                  "let head l =\n"
                  " match l with\n"
                  " a :: _ -> a\n\n"
                  "let foo () = head [1, 2]",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{},
                                    #alpaca_binding{
                                       type={t_arrow, [t_unit], t_int}}]}},
                 module_typ_and_parse(Code))
      end
    ].
%% A polymorphic option ADT returned from a function may be applied at
%% several instantiations.
polymorphic_adt_as_return_value_test() ->
    Source =
        "module option\n\n"
        "type option 't = Some 't | None\n\n"
        "let is_none opt =\n"
        " match opt with\n"
        " None -> true\n"
        " | Some _ -> false\n\n"
        "let a () = is_none None\n\n"
        "let b () = is_none (Some :a)\n\n"
        "let c () = is_none (Some 1)",
    Result = module_typ_and_parse(Source),
    ?assertMatch({ok, _}, Result).
%% Polymorphic maps: the empty-map pattern stays generic, and a value
%% extractor's type variable is fixed at the call site.
polymorphic_map_as_return_value_test_() ->
    [fun() ->
             Code =
                 "module empty_map\n\n"
                 "let is_empty m =\n"
                 " match m with\n"
                 " #{} -> true\n"
                 " | _ -> false\n\n"
                 "let a () = is_empty #{}\n\n"
                 "let b () = is_empty #{:a => 1}\n\n"
                 "let c () = is_empty #{1 => :a}\n\n",
             Res = module_typ_and_parse(Code),
             ?assertMatch({ok, _}, Res)
     end
     , fun() ->
               Code =
                   "module poly_map\n\n"
                   "let get_a m =\n"
                   " match m with\n"
                   " #{:a => a} -> a\n\n"
                   "let foo () = get_a #{:a => 1}",
               ?assertMatch(
                  {ok, #alpaca_module{
                          functions=[#alpaca_binding{
                                        type={t_arrow,
                                              [{t_map, t_atom, {unbound, A, _}}],
                                              {unbound, A, _}}},
                                     #alpaca_binding{
                                        type={t_arrow, [t_unit], t_int}
                                       }]
                         }},
                  module_typ_and_parse(Code))
       end
    ].
%% A tuple-destructuring helper stays polymorphic across callers with
%% different element types.
polymorphic_tuple_as_return_value_test() ->
    Source =
        "module poly_tuple\n\n"
        "let second t =\n"
        " match t with\n"
        " (_, x) -> x\n\n"
        "let a () = second (1, 2) \n\n"
        "let b () = second (:a, :b)",
    Result = module_typ_and_parse(Source),
    ?assertMatch({ok, _}, Result).
%% A spawned polymorphic receiver is pinned by its start arguments;
%% sending it a message of the wrong type must fail to unify.
polymorphic_process_as_return_value_test() ->
    Source =
        "module poly_process\n\n"
        "let behaviour state state_f =\n"
        " receive with\n"
        " x -> behaviour (state_f state x) state_f \n\n"
        "let a () = let f x y = x + y in spawn behaviour 1 f\n\n"
        "let b () = \n"
        " let f x y = x +. y in\n"
        " let p = spawn behaviour 1.0 f in\n"
        " let u = send :a p in\n"
        " p",
    Result = module_typ_and_parse(Source),
    ?assertMatch({error, {cannot_unify, <<"poly_process">>, 12, t_float, t_atom}},
                 Result).
%% Spawning a polymorphic receiver directly through the typer API: the
%% loop's receiver type is checked first, then a spawn of it at float
%% instantiation yields a float-typed pid.
polymorphic_spawn_test() ->
    LoopSrc =
        "let behaviour state state_f =\n"
        " receive with\n"
        " x -> behaviour (state_f state x) state_f",
    Env0 = new_env(),
    {ok, LoopExp} = alpaca_ast_gen:parse(alpaca_scanner:scan(LoopSrc)),
    {LoopType, _} = typ_of(Env0, LoopExp),
    ?assertMatch({t_receiver,
                  {unbound,t2,0},
                  {t_arrow,
                   [{unbound,t0,0},
                    {t_arrow,
                     [{unbound,t0,0},{unbound,t2,0}],
                     {unbound,t0,0}}],
                   t_rec}},
                 LoopType),
    Bindings = [{<<"behaviour">>, LoopType}|Env0#env.bindings],
    Module = #alpaca_module{functions=[LoopExp#alpaca_binding{type=LoopType}]},
    Env1 = Env0#env{bindings=Bindings, current_module=Module},
    {ok, SpawnExp} =
        alpaca_ast_gen:parse(
          alpaca_scanner:scan("let f x y = x +. y in spawn behaviour 1.0 f")),
    {SpawnType, _} = typ_of(Env1, SpawnExp),
    ?assertMatch({t_pid, t_float}, SpawnType).
%%% ### Process Interaction Typing Tests
%%%
%%% Things like receive, send, and spawn.
%% Parse a single module's source and run the typer over it, returning
%% {ok, TypedModule} on success or passing the typer's result through
%% unchanged otherwise.
module_typ_and_parse(Code) ->
    [Parsed] = make_modules([Code]),
    case type_modules([Parsed]) of
        {ok, [Typed]} -> {ok, Typed};
        Other -> Other
    end.
%% Receive expressions: the receiver's message type, unification of the
%% receive types across mutually calling functions, union message types
%% via an ADT, and receivers nested in let bindings.
receive_test_() ->
    [?_assertMatch({{t_receiver, t_int, t_int}, _},
                   top_typ_of(
                     "receive with "
                     " i -> i + 1")),
     ?_assertMatch({error, {cannot_unify, _, _, t_float, t_int}},
                   top_typ_of(
                     "receive with "
                     " i -> i + 1 "
                     "| f -> f +. 1")),
     fun() ->
             Code =
                 "module receive_adt\n\n"
                 "type my_union = float | int\n\n"
                 "let a () = receive with "
                 " i, is_integer i -> :received_int"
                 "| f, is_float f -> :received_float",
             ?assertMatch(
                {ok, #alpaca_module{
                        functions=[#alpaca_binding{
                                      name=#a_lab{line = 5, name = <<"a">>},
                                      type={t_receiver,
                                            #adt{name = <<"my_union">>},
                                            {t_arrow,
                                             [t_unit],
                                             t_atom}}
                                     }]}},
                module_typ_and_parse(Code))
     end,
     fun() ->
             Code =
                 "module union_receives\n\n"
                 "let f x = receive with "
                 " 0 -> :ok"
                 " | i -> g (i + x)\n\n"
                 "let g x = receive with "
                 " i -> f (i - x)",
             ?assertMatch(
                {ok, #alpaca_module{
                        functions=[#alpaca_binding{
                                      name=#a_lab{line = 3, name = <<"f">>},
                                      type={t_receiver,
                                            t_int,
                                            {t_arrow,
                                             [t_int],
                                             t_atom}}},
                                   #alpaca_binding{
                                      name=#a_lab{line = 5, name = <<"g">>},
                                      type={t_receiver,
                                            t_int,
                                            {t_arrow,
                                             [t_int],
                                             t_atom}}}
                                  ]}},
                module_typ_and_parse(Code))
     end,
     fun() ->
             Code =
                 "module union_for_two_receivers\n\n"
                 "type t = A | B\n\n"
                 "let a () = receive with "
                 " A -> b ()\n\n"
                 "let b () = receive with "
                 " B -> a () after 5 a()",
             ?assertMatch(
                {ok, #alpaca_module{
                        functions=[#alpaca_binding{
                                      name=#a_lab{line = 5, name = <<"a">>},
                                      type={t_receiver,
                                            #adt{name = <<"t">>},
                                            {t_arrow,
                                             [t_unit],
                                             t_rec}}},
                                   #alpaca_binding{
                                      name=#a_lab{line = 7, name = <<"b">>},
                                      type={t_receiver,
                                            #adt{name = <<"t">>},
                                            {t_arrow,
                                             [t_unit],
                                             t_rec}}}
                                  ]}},
                module_typ_and_parse(Code))
     end,
     fun() ->
             Code =
                 "module receive_in_let\n\n"
                 "let f x = "
                 " let y = receive with "
                 " i -> i "
                 " in let z = receive with "
                 " i -> i "
                 " in x + y + z",
             ?assertMatch(
                {ok, #alpaca_module{
                        functions=[#alpaca_binding{
                                      type={t_receiver,
                                            t_int,
                                            {t_arrow,
                                             [t_int],
                                             t_int}}}
                                  ]}},
                module_typ_and_parse(Code))
     end,
     fun() ->
             Code =
                 "module receive_in_let\n\n"
                 "let f x = "
                 " let y = receive with "
                 " i -> i "
                 " in let z = receive with "
                 " flt, is_float flt -> flt "
                 " in x + y + z",
             ?assertMatch({error, {cannot_unify, _, _, t_float, t_int}},
                          module_typ_and_parse(Code))
     end
    ].
%% Tests for typing `spawn`: spawning receivers yields `t_pid` of the
%% message type, receiver-ness composes through calls, and spawning a
%% non-receiver still produces a (message-less) pid.
spawn_test_() ->
[fun() ->
Code =
"module spawn_module\n\n"
"export f/1, start_f/1\n\n"
"let f x = receive with "
" i -> f (x + i)\n\n"
"let start_f init = spawn f init",
?assertMatch({ok, #alpaca_module{
functions=[#alpaca_binding{
name=#a_lab{line = 5,
name = <<"f">>},
type={t_receiver,
t_int,
{t_arrow,
[t_int],
t_rec}}},
#alpaca_binding{
name=#a_lab{line = 7,
name = <<"start_f">>},
type={t_arrow,
[t_int],
{t_pid, t_int}
}}]}},
module_typ_and_parse(Code))
end
%% A function calling a receiver becomes a receiver itself:
, fun() ->
Code =
"module spawn_composed_receiver\n\n"
"let recv () = receive with "
" i, is_integer i -> i\n\n"
"let not_recv () = (recv ()) + 2",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
name=#a_lab{line = 3,
name = <<"recv">>},
type={t_receiver,
t_int,
{t_arrow,
[t_unit],
t_int}}},
#alpaca_binding{
name=#a_lab{line = 5,
name = <<"not_recv">>},
type={t_receiver,
t_int,
{t_arrow,
[t_unit],
t_int}}}]}},
module_typ_and_parse(Code))
end
%% Mutually recursive receivers spawn as a pid of their union ADT:
, fun() ->
Code =
"module spawn_composed_pid\n\n"
"type t = A int | B int\n\n"
"let a x = let y = receive with "
" B xx -> b (x + xx)\n"
" | A xx -> xx + x in "
" a (x + y)\n\n"
"let b x = receive with "
" A xx -> a (x + xx)\n"
" | B xx -> b (xx + x)\n\n"
"let start_a init = spawn a init",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
name=#a_lab{name = <<"a">>},
type={t_receiver,
#adt{name = <<"t">>},
{t_arrow,
[t_int],
t_rec}}},
#alpaca_binding{
name=#a_lab{name = <<"b">>},
type={t_receiver,
#adt{name = <<"t">>},
{t_arrow,
[t_int],
t_rec}}},
#alpaca_binding{
name=#a_lab{name = <<"start_a">>},
type={t_arrow,
[t_int],
{t_pid, #adt{name = <<"t">>}}}}]}},
module_typ_and_parse(Code))
end
%% Constructors from two distinct ADTs in one receive cannot unify:
, fun() ->
Code =
"module unify_failure_for_spawn\n\n"
"type t = A int\n\n"
"type u = B int\n\n"
"let a x = let y = receive with "
" B xx -> b (x + xx)\n"
" | A xx -> xx + x in "
" a (x + y)\n\n"
"let b x = receive with "
" A xx -> a (x + xx)\n"
" | B xx -> b (xx + x)\n\n"
"let start_a init = spawn a [init]",
?assertMatch(
{error, {cannot_unify, _, _,
#adt{name = <<"u">>},
#adt{name = <<"t">>}}},
module_typ_and_parse(Code))
end
%% Spawning a non-receiver is still legal; f's type has no receiver wrapper:
, fun() ->
Code =
"module non_receiver_pid\n\n"
"export f/1, start_f/1\n\n"
"let f x = f (x + 1)\n\n"
"let start_f () = spawn f 0",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
name=#a_lab{name = <<"f">>},
type={t_arrow, [t_int], t_rec}},
#alpaca_binding{}]}},
module_typ_and_parse(Code))
end
].
%% Tests for typing `send`: message type must match the pid's receiver
%% type, the target must actually be a pid, and sending to a process
%% spawned from a non-receiver fails.
send_message_test_() ->
[fun() ->
Code =
"module send_example_1\n\n"
"let f x = receive with "
" i -> f (i + x)\n\n"
"let spawn_and_message_f () = "
" let p = spawn f 0 in "
" send 1 p",
?assertMatch(
{ok, #alpaca_module{}},
module_typ_and_parse(Code))
end
%% Sending a float to an int-typed pid must fail:
, fun() ->
Code =
"module send_to_bad_pid\n\n"
"let f x = receive with "
" i -> f (i + x)\n\n"
"let spawn_and_message_f () = "
" let p = spawn f 0 in "
" send 1.0 p",
?assertMatch(
{error, {cannot_unify, _, _, t_int, t_float}},
module_typ_and_parse(Code))
end
%% `send` to something that isn't a pid at all:
, fun() ->
Code =
"module send_to_non_pid\n\n"
"let f x = send 1 2",
?assertMatch(
{error, {cannot_unify, <<"send_to_non_pid">>, _, t_int, {t_pid, _}}},
module_typ_and_parse(Code))
end
%% A pid spawned from a non-receiver accepts no messages:
, fun() ->
Code =
"module send_to_non_receiver\n\n"
"let f x = f (x+1)\n\n"
"let start_f x = "
" let p = spawn f x in "
" send 1 p",
?assertMatch(
{error, {cannot_unify, _, _, undefined, t_int}},
module_typ_and_parse(Code))
end
].
%% Tests for records
%% Tests for record type inference: literal records, row-polymorphic
%% matching, shared row variables across argument and result, missing
%% fields, and records inside ADTs (including nested records).
record_inference_test_() ->
[?_assertMatch({#t_record{
members=[#t_record_member{name=x, type=t_int},
#t_record_member{name=y, type=t_float}],
row_var={unbound, _, _}}, _},
top_typ_of("{x=1, y=2.0}")),
%% Matching on one field infers a row-polymorphic record argument:
fun() ->
Code =
"let f r = match r with\n"
" {x = x1} -> x1 + 1",
?assertMatch({{t_arrow,
[#t_record{
members=[#t_record_member{
name=x,
type=t_int}],
row_var={unbound, _, _}}],
t_int}, _},
top_typ_of(Code))
end,
%% The same row variable (A) must appear in both argument and result:
fun() ->
Code =
"module record_inference_test_unify\n\n"
"let f r = match r with\n"
" {x = x1} -> (x1 * 2, r)\n\n"
"let g () = f {x=1, y=2}",
?assertMatch({ok,
#alpaca_module{
functions=[#alpaca_binding{
name=#a_lab{name = <<"f">>},
type={t_arrow,
[#t_record{
members=[#t_record_member{
name=x,
type=t_int}],
row_var={unbound, A, _}}],
{t_tuple,
[t_int,
#t_record{
members=[#t_record_member{
name=x,
type=t_int}],
row_var={unbound, A, _}}]}}},
#alpaca_binding{
name=#a_lab{name = <<"g">>},
type={t_arrow,
[t_unit],
{t_tuple,
[t_int,
#t_record{
members=[#t_record_member{
name=x,
type=t_int},
#t_record_member{
name=y,
type=t_int}],
row_var={unbound, _B, _}}]}}
}]
}},
module_typ_and_parse(Code))
end,
%% A record lacking a required field raises rather than returns an error:
?_assertException(error, {missing_record_field, undefined, 1, y},
top_typ_of("let f () = "
" let g r = match r with "
" {x=x1, y=y1} -> x1 + y1 in "
" g {x=1}")),
%% Records as ADT members; matching fixes the type variable to int:
fun() ->
Code =
"module record_inference_record_adt_test\n\n"
"type my_adt 'a = Adt | {x: int, a: 'a}\n\n"
"let f r = match r with \n"
" {x=x1, a=a1} -> x1 + a1\n"
" | Adt -> 0",
?assertMatch(
{ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{
name = <<"my_adt">>,
vars=[{"a", t_int}],
members=[#t_record{
members=[#t_record_member{
name=x,
type=t_int},
#t_record_member{
name=a,
type=t_int}],
row_var={unbound, _, _}},
{t_adt_cons, "Adt"}]
}],
t_int}}]}},
module_typ_and_parse(Code))
end,
fun() ->
%% The following uses a constructor argument only to force
%% typing to the ADT.
Code =
"module nested_record_adt_test\n\n"
"type nested = Nested {fname: string, "
" lname: string, "
" address: {street: string,"
" city: string}}\n\n"
"let fname r = match r with\n"
" Nested {address={street=s}}, is_string s -> s",
?assertMatch({ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{name = <<"nested">>}],
t_string}}]}},
module_typ_and_parse(Code))
end
].
%% In the sample test file record_map_match_order.alp the ordering of maps
%% and records in the definition of a type impacts the unification and thus
%% inference of a function's return type. This test is to check for multiple
%% orderings and regressions.
%%
%% The root error appears to have been arising because in unify_adt_and_poly
%% one of the target type members was unwrapped from its reference cell. Since
%% the unification was actually impacting the top level cells, we could re-cell
%% the type and not worry about throwing that away later.
%% Regression tests ensuring that the order in which a type's members are
%% declared (constructors, lists, maps, records) does not change whether
%% unification succeeds — only the order members appear in the result.
adt_ordering_test_() ->
[fun() ->
Code =
"module simple_adt_order_1\n\n"
"type t 'a = Some 'a | None\n\n"
"let f x = match x with\n"
" None -> :none\n"
" | Some a -> :an_a",
?assertMatch({ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{
vars=[{"a", {unbound, _A, _}}],
members=[{t_adt_cons, "None"},
{t_adt_cons, "Some"}]}],
t_atom}}]}},
module_typ_and_parse(Code))
end
%% Same type with constructor order flipped:
,fun() ->
Code =
"module simple_adt_order_2\n\n"
"type t 'a = None | Some 'a\n\n"
"let f x = match x with\n"
" None -> :none\n"
" | Some a -> :an_a",
?assertMatch({ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{
vars=[{"a", {unbound, _A, _}}],
members=[{t_adt_cons, "Some"},
{t_adt_cons, "None"}]}],
t_atom}}]}},
module_typ_and_parse(Code))
end
%% Union of list and map; the element variable A is shared throughout:
,fun() ->
Code =
"module list_and_map_order_1\n\n"
"type t 'a = list 'a | map atom 'a\n\n"
"let f x = match x with\n"
" a :: _ -> a\n"
" | #{:a => a} -> a\n\n"
"let g () = f #{:a => 1, :b => 2}\n\n"
"let h () = f [1, 2]",
?assertMatch(
{ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{
vars=[{"a", {unbound, A, _}}],
members=[{t_map, t_atom, {unbound, A, _}},
{t_list, {unbound, A, _}}]
}],
{unbound, A, _}}},
#alpaca_binding{type={t_arrow, [t_unit], t_int}},
#alpaca_binding{type={t_arrow, [t_unit], t_int}}]}},
module_typ_and_parse(Code))
end
%% Same union with map/list order flipped:
,fun() ->
Code =
"module list_and_map_order_2\n\n"
"type t 'a = map atom 'a | list 'a \n\n"
"let f x = match x with\n"
" #{:a => a} -> a\n"
" | a :: _ -> a\n\n"
"let g () = f #{:a => 1, :b => 2}\n\n"
"let h () = f [1, 2]",
?assertMatch(
{ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{
vars=[{"a", {unbound, A, _}}],
members=[{t_list, {unbound, A, _}},
{t_map, t_atom, {unbound, A, _}}]
}],
{unbound, A, _}}},
#alpaca_binding{type={t_arrow, [t_unit], t_int}},
#alpaca_binding{type={t_arrow, [t_unit], t_int}}]}},
module_typ_and_parse(Code))
end
%% Map-before-record union usable from both kinds of callers:
,fun() ->
Code =
"module record_and_map_order_1\n\n"
"type record_map_union 'a = map atom int | {x: int}\n\n"
"let get_x rec_or_map =\n"
" match rec_or_map with\n"
" #{:x => xx} -> xx\n"
" | {x = xx} -> xx\n\n"
"let check_map () = get_x #{:x => 1}\n\n"
"let check_record () = get_x {x=2}",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{}],
t_int}},
#alpaca_binding{type={t_arrow, [t_unit], t_int}},
#alpaca_binding{type={t_arrow, [t_unit], t_int}}]}},
module_typ_and_parse(Code))
end
%% Record-before-map union; each call site fixes 'a independently:
,fun() ->
Code =
"module record_and_map_order_2\n\n"
"type record_map_union 'a = {x: 'a} | map atom 'a\n\n"
"let get_x rec_or_map =\n"
" match rec_or_map with\n"
" #{:x => xx} -> xx\n"
" | {x = xx} -> xx\n\n"
"let check_map () = get_x #{:x => 1}\n\n"
"let check_record () = get_x {x=:b}",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{vars=[{"a", {unbound, A, _}}]}],
{unbound, A, _}}},
#alpaca_binding{type={t_arrow, [t_unit], t_int}},
#alpaca_binding{type={t_arrow, [t_unit], t_atom}}]}},
module_typ_and_parse(Code))
end
].
%% Tests for `throw`: a throwing clause unifies with any sibling clause
%% type, but two non-throwing clauses with different types still fail,
%% and `throw` arguments are themselves type checked.
unify_with_error_test_() ->
[fun() ->
Code =
"module unify_with_error_test\n\n"
"let throw_on_zero x = match x with "
" 0 -> throw :zero"
" | _ -> x * 2",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
type={t_arrow, [t_int], t_int}}]}},
module_typ_and_parse(Code))
end
%% The :one clause (atom) conflicts with the int clause regardless of throw:
, fun() ->
Code =
"module unify_with_error_test\n\n"
"let should_not_unify x = match x with "
" 0 -> throw :zero"
" | 1 -> :one "
" | _ -> x * 2",
?assertMatch(
{error, {cannot_unify, _, _, t_int, t_atom}},
module_typ_and_parse(Code))
end
%% Unbound variables inside a thrown expression are still reported:
, fun() ->
Code =
"module m\n"
"let f () = throw (x, :a)",
?assertMatch({error, {bad_variable_name, <<"m">>, 2, <<"x">>}},
module_typ_and_parse(Code))
end
].
%% Tests for pattern matching directly in function argument positions:
%% multiple clause versions of one binding must unify to a single type.
function_argument_pattern_test_() ->
[fun() ->
Code =
"module fun_pattern\n\n"
"export f/1\n\n"
"let f 0 = :zero\n\n"
"let f x = :not_zero",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
bound_expr=#alpaca_fun{versions=[_, _]},
type={t_arrow, [t_int], t_atom}}]}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module fun_pattern_with_adt\n\n"
"type option 'a = None | Some 'a\n\n"
%% parens needed so the parser doesn't assume the _
%% belongs to the type constructor:
"let my_map (None) _ = None\n\n"
"let my_map Some a f = Some (f a)",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
bound_expr=#alpaca_fun{versions=[_, _]},
type={t_arrow,
[#adt{vars=[{_, {unbound, A, _}}]},
{t_arrow,
[{unbound, A, _}],
{unbound, B, _}}],
#adt{vars=[{_, {unbound, B, _}}]}}}]}},
module_typ_and_parse(Code))
end
%% Same map but with the function argument first:
, fun() ->
Code =
"module fun_pattern_with_adt\n\n"
"type option 'a = None | Some 'a\n\n"
"let my_map _ None = None\n\n"
"let my_map f Some a = Some (f a)",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
bound_expr=#alpaca_fun{versions=[_, _]},
type={t_arrow,
[{t_arrow,
[{unbound, A, _}],
{unbound, B, _}},
#adt{vars=[{_, {unbound, A, _}}]}],
#adt{vars=[{_, {unbound, B, _}}]}}}]}},
module_typ_and_parse(Code))
end
].
%% Tests that a polymorphic ADT-consuming function (my_map over option)
%% rejects bare values — ints, record fields, tuple elements — and only
%% accepts properly wrapped option values.
constrain_polymorphic_adt_funs_test_() ->
[
%% Make sure my_map/2 with an explicit integer argument fails to type:
fun() ->
Code =
"module fun_pattern_with_adt\n\n"
"type option 'a = None | Some 'a\n\n"
"let my_map _ None = None\n\n"
"let my_map f Some a = Some (f a)\n\n"
"let doubler x = x * x\n\n"
"let foo () = my_map doubler 2",
?assertMatch(
{error, {cannot_unify, _, _, #adt{}, t_int}},
module_typ_and_parse(Code))
end
%% A bare int pulled from a record field is equally rejected:
, fun() ->
Code =
"module fun_pattern_with_adt\n\n"
"type option 'a = None | Some 'a\n\n"
"let my_map _ None = None\n\n"
"let my_map f Some a = Some (f a)\n\n"
"let doubler x = x * x\n\n"
"let get_x {x=x} = x\n\n"
"let foo () = "
" let rec = {x=1, y=2} in "
" my_map doubler (get_x rec)",
?assertMatch(
{error, {cannot_unify, _, _, #adt{}, t_int}},
module_typ_and_parse(Code))
end
%% Same but extracting the field via an explicit match expression:
, fun() ->
Code =
"module fun_pattern_with_adt\n\n"
"type option 'a = None | Some 'a\n\n"
"let my_map _ None = None\n\n"
"let my_map f Some a = Some (f a)\n\n"
"let doubler x = x * x\n\n"
"let get_x rec = match rec with {x=x} -> x\n\n"
"let foo () = "
" let rec = {x=1, y=2} in "
" my_map doubler (get_x rec)",
?assertMatch(
{error, {cannot_unify, _, _, #adt{}, t_int}},
module_typ_and_parse(Code))
end
, fun() ->
%% If my_map depends on an option, when `third` always returns
%% a bare integer we should get a type error. `third` here is
%% obviously not very useful, I'm just trying to isolate the
%% typing issue to records.
Code =
"module fun_pattern_with_adt\n\n"
"type option 'a = None | Some 'a\n\n"
"type tuple_or_triple 'a 'b 'c = ('a, 'b) | ('a, 'b, 'c)\n\n"
"let my_map _ None = None\n\n"
"let my_map f Some a = Some (f a)\n\n"
"let third (_, _) = 0\n\n"
"let third (_, _, t) = t\n\n"
"let doubler x = x * x\n\n"
"let foo () = "
" let tup = (1, 2) in "
" my_map doubler (third tup)",
?assertMatch(
{error, {cannot_unify, _, _, #adt{}, t_int}},
module_typ_and_parse(Code))
end
%% Wrapping the record field in Some/None makes everything type-check:
, fun() ->
Code =
"module fun_pattern_with_adt\n\n"
"type option 'a = None | Some 'a\n\n"
"let my_map _ None = None\n\n"
"let my_map f Some a = Some (f a)\n\n"
"let doubler x = x * x\n\n"
"let get_x {x=x} = Some x\n\n"
"let get_x _ = None\n\n"
"let foo () = "
" let rec = {x=1, y=2} in "
" my_map doubler (get_x rec)",
?assertMatch(
{ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[{t_arrow, [{unbound, A, _}], {unbound, B, _}},
#adt{vars=[{_, {unbound, A, _}}]}],
#adt{vars=[{_, {unbound, B, _}}]}}},
#alpaca_binding{
type={t_arrow, [t_int], t_int}},
#alpaca_binding{
type={t_arrow,
[#t_record{
members=[#t_record_member{
name=x,
type={unbound, C, _}}]}],
#adt{vars=[{_, {unbound, C, _}}]}}},
#alpaca_binding{
type={t_arrow,
[t_unit],
#adt{vars=[{"a", t_int}]}}}]
}},
module_typ_and_parse(Code))
end
].
%% Tests that bindings with the same name but different arities coexist,
%% can be exported individually, and that referencing an arity that was
%% never defined is a not_found error.
different_arity_test_() ->
[fun() ->
Code =
"module arity_test\n\n"
"let add x = x + x\n\n"
"let add x y = x + y",
?assertMatch({ok, #alpaca_module{}}, module_typ_and_parse(Code))
end
, fun() ->
Code =
"module arity_test\n\n"
"export add/2\n\n"
"let add x = x + x\n"
"let add x y = x + y",
?assertMatch({ok, #alpaca_module{}}, module_typ_and_parse(Code))
end
%% add/2 is never defined, only add/1:
, fun() ->
Code =
"module arity_test\n\n"
"export add/1\n\n"
"let add x = x + x\n"
"let f x y = add x y",
?assertMatch(
{error, {not_found, _, <<"add">>, 2}},
module_typ_and_parse(Code))
end].
%% Tests for types that embed other exported types across modules, and
%% for the rule that a type's constructors are only usable when that
%% specific type has been imported.
types_in_types_test_() ->
AstCode =
"module types_in_types\n\n"
"export format/1\n\n"
"export_type label,expr,ast\n\n"
"type label = Symbol string\n\n"
"type expr = label | Apply (expr, expr) "
"| Match {e: expr, clauses: list {pattern: expr, result: expr}}\n\n"
"type ast = expr | Fun {name: label, arity: int, body: expr}\n\n",
[fun() ->
%% Without importing `label` we should be fine if we're not
%% referencing its constructor directly:
FormatterCode =
"module formatter\n\n"
"import_type types_in_types.expr\n\n"
"import_type types_in_types.ast\n\n"
"let format ast_node = format 0 ast_node\n\n"
"let format d Match {e=e, clauses=cs} = :match",
[M1, M2] = make_modules([AstCode, FormatterCode]),
?assertMatch(
{ok, [#alpaca_module{}, #alpaca_module{}]},
type_modules([M1, M2]))
end
, fun() ->
%% Importing `label` and not using it should be fine:
FormatterCode =
"module formatter\n\n"
"import_type types_in_types.label\n\n"
"import_type types_in_types.expr\n\n"
"import_type types_in_types.ast\n\n"
"let format ast_node = format 0 ast_node\n\n"
"let format d Match {e=e, clauses=cs} = :match",
[M1, M2] = make_modules([AstCode, FormatterCode]),
?assertMatch(
{ok, [#alpaca_module{}, #alpaca_module{}]},
type_modules([M1, M2]))
end
, fun() ->
%% NOT importing `label` and then trying to use its type
%% constructor should yield an error:
FormatterCode =
"module formatter\n\n"
"import_type types_in_types.expr\n\n"
"import_type types_in_types.ast\n\n"
"let format ast_node = format 0 ast_node\n\n"
"let format d Match {e=e, clauses=cs} = :match\n\n"
"let format d Symbol _ = :label\n\n"
"let foo () = format 0 Match {e=Symbol \"x\", clauses=[]}",
[M1, M2] = make_modules([AstCode, FormatterCode]),
?assertMatch(
{error, {bad_constructor, <<"formatter">>, 11, "Symbol"}},
type_modules([M1, M2]))
end
, fun() ->
Ast =
"module types_in_types\n\n"
"export format/1\n\n"
"export_type label,expr,ast\n\n"
"type label = Symbol {name: string}\n\n"
"type expr = label | Apply (expr, expr) "
"| Match {e: expr, clauses: list {pattern: expr, result: expr}}\n\n"
"type ast = expr | Fun {name: label, arity: int, body: expr}\n\n",
%% Importing `label` should let us use the constructor:
FormatterCode =
"module formatter\n"
"import_type types_in_types.label\n"
"import_type types_in_types.expr\n"
"import_type types_in_types.ast\n\n"
"let format ast_node = format 0 ast_node\n\n"
"let format d Match {e=e, clauses=cs} = :match\n\n"
"let format d Symbol _ = :label\n\n"
"let foo () = format 0 Match {e=Symbol {name=\"x\"}, clauses=[]}",
[M1, M2] = make_modules([Ast, FormatterCode]),
?assertMatch(
{ok, [#alpaca_module{}, #alpaca_module{}]},
type_modules([M1, M2]))
end
].
%% Tests for typing applications of arbitrary expressions: a literal is
%% not applicable, but the result of a curried lookup is.
expression_typing_test_() ->
[%% `1` is not a function from an int to something else:
?_assertMatch(
{error, {cannot_unify, _, _, t_int, {t_arrow, [t_int], _}}},
top_typ_of("1 2")),
%% Applying the function returned by `g ()` types all the way through:
?_assertMatch({{t_arrow, [t_unit], t_int}, _},
top_typ_of(
"let g () = "
"let f x = x + x in "
"let g () = f in "
"(g ()) 2"
))
].
%% Tests for module-qualified type and constructor references (m.a, m.A),
%% including that same-named types from different modules don't unify and
%% that references to unknown modules are reported.
module_qualified_types_test_() ->
[fun() ->
Code1 = "module m type a = int",
Code2 = "module n type b = m.a",
[M1, M2] = make_modules([Code1, Code2]),
?assertMatch(
{ok, [#alpaca_module{}, #alpaca_module{}]},
type_modules([M1, M2]))
end
%% Matching on a qualified constructor specializes the foreign type:
, fun() ->
Code1 = "module m export_type a type a 'a = A 'a",
Code2 =
"module n "
"type b 'x = m.a 'x "
"let f m.A a = a + 1",
[M1, M2] = make_modules([Code1, Code2]),
?assertMatch(
{ok,
[#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{
name = <<"a">>,
vars=[{_, t_int}]}],
_}}]},
#alpaca_module{}]},
type_modules([M1, M2]))
end
%% n.a and m.a share a name but are distinct types:
, fun() ->
Code1 = "module m export_type a type a 'a = A 'a",
Code2 =
"module n "
"type a 'a = A 'a "
"let f A x = x + 1 "
"let other_a x = m.A x "
"let should_fail () = f (other_a 1)",
[M1, M2] = make_modules([Code1, Code2]),
?assertMatch({error,
{cannot_unify, _, _,
#adt{name = <<"a">>, module= <<"n">>},
#adt{name = <<"a">>, module= <<"m">>}}},
type_modules([M1, M2]))
end
%% Referencing a constructor in a module that doesn't exist:
, fun() ->
Code =
"module m "
"let f n.A x = x + 1",
[M] = make_modules([Code]),
?assertMatch({error, {bad_module, <<"m">>, 1, <<"n">>}}, type_modules([M]))
end
].
%% Typing a module must not leave any stray processes running in the VM.
%% We snapshot the process count before typing and poll afterwards until
%% the count returns to that baseline (or the retry budget runs out).
no_process_leak_test() ->
Source =
"module no_leaks\n"
"let add a b = a + b",
[Mod] = make_modules([Source]),
Baseline = length(erlang:processes()),
?assertMatch({ok, _}, type_modules([Mod])),
Final = wait_for_processes_to_die(Baseline, 10),
?assertEqual(Baseline, Final).
%% Poll until the VM's process count drops back to Expected, retrying up
%% to AttemptsLeft times with a 10ms pause between checks.  Returns the
%% matched count on success, or whatever count was last observed once the
%% attempts are exhausted (letting the caller's assertEqual report it).
wait_for_processes_to_die(_Expected, 0) ->
length(erlang:processes());
wait_for_processes_to_die(Expected, AttemptsLeft) ->
Current = length(erlang:processes()),
if
Current =:= Expected ->
Expected;
true ->
timer:sleep(10),
wait_for_processes_to_die(Expected, AttemptsLeft - 1)
end.
%% Tests for automatic currying of partial applications: a call with too
%% few arguments yields a function type, regardless of whether the callee
%% is defined before or after the call site; two candidate arities for
%% the curried call is ambiguous and must be rejected.
curry_applications_test_() ->
[fun() ->
Code =
"module curry\n"
"let add a b = a + b\n\n"
"let main x = add x\n",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[t_int, t_int], t_int}
},
#alpaca_binding{
type={t_arrow,
[t_int], {t_arrow, [t_int], t_int}}
}
]
}
},
module_typ_and_parse(Code))
end,
%% Callee defined after the curried call site:
fun() ->
Code =
"module curry\n"
"let main x = add x\n"
"let add a b = a + b\n\n",
?assertMatch(
{ok, #alpaca_module{
functions=[
#alpaca_binding{
type={t_arrow,
[t_int], {t_arrow, [t_int], t_int}}
},
#alpaca_binding{
type={t_arrow,
[t_int, t_int], t_int}
}
]
}
},
module_typ_and_parse(Code))
end,
%% Both add/2 and add/3 could satisfy `add x` — ambiguous:
fun() ->
Code =
"module curry\n"
"let main x = add x\n"
"let add a b = a + b\n\n"
"let add a b c = a + b + c\n\n",
?assertMatch(
{error, {ambiguous_curry, _, _, _}},
module_typ_and_parse(Code))
end
].
%% Currying also applies to let-bound local functions: `f 10` with a
%% two-argument local `f` yields `fn int -> int`.
local_curry_test() ->
Code =
"module local_curry\n"
"let main () = \n"
" let f x y = x * y in\n"
" f 10",
?assertMatch(
{ok, #alpaca_module{
functions=[
#alpaca_binding{
type={t_arrow,
[t_unit], {t_arrow, [t_int], t_int}}
}
]
}
},
module_typ_and_parse(Code)).
%% For issue #113, we want to be able to define a polymorphic type and use it
%% as a member in another type but with a concrete type rather than variables,
%% e.g.
%%
%% type option 'a = Some 'a | None
%% type int_option = option 'a
%%
%% Tests for issue #113: instantiating a polymorphic type with a concrete
%% type parameter (e.g. `opt int`) inside another type, both locally and
%% across module boundaries (qualified and imported forms).
concrete_type_parameters_test_() ->
[fun() ->
Code =
"module concrete_option\n"
"type opt 'a = Some 'a | None\n"
"type uses_opt = Uses opt int\n"
"let f () = Uses Some 1",
?assertMatch({ok, #alpaca_module{}},
module_typ_and_parse(Code))
end,
%% The concrete parameter pins 'a to int, so a float payload fails:
fun() ->
Code =
"module should_not_unify "
"type opt 'a = Some 'a | None "
"type uses_opt = Uses opt int "
"let f () = Uses Some 1.0",
?assertMatch({error, {cannot_unify, _, _, t_int, t_float}},
module_typ_and_parse(Code))
end,
%% The same concrete instantiation via qualified and imported types:
fun() ->
Option =
"module option "
"export_type option "
"type option 'a = Some 'a | None",
UsesOption =
"module uses_option "
"type int_opt = IntOpt option.option int "
"let make_opt x = IntOpt option.Some x",
ImportsOption =
"module imports_option \n"
"import_type option.option \n"
"type int_opt = IntOpt option int \n"
"let make_opt x = IntOpt Some x",
Mods1 = make_modules([Option, UsesOption]),
Mods2 = make_modules([Option, ImportsOption]),
?assertMatch({ok,
[#alpaca_module{
name= <<"uses_option">>,
types=[#alpaca_type{
members=[#alpaca_constructor{
arg=#alpaca_type{
name={_, _, <<"option">>},
module= <<"option">>,
vars=[{_, t_int}]
}}]}],
functions=[#alpaca_binding{
type={t_arrow,
[t_int],
#adt{
name= <<"int_opt">>,
module= <<"uses_option">>
}}
}]
},
#alpaca_module{}]},
type_modules(Mods1)),
?assertMatch({ok,
[#alpaca_module{
name= <<"imports_option">>,
types=[#alpaca_type{
members=[#alpaca_constructor{
arg=#alpaca_type{
name={_, _, <<"option">>},
vars=[{_, t_int}]
}}]}],
functions=[#alpaca_binding{
type={t_arrow,
[t_int],
#adt{
name= <<"int_opt">>,
module= <<"imports_option">>
}}
}]
},
#alpaca_module{}]},
type_modules(Mods2))
end,
%% From @danabr's example on PR #116
fun() ->
Code =
"module int_opt \n"
"type opt 'a = Some 'a | None \n"
"type int_opt = opt int \n"
"type indirect = Indirect int_opt \n"
"let deconstruct (Indirect opt) = \n"
"match opt with \n"
"(Some 1) -> :blah \n",
?assertMatch({ok,
#alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[#adt{name= <<"indirect">>}],
t_atom}
}]
}},
module_typ_and_parse(Code))
end
].
%% Tests for type privacy: unexported types cannot be imported, and a
%% private type's constructors must not be usable from other modules,
%% even indirectly through an exported wrapper type.
ensure_private_types_cant_import_test_() ->
[fun() ->
PrivateOptions =
"module private_option \n"
"type opt 'a = Some 'a | None \n",
ImportOption =
"module import_option \n"
"import_type private_option.opt",
UsesOption =
"module uses_option \n"
"type nested_int_opt = Nested private_option.opt int \n"
"let nest x = Nested x",
Mods1 = make_modules([PrivateOptions, ImportOption]),
Mods2 = make_modules([PrivateOptions, UsesOption]),
?assertMatch({error, {unexported_type, _, _, #a_lab{line=2, name= <<"opt">>}}},
type_modules(Mods1)),
%% TODO: this passes but should not, because if a type is private
%% to a module (not exported), it should not be accessible
%% at all.
%%
%% Private types further complicate things if they're used
%% by an exported type. We don't check for that now.
%% _Abstract_ types instead of private might be more
%% consistent.
?assertMatch({ok, [_, _]}, type_modules(Mods2))
end
%% Make sure exporting a type containing a private type doesn't leak the
%% private type's constructors:
, fun() ->
Exported =
"module exported \n"
"export_type b \n"
"type a 'a = A 'a \n"
"type b = B a int",
UsesB =
"module uses_b \n"
"import_type exported.b \n"
"let use_a x = A x",
Mods = make_modules([Exported, UsesB]),
?assertMatch({error, {bad_constructor, <<"uses_b">>, 3, "A"}},
type_modules(Mods))
end
%% Make sure we can't access constructors in private types:
, fun() ->
PrivateType =
"module private_type \n"
"type a = A int \n",
UsesA =
"module uses_a \n"
"let f x = private_type.A x",
Mods = make_modules([PrivateType, UsesA]),
?assertMatch({error, {bad_constructor, <<"uses_a">>, 2, "A"}},
type_modules(Mods))
end
].
%% From issue #91, a type's members must all exist, both concrete types and
%% type variables.
%% From issue #91: every type and type variable referenced in a type
%% definition must exist.  Exercises unknown type names in every
%% position (alias, constructor arg, tuple, record, list, map), unbound
%% type variables, self-reference (allowed), and missing modules.
error_on_missing_types_test_() ->
[fun() ->
Code =
"module m \n"
"type t = b",
?assertMatch({error, {unknown_type, <<"m">>, 2, <<"b">>}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t 'a = A 'a \n"
"type u = t b",
?assertMatch({error, {unknown_type, <<"m">>, 3, <<"b">>}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t = T b",
?assertMatch({error, {unknown_type, <<"m">>, 2, <<"b">>}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t = (int, b)",
?assertMatch({error, {unknown_type, <<"m">>, 2, <<"b">>}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t = {x: b}",
?assertMatch({error, {unknown_type, <<"m">>, 2, <<"b">>}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t = T int | list b",
?assertMatch({error, {unknown_type, <<"m">>, 2, <<"b">>}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t = map atom c",
?assertMatch({error, {unknown_type, <<"m">>, 2, <<"c">>}},
module_typ_and_parse(Code))
end
%% Type variables must be declared on the type's left-hand side:
, fun() ->
Code =
"module m \n"
"type t = T 'a",
?assertMatch({error, {bad_variable, 2, "a"}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t = T (int, float, 'c)",
?assertMatch({error, {bad_variable, 2, "c"}},
module_typ_and_parse(Code))
end
%% Self-referencing types are fine:
, fun() ->
Code =
"module m \n"
"type t 'a = map atom (t 'a)",
?assertMatch({ok, #alpaca_module{}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"type t 'a = n.u",
?assertMatch({error, {bad_module, <<"m">>, 2, <<"n">>}},
module_typ_and_parse(Code))
end
, fun() ->
M1 = "module m",
M2 = "module n \n type t = m.a",
Mods = make_modules([M1, M2]),
?assertMatch({error, {unknown_type, <<"n">>, 2, <<"a">>}},
type_modules(Mods))
end
].
%% Tests for record update/extension syntax `{new_fields | r}`: adding
%% fields, overwriting a field with a new type, chained updates, and
%% missing-field errors when constructing via a type constructor.
record_transform_test_() ->
[?_assertMatch({#t_record{members=[#t_record_member{
name=a,
type=t_int},
#t_record_member{
name=x,
type=t_int},
#t_record_member{
name=y,
type=t_int}], row_var={unbound, _, _}}, _},
top_typ_of("let z = {a=3} in {x=1, y=2 | z}")),
?_assertMatch({#t_record{members=[#t_record_member{
name=x,
type=t_string},
#t_record_member{
name=y,
type=t_int}], row_var={unbound, _, _}}, _},
top_typ_of("{x=\"hello\" | {y=2}}")),
%% Overwriting an existing field may change its type:
?_assertMatch({#t_record{members=[#t_record_member{
name=x,
type=t_float}]}, _},
top_typ_of("{x=1.0 | {x=1}}")),
fun() ->
Code =
"module update_a_record \n"
"let add_y y_value r = \n"
" match r with \n"
" {x=1}\n -> {y=y_value\n | r}",
?assertMatch({ok, #alpaca_module{}},
module_typ_and_parse(Code))
end
%% Extension works without a preceding match on the record:
, fun() ->
Code =
"module update_record_no_match \n"
"let add_y y_value r = \n"
" {y=y_value | r} \n"
"let f () = add_y 5 {x=2.2}",
?assertMatch({ok,
#alpaca_module{
functions=[_,
#alpaca_binding{
type={t_arrow,
[t_unit],
#t_record{
members=[#t_record_member{
name=x,
type=t_float},
#t_record_member{
name=y,
type=t_int}]}}}]}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"let opaque_add rx = \n"
" match rx with \n"
" {x=_} -> {a=2, b=\"three\" | rx} \n"
"let f () = opaque_add {x=1, b=3}",
?assertMatch(
{ok,
#alpaca_module{
functions=[_,
#alpaca_binding{
type={t_arrow,
[t_unit],
#t_record{
members=[#t_record_member{},
#t_record_member{},
#t_record_member{}]
}}}]}},
module_typ_and_parse(Code))
end
%% Chained updates: the final x type is whatever the last update wrote:
, fun() ->
Code =
"module m \n"
"let x_int rec = {x=1 | rec} \n"
"let x_float rec = {x=1.0, y=\"b\" | rec} \n"
"let x_string rec = {x=\"one!\" | rec} \n"
"let main () = x_string (x_float (x_int {z=5}))",
?assertMatch(
{ok,
#alpaca_module{
functions=[_, _, _,
#alpaca_binding{
type={t_arrow,
[t_unit],
#t_record{
members=[#t_record_member{
name=x,
type=t_string
},
#t_record_member{
name=y,
type=t_string
},
#t_record_member{
name=z,
type=t_int
}]}}}]}},
module_typ_and_parse(Code))
end
%% A constructor's record argument must supply every declared field:
, fun() ->
Code =
"module m \n"
"type t = T {x: int, y: float} \n"
"let main () = T {x=1}",
?assertMatch(
{error, {missing_record_field, <<"m">>, 3, y}},
module_typ_and_parse(Code))
end
].
%% Tests for records used as type-constructor arguments: parameter
%% inference through the record, loss of the row variable when a record
%% round-trips through a constructor, and field type mismatches.
records_and_type_constructors_test_() ->
[fun() ->
Code =
"module m \n"
"type t 'a = T {x: int, y: 'a} \n"
"let main () = T {x=1, y=\"hello\"}",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
type={t_arrow,
[_],
#adt{vars=[{"a", t_string}]}}}
]}},
module_typ_and_parse(Code))
end
%% This test illustrates something I'm not sure we want at the moment.
%% Because an instance of a type constructor is used to instantiate a
%% type arrow rather than carry type information itself, using a
%% constructor with a record loses the row variable information as seen
%% in extract_rec/1's type.
, fun() ->
Code =
"module m \n"
"type t = T {x: int} \n"
"let main () = T {x=1, y=\"hello\"} \n"
"let extract_rec () = match (main ()) with \n"
" T rec -> rec",
?assertMatch(
{ok, #alpaca_module{
functions=[_,
#alpaca_binding{
type={t_arrow,
[_],
#t_record{
members=[#t_record_member{
name=x,
type=t_int}]
}}}
]}},
module_typ_and_parse(Code))
end
%% Declared field type int rejects a float value:
, fun() ->
Code =
"module m \n"
"type t = T {x: int} \n"
"let main () = T {x=1.0}",
?assertMatch(
{error, {cannot_unify, _, _, t_int, t_float}},
module_typ_and_parse(Code))
end
].
%% Tests for anonymous function literals (`fn x -> ...`): top-level,
%% let-bound, nested inside higher-order calls, and applied locally.
literal_fun_test_() ->
[?_assertMatch({{t_arrow, [t_int], t_int}, _},
top_typ_of("fn x -> x + 1"))
, ?_assertMatch({{t_arrow, [t_int], t_int}, _},
top_typ_of("let f = fn x -> x + 1"))
%% A fun literal nested inside another fun literal passed to map:
, fun () ->
Code =
"module m "
"let map f [] = [] "
"let map f (h :: t) = (f h) :: (map f t) "
"let nested_fun () ="
" map (fn x -> (fn y -> y + 1) x) [1, 2, 3]",
?assertMatch(
{ok, #alpaca_module{
functions=[_,
#alpaca_binding{
type={t_arrow, [t_unit], {t_list, t_int}}}]}},
module_typ_and_parse(Code))
end
, fun() ->
Code =
"module m \n"
"let f () = \n"
" let g = fn x -> x + 1 in \n"
" g 2",
?assertMatch(
{ok, #alpaca_module{
functions=[#alpaca_binding{
type={t_arrow, [t_unit], t_int}}]}},
module_typ_and_parse(Code))
end
].
%% A `val` signature of fn string -> string must fail to unify with a
%% body (`x * x`) that forces t_int.
bad_signature_unify_fail_test() ->
    Code =
        "module sig \n"
        "val double : fn string -> string\n"
        "let double x = x * x",
    ?assertMatch({error,{cannot_unify, <<"sig">>, 2, t_int, t_string}},
                 module_typ_and_parse(Code)).
%% A signature that specialises a polymorphic identity to fn int -> int
%% must make a later string application fail to unify.
poly_specialization_unify_fail_test() ->
    Code =
        "module sig \n"
        "-- Without the sig this types as `fn 'a -> 'a`\n"
        "val intsOnly : fn int -> int\n"
        "let intsOnly x = x\n"
        "-- Without the type signature, the below would succeed\n"
        "let tryWithString () = intsOnly \"hello world\"",
    ?assertMatch({error,{cannot_unify, <<"sig">>, 6, t_int, t_string}},
                 module_typ_and_parse(Code)).
%% A `val` signature constrains a `beam` FFI wrapper: calling it with an
%% atom type-checks while calling it with an int fails to unify.
beam_with_signatures_test() ->
    Code =
        "module sig \n"
        "-- signatures with 'beam' FFI allows us to guard against bad input\n"
        "val atomToBinary : fn atom -> binary\n"
        "let atomToBinary b = beam :erlang :atom_to_binary [b, :utf8] with\n"
        "  | res, is_binary res -> res\n"
        "let main () = \n",
    Good = "atomToBinary :hello",
    Fail = "atomToBinary 42",
    ?assertMatch({error,{cannot_unify, <<"sig">>, 7, t_atom, t_int}},
                 module_typ_and_parse(Code ++ Fail)),
    ?assertMatch({ok, _},
                 module_typ_and_parse(Code ++ Good)).
%% Referencing an undeclared type variable in a signature yields
%% unknown_type_var — currently (incorrectly) nested inside a second
%% error tuple, see the TODO below.
missing_type_var_in_signature_test() ->
    Code =
        "module sig \n"
        "--type bob = Hello 'a\n"
        "val missingTypeVar : fn 'a -> ('a, 'a)\n"
        "let missingTypeVar x = (x, x)\n"
        "let main () = missingTypeVar 10",
    %% TODO: this nested error is wrong.  Should be an exception, not value.
    ?assertMatch({error, {error, {unknown_type_var, <<"sig">>, 3, "a"}}},
                 module_typ_and_parse(Code)).
%% A type constructor applied to a nested application `opt (opt int)`
%% must produce correctly nested #alpaca_type{} members and a function
%% returning the outer ADT.
nested_adt_test() ->
    Code =
        "module nested_adt\n"
        "type opt 'a = None | Some 'a\n"
        "type w = W opt (opt int)\n"
        "let make_w () = W (Some (Some 1))\n",
    ?assertMatch(
       {ok, #alpaca_module{
               types=[#alpaca_type{
                         name={type_name, _, <<"w">>},
                         vars=[],
                         members=[#alpaca_constructor{
                                     arg=#alpaca_type{
                                            name={type_name, _, <<"opt">>},
                                            vars=[{{type_var, _, _},
                                                   #alpaca_type{
                                                      name={type_name, _, <<"opt">>},
                                                      vars=[{{type_var, _, _}, t_int}]
                                                     }}]
                                           }}]},
                      #alpaca_type{name={type_name, _, <<"opt">>}}],
               functions=[#alpaca_binding{
                             type={t_arrow, [t_unit], #adt{name = <<"w">>}}}]}},
       module_typ_and_parse(Code)).
%% EUnit generator: lambdas passed directly (or via `let`) to a
%% higher-order `apply` with an explicit polymorphic signature.
direct_lambda_application_test_() ->
    Prelude =
        "module lambdas\n"
        "val apply 'a 'b : fn (fn 'a -> 'b) 'a -> 'b\n"
        "let apply f x = f x\n",
    [fun() ->
        Code = Prelude ++
               "let useLambda x =\n"
               "  apply (fn y -> x + y) 10",
        ?assertMatch({ok, _}, module_typ_and_parse(Code))
     end,
     fun() ->
        Code = Prelude ++
               "let useLambda x =\n"
               "  apply (fn (_, y) -> x + y) (:ignored, 10)",
        ?assertMatch({ok, _}, module_typ_and_parse(Code))
     end,
     fun() ->
        Code = Prelude ++
               "let boundLambda x =\n"
               "  let lambda = (fn y -> x + y) in\n"
               "  apply lambda 10",
        ?assertMatch({ok, #alpaca_module{}}, module_typ_and_parse(Code))
     end,
     fun() ->
        Code = Prelude ++
               "let useLet x =\n"
               "  apply (let f y = x + y in f) 10",
        ?assertMatch({ok, #alpaca_module{}}, module_typ_and_parse(Code))
     end
    ].
%% EUnit generator: `let` destructuring of tuples and records inside
%% function bodies, and how the destructured components drive inference.
destructuring_test_() ->
    [fun() ->
             Code = "module m let f() = let (x, _) = (1, 2) in x + 1",
             ?assertMatch({ok, #alpaca_module{
                                  functions=[#alpaca_binding{
                                                type={t_arrow, [t_unit], t_int}}]}},
                          module_typ_and_parse(Code))
     end
    , fun() ->
              Code = "module m let f t = let (x, y) = t in x + y",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={t_arrow, [{t_tuple, [t_int, t_int]}],
                                             t_int}}]}},
                 module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module m \n"
                  "let f r = let {x=x, y=y} = r in x + y \n"
                  "let g t = let (x, y) = t in f {x=x, y=y} \n"
                  "let test_it () = g (5, 6)",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={t_arrow,
                                             [#t_record{members=[#t_record_member{type=t_int},
                                                                 #t_record_member{type=t_int}
                                                                ]}], t_int}},
                                    #alpaca_binding{
                                       type={t_arrow, [{t_tuple, [t_int, t_int]}], t_int}},
                                    #alpaca_binding{
                                       type={t_arrow, [t_unit], t_int}}]}},
                 module_typ_and_parse(Code))
      end
    ].
%% TODO/FIXME:  we have some bad line number reporting in here, lots of zeroes.
%% EUnit generator: record unification across multiple function clauses
%% and match branches; most cases pin down (sometimes admittedly wrong,
%% see linked issues) missing_record_field errors.
record_unification_test_() ->
    %% Initial cases taken from https://github.com/alpaca-lang/alpaca/issues/198
    %% and then expanded on.
    [fun() ->
             Code =
                 "module m \n"
                 "type s = {a: bool, b: int} \n"
                 "let foo :a s = {a=true, b=0 | s} \n"
                 "let foo :b s = {b=1| s} \n",
             ?assertMatch({error, {missing_record_field, <<"m">>, 4, a}},
                          module_typ_and_parse(Code))
     end
    , fun() ->
              Code =
                  "module mod\n"
                  "let foo :a = {a=true, b=0}\n"
                  "let foo :b = {b=0}\n",
              ?assertMatch({error, {missing_record_field, <<"mod">>, 3, a}},
                           module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module mod\n"
                  "let foo :b = {b=0}\n"
                  "let foo :a = {a=true, b=0}\n",
              %% Incorrect error, see the following issue:
              %% https://github.com/alpaca-lang/alpaca/issues/263
              ?assertMatch({error, {missing_record_field, <<"mod">>, 3, a}},
                           module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module m \n"
                  "type s = {a: bool, b: int} \n"
                  "let foo sym = "
                  "  match sym with \n"
                  "    | :b -> {b=1} \n"
                  "    | :a -> {a=true, b=0} ",
              %% Incorrect error, see the following issue:
              %% https://github.com/alpaca-lang/alpaca/issues/263
              ?assertMatch({error, {missing_record_field, <<"m">>, 5, a}},
                           module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module m \n"
                  "type s = {a: bool, b: int} \n"
                  "val foo: fn atom -> {a: bool, b: int}\n"
                  "let foo sym = \n"
                  "  match sym with \n"
                  "    | :a -> {a=true, b=0} \n"
                  "    | :b -> {b=1}",
              ?assertMatch({error, {missing_record_field, <<"m">>, 7, a}},
                           module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module m \n"
                  "type s = {a: bool, b: int} \n"
                  "let foo sym rec = "
                  "  match sym with "
                  "    | :a -> {a=true, b=0 | rec } "
                  "    | :b -> {b=1 | rec}",
              ?assertMatch({error, {missing_record_field, <<"m">>, 3, a}},
                           module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module m \n"
                  "type s = A {a: bool, b: int} \n"
                  "let foo :a A s = A {a=true | s} \n"
                  "let foo :b A s = A {b=1 | s}",
              ?assertMatch({ok, #alpaca_module{
                                   functions=[#alpaca_binding{
                                                 type={t_arrow,
                                                       [t_atom, #adt{name = <<"s">>}],
                                                       #adt{name = <<"s">>}}}]}},
                           module_typ_and_parse(Code))
      end
    , fun() ->
              %% Interesting bug turned up while fixing issue #234.  When the
              %% type and spec for `foo` below are commented out, it stops
              %% failing.  Somehow using the type in the spec *incorrectly*
              %% widens the type passed by `use_foo`!
              %%
              %% It turns out that unifying an ADT from the current scope would
              %% place the constraining record in the *target* position, not
              %% that of the lower bound.
              Code1 =
                  "module m \n"
                  "-- type my_rec = {x: int, y: int} \n"
                  "-- val foo: fn my_rec -> (int, int) \n"
                  "let foo {x=x, y=y} = (x, y) \n"
                  "let use_foo () = foo {x=1}",
              ?assertMatch(
                 {error, {missing_record_field, <<"m">>, 5, y}},
                 module_typ_and_parse(Code1)),
              %% Now uncommented:
              Code2 =
                  "module m \n"
                  "type my_rec = {x: int, y: int} \n"
                  "val foo: fn my_rec -> (int, int) \n"
                  "let foo {x=x, y=y} = (x, y) \n"
                  "let use_foo () = foo {x=1}",
              ?assertMatch(
                 {error, {missing_record_field, <<"m">>, 5, y}},
                 module_typ_and_parse(Code2))
      end
    ].
%% Initial test case courtesy of https://github.com/lpil in issue 223.
%% EUnit generator: a list mixing strings and nested string lists cannot
%% unify by default, but declaring a union type `string | list string`
%% (or a recursive iolist) changes what the checker accepts.
iolist_test_() ->
    [fun() ->
             Code =
                 "module hello_world \n"
                 "let iolist_to_string iolist = \"\" \n"
                 "test \"iolist\" = \n"
                 "let iolist = [\"hello\", \" \", [\"world\"]] in\n"
                 "match (iolist_to_string iolist) with\n"
                 "| \"hello world\" -> :ok\n"
                 "| _ -> throw :not_equal",
             ?assertMatch({error, {cannot_unify, <<"hello_world">>,
                                   4,
                                   t_string,
                                   {t_list, t_string}}},
                          module_typ_and_parse(Code))
     end
     %% Checking to make sure that adding a type unifying strings and lists of
     %% them allows typing to succeed:
    , fun() ->
              Code =
                  "module hello_world \n"
                  "type iolist = string | list string \n"
                  "let iolist_to_string iolist = \"\" \n"
                  "test \"iolist\" = \n"
                  "let iolist = [\"hello\", \" \", [\"world\"]] in\n"
                  "match (iolist_to_string iolist) with\n"
                  "| \"hello world\" -> :ok\n"
                  "| _ -> throw :not_equal",
              ?assertMatch({ok, #alpaca_module{
                                   functions=[#alpaca_binding{
                                                 type={t_arrow,
                                                       [{unbound, _, _}],
                                                       t_string}}]}},
                           module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module iolist_example \n"
                  "type iolist  \n"
                  "  = list iolist  \n"
                  "  | string \n"
                  " \n"
                  "val my_iolist : iolist \n"
                  "let my_iolist =  \n"
                  "  [\"h\", [\"i\", [\"!\"]]]",
              ?assertMatch({error, {cannot_unify,
                                    <<"iolist_example">>,
                                    8,
                                    t_string,
                                    {t_list,t_string}}},
                           module_typ_and_parse(Code))
      end
    ].
%% EUnit generator: type variables in `val` signatures — how they flow
%% through ADT applications and nested generics, that the same var must
%% be bound consistently, and error cases for unknown constructors and
%% record-narrowing through a spec.
type_specs_and_vars_test_() ->
    [fun() ->
             Code =
                 "module m \n"
                 "type option 'a = Some 'a | None \n"
                 "val tail 'a : fn (list 'a) -> option (list 'a) \n"
                 "let tail xs = \n"
                 "  match xs with \n"
                 "  | [] -> None \n"
                 "  | x :: rest -> Some rest",
             ?assertMatch(
                {ok, #alpaca_module{
                        functions=[#alpaca_binding{
                                      type={
                                        t_arrow,
                                        [{t_list, A}],
                                        #adt{
                                           name= <<"option">>,
                                           vars=[{_, {t_list, A}}]}},
                                      signature=#alpaca_type_signature{
                                                   type={
                                                     t_arrow,
                                                     [{t_list, B}],
                                                     #alpaca_type{
                                                        name={_, _, <<"option">>},
                                                        vars=[{_, {t_list, B}}]}}}
                                     }]}},
                module_typ_and_parse(Code))
     end
    , fun() ->
              %% Trying a different nesting:
              Code =
                  "module m \n"
                  "type opt 'a = Some 'a | None \n"
                  "val f 'b: fn (opt (list 'b)) -> list (opt 'b) \n"
                  "let f Some (h :: _) = [Some h]",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={
                                         t_arrow,
                                         [#adt{vars=[{N, {t_list, A}}]}],
                                         {t_list,
                                          #adt{vars=[{N, A}]}}}}]}},
                 module_typ_and_parse(Code))
      end
      %% Turned up a missing error check when chasing a fix for #230:
    , fun() ->
              Code =
                  "module m \n"
                  "val flatten 'a : fn (option (option 'a)) -> option 'a \n"
                  "let flatten op = \n"
                  "match op with \n"
                  "  | None -> None \n"
                  "  | Some value -> value",
              ?assertMatch(
                 {error, {bad_constructor, <<"m">>, 5, "None"}},
                 module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module m \n"
                  "type option 'a = Some 'a | None \n"
                  "val flatten 'a : fn (option (option 'a)) -> option 'a \n"
                  "let flatten op = \n"
                  "match op with \n"
                  "  | None -> None \n"
                  "  | Some value -> value",
              ?assertMatch(
                 {ok,
                  #alpaca_module{
                     functions=[#alpaca_binding{
                                   type={t_arrow,
                                         [#adt{name = <<"option">>,
                                               vars=[{_, #adt{name = <<"option">>,
                                                              vars=[{_, A}]}}]}],
                                         #adt{name = <<"option">>,
                                              vars=[{_, A}]}}}]}},
                 module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module x \n"
                  "type someRec = {x: int, y: string} \n"
                  "val getX: fn someRec -> int \n"
                  "let getX {x=x} = x \n"
                  "let shouldFailToGetX () = getX {x=1}",
              ?assertMatch(
                 {error, {missing_record_field, <<"x">>, 5, y}},
                 module_typ_and_parse(Code))
      end
    ].
%% EUnit generator: record-extension syntax `{field=v | r}` applied to
%% records wrapped in (possibly nested or parameterised) ADT
%% constructors, plus extension constrained by a `val` signature and
%% plain extraction of a record from a constructor.
extend_records_in_adt_test_() ->
    [fun() ->
             Code =
                 "module a \n"
                 "type a_record = {x: int, y: string} \n"
                 "type use_a_record = U a_record \n"
                 "let replaceY U r y = U {y=y | r}",
             ?assertMatch(
                {ok, #alpaca_module{}},
                module_typ_and_parse(Code))
     end
    , fun() ->
              Code =
                  "module aa \n"
                  "type a_record = {x: int, y: bool} \n"
                  "type t = T a_record \n"
                  "type u = U t \n"
                  "let replace_y U (T r) y = {y=y | r}",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={t_arrow,
                                             [#adt{name = <<"u">>}, {unbound, A, _}],
                                             #t_record{
                                                members=[#t_record_member{
                                                            name=x,
                                                            type=t_int},
                                                         #t_record_member{
                                                            name=y,
                                                            type={unbound, A, _}
                                                           }]}}
                                      }]}},
                 module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module aaa \n"
                  "type a_record 'x = {x: 'x, y: bool} \n"
                  "type bind_x = a_record int \n"
                  "type uses_bound_x = U bind_x \n"
                  "let replace_x U rec = U {x=2 | rec}",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={t_arrow,
                                             _,
                                             #adt{name = <<"uses_bound_x">>}}}]}},
                 module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module b \n"
                  "type a_record = {x: int, y: string} \n"
                  "val with_spec: fn a_record string -> a_record \n"
                  "let with_spec r y = {y=y | r}",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={t_arrow,
                                             [#t_record{
                                                 members=[#t_record_member{name = x,
                                                                           type = t_int},
                                                          #t_record_member{name = y,
                                                                           type = t_string}]},
                                              t_string],
                                             #t_record{
                                                members=[#t_record_member{name = x,
                                                                          type = t_int},
                                                         #t_record_member{name = y,
                                                                          type = t_string}]}
                                            }}]}},
                 module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module c \n"
                  "type c = C {x: int, y: string} \n"
                  "let f C c = c",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={t_arrow, _,
                                             #t_record{
                                                members=[#t_record_member{
                                                            name=x,
                                                            type=t_int},
                                                         #t_record_member{
                                                            name=y,
                                                            type=t_string}]}}}]
                        }},
                 module_typ_and_parse(Code))
      end
    , fun() ->
              Code =
                  "module c \n"
                  "type c_rec = {x: int, y: string} \n"
                  "type c = C c_rec \n"
                  "let f C c = c",
              ?assertMatch(
                 {ok, #alpaca_module{
                         functions=[#alpaca_binding{
                                       type={t_arrow, _,
                                             #t_record{
                                                members=[#t_record_member{
                                                            name=x,
                                                            type=t_int},
                                                         #t_record_member{
                                                            name=y,
                                                            type=t_string}]}}}]
                        }},
                 module_typ_and_parse(Code))
      end
    ].
%% Pair each source string with this file's name and hand the batch to
%% the AST generator; a generation failure crashes here with a badmatch.
make_modules(Sources) ->
    Named = [{?FILE, Src} || Src <- Sources],
    {ok, Modules} = alpaca_ast_gen:make_modules(Named),
    Modules.
-endif. | src/alpaca_typer.erl | 0.533641 | 0.432902 | alpaca_typer.erl | starcoder |
%% -------------------------------------------------------------------
%% Dasudian Distributed Bayesian Network
%%
%% @author <NAME> <<EMAIL>>, <<EMAIL>>
%% @copyright (c) 2015-2016 Dasudian.com. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(bn).
-include("types.hrl").
-export([child_given_parents/2, parent_given_child/4]).
-compile([export_all]).
%% Distribution of a child node given distributions over its parents:
%% weight each conditional row by the joint probability of its parent
%% context, then sum the weighted distributions under the child's label.
-spec child_given_parents(probability(), [probability()]) -> probability().
child_given_parents({Label, ConditionalRows}, Parents) ->
    Weighted = get_probs(Parents, ConditionalRows),
    {Label, dpds:sum(Weighted)}.
%% Posterior distribution of one parent (Pl) given an observed child
%% value (Cv): marginalise the conditional rows over the other parents,
%% pick out the mass for Cv, multiply by the parent's prior, normalise.
-spec parent_given_child(probability(), [probability()], pvar(), pval()) ->
	prob_dist().
parent_given_child({_ChildLabel, ConditionalRows}, Parents, Pl, Cv) ->
    {ParentDist, OtherParents} = kvlist:pop_value(Parents, Pl),
    Marginalised = sum_over_others(ConditionalRows, OtherParents),
    Likelihood = sum_same(Cv, Marginalised),
    Posterior = times_prob_par(ParentDist, Likelihood),
    {Pl, dpds:normalise(Posterior)}.
%%%% private
%% Scale every conditional row's distribution by the joint probability
%% of the parent assignments in that row's context.
-spec get_probs([probability()], kvlist(context(), prob_dist())) -> [prob].
get_probs(Parents, ConditionalRows) ->
    [dpds:scale(RowDist, product(Context, Parents))
     || {Context, RowDist} <- ConditionalRows].
%% Joint probability of the parent assignments listed in Context.  Each
%% parent is required to be unconditional (its entry matches [{[], _}]);
%% anything else crashes with a badmatch by design.
-spec product(context(), [probability()]) -> float().
product(Context, Parents) ->
    Factors = [begin
                   [{[], Dist}] = kvlist:get_value(Parents, Var),
                   kvlist:get_value(Dist, Val)
               end || {Var, Val} <- Context],
    lists:foldl(fun(P, Acc) -> P * Acc end, 1, Factors).
%% Marginalise the conditional rows over every parent except the one
%% being queried, absorbing each "other" parent in turn via do_am/3.
-spec sum_over_others(kvlist(context(), prob_dist()), [probability()]) -> probability().
sum_over_others(ConditionalRows, OtherParents) ->
    Absorb = fun({Label, Dist}, Rows) -> do_am(Label, Dist, Rows) end,
    lists:foldl(Absorb, ConditionalRows, OtherParents).
%% Absorb one (independent) parent into every row: drop it from each
%% row's context and scale the row by the probability of the dropped
%% value.  Only unconditional parents ([{[], _}]) are supported.
-spec do_am(pvar(), {context(), prob_dist()}, list()) -> list().
do_am(Label, [{[], Dist}], Rows) -> % independent
    [begin
         {Value, RemainingContext} = kvlist:pop_value(RowContext, Label),
         {RemainingContext, dpds:scale(RowProbs, kvlist:get_value(Dist, Value))}
     end || {RowContext, RowProbs} <- Rows].
%% For each remaining context, total the probability mass its rows
%% assign to the observed child value Cv (an orddict keyed by context).
-spec sum_same(pval(), list()) -> list().
sum_same(Cv, Rows) ->
    AddMass = fun({Context, Dist}, Acc) ->
                      P = kvlist:get_value(Dist, Cv),
                      orddict:update(Context, fun(Sum) -> Sum + P end, P, Acc)
              end,
    lists:foldl(AddMass, [], Rows).
%% Multiply each likelihood entry by the (independent) parent's prior
%% probability for that value, yielding an unnormalised posterior.
-spec times_prob_par(probability(), list()) -> list().
times_prob_par([{[], Parent}], B) -> % independent
    lists:map(fun({[{_,Pv}],PP}) ->
                      VP = kvlist:get_value(Parent, Pv),
                      {Pv, PP*VP}
              end,
              B). | src/bn.erl | 0.550124 | 0.520314 | bn.erl | starcoder |
%%%
%%% Ranges w/ optional bounds on ordered types.
-module(ff_range).
-type range(T) :: {maybe(bound(T)), maybe(bound(T))}.
-type bound(T) :: {exclusive | inclusive, ord(T)}.
-type maybe(T) :: infinity | T.
% totally ordered
-type ord(T) :: T.
-export_type([range/1]).
-export_type([bound/1]).
-export([intersect/2]).
-export([contains/2]).
%%
%% Intersection of two ranges: the tighter of the two lower bounds
%% paired with the tighter of the two upper bounds, or 'undefined' when
%% the resulting lower bound exceeds the upper bound (no overlap).
-spec intersect(range(T), range(T)) -> range(T) | undefined.
intersect(R1, R2) ->
    Lo = max_bound(lower(R1), lower(R2)),
    Hi = min_bound(upper(R1), upper(R2)),
    case compare_bounds(Lo, Hi) of
        gt ->
            undefined;
        _LtOrEq ->
            from_bounds(Lo, Hi)
    end.
%% A range contains another iff intersecting them gives back the inner
%% range unchanged.
-spec contains(range(T), range(T)) -> boolean().
contains(Outer, Inner) ->
    case intersect(Outer, Inner) of
        Inner -> true;
        _Other -> false
    end.
%%
%% Total order over normalised bounds (neginf < {upto,V} < {point,V} <
%% {from,V} < posinf, with values compared first).  {from,V} sits just
%% above V (exclusive lower bound) and {upto,V} just below it
%% (exclusive upper bound).
compare_bounds(B1, B1) ->
    %% Identical terms compare equal.
    eq;
compare_bounds(_B1, neginf) ->
    gt;
compare_bounds(_B1, posinf) ->
    lt;
compare_bounds({_, V1}, {_, V2}) when V1 > V2 ->
    gt;
compare_bounds({from, V1}, {_, V1}) ->
    %% Exclusive lower bound lies strictly above the shared value.
    gt;
compare_bounds({upto, V1}, {_, V1}) ->
    %% Exclusive upper bound lies strictly below the shared value.
    lt;
compare_bounds(B1, B2) ->
    %% Remaining cases are resolved by flipping the arguments and
    %% inverting the (necessarily strict) result.
    case compare_bounds(B2, B1) of
        gt -> lt;
        lt -> gt
    end.
%% Larger of two bounds under compare_bounds/2 (ties yield B2).
max_bound(B1, B2) ->
    case compare_bounds(B1, B2) of
        gt -> B1;
        _ -> B2
    end.

%% Smaller of two bounds under compare_bounds/2 (ties yield B2).
min_bound(B1, B2) ->
    case compare_bounds(B1, B2) of
        lt -> B1;
        _ -> B2
    end.
%%
%% Normalised lower bound of a range: neginf, {from, V} for an
%% exclusive bound, or {point, V} for an inclusive one.
lower({LowerBound, _UpperBound}) ->
    case LowerBound of
        infinity -> neginf;
        {exclusive, V} -> {from, V};
        {inclusive, V} -> {point, V}
    end.
%% Normalised upper bound of a range: posinf, {upto, V} for an
%% exclusive bound, or {point, V} for an inclusive one.
upper({_LowerBound, UpperBound}) ->
    case UpperBound of
        infinity -> posinf;
        {exclusive, V} -> {upto, V};
        {inclusive, V} -> {point, V}
    end.
%% Reassemble an external range from a pair of normalised bounds.
from_bounds(B1, B2) ->
    {from_lower(B1), from_upper(B2)}.
%% Map a normalised lower bound back to its external representation.
from_lower(Bound) ->
    case Bound of
        neginf -> infinity;
        {from, V} -> {exclusive, V};
        {point, V} -> {inclusive, V}
    end.
%% Map a normalised upper bound back to its external representation.
from_upper(Bound) ->
    case Bound of
        posinf -> infinity;
        {upto, V} -> {exclusive, V};
        {point, V} -> {inclusive, V}
    end.
%% Tests
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-spec test() -> _.
-type testcase() :: {_, fun()}.
-spec intersect_test_() -> [testcase()].
%% EUnit generator for intersect/2: unbounded/unbounded, disjoint
%% exclusive bounds (both orders), single-point overlaps at inclusive
%% bounds, and mixed inclusive/exclusive overlaps.
intersect_test_() ->
    [
        ?_assertEqual(
            {infinity, infinity},
            intersect(
                {infinity, infinity},
                {infinity, infinity}
            )
        ),
        ?_assertEqual(
            undefined,
            intersect(
                {infinity, {exclusive, 0}},
                {{exclusive, 0}, infinity}
            )
        ),
        ?_assertEqual(
            undefined,
            intersect(
                {{exclusive, 0}, infinity},
                {infinity, {exclusive, 0}}
            )
        ),
        ?_assertEqual(
            {{inclusive, 0}, {inclusive, 0}},
            intersect(
                {infinity, {inclusive, 0}},
                {{inclusive, 0}, infinity}
            )
        ),
        ?_assertEqual(
            {{inclusive, 0}, {inclusive, 0}},
            intersect(
                {{inclusive, 0}, infinity},
                {infinity, {inclusive, 0}}
            )
        ),
        ?_assertEqual(
            {{inclusive, 1}, {exclusive, 42}},
            intersect(
                {infinity, {exclusive, 42}},
                {{inclusive, 1}, infinity}
            )
        ),
        ?_assertEqual(
            {{exclusive, 42}, infinity},
            intersect(
                {{exclusive, 42}, infinity},
                {{exclusive, 42}, infinity}
            )
        ),
        ?_assertEqual(
            {{exclusive, 42}, {exclusive, 43}},
            intersect(
                {{inclusive, 42}, {exclusive, 43}},
                {{exclusive, 42}, {inclusive, 43}}
            )
        ),
        ?_assertEqual(
            {{inclusive, 42}, {inclusive, 42}},
            intersect(
                {{inclusive, 41}, {inclusive, 42}},
                {{inclusive, 42}, {inclusive, 43}}
            )
        ),
        ?_assertEqual(
            {{inclusive, 42}, {exclusive, 43}},
            intersect(
                {{exclusive, 41}, {inclusive, 44}},
                {{inclusive, 42}, {exclusive, 43}}
            )
        )
    ].
-spec contains_test_() -> [testcase()].
contains_test_() ->
[
?_assertEqual(
true,
contains(
{infinity, infinity},
{infinity, infinity}
)
),
?_assertEqual(
true,
contains(
{infinity, infinity},
{{exclusive, 0}, {inclusive, 1000}}
)
),
?_assertEqual(
false,
contains(
{{exclusive, 0}, {inclusive, 1000}},
{infinity, infinity}
)
),
?_assertEqual(
true,
contains(
{{exclusive, 41}, {inclusive, 43}},
{{inclusive, 42}, {exclusive, 43}}
)
),
?_assertEqual(
false,
contains(
{{exclusive, 41}, {exclusive, 43}},
{{inclusive, 42}, {inclusive, 43}}
)
)
].
-endif. | apps/ff_core/src/ff_range.erl | 0.550607 | 0.590484 | ff_range.erl | starcoder |
-module(quic_util).
-include("quic.hrl").
%% ------------------------------------------------------------------
%% API Function Exports
%% ------------------------------------------------------------------
-export([binary_chunks/2]).
-export([exact_binary_chunks/2]).
-export([bit_to_boolean/1]).
-export([boolean_to_bit/1]).
-export([encode_uint/2]).
-export([hash_fnv1a_96/1]).
-export([zlib_uncompress/2]).
-export([coalesce/2]).
-export([binary_to_hex/1]).
-export([filtermapfoldl/3]).
-export([now_us/0]).
%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------
%% Split a binary into consecutive Size-byte chunks.  The final element
%% is whatever remains — possibly <<>> when the input length is an exact
%% multiple of Size (matching the original clause structure).
-spec binary_chunks(Blob :: binary(), Size :: pos_integer()) -> [binary()].
binary_chunks(Data, ChunkSize) ->
    case Data of
        <<Head:ChunkSize/binary, Tail/binary>> ->
            [Head | binary_chunks(Tail, ChunkSize)];
        <<Shorter/binary>> ->
            [Shorter]
    end.
%% Split a binary into exact Size-byte pieces.  The input length must
%% be a multiple of Size; otherwise the match on the final partial
%% chunk fails with a badmatch (same behaviour as before).
-spec exact_binary_chunks(Blob :: binary(), Size :: pos_integer()) -> [binary()].
exact_binary_chunks(<<>>, _ChunkSize) ->
    [];
exact_binary_chunks(Data, ChunkSize) ->
    <<Head:ChunkSize/binary, Tail/binary>> = Data,
    [Head | exact_binary_chunks(Tail, ChunkSize)].
%% Wire bit <-> boolean conversions; any other input is a deliberate
%% function_clause error.
bit_to_boolean(0) -> false;
bit_to_boolean(1) -> true.
boolean_to_bit(false) -> 0;
boolean_to_bit(true) -> 1.
%% Encode Value as a little-endian unsigned integer of Size bytes.
encode_uint(Value, Size) ->
    <<Value:Size/little-unsigned-integer-unit:8>>.
%% 96-bit truncation of the 128-bit FNV-1a hash: little-endian-encode
%% the 128-bit value and keep the first 12 bytes.
-spec hash_fnv1a_96(iodata()) -> binary().
hash_fnv1a_96(Data) ->
    % Based on https://groups.google.com/a/chromium.org/forum/#!topic/proto-quic/VpuIIe0WL3U
    Hash128 = hash:fnv128a(iolist_to_binary(Data)),
    %% Drop the trailing 4 bytes of the 16-byte little-endian encoding.
    <<Truncated:12/binary, _:4/binary>> = encode_uint(Hash128, 16),
    Truncated.
%% Inflate data that was deflated with a preset dictionary: the first
%% inflate is *expected* to fail with need_dictionary, after which the
%% dictionary is installed and inflation retried.
%% NOTE(review): zlib:inflate/2 returns an iolist, not necessarily a
%% flat binary — the spec's binary() return looks optimistic; confirm
%% against callers.
-spec zlib_uncompress(Compressed :: binary(), Dictionary :: binary())
        -> Uncompressed :: binary().
zlib_uncompress(Compressed, Dictionary) ->
    Z = zlib:open(),
    zlib:inflateInit(Z),
    % This is so annoying
    %% Assert the need_dictionary error shape via old-style catch; any
    %% other outcome (including success) is a badmatch crash.
    {'EXIT',{{need_dictionary, _DictionaryAdler32},_}} = (catch zlib:inflate(Z, Compressed)),
    ok = zlib:inflateSetDictionary(Z, Dictionary),
    Uncompressed = zlib:inflate(Z, Compressed),
    ?ASSERT((is_binary(Uncompressed) orelse is_list(Uncompressed)),
            insuficcient_data_for_decompression),
    zlib:close(Z),
    Uncompressed.
%% Null-coalescing helper: fall back to Default only when Value is the
%% atom 'undefined'; every other value (including false) is kept.
-spec coalesce(Value :: term(), Default :: term()) -> term().
coalesce(Value, Default) ->
    case Value of
        undefined -> Default;
        Defined -> Defined
    end.
%% Render an iolist/binary as an upper-case hexadecimal string,
%% interpreting the bytes as one big-endian unsigned integer.
%% NOTE(review): leading zero bytes are lost (<<0,1>> -> "1") because
%% the value goes through an integer — confirm callers do not rely on
%% fixed-width output.
binary_to_hex(Value) ->
    integer_to_list(binary:decode_unsigned(iolist_to_binary(Value), big), 16).
%% Combined filtermap + foldl over List.  The fun receives each element
%% and the running accumulator and returns {FilterMapResult, NewAcc},
%% where FilterMapResult is false (drop), true (keep as-is) or
%% {true, Mapped} (keep the mapped value).  Returns
%% {KeptElementsInOrder, FinalAcc}.
filtermapfoldl(FilterMapFoldFun, Acc0, List) ->
    fmf_loop(FilterMapFoldFun, Acc0, List, []).

%% Tail-recursive worker; kept elements are accumulated in reverse and
%% flipped once at the end.
fmf_loop(_Fun, Acc, [], RevKept) ->
    {lists:reverse(RevKept), Acc};
fmf_loop(Fun, Acc, [Elem | Rest], RevKept) ->
    {FilterMapResult, NextAcc} = Fun(Elem, Acc),
    NextRevKept =
        case FilterMapResult of
            false -> RevKept;
            true -> [Elem | RevKept];
            {true, Mapped} -> [Mapped | RevKept]
        end,
    fmf_loop(Fun, NextAcc, Rest, NextRevKept).
%% Current OS system time in microseconds.
now_us() ->
    os:system_time(micro_seconds). | src/quic_util.erl | 0.516839 | 0.448547 | quic_util.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_btree_stats).
-export([compute/1]).
-include("couch_db.hrl").
%% Accumulator for a full btree traversal (see collect_stats/4).  The
%% list-valued fields gather one sample per node (or per KV entry) and
%% are sorted once by compute/1 before percentiles are taken.
-record(stats, {
    kv_count = 0,                    % total number of key/value pairs
    kp_nodes = 0,                    % count of inner (key/pointer) nodes
    kv_nodes = 0,                    % count of leaf (key/value) nodes
    depths = [],                     % depth recorded at each kv leaf
    reduction_sizes = [],            % term size of each node's reduction
    elements_per_kp_node = [],       % children per inner node
    elements_per_kv_node = [],       % entries per leaf node
    kp_node_sizes = [],              % uncompressed inner-node sizes
    compressed_kp_node_sizes = [],   % on-disk inner-node sizes
    kv_node_sizes = [],              % uncompressed leaf-node sizes
    compressed_kv_node_sizes = [],   % on-disk leaf-node sizes
    key_sizes = [],                  % per-entry key sizes
    value_sizes = []                 % per-entry value sizes
}).
% Use only for debugging/analysis. This will traverse the whole btree.
%% Walk the entire tree rooted at Bt, sort every sampled distribution,
%% expand each into min/max/avg/percentile entries, and prepend overall
%% file-size / fragmentation / chunk-threshold figures.
compute(#btree{root = Root, fd = Fd} = Bt) ->
    #stats{
        depths = Depths0,
        reduction_sizes = RedSizes0,
        elements_per_kv_node = ElementsPerKvNode0,
        elements_per_kp_node = ElementsPerKpNode0,
        kv_node_sizes = KvNodeSizes0,
        kp_node_sizes = KpNodeSizes0,
        compressed_kv_node_sizes = CompKvNodeSizes0,
        compressed_kp_node_sizes = CompKpNodeSizes0,
        key_sizes = KeySizes0,
        value_sizes = ValueSizes0
    } = StatsRec0 = collect_stats(Root, Bt, 0, #stats{}),
    %% Percentile extraction in expand_stat/2 requires sorted samples.
    StatsRec = StatsRec0#stats{
        depths = lists:sort(Depths0),
        reduction_sizes = lists:sort(RedSizes0),
        elements_per_kv_node = lists:sort(ElementsPerKvNode0),
        elements_per_kp_node = lists:sort(ElementsPerKpNode0),
        kv_node_sizes = lists:sort(KvNodeSizes0),
        kp_node_sizes = lists:sort(KpNodeSizes0),
        compressed_kv_node_sizes = lists:sort(CompKvNodeSizes0),
        compressed_kp_node_sizes = lists:sort(CompKpNodeSizes0),
        key_sizes = lists:sort(KeySizes0),
        value_sizes = lists:sort(ValueSizes0)
    },
    StatNames = record_info(fields, stats),
    StatPoses = lists:seq(2, record_info(size, stats)),
    %% Fields listed here are expanded into max/min/avg/percentiles;
    %% every other field is emitted as a single scalar entry.
    ExpandableStats = [
        {depths, <<"depth">>},
        {reduction_sizes, <<"reduction_size">>},
        {elements_per_kp_node, <<"elements_per_kp_node">>},
        {elements_per_kv_node, <<"elements_per_kv_node">>},
        {kp_node_sizes, <<"kp_node_size">>},
        {kv_node_sizes, <<"kv_node_size">>},
        {compressed_kv_node_sizes, <<"compressed_kv_node_size">>},
        {compressed_kp_node_sizes, <<"compressed_kp_node_size">>},
        {key_sizes, <<"key_size">>},
        {value_sizes, <<"value_size">>}
    ],
    Stats = lists:foldr(
        fun({StatName, StatPos}, Acc) ->
            Val = element(StatPos, StatsRec),
            case couch_util:get_value(StatName, ExpandableStats) of
            undefined ->
                [{?l2b(atom_to_list(StatName)), Val} | Acc];
            StatName2 ->
                expand_stat(Val, StatName2) ++ Acc
            end
        end,
        [],
        lists:zip(StatNames, StatPoses)),
    {ok, FileSize} = couch_file:bytes(Fd),
    BtSize = couch_btree:size(Bt),
    %% Fragmentation: percentage of the file not occupied by live tree
    %% data (guarding against division by zero on an empty file).
    Frag = case FileSize of
    0 ->
        0;
    _ ->
        ((FileSize - BtSize) / FileSize) * 100
    end,
    [
        {<<"btree_size">>, BtSize},
        {<<"file_size">>, FileSize},
        {<<"fragmentation">>, Frag},
        {<<"kv_chunk_threshold">>, Bt#btree.kv_chunk_threshold},
        {<<"kp_chunk_threshold">>, Bt#btree.kp_chunk_threshold} | Stats
    ].
%% Depth-first traversal accumulating per-node samples into #stats{}.
%% Leaf (kv) nodes record depth, entry count, key/value sizes; inner
%% (kp) nodes record child counts and recurse into every child.
collect_stats(nil, _Bt, _Depth, Stats) ->
    Stats;
collect_stats(Node, Bt, Depth, Stats) ->
    Pointer = element(1, Node),
    Reduction = element(2, Node),
    {{NodeType, NodeList}, NodeCompSize, NodeSize} = get_node(Bt, Pointer),
    ChildCount = length(NodeList),
    RedSize = thing_size(Reduction),
    case NodeType of
    kv_node ->
        %% Leaf: one depth sample per leaf, one key/value size sample
        %% per entry.
        Stats#stats{
            kv_nodes = Stats#stats.kv_nodes + 1,
            depths = [Depth + 1 | Stats#stats.depths],
            kv_count = Stats#stats.kv_count + ChildCount,
            elements_per_kv_node = [ChildCount | Stats#stats.elements_per_kv_node],
            kv_node_sizes = [NodeSize | Stats#stats.kv_node_sizes],
            compressed_kv_node_sizes =
                [NodeCompSize | Stats#stats.compressed_kv_node_sizes],
            reduction_sizes = [RedSize | Stats#stats.reduction_sizes],
            key_sizes =
                [thing_size(K) || {K, _} <- NodeList] ++ Stats#stats.key_sizes,
            value_sizes =
                [thing_size(V) || {_, V} <- NodeList] ++ Stats#stats.value_sizes
        };
    kp_node ->
        %% Inner node: record its own samples, then fold over children.
        Stats2 = Stats#stats{
            kp_nodes = Stats#stats.kp_nodes + 1,
            elements_per_kp_node = [ChildCount | Stats#stats.elements_per_kp_node],
            kp_node_sizes = [NodeSize | Stats#stats.kp_node_sizes],
            compressed_kp_node_sizes =
                [NodeCompSize | Stats#stats.compressed_kp_node_sizes],
            reduction_sizes = [RedSize | Stats#stats.reduction_sizes]
        },
        lists:foldl(
            fun({_Key, NodeState}, StatsAcc) ->
                collect_stats(NodeState, Bt, Depth + 1, StatsAcc)
            end,
            Stats2,
            NodeList)
    end.
%% Read and decompress the node at NodePos, returning the decoded node
%% plus its compressed (on-disk) and uncompressed byte sizes.  The
%% binary_mode flag selects term-encoded vs. binary-encoded node format.
get_node(#btree{fd = Fd, binary_mode = false}, NodePos) ->
    {ok, CompressedBin} = couch_file:pread_binary(Fd, NodePos),
    Bin = couch_compress:decompress(CompressedBin),
    {binary_to_term(Bin), byte_size(CompressedBin), byte_size(Bin)};
get_node(#btree{fd = Fd, binary_mode = true}, NodePos) ->
    {ok, CompressedBin} = couch_file:pread_binary(Fd, NodePos),
    %% First byte tags the node type: 1 = kv (leaf), otherwise kp.
    <<TypeInt, NodeBin/binary>> = Bin = couch_compress:decompress(CompressedBin),
    Type = if TypeInt == 1 -> kv_node; true -> kp_node end,
    {couch_btree:decode_node(NodeBin, Type, []), byte_size(CompressedBin), byte_size(Bin)}.
%% Expand one sorted sample list into six named entries: max, min, avg
%% and the 90th/95th/99th percentiles.  Empty samples yield zeroes.
expand_stat(Values, StatName) ->
    MaxName = iolist_to_binary(["max_", StatName]),
    MinName = iolist_to_binary(["min_", StatName]),
    AvgName = iolist_to_binary(["avg_", StatName]),
    P90Name = iolist_to_binary([StatName, "_90_percentile"]),
    P95Name = iolist_to_binary([StatName, "_95_percentile"]),
    P99Name = iolist_to_binary([StatName, "_99_percentile"]),
    case Values of
    [] ->
        MaxValue = 0,
        MinValue = 0,
        AvgValue = 0;
    _ ->
        %% Values arrive sorted, so last/head are max/min.
        MaxValue = lists:last(Values),
        MinValue = hd(Values),
        AvgValue = lists:sum(Values) / length(Values)
    end,
    [
        {MaxName, MaxValue},
        {MinName, MinValue},
        {AvgName, AvgValue},
        {P90Name, percentile(Values, 90)},
        {P95Name, percentile(Values, 95)},
        {P99Name, percentile(Values, 99)}
    ].
%% Nearest-rank percentile of an already-sorted list; 0 for the empty
%% list (matching expand_stat/2's empty-sample defaults).
percentile([], _Percentile) ->
    0;
percentile(SortedValues, Percentile) ->
    Index = round((Percentile / 100) * length(SortedValues) + 0.5),
    lists:nth(Index, SortedValues).
%% Size in bytes of a key/value/reduction: byte size for binaries, the
%% ?term_size macro (external term size) for any other term.
thing_size(Bin) when is_binary(Bin) ->
    byte_size(Bin);
thing_size(Term) ->
    ?term_size(Term). | src/couchdb/couch_btree_stats.erl | 0.53048 | 0.502502 | couch_btree_stats.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc This code is only needed for riak search migration and can be
%% pulled once riak search is removed from riak.
-module(yz_rs_migration).
-compile(export_all).
-include("yokozuna.hrl").
%% @doc Determine if Riak Search is enabled.
-spec is_riak_search_enabled() -> boolean().
is_riak_search_enabled() ->
    %% Reads `enabled' from the ?RS_SVC application env; defaults to
    %% false when unset.
    app_helper:get_env(?RS_SVC, enabled, false).
%% @doc Remove Riak Search pre-commit hook from all buckets when Riak
%% Search is disabled.
%%
%% Previous versions of Riak had a bug in `set_bucket' which caused
%% bucket fixups to leak into the raw ring. If a user is upgrading
%% from one of these versions, and enabled search on a bucket, then
%% the Riak Searc hook will be in the raw ring. After migrating to
%% Yokozuna these hooks must be removed to avoid errors.
-spec strip_rs_hooks(IsRSEnabled :: boolean(), ring()) -> ok.
strip_rs_hooks(true, _) ->
    %% Riak Search still enabled — leave its hooks in place.
    ok;
strip_rs_hooks(false, Ring) ->
    Buckets = riak_core_ring:get_buckets(Ring),
    strip_rs_hooks_2(Ring, Buckets).
%%%===================================================================
%%% EVERYTHING BELOW IS FOR BUG CAUSED BY LEAKED BUCKET FIXUPS.
%%%
%%% Much of this code was copied from `riak_search_kv_hook'.
%%%===================================================================
%% @private
%%
%% @doc Given current pre-commit hook generate a new one with all
%% instances of the Riak Search hook removed.
%%
%% `Changed' - A boolean indicating if the `Precommit' value changed.
%%
%% `NewPrecommit' - The new pre-commit hook.
-spec gen_new_precommit([term()]) -> {Changed :: boolean(),
                                      NewPrecommit :: [term()]}.
gen_new_precommit(Precommit) ->
    %% Strip ALL Riak Search hooks.
    NewPrecommit = lists:filter(fun ?MODULE:not_rs_hook/1, Precommit),
    %% Changed is true when filtering actually removed something.
    Changed = not (Precommit =:= NewPrecommit),
    {Changed, NewPrecommit}.
%% @private
%%
%% @doc Fetch the pre-commit hook list from bucket properties.  A
%% single hook stored as a bare {struct, _} is wrapped in a list, and
%% an absent entry defaults to the empty list.
-spec get_precommit([term()]) -> [term()].
get_precommit(BProps) ->
    case proplists:get_value(precommit, BProps, []) of
        {struct, _} = SingleHook -> [SingleHook];
        Hooks when is_list(Hooks) -> Hooks
    end.
%% @private
%%
%% @doc Predicate function which returns `true' if the `Hook' is NOT a
%% Riak Search hook.  For use with `lists:filter/2'.
-spec not_rs_hook(term()) -> boolean().
not_rs_hook(Hook) ->
    not (Hook == rs_precommit_def()).
%% @private
%%
%% @doc The definition of the Riak Search pre-commit hook.
%% Matches the hook's stored mochijson2-style {struct, _} shape.
-spec rs_precommit_def() -> term().
rs_precommit_def() ->
    {struct, [{<<"mod">>,<<"riak_search_kv_hook">>},
              {<<"fun">>,<<"precommit">>}]}.
%% Walk every bucket, rewriting its precommit property only when the
%% Riak Search hook was actually present (avoids needless ring churn).
strip_rs_hooks_2(_Ring, []) ->
    ok;
strip_rs_hooks_2(Ring, [Bucket|Rest]) ->
    BProps = riak_core_bucket:get_bucket(Bucket, Ring),
    Precommit = get_precommit(BProps),
    {Changed, NewPreHook} = gen_new_precommit(Precommit),
    case Changed of
        true ->
            riak_core_bucket:set_bucket(Bucket, [{precommit, NewPreHook}]),
            ok;
        false ->
            ok
    end,
    strip_rs_hooks_2(Ring, Rest). | deps/yokozuna/src/yz_rs_migration.erl | 0.583322 | 0.403273 | yz_rs_migration.erl | starcoder |
-module(day2).
-export([solve_part1/1, solve_part2/1]).
%% for tests
-export([validate/4, count/2, validate2/4, is_char_at/3]).
%%% solution behavior

%% Count the entries whose password satisfies the part-1 policy.
solve_part1(Input) ->
    length([Pw || {Char, Lo, Hi, Pw} <- Input, validate(Char, Lo, Hi, Pw)]).

%% Count the entries whose password satisfies the part-2 policy.
solve_part2(Input) ->
    length([Pw || {Char, P1, P2, Pw} <- Input, validate2(Char, P1, P2, Pw)]).
%%% internals
%%% Part1

%% A password is valid when Character occurs between AtLeast and AtMost
%% times (inclusive). Character is a one-character string, e.g. "a".
%% (Fix: removed leftover debug io:format/2 that printed the count for
%% every entry.)
-spec validate(Character, AtLeast, AtMost, Password) ->
          Result when
      Character :: string(),
      AtLeast :: pos_integer(),
      AtMost :: pos_integer(),
      Password :: string(),
      Result :: boolean().
validate(Character, AtLeast, AtMost, Password) ->
    Count = count(Character, Password),
    (Count >= AtLeast) andalso (Count =< AtMost).

%% Count occurrences of the single character inside the string.
-spec count([Character], String) ->
          Count when
      Character :: pos_integer(),
      String :: string(),
      Count :: non_neg_integer().
count([Character], String) ->
    count(Character, String, 0).

%% Tail-recursive counting helper.
-spec count(Character, String, Count) ->
          FinalCount when
      Character :: non_neg_integer(),
      String :: string(),
      Count :: non_neg_integer(),
      FinalCount :: non_neg_integer().
count(_, [], Count) -> Count;
count(Character, [Character|Tail], Count) ->
    count(Character, Tail, Count + 1);
count(Character, [_NotCharacter|Tail], Count) ->
    count(Character, Tail, Count).
%%% Part2
%% A password is valid when Character appears at exactly one of the two
%% 1-based positions (xor), per the part-2 policy.
%% (Fix: removed leftover debug io:format/2 in is_char_at/3 and trailing
%% non-code residue after the final clause.)
-spec validate2(Character, FirstPos, SecondPos, Password) ->
          Result when
      Character :: [pos_integer()],
      FirstPos :: pos_integer(),
      SecondPos :: pos_integer(),
      Password :: string(),
      Result :: boolean().
validate2(Character, FirstPos, SecondPos, Password) ->
    (is_char_at(Character, FirstPos, Password))
        xor (is_char_at(Character, SecondPos, Password)).

%% True when the character at the 1-based position Pos equals Character.
-spec is_char_at(Character, Pos, String) ->
          boolean() when
      Character :: [pos_integer()],
      Pos :: non_neg_integer(),
      String :: string().
is_char_at([Character], Pos, String) ->
    lists:nth(Pos, String) =:= Character.
%%% -*- erlang -*-
%%%
%%% This file is part of hackney_lib released under the Apache 2 license.
%%% See the NOTICE for more information.
%%%
%%% Copyright (c) 2011-2012, <NAME> <<EMAIL>>
%%% Copyright (c) 2013-2015 <NAME>
%%%
%%% @doc HTTP parser in pure Erlang
%%% This parser is able to parse HTTP responses and requests in a
%%% streaming fashion. If not set it will autodetect the type of
%%% binary parsed, if it's a request or a response.
%%%
%%% Internally it is keeping a buffer for intermediary steps but don't
%%% keep any state in memory.
%%%
%%%
%%% The first time you initialise a parser using `hackney_http:parser/0'
%%% or `hackney_http:parser/1' you will receive an opaque record You can
%%% then process it using the function `hackney_http:execute/2'.
%%%
%%% Each steps will return the status, some data and the new parser that
%%% you can process later with `hackney_http:execute/2' when
%%% `{more, ...}' is returned or `hackney_http:execute/1' in other
%%% cases:
%%%
%%% - `{response, http_version(), status(), http_reason(), parser()}':
%%% when the first line of a response is parsed
%%% - `{request, http_version(), http_method(), uri(), parser()}':
%%% when the first line of a request (on servers) is parsed
%%% - `{more, parser()}': when the parser need more
%%% data. The new data should be passed to `hackney_http:execute/2' with
%%% the new parser() state received.
%%% - `{header, {Name :: binary(), Value :: binary()}, parser()}':
%%% when an header has been parsed. To continue the parsing you must
%%% call the given `parser()' with `hackney_http:execute/1'.
%%% - `{headers_complete, parser()}' : when all headers have been parsed.
%%% To continue the parsing you must call the given `parser()' state
%%% with `hackney_http:execute/1'.
%%% - `{more, parser(), binary()}': on body, when
%%% the parser need more data. The new data should be passed to
%%% `hackney_http:execute/2' (with `parser()' ) when received. The binary at the end of the
%%% tuple correspond to the actual buffer of the parser. It may be used
%%% for other purpose, like start to parse a new request on pipeline
%%% connections, for a proxy...
%%% - `{ok, binary(), parser()}': on body, when a chunk has been
%%% parsed. To continue the parsing you must call
%%% `hackney_http:execute/1' with the given `parser()'.
%%% - `{done, binary()}': when the parsing is done. The binary
%%% given corresponds to the non parsed part of the internal buffer.
%%% - `{error, term{}}': when an error happen
-module(hackney_http).
-export([parser/0, parser/1]).
-export([execute/1, execute/2]).
-export([get/2]).
-export([parse_response_version/2]).
-include("hackney_lib.hrl").
-opaque parser() :: #hparser{}.
-export_type([parser/0]).
-type http_version() :: {integer(), integer()}.
-type status() :: integer().
-type http_reason() :: binary().
-type http_method() :: binary().
-type uri() :: binary().
-type body_result() :: {more, parser(), binary()}
| {ok, binary(), parser()}
| {done, binary()}.
-type parser_result() ::
{response, http_version(), status(), http_reason(), parser()}
| {request, http_version(), http_method(), uri(), parser()}
| {more, parser()}
| body_result()
| {error, term()}.
-type parser_option() :: request | response | auto
| {max_empty_lines, integer()}
| {max_line_length, integer()}.
-type parser_options() :: [parser_option()].
%% @doc Create a new HTTP parser. The parser will autodetect if the parsed
%% binary is a response or a request.
-spec parser() -> parser().
parser() ->
    parser([]).
%% @doc create a new HTTP parser with options. By default the type of
%% parsed binary will be detected.
%%
%% Available options:
%% <ul>
%% <li>`auto' : autodetect if the binary parsed is a response or a
%% request (default).</li>
%% <li>`response': set the parser to parse a response</li>
%% <li>`request': set the parser to parse a request (server)</li>
%% <li>`{max_line_length, Max}': set the maximum size of a line parsed
%% before we give up.</li>
%% <li>`{max_empty_lines, Max}': the maximum number of empty line we
%% accept before the first line happen</li>
%% </ul>
-spec parser(parser_options()) -> parser().
parser(Options) ->
    parse_options(Options, #hparser{}).
%% @doc retrieve a parser property.
%% Properties are:
%% - `buffer': internal buffer of the parser (non parsed)
%% - `state': the current state (on_status, on_header, on_body, done)
%% - `version': HTTP version
%% - `content_length': content length header if any
%% - `transfer_encoding': transfer encoding header if any
%% - `content_type': content type header if any
%% - `location': location header if any
%% - `connection': connection header if any.
-spec get(parser(), atom() | [atom()]) -> any().
get(Parser, Props) when is_list(Props) ->
    %% A list of property names yields the values in the same order.
    [get_property(P, Parser) || P <- Props];
get(Parser, Prop) ->
    get_property(Prop, Parser).
%% @doc Execute the parser with the current buffer.
%% Equivalent to `execute(St, <<>>)': no new data is appended.
-spec execute(#hparser{}) -> parser_result().
execute(St) ->
    execute(St, <<>>).
%% @doc Execute the parser with the new buffer.
%% The new data is appended to the internal buffer, then the step
%% matching the parser's current state is run.
-spec execute(#hparser{}, binary()) -> parser_result().
execute(#hparser{state=Status, buffer=Buffer}=St, Bin) ->
    %% update the state with the new buffer
    NBuffer = << Buffer/binary, Bin/binary >>,
    St1 = St#hparser{buffer=NBuffer},
    %% process the right state.
    case Status of
        done -> done;
        on_first_line -> parse_first_line(NBuffer, St1, 0);
        on_header -> parse_headers(St1);
        on_body -> parse_body(St1)
    end.
%% Empty lines must be using \r\n.
parse_first_line(<< $\n, _/binary >>, _St, _) ->
    {error, badarg};
%% We limit the length of the first-line to MaxLength to avoid endlessly
%% reading from the socket and eventually crashing.
parse_first_line(Buffer, St=#hparser{type=Type,
                                     max_line_length=MaxLength,
                                     max_empty_lines=MaxEmpty}, Empty) ->
    case match_eol(Buffer, 0) of
        nomatch when byte_size(Buffer) > MaxLength ->
            {error, line_too_long};
        nomatch ->
            %% No complete line yet: ask the caller for more data.
            {more, St#hparser{empty_lines=Empty}};
        1 when Empty =:= MaxEmpty ->
            %% Offset 1 means the buffer starts with a bare CRLF; give
            %% up once too many empty lines precede the start line.
            {error, bad_request};
        1 ->
            %% Skip the leading CRLF (2 bytes) and retry.
            << _:16, Rest/binary >> = Buffer,
            parse_first_line(Rest, St, Empty + 1);
        _ when Type =:= auto ->
            %% Try a request line first, fall back to a response line.
            case parse_request_line(St) of
                {request, _Method, _URI, _Version, _NState} = Req -> Req;
                {error, bad_request} -> parse_response_line(St)
            end;
        _ when Type =:= response ->
            parse_response_line(St);
        _ when Type =:= request ->
            parse_request_line(St)
    end.
%% Return the offset of the first LF in the binary, counted from the
%% initial value of Offset; `nomatch' when the data holds no LF.
match_eol(<< Byte, Rest/bits >>, Offset) ->
    case Byte of
        $\n -> Offset;
        _ -> match_eol(Rest, Offset + 1)
    end;
match_eol(_, _Offset) ->
    nomatch.
%% @doc parse status
%% Split the status line off the buffer at the first CRLF; the rest of
%% the buffer is kept for header parsing.
parse_response_line(#hparser{buffer=Buf}=St) ->
    case binary:split(Buf, <<"\r\n">>) of
        [Line, Rest] ->
            parse_response_version(Line, St#hparser{buffer=Rest});
        _ ->
            {error, bad_request}
    end.
%% Match "HTTP/<High>.<Low> " where both digits are ASCII 0-9, then hand
%% the remainder to the status-code parser. Anything else is rejected.
parse_response_version(<< "HTTP/", High, ".", Low, $\s, Rest/binary >>, St)
        when High >= $0, High =< $9, Low >= $0, Low =< $9 ->
    Version = { High -$0, Low - $0},
    parse_status(Rest, St, Version, <<>>);
parse_response_version(_, _) ->
    {error, bad_request}.
%% Accumulate the status-code digits until a space (or end of line),
%% then parse the rest of the line as the reason phrase. A CR inside the
%% status code is rejected.
parse_status(<<>>, St, Version, Acc) ->
    parse_reason(<<>>, St, Version, Acc);
parse_status(<< C, Rest/bits >>, St, Version, Acc) ->
    case C of
        $\r -> {error, bad_request};
        $\s -> parse_reason(Rest, St, Version, Acc);
        _ -> parse_status(Rest, St, Version, << Acc/binary, C >>)
    end.

%% Finish the status line: convert the accumulated status code and
%% switch the parser to header parsing. Note the argument order: the
%% remaining bytes are the reason phrase, the accumulator is the code.
parse_reason(Reason, St, Version, StatusCode) ->
    %% binary_to_integer/1 avoids the intermediate list of
    %% list_to_integer(binary_to_list(StatusCode)).
    StatusInt = binary_to_integer(StatusCode),
    NState = St#hparser{type=response,
                        version=Version,
                        state=on_header,
                        partial_headers=[]},
    {response, Version, StatusInt, Reason, NState}.
%% Parse a request line ("METHOD URI HTTP/x.y\r\n") from the buffer.
parse_request_line(#hparser{buffer=Buf}=St) ->
    parse_method(Buf, St, <<>>).

%% Accumulate the method token up to the first space; CR is rejected.
parse_method(<< C, Rest/bits >>, St, Acc) ->
    case C of
        $\r ->  {error, bad_request};
        $\s -> parse_uri(Rest, St, Acc);
        _ -> parse_method(Rest, St, << Acc/binary, C >>)
    end.

%% Parse the request target; "*" (asterisk-form) is handled specially.
parse_uri(<< $\r, _/bits >>, _St, _) ->
    {error, bad_request};
parse_uri(<< "* ", Rest/bits >>, St, Method) ->
    parse_version(Rest, St, Method, <<"*">>);
parse_uri(Buffer, St, Method) ->
    parse_uri_path(Buffer, St, Method, <<>>).

%% Accumulate the URI path up to the next space; CR is rejected.
parse_uri_path(<< C, Rest/bits >>, St, Method, Acc) ->
    case C of
        $\r -> {error, bad_request};
        $\s -> parse_version(Rest, St, Method, Acc);
        _ -> parse_uri_path(Rest, St, Method, << Acc/binary, C >>)
    end.

%% Match "HTTP/x.y\r\n", then switch the parser to header parsing with
%% the remaining bytes as the new buffer.
parse_version(<< "HTTP/", High, ".", Low, $\r , $\n, Rest/binary >>, St, Method, URI)
        when High >= $0, High =< $9, Low >= $0, Low =< $9 ->
    Version = { High -$0, Low - $0},
    NState = St#hparser{type=request,
                        version=Version,
                        method=Method,
                        state=on_header,
                        buffer=Rest,
                        partial_headers=[]},
    {request, Method, URI, Version, NState};
parse_version(_, _, _, _) ->
    {error, bad_request}.
%% @doc fetch all headers
%% Thin wrapper over parse_header/1; the case clauses enumerate the
%% only shapes parse_header/1 may return, so anything else crashes
%% here with case_clause instead of propagating silently.
parse_headers(#hparser{}=St) ->
    case parse_header(St) of
        {more, St2} ->
            {more, St2};
        {headers_complete, St2} ->
            {headers_complete, St2};
        {header, KV, St2} ->
            {header, KV, St2}
    end.
%% Parse one header line from the buffer.
parse_header(#hparser{buffer=Buf}=St) ->
    case binary:split(Buf, <<"\r\n">>) of
        [<<>>, Rest] ->
            %% An empty line terminates the header section.
            {headers_complete, St#hparser{buffer=Rest,
                                          state=on_body}};
        [Line, << " ", Rest/binary >> ] ->
            %% Obs-fold: a line starting with SP continues the previous
            %% header; re-join and re-parse.
            NewBuf = iolist_to_binary([Line, " ", Rest]),
            parse_header(St#hparser{buffer=NewBuf});
        [Line, << "\t", Rest/binary >> ] ->
            %% Same folding rule for a leading TAB.
            NewBuf = iolist_to_binary([Line, " ", Rest]),
            parse_header(St#hparser{buffer=NewBuf});
        [Line, Rest]->
            parse_header(Line, St#hparser{buffer=Rest});
        [Buf] ->
            %% No CRLF yet: wait for more data.
            {more, St}
    end.
%% Split one header line at the first colon and cache well-known header
%% values (content-length, transfer-encoding, ...) in the parser state.
parse_header(Line, St) ->
    %% A line without a colon yields an empty value.
    [Key, Value] = case binary:split(Line, <<":">>, [trim]) of
        [K] -> [K, <<>>];
        [K, V] -> [K, parse_header_value(V)]
    end,
    St1 = case hackney_bstr:to_lower(hackney_bstr:trim(Key)) of
        <<"content-length">> ->
            %% A non-numeric length is remembered as bad_int so
            %% parse_body/1 can stop instead of crashing.
            case hackney_util:to_int(Value) of
                {ok, CLen} -> St#hparser{clen=CLen};
                false -> St#hparser{clen=bad_int}
            end;
        <<"transfer-encoding">> ->
            TE = hackney_bstr:to_lower(hackney_bstr:trim(Value)),
            St#hparser{te=TE};
        <<"connection">> ->
            Connection = hackney_bstr:to_lower(hackney_bstr:trim(Value)),
            St#hparser{connection=Connection};
        <<"content-type">> ->
            CType=hackney_bstr:to_lower(hackney_bstr:trim(Value)),
            St#hparser{ctype=CType};
        <<"location">> ->
            Location = hackney_bstr:trim(Value),
            St#hparser{location=Location};
        _ ->
            St
    end,
    {header, {Key, Value}, St1}.

%% Strip surrounding whitespace from a header value.
parse_header_value(H) ->
    hackney_bstr:trim(H).
%% Collect the trailer headers that may follow the last chunk of a
%% chunked body; returns them in received order once the final empty
%% line is seen.
parse_trailers(St, Acc) ->
    case parse_headers(St) of
        {header, Header, St2} -> parse_trailers(St2, [Header | Acc]);
        {headers_complete, St2} -> {ok, lists:reverse(Acc), St2};
        _ -> error
    end.
%% A HEAD response never carries a body, whatever the headers say.
parse_body(#hparser{body_state=waiting, method= <<"HEAD">>, buffer=Buffer}) ->
    {done, Buffer};
%% First call for this body: pick a transfer decoder based on the
%% Transfer-Encoding / Content-Length headers cached in the state.
parse_body(St=#hparser{body_state=waiting, te=TE, clen=Length, buffer=Buffer}) ->
    case {TE, Length} of
        {<<"chunked">>, _} ->
            parse_body(St#hparser{body_state=
                {stream, fun te_chunked/2, {0, 0}, fun ce_identity/1}});
        {_, 0} ->
            {done, Buffer};
        {_, bad_int} ->
            %% Unparseable Content-Length: treat the body as absent.
            {done, Buffer};
        {_, _} ->
            parse_body(
                St#hparser{body_state={stream, fun te_identity/2, {0, Length}, fun ce_identity/1}}
            )
    end;
parse_body(#hparser{body_state=done, buffer=Buffer}) ->
    {done, Buffer};
parse_body(St=#hparser{buffer=Buffer, body_state={stream, _, _, _}}) when byte_size(Buffer) > 0 ->
    %% Feed the buffered bytes to the transfer decoder.
    transfer_decode(Buffer, St#hparser{buffer= <<>>});
parse_body(St) ->
    {more, St, <<>>}.
%% Run the configured transfer decoder (te_chunked/2 or te_identity/2)
%% over Data and translate its result into the parser's public return
%% values. The decoder's state is threaded through body_state.
-spec transfer_decode(binary(), #hparser{})
                     -> {ok, binary(), #hparser{}} | {error, atom()}.
transfer_decode(Data, St=#hparser{
                body_state={stream, TransferDecode,
                            TransferState, ContentDecode},
                buffer=Buf}) ->
    case TransferDecode(Data, TransferState) of
        {ok, Data2, TransferState2} ->
            content_decode(ContentDecode, Data2,
                           St#hparser{body_state= {stream,
                                                   TransferDecode,
                                                   TransferState2,
                                                   ContentDecode}});
        {ok, Data2, Rest, TransferState2} ->
            content_decode(ContentDecode, Data2,
                           St#hparser{buffer=Rest,
                                      body_state={stream,
                                                  TransferDecode,
                                                  TransferState2,
                                                  ContentDecode}});
        {chunk_done, Rest} ->
            %% Last chunk seen: consume any trailers before finishing.
            case parse_trailers(St#hparser{buffer=Rest}, []) of
                {ok, _Trailers, #hparser{buffer=Rest1}} ->
                    {done, Rest1};
                _ ->
                    {done, Rest}
            end;
        {chunk_ok, Chunk, Rest} ->
            {ok, Chunk, St#hparser{buffer=Rest}};
        more ->
            %% Not enough data: keep it buffered and expose the previous
            %% buffer to the caller.
            {more, St#hparser{buffer=Data}, Buf};
        {done, Rest} ->
            {done, Rest};
        {done, Data2, _Rest} ->
            content_decode(ContentDecode, Data2,
                           St#hparser{body_state=done});
        {done, Data2, _Length, Rest} ->
            %% Identity decoding reached Content-Length: emit the final
            %% piece and mark the body done.
            content_decode(ContentDecode, Data2, St#hparser{buffer=Rest,
                                                            body_state=done});
        done ->
            {done, <<>>};
        {error, Reason} ->
            {error, Reason}
    end.
%% Apply the content decoder fun to the decoded transfer data; on
%% success the parser state is returned untouched alongside the result.
-spec content_decode(fun(), binary(), #hparser{})
                    -> {ok, binary(), #hparser{}} | {error, atom()}.
content_decode(Decode, Data, St) ->
    case Decode(Data) of
        {ok, Decoded} -> {ok, Decoded, St};
        {error, _Reason} = Error -> Error
    end.
%% @doc Decode a stream of chunks.
%%
%% Returns:
%%   - `done' when called with no data at all
%%   - `{chunk_done, Rest}' once the terminating zero-size chunk is read
%%   - `{chunk_ok, Chunk, Rest}' for each complete chunk
%%   - `more' when the buffer does not yet hold a full size line / chunk
%%
%% Fix: the previous -spec advertised `{ok, ...}'/`{done, Len, Rest}'
%% shapes that this function never returns; it now matches the
%% implementation (and what transfer_decode/2 consumes).
%% NOTE(review): a `{error, _}' result from read_size/1 falls through to
%% a case_clause crash — unchanged behavior, flagged for follow-up.
-spec te_chunked(binary(), any())
    -> done | more
    | {chunk_done, binary()}
    | {chunk_ok, binary(), binary()}.
te_chunked(<<>>, _) ->
    done;
te_chunked(Data, _) ->
    case read_size(Data) of
        {ok, 0, Rest} ->
            {chunk_done, Rest};
        {ok, Size, Rest} ->
            case read_chunk(Rest, Size) of
                {ok, Chunk, Rest1} ->
                    {chunk_ok, Chunk, Rest1};
                eof ->
                    more
            end;
        eof ->
            more
    end.
%% @doc Decode an identity (Content-Length delimited) stream.
%% The state `{Streamed, Total}' tracks how many bytes were already
%% emitted against the announced total.
-spec te_identity(binary(), {non_neg_integer(), non_neg_integer()})
    -> {ok, binary(), {non_neg_integer(), non_neg_integer()}}
    | {done, binary(), non_neg_integer(), binary()}.
te_identity(Data, {Streamed, Total}) when Streamed + byte_size(Data) < Total ->
    %% Still inside the body: pass the data through and advance the count.
    {ok, Data, {Streamed + byte_size(Data), Total}};
te_identity(Data, {Streamed, Total}) ->
    %% The body ends inside Data: split off the remaining bytes.
    Remaining = Total - Streamed,
    << Last:Remaining/binary, Excess/binary >> = Data,
    {done, Last, Total, Excess}.

%% @doc Decode an identity content (no content encoding applied).
-spec ce_identity(binary()) -> {ok, binary()}.
ce_identity(Data) ->
    {ok, Data}.
%% Read a chunk-size line. The collected hex digits are parsed with
%% io_lib:fread/2; chunk extensions (after ';') and spaces are ignored.
read_size(Data) ->
    case read_size(Data, [], true) of
        {ok, Line, Rest} ->
            case io_lib:fread("~16u", Line) of
                {ok, [Size], _Leftover} -> {ok, Size, Rest};
                _Other -> {error, {poorly_formatted_size, Line}}
            end;
        Eof ->
            Eof
    end.

%% Scan up to the terminating CRLF, collecting characters while Collect
%% is true. A ';' (chunk extension) or space stops collection but the
%% scan continues until CRLF.
read_size(<<>>, _Acc, _Collect) ->
    eof;
read_size(<<"\r\n", Rest/binary>>, Acc, _Collect) ->
    {ok, lists:reverse(Acc), Rest};
read_size(<<$;, Rest/binary>>, Acc, _Collect) ->
    read_size(Rest, Acc, false);
read_size(<<$\s, Rest/binary>>, Acc, _Collect) ->
    read_size(Rest, Acc, false);
read_size(<<C, Rest/binary>>, Acc, true) ->
    read_size(Rest, [C | Acc], true);
read_size(<<_C, Rest/binary>>, Acc, false) ->
    read_size(Rest, Acc, false).
%% Extract a chunk of Size bytes that must be followed by CRLF.
%% With fewer than Size + 2 bytes available the answer is `eof' (wait
%% for more data); with enough bytes but no CRLF terminator the chunk
%% is malformed.
read_chunk(Data, Size) when byte_size(Data) >= Size + 2 ->
    case Data of
        <<Chunk:Size/binary, "\r\n", Rest/binary>> ->
            {ok, Chunk, Rest};
        _ ->
            {error, poorly_formatted_chunked_size}
    end;
read_chunk(_Data, _Size) ->
    eof.
%% @private
%% Fold the user options into the parser record. Unknown options are
%% silently ignored by the final clause.
parse_options([], St) ->
    St;
parse_options([auto | Rest], St) ->
    parse_options(Rest, St#hparser{type=auto});
parse_options([request | Rest], St) ->
    parse_options(Rest, St#hparser{type=request});
parse_options([response | Rest], St) ->
    parse_options(Rest, St#hparser{type=response});
parse_options([{max_line_length, MaxLength} | Rest], St) ->
    parse_options(Rest, St#hparser{max_line_length=MaxLength});
parse_options([{max_empty_lines, MaxEmptyLines} | Rest], St) ->
    parse_options(Rest, St#hparser{max_empty_lines=MaxEmptyLines});
parse_options([_ | Rest], St) ->
    parse_options(Rest, St).
%% Map a public property name to the matching parser record field.
%% An unknown property crashes with function_clause.
get_property(buffer, #hparser{buffer=Buffer}) ->
    Buffer;
get_property(state, #hparser{state=State}) ->
    State;
get_property(version, #hparser{version=Version}) ->
    Version;
get_property(method, #hparser{method=Method}) ->
    Method;
get_property(transfer_encoding, #hparser{te=TE}) ->
    TE;
get_property(content_length, #hparser{clen=CLen}) ->
    CLen;
get_property(connection, #hparser{connection=Connection}) ->
    Connection;
get_property(content_type, #hparser{ctype=CType}) ->
    CType;
get_property(location, #hparser{location=Location}) ->
    Location.
%%% Private Tests
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Regression: header values must be trimmed of trailing whitespace.
%% (Fix: removed non-code residue that trailed the -endif. line.)
parse_response_header_with_trailing_whitespace_test() ->
    Response = <<"Content-Length: 27515  ">>,
    Parser1 = parser([response]),
    {header, {<<"Content-Length">>, Length}, _} = parse_header(Response, Parser1),
    ?assertEqual(<<"27515">>, Length).
-endif.
-module(thoas).
-export([
decode/1, decode/2, encode/1, encode/2, encode_to_iodata/1,
encode_to_iodata/2
]).
-export_type([
decode_error/0, decode_options/0, encode_options/0, json_term/0,
input_term/0
]).
-type decode_options() :: #{
strings => reference | copy
}.
-type encode_options() :: #{
escape => json | unicode | html | javascript
}.
-type json_term() ::
integer() |
float() |
binary() |
list(json_term()) |
#{ binary() => json_term() }.
-type input_term() ::
integer() |
float() |
binary() |
atom() |
list(input_term()) |
list({binary() | atom(), input_term()}) |
#{ binary() | atom() => input_term() }.
-type decode_error() ::
unexpected_end_of_input |
{unexpected_byte, binary(), integer()} |
{unexpected_sequence, binary(), integer()}.
%% Decode JSON into Erlang terms, using the default options.
%%
-spec decode(iodata()) -> {ok, json_term()} | {error, decode_error()}.
decode(Json) ->
    decode(Json, #{}).
%% Decode JSON into Erlang terms.
%%
%% # Options
%%
%% - `strings`
%%   - `reference` (default) - when possible thoas tries to create a
%%     sub-binary into the original
%%   - `copy` - always copies the strings. This option is especially
%%     useful when parts of the decoded data will be stored for a long time (in
%%     ets or some process) to avoid keeping the reference to the original data.
%%
-spec decode(iodata(), decode_options()) ->
    {ok, json_term()} | {error, decode_error()}.
decode(Json, Options) when is_map(Options) ->
    %% Normalise any iodata to a single binary before handing it to the
    %% decoder.
    Binary = iolist_to_binary(Json),
    thoas_decode:decode(Binary, Options).
%% Encode Erlang terms into JSON, using the default options.
%%
%% Throws on invalid input.
%%
-spec encode(input_term()) -> binary().
encode(Term) ->
    encode(Term, #{}).
%% Encode Erlang terms into JSON.
%%
%% Throws on invalid input.
%%
%% # Options
%%
%% - `escape`
%%   - `json` (default) - the regular JSON escaping as defined by RFC 7159.
%%   - `javascript` - additionally escapes the LINE SEPARATOR (U+2028) and
%%     PARAGRAPH SEPARATOR (U+2029) characters to make the produced JSON valid
%%     JavaScript.
%%   - `html` - similar to `javascript`, but also escapes the / character
%%     to prevent XSS.
%%   - `unicode` - escapes all non-ascii characters.
%%
-spec encode(input_term(), encode_options()) -> binary().
encode(Input, Options) when is_map(Options) ->
    iolist_to_binary(thoas_encode:encode(Input, Options)).
%% Encode Erlang terms into JSON as iodata, using the default options.
%%
%% This function should be preferred to encode/2, if the generated JSON will be
%% handed over to one of the IO functions or sent over the socket. The Erlang
%% runtime is able to leverage vectorised writes and avoid allocating a continuous
%% buffer for the whole resulting string, lowering memory use and increasing
%% performance.
%%
%% Throws on invalid input.
%%
-spec encode_to_iodata(input_term()) -> iodata().
encode_to_iodata(Term) ->
    encode_to_iodata(Term, #{}).
%% Encode Erlang terms into JSON as iodata.
%%
%% This function should be preferred to encode/2, if the generated JSON will be
%% handed over to one of the IO functions or sent over the socket. The Erlang
%% runtime is able to leverage vectorised writes and avoid allocating a continuous
%% buffer for the whole resulting string, lowering memory use and increasing
%% performance.
%%
%% Throws on invalid input.
%%
%% # Options
%%
%% - `escape`
%%   - `json` (default) - the regular JSON escaping as defined by RFC 7159.
%%   - `javascript` - additionally escapes the LINE SEPARATOR (U+2028) and
%%     PARAGRAPH SEPARATOR (U+2029) characters to make the produced JSON valid
%%     JavaScript.
%%   - `html` - similar to `javascript`, but also escapes the / character
%%     to prevent XSS.
%%   - `unicode` - escapes all non-ascii characters.
%%
-spec encode_to_iodata(input_term(), encode_options()) -> iodata().
encode_to_iodata(Input, Options) ->
    thoas_encode:encode(Input, Options).
% LSM9DS1 3-axis accelerometer, 3-axis gyroscope, 3-axis magnetometer:
% http://www.st.com/web/en/resource/technical/document/datasheet/DM00103319.pdf
%
% LPS25HB digital barometer
% http://www.st.com/web/en/resource/technical/document/datasheet/DM00141379.pdf
% @doc Driver module for the <a href="https://store.digilentinc.com/pmod-nav-9-axis-imu-plus-barometer/">PmodNAV</a> 9-axis IMU plus barometer device.
%
% For more information see the Tutorial at the GRiSP Wiki: <a href= "https://github.com/grisp/grisp/wiki/PmodNAV-Tutorial">PmodNAV Tutorial</a>.
%
% Start the driver with
% ```
% 1> grisp:add_device(spi1, pmod_nav).
% '''
% @end
-module(pmod_nav).
-behaviour(gen_server).
% API
-export([start_link/2]).
-export([config/2]).
-export([read/2]).
-export([read/3]).
-export([registers/0]).
-export([registers/1]).
% Callbacks
-export([init/1]).
-export([handle_call/3]).
-export([handle_cast/2]).
-export([handle_info/2]).
-export([code_change/3]).
-export([terminate/2]).
-include("grisp.hrl").
-include("pmod_nav.hrl").
-define(SPI_MODE, #{clock => {high, trailing}}).
-type component() :: acc | mag | alt.
-type register() :: atom().
-type opts() :: #{}.
%--- API -----------------------------------------------------------------------
% @private
% Start the driver gen_server for the PmodNAV plugged into Slot.
start_link(Slot, Opts) -> gen_server:start_link(?MODULE, [Slot, Opts], []).
% @doc Change configurations.
%
% === Examples ===
% To switch to accelerometer only mode, i.e., power down the gyroscope, use:
% ```
%  2> pmod_nav:config(acc, #{odr_g => power_down}).
%  ok
% '''
% To turn the gyroscope back on use:
% ```
%  3> pmod_nav:config(acc, #{odr_g => {hz,14.9}}).
%  ok
% '''
%
% For more possible configurations see the datasheets
% <a href="http://www.st.com/web/en/resource/technical/document/datasheet/DM00103319.pdf">LSM9DS1</a>
% and
% <a href="http://www.st.com/web/en/resource/technical/document/datasheet/DM00141379.pdf">LPS25HB</a>.
% Use {@link registers/0} and {@link registers/1} to see the mapping between
% Erlang expressions and the bits on the hardware.
-spec config(component(), #{}) -> ok.
config(Comp, Options) when is_map(Options) -> call({config, Comp, Options}).
% @equiv read(Comp, Registers, #{})
-spec read(component(), [register()]) -> any() | {error, any()}.
read(Comp, Registers) -> read(Comp, Registers, #{}).
% @doc Read registers of a component.
%
% === Examples ===
% To read the accelerometer X, Y and Z axises G-forces in milli g, use:
% ```
%  4> pmod_nav:read(acc, [out_x_xl, out_y_xl, out_z_xl], #{xl_unit => mg}).
%  [50.813,6.527,983.7470000000001]
% '''
%
% === Further Registers ===
% <table border="1" summary="Examples of register entries">
%  <tr><th>Component</th><th>Registers</th><th>Possible Options</th>
%   <th>Description</th>
%  </tr>
%  <tr><td>`acc'</td><td>`[out_x_g, out_y_g, out_z_g]'</td>
%   <td>`#{g_unit => dps | mdps} default dps'</td>
%   <td>Rotation of the axises x,y and z in (milli) degrees per second</td>
%  </tr>
%  <tr><td>`acc'</td><td>`[out_x_xl, out_y_xl, out_z_xl]'</td>
%   <td>`#{xl_unit => g | mg} default g'</td>
%   <td>G-force on the axises x,y and z</td>
%  </tr>
%  <tr><td>`mag'</td><td>`[out_x_m, out_y_m, out_z_m]'</td>
%   <td>`#{mag_unit => gauss | mgauss} default gauss'</td>
%   <td>Strength of the magnetic field in (milli) gauss</td>
%  </tr>
%  <tr><td>`alt'</td><td>`[press_out]'</td>
%   <td></td>
%   <td>Pressure in hPa</td>
%  </tr>
%  <tr><td>`alt'</td><td>`[temp_out]'</td>
%   <td></td>
%   <td>Temperature in °C</td>
%  </tr>
% </table>
%
% For all registers see {@link registers/0} and {@link registers/1}
% and use the datasheets
% <a href="http://www.st.com/web/en/resource/technical/document/datasheet/DM00103319.pdf">LSM9DS1</a>
% and
% <a href="http://www.st.com/web/en/resource/technical/document/datasheet/DM00141379.pdf">LPS25HB</a>
% for a complete description.
%
-spec read(component(), [register()], opts()) -> any() | {error, any()}.
read(Comp, Registers, Opts) when is_list(Registers) ->
    call({read, Comp, Registers, Opts}).
%--- Callbacks -----------------------------------------------------------------
% @private
% Validate the slot for the detected platform, open one SPI bus handle
% per component, check the chip IDs and apply the power-up defaults.
init([Slot, Opts]) ->
    case {grisp_hw:platform(), Slot} of
        {grisp_base, spi1} -> ok;
        {grisp2, spi2} -> ok;
        {P, S} -> error({incompatible_slot, P, S})
    end,
    process_flag(trap_exit, true),
    State = #{
        acc => init_comp(Slot, acc),
        mag => init_comp(Slot, mag),
        alt => init_comp(Slot, alt),
        debug => maps:get(debug, Opts, false)
    },
    #{acc := #{bus := Bus}} = State,
    % Do an empty transfer with default chip select to make sure clock is high
    _ = grisp_spi:transfer(Bus, [{?SPI_MODE, <<16#FF>>}]),
    State1 = verify_device(State),
    State2 = initialize_device(State1),
    grisp_devices:register(Slot, ?MODULE),
    {ok, State2}.
% @private
% Thrown reasons from execute_call/2 are turned into {error, Reason}
% replies instead of crashing the server.
handle_call(Call, _From, State) ->
    try execute_call(Call, State)
    catch throw:Reason -> {reply, {error, Reason}, State}
    end.
% @private
% Casts are not part of the protocol: crash deliberately.
handle_cast(Request, _State) -> error({unknown_cast, Request}).
% @private
% Unexpected messages are not tolerated either.
handle_info(Info, _State) -> error({unknown_info, Info}).
% @private
code_change(_OldVsn, State, _Extra) -> {ok, State}.
% @private
% Power the instruments down and re-enable I2C on shutdown.
terminate(_Reason, State) -> deinitialize_device(State).
%--- Internal ------------------------------------------------------------------
% Forward a request to the default registered pmod_nav instance and
% re-raise any {error, Reason} reply in the caller's process.
call(Call) ->
    Dev = grisp_devices:default(?MODULE),
    case gen_server:call(Dev#device.pid, Call) of
        {error, Reason} -> error(Reason);
        Result -> Result
    end.
% Build the per-component state: an SPI bus handle bound to the
% component's chip-select pin, the register table, the reverse
% option-to-register map and an empty register cache.
init_comp(Slot, Comp) ->
    Regs = registers(Comp),
    #{
        bus => grisp_spi:open(Slot, pin(Slot, Comp)),
        regs => Regs,
        rev => reverse_opts(Regs),
        cache => #{}
    }.
% Dispatch a gen_server call to the matching operation.
execute_call({config, Comp, Options}, State) ->
    {Result, NewState} = write_config(State, Comp, Options),
    {reply, Result, NewState};
execute_call({read, Comp, Registers, Opts}, State) ->
    {Result, NewState} = read_and_convert(State, Comp, Registers, Opts),
    {reply, Result, NewState};
execute_call(Request, _State) ->
    error({unknown_call, Request}).
% Apply the power-up defaults: enable all axes on accelerometer, gyro
% and magnetometer, power up the barometer and disable I2C everywhere.
initialize_device(State) ->
    configure_components(State, [
        {acc, #{ % Accelerometer
            zen_xl => enabled,      % Z axis
            yen_xl => enabled,      % y axis
            xen_xl => enabled,      % X axis
            odr_xl => {hz, 10},     % Output data rate
            fs_xl => {g, 2},        % Full-scale
            i2c_disable => true     % I2C disabled
        }},
        {acc, #{ % Gyro
            zen_g => enabled,       % Z axis
            yen_g => enabled,       % Y axis
            xen_g => enabled,       % X axis
            odr_g => {hz, 14.9}     % Output data rate
        }},
        {mag, #{
            om => ultra_high,       % X and Y axis performance
            do => {hz, 10},         % Output data rate
            fs => {gauss, 4},       % Full-scale
            i2c_disable => true,    % I2C disabled
            md => continuous_conversion, % Operating mode
            omz => ultra_high,      % Z axis performance
            bdu => continuous       % Output regs update mode
        }},
        {alt, #{
            pd => active            % Power up instrument
        }},
        {alt, #{
            odr => {hz, 7},         % Output data rate
            i2c_dis => true         % I2C disabled
        }}
    ]).
% Mirror of initialize_device/1: power everything down and restore I2C
% access before the driver terminates.
deinitialize_device(State) ->
    configure_components(State, [
        {acc, #{ % Accelerometer
            zen_xl => disabled,     % Z axis
            yen_xl => disabled,     % y axis
            xen_xl => disabled,     % X axis
            odr_xl => power_down    % Output data rate
        }},
        {acc, #{
            i2c_disable => false    % I2C disabled
        }},
        {acc, #{ % Gyro
            zen_g => disabled,      % Z axis
            yen_g => disabled,      % Y axis
            xen_g => disabled,      % X axis
            odr_g => power_down     % Output data rate
        }},
        {mag, #{
            md => power_down        % Power down instrument
        }},
        {alt, #{
            i2c_dis => false,       % I2C disabled
            pd => power_down        % Power down instrument
        }}
    ]).
% Apply a list of {Component, Options} configuration steps in order,
% threading the state through each write_config/3 call.
configure_components(State, Config) ->
    Apply = fun({Comp, Opts}, AccState) ->
        {_Result, NextState} = write_config(AccState, Comp, Opts),
        NextState
    end,
    lists:foldl(Apply, State, Config).
% Check the WHO_AM_I register of each instrument against its fixed
% chip ID to make sure a PmodNAV is actually plugged in.
verify_device(State) ->
    lists:foldl(fun verify_reg/2, State, [
        {acc, who_am_i, <<2#01101000>>},
        {mag, who_am_i_m, <<2#00111101>>},
        {alt, who_am_i, <<2#10111101>>}
    ]).
% Read one identification register and crash when it does not hold the
% expected value.
verify_reg({Comp, Reg, Expected}, State) ->
    case read_and_convert(State, Comp, [Reg], #{}) of
        {[Expected], NewState} -> NewState;
        {[Other], _NewState} -> error({register_mismatch, Comp, Reg, Other})
    end.
% Write a set of options to a component. Options are grouped per target
% register; the current register value is taken from the cache (or read
% from the device) and only the requested bit fields are changed before
% writing back. The updated bytes are stored in the cache.
write_config(State, Comp, Options) ->
    #{Comp := #{rev := Rev, cache := Cache, regs := Regs}} = State,
    Partitions = partition(Options, Rev),
    NewCache = maps:map(fun(Reg, Opts) ->
        Bin = case maps:find(Reg, Cache) of
            {ok, Value} -> Value;
            error       -> read_bin(State, Comp, Reg)
        end,
        {Addr, read_write, _Size, Conv} = maps:get(Reg, Regs),
        NewBin = render_bits(Conv, Bin, Opts),
        write_bin(State, Comp, Addr, NewBin)
    end, Partitions),
    {ok, mapz:deep_put([Comp, cache], NewCache, State)}.
% Group the user options by the register that owns each option, using
% the reverse option-to-register map. An option without a register
% throws {unknown_option, Name}.
partition(Options, RevOpts) ->
    Insert = fun({Opt, Val}, Acc) ->
        case maps:find(Opt, RevOpts) of
            {ok, Reg} ->
                Group = maps:get(Reg, Acc, #{}),
                Acc#{Reg => Group#{Opt => Val}};
            error ->
                throw({unknown_option, Opt})
        end
    end,
    lists:foldl(Insert, #{}, maps:to_list(Options)).
% Build the reverse map from option name to the read-write register
% that holds it. Read-only registers and padding fields (key 0 in the
% conversion list) are skipped.
reverse_opts(Registers) ->
    maps:fold(fun
        (Reg, {_Addr, read_write, _Size, Conv}, Rev) when is_list(Conv) ->
            Fields = [F || F <- proplists:get_keys(Conv), F =/= 0],
            lists:foldl(fun(Field, Acc) -> Acc#{Field => Reg} end, Rev, Fields);
        (_Reg, _Spec, Rev) ->
            Rev
    end, #{}, Registers).
% Read the raw bytes for each register, then convert them to their
% user-facing representation.
read_and_convert(State, Comp, Registers, Opts) ->
    {Values, NewState} = read_regs(State, Comp, Registers),
    convert_regs(NewState, Comp, Opts, Values).
% Read each register from the device; writable registers are also
% stored in the cache so later writes start from a known value.
read_regs(State, Comp, Registers) ->
    {Values, NewCache} = lists:foldl(fun(Reg, {Acc, Cache}) ->
        Value = read_bin(State, Comp, Reg),
        NewC = case mapz:deep_get([Comp, regs, Reg], State) of
            {_Addr, read_write, _Size, Conv} when is_list(Conv) ->
                maps:put(Reg, Value, Cache);
            _ReadOnlyReg ->
                Cache
        end,
        {[{Reg, Value}|Acc], NewC}
    end, {[], mapz:deep_get([Comp, cache], State)}, Registers),
    {lists:reverse(Values), mapz:deep_put([Comp, cache], NewCache, State)}.
% Convert each {Register, RawValue} pair, threading the state through
% because fun-based converters may update it.
convert_regs(State, Comp, Opts, Values) ->
    {Results, NewState} = lists:foldl(fun(R, {Acc, S}) ->
        {Result, NewS} = convert_reg(S, Comp, Opts, R),
        {[Result|Acc], NewS}
    end, {[], State}, Values),
    {lists:reverse(Results), NewState}.
% Convert a single raw register value. The conversion spec is either a
% fun (unit conversion), a bit-field list (parsed into an options map)
% or a plain type tag passed to decode/2.
convert_reg(State, Comp, Opts, {Reg, Value}) ->
    % TODO: Move component upwards
    case mapz:deep_get([Comp, regs, Reg], State) of
        {_Addr, _RW, _Size, Conv} when is_function(Conv) ->
            {Converted, {Comp, NewState}} = Conv(Value, {Comp, State}, Opts),
            {Converted, NewState};
        {_Addr, _RW, _Size, Conv} when is_list(Conv) ->
            % FIXME: Put reg in cache here instead of in read_regs!!!?
            NewS = mapz:deep_put([Comp, cache, Reg], Value, State),
            {parse_bits(Conv, Value), NewS};
        {_Addr, _RW, _Size, Type} ->
            {decode(Type, Value), State}
    end.
% Write raw bytes to a register address over SPI; returns the written
% value so callers can cache it. The write itself yields no response
% bytes (hence the <<>> match).
write_bin(#{debug := Debug} = State, Comp, Reg, Value) ->
    #{Comp := #{bus := Bus}} = State,
    [debug_write(Comp, Reg, Value) || Debug],
    <<>> = request(Bus, write_request(Comp, Reg, Value), 0),
    Value.
% Read a register's raw bytes over SPI; the register's byte size comes
% from the component's register table. Unknown registers throw.
read_bin(#{debug := Debug} = State, Comp, Reg) ->
    #{Comp := #{bus := Bus}} = State,
    case mapz:deep_find([Comp, regs, Reg], State) of
        {ok, {Addr, _RW, Size, _Conv}} ->
            Result = request(Bus, read_request(Comp, Addr), Size),
            [debug_read(Comp, Addr, Result) || Debug],
            Result;
        error ->
            throw({unknown_register, Comp, Reg})
    end.
%% @private
%% Build the SPI frame for a register write. The accelerometer uses a
%% 1-bit R/W flag plus a 7-bit address; the magnetometer and altimeter
%% use a 1-bit R/W flag, an ?MS_INCR flag (presumably address
%% auto-increment -- macro defined in the included header) and a 6-bit
%% address, followed by the payload bytes.
write_request(acc, Reg, Val) -> <<?RW_WRITE:1, Reg:7, Val/binary>>;
write_request(mag, Reg, Val) -> <<?RW_WRITE:1, ?MS_INCR:1, Reg:6, Val/binary>>;
write_request(alt, Reg, Val) -> <<?RW_WRITE:1, ?MS_INCR:1, Reg:6, Val/binary>>.
%% @private
%% Build the SPI command byte for a register read; same address layout
%% per component as write_request/3 but with the R/W flag set to read
%% and no payload.
read_request(acc, Reg) -> <<?RW_READ:1, Reg:7>>;
read_request(mag, Reg) -> <<?RW_READ:1, ?MS_INCR:1, Reg:6>>;
read_request(alt, Reg) -> <<?RW_READ:1, ?MS_INCR:1, Reg:6>>.
%% @private
%% Perform one SPI transfer on `Bus': send `Request' and clock in `Pad'
%% additional bytes. Returns the response binary (empty when Pad is 0,
%% as asserted by write_bin/4).
request(Bus, Request, Pad) ->
    [Response] = grisp_spi:transfer(Bus, [
        {?SPI_MODE, Request, byte_size(Request), Pad}
    ]),
    Response.
%% @private
%% Entry point for render_bits/4, starting at bit position 0.
render_bits(Defs, Bin, Opts) ->
    render_bits(Defs, Bin, Opts, 0).
%% @private
%% Render a bit-field definition list `Defs' over an existing register
%% image `Bin': fields named in the `Opts' map are overwritten, other
%% bits are left untouched, and `{0, Size}' padding entries are always
%% zeroed. Fails with a function_clause error if the definitions do not
%% exactly cover the width of `Bin'.
render_bits([{Name, Size, Mapping}|Defs], Bin, Opts, Pos) ->
    NewBin = case maps:find(Name, Opts) of
        {ok, Value} ->
            Bits = case {Value, Mapping} of
                % Caller supplied a raw bitstring of exactly the field
                % width: use it verbatim, whatever the declared mapping.
                {<<Value/bitstring>>, _} when bit_size(Value) == Size ->
                    Value;
                % Symbolic value: translate through the mapping table.
                {Value, Mapping} when is_map(Mapping) ->
                    case maps:find(Value, Mapping) of
                        {ok, Mapped} -> <<Mapped:Size>>;
                        error -> throw({invalid_value, Name, Value})
                    end;
                % Plain type (unsigned_little, signed_little, raw, ...).
                {Value, Type} ->
                    encode(Name, Type, Size, Value)
            end,
            grisp_bitmap:set_bits(Bin, Pos, Bits);
        error ->
            % Option not given: keep the existing bits for this field.
            Bin
    end,
    render_bits(Defs, NewBin, Opts, Pos + Size);
render_bits([{0, Size}|Defs], Bin, Opts, Pos) ->
    NewBin = grisp_bitmap:set_bits(Bin, Pos, <<0:Size>>),
    render_bits(Defs, NewBin, Opts, Pos + Size);
render_bits([], Bin, _Opts, Pos) when bit_size(Bin) == Pos ->
    Bin.
parse_bits(Conv, Bin) -> parse_bits(Conv, Bin, #{}).
%% @private
%% Walk the bit-field definitions and the binary in lock step, decoding
%% each named field and skipping `{0, Size}' padding. Both lists must be
%% exhausted together or the final clause fails to match.
parse_bits([{Name, Size, Type}|Conv], Bin, Opts) ->
    <<Raw:Size/bitstring, Rest/bitstring>> = Bin,
    parse_bits(Conv, Rest, maps:put(Name, decode(Type, Raw), Opts));
parse_bits([{0, Size}|Conv], Bin, Opts) ->
    <<_:Size, Rest/bitstring>> = Bin,
    parse_bits(Conv, Rest, Opts);
parse_bits([], <<>>, Opts) ->
    Opts.
%% @private
%% Encode `Value' into a `Size'-bit binary according to `Type'. A raw
%% type accepts either a bitstring of at most `Size' bits or an
%% unsigned integer. Throws `{invalid_value, Name, Value}' when the
%% value does not fit the declared type.
encode(Name, Type, Size, Value) ->
    case {Type, Value} of
        {unsigned_little, V} when is_integer(V) ->
            <<V:Size/unsigned-little>>;
        {{unsigned_little, Min, Max}, V}
                when is_integer(V), V >= Min, V =< Max ->
            <<V:Size/unsigned-little>>;
        {signed_little, V} when is_integer(V) ->
            <<V:Size/signed-little>>;
        {raw, V} when is_bitstring(V), bit_size(V) =< Size ->
            <<V:Size/bitstring>>;
        {raw, V} when is_integer(V) ->
            <<V:Size/unsigned-integer>>;
        _ ->
            throw({invalid_value, Name, Value})
    end.
%% @private
%% Decode a register field binary according to its declared type: raw
%% bitstrings pass through, integer types are read little-endian over
%% the whole binary, and map types translate the unsigned value back to
%% its symbolic name (falling back to the raw binary for unknown values).
decode(raw, Bin) ->
    Bin;
decode(unsigned_little, Bin) ->
    decode_int(Bin, unsigned);
decode({unsigned_little, _Min, _Max}, Bin) ->
    % Range bounds only constrain encoding; decoding ignores them.
    decode_int(Bin, unsigned);
decode(signed_little, Bin) ->
    decode_int(Bin, signed);
decode(Mapping, Bin) when is_map(Mapping) ->
    Key = decode_int(Bin, unsigned),
    case maps:find(Key, mapz:inverse(Mapping)) of
        {ok, Name} -> Name;
        error -> Bin
    end.

%% @private
%% Read the whole binary as a little-endian integer of the given sign.
decode_int(Bin, unsigned) ->
    Bits = bit_size(Bin),
    <<N:Bits/unsigned-little>> = Bin,
    N;
decode_int(Bin, signed) ->
    Bits = bit_size(Bin),
    <<N:Bits/signed-little>> = Bin,
    N.
%% @private
%% Look up the current value of option `Opt' within register `Reg' for
%% the component held in the `{Comp, State}' context. Prefers the cached
%% raw register value; otherwise reads and converts the register over
%% the bus (which updates the state). Returns `{Value, NewContext}'.
%% Used by the conversion funs to pick up the configured full-scale.
setting(Reg, Opt, {Comp, State}) ->
    {_Addr, _RW, _RegSize, Conv} = mapz:deep_get([Comp, regs, Reg], State),
    {Parsed, NewState} = case mapz:deep_find([Comp, cache, Reg], State) of
        % Wrapped in a list to match read_and_convert/4's return shape.
        {ok, Cached} -> {[parse_bits(Conv, Cached)], State};
        error -> read_and_convert(State, Comp, [Reg], #{})
    end,
    {maps:get(Opt, hd(Parsed)), {Comp, NewState}}.
%% @private
%% Map a component to the slot pin used as its chip select.
pin(Slot, acc) -> atom_join(Slot, '_pin1');
pin(Slot, mag) -> atom_join(Slot, '_pin9');
pin(Slot, alt) -> atom_join(Slot, '_pin10').

%% @private
%% Concatenate two atoms into a new atom (e.g. spi1 + '_pin1').
atom_join(Atom1, Atom2) ->
    list_to_atom(lists:concat([Atom1, Atom2])).
% @doc Get the registers (with the possible entries) of all components.
% The spec previously declared the inner value as `#{}' (the empty map
% type); it now matches registers/1's declared return type.
-spec registers() -> #{component() => #{atom() => any()}}.
registers() ->
    #{
        acc => registers(acc),
        mag => registers(mag),
        alt => registers(alt)
    }.
% @doc Get the registers (with the possible entries) of one component.
%
% === Example ===
% To see the possible configurations in `ctrl_reg1_g' use:
% ```
% 4> maps:find(ctrl_reg1_g, pmod_nav:registers(acc)).
% {ok,{16,read_write,1,
% [{odr_g,3,
% #{power_down => 0,
% {hz,119} => 3,
% {hz,238} => 4,
% {hz,476} => 5,
% {hz,952} => 6,
% {hz,14.9} => 1,
% {hz,59.5} => 2}},
% {fs_g,2,#{{dps,245} => 0,{dps,500} => 1,{dps,2000} => 3}},
% {0,1},
% {bw_g,2,raw}]}}
% '''
%
-spec registers(component()) -> #{atom() => any()}.
registers(acc) ->
#{
act_ths => {16#04, read_write, 1, [
{sleep_on_inact_en, 1, #{
gyroscope_power_down => 0,
gyroscope_sleep => 1
}},
{act_ths, 7, unsigned_little}
]},
act_dur => {16#05, read_write, 1, [{act_dur, 8, unsigned_little}]},
int_gen_cfg_xl => {16#06, read_write, 1, [
{aoi_xl, 1, #{or_combination => 0, and_combination => 1}},
{'6d', 1, #{disabled => 0, enabled => 1}},
{zhie_xl, 1, #{disabled => 0, enabled => 1}},
{zlie_xl, 1, #{disabled => 0, enabled => 1}},
{yhie_xl, 1, #{disabled => 0, enabled => 1}},
{ylie_xl, 1, #{disabled => 0, enabled => 1}},
{xhie_xl, 1, #{disabled => 0, enabled => 1}},
{xlie_xl, 1, #{disabled => 0, enabled => 1}}
]},
int_gen_ths_x_xl => {16#07, read_write, 1, [
{ths_xl_x, 8, unsigned_little}
]},
int_gen_ths_y_xl => {16#08, read_write, 1, [
{ths_xl_y, 8, unsigned_little}
]},
int_gen_ths_z_xl => {16#09, read_write, 1, [
{ths_xl_z, 8, unsigned_little}
]},
int_gen_dur_xl => {16#0A, read_write, 1, [
{wait_xl, 1, #{off => 0, on => 1}},
{dur_xl, 7, unsigned_little}
]},
reference_g => {16#0B, read_write, 1, [{ref_g, 8, raw}]},
int1_ctrl => {16#0C, read_write, 1, [
{int1_ig_g, 1, #{disabled => 0, enabled => 1}},
{int_ig_xl, 1, #{disabled => 0, enabled => 1}},
{int_fss5, 1, #{disabled => 0, enabled => 1}},
{int_ovr, 1, #{disabled => 0, enabled => 1}},
{int_fth, 1, #{disabled => 0, enabled => 1}},
{int_boot, 1, #{disabled => 0, enabled => 1}},
{int_drdy_g, 1, #{disabled => 0, enabled => 1}},
{int_drdy_xl, 1, #{disabled => 0, enabled => 1}}
]},
int2_ctrl => {16#0D, read_write, 1, [
{int2_inact, 1, #{false => 0, true => 1}},
{0, 1},
{int2_fss5, 1, #{disabled => 0, enabled => 1}},
{int2_ovr, 1, #{disabled => 0, enabled => 1}},
{int2_fth, 1, #{disabled => 0, enabled => 1}},
{int2_drdy_temp, 1, #{disabled => 0, enabled => 1}},
{int2_drdy_g, 1, #{disabled => 0, enabled => 1}},
{int2_drdy_xl, 1, #{disabled => 0, enabled => 1}}
]},
who_am_i => {16#0F, read, 1, raw},
ctrl_reg1_g => {16#10, read_write, 1, [
{odr_g, 3, #{
power_down => 2#000,
{hz, 14.9} => 2#001,
{hz, 59.5} => 2#010,
{hz, 119} => 2#011,
{hz, 238} => 2#100,
{hz, 476} => 2#101,
{hz, 952} => 2#110
}},
{fs_g, 2, #{
{dps, 245} => 2#00,
{dps, 500} => 2#01,
{dps, 2000} => 2#11
}},
{0, 1},
{bw_g, 2, raw}
]},
ctrl_reg2_g => {16#11, read_write, 1, [
{0, 4},
{int_sel, 2, raw},
{out_sel, 2, raw}
]},
ctrl_reg3_g => {16#12, read_write, 1, [
{lp_mode, 1, #{disabled => 0, enabled => 1}},
{hp_en, 1, #{disabled => 0, enabled => 1}},
{0, 2},
{hpcf_g, 4, raw}
]},
orient_cfg_g => {16#13, read_write, 1, [
{0, 2},
{signx_g, 1, #{positive => 0, negative => 1}},
{signy_g, 1, #{positive => 0, negative => 1}},
{signz_g, 1, #{positive => 0, negative => 1}},
{orient, 3, raw}
]},
int_gen_src_g => {16#14, read, 1, [
{0, 1},
{ia_g, 1, #{false => 0, true => 1}},
{zh_g, 1, #{false => 0, true => 1}},
{zl_g, 1, #{false => 0, true => 1}},
{yh_g, 1, #{false => 0, true => 1}},
{yl_g, 1, #{false => 0, true => 1}},
{xh_g, 1, #{false => 0, true => 1}},
{xl_g, 1, #{false => 0, true => 1}}
]},
out_temp => {16#15, read, 2, fun convert_temp/3},
out_temp_l => {16#15, read, 1, raw},
out_temp_h => {16#16, read, 1, raw},
status_reg => {16#17, read, 1, [
{0, 1},
{ig_xl, 1, #{false => 0, true => 1}},
{ig_g, 1, #{false => 0, true => 1}},
{inact, 1, #{false => 0, true => 1}},
{boot_status, 1, #{no_boot_running => 0, boot_running => 1}},
{tda, 1, #{false => 0, true => 1}},
{gda, 1, #{false => 0, true => 1}},
{xlda, 1, #{false => 0, true => 1}}
]},
out_x_g => {16#18, read, 2, fun convert_dps/3},
out_y_g => {16#1A, read, 2, fun convert_dps/3},
out_z_g => {16#1C, read, 2, fun convert_dps/3},
ctrl_reg4 => {16#1E, read_write, 1, [
{0, 2},
{zen_g, 1, #{disabled => 0, enabled => 1}},
{yen_g, 1, #{disabled => 0, enabled => 1}},
{xen_g, 1, #{disabled => 0, enabled => 1}},
{0, 1},
{lir_xl1, 1, #{false => 0, true => 1}},
{'4d_xl1', 1, #{'6d' => 0, '4d' => 1}}
]},
ctrl_reg5_xl => {16#1F, read_write, 1, [
{dec, 2, #{
no_decimation => 2#00,
{samples, 2} => 2#01,
{samples, 4} => 2#10,
{samples, 8} => 2#11
}},
{zen_xl, 1, #{disabled => 0, enabled => 1}},
{yen_xl, 1, #{disabled => 0, enabled => 1}},
{xen_xl, 1, #{disabled => 0, enabled => 1}},
{0, 3}
]},
ctrl_reg6_xl => {16#20, read_write, 1, [ % FIXME: Verify default settings when booting the device!
{odr_xl, 3, #{
power_down => 2#000,
{hz, 10} => 2#001,
{hz, 50} => 2#010,
{hz, 119} => 2#011,
{hz, 238} => 2#100,
{hz, 476} => 2#101,
{hz, 952} => 2#110
}},
{fs_xl, 2, #{
{g, 2} => 2#00,
{g, 4} => 2#10,
{g, 8} => 2#11,
{g, 16} => 2#01
}},
{bw_scal_odr, 1, #{odr => 2#0, bw_xl => 2#1}},
{bw_xl, 2, #{
{hz, 408} => 2#00,
{hz, 211} => 2#01,
{hz, 105} => 2#10,
{hz, 50} => 2#11
}}
]},
ctrl_reg7_xl => {16#21, read_write, 1, [
{hr, 1, #{disabled => 0, enabled => 1}},
{dcf, 2, raw},
{0, 2},
{fds, 1, #{disabled => 0, enabled => 1}},
{0, 1},
{hpis1, 1, #{disabled => 0, enabled => 1}}
]},
ctrl_reg8 => {16#22, read_write, 1, [
{boot, 1, #{normal => 0, reboot_memory => 1}},
{bdu, 1, #{continuous => 0, read => 1}},
{h_lactive, 1, #{high => 0, low => 1}},
{pp_od, 1, #{push_pull => 0, open_drain => 1}},
{sim, 1, #{'4-wire' => 0, '3-wire' => 1}},
{if_add_inc, 1, #{disabled => 0, enabled => 1}},
{ble, 1, #{lsb => 0, msb => 1}},
{sw_reset, 1, #{normal => 0, reset => 1}}
]},
ctrl_reg9 => {16#23, read_write, 1, [
{0, 1},
{sleep_g, 1, #{disabled => 0, enabled => 1}},
{0, 1},
{fifo_temp_en, 1, #{disabled => 0, enabled => 1}},
{drdy_mask_bit, 1, #{disabled => 0, enabled => 1}},
{i2c_disable, 1, #{false => 0, true => 1}},
{fifo_en, 1, #{disabled => 0, enabled => 1}},
{stop_on_fth, 1, #{false => 0, true => 1}}
]},
ctrl_reg10 => {16#24, read_write, 1, [
{0, 5},
{st_g, 1, #{disabled => 0, enable_csd => 1}},
{0, 1},
{st_xl, 1, #{disabled => 0, enabled => 1}}
]},
int_gen_src_xl => {16#26, read, 1, [
{0, 1},
{ia_xl, 1, #{false => 0, true => 1}},
{zh_xl, 1, #{false => 0, true => 1}},
{zl_xl, 1, #{false => 0, true => 1}},
{yh_xl, 1, #{false => 0, true => 1}},
{yl_xl, 1, #{false => 0, true => 1}},
{xh_xl, 1, #{false => 0, true => 1}},
{xl_xl, 1, #{false => 0, true => 1}}
]},
status_reg2 => {16#27, read, 1, [
{0, 1},
{ig_xl, 1, #{false => 0, true => 1}},
{ig_g, 1, #{false => 0, true => 1}},
{inact, 1, #{false => 0, true => 1}},
{boot_status, 1, #{no_boot_running => 0, boot_running => 1}},
{tda, 1, #{false => 0, true => 1}},
{gda, 1, #{false => 0, true => 1}},
{xlda, 1, #{false => 0, true => 1}}
]},
out_x_xl => {16#28, read, 2, fun convert_g/3},
out_x_l_xl => {16#28, read, 1, raw},
out_x_h_xl => {16#29, read, 1, raw},
out_y_xl => {16#2A, read, 2, fun convert_g/3},
out_y_l_xl => {16#2A, read, 1, raw},
out_y_h_xl => {16#2B, read, 1, raw},
out_z_xl => {16#2C, read, 2, fun convert_g/3},
out_z_l_xl => {16#2C, read, 1, raw},
out_z_h_xl => {16#2D, read, 1, raw},
fifo_ctrl => {16#2E, read_write, 1, [
{fmode, 3, raw},
{fth, 5, unsigned_little}
]},
fifo_src => {16#2F, read, 1, [
{fth, 1, #{false => 0, true => 1}},
{ovrn, 1, #{false => 0, true => 1}},
{fss, 6, unsigned_little}
]},
int_gen_cfg_g => {16#30, read_write, 1, [
{aoi_g, 1, #{'or' => 0, 'and' => 1}},
{lir_g, 1, #{false => 0, true => 1}},
{zhie_g, 1, #{disabled => 0, enabled => 1}},
{zlie_g, 1, #{disabled => 0, enabled => 1}},
{yhie_g, 1, #{disabled => 0, enabled => 1}},
{ylie_g, 1, #{disabled => 0, enabled => 1}},
{xhie_g, 1, #{disabled => 0, enabled => 1}},
{xlie_g, 1, #{disabled => 0, enabled => 1}}
]},
int_gen_ths_x_g => {16#31, read_write, 2, [
{dcrm_g, 1, #{reset => 0, decrement => 1}},
{ths_g_x, 15, signed_little}
]},
int_gen_ths_xh_g => {16#31, read_write, 1, raw},
int_gen_ths_xl_g => {16#32, read_write, 1, raw},
int_gen_ths_y_g => {16#33, read_write, 2, [
{0, 1},
{ths_g_y, 15, signed_little}
]},
int_gen_ths_yh_g => {16#33, read_write, 1, raw},
int_gen_ths_yl_g => {16#34, read_write, 1, raw},
int_gen_ths_z_g => {16#35, read_write, 2, [
{0, 1},
{ths_g_z, 15, signed_little}
]},
int_gen_ths_zh_g => {16#35, read_write, 1, raw},
int_gen_ths_zl_g => {16#36, read_write, 1, raw},
int_gen_dur_g => {16#37, read_write, 1, [
{wait_g, 1, #{disabled => 0, enabled => 1}},
{dur_g, 7, unsigned_little}
]}
};
registers(mag) ->
    #{
        offset_x_reg_m => {16#05, read_write, 2, [
            {ofxm, 16, signed_little}
        ]},
        offset_x_reg_l_m => {16#05, read_write, 1, raw},
        offset_x_reg_h_m => {16#06, read_write, 1, raw},
        offset_y_reg_m => {16#07, read_write, 2, [
            {ofym, 16, signed_little}
        ]},
        offset_y_reg_l_m => {16#07, read_write, 1, raw},
        offset_y_reg_h_m => {16#08, read_write, 1, raw},
        offset_z_reg_m => {16#09, read_write, 2, [
            {ofzm, 16, signed_little}
        ]},
        offset_z_reg_l_m => {16#09, read_write, 1, raw},
        offset_z_reg_h_m => {16#0A, read_write, 1, raw},
        who_am_i_m => {16#0F, read, 1, raw},
        ctrl_reg1_m => {16#20, read_write, 1, [
            {temp_comp, 1, #{disabled => 0, enabled => 1}},
            {om, 2, #{
                low => 2#00,
                medium => 2#01,
                high => 2#10,
                ultra_high => 2#11
            }},
            {do, 3, #{
                {hz, 0.625} => 2#000,
                {hz, 1.25} => 2#001,
                {hz, 2.5} => 2#010,
                {hz, 5} => 2#011,
                {hz, 10} => 2#100,
                {hz, 20} => 2#101,
                {hz, 40} => 2#110,
                {hz, 80} => 2#111
            }},
            {fast_odr, 1, #{disabled => 0, enabled => 1}},
            {st, 1, #{disabled => 0, enabled => 1}}
        ]},
        ctrl_reg2_m => {16#21, read_write, 1, [
            {0, 1},
            {fs, 2, #{
                {gauss, 4} => 2#00,
                {gauss, 8} => 2#01,
                {gauss, 12} => 2#10,
                {gauss, 16} => 2#11
            }},
            {0, 1},
            {reboot, 1, #{normal => 0, reboot_memory => 1}},
            {soft_rst, 1, #{default => 0, reset => 1}},
            {0, 2}
        ]},
        ctrl_reg3_m => {16#22, read_write, 1, [
            {i2c_disable, 1, #{false => 0, true => 1}},
            {0, 1},
            {lp, 1, #{false => 0, true => 1}},
            {0, 2},
            {sim, 1, #{write_only => 0, read_write => 1}},
            {md, 2, #{
                continuous_conversion => 2#00,
                single_conversion => 2#01,
                power_down => 2#11
            }}
        ]},
        ctrl_reg4_m => {16#23, read_write, 1, [
            {0, 4},
            {omz, 2, #{
                low => 2#00,
                medium => 2#01,
                high => 2#10,
                ultra_high => 2#11
            }},
            {ble, 1, #{lsb => 0, msb => 1}},
            {0, 1}
        ]},
        ctrl_reg5_m => {16#24, read_write, 1, [
            % FIX: 'enabled' previously mapped to 0 -- the same value as
            % 'disabled' -- so FAST_READ could never actually be set and
            % decoding this bit was ambiguous. Per the magnetometer
            % datasheet FAST_READ is enabled with 1.
            {fast_read, 1, #{disabled => 0, enabled => 1}},
            {bdu, 1, #{continuous => 0, read => 1}},
            {0, 6}
        ]},
        status_reg_m => {16#27, read, 1, [
            {zyxor, 1, #{false => 0, true => 1}},
            {zor, 1, #{false => 0, true => 1}},
            {yor, 1, #{false => 0, true => 1}},
            {'xor', 1, #{false => 0, true => 1}},
            {zyxda, 1, #{false => 0, true => 1}},
            {zda, 1, #{false => 0, true => 1}},
            {yda, 1, #{false => 0, true => 1}},
            {xda, 1, #{false => 0, true => 1}}
        ]},
        out_x_m => {16#28, read, 2, fun convert_gauss/3},
        out_x_l_m => {16#28, read, 1, raw},
        out_x_h_m => {16#29, read, 1, raw},
        out_y_m => {16#2A, read, 2, fun convert_gauss/3},
        out_y_l_m => {16#2A, read, 1, raw},
        out_y_h_m => {16#2B, read, 1, raw},
        out_z_m => {16#2C, read, 2, fun convert_gauss/3},
        out_z_l_m => {16#2C, read, 1, raw},
        out_z_h_m => {16#2D, read, 1, raw},
        int_cfg_m => {16#30, read_write, 1, [
            {xien, 1, #{disabled => 0, enabled => 1}},
            {yien, 1, #{disabled => 0, enabled => 1}},
            {zien, 1, #{disabled => 0, enabled => 1}},
            {0, 2},
            {iea, 1, #{low => 0, high => 1}},
            {iel, 1, #{false => 0, true => 1}},
            {ien, 1, #{disabled => 0, enabled => 1}}
        ]},
        int_src_m => {16#31, read, 1, [
            {pth_x, 1, #{false => 0, true => 1}},
            {pth_y, 1, #{false => 0, true => 1}},
            {pth_z, 1, #{false => 0, true => 1}},
            {nth_x, 1, #{false => 0, true => 1}},
            {nth_y, 1, #{false => 0, true => 1}},
            {nth_z, 1, #{false => 0, true => 1}},
            {mroi, 1, #{false => 0, true => 1}},
            {int, 1, #{false => 0, true => 1}}
        ]},
        int_ths_m => {16#32, read_write, 2, [
            {ths, 16, {unsigned_little, 0, 32767}}
        ]},
        int_ths_l_m => {16#32, read_write, 1, raw},
        int_ths_h_m => {16#33, read_write, 1, raw}
    };
registers(alt) ->
#{
ref_p => {16#08, read_write, 3, raw},
ref_p_xl => {16#08, read_write, 1, raw},
ref_p_l => {16#09, read_write, 1, raw},
ref_p_h => {16#0A, read_write, 1, raw},
who_am_i => {16#0F, read, 1, raw},
res_conf => {16#10, read_write, 1, [
{0, 4},
{avgt, 2, #{
{int_avg, 8} => 2#00,
{int_avg, 16} => 2#01,
{int_avg, 32} => 2#10,
{int_avg, 64} => 2#11
}},
{avgp, 2, #{
{int_avg, 8} => 2#00,
{int_avg, 32} => 2#01,
{int_avg, 128} => 2#10,
{int_avg, 512} => 2#11
}}
]},
ctrl_reg1 => {16#20, read_write, 1, [
{pd, 1, #{power_down => 0, active => 1}},
{odr, 3, #{
one_shot => 2#000,
{hz, 1} => 2#001,
{hz, 7} => 2#010,
{hz, 12.5} => 2#011,
{hz, 25} => 2#100
}},
{diff_en, 1, #{disabled => 0, enabled => 1}},
{bdu, 1, #{continuous => 0, read => 1}},
{reset_az, 1, #{normal => 0, reset => 1}},
{sim, 1, #{'4-wire' => 0, '3-wire' => 1}}
]},
ctrl_reg2 => {16#21, read_write, 1, [
{boot, 1, #{normal => 0, reboot_memory => 1}},
{fifo_en, 1, #{disabled => 0, enabled => 1}},
{stop_on_fth, 1, #{false => 0, true => 1}},
{fifo_mean_dec, 1, #{disabled => 0, enabled => 1}},
{i2c_dis, 1, #{false => 0, true => 1}},
{swreset, 1, #{normal => 0, reset => 1}},
{autozero, 1, #{disabled => 0, enabled => 1}},
{one_shot, 1, #{idle => 0, trigger => 1}}
]},
ctrl_reg3 => {16#22, read_write, 1, [
{int_h_l, 1, #{high => 0, low => 1}},
{pp_od, 1, #{push_pull => 0, open_drain => 1}},
{0, 4},
{int_s, 2, #{
data_signal => 2#00,
p_high => 2#01,
p_low => 2#10,
p_or => 2#11
}}
]},
ctrl_reg4 => {16#23, read_write, 1, [
{0, 4},
{f_empty, 1, #{disabled => 0, enabled => 1}},
{f_fth, 1, #{disabled => 0, enabled => 1}},
{f_ovr, 1, #{disabled => 0, enabled => 1}},
{drdy, 1, #{disabled => 0, enabled => 1}}
]},
interrupt_cfg => {16#24, read_write, 1, [
{0, 5},
{lir, 1, #{false => 0, true => 1}},
{pl_e, 1, #{disabled => 0, enabled => 1}},
{ph_e, 1, #{disabled => 0, enabled => 1}}
]},
int_source => {16#25, read, 1, [
{0, 5},
{ia, 1, #{false => 0, true => 1}},
{pl, 1, #{false => 0, true => 1}},
{ph, 1, #{false => 0, true => 1}}
]},
status_reg => {16#27, read, 1, [
{0, 2},
{p_or, 1, #{false => 0, true => 1}},
{t_or, 1, #{false => 0, true => 1}},
{0, 2},
{p_da, 1, #{false => 0, true => 1}},
{t_da, 1, #{false => 0, true => 1}}
]},
press_out => {16#28, read, 3, fun convert_pressure/3},
press_out_xl => {16#28, read, 1, raw},
press_out_l => {16#29, read, 1, raw},
press_out_h => {16#2A, read, 1, raw},
temp_out => {16#2B, read, 2, fun convert_alt_temp/3},
temp_out_l => {16#2B, read, 1, raw},
temp_out_h => {16#2C, read, 1, raw},
fifo_ctrl => {16#2E, read_write, 1, [
{f_mode, 3, #{
bypass => 2#000,
fifo => 2#001,
stream => 2#010,
stream_to_fifo => 2#011,
bypass_to_stream => 2#100,
not_available => 2#101,
fifo_mean => 2#110,
bypass_to_fifo => 2#111
}},
{wtm_point, 5, #{
{sample, 2} => 2#00001,
{sample, 4} => 2#00011,
{sample, 8} => 2#00111,
{sample, 16} => 2#01111,
{sample, 32} => 2#11111
}}
]},
fifo_status => {16#2F, read, 1, [
{fth_fifo, 1, #{false => 0, true => 1}},
{ovr, 1, #{false => 0, true => 1}},
{empty_fifo, 1, #{false => 0, true => 1}},
{fss, 5, unsigned_little}
]},
ths_p => {16#30, read_write, 1, unsigned_little},
ths_p_l => {16#30, read_write, 1, raw},
ths_p_h => {16#31, read_write, 1, raw},
rpds => {16#39, read_write, 1, signed_little},
rpds_l => {16#39, read_write, 1, raw},
rpds_h => {16#3A, read_write, 1, raw}
}.
%% @private
%% Convert a raw accelerometer axis sample to g (default) or mg,
%% scaling by the configured full-scale range (fs_xl). The per-range
%% constants are mg/LSB sensitivities (presumably from the device
%% datasheet -- not verifiable here). If the full-scale setting decodes
%% to an unknown value the raw binary is returned unconverted.
convert_g(Raw, Context, Opts) ->
    Value = decode(signed_little, Raw),
    Scale = case maps:get(xl_unit, Opts, g) of
        g -> 0.001;
        mg -> 1.0;
        Other -> throw({unknown_option, #{xl_unit => Other}})
    end,
    % Reading the setting may itself hit the bus, hence the new context.
    {FS, NewContext} = setting(ctrl_reg6_xl, fs_xl, Context),
    Result = case FS of
        {g, 2} -> Value * 0.061 * Scale;
        {g, 8} -> Value * 0.244 * Scale;
        {g, 4} -> Value * 0.122 * Scale;
        {g, 16} -> Value * 0.732 * Scale;
        _ -> Raw
    end,
    {Result, NewContext}.
%% @private
%% Convert the raw accelerometer temperature sample to degrees Celsius
%% (16 LSB/degree with a 25 C offset -- constants presumably from the
%% device datasheet).
convert_temp(Raw, Context, _Opts) ->
    {decode(signed_little, Raw) / 16 + 25, Context}.
%% @private
%% Convert a raw gyroscope axis sample to dps (default) or mdps,
%% scaling by the configured angular full-scale range (fs_g). Constants
%% are mdps/LSB sensitivities (presumably from the device datasheet).
%% Unknown full-scale values fall back to returning the raw binary.
convert_dps(Raw, Context, Opts) ->
    Value = decode(signed_little, Raw),
    Scale = case maps:get(g_unit, Opts, dps) of
        dps -> 0.001;
        mdps -> 1.0;
        Other -> throw({unknown_option, #{g_unit => Other}})
    end,
    {AR, NewContext} = setting(ctrl_reg1_g, fs_g, Context),
    Result = case AR of
        {dps, 245} -> Value * 8.75 * Scale;
        {dps, 500} -> Value * 17.50 * Scale;
        {dps, 2000} -> Value * 70.0 * Scale;
        _ -> Raw
    end,
    {Result, NewContext}.
%% @private
%% Convert a raw magnetometer axis sample to gauss (default) or mgauss,
%% scaling by the configured full-scale range (fs). Constants are
%% mgauss/LSB sensitivities (presumably from the device datasheet).
%% Falls back to returning the raw binary when the full-scale setting
%% decodes to an unrecognised value -- consistent with convert_g/3 and
%% convert_dps/3; previously this crashed with a case_clause error.
convert_gauss(Raw, Context, Opts) ->
    Value = decode(signed_little, Raw),
    Scale = case maps:get(mag_unit, Opts, gauss) of
        gauss -> 0.001;
        mgauss -> 1.0;
        Other -> throw({unknown_option, #{mag_unit => Other}})
    end,
    {MagSensitivity, NewContext} = setting(ctrl_reg2_m, fs, Context),
    Result = case MagSensitivity of
        {gauss, 4} -> Value * 0.14 * Scale;
        {gauss, 8} -> Value * 0.29 * Scale;
        {gauss, 12} -> Value * 0.43 * Scale;
        {gauss, 16} -> Value * 0.58 * Scale;
        _ -> Raw
    end,
    {Result, NewContext}.
%% @private
%% Convert the raw barometer sample to pressure (4096 LSB per unit --
%% scale factor presumably from the altimeter datasheet).
convert_pressure(Raw, Context, _Opts) ->
    {decode(signed_little, Raw) / 4096, Context}.
%% @private
%% Convert the raw altimeter temperature sample to degrees Celsius
%% (480 LSB/degree with a 42.5 C offset -- constants presumably from
%% the altimeter datasheet).
convert_alt_temp(Raw, Context, _Opts) ->
    {42.5 + decode(signed_little, Raw) / 480, Context}.
%% @private
%% Trace a register read (component, hex address, binary value) to
%% standard output.
debug_read(Comp, Reg, Value) ->
    io:format("[PmodNAV][~p] read 16#~2.16.0B --> ~s~n",
        [Comp, Reg, debug_bitstring(Value)]
    ).
%% @private
%% Trace a register write (component, hex address, binary value) to
%% standard output.
debug_write(Comp, Reg, Value) ->
    io:format("[PmodNAV][~p] write 16#~2.16.0B <-- ~s~n",
        [Comp, Reg, debug_bitstring(Value)]
    ).
%% @private
%% Render a binary as space-separated base-2 byte literals, e.g.
%% <<170>> becomes "2#10101010 " (note the trailing space per byte).
debug_bitstring(Bin) ->
    FormatByte = fun (Byte) -> io_lib:format("2#~8.2.0B ", [Byte]) end,
    lists:flatten([FormatByte(Byte) || <<Byte>> <= Bin]).
%% =====================================================================
%% Licensed under the Apache License, Version 2.0 (the "License"); you may
%% not use this file except in compliance with the License. You may obtain
%% a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% Alternatively, you may use this file under the terms of the GNU Lesser
%% General Public License (the "LGPL") as published by the Free Software
%% Foundation; either version 2.1, or (at your option) any later version.
%% If you wish to allow use of your version of this file only under the
%% terms of the LGPL, you should delete the provisions above and replace
%% them with the notice and other provisions required by the LGPL; see
%% <http://www.gnu.org/licenses/>. If you do not delete the provisions
%% above, a recipient may use your version of this file under the terms of
%% either the Apache License or the LGPL.
%%
%% @copyright 2003 <NAME>
%% @author <NAME> <<EMAIL>>
%% @see edoc
%% @end
%% =====================================================================
%% @doc Interface for calling EDoc from Erlang startup options.
%%
%% The following is an example of typical usage in a Makefile:
%% ```docs:
%% erl -noshell -run edoc_run application "'$(APP_NAME)'" \
%% '"."' '[{def,{vsn,"$(VSN)"}}]'
%% '''
%% (note the single-quotes to avoid shell expansion, and the
%% double-quotes enclosing the strings).
%%
%% <strong>New feature in version 0.6.9</strong>: It is no longer
%% necessary to write `-s init stop' last on the command line in order
%% to make the execution terminate. The termination (signalling success
%% or failure to the operating system) is now built into these
%% functions.
-module(edoc_run).
-export([file/1, application/1, files/1, toc/1]).
-compile({no_auto_import,[error/1]}).
-import(edoc_report, [report/2, error/1]).
-type args() :: [string()].
%% @spec application([string()]) -> none()
%%
%% @doc Calls {@link edoc:application/3} with the corresponding
%% arguments. The strings in the list are parsed as Erlang constant
%% terms. The list can be either `[App]', `[App, Options]' or `[App,
%% Dir, Options]'. In the first case {@link edoc:application/1} is
%% called instead; in the second case, {@link edoc:application/2} is
%% called.
%%
%% The function call never returns; instead, the emulator is
%% automatically terminated when the call has completed, signalling
%% success or failure to the operating system.
-spec application(args()) -> no_return().
application(Args) ->
    %% Parse the command-line strings into terms and dispatch to the
    %% matching edoc:application arity; run/1 then terminates the
    %% emulator with a success/failure exit status.
    run(fun () ->
            case parse_args(Args) of
                [App] ->
                    edoc:application(App);
                [App, Options] ->
                    edoc:application(App, Options);
                [App, Dir, Options] ->
                    edoc:application(App, Dir, Options);
                _ ->
                    invalid_args("edoc_run:application/1", Args)
            end
        end).
%% @spec files([string()]) -> none()
%%
%% @doc Calls {@link edoc:files/2} with the corresponding arguments. The
%% strings in the list are parsed as Erlang constant terms. The list can
%% be either `[Files]' or `[Files, Options]'. In the first case, {@link
%% edoc:files/1} is called instead.
%%
%% The function call never returns; instead, the emulator is
%% automatically terminated when the call has completed, signalling
%% success or failure to the operating system.
-spec files(args()) -> no_return().
files(Args) ->
    %% Parse the command-line strings into terms and dispatch to the
    %% matching edoc:files arity; run/1 then terminates the emulator.
    run(fun () ->
            case parse_args(Args) of
                [FileList] ->
                    edoc:files(FileList);
                [FileList, Options] ->
                    edoc:files(FileList, Options);
                _ ->
                    invalid_args("edoc_run:files/1", Args)
            end
        end).
%% @hidden Not official yet
-spec toc(args()) -> no_return().
toc(Args) ->
    %% Parse the command-line strings and dispatch to the matching
    %% edoc:toc arity; run/1 then terminates the emulator.
    run(fun () ->
            case parse_args(Args) of
                [Dir, Paths] ->
                    edoc:toc(Dir, Paths);
                [Dir, Paths, Options] ->
                    edoc:toc(Dir, Paths, Options);
                _ ->
                    invalid_args("edoc_run:toc/1", Args)
            end
        end).
%% @spec file([string()]) -> none()
%%
%% @deprecated This is part of the old interface to EDoc and is mainly
%% kept for backwards compatibility. The preferred way of generating
%% documentation is through one of the functions {@link application/1}
%% and {@link files/1}.
%%
%% @doc Calls {@link edoc:file/2} with the corresponding arguments. The
%% strings in the list are parsed as Erlang constant terms. The list can
%% be either `[File]' or `[File, Options]'. In the first case, an empty
%% list of options is passed to {@link edoc:file/2}.
%%
%% The following is an example of typical usage in a Makefile:
%% ```$(DOCDIR)/%.html:%.erl
%% erl -noshell -run edoc_run file '"$<"' '[{dir,"$(DOCDIR)"}]' \
%% -s init stop'''
%%
%% The function call never returns; instead, the emulator is
%% automatically terminated when the call has completed, signalling
%% success or failure to the operating system.
-spec file(args()) -> no_return().
file(Args) ->
    %% Parse the command-line strings and call edoc:file/2, defaulting
    %% to an empty option list; run/1 then terminates the emulator.
    run(fun () ->
            case parse_args(Args) of
                [File] ->
                    edoc:file(File, []);
                [File, Options] ->
                    edoc:file(File, Options);
                _ ->
                    invalid_args("edoc_run:file/1", Args)
            end
        end).
%% Report a usage error for entry point `Where' and terminate the
%% emulator with a nonzero exit status. Never returns.
-spec invalid_args(string(), args()) -> no_return().
invalid_args(Where, Args) ->
    report("invalid arguments to ~ts: ~w.", [Where, Args]),
    shutdown_error().
%% Run the zero-arity fun `F' once code loading is available, then
%% terminate the emulator: a normal return signals success to the OS,
%% any exception signals failure.
%%
%% The old-style `catch' with the `{ok, _}' wrapper is deliberate: it
%% distinguishes a normal return from errors/exits (caught as
%% `{'EXIT', E}') and from uncaught throws (any other term).
run(F) ->
    wait_init(),
    case catch {ok, F()} of
        {ok, _} ->
            shutdown_ok();
        {'EXIT', E} ->
            report("edoc terminated abnormally: ~P.", [E, 10]),
            shutdown_error();
        Thrown ->
            report("internal error: throw without catch in edoc: ~P.",
                   [Thrown, 15]),
            shutdown_error()
    end.
%% Busy-wait (yielding the scheduler between polls) until the code
%% server is registered, i.e. until OTP init has progressed far enough
%% for code loading to work.
wait_init() ->
    case erlang:whereis(code_server) of
        undefined ->
            erlang:yield(),
            wait_init();
        _ ->
            ok
    end.
%% When and if a function init:stop/1 becomes generally available, we
%% can use that instead of delay-and-pray when there is an error.
%% Terminate the emulator signalling success (normal exit) to the OS.
-spec shutdown_ok() -> no_return().
shutdown_ok() ->
    %% shut down emulator nicely, signalling "normal termination"
    init:stop().
%% Terminate the emulator signalling failure (nonzero exit status) to
%% the OS, after giving pending I/O a moment to flush.
-spec shutdown_error() -> no_return().
shutdown_error() ->
    %% delay 1 second to allow I/O to finish
    receive after 1000 -> ok end,
    %% stop emulator the hard way with a nonzero exit value
    halt(1).
%% Parse every command-line argument into an Erlang term. Atoms (as
%% delivered for plain -run arguments) are first converted to strings.
parse_args([Arg | Rest]) when is_atom(Arg) ->
    [parse_arg(atom_to_list(Arg)) | parse_args(Rest)];
parse_args([Arg | Rest]) ->
    [parse_arg(Arg) | parse_args(Rest)];
parse_args([]) ->
    [].
%% Parse one argument string as an Erlang constant term, exiting the
%% process (after reporting) if it cannot be parsed or normalised.
%%
%% NOTE(review): the outer old-style `catch' would yield `{'EXIT', _}'
%% if edoc_lib:parse_expr/2 raised an error rather than producing
%% `{error, _, _}'; such a result would hit a case_clause here --
%% presumably parse_expr only returns or throws `{error, Line, Desc}'.
parse_arg(A) ->
    case catch {ok, edoc_lib:parse_expr(A, 1)} of
        {ok, Expr} ->
            case catch erl_parse:normalise(Expr) of
                {'EXIT', _} ->
                    report("bad argument: '~ts':", [A]),
                    exit(error);
                Term ->
                    Term
            end;
        {error, _, D} ->
            report("error parsing argument '~ts'", [A]),
            error(D),
            exit(error)
    end.
%% -------- Overview ---------
%%
%% There are two primary types of exchange supported:
%% - a full exchange aimed at implementations with cached trees, where the
%% cached trees represent all the data in the location, and the comparison is
%% between two complete data sets
%% - a partial exchange where it is expected that trees will be dynamically
%% created covering a subset of data within the location
%%
%% The full exchange assumes access to cached trees, with a low cost of
%% repeated access, and a relatively high proportion of the overall cost in
%% network bandwidth. These exchanges go through the following process:
%%
%% - Root Compare
%% - Root Confirm
%% - Branch Compare
%% - Branch Confirm
%% - Clock Compare
%% - Repair
%%
%% The partial, dynamic tree exchange is based on dynamically produced trees,
%% where a relatively high proportion of the cost is in the production of the
%% trees. In a tree exchange, whole trees are compared (potentially reduced by
%% use of a segment filter), until the delta stops decreasing at a significant
%% rate and a Clock Compare is run. So these exchanges go through the
%% following process:
%%
%% - Tree Compare (x n)
%% - Clock Compare
%% - Repair
%%
%% Each exchange has a 'blue' list and a 'pink' list. Each list (blue and
%% pink) is a set of partitions pertinent to this exchange, with the state
%% to be compared being the merging of all the trees referenced by the list.
%%
%% The lists can be a single item each (for a pairwise exchange), or a
%% ring-size number of partitions for a coverage query exchange.
%%
%% -------- Root Compare ---------
%%
%% This allows the comparison between the roots of trees. Each root (with a
%% tree size of large and 4-byte hashes), will be 4KB in size. The outcome of
%% the comparison should be a set of BranchIDs where the (merged) roots are
%% showing differences.
%%
%% The Exchange can terminate if the set of differences is empty. A timeout
%% should trigger the commencement of the next stage (to provide a pause
%% between vnode requests).
%%
%% -------- Root Confirm ---------
%%
%% In the next stage the roots are again requested, received and compared.
%% Again a set of branchIDs which differ is created - and the set of
%% confirmed deltas is the intersection of the sets generated from both root
%% exchanges.
%%
%% The purpose of the confirm stage is to rule out false negative results
%% related to timing differences in the result of PUTs.
%%
%% The Exchange can terminate if the set of differences is empty. A timeout
%% should trigger the commencement of the next stage (to provide a pause
%% between vnode requests).
%%
%% -------- Branch Compare / Confirm ---------
%%
%% The set of branch ID differences should now be fetched (Compare), and then
%% re-fetched following a timeout (Confirm) to produce a set of SegmentIDs (or
%% tree leaves) that represent differences ebwteen blue and pink, eliminating
%% false negatives related to timing as with the Root Compare and Confirm.
%%
%% Each Branch is 1KB in size. So if there are more than 16 branches which
%% have differences, only 16 should be chosen for the Compare and Confirm to
%% control the volume of network traffic prompted by the exchange.
%%
%% The Exchange can terminate if the set of differences is empty. A timeout
%% should trigger the commencement of the next stage (to provide a pause
%% between vnode requests).
%%
%% -------- Clock Compare ---------
%%
%% The final stage is clock compare. The clock compare can be done on up to
%% 128 segments across a maximum of 8 BranchIDs. This is to control the
%% potential overhead of the comparison and subsequent repairs. This may mean
%% for empty vnodes o(1000) exchanges may be required to fully recover the
%% store. However, in these cases it is likely that handoff and read repair
%% is already recovering the data so overly-aggressive read repair is
%% unnecessary.
%%
-module(aae_exchange).
-behaviour(gen_fsm).
-ifdef(fsm_deprecated).
-compile({nowarn_deprecated_function,
[{gen_fsm, start, 3},
{gen_fsm, send_event, 2}]}).
-endif.
-include("include/aae.hrl").
-define(TRANSITION_PAUSE_MS, 500).
% A pause between phases - allow queue lengths to change, and avoid
% generating an excess workload for AAE
-define(CACHE_TIMEOUT_MS, 60000).
% 60 seconds (used in fetch root/branches)
-define(SCAN_TIMEOUT_MS, 600000).
% 10 minutes (used in fetch clocks)
-define(UNFILTERED_SCAN_TIMEOUT_MS, 3600000).
% 60 minutes (used in fetch trees with no filters)
-define(MAX_RESULTS, 128).
% Maximum number of results to request in one round of
-define(WORTHWHILE_REDUCTION, 0.3).
% If the last comparison of trees has reduced the size of the dirty leaves
% by 30%, probably worth comparing again before a clock fetch is run.
% Number a suck-teeth estimate, not even a fag-packet calculation involved.
-define(WORTHWHILE_FILTER, 256).
% If the number of segment IDs to pass into a filter is too large, the
% filter is probably not worthwhile - more effort checking the filter, than
% time saved in the accumulator. Another suck-teeth estimate here as to
% what this value is, at this level with a small tree it will save opening
% all but one block in most slots (with the sst file). I suspect the
% optimal number is more likely to be higher than lower.
-export([init/1,
handle_sync_event/4,
handle_event/3,
handle_info/3,
terminate/3,
code_change/4]).
-export([waiting_all_results/2,
prepare_full_exchange/2,
prepare_partial_exchange/2,
root_compare/2,
root_confirm/2,
branch_compare/2,
branch_confirm/2,
clock_compare/2,
tree_compare/2,
merge_root/2,
merge_branches/2]).
-export([compare_roots/2,
compare_branches/2,
compare_clocks/2,
compare_trees/2]).
-export([start/4,
start/6,
reply/3]).
-include_lib("eunit/include/eunit.hrl").
%% Loop state for the exchange FSM.
-record(state, {% Mismatch results accumulated at each stage of the exchange
                root_compare_deltas = [] :: list(),
                root_confirm_deltas = [] :: list(),
                branch_compare_deltas = [] :: list(),
                branch_confirm_deltas = [] :: list(),
                tree_compare_deltas = [] :: list(),
                key_deltas = [] :: list(),
                % Callback funs supplied by the client at start
                repair_fun,
                reply_fun,
                % The two sides of the exchange (see input_list())
                blue_list = [] :: input_list(),
                pink_list = [] :: input_list(),
                exchange_id = "not_set" :: list(),
                % {RepliesReceived, RepliesExpected} for each colour
                blue_returns = {0, 0} :: {integer(), integer()},
                pink_returns = {0, 0} :: {integer(), integer()},
                % Per-colour accumulators, combined via merge_fun
                pink_acc,
                blue_acc,
                merge_fun,
                start_time = os:timestamp() :: erlang:timestamp(),
                % The state to move to once all replies have been received
                pending_state :: atom(),
                reply_timeout = 0 :: integer(),
                exchange_type :: exchange_type(),
                exchange_filters = none :: filters(),
                % Dirty leaves from the previous tree compare (partial only)
                last_tree_compare = none :: list(non_neg_integer())|none,
                tree_compares = 0 :: integer()
                }).
-type input_list() :: [{fun(), list(tuple())|all}].
% The Blue List and the Pink List are made up of:
% - a SendFun, which should be a 3-arity function, taking a preflist,
% a message and a colour to be used to flag the reply;
% - a list of preflists, to be used in the SendFun to be filtered by the
% target. The Preflist might be {Index, Node} for remote requests or
% {Index, Pid} for local requests
% For partial exchanges only, the preflist can and must be set to 'all'
-type branch_results() :: list({integer(), binary()}).
% Results to branch queries are a list mapping Branch ID to the binary for
% that branch
-type exchange_state() :: #state{}.
-type exchange_type() :: full|partial.
-type bucket() ::
{binary(), binary()}|binary().
-type key_range() ::
{binary(), binary()}|all.
-type modified_range() ::
{non_neg_integer(), non_neg_integer()}|all.
-type segment_filter() ::
{segments, list(non_neg_integer()), leveled_tictac:tree_size()}|all.
-type hash_method() ::
pre_hash|{rehash, non_neg_integer()}.
-type filters() ::
{filter,
bucket(), key_range(),
leveled_tictac:tree_size(),
segment_filter(), modified_range(),
hash_method()}|none.
% filter to be used in partial exchanges
-define(FILTERIDX_SEG, 5).
-define(FILTERIDX_TRS, 4).
%%%============================================================================
%%% API
%%%============================================================================
%% @doc
%% Start a full exchange with no filters.  Kept for callers that pre-date
%% partial (filtered) exchanges.
start(BlueList, PinkList, RepairFun, ReplyFun) ->
    % API for backwards compatibility
    start(full, BlueList, PinkList, RepairFun, ReplyFun, none).

-spec start(exchange_type(),
            input_list(), input_list(), fun(), fun(),
            filters()) -> {ok, pid(), list()}.
%% @doc
%% Start an FSM to manage an exchange and compare the preflists in the
%% BlueList with those in the PinkList, using the RepairFun to repair any
%% keys discovered to have inconsistent clocks.  ReplyFun used to reply back
%% to calling client the StateName at termination.
%%
%% The ReplyFun should be a 1-arity function; it is passed a
%% {PendingStateName, KeyDeltaCount} tuple at termination (see terminate/3).
start(full, BlueList, PinkList, RepairFun, ReplyFun, none) ->
    % A full exchange starts from the cached tree roots, so takes no filters
    ExchangeID = leveled_util:generate_uuid(),
    {ok, ExPID} = gen_fsm:start(?MODULE,
                                [{full, none},
                                 BlueList, PinkList, RepairFun, ReplyFun,
                                 ExchangeID],
                                []),
    {ok, ExPID, ExchangeID};
start(partial, BlueList, PinkList, RepairFun, ReplyFun, Filters) ->
    % A partial exchange builds range-limited trees on demand using Filters
    ExchangeID = leveled_util:generate_uuid(),
    {ok, ExPID} = gen_fsm:start(?MODULE,
                                [{partial, Filters},
                                 BlueList, PinkList, RepairFun, ReplyFun,
                                 ExchangeID],
                                []),
    {ok, ExPID, ExchangeID}.

-spec reply(pid(), any(), pink|blue) -> ok.
%% @doc
%% Support events to be sent back to the FSM.  Colour identifies which side
%% of the exchange (pink or blue) produced the Result.
reply(Exchange, Result, Colour) ->
    gen_fsm:send_event(Exchange, {reply, Result, Colour}).
%%%============================================================================
%%% gen_fsm callbacks
%%%============================================================================
%% @doc
%% gen_fsm init callback.  Both full and partial exchanges build the same
%% initial loop state; they differ only in the filters carried and the
%% first state entered.  The duplicated construction in the two clauses has
%% been extracted into initial_state/7.
init([{full, none}, BlueList, PinkList, RepairFun, ReplyFun, ExChID]) ->
    State = initial_state(full, none,
                            BlueList, PinkList,
                            RepairFun, ReplyFun, ExChID),
    {ok, prepare_full_exchange, State, 0};
init([{partial, Filters}, BlueList, PinkList, RepairFun, ReplyFun, ExChID]) ->
    State = initial_state(partial, Filters,
                            BlueList, PinkList,
                            RepairFun, ReplyFun, ExChID),
    {ok, prepare_partial_exchange, State, 0}.

%% Seed randomness (used for jittered pauses), log the exchange start, and
%% build the common initial #state{} for either exchange type.
initial_state(Type, Filters, BlueList, PinkList, RepairFun, ReplyFun, ExChID) ->
    leveled_rand:seed(),
    PinkTarget = length(PinkList),
    BlueTarget = length(BlueList),
    aae_util:log("EX001", [ExChID, PinkTarget + BlueTarget], logs()),
    #state{blue_list = BlueList,
            pink_list = PinkList,
            repair_fun = RepairFun,
            reply_fun = ReplyFun,
            exchange_id = ExChID,
            % Returns are {Count, Target}; reset/1 zeroes the count when the
            % first requests are sent
            pink_returns = {PinkTarget, PinkTarget},
            blue_returns = {BlueTarget, BlueTarget},
            exchange_type = Type,
            exchange_filters = Filters}.
%% @doc
%% Kick off a full exchange by requesting the cached tree root from each
%% vnode on both the blue and pink lists.
prepare_full_exchange(timeout, State) ->
    % EX006 reports the state entered - log the actual state name
    % (this previously mis-logged the atom prepare_tree_exchange)
    aae_util:log("EX006",
                 [prepare_full_exchange, State#state.exchange_id],
                 logs()),
    trigger_next(fetch_root,
                 root_compare,
                 fun merge_root/2,
                 <<>>,
                 false,     % never stop before the first comparison
                 ?CACHE_TIMEOUT_MS,
                 State).
%% @doc
%% Kick off a partial exchange by requesting a range-limited tree build
%% from both sides, using the filters supplied at start.
prepare_partial_exchange(timeout, State) ->
    aae_util:log("EX006",
                 [prepare_partial_exchange, State#state.exchange_id],
                 logs()),
    Filters = State#state.exchange_filters,
    % A fully-unfiltered scan is allowed much longer to complete
    ScanTimeout = filtered_timeout(Filters),
    TreeSize = element(?FILTERIDX_TRS, Filters),
    trigger_next({merge_tree_range, Filters},
                 tree_compare,
                 fun merge_tree/2,
                 leveled_tictac:new_tree(empty_tree, TreeSize),
                 false,
                 ScanTimeout,
                 State).

%% @doc
%% First comparison of the cached tree roots in a full exchange.  Store any
%% mismatched BranchIDs and re-fetch the roots for confirmation, to rule
%% out false negatives caused by in-flight PUTs.
root_compare(timeout, State) ->
    aae_util:log("EX006", [root_compare, State#state.exchange_id], logs()),
    BranchIDs = compare_roots(State#state.blue_acc, State#state.pink_acc),
    trigger_next(fetch_root,
                 root_confirm,
                 fun merge_root/2,
                 <<>>,
                 length(BranchIDs) == 0,
                 ?CACHE_TIMEOUT_MS,
                 State#state{root_compare_deltas = BranchIDs}).
%% @doc
%% Compare the range-limited trees built by both sides of a partial
%% exchange.  Repeat the build/compare while it is still significantly
%% reducing the set of dirty leaves, then move on to a clock compare over
%% the remaining (capped) segment IDs.
tree_compare(timeout, State) ->
    % EX006 reports the state entered - this previously mis-logged
    % root_compare, making the log ambiguous with the real root_compare
    aae_util:log("EX006", [tree_compare, State#state.exchange_id], logs()),
    DirtyLeaves = compare_trees(State#state.blue_acc, State#state.pink_acc),
    TreeCompares = State#state.tree_compares + 1,
    {StillDirtyLeaves, Reduction} =
        case State#state.last_tree_compare of
            none ->
                % First comparison - nothing to intersect with
                {DirtyLeaves, 1.0};
            PreviouslyDirtyLeaves ->
                SDL = intersect_ids(PreviouslyDirtyLeaves, DirtyLeaves),
                {SDL, 1.0 - length(SDL) / length(PreviouslyDirtyLeaves)}
        end,
    % We want to keep comparing trees until the number of deltas stops
    % reducing significantly.  Then there should be a clock comparison.
    % It is expected there will be natural deltas with tree compare because
    % of timing differences.  Ideally the natural deltas will be small
    % enough so that there should be no more than 2 tree compares before a
    % segment filter can be applied to accelerate the process.
    Filters = State#state.exchange_filters,
    TreeSize = element(?FILTERIDX_TRS, Filters),
    case ((length(StillDirtyLeaves) > 0)
            and (Reduction > ?WORTHWHILE_REDUCTION)) of
        true ->
            % Keep comparing trees, this is reducing the segments we will
            % eventually need to compare
            Filters0 =
                case length(StillDirtyLeaves) < ?WORTHWHILE_FILTER of
                    true ->
                        % Few enough dirty leaves that a segment filter
                        % will accelerate the next tree build
                        Segments =
                            {segments, StillDirtyLeaves, TreeSize},
                        setelement(?FILTERIDX_SEG, Filters, Segments);
                    false ->
                        Filters
                end,
            ScanTimeout = filtered_timeout(Filters0),
            trigger_next({merge_tree_range, Filters0},
                            tree_compare,
                            fun merge_tree/2,
                            leveled_tictac:new_tree(empty_tree, TreeSize),
                            false,
                            ScanTimeout,
                            State#state{last_tree_compare = StillDirtyLeaves,
                                        tree_compares = TreeCompares});
        false ->
            % Compare clocks.  Note if there are no mismatched segment IDs
            % the stop condition in trigger_next will be met
            SegmentIDs = select_ids(StillDirtyLeaves,
                                    ?MAX_RESULTS,
                                    tree_compare,
                                    State#state.exchange_id),
            % TODO - select_ids doesn't account for TreeSize
            Filters0 =
                setelement(?FILTERIDX_SEG,
                            Filters,
                            {segments, SegmentIDs, TreeSize}),
            trigger_next({fetch_clocks_range, Filters0},
                            clock_compare,
                            fun merge_clocks/2,
                            [],
                            length(SegmentIDs) == 0,
                            ?SCAN_TIMEOUT_MS,
                            State#state{tree_compare_deltas = StillDirtyLeaves,
                                        tree_compares = TreeCompares})
    end.
%% @doc
%% Re-fetch the roots and intersect the mismatched BranchIDs with those
%% found at root_compare - only branches dirty in both rounds are carried
%% forward (capped at ?MAX_RESULTS) into the branch fetch.
root_confirm(timeout, State) ->
    aae_util:log("EX006", [root_confirm, State#state.exchange_id], logs()),
    BranchIDs0 = State#state.root_compare_deltas,
    BranchIDs1 = compare_roots(State#state.blue_acc, State#state.pink_acc),
    BranchIDs = select_ids(intersect_ids(BranchIDs0, BranchIDs1),
                           ?MAX_RESULTS,
                           root_confirm,
                           State#state.exchange_id),
    trigger_next({fetch_branches, BranchIDs},
                 branch_compare,
                 fun merge_branches/2,
                 [],
                 length(BranchIDs) == 0,
                 ?CACHE_TIMEOUT_MS,
                 State#state{root_confirm_deltas = BranchIDs}).

%% @doc
%% First comparison of the fetched branches - store the mismatched
%% SegmentIDs and re-fetch the same branches for confirmation.
branch_compare(timeout, State) ->
    aae_util:log("EX006", [branch_compare, State#state.exchange_id], logs()),
    SegmentIDs = compare_branches(State#state.blue_acc, State#state.pink_acc),
    trigger_next({fetch_branches, State#state.root_confirm_deltas},
                 branch_confirm,
                 fun merge_branches/2,
                 [],
                 length(SegmentIDs) == 0,
                 ?CACHE_TIMEOUT_MS,
                 State#state{branch_compare_deltas = SegmentIDs}).

%% @doc
%% Intersect the SegmentIDs dirty in both branch rounds, then fetch the
%% keys and clocks for those segments (capped at ?MAX_RESULTS).
branch_confirm(timeout, State) ->
    aae_util:log("EX006", [branch_confirm, State#state.exchange_id], logs()),
    SegmentIDs0 = State#state.branch_compare_deltas,
    SegmentIDs1 = compare_branches(State#state.blue_acc, State#state.pink_acc),
    SegmentIDs = select_ids(intersect_ids(SegmentIDs0, SegmentIDs1),
                            ?MAX_RESULTS,
                            branch_confirm,
                            State#state.exchange_id),
    trigger_next({fetch_clocks, SegmentIDs},
                 clock_compare,
                 fun merge_clocks/2,
                 [],
                 length(SegmentIDs) == 0,
                 ?SCAN_TIMEOUT_MS,
                 State#state{branch_confirm_deltas = SegmentIDs}).

%% @doc
%% Final stage - compare the fetched keys and clocks, and hand any
%% mismatched keys to the client's RepairFun before stopping normally.
clock_compare(timeout, State) ->
    aae_util:log("EX006", [clock_compare, State#state.exchange_id], logs()),
    RepairKeys = compare_clocks(State#state.blue_acc, State#state.pink_acc),
    RepairFun = State#state.repair_fun,
    aae_util:log("EX004",
                 [State#state.exchange_id, length(RepairKeys)],
                 logs()),
    RepairFun(RepairKeys),
    {stop,
     normal,
     State#state{key_deltas = RepairKeys}}.
%% @doc
%% Collect replies from both sides of the exchange, merging each result
%% into the per-colour accumulator.  Move to the pending state once every
%% expected reply has arrived; stop on timeout or a not_supported reply.
waiting_all_results({reply, not_supported, Colour}, State) ->
    % One side cannot run this exchange - abandon it
    aae_util:log("EX010", [Colour, State#state.exchange_id], logs()),
    {stop, normal, State#state{pending_state = not_supported}};
waiting_all_results({reply, Result, Colour}, State) ->
    aae_util:log("EX007", [Colour, State#state.exchange_id], logs()),
    % Returns are {Count, Target} per colour
    {PC, PT} = State#state.pink_returns,
    {BC, BT} = State#state.blue_returns,
    MergeFun = State#state.merge_fun,
    {State0, AllPink, AllBlue} =
        case Colour of
            pink ->
                PinkAcc = MergeFun(Result, State#state.pink_acc),
                {State#state{pink_returns = {PC + 1, PT}, pink_acc = PinkAcc},
                    PC + 1 == PT, BC == BT};
            blue ->
                BlueAcc = MergeFun(Result, State#state.blue_acc),
                {State#state{blue_returns = {BC + 1, BT}, blue_acc = BlueAcc},
                    PC == PT, BC + 1 == BT}
        end,
    case AllBlue and AllPink of
        true ->
            % All replies in - move on after a jittered pause so that
            % concurrently-started exchanges don't hit the network in step
            {next_state,
                State0#state.pending_state,
                State0,
                jitter_pause(?TRANSITION_PAUSE_MS)};
        false ->
            % Still waiting - shrink the timeout by the time already spent
            {next_state,
                waiting_all_results,
                State0,
                set_timeout(State0#state.start_time,
                            State0#state.reply_timeout)}
    end;
waiting_all_results(timeout, State) ->
    % Not all expected replies arrived in time - log the shortfall and stop
    {PC, PT} = State#state.pink_returns,
    {BC, BT} = State#state.blue_returns,
    MissingCount = PT + BT - (PC + BC),
    aae_util:log("EX002",
                 [State#state.pending_state,
                  MissingCount,
                  State#state.exchange_id],
                 logs()),
    {stop, normal, State#state{pending_state = timeout}}.
%% @doc gen_fsm callback - synchronous events are not used by this FSM;
%% simply acknowledge and remain in the current state.
handle_sync_event(_Event, _From, StateName, LoopState) ->
    {reply, ok, StateName, LoopState}.

%% @doc gen_fsm callback - all-state events are ignored.
handle_event(_Event, StateName, LoopState) ->
    {next_state, StateName, LoopState}.

%% @doc gen_fsm callback - stray messages are ignored.
handle_info(_Info, StateName, LoopState) ->
    {next_state, StateName, LoopState}.
%% @doc
%% On normal termination log a stage-by-stage summary of the deltas found
%% (EX003 for full, EX009 for partial exchanges), then call the client's
%% ReplyFun with the final state name and the count of key deltas.
terminate(normal, StateName, State) ->
    case State#state.exchange_type of
        full ->
            aae_util:log("EX003",
                         [StateName,
                          State#state.exchange_id,
                          length(State#state.root_compare_deltas),
                          length(State#state.root_confirm_deltas),
                          length(State#state.branch_compare_deltas),
                          length(State#state.branch_confirm_deltas),
                          length(State#state.key_deltas)],
                         logs());
        partial ->
            aae_util:log("EX009",
                         [StateName,
                          State#state.exchange_id,
                          length(State#state.tree_compare_deltas),
                          State#state.tree_compares,
                          length(State#state.key_deltas)],
                         logs())
    end,
    ReplyFun = State#state.reply_fun,
    ReplyFun({State#state.pending_state, length(State#state.key_deltas)}).
%% @doc gen_fsm code_change callback - no state transformation is required
%% between versions.
code_change(_OldVsn, StateName, LoopState, _Extra) ->
    {ok, StateName, LoopState}.
%%%============================================================================
%%% External Functions
%%%============================================================================

-spec merge_binary(binary(), binary()) -> binary().
%% @doc
%% Merge two binaries - where one might be empty (as nothing has been seen
%% for that preflist, or the accumulator is the initial one)
merge_binary(<<>>, AccBin) ->
    AccBin;
merge_binary(ResultBin, <<>>) ->
    ResultBin;
merge_binary(ResultBin, AccBin) ->
    % Both sides non-empty - merge via leveled_tictac
    leveled_tictac:merge_binaries(ResultBin, AccBin).

-spec merge_branches(branch_results(), branch_results()) -> branch_results().
%% @doc
%% Branches should be returned as a list of {BranchID, BranchBin} pairs.
%% For each branch in a result, merge into the accumulator.
merge_branches([], BranchAccL) ->
    BranchAccL;
merge_branches([{BranchID, BranchBin}|Rest], BranchAccL) ->
    case lists:keyfind(BranchID, 1, BranchAccL) of
        false ->
            % First response has an empty accumulator
            merge_branches(Rest, [{BranchID, BranchBin}|BranchAccL]);
        {BranchID, BinAcc} ->
            % Merge this result into the binary already held for the branch
            BinAcc0 = merge_binary(BranchBin, BinAcc),
            merge_branches(Rest,
                           lists:keyreplace(BranchID,
                                            1,
                                            BranchAccL,
                                            {BranchID, BinAcc0}))
    end.

-spec merge_root(binary(), binary()) -> binary().
%% @doc
%% Merge an individual result for a set of preflists into the accumulated
%% binary for the tree root
merge_root(Root, RootAcc) ->
    merge_binary(Root, RootAcc).

-spec merge_tree(leveled_tictac:tictactree(), leveled_tictac:tictactree())
                                            -> leveled_tictac:tictactree().
%% @doc
%% Merge two trees into an XOR'd tree representing the total result set
merge_tree(Tree0, Tree1) ->
    leveled_tictac:merge_trees(Tree0, Tree1).
%%%============================================================================
%%% Internal Functions
%%%============================================================================

-spec trigger_next(any(), atom(), fun(), any(), boolean(),
                    integer(), exchange_state()) -> any().
%% @doc
%% Trigger the next request.  If StopTest is true there is nothing left to
%% compare, so stop the FSM.  Otherwise send NextRequest to both sides,
%% reset the per-colour reply counters and accumulators, and wait in
%% waiting_all_results before moving on to PendingStateName.
trigger_next(NextRequest, PendingStateName, MergeFun, InitAcc, StopTest,
                Timeout, LoopState) ->
    case StopTest of
        true ->
            {stop, normal, LoopState};
        false ->
            ok = send_requests(NextRequest,
                                LoopState#state.blue_list,
                                LoopState#state.pink_list,
                                always_blue),
            {next_state,
                waiting_all_results,
                LoopState#state{start_time = os:timestamp(),
                                pending_state = PendingStateName,
                                pink_acc = InitAcc,
                                blue_acc = InitAcc,
                                merge_fun = MergeFun,
                                pink_returns =
                                    reset(LoopState#state.pink_returns),
                                blue_returns =
                                    reset(LoopState#state.blue_returns),
                                reply_timeout = Timeout},
                Timeout}
    end.
-spec set_timeout(erlang:timestamp(), pos_integer()) -> integer().
%% @doc
%% Compute the timeout remaining for the current state, given the time at
%% which the state was entered - never returning less than zero.
set_timeout(StartTime, Timeout) ->
    ElapsedMS = timer:now_diff(os:timestamp(), StartTime) div 1000,
    max(0, Timeout - ElapsedMS).
-spec send_requests(any(), list(tuple()), list(tuple()),
                    always_blue|always_pink) -> ok.
%% @doc
%% Send the request to every item on the blue and pink lists, alternating
%% between the two lists so that neither side receives all of its requests
%% first.  Emptiness is now detected by matching [] rather than calling
%% length/1 (which walks the whole list on each recursion).
send_requests({merge_tree_range, {filter, B, KR, TS, SF, MR, HM}},
              BlueList, PinkList, Always) ->
    % unpack the filter into a single tuple msg for merge_tree_range
    send_requests({merge_tree_range, B, KR, TS, SF, MR, HM},
                  BlueList, PinkList, Always);
send_requests({fetch_clocks_range, {filter, B, KR, _TS, SF, MR, _HM}},
              BlueList, PinkList, Always) ->
    % unpack the filter into a single tuple msg for fetch_clocks_range
    send_requests({fetch_clocks_range, B, KR, SF, MR},
                  BlueList, PinkList, Always);
send_requests(_Msg, [], [], _Always) ->
    ok;
send_requests(Msg, [{SendFun, Preflists}|Rest], PinkList, always_blue) ->
    SendFun(Msg, Preflists, blue),
    % Switch to the pink list only if it still has entries
    case PinkList of
        [] ->
            send_requests(Msg, Rest, PinkList, always_blue);
        _ ->
            send_requests(Msg, Rest, PinkList, always_pink)
    end;
send_requests(Msg, BlueList, [{SendFun, Preflists}|Rest], always_pink) ->
    SendFun(Msg, Preflists, pink),
    % Switch back to the blue list only if it still has entries
    case BlueList of
        [] ->
            send_requests(Msg, BlueList, Rest, always_pink);
        _ ->
            send_requests(Msg, BlueList, Rest, always_blue)
    end.
-spec merge_clocks(list(tuple()), list(tuple())) -> list(tuple()).
%% @doc
%% Fold the keys and clocks returned by a segment query into the sorted
%% accumulator of keys and clocks collected so far.
merge_clocks(KeyClockL, KeyClockLAcc) ->
    SortedResults = lists:usort(KeyClockL),
    lists:merge(SortedResults, KeyClockLAcc).
-spec compare_roots(binary(), binary()) -> list(integer()).
%% @doc
%% Compare the roots of two trees (i.e. the Pink and Blue root), and return
%% a list of branch IDs which are mismatched.
compare_roots(BlueRoot, PinkRoot) ->
    leveled_tictac:find_dirtysegments(BlueRoot, PinkRoot).
-spec compare_branches(branch_results(), branch_results()) -> list(integer()).
%% @doc
%% Compare two sets of branches, and return a list of segment IDs which are
%% mismatched.  Folds directly over the blue branches rather than indexing
%% with lists:nth/2 inside the fold (which made this quadratic in the
%% number of branches).
compare_branches(BlueBranches, PinkBranches) ->
    FoldFun =
        fun({BranchID, BlueBranch}, Acc) ->
            % A pink branch with the same ID is expected for every blue
            % branch - crash (badmatch) if it is missing
            {BranchID, PinkBranch} =
                lists:keyfind(BranchID, 1, PinkBranches),
            DirtySegs =
                leveled_tictac:find_dirtysegments(BlueBranch, PinkBranch),
            % Expand the in-branch segment offsets into absolute SegmentIDs
            lists:map(fun(S) ->
                            leveled_tictac:join_segment(BranchID, S)
                        end,
                        DirtySegs) ++ Acc
        end,
    lists:foldl(FoldFun, [], BlueBranches).
-spec compare_clocks(list(tuple()), list(tuple())) -> list(tuple()).
%% @doc
%% Find the differences between the lists - and return a list of
%% {B, K, blue-side VC, pink-side VC}
%% If the blue-side or pink-side does not contain the key, then none is
%% used in place of the clock
compare_clocks(BlueList, PinkList) ->
    % Two lists of {B, K, VC} want to remove everything where {B, K, VC} is
    % the same in both lists
    aae_util:log("EX008", [BlueList, PinkList], logs()),
    BlueSet = ordsets:from_list(BlueList),
    PinkSet = ordsets:from_list(PinkList),
    BlueDelta = ordsets:subtract(BlueSet, PinkSet),
    PinkDelta = ordsets:subtract(PinkSet, BlueSet),
    % Want to subtract out from the Pink and Blue Sets any example where
    % both pink and blue are the same
    %
    % This should speed up the folding and key finding to provide the
    % joined list
    BlueDeltaList =
        lists:reverse(
            ordsets:fold(fun({B, K, VCB}, Acc) ->
                            % Assume for now that element may be only
                            % blue
                            [{{B, K}, {VCB, none}}|Acc]
                         end,
                         [],
                         BlueDelta)),
    % BlueDeltaList is the output of compare clocks, assuming the item
    % is only on the Blue side (so it compares the blue vector clock with
    % none)
    PinkEnrichFun =
        fun({B, K, VCP}, Acc) ->
            case lists:keyfind({B, K}, 1, Acc) of
                {{B, K}, {VCB, none}} ->
                    % Also present on the blue side - fill in the pink
                    % clock alongside the blue one
                    ElementWithClockDiff =
                        {{B, K}, {VCB, VCP}},
                    lists:keyreplace({B, K}, 1, Acc, ElementWithClockDiff);
                false ->
                    % Only on the pink side - add it, keeping the list
                    % sorted by {B, K}
                    ElementOnlyPink =
                        {{B, K}, {none, VCP}},
                    lists:keysort(1, [ElementOnlyPink|Acc])
            end
        end,
    % The Foldfun to be used on the PinkDelta, will now fill in the Pink
    % vector clock if the element also exists in Pink
    AllDeltaList =
        ordsets:fold(PinkEnrichFun, BlueDeltaList, PinkDelta),
    % The accumulator starts with the Blue side only perspective, and
    % either adds to it or enriches it by folding over the Pink side
    % view
    AllDeltaList.

-spec compare_trees(leveled_tictac:tictactree(),
                    leveled_tictac:tictactree()) -> list(non_neg_integer()).
%% @doc
%% Compare the trees - get list of dirty leaves (Segment IDs)
compare_trees(Tree0, Tree1) ->
    leveled_tictac:find_dirtyleaves(Tree0, Tree1).
-spec intersect_ids(list(integer()), list(integer())) -> list(integer()).
%% @doc
%% Return the IDs from the first list that also appear in the second list,
%% preserving the order (and any duplicates) of the first list.
intersect_ids(IDs0, IDs1) ->
    [ID || ID <- IDs0, lists:member(ID, IDs1)].
-spec select_ids(list(integer()), pos_integer(), atom(), list())
                                                        -> list(integer()).
%% @doc
%% Select a cluster of IDs if the list of IDs is smaller than the maximum
%% output size.  The lookup based on these IDs will be segment based, so it
%% is expected that the tightest clustering will yield the most efficient
%% results.
select_ids(IDList, MaxOutput, StateName, ExchangeID) ->
    IDList0 = lists:usort(IDList),
    % Score each MaxOutput-sized window of the sorted IDs by the span
    % between its first and last ID, keeping the tightest window seen
    FoldFun =
        fun(Idx, {BestIdx, MinOutput}) ->
            Space = lists:nth(MaxOutput + Idx - 1, IDList0)
                    - lists:nth(Idx, IDList0),
            case Space < MinOutput of
                true ->
                    {Idx, Space};
                false ->
                    {BestIdx, MinOutput}
            end
        end,
    case length(IDList0) > MaxOutput of
        true ->
            % Too many IDs - log the throttle, then slide the window over
            % every valid start position to find the tightest cluster
            aae_util:log("EX005",
                         [ExchangeID, length(IDList0), StateName],
                         logs()),
            {BestSliceStart, _Score} =
                lists:foldl(FoldFun,
                            {0, infinity},
                            lists:seq(1, 1 + length(IDList0) - MaxOutput)),
            lists:sublist(IDList0, BestSliceStart, MaxOutput);
        false ->
            IDList0
    end.

-spec jitter_pause(pos_integer()) -> pos_integer().
%% @doc
%% Jitter a pause, so if multiple FSMs started at once, they don't all use
%% the network at the same time
jitter_pause(Timeout) ->
    leveled_rand:uniform(Timeout) + Timeout div 2.

-spec reset({pos_integer(), pos_integer()})
                                        -> {non_neg_integer(), pos_integer()}.
%% @doc
%% Reset the received-reply count back to 0, keeping the target.  Matching
%% {Target, Target} asserts all expected replies had previously arrived.
reset({Target, Target}) -> {0, Target}.
-spec filtered_timeout(filters()) -> pos_integer().
%% @doc
%% Choose the scan timeout: a scan with no key range, segment filter or
%% modified range restriction covers the whole bucket and so is allowed
%% the (much longer) unfiltered timeout.
filtered_timeout({filter, _B, KeyRange, _TS, SegFilter, ModRange, _HM}) ->
    Unfiltered =
        (KeyRange == all) andalso (SegFilter == all) andalso (ModRange == all),
    case Unfiltered of
        true ->
            ?UNFILTERED_SCAN_TIMEOUT_MS;
        false ->
            ?SCAN_TIMEOUT_MS
    end.
%%%============================================================================
%%% log definitions
%%%============================================================================

-spec logs() -> list(tuple()).
%% @doc
%% Define log lines for this module - a list of {LogRef, {Level, Format}}
%% tuples passed to aae_util:log/3.
logs() ->
    [{"EX001",
        {info, "Exchange id=~s with target_count=~w expected"}},
     {"EX002",
        {error, "Timeout with pending_state=~w and missing_count=~w"
                ++ " for exchange id=~s"}},
     {"EX003",
        {info, "Normal exit for full exchange at"
                ++ " pending_state=~w for exchange_id=~s"
                ++ " root_compare_deltas=~w root_confirm_deltas=~w"
                ++ " branch_compare_deltas=~w branch_confirm_deltas=~w"
                ++ " key_deltas=~w"}},
     {"EX004",
        {info, "Exchange id=~s led to prompting of repair_count=~w"}},
     {"EX005",
        {info, "Exchange id=~s throttled count=~w at state=~w"}},
     {"EX006",
        {debug, "State change to ~w for exchange id=~s"}},
     {"EX007",
        {debug, "Reply received for colour=~w in exchange id=~s"}},
     {"EX008",
        {debug, "Comparison between BlueList ~w and PinkList ~w"}},
     {"EX009",
        {info, "Normal exit for partial (dynamic) exchange at"
                ++ " pending_state=~w for exchange_id=~s"
                ++ " tree_compare_deltas=~w after tree_compares=~w"
                ++ " key_deltas=~w"}},
     {"EX010",
        {warn, "Exchange not_supported for colour=~w in exchange id=~s"}}
    ].
%%%============================================================================
%%% Test
%%%============================================================================
-ifdef(TEST).
%% select_ids/4 should sort and de-duplicate the input, and return the
%% tightest cluster when the input exceeds the maximum output size.
select_id_test() ->
    L0 = [1, 2, 3],
    ?assertMatch(L0, select_ids(L0, 3, root_confirm, "t0")),
    L1 = [1, 2, 3, 5],
    ?assertMatch(L0, select_ids(L1, 3, root_confirm, "t1")),
    L2 = [1, 2, 3, 5, 6, 7, 8],
    ?assertMatch(L0, select_ids(L2, 3, root_confirm, "t2")),
    ?assertMatch([5, 6, 7, 8], select_ids(L2, 4, root_confirm, "t3")),
    ?assertMatch(L0, select_ids(intersect_ids(L1, L2), 3, root_confirm, "t4")),
    % Unsorted input should produce the same selections
    L3 = [8, 7, 1, 3, 2, 5, 6],
    ?assertMatch(L0, select_ids(L3, 3, root_confirm, "t5")),
    ?assertMatch([5, 6, 7, 8], select_ids(L3, 4, root_confirm, "t6")),
    ?assertMatch(L0, select_ids(intersect_ids(L1, L3), 3, root_confirm, "t7")).

%% compare_clocks/2 should pair each differing key with its blue and pink
%% clocks, using none where one side does not hold the key.
compare_clocks_test() ->
    KV1 = {<<"B1">>, <<"K1">>, [{a, 1}]},
    KV2 = {<<"B1">>, <<"K2">>, [{b, 1}]},
    KV3 = {<<"B1">>, <<"K3">>, [{a, 2}]},
    KV4 = {<<"B1">>, <<"K1">>, [{a, 1}, {b, 2}]},
    KV5 = {<<"B1">>, <<"K2">>, [{b, 1}, {c, 1}]},
    BL1 = [KV1, KV2, KV3],
    PL1 = [KV1, KV2, KV3],
    % Identical lists - no deltas
    ?assertMatch([], compare_clocks(BL1, PL1)),
    BL2 = [KV2, KV3, KV4],
    ?assertMatch([{{<<"B1">>, <<"K1">>}, {[{a, 1}, {b, 2}], [{a, 1}]}}],
                 compare_clocks(BL2, PL1)),
    ?assertMatch([{{<<"B1">>, <<"K1">>}, {[{a, 1}], [{a, 1}, {b, 2}]}}],
                 compare_clocks(PL1, BL2)),
    PL2 = [KV4, KV5],
    % K3 exists only on the blue side - pink clock is none
    ?assertMatch([{{<<"B1">>, <<"K1">>},
                        {[{a, 1}], [{a, 1}, {b, 2}]}},
                  {{<<"B1">>, <<"K2">>},
                        {[{b, 1}], [{b, 1}, {c, 1}]}},
                  {{<<"B1">>, <<"K3">>},
                        {[{a, 2}], none}}],
                 compare_clocks(BL1, PL2)).

%% A timeout while waiting should stop normally with pending_state=timeout.
clean_exit_ontimeout_test() ->
    State0 = #state{pink_returns={4, 5}, blue_returns={8, 8},
                    exchange_type = full},
    State1 = State0#state{pending_state = timeout},
    {stop, normal, State1} = waiting_all_results(timeout, State0).

%% Exercise the otherwise-unused gen_fsm callbacks for coverage.
coverage_cheat_test() ->
    {next_state, prepare, _State0} =
        handle_event(null, prepare, #state{exchange_type = full}),
    {reply, ok, prepare, _State1} =
        handle_sync_event(null, nobody, prepare, #state{exchange_type = full}),
    {next_state, prepare, _State2} =
        handle_info(null, prepare, #state{exchange_type = full}),
    {ok, prepare, _State3} =
        code_change(null, prepare, #state{exchange_type = full}, null).
-endif. | src/aae_exchange.erl | 0.650578 | 0.842507 | aae_exchange.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% riak_core: Core Riak Application
%%
%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(json_pp).
-define(SPACE, 32).
-define(is_quote(C), (C == $\") orelse (C == $\')).
-define(is_indent(C), (C == 91) orelse (C == 123)). % [, {
-define(is_undent(C), (C == 93) orelse (C == 125)). % ], }
-export([print/1,
test/0]).
%% @doc
%% Pretty-print a JSON string, indenting by four spaces per nesting level.
print(Str) when is_list(Str) -> json_pp(Str, 0, undefined, []).

%% json_pp(Input, IndentLevel, QuoteChar | undefined, ReversedAcc)
%% The third argument tracks whether we are inside a quoted string (and
%% which quote character opened it), so that structural characters inside
%% strings are copied through untouched.  Clause order matters: the escape
%% clause must be tried before the closing-quote clause.
json_pp([$\\, C| Rest], I, C, Acc) -> % in quote
    % Escaped quote character inside a string - copy both characters
    json_pp(Rest, I, C, [C, $\\| Acc]);
json_pp([C| Rest], I, undefined, Acc) when ?is_quote(C) ->
    % Opening quote - remember which quote character will close it
    json_pp(Rest, I, C, [C| Acc]);
json_pp([C| Rest], I, C, Acc) -> % in quote
    % Closing quote - back to structural scanning
    json_pp(Rest, I, undefined, [C| Acc]);
json_pp([C| Rest], I, undefined, Acc) when ?is_indent(C) ->
    % [ or { - emit the bracket, then a newline at the deeper indent
    json_pp(Rest, I+1, undefined, [pp_indent(I+1), $\n, C| Acc]);
json_pp([C| Rest], I, undefined, Acc) when ?is_undent(C) ->
    % ] or } - newline at the shallower indent before the bracket
    json_pp(Rest, I-1, undefined, [C, pp_indent(I-1), $\n| Acc]);
json_pp([$,| Rest], I, undefined, Acc) ->
    % Item separator - newline at the current indent
    json_pp(Rest, I, undefined, [pp_indent(I), $\n, $,| Acc]);
json_pp([$:| Rest], I, undefined, Acc) ->
    % Key/value separator - insert a space after the colon
    json_pp(Rest, I, undefined, [?SPACE, $:| Acc]);
json_pp([C|Rest], I, Q, Acc) ->
    % Any other character (including anything inside a quoted string)
    json_pp(Rest, I, Q, [C| Acc]);
json_pp([], _I, _Q, Acc) -> % done
    lists:reverse(Acc).

%% Four spaces of indentation per nesting level.
pp_indent(I) -> lists:duplicate(I*4, ?SPACE).
%% testing

%% Sample mochijson-style term covering objects, arrays, null, and both
%% atom and string keys (including an embedded escaped quote).
test_data() ->
    {struct, [{foo, true},
              {bar, false},
              {baz, {array, [1, 2, 3, 4]}},
              {'fiz:f', null},
              {"fozzer\"", 5}]}.
%% Flatten an iolist (which may contain binaries) into a flat string.
listify(IoList) ->
    Bin = list_to_binary(IoList),
    binary_to_list(Bin).
%% @doc Manual smoke test - encode the sample data and print the
%% pretty-printed form to stdout.
test() ->
    J1 = listify(mochijson:encode(test_data())),
    io:format("~s~n", [listify(print(J1))]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Pretty-printing the sample data should produce exactly this layout:
%% four-space indents, one item per line, and a space after each colon.
basic_test() ->
    J1 = listify(mochijson:encode(test_data())),
    L1 =
        "{\n"
        "    \"foo\": true,\n"
        "    \"bar\": false,\n"
        "    \"baz\": [\n"
        "        1,\n"
        "        2,\n"
        "        3,\n"
        "        4\n"
        "    ],\n"
        "    \"fiz:f\": null,\n"
        "    \"fozzer\\\"\": 5\n"
        "}",
    ?assertEqual(L1, listify(print(J1))),
    ok.
-endif. | src/json_pp.erl | 0.534612 | 0.532972 | json_pp.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Dead code is code that is executed but has no effect. This
%% optimization pass either removes dead code or jumps around it,
%% potentially making it unreachable so that it can be dropped
%% the next time beam_ssa:linearize/1 is called.
%%
-module(beam_ssa_dead).
-export([opt/1]).
-include("beam_ssa.hrl").
-import(lists, [append/1,keymember/3,last/1,member/2,
reverse/1,sort/1,takewhile/2]).
-type used_vars() :: #{beam_ssa:label():=cerl_sets:set(beam_ssa:var_name())}.
-type basic_type_test() :: atom() | {'is_tagged_tuple',pos_integer(),atom()}.
-type type_test() :: basic_type_test() | {'not',basic_type_test()}.
-type op_name() :: atom().
-type basic_rel_op() :: {op_name(),beam_ssa:b_var(),beam_ssa:value()} |
{basic_type_test(),beam_ssa:value()}.
-type rel_op() :: {op_name(),beam_ssa:b_var(),beam_ssa:value()} |
{type_test(),beam_ssa:value()}.
-record(st,
{bs :: beam_ssa:block_map(),
us :: used_vars(),
skippable :: #{beam_ssa:label():='true'},
rel_op=none :: 'none' | rel_op(),
target=any :: 'any' | 'one_way' | beam_ssa:label()
}).
-spec opt([{Label0,Block0}]) -> [{Label,Block}] when
      Label0 :: beam_ssa:label(),
      Block0 :: beam_ssa:b_blk(),
      Label :: beam_ssa:label(),
      Block :: beam_ssa:b_blk().

%% Entry point: shortcut branch/switch targets, then combine equality
%% tests, and relinearize (dropping any blocks made unreachable).
opt(Linear) ->
    {Used,Skippable} = used_vars(Linear),
    Blocks0 = maps:from_list(Linear),
    St0 = #st{bs=Blocks0,us=Used,skippable=Skippable},
    St = shortcut_opt(St0),
    #st{bs=Blocks} = combine_eqs(St#st{us=#{}}),
    beam_ssa:linearize(Blocks).

%%%
%%% Shortcut br/switch targets.
%%%
%%% A br/switch may branch to another br/switch that in turn always
%%% branches to another target. Rewrite br/switch to refer to the
%%% ultimate targets directly. That will save execution time, but
%%% could also reduce the size of the code if some of the original
%%% targets become unreachable and be deleted.
%%%
%%% When rewriting branches, we must be careful not to skip instructions
%%% that have side effects or that bind variables that will be used
%%% at the new target.
%%%
%%% We must also avoid branching to phi nodes. The reason is
%%% twofold. First, we might create a critical edge which is strictly
%%% forbidden. Second, there will be a branch from a block that is not
%%% listed in the list of predecessors in the phi node. Those
%%% limitations could probably be overcome, but it is not clear how
%%% much that would improve the code.
%%%

shortcut_opt(#st{bs=Blocks}=St) ->
    %% Processing the blocks in reverse post order seems to give more
    %% opportunities for optimizations compared to post order. (Based on
    %% running scripts/diffable with both PO and RPO and looking at
    %% the diff.)
    %%
    %% Unfortunately, processing the blocks in reverse post order
    %% potentially makes the time complexity quadratic, instead of
    %% linear for post order processing. We avoid drastic slowdowns by
    %% limiting how far we search forward to a common block that
    %% both the success and failure label will reach (see the comment
    %% in the first clause of shortcut_2/5).

    Ls = beam_ssa:rpo(Blocks),
    shortcut_opt(Ls, St).

%% Visit each block in turn, replacing its terminator if its targets can
%% be cut short.
shortcut_opt([L|Ls], #st{bs=Blocks0}=St) ->
    #b_blk{is=Is,last=Last0} = Blk0 = get_block(L, St),
    case shortcut_terminator(Last0, Is, L, St) of
        Last0 ->
            %% No change. No need to update the block.
            shortcut_opt(Ls, St);
        Last ->
            %% The terminator was simplified in some way.
            %% Update the block.
            Blk = Blk0#b_blk{last=Last},
            Blocks = Blocks0#{L=>Blk},
            shortcut_opt(Ls, St#st{bs=Blocks})
    end;
shortcut_opt([], St) -> St.
%% Try to shortcut the terminator of a block. Handles unconditional
%% `br`, two-way `br`, and `switch`; any other terminator is returned
%% unchanged.
shortcut_terminator(#b_br{bool=#b_literal{val=true},succ=Succ0},
                    _Is, From, St0) ->
    St = St0#st{rel_op=none},
    shortcut(Succ0, From, #{}, St);
shortcut_terminator(#b_br{bool=#b_var{}=Bool,succ=Succ0,fail=Fail0}=Br,
                    Is, From, St0) ->
    St = St0#st{target=one_way},
    RelOp = get_rel_op(Bool, Is),

    %% The boolean in a `br` is seldom used by the successors. By
    %% not binding its value unless it is actually used we might be able
    %% to skip some work in shortcut/4 and sub/2.
    SuccBs = bind_var_if_used(Succ0, Bool, #b_literal{val=true}, St),
    BrSucc = shortcut(Succ0, From, SuccBs, St#st{rel_op=RelOp}),
    FailBs = bind_var_if_used(Fail0, Bool, #b_literal{val=false}, St),
    BrFail = shortcut(Fail0, From, FailBs, St#st{rel_op=invert_op(RelOp)}),

    case {BrSucc,BrFail} of
        {#b_br{bool=#b_literal{val=true},succ=Succ},
         #b_br{bool=#b_literal{val=true},succ=Fail}}
          when Succ =/= Succ0; Fail =/= Fail0 ->
            %% One or both of the targets were cut short.
            beam_ssa:normalize(Br#b_br{succ=Succ,fail=Fail});
        {_,_} ->
            %% No change.
            Br
    end;
shortcut_terminator(#b_switch{arg=Bool,fail=Fail0,list=List0}=Sw,
                    _Is, From, St) ->
    Fail = shortcut_sw_fail(Fail0, List0, Bool, From, St),
    List = shortcut_sw_list(List0, Bool, From, St),
    beam_ssa:normalize(Sw#b_switch{fail=Fail,list=List});
shortcut_terminator(Last, _Is, _From, _St) ->
    Last.
%% Shortcut the failure label of a switch. When the switch list covers
%% exactly the literals `false` and `true`, the argument cannot be a
%% boolean if the failure label is reached; record that fact as the
%% known relational operator before following the failure label.
shortcut_sw_fail(Fail0, List, Bool, From, St0) ->
    case sort(List) of
        [{#b_literal{val=false},_},
         {#b_literal{val=true},_}] ->
            RelOp = {{'not',is_boolean},Bool},
            St = St0#st{rel_op=RelOp,target=one_way},
            #b_br{bool=#b_literal{val=true},succ=Fail} =
                shortcut(Fail0, From, #{}, St),
            Fail;
        _ ->
            Fail0
    end.

%% Shortcut each label in a switch list. When following the label for
%% a literal, it is known that Bool =:= Lit, so bind Bool accordingly.
shortcut_sw_list([{Lit,L0}|T], Bool, From, St0) ->
    RelOp = {'=:=',Bool,Lit},
    St = St0#st{rel_op=RelOp},
    #b_br{bool=#b_literal{val=true},succ=L} =
        shortcut(L0, From, bind_var(Bool, Lit, #{}), St#st{target=one_way}),
    [{Lit,L}|shortcut_sw_list(T, Bool, From, St0)];
shortcut_sw_list([], _, _, _) -> [].
%% Entry point for shortcutting a single branch target.
shortcut(L, _From, Bs, #st{rel_op=none,target=one_way}) when map_size(Bs) =:= 0 ->
    %% There is no way that we can find a suitable branch, because there is no
    %% relational operator stored, there are no bindings, and the block L can't
    %% have any phi nodes from which we could pick bindings because when the target
    %% is `one_way`, it implies the From block has a two-way `br` terminator.
    #b_br{bool=#b_literal{val=true},succ=L,fail=L};
shortcut(L, From, Bs, St) ->
    shortcut_1(L, From, Bs, cerl_sets:new(), St).

%% Keep shortcutting until no better branch can be found; the last
%% label reached is then wrapped in an unconditional branch.
shortcut_1(L, From, Bs0, UnsetVars0, St) ->
    case shortcut_2(L, From, Bs0, UnsetVars0, St) of
        none ->
            %% No more shortcuts found. Package up the previous
            %% label in an unconditional branch.
            #b_br{bool=#b_literal{val=true},succ=L,fail=L};
        {#b_br{bool=#b_var{}}=Br,_,_} ->
            %% This is a two-way branch. We can't do any better.
            Br;
        {#b_br{bool=#b_literal{val=true},succ=Succ},Bs,UnsetVars} ->
            %% This is a safe `br`, but try to find a better one.
            shortcut_1(Succ, L, Bs, UnsetVars, St)
    end.
%% Try to shortcut this block, branching to a successor.
shortcut_2(L, From, Bs, UnsetVars, St) ->
    case cerl_sets:size(UnsetVars) of
        SetSize when SetSize > 128 ->
            %% This is an heuristic to limit the search for a forced label
            %% before it drastically slows down the compiler. Experiments
            %% with scripts/diffable showed that larger limits did not
            %% find any more opportunities for optimization.
            %% (NOTE(review): an earlier wording of this comment mentioned
            %% a limit of 31 while the code checks against 128 — confirm
            %% which value was the tuned threshold.)
            none;
        _SetSize ->
            shortcut_3(L, From, Bs, UnsetVars, St)
    end.
%% Do the actual shortcutting work for one block: symbolically evaluate
%% its instructions and terminator with the current bindings, then
%% decide whether the resulting `br` can safely replace a branch to
%% this block.
shortcut_3(L, From, Bs0, UnsetVars0, St) ->
    #b_blk{is=Is,last=Last} = get_block(L, St),
    case eval_is(Is, From, Bs0, St) of
        none ->
            %% It is not safe to avoid this block because it
            %% has instructions with potential side effects.
            none;
        Bs ->
            %% The instructions in the block (if any) don't
            %% have any side effects and can be skipped.
            %% Evaluate the terminator.
            case eval_terminator(Last, Bs, St) of
                none ->
                    %% The terminator is not suitable (could be
                    %% because it is a switch that can't be simplified
                    %% or it is a ret instruction).
                    none;
                #b_br{}=Br ->
                    %% We have a potentially suitable br.
                    %% Now update the set of variables that will never
                    %% be set if this block will be skipped.
                    case update_unset_vars(L, Is, Br, UnsetVars0, St) of
                        unsafe ->
                            %% It is unsafe to use this br,
                            %% because it refers to a variable defined
                            %% in this block.
                            shortcut_unsafe_br(Br, L, Bs, UnsetVars0, St);
                        UnsetVars ->
                            %% Continue checking whether this br is
                            %% suitable.
                            shortcut_test_br(Br, L, Bs, UnsetVars, St)
                    end
            end
    end.
%% Classify the candidate `br` as safe or unsafe with respect to the
%% current set of unset variables, and continue down the matching path.
shortcut_test_br(Br, From, Bs, UnsetVars, St) ->
    Safe = is_br_safe(UnsetVars, Br, St),
    if
        Safe -> shortcut_safe_br(Br, From, Bs, UnsetVars, St);
        true -> shortcut_unsafe_br(Br, From, Bs, UnsetVars, St)
    end.
%% The candidate `br` cannot be used directly; try to search beyond it
%% for a `br` that is safe.
shortcut_unsafe_br(Br, From, Bs, UnsetVars, #st{target=Target}=St) ->
    %% Branching using this `br` is unsafe, either because it
    %% is an unconditional branch to a phi node, or because
    %% one or more of the variables that are not set will be
    %% used. Try to follow branches of this `br`, to find a
    %% safe `br`.
    case Br of
        #b_br{bool=#b_literal{val=true},succ=L} ->
            case Target of
                L ->
                    %% We have reached the forced target, and it
                    %% is unsafe. Give up.
                    none;
                _ ->
                    %% Try following this branch to see whether it
                    %% leads to a safe `br`.
                    shortcut_2(L, From, Bs, UnsetVars, St)
            end;
        #b_br{bool=#b_var{},succ=Succ,fail=Fail} ->
            case {Succ,Fail} of
                {L,Target} ->
                    %% The failure label is the forced target.
                    %% Try following the success label to see
                    %% whether it also ultimately ends up at the
                    %% forced target.
                    shortcut_2(L, From, Bs, UnsetVars, St);
                {Target,L} ->
                    %% The success label is the forced target.
                    %% Try following the failure label to see
                    %% whether it also ultimately ends up at the
                    %% forced target.
                    shortcut_2(L, From, Bs, UnsetVars, St);
                {_,_} ->
                    case Target of
                        any ->
                            %% This two-way branch is unsafe. Try
                            %% reducing it to a one-way branch.
                            shortcut_two_way(Br, From, Bs, UnsetVars, St);
                        one_way ->
                            %% This two-way branch is unsafe. Try
                            %% reducing it to a one-way branch.
                            shortcut_two_way(Br, From, Bs, UnsetVars, St);
                        _ when is_integer(Target) ->
                            %% This two-way branch is unsafe, and
                            %% there already is a forced target.
                            %% Give up.
                            none
                    end
            end
    end.
%% The candidate `br` is safe in itself; check it against the forced
%% target constraint (if any) before accepting it.
shortcut_safe_br(Br, From, Bs, UnsetVars, #st{target=Target}=St) ->
    %% This `br` instruction is safe. It does not branch to a phi
    %% node, and all variables that will be used are guaranteed to be
    %% defined.
    case Br of
        #b_br{bool=#b_literal{val=true},succ=L} ->
            %% This is a one-way branch.
            case Target of
                any ->
                    %% No forced target. Success!
                    {Br,Bs,UnsetVars};
                one_way ->
                    %% The target must be a one-way branch, which this
                    %% `br` is. Success!
                    {Br,Bs,UnsetVars};
                L when is_integer(Target) ->
                    %% The forced target is L. Success!
                    {Br,Bs,UnsetVars};
                _ when is_integer(Target) ->
                    %% Wrong forced target. Try following this branch
                    %% to see if it ultimately ends up at the forced
                    %% target.
                    shortcut_2(L, From, Bs, UnsetVars, St)
            end;
        #b_br{bool=#b_var{}} ->
            %% This is a two-way branch.
            if
                Target =:= any; Target =:= one_way ->
                    %% No specific forced target. Try to reduce the
                    %% two-way branch to an one-way branch.
                    case shortcut_two_way(Br, From, Bs, UnsetVars, St) of
                        none when Target =:= any ->
                            %% This `br` can't be reduced to a one-way
                            %% branch. Return the `br` as-is.
                            {Br,Bs,UnsetVars};
                        none when Target =:= one_way ->
                            %% This `br` can't be reduced to a one-way
                            %% branch. The caller wants a one-way
                            %% branch. Give up.
                            none;
                        {_,_,_}=Res ->
                            %% This `br` was successfully reduced to a
                            %% one-way branch.
                            Res
                    end;
                is_integer(Target) ->
                    %% There is a forced target, which can't
                    %% be reached because this `br` is a two-way
                    %% branch. Give up.
                    none
            end
    end.
%% Extend UnsetVars with the variables defined in this block (which
%% would never be set if the block is skipped). Returns `unsafe` when
%% the `br` itself depends on a variable defined in the block.
update_unset_vars(L, Is, Br, UnsetVars, #st{skippable=Skippable}) ->
    case is_map_key(L, Skippable) of
        true ->
            %% None of the variables used in this block are used in
            %% the successors. Thus, there is no need to add the
            %% variables to the set of unset variables.
            case Br of
                #b_br{bool=#b_var{}=Bool} ->
                    case keymember(Bool, #b_set.dst, Is) of
                        true ->
                            %% Bool is a variable defined in this
                            %% block. Using the br instruction from
                            %% this block (and skipping the body of
                            %% the block) is unsafe.
                            unsafe;
                        false ->
                            %% Bool is either a variable not defined
                            %% in this block or a literal. Adding it
                            %% to the UnsetVars set would not change
                            %% the outcome of the tests in
                            %% is_br_safe/2.
                            UnsetVars
                    end;
                #b_br{} ->
                    UnsetVars
            end;
        false ->
            %% Some variables defined in this block are used by
            %% successors. We must update the set of unset variables.
            SetInThisBlock = [V || #b_set{dst=V} <- Is],
            cerl_sets:union(UnsetVars, cerl_sets:from_list(SetInThisBlock))
    end.
%% Try to reduce a two-way `br` to a one-way branch: if following the
%% success path ends up at the failure label (or vice versa), both
%% paths reach the same place and the branch becomes unconditional.
shortcut_two_way(#b_br{succ=Succ,fail=Fail}, From, Bs0, UnsetVars0, St0) ->
    case shortcut_2(Succ, From, Bs0, UnsetVars0, St0#st{target=Fail}) of
        {#b_br{bool=#b_literal{},succ=Fail},_,_}=Res ->
            Res;
        none ->
            St = St0#st{target=Succ},
            case shortcut_2(Fail, From, Bs0, UnsetVars0, St) of
                {#b_br{bool=#b_literal{},succ=Succ},_,_}=Res ->
                    Res;
                none ->
                    none
            end
    end.
%% Fetch the block with label L from the state. The label is expected
%% to exist; a missing label crashes with badmatch.
get_block(L, #st{bs=Blocks}) ->
    #{L := Blk} = Blocks,
    Blk.
%% Check that taking Br will not reference any variable in UnsetVars
%% and will not jump unconditionally to a forbidden block.
is_br_safe(UnsetVars, Br, #st{us=Us}=St) ->
    %% Check that none of the unset variables will be used.
    case Br of
        #b_br{bool=#b_var{}=V,succ=Succ,fail=Fail} ->
            #{Succ:=Used0,Fail:=Used1} = Us,

            %% A two-way branch never branches to a phi node, so there
            %% is no need to check for phi nodes here.
            not cerl_sets:is_element(V, UnsetVars) andalso
                cerl_sets:is_disjoint(Used0, UnsetVars) andalso
                cerl_sets:is_disjoint(Used1, UnsetVars);
        #b_br{succ=Same,fail=Same} ->
            %% An unconditional branch must not jump to
            %% a phi node.
            not is_forbidden(Same, St) andalso
                cerl_sets:is_disjoint(map_get(Same, Us), UnsetVars)
    end.

%% A block is a forbidden branch target if it starts with a phi node
%% or if its first instruction is a loop header.
is_forbidden(L, St) ->
    case get_block(L, St) of
        #b_blk{is=[#b_set{op=phi}|_]} ->
            true;
        #b_blk{is=[#b_set{}=I|_]} ->
            beam_ssa:is_loop_header(I);
        #b_blk{} -> false
    end.
%% Evaluate the instructions in the block.
%% Return the updated bindings, or 'none' if there is
%% any instruction with potential side effects.
%%
%% `From` is the label of the predecessor block; it is needed to pick
%% the right phi argument. The special binding value `failed` marks
%% the result of an instruction known to fail; it is consumed by the
%% `succeeded` clause below.
eval_is([#b_set{op=phi,dst=Dst,args=Args}|Is], From, Bs0, St) ->
    Val = get_phi_arg(Args, From),
    Bs = bind_var(Dst, Val, Bs0),
    eval_is(Is, From, Bs, St);
eval_is([#b_set{op=succeeded,dst=Dst,args=[Var]}], _From, Bs, _St) ->
    case Bs of
        #{Var:=failed} ->
            bind_var(Dst, #b_literal{val=false}, Bs);
        #{Var:=#b_literal{}} ->
            bind_var(Dst, #b_literal{val=true}, Bs);
        #{} ->
            Bs
    end;
eval_is([#b_set{op={bif,_},dst=Dst}=I0|Is], From, Bs, St) ->
    I = sub(I0, Bs),
    case eval_bif(I, St) of
        #b_literal{}=Val ->
            eval_is(Is, From, bind_var(Dst, Val, Bs), St);
        failed ->
            eval_is(Is, From, bind_var(Dst, failed, Bs), St);
        none ->
            eval_is(Is, From, Bs, St)
    end;
eval_is([#b_set{op=Op,dst=Dst}=I|Is], From, Bs, St)
  when Op =:= is_tagged_tuple; Op =:= is_nonempty_list ->
    #b_set{args=Args} = sub(I, Bs),
    case eval_rel_op(Op, Args, St) of
        #b_literal{}=Val ->
            eval_is(Is, From, bind_var(Dst, Val, Bs), St);
        none ->
            eval_is(Is, From, Bs, St)
    end;
eval_is([#b_set{}=I|Is], From, Bs, St) ->
    case beam_ssa:no_side_effect(I) of
        true ->
            %% This instruction has no side effects. It can
            %% safely be omitted.
            eval_is(Is, From, Bs, St);
        false ->
            %% This instruction may have some side effect.
            %% It is not safe to avoid this instruction.
            none
    end;
eval_is([], _From, Bs, _St) -> Bs.
%% Pick the phi argument that corresponds to the predecessor label
%% From. Crashes (function_clause) if no entry matches.
get_phi_arg([Pair | Rest], From) ->
    case Pair of
        {Value, From} -> Value;
        _ -> get_phi_arg(Rest, From)
    end.
%% Symbolically evaluate a terminator with the current bindings.
%% Returns a (normalized) `br`, or `none` if the terminator cannot be
%% turned into a suitable branch.
eval_terminator(#b_br{bool=#b_var{}=Bool}=Br, Bs, _St) ->
    case get_value(Bool, Bs) of
        #b_literal{val=Val}=Lit ->
            case is_boolean(Val) of
                true ->
                    beam_ssa:normalize(Br#b_br{bool=Lit});
                false ->
                    %% Non-boolean literal. This means that this `br`
                    %% terminator will never actually be reached with
                    %% these bindings. (There must be a previous two-way
                    %% branch that branches the other way when Bool
                    %% is bound to a non-boolean literal.)
                    none
            end;
        #b_var{}=Var ->
            beam_ssa:normalize(Br#b_br{bool=Var})
    end;
eval_terminator(#b_br{bool=#b_literal{}}=Br, _Bs, _St) ->
    beam_ssa:normalize(Br);
eval_terminator(#b_switch{arg=Arg,fail=Fail,list=List}=Sw, Bs, St) ->
    case get_value(Arg, Bs) of
        #b_literal{}=Val ->
            %% Literal argument. Simplify to a `br`.
            beam_ssa:normalize(Sw#b_switch{arg=Val});
        #b_var{} ->
            %% Try optimizing the switch.
            case eval_switch(List, Arg, St, Fail) of
                none ->
                    none;
                To when is_integer(To) ->
                    %% Either one of the values in the switch
                    %% matched a previous value in a '=:=' test, or
                    %% none of the values matched a previous test.
                    #b_br{bool=#b_literal{val=true},succ=To,fail=To}
            end
    end;
eval_terminator(#b_ret{}, _Bs, _St) ->
    none.
%% Try to resolve a switch using the previously recorded relational
%% operator for the same variable.
eval_switch(List, Arg, #st{rel_op={_,Arg,_}=PrevOp}, Fail) ->
    %% There is a previous relational operator testing the same variable.
    %% Optimization may be possible.
    eval_switch_1(List, Arg, PrevOp, Fail);
eval_switch(_, _, _, _) ->
    %% There is either no previous relational operator, or it tests
    %% a different variable. Nothing to optimize.
    none.

%% Walk the switch list. If a branch is certain to be taken, return
%% its label. If some branch might be taken, give up (the Fail
%% argument is replaced by `none` so the final clause returns `none`).
eval_switch_1([{Lit,Lbl}|T], Arg, PrevOp, Fail) ->
    RelOp = {'=:=',Arg,Lit},
    case will_succeed(PrevOp, RelOp) of
        yes ->
            %% Success. This branch will always be taken.
            Lbl;
        no ->
            %% This branch will never be taken.
            eval_switch_1(T, Arg, PrevOp, Fail);
        maybe ->
            %% This label could be reached.
            eval_switch_1(T, Arg, PrevOp, none)
    end;
eval_switch_1([], _Arg, _PrevOp, Fail) ->
    %% Fail is now either the failure label or 'none'.
    Fail.
%% Return a fresh bindings map with Var bound to Val, but only if Var
%% is actually used by block L; otherwise return an empty map.
bind_var_if_used(L, Var, Val, #st{us=Us}) ->
    case cerl_sets:is_element(Var, map_get(L, Us)) of
        true -> #{Var=>Val};
        false -> #{}
    end.

%% Bind Var in the bindings map, resolving the value through existing
%% bindings first. The special marker `failed` is stored unresolved.
bind_var(Var, failed, Bs) ->
    Bs#{Var=>failed};
bind_var(Var, Val0, Bs) ->
    Val = get_value(Val0, Bs),
    Bs#{Var=>Val}.
%% Resolve a value through the bindings map. A variable may be bound
%% to another variable, so the chain is followed until a literal or an
%% unbound variable is reached.
get_value(#b_var{}=Var, Bs) ->
    case maps:find(Var, Bs) of
        {ok, Bound} -> get_value(Bound, Bs);
        error -> Var
    end;
get_value(#b_literal{}=Lit, _Bs) -> Lit.
%% Try to evaluate a call to a pure BIF. With all-literal arguments
%% the BIF is applied directly (a raised error yields `none`);
%% otherwise the previously recorded relational operator may still
%% decide the result.
eval_bif(#b_set{op={bif,Bif},args=Args}, St) ->
    Arity = length(Args),
    case erl_bifs:is_pure(erlang, Bif, Arity) of
        false ->
            none;
        true ->
            case get_lit_args(Args) of
                none ->
                    %% Not literal arguments. Try to evaluate
                    %% it based on a previous relational operator.
                    eval_rel_op({bif,Bif}, Args, St);
                LitArgs ->
                    try apply(erlang, Bif, LitArgs) of
                        Val -> #b_literal{val=Val}
                    catch
                        error:_ -> none
                    end
            end
    end.
%% Return the literal values of the argument list as a plain list if
%% all arguments (at most three) are literals; otherwise return `none`.
get_lit_args(Args) when length(Args) >= 1, length(Args) =< 3 ->
    case [Val || #b_literal{val=Val} <- Args] of
        Vals when length(Vals) =:= length(Args) -> Vals;
        _ -> none
    end;
get_lit_args(_) -> none.
%%%
%%% Handling of relational operators.
%%%

%% If the last instruction in the block defines Bool, return its
%% operation in normalized form; otherwise `none`.
get_rel_op(Bool, [_|_]=Is) ->
    case last(Is) of
        #b_set{op=Op,dst=Bool,args=Args} ->
            normalize_op(Op, Args);
        #b_set{} ->
            none
    end;
get_rel_op(_, []) -> none.

%% normalize_op(Instruction) -> {Normalized,FailLabel} | error
%%    Normalized = {Operator,Variable,Variable|Literal} |
%%                 {TypeTest,Variable}
%%    Operation = '<' | '=<' | '=:=' | '=/=' | '>=' | '>'
%%    TypeTest = is_atom | is_integer ...
%%    Variable = #b_var{}
%%    Literal = #b_literal{}
%%
%%  Normalize a relational operator to facilitate further
%%  comparisons between operators. Always make the register
%%  operand the first operand. If there are two registers,
%%  order the registers in lexical order.
%%
%%  For example, this instruction:
%%
%%    #b_set{op={bif,=<},args=[#b_literal{}, #b_var{}}
%%
%%  will be normalized to:
%%
%%    {'=<',#b_var{},#b_literal{}}

-spec normalize_op(Op, Args) -> NormalizedOp | 'none' when
      Op :: beam_ssa:op(),
      Args :: [beam_ssa:value()],
      NormalizedOp :: basic_rel_op().

normalize_op(is_tagged_tuple, [Arg,#b_literal{val=Size},#b_literal{val=Tag}])
  when is_integer(Size), is_atom(Tag) ->
    {{is_tagged_tuple,Size,Tag},Arg};
normalize_op(is_nonempty_list, [Arg]) ->
    {is_nonempty_list,Arg};
normalize_op({bif,Bif}, [Arg]) ->
    case erl_internal:new_type_test(Bif, 1) of
        true -> {Bif,Arg};
        false -> none
    end;
normalize_op({bif,Bif}, [_,_]=Args) ->
    case erl_internal:comp_op(Bif, 2) of
        true ->
            normalize_op_1(Bif, Args);
        false ->
            none
    end;
normalize_op(_, _) -> none.
%% Order the operands of a comparison: the variable operand first (the
%% operator is turned around if needed); two variables in term order;
%% two literals cannot be normalized here.
normalize_op_1(Bif, Args) ->
    case Args of
        [#b_literal{}=Arg1,#b_var{}=Arg2] ->
            {turn_op(Bif),Arg2,Arg1};
        [#b_var{}=Arg1,#b_literal{}=Arg2] ->
            {Bif,Arg1,Arg2};
        [#b_var{}=A,#b_var{}=B] ->
            if A < B -> {Bif,A,B};
               true -> {turn_op(Bif),B,A}
            end;
        [#b_literal{},#b_literal{}] ->
            none
    end.
%% Logically negate a normalized relational operator or type test,
%% i.e. return the operation that is true exactly when the original
%% one is false.
-spec invert_op(basic_rel_op() | 'none') -> rel_op() | 'none'.

invert_op({Op,Arg1,Arg2}) ->
    {invert_op_1(Op),Arg1,Arg2};
invert_op({TypeTest,Arg}) ->
    {{'not',TypeTest},Arg};
invert_op(none) -> none.
%% Map a comparison operator to its logical negation.
invert_op_1(Op) ->
    case Op of
        '>=' -> '<';
        '<' -> '>=';
        '=<' -> '>';
        '>' -> '=<';
        '=:=' -> '=/=';
        '=/=' -> '=:=';
        '==' -> '/=';
        '/=' -> '=='
    end.
%% Swap the operand order of a comparison: `A Op B' holds exactly when
%% `B turn_op(Op) A' holds. The (in)equality operators are symmetric
%% and map to themselves.
turn_op(Op) ->
    case Op of
        '<' -> '>';
        '=<' -> '>=';
        '>' -> '<';
        '>=' -> '=<';
        '=:=' -> '=:=';
        '=/=' -> '=/=';
        '==' -> '==';
        '/=' -> '/='
    end.
%% Evaluate a (possibly relational) BIF call using the previously
%% recorded relational operator. Returns a boolean literal, the
%% `failed` marker, or `none` when nothing can be decided.
eval_rel_op(_Bif, _Args, #st{rel_op=none}) ->
    none;
eval_rel_op(Bif, Args, #st{rel_op=Prev}) ->
    case normalize_op(Bif, Args) of
        none ->
            eval_boolean(Prev, Bif, Args);
        RelOp ->
            case will_succeed(Prev, RelOp) of
                yes -> #b_literal{val=true};
                no -> #b_literal{val=false};
                maybe -> none
            end
    end.

%% Knowing that Var is not a boolean, boolean operators applied to Var
%% must fail at runtime.
eval_boolean({{'not',is_boolean},Var}, {bif,'not'}, [Var]) ->
    failed;
eval_boolean({{'not',is_boolean},Var}, {bif,Op}, Args)
  when Op =:= 'and'; Op =:= 'or' ->
    case member(Var, Args) of
        true -> failed;
        false -> none
    end;
eval_boolean(_, _, _) ->
    none.
%% will_succeed(PrevCondition, Condition) -> yes | no | maybe
%%  PrevCondition is a condition known to be true. This function
%%  will tell whether Condition will succeed.

will_succeed({_,_,_}=Same, {_,_,_}=Same) ->
    %% Repeated test.
    yes;
will_succeed({Op1,Var,#b_literal{val=A}}, {Op2,Var,#b_literal{val=B}}) ->
    will_succeed_1(Op1, A, Op2, B);
will_succeed({Op1,Var,#b_var{}=A}, {Op2,Var,#b_var{}=B}) ->
    will_succeed_vars(Op1, A, Op2, B);
will_succeed({'=:=',Var,#b_literal{val=A}}, {TypeTest,Var}) ->
    %% The exact value of Var is known; evaluate the type test
    %% directly on the literal value.
    eval_type_test(TypeTest, A);
will_succeed({_,_}=Same, {_,_}=Same) ->
    %% Repeated type test.
    yes;
will_succeed({Test1,Var}, {Test2,Var}) ->
    will_succeed_test(Test1, Test2);
will_succeed({{'not',is_boolean},Var}, {'=:=',Var,#b_literal{val=Lit}})
  when is_boolean(Lit) ->
    no;
will_succeed({_,_}, {_,_}) ->
    maybe;
will_succeed({_,_}, {_,_,_}) ->
    maybe;
will_succeed({_,_,_}, {_,_}) ->
    maybe;
will_succeed({_,_,_}, {_,_,_}) ->
    maybe.
%% Decide whether the type test Test2 must succeed, must fail, or
%% might go either way, given that Test1 is known to be true.
will_succeed_test({'not', Test}, Test) ->
    %% Test is known to be false, so repeating it fails.
    no;
will_succeed_test({'not', _}, _) ->
    'maybe';
will_succeed_test({is_tagged_tuple, _, _}, is_tuple) ->
    %% A tagged tuple is always a tuple.
    yes;
will_succeed_test(is_tuple, {is_tagged_tuple, _, _}) ->
    'maybe';
will_succeed_test(is_nonempty_list, is_list) ->
    %% A non-empty list is always a list.
    yes;
will_succeed_test(is_list, is_nonempty_list) ->
    'maybe';
will_succeed_test(_T1, _T2) ->
    'maybe'.
%% will_succeed_1(Op1, A, Op2, B) -> yes | no | maybe
%%  Given that `Var Op1 A' is known to hold, decide whether
%%  `Var Op2 B' must succeed (yes), must fail (no), or cannot be
%%  decided (maybe). A and B are literal values. Equal tests were
%%  already handled by the caller (will_succeed/2).

%% The exact value of Var is known (Var =:= A).
will_succeed_1('=:=', A, '<', B) ->
    if
        B =< A -> no;
        true -> yes
    end;
will_succeed_1('=:=', A, '=<', B) ->
    if
        B < A -> no;
        true -> yes
    end;
will_succeed_1('=:=', A, '=:=', B) when A =/= B ->
    no;
will_succeed_1('=:=', A, '=/=', B) ->
    if
        A =:= B -> no;
        true -> yes
    end;
will_succeed_1('=:=', A, '>=', B) ->
    if
        B > A -> no;
        true -> yes
    end;
will_succeed_1('=:=', A, '>', B) ->
    if
        B >= A -> no;
        true -> yes
    end;

will_succeed_1('=/=', A, '=:=', B) when A =:= B -> no;

%% Var < A is known.
will_succeed_1('<', A, '=:=', B) when B >= A -> no;
will_succeed_1('<', A, '=/=', B) when B >= A -> yes;
will_succeed_1('<', A, '<', B)   when B >= A -> yes;
will_succeed_1('<', A, '=<', B)  when B >= A -> yes;
will_succeed_1('<', A, '>=', B)  when B >= A -> no;
will_succeed_1('<', A, '>', B)   when B >= A -> no;

%% Var =< A is known.
will_succeed_1('=<', A, '=:=', B) when B > A  -> no;
will_succeed_1('=<', A, '=/=', B) when B > A  -> yes;
will_succeed_1('=<', A, '<', B)   when B > A  -> yes;
will_succeed_1('=<', A, '=<', B)  when B >= A -> yes;
will_succeed_1('=<', A, '>=', B)  when B > A  -> no;
will_succeed_1('=<', A, '>', B)   when B >= A -> no;

%% Var >= A is known.
will_succeed_1('>=', A, '=:=', B) when B < A  -> no;
will_succeed_1('>=', A, '=/=', B) when B < A  -> yes;
will_succeed_1('>=', A, '<', B)   when B =< A -> no;
will_succeed_1('>=', A, '=<', B)  when B < A  -> no;
will_succeed_1('>=', A, '>=', B)  when B =< A -> yes;
will_succeed_1('>=', A, '>', B)   when B < A  -> yes;

%% Var > A is known.
will_succeed_1('>', A, '=:=', B) when B =< A -> no;
will_succeed_1('>', A, '=/=', B) when B =< A -> yes;
will_succeed_1('>', A, '<', B)   when B =< A -> no;
will_succeed_1('>', A, '=<', B)  when B =< A -> no;
will_succeed_1('>', A, '>=', B)  when B =< A -> yes;
will_succeed_1('>', A, '>', B)   when B =< A -> yes;

%% Arithmetic (coercing) comparisons.
will_succeed_1('==', A, '==', B) ->
    if
        A == B -> yes;
        true -> no
    end;
will_succeed_1('==', A, '/=', B) ->
    if
        A == B -> no;
        true -> yes
    end;
will_succeed_1('/=', A, '/=', B) when A == B -> yes;
will_succeed_1('/=', A, '==', B) when A == B -> no;

will_succeed_1(_, _, _, _) -> maybe.
%% Decide the outcome of a comparison between two variables, given a
%% previous comparison of the same pair. Only comparisons against the
%% very same second operand can be decided.
will_succeed_vars(Op1, Val, Op2, Val) ->
    %% Both conditions compare against the same (exactly equal) term.
    case {Op1, Op2} of
        {'=/=', '=:='} -> no;
        {'=:=', '=/='} -> no;
        {'=:=', '>='} -> yes;
        {'=:=', '=<'} -> yes;
        {'/=', '=='} -> no;
        {'==', '/='} -> no;
        {_, _} -> 'maybe'
    end;
will_succeed_vars('/=', Val1, '==', Val2) when Val1 == Val2 -> no;
will_succeed_vars('==', Val1, '/=', Val2) when Val1 == Val2 -> no;
will_succeed_vars(_, _, _, _) -> 'maybe'.
%% Evaluate a type test against a known literal value, translating the
%% boolean result into yes/no.
eval_type_test(Test, Arg) ->
    Result = eval_type_test_1(Test, Arg),
    if
        Result -> yes;
        true -> no
    end.

%% The actual test. `is_nonempty_list` and `is_tagged_tuple` are not
%% BIFs, so they are handled explicitly; any other test is applied
%% dynamically as an erlang:Test/1 call.
eval_type_test_1(is_nonempty_list, [_|_]) ->
    true;
eval_type_test_1(is_nonempty_list, _) ->
    false;
eval_type_test_1({is_tagged_tuple, Sz, Tag}, Arg)
  when tuple_size(Arg) =:= Sz, element(1, Arg) =:= Tag ->
    %% Guard failure (e.g. Arg is not a tuple) falls through to the
    %% next clause, yielding false.
    true;
eval_type_test_1({is_tagged_tuple, _, _}, _) ->
    false;
eval_type_test_1(Test, Arg) ->
    erlang:Test(Arg).
%%%
%%% Combine bif:'=:=' and switch instructions
%%% to switch instructions.
%%%
%%% Consider this code:
%%%
%%%     0:
%%%       @ssa_bool = bif:'=:=' Var, literal 1
%%%       br @ssa_bool, label 2, label 3
%%%
%%%     2:
%%%       ret literal a
%%%
%%%     3:
%%%       @ssa_bool:7 = bif:'=:=' Var, literal 2
%%%       br @ssa_bool:7, label 4, label 999
%%%
%%%     4:
%%%       ret literal b
%%%
%%%     999:
%%%       .
%%%       .
%%%       .
%%%
%%% The two bif:'=:=' instructions can be combined
%%% to a switch:
%%%
%%%     0:
%%%       switch Var, label 999, [ { literal 1, label 2 },
%%%                                { literal 2, label 3 } ]
%%%
%%%     2:
%%%       ret literal a
%%%
%%%     4:
%%%       ret literal b
%%%
%%%     999:
%%%       .
%%%       .
%%%       .
%%%

%% Walk the blocks in post order (reverse RPO), merging a '=:=' br or
%% switch with the switch/br found in its failure block when safe.
combine_eqs(#st{bs=Blocks}=St) ->
    Ls = reverse(beam_ssa:rpo(Blocks)),
    combine_eqs_1(Ls, St).
%% For each block that ends in a suitable switch/br, try to absorb the
%% switch/br of its failure block into a single combined switch.
combine_eqs_1([L|Ls], #st{bs=Blocks0}=St0) ->
    case comb_get_sw(L, St0) of
        none ->
            combine_eqs_1(Ls, St0);
        {_,Arg,_,Fail0,List0} ->
            case comb_get_sw(Fail0, St0) of
                {true,Arg,Fail1,Fail,List1} ->
                    %% Another switch/br with the same arguments was
                    %% found. Try combining them.
                    case combine_lists(Fail1, List0, List1, Blocks0) of
                        none ->
                            %% Different types of literals in the lists,
                            %% or the success cases in the first switch
                            %% could branch to the second switch
                            %% (increasing code size and repeating tests).
                            combine_eqs_1(Ls, St0);
                        List ->
                            %% Everything OK! Combine the lists.
                            Sw0 = #b_switch{arg=Arg,fail=Fail,list=List},
                            Sw = beam_ssa:normalize(Sw0),
                            Blk0 = map_get(L, Blocks0),
                            Blk = Blk0#b_blk{last=Sw},
                            Blocks = Blocks0#{L:=Blk},
                            St = St0#st{bs=Blocks},
                            combine_eqs_1(Ls, St)
                    end;
                {true,_OtherArg,_,_,_} ->
                    %% The other switch/br uses a different Arg.
                    combine_eqs_1(Ls, St0);
                {false,_,_,_,_} ->
                    %% Not safe: Bindings of variables that will be used
                    %% or execution of instructions with potential
                    %% side effects will be skipped.
                    combine_eqs_1(Ls, St0);
                none ->
                    %% No switch/br at this label.
                    combine_eqs_1(Ls, St0)
            end
    end;
combine_eqs_1([], St) -> St.
%% Describe the switch/br that ends the block at L as
%% {Safe,Arg,FromLabel,FailLabel,SwitchList}, or `none` if the block
%% does not end in a suitable terminator. Safe tells whether the body
%% of the block may be skipped.
comb_get_sw(L, #st{bs=Blocks,skippable=Skippable}) ->
    #b_blk{is=Is,last=Last} = map_get(L, Blocks),
    Safe0 = is_map_key(L, Skippable),
    case Last of
        #b_ret{} ->
            none;
        #b_br{bool=#b_var{}=Bool,succ=Succ,fail=Fail} ->
            case comb_is(Is, Bool, Safe0) of
                {none,_} ->
                    none;
                {#b_set{op={bif,'=:='},args=[#b_var{}=Arg,#b_literal{}=Lit]},Safe} ->
                    %% The branch tests Arg =:= Lit; expressed as a
                    %% one-entry switch list.
                    {Safe,Arg,L,Fail,[{Lit,Succ}]};
                {#b_set{},_} ->
                    none
            end;
        #b_br{} ->
            none;
        #b_switch{arg=#b_var{}=Arg,fail=Fail,list=List} ->
            {none,Safe} = comb_is(Is, none, Safe0),
            {Safe,Arg,L,Fail,List}
    end.

%% Return the instruction defining Bool if it is the last instruction
%% in the block, and whether all instructions are free of side effects.
comb_is([#b_set{dst=#b_var{}=Bool}=I], Bool, Safe) ->
    {I,Safe};
comb_is([#b_set{}=I|Is], Bool, Safe0) ->
    Safe = Safe0 andalso beam_ssa:no_side_effect(I),
    comb_is(Is, Bool, Safe);
comb_is([], _Bool, Safe) ->
    {none,Safe}.
%% combine_list(Fail, List1, List2, Blocks) -> List|none.
%%  Try to combine two switch lists, returning the combined
%%  list or 'none' if not possible.
%%
%%  The values in the two lists must be all of the same type.
%%
%%  The code reached from the labels in the first list must
%%  not reach the failure label (if they do, tests could
%%  be repeated).
%%
combine_lists(Fail, L1, L2, Blocks) ->
    Ls = beam_ssa:rpo([Lbl || {_,Lbl} <- L1], Blocks),
    case member(Fail, Ls) of
        true ->
            %% One or more of labels in the first list
            %% could reach the failure label. That
            %% means that the second switch/br instruction
            %% will be retained, increasing code size and
            %% potentially also execution time.
            none;
        false ->
            %% The combined switch will replace both original
            %% br/switch instructions, leading to a reduction in code
            %% size and potentially also in execution time.
            combine_lists_1(L1, L2)
    end.
%% Concatenate two type-compatible switch lists, dropping entries from
%% the second list whose literal already occurs in the first.
combine_lists_1(List0, List1) ->
    case are_lists_compatible(List0, List1) of
        false ->
            none;
        true ->
            Seen = maps:from_list(List0),
            List0 ++ [Pair || {Val, _} = Pair <- List1,
                              not is_map_key(Val, Seen)]
    end.
%% Two switch lists can only be combined when their literals have the
%% same comparable type (atom, float, or integer). Only the first
%% literal of each list is examined — presumably a switch list is
%% always type-homogeneous (NOTE(review): confirm).
are_lists_compatible([{#b_literal{val=Val1},_}|_],
                     [{#b_literal{val=Val2},_}|_]) ->
    case lit_type(Val1) of
        none -> false;
        Type -> Type =:= lit_type(Val2)
    end.
%% Classify a literal value for switch-list compatibility purposes.
%% Values other than atoms, floats, and integers yield `none`.
lit_type(Val) when is_atom(Val) -> atom;
lit_type(Val) when is_float(Val) -> float;
lit_type(Val) when is_integer(Val) -> integer;
lit_type(_) -> none.
%%%
%%% Calculate used variables for each block.
%%%

%% Returns {UsedVars,Skippable}: for every block, the set of variables
%% used by the block and its successors, and the set of blocks whose
%% bodies define nothing that any successor uses.
used_vars(Linear) ->
    used_vars(reverse(Linear), #{}, #{}).

used_vars([{L,#b_blk{is=Is}=Blk}|Bs], UsedVars0, Skip0) ->
    %% Calculate the variables used by each block and its
    %% successors. This information is used by
    %% shortcut_opt/1.
    Successors = beam_ssa:successors(Blk),
    Used0 = used_vars_succ(Successors, L, UsedVars0, cerl_sets:new()),
    Used = used_vars_blk(Blk, Used0),
    UsedVars = used_vars_phis(Is, L, Used, UsedVars0),

    %% combine_eqs/1 needs different variable usage information than
    %% shortcut_opt/1. The Skip map will have an entry for each block
    %% that can be skipped (does not bind any variable used in
    %% successor). This information is also useful for speeding up
    %% shortcut_opt/1.
    Defined0 = [Def || #b_set{dst=Def} <- Is],
    Defined = cerl_sets:from_list(Defined0),
    MaySkip = cerl_sets:is_disjoint(Defined, Used0),
    case MaySkip of
        true ->
            Skip = Skip0#{L=>true},
            used_vars(Bs, UsedVars, Skip);
        false ->
            used_vars(Bs, UsedVars, Skip0)
    end;
used_vars([], UsedVars, Skip) ->
    {UsedVars,Skip}.
%% Union the live-in sets of all successors of block L, preferring the
%% per-edge set keyed by {Successor,L} recorded for phi nodes.
used_vars_succ([S|Ss], L, LiveMap, Live0) ->
    Key = {S,L},
    case LiveMap of
        #{Key:=Live} ->
            %% The successor has a phi node, and the value for
            %% this block in the phi node is a variable.
            used_vars_succ(Ss, L, LiveMap, cerl_sets:union(Live, Live0));
        #{S:=Live} ->
            %% No phi node in the successor, or the value for
            %% this block in the phi node is a literal.
            used_vars_succ(Ss, L, LiveMap, cerl_sets:union(Live, Live0));
        #{} ->
            %% A peek_message block which has not been processed yet.
            used_vars_succ(Ss, L, LiveMap, Live0)
    end;
used_vars_succ([], _, _, Acc) -> Acc.
%% Record the used-set for block L and, when the block starts with phi
%% nodes, additional per-predecessor used-sets keyed by {L,Pred} so
%% that used_vars_succ/4 can be edge-precise.
used_vars_phis(Is, L, Live0, UsedVars0) ->
    UsedVars = UsedVars0#{L=>Live0},
    Phis = takewhile(fun(#b_set{op=Op}) -> Op =:= phi end, Is),
    case Phis of
        [] ->
            UsedVars;
        [_|_] ->
            PhiArgs = append([Args || #b_set{args=Args} <- Phis]),
            case [{P,V} || {#b_var{}=V,P} <- PhiArgs] of
                [_|_]=PhiVars ->
                    PhiLive0 = rel2fam(PhiVars),
                    PhiLive = [{{L,P},cerl_sets:union(cerl_sets:from_list(Vs), Live0)} ||
                                  {P,Vs} <- PhiLive0],
                    maps:merge(UsedVars, maps:from_list(PhiLive));
                [] ->
                    %% There were only literals in the phi node(s).
                    UsedVars
            end
    end.
%% Compute the live-in set of a block: start from the variables used
%% by the terminator, then walk the instructions backwards.
used_vars_blk(#b_blk{is=Is,last=Last}, Used0) ->
    Used = cerl_sets:union(Used0, cerl_sets:from_list(beam_ssa:used(Last))),
    used_vars_is(reverse(Is), Used).

%% Backward scan: each instruction adds its used variables and kills
%% its destination. Phi nodes are handled separately (used_vars_phis/4).
used_vars_is([#b_set{op=phi}|Is], Used) ->
    used_vars_is(Is, Used);
used_vars_is([#b_set{dst=Dst}=I|Is], Used0) ->
    Used1 = cerl_sets:union(Used0, cerl_sets:from_list(beam_ssa:used(I))),
    Used = cerl_sets:del_element(Dst, Used1),
    used_vars_is(Is, Used);
used_vars_is([], Used) ->
    Used.
%%%
%%% Common utilities.
%%%

%% Apply the substitution map Sub to the arguments of an instruction.
%% The empty-map fast path avoids rebuilding the instruction.
sub(#b_set{args=Args}=I, Sub) when map_size(Sub) =/= 0 ->
    I#b_set{args=[sub_arg(A, Sub) || A <- Args]};
sub(I, _Sub) -> I.
%% Substitute one argument: a variable is looked up in the
%% substitution map (falling back to itself); anything else is
%% returned unchanged.
sub_arg(#b_var{}=Old, Sub) ->
    maps:get(Old, Sub, Old);
sub_arg(Old, _Sub) -> Old.
%% Convert a relation (a list of {Key,Value} pairs) into a family:
%% a sorted list of {Key,[Value]} with unique keys and sorted, unique
%% value lists.
%%
%% Fix: the final line had dataset metadata ("| lib/compiler/... |")
%% fused onto it, which broke compilation; the junk has been removed.
rel2fam(S0) ->
    S1 = sofs:relation(S0),
    S = sofs:rel2fam(S1),
    sofs:to_external(S).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_views_encoding).
-export([
max/0,
encode/1,
encode/2,
decode/1
]).
-define(NULL, 0).
-define(FALSE, 1).
-define(TRUE, 2).
-define(NUMBER, 3).
-define(STRING, 4).
-define(LIST, 5).
-define(OBJECT, 6).
-define(MAX, 255).
%% @doc Returns the sentinel term that encodes as the ?MAX type tag,
%% i.e. after every other encodable value.
max() ->
    max_encoding_value.

%% @doc Encodes `X' for storage as a value (no collation transform).
encode(X) ->
    encode(X, value).

%% @doc Encodes `X' either as a `key' (collation-aware: numbers are
%% coerced to floats and binaries are replaced by
%% couch_util:get_sort_key/1 output) or as a `value' (stored as-is),
%% packed with erlfdb_tuple.
encode(X, Type) when Type == key; Type == value ->
    erlfdb_tuple:pack(encode_int(X, Type)).

%% @doc Decodes a term previously produced by encode/1,2.
decode(Encoded) ->
    Val = erlfdb_tuple:unpack(Encoded),
    decode_int(Val).
%% Tag each Erlang term with its type marker, producing a tuple that
%% erlfdb_tuple can pack. Lists and objects are stored as tuples of
%% recursively encoded elements.
encode_int(null, _Type) ->
    {?NULL};
encode_int(false, _Type) ->
    {?FALSE};
encode_int(true, _Type) ->
    {?TRUE};
encode_int(max_encoding_value, _Type) ->
    {?MAX};
%% Key encoding coerces all numbers to floats so that integers and
%% floats collate together.
encode_int(Num, key) when is_number(Num) ->
    {?NUMBER, float(Num)};
encode_int(Num, value) when is_number(Num) ->
    {?NUMBER, Num};
%% Key encoding replaces the binary with its collation sort key.
encode_int(Bin, key) when is_binary(Bin) ->
    {?STRING, couch_util:get_sort_key(Bin)};
encode_int(Bin, value) when is_binary(Bin) ->
    {?STRING, Bin};
encode_int(List, Type) when is_list(List) ->
    Elems = [encode_int(Elem, Type) || Elem <- List],
    {?LIST, list_to_tuple(Elems)};
encode_int({Props}, Type) when is_list(Props) ->
    EncodePair = fun({K, V}) ->
        {encode_int(K, Type), encode_int(V, Type)}
    end,
    {?OBJECT, list_to_tuple(lists:map(EncodePair, Props))}.
%% Inverse of encode_int/2: turn an unpacked, type-tagged tuple back
%% into its Erlang representation. Lists and objects were stored as
%% tuples and are converted back to lists / `{Props}' form.
%%
%% Fix: the final line had dataset metadata ("| src/couch_views/... |")
%% fused onto it, which broke compilation; the junk has been removed.
decode_int({?NULL}) ->
    null;
decode_int({?FALSE}) ->
    false;
decode_int({?TRUE}) ->
    true;
decode_int({?MAX}) ->
    max_encoding_value;
decode_int({?STRING, Bin}) ->
    Bin;
decode_int({?NUMBER, Num}) ->
    Num;
decode_int({?LIST, List}) ->
    lists:map(fun decode_int/1, tuple_to_list(List));
decode_int({?OBJECT, Object}) ->
    Props = lists:map(
        fun({EK, EV}) ->
            {decode_int(EK), decode_int(EV)}
        end,
        tuple_to_list(Object)
    ),
    {Props}.
%% @doc
%% A Histogram tracks the size and number of events in buckets.
%% You can use Histograms for aggregatable calculation of quantiles.
%%
%% Example use cases for Histograms:
%% <ul>
%% <li>Response latency</li>
%% <li>Request size</li>
%% </ul>
%%
%% Histogram expects `buckets' key in a metric spec. Buckets can be:
%% - a list of numbers in increasing order;
%% - `default';
%% - `{linear, Start, Step, Count}';
%% - `{exponential, Start, Step, Count}'
%%
%% Example:
%% <pre lang="erlang">
%% -module(example_instrumenter).
%% setup() ->
%% prometheus_histogram:declare([{name, http_request_duration_milliseconds},
%% {labels, [method]},
%% {buckets, [100, 300, 500, 750, 1000]},
%% {help, "Http Request execution time."}]).
%%
%% instrument(Time, Method) ->
%% %% Time must be in native units, otherwise duration_unit must be false
%% prometheus_histogram:observe(http_request_duration_milliseconds,
%% [Method], Time).
%%
%% </pre>
%% @end
-module(prometheus_histogram).
%%% metric
-export([new/1,
declare/1,
deregister/1,
deregister/2,
set_default/2,
observe/2,
observe/3,
observe/4,
pobserve/6,
observe_duration/2,
observe_duration/3,
observe_duration/4,
remove/1,
remove/2,
remove/3,
reset/1,
reset/2,
reset/3,
value/1,
value/2,
value/3,
buckets/1,
buckets/2,
buckets/3,
values/2]
).
%%% collector
-export([deregister_cleanup/1,
collect_mf/2,
collect_metrics/2]).
-include("prometheus.hrl").
-behaviour(prometheus_metric).
-behaviour(prometheus_collector).
%%====================================================================
%% Macros
%%====================================================================
-define(TABLE, ?PROMETHEUS_HISTOGRAM_TABLE).
-define(BOUNDS_POS, 2).
-define(ISUM_POS, 3).
-define(FSUM_POS, 4).
-define(BUCKETS_START, 5).
-define(WIDTH, 16).
%% ets row layout
%% {Key, NBounds, Sum, Bucket1, Bucket2, ...}
%% NBounds is a list of bounds possibly converted to native units
%%====================================================================
%% Metric API
%%====================================================================
%% @doc Creates a histogram using `Spec'.
%%
%% Raises `{missing_metric_spec_key, Key, Spec}' error if required `Spec' key
%% is missing.<br/>
%% Raises `{invalid_metric_name, Name, Message}' error if metric `Name'
%% is invalid.<br/>
%% Raises `{invalid_metric_help, Help, Message}' error if metric `Help'
%% is invalid.<br/>
%% Raises `{invalid_metric_labels, Labels, Message}' error if `Labels'
%% isn't a list.<br/>
%% Raises `{invalid_label_name, Name, Message}' error if `Name' isn't a valid
%% label name.<br/>
%% Raises `{invalid_value_error, Value, Message}' error if `duration_unit' is
%% unknown or doesn't match metric name.<br/>
%% Raises `{mf_already_exists, {Registry, Name}, Message}' error if a histogram
%% with the same `Spec' already exists.
%%
%% Histogram-specific errors:<br/>
%% Raises `{no_buckets, Buckets}' error if `Buckets' are missing,
%% not a list, empty list or not known buckets spec.<br/>
%% Raises `{invalid_buckets, Buckets, Message}' error if `Buckets'
%% aren't in increasing order.<br/>
%% Raises `{invalid_bound, Bound}' error if `Bound' isn't a number.
%% @end
new(Spec) ->
  %% Validation resolves the bucket spec and stores it under the `data' key.
  Spec1 = validate_histogram_spec(Spec),
  prometheus_metric:insert_new_mf(?TABLE, ?MODULE, Spec1).
%% @doc Creates a histogram using `Spec'.
%% If a histogram with the same `Spec' exists returns `false'.
%%
%% Raises `{missing_metric_spec_key, Key, Spec}' error if required `Spec' key
%% is missing.<br/>
%% Raises `{invalid_metric_name, Name, Message}' error if metric `Name'
%% is invalid.<br/>
%% Raises `{invalid_metric_help, Help, Message}' error if metric `Help'
%% is invalid.<br/>
%% Raises `{invalid_metric_labels, Labels, Message}' error if `Labels'
%% isn't a list.<br/>
%% Raises `{invalid_label_name, Name, Message}' error if `Name' isn't a valid
%% label name.<br/>
%% Raises `{invalid_value_error, Value, Message}' error if `duration_unit' is
%% unknown or doesn't match metric name.<br/>
%%
%% Histogram-specific errors:<br/>
%% Raises `{no_buckets, Buckets}' error if `Buckets' are missing,
%% not a list, empty list or not known buckets spec.<br/>
%% Raises `{invalid_buckets, Buckets, Message}' error if `Buckets'
%% aren't in increasing order.<br/>
%% Raises `{invalid_bound, Bound}' error if `Bound' isn't a number.
%% @end
declare(Spec) ->
  %% Same validation as new/1; insert_mf returns false instead of raising
  %% when the metric family already exists.
  Spec1 = validate_histogram_spec(Spec),
  prometheus_metric:insert_mf(?TABLE, ?MODULE, Spec1).
%% @equiv deregister(default, Name)
deregister(Name) ->
  deregister(default, Name).
%% @doc
%% Removes all histogram series with name `Name' and
%% removes Metric Family from `Registry'.
%%
%% After this call new/1 for `Name' and `Registry' will succeed.
%%
%% Returns `{true, _}' if `Name' was a registered histogram.
%% Otherwise returns `{false, _}'.
%% @end
deregister(Registry, Name) ->
  try
    MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name),
    %% The bucket list determines the row width, which the delete
    %% match-spec below must mirror.
    Buckets = prometheus_metric:mf_data(MF),
    prometheus_metric:deregister_mf(?TABLE, Registry, Name),
    Select = deregister_select(Registry, Name, Buckets),
    NumDeleted = ets:select_delete(?TABLE, Select),
    {true, NumDeleted > 0}
  catch
    %% check_mf_exists raises when the metric is unknown; translate any
    %% failure into a {false, false} "nothing deregistered" result.
    _:_ -> {false, false}
  end.
%% @private
%% Creates an all-zero series for `Name' with empty label values.
set_default(Registry, Name) ->
  insert_placeholders(Registry, Name, []).
%% @equiv observe(default, Name, [], Value)
observe(Name, Value) ->
  observe(default, Name, [], Value).
%% @equiv observe(default, Name, LabelValues, Value)
observe(Name, LabelValues, Value) ->
  observe(default, Name, LabelValues, Value).
%% @doc Observes the given `Value'.
%%
%% Raises `{invalid_value, Value, Message}' if `Value'
%% isn't an integer.<br/>
%% Raises `{unknown_metric, Registry, Name}' error if histogram with named
%% `Name' can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
%% Integer fast path: the integer sum slot and the matching bucket counter
%% are bumped in one atomic ets:update_counter/3 call.
observe(Registry, Name, LabelValues, Value) when is_integer(Value) ->
  Key = key(Registry, Name, LabelValues),
  case ets:lookup(?TABLE, Key) of
    [Metric] ->
      BucketPosition = calculate_histogram_bucket_position(Metric, Value),
      ets:update_counter(?TABLE, Key,
                         [{?ISUM_POS, Value},
                          {?BUCKETS_START + BucketPosition, 1}]);
    [] ->
      %% Series row does not exist yet: create placeholders, then retry.
      insert_metric(Registry, Name, LabelValues, Value, fun observe/4)
  end,
  ok;
%% Float path: ets counters are integer-only, so the float sum lives in a
%% separate slot and is updated via a select_replace match-spec.
observe(Registry, Name, LabelValues, Value) when is_number(Value) ->
  Key = key(Registry, Name, LabelValues),
  case ets:lookup(?TABLE, Key) of
    [Metric] ->
      fobserve_impl(Key, Metric, Value);
    [] ->
      insert_metric(Registry, Name, LabelValues, Value,
                    fun(_, _, _, _) ->
                        observe(Registry, Name, LabelValues, Value)
                    end)
  end;
observe(_Registry, _Name, _LabelValues, Value) ->
  erlang:error({invalid_value, Value, "observe accepts only numbers"}).
%% @private
%% Like observe/4 but with a pre-computed bucket position supplied by the
%% caller, skipping the bucket search.
pobserve(Registry, Name, LabelValues, Buckets, BucketPos, Value) when is_integer(Value) ->
  Key = key(Registry, Name, LabelValues),
  try
    ets:update_counter(?TABLE, Key,
                       [{?ISUM_POS, Value}, {?BUCKETS_START + BucketPos, 1}])
  catch error:badarg ->
      %% update_counter fails with badarg when the row is missing:
      %% insert placeholders and retry.
      insert_metric(Registry, Name, LabelValues, Value,
                    fun(_, _, _, _) ->
                        pobserve(Registry, Name, LabelValues, Buckets,
                                 BucketPos, Value)
                    end)
  end,
  ok;
pobserve(Registry, Name, LabelValues, Buckets, BucketPos, Value) when is_number(Value) ->
  Key = key(Registry, Name, LabelValues),
  %% select_replace returns how many rows were replaced: 0 means the row
  %% does not exist yet and must be inserted first.
  case
    fobserve_impl(Key, Buckets, BucketPos, Value) of
    0 ->
      insert_metric(Registry, Name, LabelValues, Value,
                    fun(_, _, _, _) ->
                        fobserve_impl(Key, Buckets, BucketPos, Value)
                    end);
    1 ->
      ok
  end;
pobserve(_Registry, _Name, _LabelValues, _Buckets, _Pos, Value) ->
  erlang:error({invalid_value, Value, "pobserve accepts only numbers"}).
%% @equiv observe_duration(default, Name, [], Fun)
observe_duration(Name, Fun) ->
  observe_duration(default, Name, [], Fun).
%% @equiv observe_duration(default, Name, LabelValues, Fun)
observe_duration(Name, LabelValues, Fun) ->
  observe_duration(default, Name, LabelValues, Fun).
%% @doc Tracks the amount of time spent executing `Fun'.
%%
%% Raises `{unknown_metric, Registry, Name}' error if histogram with named
%% `Name' can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% Raises `{invalid_value, Value, Message}' if `Fun'
%% isn't a function.<br/>
%% @end
observe_duration(Registry, Name, LabelValues, Fun) when is_function(Fun) ->
  Start = erlang:monotonic_time(),
  try
    Fun()
  after
    %% Record elapsed time (native units) even when Fun() raises.
    observe(Registry, Name, LabelValues, erlang:monotonic_time() - Start)
  end;
%% Fixed misspelled variable name (_Regsitry -> _Registry).
observe_duration(_Registry, _Name, _LabelValues, Fun) ->
  erlang:error({invalid_value, Fun, "observe_duration accepts only functions"}).
%% @equiv remove(default, Name, [])
remove(Name) ->
  remove(default, Name, []).
%% @equiv remove(default, Name, LabelValues)
remove(Name, LabelValues) ->
  remove(default, Name, LabelValues).
%% @doc Removes histogram series identified by `Registry', `Name'
%% and `LabelValues'.
%%
%% Raises `{unknown_metric, Registry, Name}' error if histogram with name
%% `Name' can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
remove(Registry, Name, LabelValues) ->
  prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
  %% A series is striped across ?WIDTH per-scheduler rows; take each shard
  %% and report true if any of them actually existed.
  case lists:flatten([ets:take(?TABLE,
                               {Registry, Name, LabelValues, Scheduler})
                      || Scheduler <- schedulers_seq()]) of
    [] -> false;
    _ -> true
  end.
%% @equiv reset(default, Name, [])
reset(Name) ->
  reset(default, Name, []).
%% @equiv reset(default, Name, LabelValues)
reset(Name, LabelValues) ->
  reset(default, Name, LabelValues).
%% @doc Resets the value of the histogram identified by `Registry', `Name'
%% and `LabelValues'.
%%
%% Raises `{unknown_metric, Registry, Name}' error if histogram with name
%% `Name' can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
reset(Registry, Name, LabelValues) ->
  MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
  Buckets = prometheus_metric:mf_data(MF),
  %% Zero both sum slots and every bucket counter on every shard.
  UpdateSpec = generate_update_spec(Buckets),
  %% update_element returns a boolean per shard; usort collapses them.
  %% [_, _] means a mix of hits and misses, which still counts as a reset.
  case lists:usort([ets:update_element(?TABLE,
                                       {Registry, Name, LabelValues, Scheduler},
                                       [{?ISUM_POS, 0}, {?FSUM_POS, 0}] ++ UpdateSpec)
                    || Scheduler <- schedulers_seq()]) of
    [_, _] -> true;
    [true] -> true;
    _ -> false
  end.
%% @equiv value(default, Name, [])
value(Name) ->
  value(default, Name, []).
%% @equiv value(default, Name, LabelValues)
value(Name, LabelValues) ->
  value(default, Name, LabelValues).
%% @doc Returns the value of the histogram identified by `Registry', `Name'
%% and `LabelValues'. If there is no histogram for `LabelValues',
%% returns `undefined'.
%%
%% If duration unit set, sum will be converted to the duration unit.
%% {@link prometheus_time. Read more here.}
%%
%% Raises `{unknown_metric, Registry, Name}' error if histogram named `Name'
%% can't be found in `Registry'.<br/>
%% Raises `{invalid_metric_arity, Present, Expected}' error if labels count
%% mismatch.
%% @end
value(Registry, Name, LabelValues) ->
  MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
  %% Collect the per-scheduler shards of this series and fold them into a
  %% single {BucketCounts, Sum} pair.
  RawValues = [ets:lookup(?TABLE, {Registry, Name, LabelValues, Scheduler})
               || Scheduler <- schedulers_seq()],
  case lists:flatten(RawValues) of
    [] -> undefined;
    Values -> {reduce_buckets_counters(Values), reduce_sum(MF, Values)}
  end.
%% Returns all series of histogram `Name' in `Registry' as
%% {LabelPairs, Buckets, Sum} triples; [] when the metric is unknown.
values(Registry, Name) ->
  case prometheus_metric:check_mf_exists(?TABLE, Registry, Name) of
    false -> [];
    MF -> mf_values(Registry, Name, MF)
  end.
%% @equiv buckets(default, Name, [])
buckets(Name) ->
  buckets(default, Name, []).
%% @equiv buckets(default, Name, LabelValues)
buckets(Name, LabelValues) ->
  buckets(default, Name, LabelValues).
%% @doc Returns buckets of the histogram identified by `Registry', `Name'
%% and `LabelValues'.
%% @end
buckets(Registry, Name, LabelValues) ->
  MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
  prometheus_metric:mf_data(MF).
%%====================================================================
%% Collector API
%%====================================================================
%% @private
%% Deletes every series row of every histogram registered in `Registry'
%% and drops the metric family entries.
deregister_cleanup(Registry) ->
  [delete_metrics(Registry, Buckets)
   || [_, _, _, _, Buckets] <- prometheus_metric:metrics(?TABLE, Registry)],
  true = prometheus_metric:deregister_mf(?TABLE, Registry),
  ok.
%% @private
%% Emits one MetricFamily per registered histogram through `Callback'.
collect_mf(Registry, Callback) ->
  [Callback(create_histogram(Name, Help, {CLabels, Labels, Registry, DU, Buckets})) ||
    [Name, {Labels, Help}, CLabels, DU, Buckets]
      <- prometheus_metric:metrics(?TABLE, Registry)],
  ok.
%% @private
%% Loads all shards, merges them per label set, and renders one metric
%% per distinct label-values combination.
collect_metrics(Name, {CLabels, Labels, Registry, DU, Bounds}) ->
  MFValues = load_all_values(Registry, Name, Bounds),
  LabelValuesMap = reduce_label_values(MFValues),
  maps:fold(
    fun(LabelValues, Stat, L) ->
        [create_histogram_metric(CLabels, Labels, DU, Bounds, LabelValues, Stat)|L]
    end, [], LabelValuesMap).
%%====================================================================
%% Private Parts
%%====================================================================
%% Validates labels and resolves the bucket spec, storing the resulting
%% bounds list under the `data' key of the spec.
validate_histogram_spec(Spec) ->
  Labels = prometheus_metric_spec:labels(Spec),
  validate_histogram_labels(Labels),
  RBuckets = prometheus_metric_spec:get_value(buckets, Spec, default),
  Buckets = prometheus_buckets:new(RBuckets),
  [{data, Buckets}|Spec].
%% "le" is reserved for the per-bucket upper-bound label.
validate_histogram_labels(Labels) ->
  [raise_error_if_le_label_found(Label) || Label <- Labels].
raise_error_if_le_label_found("le") ->
  erlang:error({invalid_metric_label_name, "le",
                "histogram cannot have a label named \"le\""});
raise_error_if_le_label_found(Label) ->
  Label.
%% Creates the zeroed series row, then re-runs the original operation
%% through `CB'.
insert_metric(Registry, Name, LabelValues, Value, CB) ->
  insert_placeholders(Registry, Name, LabelValues),
  CB(Registry, Name, LabelValues, Value).
%% Float observation: resolve the bucket from the row, then delegate.
fobserve_impl(Key, Metric, Value) ->
  Buckets = metric_buckets(Metric),
  BucketPos = calculate_histogram_bucket_position(Metric, Value),
  fobserve_impl(Key, Buckets, BucketPos, Value).
%% Returns the number of replaced rows (0 when the row is missing).
fobserve_impl(Key, Buckets, BucketPos, Value) ->
  ets:select_replace(?TABLE, generate_select_replace(Key, Buckets, BucketPos, Value)).
%% Inserts a fresh row {Key, Bounds, ISum = 0, FSum = 0, Bucket... = 0}.
%% Bounds are converted to native time units when a duration unit is set,
%% so observed native timestamps can be compared against them directly.
insert_placeholders(Registry, Name, LabelValues) ->
  MF = prometheus_metric:check_mf_exists(?TABLE, Registry, Name, LabelValues),
  MFBuckets = prometheus_metric:mf_data(MF),
  DU = prometheus_metric:mf_duration_unit(MF),
  Fun = fun (Bucket) ->
            prometheus_time:maybe_convert_to_native(DU, Bucket)
        end,
  BoundCounters = lists:duplicate(length(MFBuckets), 0),
  MetricSpec =
    [key(Registry, Name, LabelValues), lists:map(Fun, MFBuckets), 0, 0]
    ++ BoundCounters,
  ets:insert_new(?TABLE, list_to_tuple(MetricSpec)).
%% Finds the 0-based bucket index `Value' falls into.
calculate_histogram_bucket_position(Metric, Value) ->
  Buckets = metric_buckets(Metric),
  prometheus_buckets:position(Buckets, Value).
%% Builds a select_replace match-spec that adds `Value' to the float sum
%% ('$4') and 1 to the selected bucket, passing every other field through
%% via its '$N' placeholder.
generate_select_replace(Key, Bounds, BucketPos, Value) ->
  BoundPlaceholders = gen_query_bound_placeholders(Bounds),
  HistMatch = list_to_tuple([Key, '$2', '$3', '$4'] ++ BoundPlaceholders),
  BucketUpdate = lists:sublist(BoundPlaceholders, BucketPos)
    ++ [{'+', gen_query_placeholder(?BUCKETS_START + BucketPos), 1}]
    ++ lists:nthtail(BucketPos + 1, BoundPlaceholders),
  HistUpdate = list_to_tuple([{Key}, '$2', '$3', {'+', '$4', Value}] ++ BucketUpdate),
  [{HistMatch,
    [],
    [{HistUpdate}]}].
%% Tuple positions occupied by bucket counters for this bounds list.
buckets_seq(Buckets) ->
  lists:seq(?BUCKETS_START, ?BUCKETS_START + length(Buckets) - 1).
%% update_element spec that zeroes every bucket counter.
generate_update_spec(Buckets) ->
  [{Index, 0} || Index <- buckets_seq(Buckets)].
%% '$N' placeholder atom for tuple position N.
gen_query_placeholder(Index) ->
  list_to_atom("$" ++ integer_to_list(Index)).
gen_query_bound_placeholders(Buckets) ->
  [gen_query_placeholder(Index) || Index <- buckets_seq(Buckets)].
%% Converts per-bucket counts into cumulative ("less-or-equal") counts,
%% e.g. [1, 2, 3] -> [1, 3, 6]. Rewritten to accumulate by prepending and
%% reversing once, instead of the previous O(n^2) `Acc ++ [X]' appends;
%% also handles the empty list instead of crashing on it.
augment_counters([]) ->
  [];
augment_counters([Start | Counters]) ->
  augment_counters(Counters, [Start], Start).
augment_counters([], LAcc, _CAcc) ->
  lists:reverse(LAcc);
augment_counters([Counter | Counters], LAcc, CAcc) ->
  augment_counters(Counters, [CAcc + Counter | LAcc], CAcc + Counter).
%% Bounds list stored in the row (position ?BOUNDS_POS).
metric_buckets(Metric) ->
  element(?BOUNDS_POS, Metric).
%% Sums the bucket counters of all shard rows column-wise.
reduce_buckets_counters(Metrics) ->
  ABuckets =
    [sub_tuple_to_list(Metric, ?BUCKETS_START,
                       ?BUCKETS_START + length(metric_buckets(Metric)))
     || Metric <- Metrics],
  [lists:sum(Bucket) || Bucket <- transpose(ABuckets)].
%% Transposes a list of equal-length lists (rows become columns).
transpose([[] | _]) ->
  [];
transpose(Rows) ->
  FirstColumn = [hd(Row) || Row <- Rows],
  Remainder = [tl(Row) || Row <- Rows],
  [FirstColumn | transpose(Remainder)].
%% Total observed sum across shards: integer slot plus float slot per row.
reduce_sum(Metrics) ->
  lists:sum([element(?ISUM_POS, Metric) + element(?FSUM_POS, Metric)
             || Metric <- Metrics]).
%% Same, converted to the metric family's duration unit when one is set.
reduce_sum(MF, Metrics) ->
  DU = prometheus_metric:mf_duration_unit(MF),
  prometheus_time:maybe_convert_to_du(DU, reduce_sum(Metrics)).
%% Renders one series: buckets are made cumulative, paired with their
%% bounds, and the total count is the last cumulative value.
create_histogram_metric(CLabels, Labels, DU, Bounds, LabelValues, [ISum, FSum | Buckets]) ->
  BCounters = augment_counters(Buckets),
  Bounds1 = lists:zipwith(fun(Bound, Bucket) ->
                              {Bound, Bucket}
                          end,
                          Bounds, BCounters),
  prometheus_model_helpers:histogram_metric(
    CLabels ++ lists:zip(Labels, LabelValues),
    Bounds1,
    lists:last(BCounters),
    prometheus_time:maybe_convert_to_du(DU, ISum + FSum)).
%% Matches every shard row of `Name': ['$1' = label values, '$3' = int sum,
%% '$4' = float sum | bucket counters].
load_all_values(Registry, Name, Bounds) ->
  BoundPlaceholders = gen_query_bound_placeholders(Bounds),
  QuerySpec = [{Registry, Name, '$1', '_'}, '_', '$3', '$4'] ++ BoundPlaceholders,
  ets:match(?TABLE, list_to_tuple(QuerySpec)).
%% select_delete spec matching every shard row of one metric name.
deregister_select(Registry, Name, Buckets) ->
  BoundCounters = lists:duplicate(length(Buckets), '_'),
  MetricSpec = [{Registry, Name, '_', '_'}, '_', '_', '_'] ++ BoundCounters,
  [{list_to_tuple(MetricSpec), [], [true]}].
%% Deletes every row in `Registry' whose width matches `Buckets'.
delete_metrics(Registry, Buckets) ->
  BoundCounters = lists:duplicate(length(Buckets), '_'),
  MetricSpec = [{Registry, '_', '_', '_'}, '_', '_', '_'] ++ BoundCounters,
  ets:match_delete(?TABLE, list_to_tuple(MetricSpec)).
%% Extracts tuple elements at 1-based positions Pos..Size-1 (Size is an
%% exclusive upper bound) as a list; [] when the range is empty.
sub_tuple_to_list(Tuple, Pos, Size) when Pos < Size ->
  [element(I, Tuple) || I <- lists:seq(Pos, Size - 1)];
sub_tuple_to_list(_Tuple, _Pos, _Size) ->
  [].
%% Shard indices 0..?WIDTH-1; every series is striped over these to reduce
%% write contention between schedulers.
schedulers_seq() ->
  lists:seq(0, ?WIDTH-1).
%% Row key for the shard belonging to the current scheduler.
key(Registry, Name, LabelValues) ->
  X = erlang:system_info(scheduler_id),
  Rnd = X band (?WIDTH-1),
  {Registry, Name, LabelValues, Rnd}.
%% Folds shard rows into a map LabelValues => summed [ISum, FSum | Buckets].
reduce_label_values(MFValues) ->
  lists:foldl(
    fun([Labels | V], ResAcc) ->
        case maps:is_key(Labels, ResAcc) of
          true ->
            PrevSum = maps:get(Labels, ResAcc),
            ResAcc#{Labels => [lists:sum(C) || C <- transpose([PrevSum, V])]};
          false ->
            ResAcc#{Labels => V}
        end
    end, #{}, MFValues).
%% Builds the values/2 result: per label set, the raw (non-cumulative)
%% {Bound, Count} pairs plus the total sum in the configured duration unit.
mf_values(Registry, Name, MF) ->
  DU = prometheus_metric:mf_duration_unit(MF),
  Labels = prometheus_metric:mf_labels(MF),
  Bounds = prometheus_metric:mf_data(MF),
  MFValues = load_all_values(Registry, Name, Bounds),
  LabelValuesMap = reduce_label_values(MFValues),
  maps:fold(
    fun(LabelValues, [ISum, FSum | BCounters], L) ->
        Bounds1 = lists:zipwith(fun(Bound, Bucket) ->
                                    {Bound, Bucket}
                                end,
                                Bounds, BCounters),
        [{lists:zip(Labels, LabelValues), Bounds1,
          prometheus_time:maybe_convert_to_du(DU, ISum + FSum)}|L]
    end, [], LabelValuesMap).
%% @private
%% Builds the MetricFamily for this histogram; this module is passed so the
%% registry calls back into collect_metrics/2. (Removed stray dataset
%% metadata text that had been fused onto the final line.)
create_histogram(Name, Help, Data) ->
  prometheus_model_helpers:create_mf(Name, Help, histogram, ?MODULE, Data).
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2017: <NAME>
%%% This software is released under the MIT license cited in 'LICENSE.md'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% This module provides an API to a so-called zig-zag array. Zig-zag array
%%% is an array in which two consecutive values are separated with a key that
%%% identifies those values, i.e. each key is associated with two values:
%%% left and right. Moreover keys in zig-zag array are sorted in ascending
%%% order and duplicated keys are not allowed. Example zig-zag array:
%%% v1 k1 v2 k2 v3 k3 v4.
%%% @end
%%%-------------------------------------------------------------------
-module(bp_tree_array).
-author("<NAME>").
-include("bp_tree.hrl").
%% API exports
-export([new/1, size/1]).
-export([get/2, update/3, remove/2, remove/3]).
-export([find/2, find_value/2, lower_bound/2]).
-export([insert/3, append/3, prepend/3, split/1, merge/2]).
-export([to_list/1, from_list/1, to_map/1, from_map/1]).
-record(bp_tree_array, {
size,
data
}).
-type key() :: any().
-type value() :: any().
-type selector() :: key | left | right | both | lower_bound | lower_bound_key.
-type pos() :: non_neg_integer() | first | last.
-type remove_pred() :: fun((value()) -> boolean()).
-opaque array() :: #bp_tree_array{}.
-export_type([array/0, selector/0]).
%%====================================================================
%% API functions
%%====================================================================
%%--------------------------------------------------------------------
%% @doc
%% Creates a new array.
%% @end
%%--------------------------------------------------------------------
-spec new(pos_integer()) -> array().
new(Size) ->
    #bp_tree_array{
        size = 0,
        %% Storage holds Size keys interleaved with Size + 1 value slots.
        data = erlang:make_tuple(2 * Size + 1, ?NIL)
    }.
%%--------------------------------------------------------------------
%% @doc
%% Returns the size of an array (the number of keys currently stored).
%% @end
%%--------------------------------------------------------------------
-spec size(array()) -> non_neg_integer().
size(#bp_tree_array{size = Size}) ->
    Size.
%%--------------------------------------------------------------------
%% @doc
%% Returns an item from an array at a selected position.
%% Layout: value I sits at tuple slot 2I-1, key I at 2I, and the value to
%% the right of key I at 2I+1 (shared with value I+1's left slot).
%% @end
%%--------------------------------------------------------------------
-spec get({selector(), pos()}, array()) ->
    {ok, value() | {value(), value()}} | {error, out_of_range}.
get({lower_bound, Key}, Array) ->
    Pos = lower_bound(Key, Array),
    get({left, Pos}, Array);
get({lower_bound_key, Key}, Array) ->
    Pos = lower_bound(Key, Array),
    get({key, Pos}, Array);
get({Selector, first}, Array = #bp_tree_array{}) ->
    get({Selector, 1}, Array);
get({Selector, last}, Array = #bp_tree_array{size = Size}) ->
    get({Selector, Size}, Array);
%% {right, 0} addresses the leftmost value slot (before the first key).
get({right, 0}, #bp_tree_array{data = Data}) ->
    {ok, erlang:element(1, Data)};
get({_Selector, Pos}, #bp_tree_array{size = Size})
    when Pos < 1 orelse Pos > Size ->
    {error, out_of_range};
get({left, Pos}, #bp_tree_array{data = Data}) ->
    {ok, erlang:element(2 * Pos - 1, Data)};
get({key, Pos}, #bp_tree_array{data = Data}) ->
    {ok, erlang:element(2 * Pos, Data)};
get({right, Pos}, #bp_tree_array{data = Data}) ->
    {ok, erlang:element(2 * Pos + 1, Data)};
get({both, Pos}, #bp_tree_array{data = Data}) ->
    {ok, {erlang:element(2 * Pos - 1, Data), erlang:element(2 * Pos + 1, Data)}}.
%%--------------------------------------------------------------------
%% @doc
%% Updates an item in an array at a selected position; position/selector
%% addressing mirrors get/2.
%% @end
%%--------------------------------------------------------------------
-spec update({selector(), pos()}, value() | {value(), value()},
    array()) -> {ok, array()} | {error, out_of_range}.
update({Selector, first}, Value, Array = #bp_tree_array{}) ->
    update({Selector, 1}, Value, Array);
update({Selector, last}, Value, Array = #bp_tree_array{size = Size}) ->
    update({Selector, Size}, Value, Array);
update({right, 0}, Value, Array = #bp_tree_array{data = Data}) ->
    {ok, Array#bp_tree_array{data = erlang:setelement(1, Data, Value)}};
update({_Selector, Pos}, _Value, #bp_tree_array{size = Size})
    when Pos < 1 orelse Pos > Size ->
    {error, out_of_range};
update({left, Pos}, Value, Array = #bp_tree_array{data = Data}) ->
    Data2 = erlang:setelement(2 * Pos - 1, Data, Value),
    {ok, Array#bp_tree_array{data = Data2}};
update({key, Pos}, Value, Array = #bp_tree_array{data = Data}) ->
    Data2 = erlang:setelement(2 * Pos, Data, Value),
    {ok, Array#bp_tree_array{data = Data2}};
update({right, Pos}, Value, Array = #bp_tree_array{data = Data}) ->
    Data2 = erlang:setelement(2 * Pos + 1, Data, Value),
    {ok, Array#bp_tree_array{data = Data2}};
%% `both' sets the left and right value of a key in one call.
update({both, Pos}, {LValue, RValue}, Array = #bp_tree_array{data = Data}) ->
    Data2 = erlang:setelement(2 * Pos - 1, Data, LValue),
    Data3 = erlang:setelement(2 * Pos + 1, Data2, RValue),
    {ok, Array#bp_tree_array{data = Data3}}.
%%--------------------------------------------------------------------
%% @doc
%% Returns position of a key in an array or fails with a missing error.
%% Uses lower_bound/2, then checks whether the key at that position is an
%% exact match.
%% @end
%%--------------------------------------------------------------------
-spec find(key(), array()) -> {ok, pos_integer()} | {error, not_found}.
find(Key, Array = #bp_tree_array{}) ->
    Pos = lower_bound(Key, Array),
    case get({key, Pos}, Array) of
        {ok, Key} -> {ok, Pos};
        {ok, _} -> {error, not_found};
        {error, out_of_range} -> {error, not_found}
    end.
%%--------------------------------------------------------------------
%% @doc
%% Returns the value stored to the left of `Key', or `{error, not_found}'
%% when the key is absent.
%% @end
%%--------------------------------------------------------------------
-spec find_value(key(), array()) -> {ok, value()} | {error, not_found}.
find_value(Key, Array = #bp_tree_array{}) ->
    case find(Key, Array) of
        {ok, Pos} -> get({left, Pos}, Array);
        {error, _} = Error -> Error
    end.
%%--------------------------------------------------------------------
%% @doc
%% Returns a position of a first key in an array that does not compare less
%% than a key. May return `Size + 1' when every key is smaller.
%% @end
%%--------------------------------------------------------------------
-spec lower_bound(key(), array()) -> pos_integer().
lower_bound(Key, Array = #bp_tree_array{size = Size}) ->
    lower_bound(Key, 1, Size, Array).
%%--------------------------------------------------------------------
%% @doc
%% Inserts a key-value pair into an array, keeping keys sorted.
%% @end
%%--------------------------------------------------------------------
-spec insert({selector(), key()}, value() | {value(), value()}, array()) ->
    {ok, array()} | {error, out_of_space | already_exists}.
%% Full when the key count equals the capacity (data holds 2*Max+1 slots).
insert({_Selector, _Key}, _Value, #bp_tree_array{size = Size, data = Data})
    when Size == erlang:size(Data) div 2 ->
    {error, out_of_space};
insert({Selector, Key}, Value, Array = #bp_tree_array{size = Size}) ->
    Pos = lower_bound(Key, Array),
    case get({key, Pos}, Array) of
        {ok, Key} ->
            {error, already_exists};
        {ok, _} ->
            %% Key belongs before Pos: shift the tail right to open a slot.
            Array2 = shift_right(Pos, Array),
            {ok, Array3} = update({key, Pos}, Key, Array2),
            {ok, _Array4} = update({Selector, Pos}, Value, Array3);
        {error, out_of_range} ->
            %% Key is greater than all existing keys: append at the end.
            Array2 = Array#bp_tree_array{size = Size + 1},
            {ok, Array3} = update({key, Size + 1}, Key, Array2),
            {ok, _Array4} = update({Selector, Size + 1}, Value, Array3)
    end.
%%--------------------------------------------------------------------
%% @doc
%% Appends a key-value pair to an array. The caller is responsible for the
%% key being greater than all existing keys (no ordering check is made).
%% @end
%%--------------------------------------------------------------------
-spec append({selector(), key()}, value() | {value(), value()}, array()) ->
    {ok, array()} | {error, out_of_space}.
append({_Selector, _Key}, _Value, #bp_tree_array{size = Size, data = Data})
    when Size == erlang:size(Data) div 2 ->
    {error, out_of_space};
append({Selector, Key}, Value, Array = #bp_tree_array{size = Size}) ->
    Array2 = Array#bp_tree_array{size = Size + 1},
    {ok, Array3} = update({key, Size + 1}, Key, Array2),
    {ok, _Array4} = update({Selector, Size + 1}, Value, Array3).
%%--------------------------------------------------------------------
%% @doc
%% Prepends a key-value pair to an array. The caller is responsible for the
%% key being smaller than all existing keys (no ordering check is made).
%% NOTE(review): the size increment is assumed to happen inside
%% shift_right/2 (defined elsewhere in this module) -- confirm.
%% @end
%%--------------------------------------------------------------------
-spec prepend({selector(), key()}, value() | {value(), value()}, array()) ->
    {ok, array()} | {error, out_of_space}.
prepend({_Selector, _Key}, _Value, #bp_tree_array{size = Size, data = Data})
    when Size == erlang:size(Data) div 2 ->
    {error, out_of_space};
prepend({Selector, Key}, Value, Array = #bp_tree_array{}) ->
    Array2 = shift_right(1, Array),
    {ok, Array3} = update({key, 1}, Key, Array2),
    {ok, _Array4} = update({Selector, 1}, Value, Array3).
%%--------------------------------------------------------------------
%% @doc
%% Removes a key and associated value from an array.
%% @end
%%--------------------------------------------------------------------
-spec remove({selector(), key()}, array()) ->
    {ok, array()} | {error, term()}.
remove({Selector, Key}, Array = #bp_tree_array{}) ->
    remove({Selector, Key}, fun(_) -> true end, Array).
%%--------------------------------------------------------------------
%% @doc
%% Removes a key and associated value from an array if predicate is
%% satisfied. Only the `left' and `right' selectors are handled here; the
%% second shift_left argument selects which neighbouring value is dropped.
%% @end
%%--------------------------------------------------------------------
-spec remove({selector(), key()}, remove_pred(), array()) ->
    {ok, array()} | {error, term()}.
remove({Selector, Key}, Pred, Array = #bp_tree_array{}) ->
    case find(Key, Array) of
        {ok, Pos} ->
            {ok, Value} = get({Selector, Pos}, Array),
            case Pred(Value) of
                true when Selector =:= left -> {ok, shift_left(Pos, 0, Array)};
                true when Selector =:= right -> {ok, shift_left(Pos, 1, Array)};
                false -> {error, predicate_not_satisfied}
            end;
        {error, Reason} ->
            {error, Reason}
    end.
%%--------------------------------------------------------------------
%% @doc
%% Splits an array in half. Returns left and right parts and a split key.
%% The key at position Pivot is removed from the left part and returned as
%% the split key; everything after it is copied into the right part.
%% NOTE(review): both halves get size `Pivot - 1'; the right half actually
%% holds `Size - Pivot' keys, which equals `Pivot - 1' only for odd `Size'
%% -- confirm that split is always applied at odd (full) capacity.
%% @end
%%--------------------------------------------------------------------
-spec split(array()) -> {array(), key(), array()}.
split(Array = #bp_tree_array{size = Size, data = Data}) ->
    Pivot = Size div 2 + 1,
    Begin = 2 * Pivot,
    SplitKey = element(Begin, Data),
    LData = setelement(Begin, Data, ?NIL),
    RData = erlang:make_tuple(erlang:size(Data), ?NIL),
    %% Move slots Begin+1..Begin+Size into the right array, clearing them
    %% in the left one.
    {LData3, RData3} = lists:foldl(fun(Pos, {LData2, RData2}) ->
        {
            setelement(Begin + Pos, LData2, ?NIL),
            setelement(Pos, RData2, element(Begin + Pos, LData2))
        }
    end, {LData, RData}, lists:seq(1, Size)),
    {
        Array#bp_tree_array{size = Pivot - 1, data = LData3},
        SplitKey,
        Array#bp_tree_array{size = Pivot - 1, data = RData3}
    }.
%%--------------------------------------------------------------------
%% @doc
%% Merges two arrays into a single array by copying the right array's
%% slots (2 * RSize + 1 of them) after the left array's keys. The caller
%% must ensure the combined content fits and stays ordered.
%% @end
%%--------------------------------------------------------------------
-spec merge(array(), array()) -> array().
merge(LArray = #bp_tree_array{size = LSize, data = LData},
    #bp_tree_array{size = RSize, data = RData}) ->
    Begin = 2 * LSize,
    LData2 = lists:foldl(fun(Pos, Data) ->
        setelement(Begin + Pos, Data, element(Pos, RData))
    end, LData, lists:seq(1, 2 * RSize + 1)),
    LArray#bp_tree_array{size = LSize + RSize, data = LData2}.
%%--------------------------------------------------------------------
%% @doc
%% Converts an array into a list (the raw interleaved storage, including
%% trailing ?NIL slots).
%% @end
%%--------------------------------------------------------------------
-spec to_list(array()) -> list().
to_list(#bp_tree_array{data = Data}) ->
    tuple_to_list(Data).
%%--------------------------------------------------------------------
%% @doc
%% Converts a list into an array. The size is derived from the length of
%% the non-?NIL prefix (2 * Size + 1 occupied slots per Size keys).
%% @end
%%--------------------------------------------------------------------
-spec from_list(list()) -> array().
from_list(List) ->
    Len = length(lists:takewhile(fun(X) -> X =/= ?NIL end, List)),
    #bp_tree_array{
        size = Len div 2,
        data = list_to_tuple(List)
    }.
%%--------------------------------------------------------------------
%% @doc
%% Converts an array into a map.
%% @end
%%--------------------------------------------------------------------
-spec to_map(array()) -> #{key() => value()}.
%% The produced map also records the slot count under ?SIZE_KEY so that
%% from_map/1 can rebuild an array of the same capacity.
to_map(Array = #bp_tree_array{}) ->
List = to_list(Array),
to_map(List, #{?SIZE_KEY => length(List)}).
%%--------------------------------------------------------------------
%% @doc
%% Converts a map into an array.
%% @end
%%--------------------------------------------------------------------
-spec from_map(#{key() => value()}) -> array().
from_map(Map) ->
% Slot capacity recorded by to_map/1.
Size = maps:get(?SIZE_KEY, Map),
% Collect the real {Key, Value} pairs, skipping the bookkeeping keys.
List = maps:fold(fun
(?LAST_KEY, _, Acc) -> Acc;
(?SIZE_KEY, _, Acc) -> Acc;
(Key, Value, Acc) -> [{Key, Value} | Acc]
end, [], Map),
List2 = lists:sort(List),
% Flatten into the interleaved slot layout (built reversed).
List3 = lists:foldl(fun({Key, Value}, Acc) ->
[Key, Value | Acc]
end, [], List2),
% Prepend the trailing value stored under ?LAST_KEY (?NIL if absent).
List4 = [maps:get(?LAST_KEY, Map, ?NIL) | List3],
% Pad with ?NIL up to the original capacity, then undo the reversal.
List5 = lists:foldl(fun(_, Acc) ->
[?NIL | Acc]
end, List4, lists:seq(1, Size - length(List4))),
from_list(lists:reverse(List5)).
%%====================================================================
%% Internal functions
%%====================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns the position of the first key in the range [Lower, Upper] that does
%% not compare less than the given key.
%% @end
%%--------------------------------------------------------------------
%% Binary search: returns the first position in [Low, High] whose key
%% does not compare less than Key; returns Low once the range empties.
-spec lower_bound(key(), pos_integer(), pos_integer(), array()) -> pos_integer().
lower_bound(Key, Low, High, Array) when Low =< High ->
    Middle = (Low + High) div 2,
    {ok, MiddleKey} = get({key, Middle}, Array),
    if
        MiddleKey < Key -> lower_bound(Key, Middle + 1, High, Array);
        true -> lower_bound(Key, Low, Middle - 1, Array)
    end;
lower_bound(_Key, Low, _High, _Array) ->
    Low.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Removes the key and associated value at the Begin position and shifts all
%% following items to the left, so as to fill the hole.
%% @end
%%--------------------------------------------------------------------
-spec shift_left(pos_integer(), non_neg_integer(), array()) -> array().
shift_left(Begin, Offset, Array = #bp_tree_array{size = Size, data = Data}) ->
% Pull every slot two positions forward, starting at the removed
% entry's first slot (optionally skipping Offset leading slots).
Data3 = lists:foldl(fun(Pos, Data2) ->
setelement(Pos, Data2, element(Pos + 2, Data2))
end, Data, lists:seq(2 * Begin - 1 + Offset, 2 * Size - 1)),
% Clear the now-duplicated last two slots.
Data4 = setelement(2 * Size, Data3, ?NIL),
Data5 = setelement(2 * Size + 1, Data4, ?NIL),
Array#bp_tree_array{
size = Size - 1,
data = Data5
}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Shifts all keys by one position to the right starting from Begin position.
%% @end
%%--------------------------------------------------------------------
-spec shift_right(pos_integer(), array()) -> array().
shift_right(Begin, Array = #bp_tree_array{size = Size, data = Data}) ->
% Walk the slots in descending order (the -1 step) so each slot is
% copied before it is overwritten, opening a two-slot gap at the
% insertion position.
Data3 = lists:foldl(fun(Pos, Data2) ->
setelement(Pos, Data2, element(Pos - 2, Data2))
end, Data, lists:seq(2 * Size + 3, 2 * Begin + 1, -1)),
Array#bp_tree_array{
size = Size + 1,
data = Data3
}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Converts list format of an array into a map.
%% @end
%%--------------------------------------------------------------------
-spec to_map(list(), map()) -> map().
% All slots consumed.
to_map([], Map) ->
Map;
% Trailing slot unused.
to_map([?NIL], Map) ->
Map;
% The final slot holds the node's trailing value; keep it under ?LAST_KEY.
to_map([Value], Map) ->
Map#{?LAST_KEY => Value};
% A ?NIL key marks the end of the populated region.
to_map([?NIL, ?NIL | _], Map) ->
Map;
% Value present but no key: it is the trailing value.
to_map([Value, ?NIL | _], Map) ->
Map#{?LAST_KEY => Value};
% Regular interleaved {Value, Key} slot pair.
to_map([Value, Key | List], Map) ->
to_map(List, maps:put(Key, Value, Map)). | src/bp_tree_array.erl | 0.609175 | 0.512144 | bp_tree_array.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(fabric2_txids).
-behaviour(gen_server).
-vsn(1).
-export([
start_link/0,
create/2,
remove/1
]).
-export([
init/1,
terminate/2,
handle_call/3,
handle_cast/2,
handle_info/2,
code_change/3,
format_status/2
]).
-include("fabric2.hrl").
-define(ONE_HOUR, 3600000000).
-define(MAX_TX_IDS, 1000).
%% Starts the singleton transaction-id janitor, registered locally
%% under the module name.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%% Builds a unique transaction-id key under the layer prefix. The key
%% embeds the creation timestamp (so sweep/2 can range-clear old ids)
%% plus a uuid for uniqueness.
create(Tx, undefined) ->
% No prefix supplied: look it up and retry with it.
Prefix = fabric2_fdb:get_dir(Tx),
create(Tx, Prefix);
create(_Tx, LayerPrefix) ->
{Mega, Secs, Micro} = os:timestamp(),
Key = {?TX_IDS, Mega, Secs, Micro, fabric2_util:uuid()},
erlfdb_tuple:pack(Key, LayerPrefix).
%% Queues a transaction id for asynchronous cleanup. Ids created
%% without a transaction (undefined) require no cleanup.
remove(undefined) ->
    ok;
remove(TxId) when is_binary(TxId) ->
    gen_server:cast(?MODULE, {remove, TxId}).
%% gen_server init: start with an empty id buffer and treat startup
%% time as the moment of the last sweep.
init(_) ->
{ok, #{
last_sweep => os:timestamp(),
txids => []
}}.
%% On shutdown, flush any buffered transaction ids in a single
%% transaction so they are not leaked.
terminate(_Reason, #{txids := []}) ->
    ok;
terminate(_Reason, #{txids := TxIds}) ->
    fabric2_fdb:transactional(fun(Tx) ->
        lists:foreach(fun(TxId) ->
            erlfdb:clear(Tx, TxId)
        end, TxIds)
    end),
    ok.
%% No synchronous API is exposed; any call is a programming error and
%% stops the server while replying {bad_call, Msg} to the caller.
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
%% Buffers a removed transaction id; flushes via clean/2 once the
%% buffer exceeds ?MAX_TX_IDS or an hourly sweep is due.
handle_cast({remove, TxId}, St) ->
#{
last_sweep := LastSweep,
txids := TxIds
} = St,
NewTxIds = [TxId | TxIds],
NewSt = St#{txids := NewTxIds},
% ?ONE_HOUR is in microseconds, matching timer:now_diff/2 units.
NeedsSweep = timer:now_diff(os:timestamp(), LastSweep) > ?ONE_HOUR,
case NeedsSweep orelse length(NewTxIds) >= ?MAX_TX_IDS of
true ->
{noreply, clean(NewSt, NeedsSweep)};
false ->
{noreply, NewSt}
end.
%% Unexpected messages are treated as fatal rather than silently
%% dropped.
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
%% No state migration is needed between code versions.
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
%% Replaces the (potentially long) id list with its length so crash
%% reports and sys:get_status output stay small.
format_status(_Opt, [_PDict, State]) ->
#{
txids := TxIds
} = State,
Scrubbed = State#{
txids => {length, length(TxIds)}
},
[{data, [{"State",
Scrubbed
}]}].
%% Clears all buffered transaction ids in one transaction and, when a
%% sweep is due, also range-clears ids older than the last sweep.
%% Returns the state with an emptied buffer (and, after a sweep, a
%% refreshed last_sweep timestamp).
clean(St, NeedsSweep) ->
#{
last_sweep := LastSweep,
txids := TxIds
} = St,
fabric2_fdb:transactional(fun(Tx) ->
lists:foreach(fun(TxId) ->
erlfdb:clear(Tx, TxId)
end, TxIds),
case NeedsSweep of
true ->
sweep(Tx, LastSweep),
St#{
last_sweep := os:timestamp(),
txids := []
};
false ->
St#{txids := []}
end
end).
%% Range-clears every ?TX_IDS key whose embedded timestamp is older
%% than the given one. The next write's conflict range is disabled so
%% the bulk clear does not conflict with concurrent transactions.
sweep(Tx, {Mega, Secs, Micro}) ->
Prefix = fabric2_fdb:get_dir(Tx),
StartKey = erlfdb_tuple:pack({?TX_IDS}, Prefix),
EndKey = erlfdb_tuple:pack({?TX_IDS, Mega, Secs, Micro}, Prefix),
erlfdb:set_option(Tx, next_write_no_write_conflict_range),
erlfdb:clear_range(Tx, StartKey, EndKey). | src/fabric/src/fabric2_txids.erl | 0.597608 | 0.464234 | fabric2_txids.erl | starcoder |
%% @copyright 2014-2016 <NAME> <<EMAIL>>
%%
%% @doc A built-in layout which formats log messages by an arbitrary user defined function
%%
%% This layout formats log messages by `format_fun/0' which was specified by the argument of {@link new/1}.
%%
%% == NOTE ==
%% This module is provided for debuging/testing purposes only.
%%
%% A layout will be stored into a logi_channel's ETS.
%% Then it will be loaded every time a log message is issued.
%% Therefore if the format function (`format_fun/0') of the layout is a huge size anonymous function,
%% all log issuers which use the channel will have to pay a non negligible cost to load it.
%%
%% == EXAMPLE ==
%% <pre lang="erlang">
%% > error_logger:tty(false). % Suppresses annoying warning outputs for brevity
%%
%% > Context = logi_context:new(sample_log, info).
%% > FormatFun = fun (_, Format, Data) -> io_lib:format("EXAMPLE: " ++ Format, Data) end.
%% > Layout = logi_builtin_layout_fun:new(FormatFun).
%% > lists:flatten(logi_layout:format(Context, "Hello ~s", ["World"], Layout)).
%% "EXAMPLE: Hello World"
%% </pre>
%%
%% A layout used by a sink can be specified at the time of installing the sink:
%% <pre lang="erlang">
%% > Layout0 = logi_builtin_layout_fun:new(fun (_, Format, Data) -> io_lib:format("[LAYOUT_0] " ++ Format ++ "\n", Data) end).
%% > {ok, _} = logi_channel:install_sink(logi_builtin_sink_io_device:new(foo, [{layout, Layout0}]), info).
%% > logi:info("hello world").
%% [LAYOUT_0] hello world
%%
%% > Layout1 = logi_builtin_layout_fun:new(fun (_, Format, Data) -> io_lib:format("[LAYOUT_1] " ++ Format ++ "\n", Data) end).
%% > {ok, _} = logi_channel:install_sink(logi_builtin_sink_io_device:new(bar, [{layout, Layout1}]), info).
%% > logi:info("hello world").
%% [LAYOUT_0] hello world
%% [LAYOUT_1] hello world
%% </pre>
%% @end
-module(logi_builtin_layout_fun).
-behaviour(logi_layout).
%%----------------------------------------------------------------------------------------------------------------------
%% Exported API
%%----------------------------------------------------------------------------------------------------------------------
-export([new/1]).
-export_type([format_fun/0]).
%%----------------------------------------------------------------------------------------------------------------------
%% 'logi_layout' Callback API
%%----------------------------------------------------------------------------------------------------------------------
-export([format/4]).
%%----------------------------------------------------------------------------------------------------------------------
%% Types
%%----------------------------------------------------------------------------------------------------------------------
-type format_fun() :: fun ((logi_context:context(), io:format(), logi_layout:data()) -> logi_layout:formatted_data()).
%% A log message formatting function
%%----------------------------------------------------------------------------------------------------------------------
%% Exported Functions
%%----------------------------------------------------------------------------------------------------------------------
%% @doc Creates a layout which formats log messages by `FormatFun'
-spec new(format_fun()) -> logi_layout:layout().
%% Wraps the 3-arity formatting fun in a layout instance.
new(FormatFun) when is_function(FormatFun, 3) ->
    logi_layout:new(?MODULE, FormatFun);
new(FormatFun) ->
    %% Anything other than a fun of arity 3 is a usage error.
    error(badarg, [FormatFun]).
%%----------------------------------------------------------------------------------------------------------------------
%% 'logi_layout' Callback Functions
%%----------------------------------------------------------------------------------------------------------------------
%% @private
%% Delegates formatting to the fun captured by new/1.
-spec format(logi_context:context(), io:format(), logi_layout:data(), format_fun()) -> logi_layout:formatted_data().
format(Context, Format, Data, Fun) -> Fun(Context, Format, Data). | src/logi_builtin_layout_fun.erl | 0.572603 | 0.446434 | logi_builtin_layout_fun.erl | starcoder |
%% @doc A client for the Dataset Register REST API.
%% @see https://datasetregister.netwerkdigitaalerfgoed.nl/api
-module(dataset_register_client).
-author("<NAME> <<EMAIL>>").
-export([
validate/2,
validation_results_url/2,
submit/2
]).
-include("zotonic.hrl").
-define(URL, <<"https://datasetregister.netwerkdigitaalerfgoed.nl/api/">>).
-define(VALIDATE_URL, <<"https://datasetregister.netwerkdigitaalerfgoed.nl/validate.php?url=">>).
%% @doc Validate a dataset description with the Dataset Register.
%% @see https://netwerk-digitaal-erfgoed.github.io/requirements-datasets/
-spec validate(m_rsc:resource(), z:context()) -> valid | {invalid, 404 | 406 | map()}.
validate(Id, Context) ->
% The register dereferences the dataset itself; only its URI is sent.
Payload = #{<<"@id">> => dataset_uri(Id, Context)},
handle_response(
httpc:request(
put, {
binary_to_list(<<?URL/binary, "/datasets/validate">>),
[],
"application/ld+json",
jsx:encode(Payload)
},
httpc_options(), []
)
).
%% @doc Submit a dataset description to the Dataset Register.
-spec submit(m_rsc:resource(), z:context()) -> valid | {invalid, 404 | 406 | map()}.
submit(Id, Context) ->
% Same payload shape as validate/2, POSTed to the collection URL.
Payload = #{<<"@id">> => dataset_uri(Id, Context)},
handle_response(
httpc:request(
post, {
binary_to_list(<<?URL/binary, "/datasets">>),
[],
"application/ld+json",
jsx:encode(Payload)
},
httpc_options(), []
)
).
-spec validation_results_url(m_rsc:resource(), z:context()) -> binary().
%% Human-facing link to the register's validation page for the
%% resource's (URL-encoded) dataset URI.
validation_results_url(Id, Context) ->
<<?VALIDATE_URL/binary,
(z_convert:to_binary(z_url:url_encode(dataset_uri(Id, Context))))/binary>>.
%% Maps an httpc response from the Dataset Register onto the API's
%% result terms: any 2xx/3xx status means the description was
%% accepted; known error statuses map to {invalid, Status} or, for
%% 400, to the decoded violation report.
%% NOTE: the previous spec declared `-> atom()' although most clauses
%% return {invalid, _} tuples; the spec below reflects the actual
%% return values.
-spec handle_response({ok, {{string(), pos_integer(), string()},
                            proplists:proplist(), list()}}) ->
    valid | {invalid, pos_integer() | map()}.
handle_response({ok, {{_, Success, _}, _Headers, _}}) when Success >= 200 andalso Success < 400 ->
    %% Dataset description is valid.
    valid;
handle_response({ok, {{_, 404, _}, _Headers, _}}) ->
    %% URL does not exist.
    {invalid, 404};
handle_response({ok, {{_, 400, _}, _Headers, Body}}) ->
    %% Dataset description is invalid; the body details the violations.
    {invalid, jsx:decode(list_to_binary(Body))};
handle_response({ok, {{_, 406, _}, _Headers, _}}) ->
    %% No dataset can be found at the URL.
    {invalid, 406};
handle_response({ok, {{_, 403, _}, _Headers, _}}) ->
    %% The dataset URL is not on the allow list in the Dataset Register.
    {invalid, 403};
handle_response({ok, {{_, Status, _}, _Headers, _}}) ->
    %% Previously any other status (e.g. a 5xx) crashed with a
    %% function_clause error; surface it as an error result instead.
    {invalid, Status}.
-spec dataset_uri(m_rsc:resource(), z:context()) -> binary().
%% The resource's canonical URI, which the register dereferences.
dataset_uri(Id, Context) ->
m_rsc:p(Id, uri, Context).
%% Shared httpc options: 10s total request timeout, 5s connect timeout.
httpc_options() ->
[
{timeout, 10000},
{connect_timeout, 5000}
]. | support/dataset_register_client.erl | 0.532425 | 0.451992 | dataset_register_client.erl | starcoder |
%
% This file is part of AtomVM.
%
% Copyright 2020 <NAME> <<EMAIL>>
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
% SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
%
%%
%% Copyright (c) dushin.net
%% All rights reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%%-----------------------------------------------------------------------------
%% @doc An implementation of a subset of the Erlang/OTP base64 interface.
%%
%% This module is designed to be API-compatible with the Erlang/OTP base64 module,
%% with the following exceptions:
%%
%% <ul>
%% <li>No support for decoding data with whitespace in base64 data</li>
%% <li>No support for mime decoding functions</li>
%% </ul>
%% @end
%%-----------------------------------------------------------------------------
-module(base64).
-export([encode/1, encode_to_string/1, decode/1, decode_to_string/1]).
%%-----------------------------------------------------------------------------
%% @param Data the data to encode
%% @returns the base-64 data encoded, as a binary
%% @doc Base-64 encode a binary or string, outputting a binary.
%% @end
%%-----------------------------------------------------------------------------
-spec encode(binary() | iolist()) -> binary().
encode(Data) when is_binary(Data) orelse is_list(Data) ->
% Placeholder body: the nif_error reason suggests this is overridden
% by a native implementation at runtime - confirm against the AtomVM
% NIF table. Reaching this line means the override is absent.
throw(nif_error).
%%-----------------------------------------------------------------------------
%% @param Data the data to encode
%% @returns the base-64 data encoded, as a string
%% @doc Base-64 encode a binary or string, outputting a string.
%% @end
%%-----------------------------------------------------------------------------
-spec encode_to_string(binary() | iolist()) -> string().
%% Placeholder body for a native implementation (throws nif_error when
%% the override is absent). The guard previously accepted only
%% binaries although the spec - and the sibling encode/1 - also allow
%% iolists; it now matches the spec.
encode_to_string(Data) when is_binary(Data) orelse is_list(Data) ->
    throw(nif_error).
%%-----------------------------------------------------------------------------
%% @param Data the data to decode
%% @returns the base-64 data decoded, as a binary
%% @doc Base-64 decode a binary or string, outputting a binary.
%%
%% This function will raise a badarg exception if the supplied
%% data is not valid base64-encoded data.
%% @end
%%-----------------------------------------------------------------------------
-spec decode(binary() | iolist()) -> binary().
decode(Data) when is_binary(Data) orelse is_list(Data) ->
% Placeholder body; see the note on encode/1.
throw(nif_error).
%%-----------------------------------------------------------------------------
%% @param Data the data to decode
%% @returns the base-64 data decoded, as a string
%% @doc Base-64 decode a binary or string, outputting a string.
%%
%% This function will raise a badarg exception if the supplied
%% data is not valid base64-encoded data.
%% @end
%%-----------------------------------------------------------------------------
-spec decode_to_string(binary() | iolist()) -> string().
% NOTE(review): the guard accepts only binaries although the spec also
% allows iolists, and encode/1 / decode/1 accept both - confirm whether
% list input should be allowed here as well.
decode_to_string(Data) when is_binary(Data) ->
throw(nif_error). | libs/estdlib/src/base64.erl | 0.779616 | 0.444324 | base64.erl | starcoder |
%%%-------------------------------------------------------------------
%%% @doc
%%% A set of optics specific to gb_trees.
%%% @end
%%%-------------------------------------------------------------------
-module(optic_gb_trees).
%% API
-export([all/0,
all/1,
keys/0,
keys/1,
values/0,
values/1,
associations/0,
associations/1,
key/1,
key/2,
association/1,
association/2]).
%%%===================================================================
%%% API
%%%===================================================================
%% @see values/1
-spec all() -> optic:optic().
% Alias for values/0.
all() ->
values().
%% @see values/1
-spec all(Options) -> optic:optic() when
Options :: optic:variations().
% Alias for values/1.
all(Options) ->
values(Options).
%% @see keys/1
-spec keys() -> optic:optic().
% Same as keys/1 with default options.
keys() ->
keys(#{}).
%% @doc
%% Focus on all keys of a gb_tree.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_gb_trees:keys()],
%% gb_trees:from_orddict([{first, 1}, {second, 2}])).
%% {ok,[first,second]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec keys(Options) -> optic:optic() when
Options :: optic:variations().
keys(Options) ->
% Read-only traversal over every key; errors if the focus target is
% not a gb_tree.
Fold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
{ok, fold(fun (Key, _Value, InnerAcc) ->
Fun(Key, InnerAcc)
end,
Acc,
Tree)};
false ->
{error, undefined}
end
end,
% Read-write traversal: rebuilds the tree from empty, re-entering
% each value under its (possibly rewritten) key.
MapFold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
{ok, fold(fun (Key, Value, {InnerTree, InnerAcc}) ->
{NewKey, NewAcc} = Fun(Key, InnerAcc),
{gb_trees:enter(NewKey, Value, InnerTree), NewAcc}
end,
{gb_trees:empty(), Acc},
Tree)};
false ->
{error, undefined}
end
end,
% 'create' variation: replace a non-tree target with an empty tree.
New =
fun (_Data, _Template) ->
gb_trees:empty()
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see values/1
-spec values() -> optic:optic().
% Same as values/1 with default options.
values() ->
values(#{}).
%% @doc
%% Focus on all values of a gb_tree.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_gb_trees:values()],
%% gb_trees:from_orddict([{first, 1}, {second, 2}])).
%% {ok,[1,2]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec values(Options) -> optic:optic() when
Options :: optic:variations().
values(Options) ->
% Read-only traversal over every value; errors if the focus target is
% not a gb_tree.
Fold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
{ok, fold(fun (_Key, Value, InnerAcc) ->
Fun(Value, InnerAcc)
end,
Acc,
Tree)};
false ->
{error, undefined}
end
end,
% Read-write traversal: rebuilds the tree, keeping each key but
% replacing its value with the mapped one.
MapFold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
{ok, fold(fun (Key, Value, {InnerTree, InnerAcc}) ->
{NewValue, NewAcc} = Fun(Value, InnerAcc),
{gb_trees:enter(Key, NewValue, InnerTree), NewAcc}
end,
{gb_trees:empty(), Acc},
Tree)};
false ->
{error, undefined}
end
end,
% 'create' variation: replace a non-tree target with an empty tree.
New =
fun (_Data, _Template) ->
gb_trees:empty()
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see associations/1
-spec associations() -> optic:optic().
% Same as associations/1 with default options.
associations() ->
associations(#{}).
%% @doc
%% Focus on all associations of a gb_tree. An association is a tuple of
%% the key and value for each entry.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_gb_trees:associations()],
%% gb_trees:from_orddict([{first, 1}, {second, 2}])).
%% {ok,[{first,1},{second,2}]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec associations(Options) -> optic:optic() when
Options :: optic:variations().
associations(Options) ->
% Read-only traversal over {Key, Value} pairs; errors if the focus
% target is not a gb_tree.
Fold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
{ok, fold(fun (Key, Value, InnerAcc) ->
Fun({Key, Value}, InnerAcc)
end,
Acc,
Tree)};
false ->
{error, undefined}
end
end,
% Read-write traversal: rebuilds the tree from the mapped pairs; the
% mapping fun must return a {NewKey, NewValue} tuple.
MapFold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
{ok, fold(fun (Key, Value, {InnerTree, InnerAcc}) ->
{{NewKey, NewValue}, NewAcc} = Fun({Key, Value}, InnerAcc),
{gb_trees:enter(NewKey, NewValue, InnerTree), NewAcc}
end,
{gb_trees:empty(), Acc},
Tree)};
false ->
{error, undefined}
end
end,
% 'create' variation: replace a non-tree target with an empty tree.
New =
fun (_Data, _Template) ->
gb_trees:empty()
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see key/2
-spec key(Key) -> optic:optic() when
Key :: term().
% Same as key/2 with default options.
key(Key) ->
key(Key, #{}).
%% @doc
%% Focus on the value of a gb_tree key.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_gb_trees:key(first)],
%% gb_trees:from_orddict([{first, 1}, {second, 2}])).
%% {ok,[1]}
%% '''
%% @end
%% @param Key The key to focus on.
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec key(Key, Options) -> optic:optic() when
Key :: term(),
Options :: optic:variations().
key(Key, Options) ->
% Read-only focus on the value stored under Key; errors if the
% target is not a gb_tree or the key is absent.
Fold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
case gb_trees:lookup(Key, Tree) of
{value, Value} ->
{ok, Fun(Value, Acc)};
none ->
{error, undefined}
end;
false ->
{error, undefined}
end
end,
% Read-write focus: re-enters the mapped value under the same key.
MapFold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
case gb_trees:lookup(Key, Tree) of
{value, Value} ->
{NewValue, NewAcc} = Fun(Value, Acc),
{ok, {gb_trees:enter(Key, NewValue, Tree), NewAcc}};
none ->
{error, undefined}
end;
false ->
{error, undefined}
end
end,
% 'create' variation: insert the template under Key, starting from a
% fresh single-entry tree when the target is not a gb_tree.
New =
fun (Tree, Template) ->
case is_gb_tree(Tree) of
true ->
gb_trees:enter(Key, Template, Tree);
false ->
gb_trees:from_orddict([{Key, Template}])
end
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see association/2
-spec association(Key) -> optic:optic() when
Key :: term().
% Same as association/2 with default options.
association(Key) ->
association(Key, #{}).
%% @doc
%% Focus on the association for a gb_tree key. An association is the
%% tuple of a gb_tree key and value. If the key is modified, the optic is
%% no longer well behaved.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_gb_trees:association(first)],
%% gb_trees:from_orddict([{first, 1}, {second, 2}])).
%% {ok,[{first,1}]}
%% '''
%% @end
%% @param Key The key to focus on.
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec association(Key, Options) -> optic:optic() when
Key :: term(),
Options :: optic:variations().
association(Key, Options) ->
% Read-only focus on the {Key, Value} pair; errors if the target is
% not a gb_tree or the key is absent.
Fold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
case gb_trees:lookup(Key, Tree) of
{value, Value} ->
{ok, Fun({Key, Value}, Acc)};
none ->
{error, undefined}
end;
false ->
{error, undefined}
end
end,
% Read-write focus: the mapped pair may carry a new key, so the old
% entry is deleted before the new one is entered. Renaming the key
% makes the optic not well behaved (see the @doc above).
MapFold =
fun (Fun, Acc, Tree) ->
case is_gb_tree(Tree) of
true ->
case gb_trees:lookup(Key, Tree) of
{value, Value} ->
{{NewKey, NewValue}, NewAcc} = Fun({Key, Value}, Acc),
{ok, {gb_trees:enter(NewKey, NewValue, gb_trees:delete(Key, Tree)), NewAcc}};
none ->
{error, undefined}
end;
false ->
{error, undefined}
end
end,
% 'create' variation: insert the template under Key, starting from a
% fresh single-entry tree when the target is not a gb_tree.
New =
fun (Tree, Template) ->
case is_gb_tree(Tree) of
true ->
gb_trees:enter(Key, Template, Tree);
false ->
gb_trees:from_orddict([{Key, Template}])
end
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%%%===================================================================
%%% Internal Functions
%%%===================================================================
%% Folds Fun over the tree's entries in key order, threading the
%% accumulator through each {Key, Value} pair.
fold(Fun, InitialAcc, Tree) ->
    Pairs = gb_trees:to_list(Tree),
    Step = fun({Key, Value}, Acc) -> Fun(Key, Value, Acc) end,
    lists:foldl(Step, InitialAcc, Pairs).
% Best-effort type check: gb_trees:size/1 raises function_clause for
% terms that do not match the tree representation, which is caught and
% mapped to false. NOTE(review): some non-tree terms with a compatible
% shape could still pass this check - confirm that is acceptable for
% the optic's inputs.
is_gb_tree(Unknown) ->
try gb_trees:size(Unknown) of
_ ->
true
catch
error:function_clause ->
false
end. | src/optic_gb_trees.erl | 0.677474 | 0.47171 | optic_gb_trees.erl | starcoder |
-module(spiral_matrix).
-export([make/1,
test_version/0]).
-record(remaining, {start_x :: non_neg_integer(),
end_x :: non_neg_integer(),
start_y :: non_neg_integer(),
end_y :: non_neg_integer()}).
-type matrix() :: [[pos_integer()]].
%% API
-spec make(non_neg_integer()) -> matrix().
make(0) ->
[];
make(N) when N >= 1 ->
% Walk the spiral to get coordinates in visit order, then number
% them and reshape into row-major form.
Coordinates = coordinates(top, remaining(N)),
to_matrix(N, Coordinates).
-spec test_version() -> integer().
% Exercism test-suite compatibility marker.
test_version() ->
1.
%% Internal
%% Initial bounding box covering the whole N x N grid (0-based,
%% inclusive bounds).
remaining(N) ->
    Last = N - 1,
    #remaining{start_x = 0, end_x = Last, start_y = 0, end_y = Last}.
%% Walk the perimeter of the remaining bounding box clockwise
%% (top edge, right edge, bottom edge, left edge), shrinking the box by
%% one edge per step, and emit {X, Y} cells in visit order. Recursion
%% stops once the box is empty on either axis.
coordinates(_Side, #remaining{start_x = SX, end_x = EX,
                              start_y = SY, end_y = EY})
  when SX > EX; SY > EY ->
    [];
coordinates(top, #remaining{start_x = SX, end_x = EX, start_y = SY} = Box) ->
    [{X, SY} || X <- lists:seq(SX, EX)] ++
        coordinates(right, Box#remaining{start_y = SY + 1});
coordinates(right, #remaining{end_x = EX, start_y = SY, end_y = EY} = Box) ->
    [{EX, Y} || Y <- lists:seq(SY, EY)] ++
        coordinates(bottom, Box#remaining{end_x = EX - 1});
coordinates(bottom, #remaining{start_x = SX, end_x = EX, end_y = EY} = Box) ->
    [{X, EY} || X <- lists:seq(EX, SX, -1)] ++
        coordinates(left, Box#remaining{end_y = EY - 1});
coordinates(left, #remaining{start_x = SX, start_y = SY, end_y = EY} = Box) ->
    [{SX, Y} || Y <- lists:seq(EY, SY, -1)] ++
        coordinates(top, Box#remaining{start_x = SX + 1}).
%% Turn the spiral visit order into row-major rows: number each
%% coordinate by visit order, sort pairs by position (row first, then
%% column), and chunk the result into rows of N values.
to_matrix(N, Coordinates) ->
    Numbered = enumerate(Coordinates),
    ByPosition =
        lists:sort(fun ({{X1, Y1}, _V1}, {{X2, Y2}, _V2}) ->
                           {Y1, X1} =< {Y2, X2}
                   end,
                   Numbered),
    values(split(N, ByPosition)).
%% Pair each element with its 1-based position:
%% [a, b] -> [{a, 1}, {b, 2}]. The empty list maps to the empty list.
enumerate(Elements) ->
    Positions = lists:seq(1, length(Elements)),
    lists:zip(Elements, Positions).
%% Chunk a flat list into consecutive rows of N elements each.
%% Crashes (in lists:split/2) if the length is not a multiple of N.
split(_N, []) ->
    [];
split(N, Elements) ->
    {Row, Remaining} = lists:split(N, Elements),
    [Row | split(N, Remaining)].
%% Strip the coordinates, keeping only the spiral values of each row.
values(Rows) ->
    lists:map(fun (Row) -> [Value || {_Coordinate, Value} <- Row] end, Rows).
%% -------------------------------------------------------------------
%% ya_orset.erl Implementation of Observed-Remove Set
%% OR-Set is a CRDT. Concurrent adds commute since each one is unique. Concurrent
%% removes commute because any common pairs have the same effect, and any disjoint pairs
%% have independent effects. Concurrent add(e) and remove(f) also commute: if e != f they
%% are independent, and if e = f the remove has no effect.
%% USAGE:
% Set = ya_orset:init().
% {ok, Set1} = ya_orset:update(add, <<"foo">>, Set).
% {ok, Set2} = ya_orset:update(add, <<"foo">>, Set1).
% {ok, Set3} = ya_orset:update(add, <<"bar">>, Set2).
% {ok, Set4} = ya_orset:update(remove, <<"foo">>, Set3).
%% -------------------------------------------------------------------
-module(ya_orset).
-behaviour(ya_crdt).
%% API exports
-export([
init/0,
query/1,
query/2,
update/3,
compare/2,
merge/2
]).
%%====================================================================
%% API functions
%%====================================================================
%% @doc Create an empty OR-Set.
%%
%% The set is represented as an orddict mapping each element to an
%% inner orddict of `Token => add | remove' entries, where every token
%% is a unique binary generated at add time.
-spec init() -> ya_crdt:crdt().
init() -> orddict:new().
%% @doc Extract the elements that are currently observed in the set.
%%
%% An element is a member iff at least one of its unique tokens is
%% still tagged `add'; remove_element/2 rewrites all observed tokens of
%% an element to `remove'.
-spec query(ya_crdt:crdt()) -> [term()].
query(ORSet) ->
    Observed =
        orddict:filter(
          fun(_Elem, ORSetTokens) ->
                  %% Was `length(...) >= 0', which is always true and
                  %% therefore also reported fully removed elements.
                  lists:any(fun({_Token, Op}) -> Op =:= add end,
                            orddict:to_list(ORSetTokens))
          end, ORSet),
    %% Was misspelled `orddict:fecth_keys/1', which crashed at runtime.
    orddict:fetch_keys(Observed).
%% @doc Return the token orddict recorded for `Elem', or an empty
%% orddict when the element has never been observed.
-spec query(term(), ya_crdt:crdt()) -> ya_crdt:crdt().
query(Elem, ORSet) ->
    case orddict:is_key(Elem, ORSet) of
        true -> orddict:fetch(Elem, ORSet);
        false -> orddict:new()
    end.
%% @doc Apply `add' or `remove' for a single element, or for a list of
%% elements (applied left to right).
%%
%% `add' generates a unique token in the source replica, which is then
%% propagated to downstream replicas, which insert the pair into their
%% payload. Two adds of the same element generate two unique pairs, but
%% query/1 masks the duplicates.
-spec update(ya_crdt:operation(), term(), ya_crdt:crdt()) -> {ya_crdt:status(), ya_crdt:crdt()}.
update(add, [H | Tail] = Elem, ORSet) when is_list(Elem) ->
    UniqueToken = crypto:strong_rand_bytes(20),
    %% Fix: the updated set returned by add_element/3 used to be
    %% discarded and the recursion continued with the original ORSet,
    %% so batch adds were silently lost. Thread the new state through.
    {ok, ORSet1} = add_element(UniqueToken, H, ORSet),
    update(add, Tail, ORSet1);
update(add, [], ORSet) ->
    %% Fix: wrap in {ok, _} so all clauses match the declared
    %% {status(), crdt()} return shape (this clause returned a bare set).
    {ok, ORSet};
update(add, Elem, ORSet) ->
    UniqueToken = crypto:strong_rand_bytes(20),
    add_element(UniqueToken, Elem, ORSet);
%% When remove(e) happens-after any number of add(e), every observed
%% token of e is rewritten to `remove', so the element disappears from
%% query/1. A concurrent add(e) takes precedence, as its fresh unique
%% token cannot have been observed by the remove.
update(remove, [H | Tail] = Elem, ORSet) when is_list(Elem) ->
    %% Same state-threading fix as the batch-add clause. Removing an
    %% element that was never observed is a no-op for batches (the
    %% {error, Set} status from remove_element/2 is deliberately ignored).
    {_Status, ORSet1} = remove_element(H, ORSet),
    update(remove, Tail, ORSet1);
update(remove, [], ORSet) ->
    {ok, ORSet};
update(remove, Elem, ORSet) ->
    remove_element(Elem, ORSet).
%% @doc Two OR-Sets compare equal when their payloads are equal.
-spec compare(ya_crdt:crdt(), ya_crdt:crdt()) -> boolean().
compare(SetA, SetB) ->
    SetA == SetB.
%% @doc Merge two replica payloads. Concurrent adds commute since each
%% token is unique; concurrent removes commute because any common pairs
%% have the same effect and disjoint pairs are independent.
%%
%% Per token, a side marked `add' wins over `remove'.
%% NOTE(review): for the *same* token, add-wins means a tombstoned
%% token from one replica can be resurrected by a replica that still
%% holds its pre-remove `add' state -- confirm this matches the
%% intended OR-Set semantics.
-spec merge(ya_crdt:crdt(), ya_crdt:crdt()) -> {ya_crdt:status(), ya_crdt:crdt()}.
merge(ORSet1, ORSet2) ->
    MergeTokens =
        fun(_Token, add, _Op2) -> add;
           (_Token, _Op1, add) -> add;
           (_Token, _Op1, _Op2) -> remove
        end,
    MergeElems =
        fun(_Elem, Tokens1, Tokens2) ->
                orddict:merge(MergeTokens, Tokens1, Tokens2)
        end,
    orddict:merge(MergeElems, ORSet1, ORSet2).
%%====================================================================
%% Internal functions
%%====================================================================
%% @doc Record `{Token => add}' under `Elem', creating the element's
%% token orddict on first sight.
%% Returns {ok, UpdatedORSet} in every case.
-spec add_element(term(), term(), ya_crdt:crdt()) -> {ya_crdt:status(), ya_crdt:crdt()}.
add_element(Token, Elem, ORSet) ->
    Tokens =
        case orddict:find(Elem, ORSet) of
            {ok, Existing} -> Existing;
            error -> orddict:new()
        end,
    {ok, orddict:store(Elem, orddict:store(Token, add, Tokens), ORSet)}.
%% @doc Rewrite every observed token of `Elem' to `remove'.
%% Returns {ok, UpdatedORSet} when the element exists, or
%% {error, ORSet} untouched when it was never observed.
-spec remove_element(term(), ya_crdt:crdt()) -> {ya_crdt:status(), ya_crdt:crdt()}.
remove_element(Elem, ORSet) ->
    case orddict:find(Elem, ORSet) of
        {ok, Tokens} ->
            Tombstones = orddict:map(fun(_Token, _Op) -> remove end, Tokens),
            {ok, orddict:store(Elem, Tombstones, ORSet)};
        error ->
            {error, ORSet}
    end.
%%--------------------------------------------------------------------
%% Copyright (c) 2019-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(mria_node_monitor_SUITE).

-compile(export_all).
-compile(nowarn_export_all).

-include_lib("eunit/include/eunit.hrl").

%% Common Test entry points -------------------------------------------

all() ->
    mria_ct:all(?MODULE).

init_per_suite(Config) ->
    mria:start(),
    Config.

end_per_suite(_Config) ->
    ok = mria:stop().

%% Smoke tests: each cast / raw message must be accepted by the node
%% monitor without crashing it.

t_cast_heartbeat(_) ->
    ok = mria_node_monitor:cast(node(), heartbeat).

t_cast_suspect(_) ->
    ok = mria_node_monitor:cast(node(), {suspect, 'n1@127.0.0.1', 'n2@127.0.0.1'}).

t_cast_confirm(_) ->
    ok = mria_node_monitor:cast(node(), {confirm, 'n1@127.0.0.1', down}).

t_cast_report_partition(_) ->
    %% Fix: previously passed the bare atom `node' instead of calling
    %% node(), so the cast targeted a non-existent node name (every
    %% sibling test uses node()).
    ok = mria_node_monitor:cast(node(), {report_partition, 'n1@127.0.0.1'}).

t_cast_heal_partition(_) ->
    %% Fix: same `node' -> node() typo as t_cast_report_partition/1.
    ok = mria_node_monitor:cast(node(), {heal_partition, ['n1@127.0.0.1']}).

t_handle_nodeup_info(_) ->
    mria_node_monitor ! {nodeup, 'n1@127.0.0.1', []}.

t_handle_nodedown_info(_) ->
    mria_node_monitor ! {nodedown, 'n1@127.0.0.1', []}.

t_run_after(_) ->
    TRef = mria_node_monitor:run_after(100, heartbeat),
    ?assert(is_reference(TRef)).

t_partitions(_) ->
    [] = mria_node_monitor:partitions().

t_handle_unexpected(_) ->
    %% Unknown requests must be ignored without touching the state.
    {reply, ignore, state} = mria_node_monitor:handle_call(req, from, state),
    {noreply, state} = mria_node_monitor:handle_cast(msg, state),
    {noreply, state} = mria_node_monitor:handle_info(info, state).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2011 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc A pipe fitting that applies a function to a list of inputs,
%% and sends the accumulated results downstream. This module is
%% intended to be used as the emulation of 'reduce' phases in Riak KV
%% MapReduce.
%%
%% Upstream fittings should send each of their outputs separately.
%% This worker will assemble them into a list and apply the function
%% to that list.
%%
%% This fitting expects a 3-tuple of `{rct, Fun, Arg}'. The `Fun'
%% should be a function expecting two arguments: `Inputs :: list()'
%% and `Arg'. The fun should return a list as its result. The
%% function {@link reduce_compat/1} should be used to transform the
%% usual MapReduce phase spec (`{modfun, ...}', '{jsanon, ...}', etc.)
%% into the variety of function expected here.
%%
%% The default behavior is to apply the reduce function to the first
%% 20 inputs, and then apply the Fun to that result with the next 20
%% inputs received cons'd on the front, and repeat this re-running
%% until finished. Two knobs exist to change this behavior. The
%% first is `reduce_phase_batch_size'. The property may be set by
%% specifying `Arg' as a proplist, and providing a positive integer.
%% For example, setting `Arg=[{reduce_phase_batch_size, 1}]', if the
%% inputs A, B, and C were received, evaluation would look something
%% like:
%% ```
%% X = Fun([A], Arg),
%% Y = Fun([B,X], Arg),
%% Z = Fun([C,Y], Arg)
%% '''
%%
%% Setting `Arg=[{reduce_phase_batch_size, 2}]'instead, with the same
%% inputs would cause evaulation to look more like:
%% ```
%% X = Fun([B,A], Arg),
%% Y = Fun([C,X], Arg)
%% '''
%% The default batch size allowed is controlled by the riak_kv
%% application environment variable `mapred_reduce_phase_batch_size'
%%
%% The other knob to control batching behavior is known as
%% `reduce_phase_only_1'. If this option is set in the `Arg'
%% proplist, the reduce function will be evaluated at most once. That
%% is, the example set of inputs from above would evaluate as:
%% ```
%% X = Fun([C,B,A], Arg)
%% '''
%%
%% To use `reduce_phase_only_1' and `reduce_phase_batch_size' over the
%% HTTP interface, specify a JSON structure as the function's
%% argument, as in:
%% ```
%% {...,"query":[...,{"reduce":{...,"arg":{"reduce_phase_batch_size":100}}}]}
%% '''
%% Or:
%% ```
%% {...,"query":[...,{"reduce":{...,"arg":{"reduce_phase_only1":true}}}]}
%% '''
%% The HTTP interface will translate that argument into a mochijson2
%% structure (e.g. `{struct, [{<<"reduce_phase_only_1">>, true}]}'),
%% which this fitting will understand. This also provides a safe way
%% to pass these arguments when using a reduce phase implemented in
%% Javascript over the Protocol Buffer or native interfaces.
%% Mochijson2 conversion will fail on the bare proplist, but will
%% succeed at encoding this form.
%%
%% If no inputs are received before eoi, this fitting evaluated the
%% function once, with an empty list as `Inputs'.
%%
%% For Riak KV MapReduce reduce phase compatibility, a chashfun that
%% directs all inputs to the same partition should be used. Multiple
%% workers will reduce only parts of the input set, and produce
%% multiple independent outputs, otherwise (note that this may be
%% desirable in a "pre-reduce" phase).
-module(riak_kv_w_reduce).
-behaviour(riak_pipe_vnode_worker).
-export([init/2,
process/3,
done/1,
archive/1,
handoff/2,
validate_arg/1]).
-export([reduce_compat/1]).
-export([no_input_run_reduce_once/0]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-include_lib("riak_pipe/include/riak_pipe.hrl").
-include_lib("riak_pipe/include/riak_pipe_log.hrl").
-include("riak_kv_js_pools.hrl").
-record(state, {acc :: list(),
inacc :: list(),
delay :: integer(),
delay_max :: integer(),
p :: riak_pipe_vnode:partition(),
fd :: riak_pipe_fitting:details()}).
-opaque state() :: #state{}.
-export_type([state/0]).
-define(DEFAULT_JS_RESERVE_ATTEMPTS, 10).
%% @doc Setup: start with an empty accumulator and stash the
%% `Partition' and `FittingDetails' for later.
-spec init(riak_pipe_vnode:partition(),
           riak_pipe_fitting:details()) ->
         {ok, state()}.
init(Partition, #fitting_details{options=Options} = FittingDetails) ->
    DelayMax = calc_delay_max(FittingDetails),
    %% AZ 479: Riak KV Map/Reduce compatibility: when this fitting saw
    %% no input at all, the reduce function must still run exactly once
    %% over the empty list. (The partition number handed to us is bogus
    %% in that case.)
    Acc =
        case proplists:get_value(pipe_fitting_no_input, Options) of
            true ->
                reduce([], #state{fd=FittingDetails}, "riak_kv_w_reduce init");
            _ ->
                []
        end,
    {ok, #state{acc=Acc, inacc=[], delay=0, delay_max=DelayMax,
                p=Partition, fd=FittingDetails}}.
%% @doc Queue one input and run the reduce function if the batch
%% threshold has been reached.
-spec process(term(), boolean(), state()) -> {ok, state()}.
process(Input, _Last, #state{inacc=Pending, delay=Delay} = State) ->
    maybe_reduce(State#state{inacc=[Input | Pending], delay=Delay + 1},
                 "reducing").
%% @doc Reduce iff the number of unreduced inputs has reached the batch
%% size. `delay_max' may be an atom (see calc_delay_max/1), which
%% compares greater than any integer, i.e. "never batch".
-spec maybe_reduce(state(), string()) -> {ok, state()}.
maybe_reduce(#state{acc=Reduced, inacc=Pending,
                    delay=Delay, delay_max=DelayMax} = State,
             Message) ->
    case Delay >= DelayMax of
        true ->
            NewAcc = reduce(Reduced ++ lists:reverse(Pending), State, Message),
            {ok, State#state{acc=NewAcc, inacc=[], delay=0}};
        false ->
            {ok, State}
    end.
%% @doc Reduce any leftover unreduced inputs, then emit every element
%% of the final accumulator downstream.
-spec done(state()) -> ok.
done(#state{acc=Acc0, inacc=Pending, delay=Delay,
            p=Partition, fd=FittingDetails} = S) ->
    Final =
        case Delay of
            0 -> Acc0;
            _ -> reduce(Acc0 ++ lists:reverse(Pending), S, "done()")
        end,
    lists:foreach(
      fun(Output) ->
              riak_pipe_vnode_worker:send_output(Output, Partition,
                                                 FittingDetails)
      end,
      Final),
    ok.
%% @doc The archive is the reduce state so far: reduced results first,
%% followed by the still-unreduced inputs in arrival order.
-spec archive(state()) -> {ok, list()}.
archive(#state{acc=Reduced, inacc=Pending}) ->
    {ok, Reduced ++ lists:reverse(Pending)}.
%% @doc Merge the accumulator handed off from another worker with the
%% local one, then reduce if the combined backlog crosses the batch
%% size threshold.
%%
%% Inputs received by the archived vnode are assumed to have arrived
%% before any local input (the typical handoff case is a new node
%% taking over). All incoming unreduced inputs therefore go after the
%% local pending inputs in the reversed `inacc' list, with the remote
%% reduced results reversed behind them -- this gives a sorting reduce
%% phase the best chance of seeing the original order.
%% Example: HandoffAcc = Acc ++ InAcc = [1,2,3] ++ [4,5,6],
%% local inacc [9,8,7] => new inacc [9,8,7] ++ [6,5,4] ++ [3,2,1].
-spec handoff(list(), state()) -> {ok, state()}.
handoff(HandoffAcc, #state{inacc=LocalPending} = State) ->
    Pending = LocalPending ++ lists:reverse(HandoffAcc),
    maybe_reduce(State#state{inacc=Pending, delay=length(Pending)},
                 "reducing handoff").
%% @doc Apply the user's reduce function to the accumulated inputs.
%% Exits with `non_list_result' if the function returns anything other
%% than a list.
-spec reduce([term()], state(), string()) -> list().
reduce(Inputs, #state{fd=FittingDetails}, ErrString) ->
    {rct, Fun, Arg} = FittingDetails#fitting_details.arg,
    ?T(FittingDetails, [reduce], {reducing, ErrString, length(Inputs)}),
    Outputs = Fun(Inputs, Arg),
    case is_list(Outputs) of
        true ->
            ?T(FittingDetails, [reduce], {reduced, ErrString, length(Outputs)}),
            Outputs;
        false ->
            exit(non_list_result)
    end.
%% @doc Check that the fitting argument is a `{rct, Fun, Arg}' triple
%% whose `Fun' is a valid arity-2 function. See {@link
%% riak_pipe_v:validate_function/3}.
-spec validate_arg({rct, function(), term()}) -> ok | {error, iolist()}.
validate_arg({rct, Fun, _FunArg}) ->
    validate_fun(Fun).
%% Accept only funs (arity checked by riak_pipe_v); anything else
%% yields a descriptive error iolist.
validate_fun(Fun) when is_function(Fun) ->
    riak_pipe_v:validate_function("arg", 2, Fun);
validate_fun(Other) ->
    {error, io_lib:format("~p requires a function as argument, not a ~p",
                          [?MODULE, riak_pipe_v:type_of(Other)])}.
%% @doc Compatibility wrapper for an old-school Riak MR reduce function,
%% which is an arity-2 function `fun(InputList, SpecificationArg)'.
%%
%% Every clause normalizes its phase spec down to the `{qfun, Fun}'
%% form and finally returns the bare fun. Clause order matters: the
%% stored (`{Bucket, Key}') variants must be resolved to literal source
%% before the literal-source clauses match.
-spec reduce_compat(riak_kv_mrc_pipe:reduce_query_fun()) -> fun().
%% Anonymous Javascript stored in a Riak KV object.
reduce_compat({jsanon, {Bucket, Key}})
  when is_binary(Bucket), is_binary(Key) ->
    reduce_compat({qfun, js_runner({jsanon, stored_source(Bucket, Key)})});
%% Anonymous Javascript given as literal source.
reduce_compat({jsanon, Source})
  when is_binary(Source) ->
    reduce_compat({qfun, js_runner({jsanon, Source})});
%% Named, predefined Javascript function.
reduce_compat({jsfun, Name})
  when is_binary(Name) ->
    reduce_compat({qfun, js_runner({jsfun, Name})});
%% Erlang source stored in a Riak KV object.
reduce_compat({strfun, {Bucket, Key}})
  when is_binary(Bucket), is_binary(Key) ->
    reduce_compat({strfun, stored_source(Bucket, Key)});
%% Erlang source given as a literal. The tagged match deliberately
%% produces a descriptive badmatch unless the `allow_strfun' app env
%% is true; likewise for compile failures and wrong arity.
reduce_compat({strfun, Source}) ->
    {allow_strfun, true} = {allow_strfun,
                            app_helper:get_env(riak_kv, allow_strfun)},
    {ok, Fun} = riak_kv_mrc_pipe:compile_string(Source),
    true = is_function(Fun, 2),
    reduce_compat({qfun, Fun});
%% Module:Function reference, converted to an external fun.
reduce_compat({modfun, Module, Function}) ->
    reduce_compat({qfun, erlang:make_fun(Module, Function, 2)});
reduce_compat({qfun, Fun}) ->
    Fun.
%% @doc True; this fitting should be started and stopped even if no
%% inputs were received (no normal workers were started), so that the
%% reduce function still runs once over the empty list (see init/2).
no_input_run_reduce_once() ->
    true.
%% @doc Fetch the source code for a reduce function stored in a Riak KV
%% object; crashes (badmatch) if the object cannot be read.
-spec stored_source(binary(), binary()) -> binary().
stored_source(Bucket, Key) ->
    {ok, Client} = riak:local_client(),
    {ok, Object} = Client:get(Bucket, Key, 1),
    riak_object:get_value(Object).
%% @doc Wrap a Javascript phase spec as an arity-2 fun suitable for
%% this fitting's `Arg'. The returned fun JSON-ifies the inputs,
%% dispatches to the reduce JS VM pool, and de-JSON-ifies the results.
-spec js_runner({jsanon | jsfun, binary()}) ->
         fun( (list(), term()) -> list() ).
js_runner(JS) ->
    fun(Inputs, Arg) ->
            %% batch-size knobs are stripped so mochijson2 can encode Arg
            SafeArg = remove_batch_props(Arg),
            JsonInputs = [riak_kv_mapred_json:jsonify_not_found(I)
                          || I <- Inputs],
            JSCall = {JS, [JsonInputs, SafeArg]},
            case riak_kv_js_manager:blocking_dispatch(
                   ?JSPOOL_REDUCE, JSCall, ?DEFAULT_JS_RESERVE_ATTEMPTS) of
                {ok, Results} when is_list(Results) ->
                    [riak_kv_mapred_json:dejsonify_not_found(R)
                     || R <- Results];
                {ok, NonList} ->
                    NonList; %% reduce/3 will exit(non_list_result)
                {error, Error} ->
                    exit(Error)
            end
    end.
%% @doc Remove the reduce batch-size knobs from a proplist `Arg' so
%% mochijson2 doesn't blow up trying to encode them; non-list args
%% pass through untouched.
remove_batch_props(Props) when is_list(Props) ->
    IsKnob = fun(reduce_phase_only_1) -> true;
                ({reduce_phase_only_1, _}) -> true;
                ({reduce_phase_batch_size, _}) -> true;
                (_) -> false
             end,
    [P || P <- Props, not IsKnob(P)];
remove_batch_props(Other) ->
    Other.
%% @doc Determine the batch size for this fitting. Defaults to the
%% riak_kv `mapred_reduce_phase_batch_size' app env (falling back to
%% 20), overridden by the `reduce_phase_batch_size' /
%% `reduce_phase_only_1' props of the reduce arg, given either as a
%% plain proplist or as a mochijson2 struct.
%%
%% NOTE: an atom is returned when the reduce should run only once at
%% the end, since atoms always compare greater than integers.
-spec calc_delay_max(riak_pipe_fitting:details()) ->
         integer() | atom().
calc_delay_max(#fitting_details{arg = {rct, _ReduceFun, ReduceArg}}) ->
    Props =
        case ReduceArg of
            {struct, JsonProps} -> delay_props_from_json(JsonProps);
            ListArg when is_list(ListArg) -> ListArg; % may be a proplist
            _ -> []
        end,
    Default = app_helper:get_env(riak_kv, mapred_reduce_phase_batch_size, 20),
    case proplists:get_value(reduce_phase_only_1, Props) of
        true ->
            an_atom_is_always_bigger_than_an_integer_so_make_1_huge_batch;
        _ ->
            proplists:get_value(reduce_phase_batch_size, Props, Default)
    end.
%% @doc Convert JSON-struct properties to their Erlang-atom-keyed
%% equivalents, since the HTTP interface has no way to send atoms
%% natively. Only the two batch-size knobs are recognized.
-spec delay_props_from_json(list()) -> [{atom(), term()}].
delay_props_from_json(JsonProps) ->
    lists:flatmap(fun(Key) -> extract_json_prop(Key, JsonProps) end,
                  [reduce_phase_only_1, reduce_phase_batch_size]).

%% Look up the binary form of `Key'; a hit yields a one-element
%% proplist, a miss the empty list.
extract_json_prop(Key, JsonProps) ->
    case lists:keyfind(atom_to_binary(Key, latin1), 1, JsonProps) of
        {_BinKey, Value} ->
            [{Key, Value}];
        false ->
            []
    end.
-ifdef(TEST).
%% This test should check that the reduce function is not called more
%% often than reduce_phase_batch_size or reduce_phase_only_1 request,
%% across an archive/handoff of one worker's state into another.
batch_size_during_handoff_test() ->
    Fun = fun riak_kv_mapreduce:reduce_count_inputs/2,
    ReduceEvery5 = [{reduce_phase_batch_size, 5}],
    AInputs = [a,b,c],
    {ok, StateUnreduced} =
        handoff_test_helper(Fun, ReduceEvery5, AInputs, []),
    %% handing off three unprocessed inputs to a fresh worker with
    %% zero unprocessed inputs should not immediately process the
    %% inputs if the batch size is greater than 3
    ?assertEqual(length(AInputs), StateUnreduced#state.delay),
    ?assertEqual(lists:reverse(AInputs), StateUnreduced#state.inacc),
    {ok, StateFinally} =
        test_helper({ok, StateUnreduced}, [e,f]),
    %% just two more inputs should still trigger the reduce
    ?assertEqual(0, StateFinally#state.delay),
    ?assertEqual([], StateFinally#state.inacc),
    BInputs = [e,f,g],
    {ok, StateReduced} =
        handoff_test_helper(Fun, ReduceEvery5, AInputs, BInputs),
    %% handing off three unprocessed inputs to a worker that has three
    %% more unprocessed inputs should immediately process the inputs
    %% if the batch size is less than or equal to 6
    ?assertEqual(0, StateReduced#state.delay),
    ?assertEqual([], StateReduced#state.inacc),
    ReduceOnce = [reduce_phase_only_1],
    {ok, StateNever} =
        handoff_test_helper(Fun, ReduceOnce, AInputs, BInputs),
    %% handing off unprocessed inputs when reduce_phase_only_1 is set
    %% should never immediately reduce them
    ?assertEqual(length(AInputs++BInputs), StateNever#state.delay),
    ?assertEqual(lists:reverse(AInputs++BInputs), StateNever#state.inacc).
%% Start reducer A and reducer B, feed AInputs to A and BInputs to B,
%% then archive A and hand its data off to B. Returns B's resulting
%% {ok, State}.
handoff_test_helper(Fun, Arg, AInputs, BInputs) ->
    {ok, ReducerA} = test_helper(Fun, Arg, AInputs),
    {ok, ReducerB} = test_helper(Fun, Arg, BInputs),
    {ok, Archived} = archive(ReducerA),
    handoff(Archived, ReducerB).
%% Initialize a reducer with the given fun and arg, then pass it the
%% list of inputs. Builds a minimal #fitting{}/#fitting_details{} pair
%% (partition 0, this test process as the fitting pid) -- just enough
%% context for init/2 and process/3 in these tests.
test_helper(Fun, Arg, Inputs) ->
    Fitting = #fitting{pid=self(),
                       ref=make_ref(),
                       chashfun=fun() -> <<0:160/integer>> end,
                       nval=1},
    Details = #fitting_details{fitting=Fitting,
                               name=batch_size_during_handoff_test,
                               module=?MODULE,
                               arg={rct, Fun, Arg},
                               output=Fitting,
                               options=[],
                               q_limit=64},
    test_helper(init(0, Details), Inputs).
%% Feed each input, in order, to the given reducer state.
test_helper({ok, State0}, Inputs) ->
    lists:foldl(fun(Input, {ok, State}) -> process(Input, true, State) end,
                {ok, State0},
                Inputs).
-endif. | deps/riak_kv/src/riak_kv_w_reduce.erl | 0.777596 | 0.534855 | riak_kv_w_reduce.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% xqerl - XQuery processor
%%
%% Copyright (c) 2018-2020 <NAME> All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% A string table is a lookup for each distinct string and a 32 bit integer.
%% The ID for the string is made up of a hash followed by a pigeonhole
%% offset value in case of collision.
%% When a collision occurs, the offset is increased and appended to the hash.
%% When there is no collision, 0 is appended.
%% Looking up a value by string means getting its hash value, appending
%% min and max (all 1 bits) tails, and then selecting each possible ID until it
%% is found. This means that there is a maximum nr. of collisions possible,
%% so the table should not get too large. (95% max size?)
%% Linear Probing
%% Every value is written to file. This can slow down insert operations.
%% Note: For all intents and purposes, a 'string' is a UTF8 binary value.
-module(xqldb_string_table2).
-behaviour(gen_server).
-include("xqerl_db.hrl").
-export([
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).
%% ====================================================================
%% API functions
%% ====================================================================
-export([
start_link/3,
stop/1,
insert/2,
lookup/2
]).
-type state() :: #{indx_file => file:io_device()}.
%% Open or create a new string table server. Both modes are forwarded
%% verbatim to init/1; any other mode atom fails with function_clause,
%% as before.
-spec start_link(
        open | new,
        DBDirectory :: file:name_all(),
        TableName :: string()
       ) -> {ok, Pid :: pid()}.
start_link(Mode, DBDirectory, TableName) when Mode =:= new; Mode =:= open ->
    gen_server:start_link(?MODULE, [Mode, DBDirectory, TableName], []).
%% Shutdown this server.
%% Crashes (function_clause) unless the db map carries a pid under `texts'.
-spec stop(db()) -> ok | {ok, _}.
stop(#{texts := Pid}) when is_pid(Pid) ->
    gen_server:stop(Pid).
%% Returns the binary value for the given ID, or error if no such ID
%% exists. IDs of up to 63 bytes are the strings themselves (stored
%% inline); 64-byte IDs are SHA-512 digests (see hash/1) resolved by
%% the table server; any other size cannot exist in the table.
-spec lookup(db(), Id :: binary()) -> Value :: binary() | error.
lookup(#{texts := Pid}, Inline) when is_pid(Pid), byte_size(Inline) =< 63 ->
    Inline;
lookup(#{texts := Pid}, Digest) when is_pid(Pid), byte_size(Digest) =:= 64 ->
    gen_server:call(Pid, {lookup, Digest});
lookup(_Db, _Id) ->
    error.
%% Returns the ID for a binary value in the table, creating the entry
%% if it does not exist. Values of up to 63 bytes act as their own IDs
%% and never touch the server; longer values are interned by the table
%% server, which answers with the value's digest ID.
-spec insert(db(), Value :: binary()) -> Id :: binary().
insert(#{texts := Pid}, Inline) when is_pid(Pid), byte_size(Inline) =< 63 ->
    Inline;
insert(#{texts := Pid}, Value) when is_pid(Pid) ->
    gen_server:call(Pid, {insert, Value}).
%% ====================================================================
%% Internal functions
%% ====================================================================
%% Creates a new string table in the DBDirectory in the DB DBName,
%% with the name TableName.
%% NOTE(review): the original doc claimed any existing DB file with the
%% same name is deleted, but init_state/2 opens the dets file as-is and
%% deletes nothing -- confirm the intended semantics.
-spec new(
    DBDirectory :: file:name_all(),
    TableName :: string()
) -> state().
new(DBDirectory, TableName) when is_binary(DBDirectory) ->
    %% normalize binary directory names to strings
    new(binary_to_list(DBDirectory), TableName);
new(DBDirectory, TableName) ->
    init_state(DBDirectory, TableName).
%% Opens an existing string table in the DBDirectory in the DB DBName,
%% with the name TableName. (dets:open_file/2 creates the file when it
%% is missing, so open/2 and new/2 currently behave identically.)
-spec open(
    DBDirectory :: file:name_all(),
    TableName :: string()
) -> state().
open(DBDirectory, TableName) when is_binary(DBDirectory) ->
    %% normalize binary directory names to strings
    open(binary_to_list(DBDirectory), TableName);
open(DBDirectory, TableName) ->
    init_state(DBDirectory, TableName).
%% Shared setup: make sure the directory exists, then open (or create)
%% the dets file that backs the table, named "<TableName>.heap".
init_state(DBDirectory, TableName) ->
    TablePath = filename:join(DBDirectory, TableName),
    ok = filelib:ensure_dir(TablePath),
    {ok, IndxFile} = dets:open_file(TablePath ++ ".heap", []),
    #{indx_file => IndxFile}.
%% Resolve a digest ID to its string in the dets table and reply to
%% the waiting gen_server caller; runs inside the short-lived process
%% spawned by handle_call/3.
lookup_string_from_id(Id, IndxFile, ReplyTo) ->
    Reply =
        case dets:lookup(IndxFile, Id) of
            [{_Id, String}] -> String;
            [] -> error
        end,
    gen_server:reply(ReplyTo, Reply).
%% Intern a string: hash it, store {Hash, String} unless the hash is
%% already present, and return the hash as the string's ID.
upsert_string_value(String, #{indx_file := IndxFile} = State) ->
    Hash = hash(String),
    case dets:lookup(IndxFile, Hash) of
        [] -> dets:insert(IndxFile, {Hash, String});
        [_Existing] -> ok
    end,
    {Hash, State}.
%% ====================================================================
%% Callbacks
%% ====================================================================
%% gen_server callback: build the initial state via new/2 or open/2
%% according to the requested mode.
init([new, DBDirectory, TableName]) ->
    {ok, new(DBDirectory, TableName)};
init([open, DBDirectory, TableName]) ->
    {ok, open(DBDirectory, TableName)}.
%% gen_server callback: close the backing dets file on shutdown.
terminate(_Reason, #{indx_file := IndxFile}) ->
    dets:close(IndxFile).
%% gen_server callback: no casts are part of this server's protocol.
handle_cast(_Request, State) -> {noreply, State}.
%% gen_server callback. Inserts are answered synchronously; lookups
%% are served by a spawned (linked) helper that answers via
%% gen_server:reply/2, so the server never blocks on a dets read.
handle_call({insert, Value}, _From, State0) ->
    {Id, State} = upsert_string_value(Value, State0),
    {reply, Id, State};
handle_call({lookup, Id}, From, #{indx_file := IndxFile} = State) ->
    _Pid = erlang:spawn_link(fun() ->
                                     lookup_string_from_id(Id, IndxFile, From)
                             end),
    {noreply, State}.
%% gen_server callback: stray messages are ignored.
handle_info(_Request, State) -> {noreply, State}.
%% gen_server callback: no state migration needed between versions.
code_change(_, State, _) -> {ok, State}.
%% SHA-512 digest of the string: 64 bytes, which is exactly what
%% distinguishes server-interned IDs from inline (=< 63 byte) IDs in
%% lookup/2 and insert/2.
hash(String) ->
    crypto:hash(sha512, String).
%% @copyright 2017 <NAME> <<EMAIL>>
%%
%% @doc Span Context.
%%
%% <blockquote>
%% Each <b>SpanContext</b> encapsulates the following state: <br />
%% <ul>
%% <li>Any OpenTracing-implementation-dependent state (for example, trace and span ids)
%% needed to refer to a distinct <b>Span</b> across a process boundary</li>
%% <li><b>Baggage Items</b>, which are just key:value pairs that
%% cross process boundaries</li>
%% </ul>
%% <a href="https://github.com/opentracing/specification/blob/1.1/specification.md#the-opentracing-data-model">The OpenTracing Data Model</a>
%% </blockquote>
%%
%% === Callbacks ===
%%
%% This module requires following callbacks:
%%
%% ```
%% %% @doc Creates the state of a span context from the given references.
%% -callback make_span_context_state(passage:refs()) ->
%% state().
%%
%% %% @doc Injects the span context into the carrier by the specified format.
%% -callback inject_span_context(context(), format(), inject_fun(), carrier()) ->
%% carrier().
%%
%% %% @doc Extracts a span context from the carrier using the specified format.
%% %%
%% %% If the carrier contains no span context, it will return `error'.
%% -callback extract_span_context(format(), iterate_fun(), carrier()) ->
%% {ok, context()} | error.
%%
%% '''
-module(passage_span_context).
%%------------------------------------------------------------------------------
%% Exported API
%%------------------------------------------------------------------------------
-export([make/2]).
-export([get_baggage_items/1]).
-export([get_state/1]).
-export_type([context/0]).
-export_type([implementation_module/0]).
-export_type([state/0]).
-export_type([format/0]).
-export_type([carrier/0]).
-export_type([inject_fun/0]).
-export_type([iterate_fun/0]).
%%------------------------------------------------------------------------------
%% Application Internal API
%%------------------------------------------------------------------------------
-export([from_refs/2]).
-export([set_baggage_items/2]).
%%------------------------------------------------------------------------------
%% Callback API
%%------------------------------------------------------------------------------
-callback make_span_context_state(passage:refs()) -> state().
-callback inject_span_context(context(), format(), inject_fun(), carrier()) -> carrier().
-callback extract_span_context(format(), iterate_fun(), carrier()) ->
{ok, context()} | error.
%%------------------------------------------------------------------------------
%% Macros & Records
%%------------------------------------------------------------------------------
-define(CONTEXT, ?MODULE).
-record(?CONTEXT,
{
state :: state(),
baggage_items = #{} :: passage:baggage_items()
}).
%%------------------------------------------------------------------------------
%% Exported Types
%%------------------------------------------------------------------------------
-opaque context() :: #?CONTEXT{}.
%% Span context.
-type implementation_module() :: module().
%% Implementation module of this behaviour.
-type state() :: term().
%% Implementation-dependent state.
-type format() :: text_map | http_header | binary.
%% The standard injection/extraction format.
%%
%% <blockquote>
%% Both injection and extraction rely on an extensible <b>format</b> parameter
%% that dictates the type of the associated "carrier" as well as
%% how a `SpanContext' is encoded in that carrier.
%% All of the following <b>format</b>s must be supported by all Tracer implementations.
%% <ul>
%% <li><b>Text Map</b>: an arbitrary string-to-string map with an unrestricted character set for both keys and values</li>
%% <li><b>HTTP Headers</b>: a string-to-string map with keys and values that are suitable for use in HTTP headers (a la RFC 7230. In practice, since there is such "diversity" in the way that HTTP headers are treated in the wild, it is strongly recommended that Tracer implementations use a limited HTTP header key space and escape values conservatively.</li>
%% <li><b>Binary</b>: a (single) arbitrary binary blob representing a `SpanContext'</li>
%% </ul>
%% <a href="https://github.com/opentracing/specification/blob/1.1/specification.md#note-required-formats-for-injection-and-extraction">
%% Note: required formats for injection and extraction
%% (The OpenTracing Semantic Specification)
%% </a>
%% </blockquote>
-type carrier() :: term().
%% Carrier for propagating span contexts.
-type inject_fun() :: fun ((Key :: binary(), Value :: binary(), carrier()) -> carrier()).
%% Span context injection function.
%%
%% If this function is called,
%% the carrier should update own state for injecting `Key' and `Value'.
-type iterate_fun() ::
fun ((carrier()) -> {ok, Key :: binary(), Value :: binary(), carrier()} | error).
%% Iterator function.
%%
%% If the carrier has any remaining elements,
%% it will return an `ok' tuple that contains a key/value pair and updated state.
%% Otherwise, it will return `error'.
%%------------------------------------------------------------------------------
%% Exported Functions
%%------------------------------------------------------------------------------
%% @doc Makes a new span context from an implementation-dependent state and
%% a map of baggage items.
-spec make(state(), passage:baggage_items()) -> context().
make(ContextState, Baggage) ->
    #?CONTEXT{baggage_items = Baggage, state = ContextState}.
%% @doc Returns the baggage items carried by the given span context.
-spec get_baggage_items(context()) -> passage:baggage_items().
get_baggage_items(Ctx) ->
    Ctx#?CONTEXT.baggage_items.
%% @doc Returns the implementation-dependent state held by the span context.
-spec get_state(context()) -> state().
get_state(Ctx) ->
    Ctx#?CONTEXT.state.
%%------------------------------------------------------------------------------
%% Application Internal Functions
%%------------------------------------------------------------------------------
%% @private
%% Builds a context for the given refs: the implementation module supplies
%% the state, and the baggage items of every referenced span are merged.
%% Items from refs that appear earlier in the list win on key conflicts,
%% because the accumulator is passed as the second argument to maps:merge/2.
-spec from_refs(implementation_module(), passage:refs()) -> context().
from_refs(Module, Refs) ->
    CollectBaggage =
        fun ({_RefType, Span}, Merged) ->
                maps:merge(passage_span:get_baggage_items(Span), Merged)
        end,
    #?CONTEXT{state = Module:make_span_context_state(Refs),
              baggage_items = lists:foldl(CollectBaggage, #{}, Refs)}.
%% @private
%% Merges `NewItems' into the context's baggage; on duplicate keys the
%% newly supplied items overwrite the existing ones.
-spec set_baggage_items(context(), passage:baggage_items()) -> context().
set_baggage_items(Context, NewItems) ->
    Existing = Context#?CONTEXT.baggage_items,
    Context#?CONTEXT{baggage_items = maps:merge(Existing, NewItems)}.
%% -------------------------------------------------------------------
%%
%% xqerl - XQuery processor
%%
%% Copyright (c) 2017-2020 <NAME> All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Implementation of the "http://www.w3.org/2005/xpath-functions/math"
%% namespace.
%% Pretty much just wraps the math module from Erlang and adds NaN, inf and -0.
-module(xqerl_mod_math).
-include("xqerl.hrl").
-export([acos/2]).
-export([asin/2]).
-export([atan/2]).
-export([atan2/3]).
-export([cos/2]).
-export([exp/2]).
-export([exp10/2]).
-export([log/2]).
-export([log10/2]).
-export(['pi'/1]).
-export([pow/3]).
-export([sin/2]).
-export([sqrt/2]).
-export([tan/2]).
-define(NS, <<"http://www.w3.org/2005/xpath-functions/math">>).
-define(PX, <<"math">>).
-'module-namespace'({?NS, ?PX}).
-namespaces([{"xqerl_mod_xs", "xs"}]).
-variables([]).
-functions([
{{qname, ?NS, ?PX, <<"acos">>}, {seqType, 'xs:double', zero_or_one}, [], {acos, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"asin">>}, {seqType, 'xs:double', zero_or_one}, [], {asin, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"atan">>}, {seqType, 'xs:double', zero_or_one}, [], {atan, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"atan2">>}, {seqType, 'xs:double', one}, [], {atan2, 3}, 2, [
{seqType, 'xs:double', one},
{seqType, 'xs:double', one}
]},
{{qname, ?NS, ?PX, <<"cos">>}, {seqType, 'xs:double', zero_or_one}, [], {cos, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"exp">>}, {seqType, 'xs:double', zero_or_one}, [], {exp, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"exp10">>}, {seqType, 'xs:double', zero_or_one}, [], {exp10, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"log">>}, {seqType, 'xs:double', zero_or_one}, [], {log, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"log10">>}, {seqType, 'xs:double', zero_or_one}, [], {log10, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"pi">>}, {seqType, 'xs:double', one}, [], {'pi', 1}, 0, []},
{{qname, ?NS, ?PX, <<"pow">>}, {seqType, 'xs:double', zero_or_one}, [], {pow, 3}, 2, [
{seqType, 'xs:double', zero_or_one},
{seqType, 'xs:numeric', one}
]},
{{qname, ?NS, ?PX, <<"sin">>}, {seqType, 'xs:double', zero_or_one}, [], {sin, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"sqrt">>}, {seqType, 'xs:double', zero_or_one}, [], {sqrt, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]},
{{qname, ?NS, ?PX, <<"tan">>}, {seqType, 'xs:double', zero_or_one}, [], {tan, 2}, 1, [
{seqType, 'xs:double', zero_or_one}
]}
]).
%% Returns the arc cosine of the argument; NaN outside [-1, 1].
acos(_Ctx, Arg) -> acos(Arg).

acos([]) ->
    [];
acos([Value]) ->
    acos(Value);
acos(#xqAtomicValue{value = Value}) ->
    acos(Value);
acos(neg_zero) ->
    %% Negative zero behaves as 0: acos(0) = pi/2.
    math:acos(0);
acos(nan) ->
    nan;
acos(Num) when is_number(Num), abs(Num) > 1 ->
    %% acos is undefined outside [-1, 1].
    nan;
acos(Num) ->
    %% math:acos/1 only raises errors, so mapping any exception to NaN
    %% preserves the previous `case catch' behaviour.
    try math:acos(Num)
    catch
        _:_ -> nan
    end.
%% Returns the arc sine of the argument; NaN outside [-1, 1].
asin(_Ctx, Arg) -> asin(Arg).

asin([]) ->
    [];
asin([Value]) ->
    asin(Value);
asin(#xqAtomicValue{value = Value}) ->
    asin(Value);
asin(Zero) when Zero == 0 ->
    %% asin(+-0) = +-0; return the argument to keep its type.
    Zero;
asin(neg_zero) ->
    neg_zero;
asin(nan) ->
    nan;
asin(Num) when abs(Num) > 1 ->
    nan;
asin(Num) ->
    try math:asin(Num)
    catch
        _:_ -> nan
    end.
%% Returns the arc tangent of the argument.
atan(_Ctx, Arg) -> atan(Arg).

atan([]) ->
    [];
atan([Value]) ->
    atan(Value);
atan(#xqAtomicValue{value = Value}) ->
    atan(Value);
atan(Zero) when Zero == 0 ->
    Zero;
atan(neg_zero) ->
    neg_zero;
atan(nan) ->
    nan;
atan(infinity) ->
    %% atan(+INF) = pi/2.
    math:pi() / 2;
atan(neg_infinity) ->
    %% atan(-INF) = -pi/2.
    -math:pi() / 2;
atan(Num) ->
    try math:atan(Num)
    catch
        _:_ -> nan
    end.
%% Returns the angle in radians subtended at the origin by the point on a
%% plane with coordinates (x, y) and the positive x-axis.
%% Argument order follows atan2(Y, X); empty sequences are not accepted
%% (the function is declared with cardinality `one').
atan2(_, Arg1, Arg2) -> atan2(Arg1, Arg2).
atan2([X], Y) ->
    atan2(X, Y);
atan2(X, [Y]) ->
    atan2(X, Y);
atan2(#xqAtomicValue{value = X}, Y) ->
    atan2(X, Y);
atan2(X, #xqAtomicValue{value = Y}) ->
    atan2(X, Y);
% special values
%% The clauses below special-case negative zero (represented by the atom
%% `neg_zero', since Erlang cannot express -0.0 distinctly). Only the
%% comparison values 0.0, 1.0 and -1.0 are handled — presumably these
%% cover the cases the implementation needs; other combinations fall
%% through to the general clause. TODO(review): confirm coverage against
%% the XPath math:atan2 special-value table.
atan2(neg_zero, V) when V == 0.0 ->
    %% atan2(-0, +0) = -0.
    neg_zero;
atan2(neg_zero, V) when V == -1.0 ->
    %% atan2(-0, x) for x < 0 is -pi.
    -math:pi();
atan2(V, neg_zero) when V == -1.0 ->
    %% atan2(-1, -0): delegates to atan2(-1, 0).
    math:atan2(-1.0, 0.0);
atan2(neg_zero, V) when V == 1.0 ->
    %% atan2(-0, x) for x > 0 is -0.
    neg_zero;
atan2(V, neg_zero) when V == 0.0 ->
    %% atan2(+0, -0) = pi.
    math:pi();
atan2(neg_zero, neg_zero) ->
    %% atan2(-0, -0) = -pi.
    -math:pi();
atan2(X, Y) ->
    %% Any arithmetic error (e.g. remaining special atoms) maps to NaN.
    case catch math:atan2(X, Y) of
        {'EXIT', _} -> nan;
        Z -> Z
    end.
%% Returns the cosine of the argument. The argument is an angle in radians.
cos(_Ctx, Arg) -> cos(Arg).

cos([]) ->
    [];
cos([Value]) ->
    cos(Value);
cos(#xqAtomicValue{value = Value}) ->
    cos(Value);
cos(neg_zero) ->
    %% cos(-0) = 1.
    1.0;
cos(Num) ->
    try math:cos(Num)
    catch
        _:_ -> nan
    end.
%% Returns the value of e raised to the power of the argument.
exp(_Ctx, Arg) -> exp(Arg).

exp([]) ->
    [];
exp([Value]) ->
    exp(Value);
exp(#xqAtomicValue{value = Value}) ->
    exp(Value);
exp(nan = Special) ->
    Special;
exp(infinity = Special) ->
    %% exp(+INF) = +INF.
    Special;
exp(neg_infinity) ->
    %% exp(-INF) = 0.
    0.0;
exp(Num) ->
    try math:exp(Num)
    catch
        _:_ -> nan
    end.
%% Returns the value of 10 raised to the power of the argument.
exp10(_Ctx, Arg) -> exp10(Arg).

exp10([]) ->
    [];
exp10([Value]) ->
    exp10(Value);
exp10(#xqAtomicValue{value = Value}) ->
    exp10(Value);
exp10(nan = Special) ->
    Special;
exp10(infinity = Special) ->
    Special;
exp10(neg_infinity) ->
    %% 10^(-INF) = 0.
    0.0;
exp10(Num) ->
    try math:pow(10, Num)
    catch
        _:_ -> nan
    end.
%% Returns the natural logarithm of the argument.
log(_Ctx, Arg) -> log(Arg).

log([]) ->
    [];
log([Value]) ->
    log(Value);
log(#xqAtomicValue{value = Value}) ->
    log(Value);
log(nan = Special) ->
    Special;
log(infinity = Special) ->
    Special;
log(neg_infinity) ->
    nan;
log(Zero) when Zero == 0 ->
    %% log(+-0) = -INF.
    neg_infinity;
log(Num) when Num < 0 ->
    %% Logarithm of a negative number is NaN.
    nan;
log(Num) ->
    try math:log(Num)
    catch
        _:_ -> nan
    end.
%% Returns the base-ten logarithm of the argument.
log10(_Ctx, Arg) -> log10(Arg).

log10([]) ->
    [];
log10([Value]) ->
    log10(Value);
log10(#xqAtomicValue{value = Value}) ->
    log10(Value);
log10(nan = Special) ->
    Special;
log10(infinity = Special) ->
    Special;
log10(neg_infinity) ->
    nan;
log10(Zero) when Zero == 0 ->
    %% log10(+-0) = -INF.
    neg_infinity;
log10(Num) when Num < 0 ->
    %% Logarithm of a negative number is NaN.
    nan;
log10(Num) ->
    try math:log10(Num)
    catch
        _:_ -> nan
    end.
%% Returns an approximation to the mathematical constant π.
%% The context argument is ignored.
pi(_Context) -> math:pi().
%% Returns the result of raising the first argument to the power of the second.
%% Implements the IEEE-754 / XPath special-value table for +-0, +-INF and NaN
%% before delegating to math:pow/2; any arithmetic error maps to NaN.
%%
%% BUG FIX: the previous catch-all clause `pow(X, _Y) when abs(X) == 1'
%% returned 1.0 for ANY exponent when X was -1, so pow(-1, 3) wrongly gave
%% 1.0 instead of -1.0. Per the spec, only 1^anything and (+-1)^(+-INF)
%% are unconditionally 1.0; -1 with a finite exponent must go through
%% math:pow/2.
pow(_, Arg1, Arg2) -> pow(Arg1, Arg2).
pow([], _) ->
    [];
pow([X], Y) ->
    pow(X, Y);
pow(X, [Y]) ->
    pow(X, Y);
pow(#xqAtomicValue{value = X}, Y) ->
    pow(X, Y);
pow(X, #xqAtomicValue{value = Y}) ->
    pow(X, Y);
%% Anything (including INF and NaN) raised to the power 0 is 1.
pow(infinity, Y) when Y == 0 ->
    1.0;
pow(neg_infinity, Y) when Y == 0 ->
    1.0;
pow(nan, Y) when Y == 0 ->
    1.0;
pow(X, Y) when X == 0, Y < 0 ->
    infinity;
%% Negative zero base: the sign of the result depends on whether the
%% exponent is an odd integer.
pow(neg_zero, Y) when Y < 0, trunc(Y) == Y, trunc(Y) rem 2 == -1 ->
    neg_infinity;
pow(neg_zero, Y) when Y < 0 ->
    infinity;
pow(neg_zero, Y) when Y > 0, trunc(Y) == Y, trunc(Y) rem 2 == 1 ->
    neg_zero;
pow(neg_zero, Y) when Y > 0 ->
    0.0;
%% 1 raised to anything — even NaN or +-INF — is 1.
pow(X, _Y) when X == 1 ->
    1.0;
%% +-1 raised to +-INF is 1.
pow(X, infinity) when abs(X) == 1 ->
    1.0;
pow(X, neg_infinity) when abs(X) == 1 ->
    1.0;
pow(X, Y) ->
    case catch math:pow(X, Y) of
        {'EXIT', _} -> nan;
        Z -> Z
    end.
%% Returns the sine of the argument. The argument is an angle in radians.
sin(_Ctx, Arg) -> sin(Arg).

sin([]) ->
    [];
sin([Value]) ->
    sin(Value);
sin(#xqAtomicValue{value = Value}) ->
    sin(Value);
sin(Zero) when Zero == 0 ->
    %% sin(+-0) = +-0; return the argument unchanged.
    Zero;
sin(neg_zero = Special) ->
    Special;
sin(nan = Special) ->
    Special;
sin(neg_infinity) ->
    nan;
sin(Num) ->
    try math:sin(Num)
    catch
        _:_ -> nan
    end.
%% Returns the non-negative square root of the argument.
sqrt(_Ctx, Arg) -> sqrt(Arg).

sqrt([]) ->
    [];
sqrt([Value]) ->
    sqrt(Value);
sqrt(#xqAtomicValue{value = Value}) ->
    sqrt(Value);
sqrt(nan = Special) ->
    Special;
sqrt(infinity = Special) ->
    Special;
sqrt(neg_zero = Special) ->
    %% sqrt(-0) = -0.
    Special;
sqrt(neg_infinity) ->
    nan;
sqrt(Num) ->
    %% math:sqrt/1 of a negative number raises, which maps to NaN.
    try math:sqrt(Num)
    catch
        _:_ -> nan
    end.
%% Returns the tangent of the argument. The argument is an angle in radians.
tan(_Ctx, Arg) -> tan(Arg).

tan([]) ->
    [];
tan([Value]) ->
    tan(Value);
tan(#xqAtomicValue{value = Value}) ->
    tan(Value);
tan(neg_zero) ->
    %% tan(-0) = -0.
    neg_zero;
tan(Num) ->
    try math:tan(Num)
    catch
        _:_ -> nan
    end.
%%%-------------------------------------------------------------------
%%% Licensed to the Apache Software Foundation (ASF) under one
%%% or more contributor license agreements. See the NOTICE file
%%% distributed with this work for additional information
%%% regarding copyright ownership. The ASF licenses this file
%%% to you under the Apache License, Version 2.0 (the
%%% "License"); you may not use this file except in compliance
%%% with the License. You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing,
%%% software distributed under the License is distributed on an
%%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%%% KIND, either express or implied. See the License for the
%%% specific language governing permissions and limitations
%%% under the License.
%%%
%%%
%%% @doc
%%% Run a list of key-value pairs through a list of conditions and return
%%% the specified actions for the matching ones.
%%%
%%% Filter rules are composed by a list of `{Conditions, Actions}' tuples.
%%% Processing a span means iterating through this list and when an item
%%% found where all `Conditions' evaluate to true then the `Actions' in that
%%% item are collected and at the end of the evaluation returned to the
%%% caller for execution. i.e. the filter library can be applied in different
%%% environments.
%%%
%%% The evaluation of the rules can run in 2 modes. The `break' mode means
%%% that it stops at the first matching item and returns the `Actions'
%%% specified there. The `continue' mode runs through all `{Conditions, Actions}'
%%% items and collects all matching `Actions'.
%%%
%%% The library implements the following conditions :
%%%
%%% Check the presence of a Key
%%%
%%% `{present, Key}'
%%%
%%%%
%%% Check whether 2 Keys have the same value
%%%
%%% `{same, Key1, Key2}'
%%%
%%%
%%% The value of a Key/Value pair can be compared to a value
%%%
%%% `{value, Key, ValueToCompare}'
%%%
%%% example: check the name of the span
%%%
%%% `{value, otter_span_name, "radius request"}'
%%%
%%%
%%% Checking integer values
%%% Key/Value pairs with integer values can be checked with the following
%%% conditions.
%%%
%%% `{greater, Key, Integer}'
%%% `{less, Key, Integer}'
%%% `{between, Key, Integer1, Integer2}'
%%%
%%% example: check whether the span duration is greater than 5 seconds
%%%
%%% `{greater, otter_span_duration, 5000000}'
%%%
%%%
%%% Negate condition check
%%%
%%% `{negate, Condition}'
%%%
%%% example: Check if the value of the "final_result" tag is other than "ok"
%%%
%%% `{negate, {value, "final_result", "ok"}}'
%%%
%%% One out of
%%%
%%% This condition uses a random generated number and in the range of `0 < X =< Integer',
%%% and if the generated value is 1 it returns true.
%%%
%%% `{one_out_of, Integer}'
%%%
%%% example: Match 1 out of 1000 requests
%%% `{one_out_of, 1000}'
%%% @end
%%%-------------------------------------------------------------------
-module(otter_lib_filter).
-export([run/2, run/3]).
-type condition() :: tuple().
-type action() :: tuple()|atom().
-type rules() :: [{[condition()], [action()]}].
-type tags() :: [{term(), term()}].
%%---------------------------------------------------------------------
%% @doc Run the key-value pair data (tags) through the rules with the default
%% `continue' mode, i.e. collect the actions of every matching rule.
%% @end
%%---------------------------------------------------------------------
-spec run(Tags :: tags(), Rules :: rules()) -> [action()].
run(KVPairs, FilterRules) ->
    run(KVPairs, FilterRules, continue).
%%---------------------------------------------------------------------
%% @doc Run the key-value pair data (tags) through the rules with the
%% specified mode: `break' stops at the first matching rule; `continue'
%% collects actions from all matching rules.
%% @end
%%---------------------------------------------------------------------
-spec run(Tags :: tags(), Rules :: rules(), Mode :: break|continue) -> [action()].
run(KVPairs, FilterRules, Mode) ->
    rules(FilterRules, KVPairs, Mode, []).
%% Walk the rule list. In `break' mode the first matching rule
%% short-circuits and its actions are returned alone; in `continue' mode
%% matching actions are accumulated. Note that each new match is prepended
%% to the accumulator, so actions of later rules precede earlier ones in
%% the final result (preserved from the original behaviour).
rules([], _Tags, _Mode, Collected) ->
    Collected;
rules([{Conditions, Actions} | Remaining], Tags, Mode, Collected) ->
    case check_conditions(Conditions, Tags) of
        false ->
            rules(Remaining, Tags, Mode, Collected);
        true when Mode == break ->
            Actions;
        true when Mode == continue ->
            rules(Remaining, Tags, Mode, Actions ++ Collected)
    end.
%% True when every condition in the list evaluates to true for the given
%% tags; evaluation stops at the first failing condition.
check_conditions(Conditions, Tags) ->
    lists:all(fun (Condition) -> check(Condition, Tags) end, Conditions).
%% Evaluate a single condition against the key-value pair list.
%% Missing keys and unknown condition terms evaluate to false.
check({negate, Condition}, Tags) ->
    not check(Condition, Tags);
check({value, Key, Value}, Tags) ->
    %% True when `Key' is present and bound exactly to `Value'.
    case lists:keyfind(Key, 1, Tags) of
        {Key, Value} ->
            true;
        _ ->
            false
    end;
check({same, Key1, Key2}, Tags) ->
    %% True when both keys are present and bound to the same value.
    case lists:keyfind(Key1, 1, Tags) of
        {Key1, Value} ->
            case lists:keyfind(Key2, 1, Tags) of
                {Key2, Value} ->
                    true;
                _ ->
                    false
            end;
        _ ->
            false
    end;
check({greater, Key, Value}, Tags) ->
    case lists:keyfind(Key, 1, Tags) of
        {Key, Value1} when Value1 > Value ->
            true;
        _ ->
            false
    end;
check({less, Key, Value}, Tags) ->
    case lists:keyfind(Key, 1, Tags) of
        {Key, Value1} when Value1 < Value ->
            true;
        _ ->
            false
    end;
check({between, Key, Value1, Value2}, Tags) ->
    %% Exclusive bounds: Value1 < V < Value2.
    case lists:keyfind(Key, 1, Tags) of
        {Key, Value} when Value > Value1 andalso Value < Value2 ->
            true;
        _ ->
            false
    end;
check({present, Key}, Tags) ->
    lists:keymember(Key, 1, Tags);
check({one_out_of, Nr}, _Tags) ->
    %% BUG FIX: rand:uniform(Nr) yields an integer in 1..Nr, so comparing
    %% to 1 gives a 1/Nr hit rate. The previous code compared
    %% `rand:uniform(Nr) - 1' (range 0..Nr-1) to 1, which made
    %% {one_out_of, 1} never match even though it should always match.
    rand:uniform(Nr) =:= 1;
check(_, _) ->
    false.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
%%% @author <NAME>
%%% @copyright 2012 <NAME>
%%% @doc MongoDB GridFS API. This module provides functions for creating, reading, updating and
%% deleting files from GridFS. The exported functions exposed are similar to the CRUD
%% functions exposed by the mongo API of the MongoDB driver.
%%% @end
-module(gridfs).
%% Includes
-include("gridfs.hrl").
%% Types
%% API
-export([insert/4, insert/5, insert/6,
find/2, find/4,
find_one/2, find_one/4,
delete_one/2, delete_one/4,
delete/3, delete/4, delete/2, coll/1]).
%% External functions
%@doc Deletes files matching the selector from the default "fs" bucket
% (the fs.files and fs.chunks collections).
-spec(delete(pid(), bson:document()) -> ok).
delete(Connection, Selector) ->
    delete(Connection, undefined, <<"fs">>, Selector).
%@doc Deletes files matching the selector from the specified bucket, using
% the default (undefined) database.
% FIX: previously the 4-arity spec was placed directly above this 3-arity
% delegate and delete/3 itself had no spec.
-spec(delete(pid(), bucket(), bson:document()) -> ok).
delete(Conn, Bucket, Selector) -> delete(Conn, undefined, Bucket, Selector).
%@doc Deletes all files matching the selector from the specified database
% and bucket. The ids of the matching file documents are collected first,
% then their chunks are removed before the file documents themselves.
-spec(delete(pid(), database(), bucket(), bson:document()) -> ok).
delete(Conn, Db, Bucket, Selector) ->
    FilesColl = <<(coll(Bucket))/binary, ".files">>,
    ChunksColl = <<(coll(Bucket))/binary, ".chunks">>,
    Cursor = mc_worker_api:find(Conn, {Db, FilesColl}, Selector, #{projector => #{<<"_id">> => 1}}),
    Files = mc_cursor:rest(Cursor),
    mc_cursor:close(Cursor),
    Ids = [Id || #{<<"_id">> := Id} <- Files],
    mc_worker_api:delete(Conn, {Db, ChunksColl}, {files_id, {'$in', Ids}}),
    mc_worker_api:delete(Conn, {Db, FilesColl}, {'_id', {'$in', Ids}}),
    ok.
%@doc Deletes the first file matching the selector from the default "fs"
% bucket (the fs.files and fs.chunks collections).
-spec(delete_one(pid(), bson:document()) -> ok).
delete_one(Connection, Selector) ->
    delete_one(Connection, undefined, <<"fs">>, Selector).
%@doc Deletes the first file matching the selector from the specified bucket.
%% Looks up one matching file document (projecting only its id), removes all
%% of its chunks, then removes the file document itself. Returns ok whether
%% or not a matching file was found.
%% NOTE(review): the projector here is passed positionally as {'_id', 1},
%% unlike the other functions in this module which pass
%% #{projector => #{<<"_id">> => 1}} — confirm against the mc_worker_api
%% version in use.
-spec(delete_one(pid(), database(), bucket(), bson:document()) -> ok).
delete_one(Conn, Db, Bucket, Selector) ->
    FilesColl = <<(coll(Bucket))/binary, ".files">>,
    ChunksColl = <<(coll(Bucket))/binary, ".chunks">>,
    case mc_worker_api:find_one(Conn, {Db, FilesColl}, Selector, {'_id', 1}) of
        #{<<"_id">> := Id} ->
            %% Remove the data chunks first, then the metadata document.
            mc_worker_api:delete(Conn, {Db, ChunksColl}, {files_id, Id}),
            mc_worker_api:delete_one(Conn, {Db, FilesColl}, {'_id', Id}),
            ok;
        #{} ->
            %% No matching file: nothing to do.
            ok
    end.
%% @doc Executes an 'action' using the specified read and write modes to a database using a connection.
%% An 'action' is a function that takes no arguments. The fun will usually invoke functions
%% to do inserts, finds, modifies, deletes, etc.
%%-spec(do(mc_worker_api:write_mode(), mc_worker_api:read_mode(), mc_worker_api:connection()|mc_worker_api:rs_connection(),mc_worker_api:db(), mc_worker_api:action()) -> {ok, any()}|{failure, any()}).
%%do(WriteMode, ReadMode, Connection, Database, Action) ->
%% %% Since we need to store state information, we spawn a new process for this
%% %% function so that if the Action also invokes the 'do' function we don't wind up trashing
%% %% the original state.
%% ConnectionParameters = #gridfs_connection{write_mode=WriteMode, read_mode=ReadMode, connection=Connection, database=Database},
%% {ok, Pid} = gen_server:start_link(?MODULE, [ConnectionParameters], []),
%% gen_server:call(Pid, {do, Action}, infinity).
%@doc Finds the first file matching the selector from the default "fs"
% bucket. Returns a gridfs file handle, or `error' when nothing matches.
% FIX: the previous spec claimed `file()' only, but the underlying
% find_one/4 returns `error' on a miss.
-spec(find_one(pid(), bson:document()) -> file() | error).
find_one(Conn, Selector) ->
    find_one(Conn, undefined, <<"fs">>, Selector).
%@doc Finds the first file matching the selector from the specified bucket.
% Returns a gridfs file handle, or `error' when no file matches.
% FIX: the previous spec claimed `file()' only, but the fallthrough clause
% returns `error' on a miss.
-spec(find_one(pid(), database(), bucket(), bson:document()) -> file() | error).
find_one(Conn, Db, Bucket, Selector) ->
    FilesColl = <<(coll(Bucket))/binary, ".files">>,
    case mc_worker_api:find_one(Conn, {Db, FilesColl}, Selector, #{projector => #{<<"_id">> => 1}}) of
        #{<<"_id">> := Id} ->
            gridfs_file:new(#gridfs_connection{connection = Conn, database = Db}, Bucket, Id, self());
        _ -> error
    end.
%@doc Finds files matching the selector in the default "fs" bucket and
% returns a cursor over the matching file handles.
-spec(find(pid(), bson:document()) -> cursor()).
find(Connection, Selector) ->
    find(Connection, undefined, <<"fs">>, Selector).
%@doc Finds files matching the selector in the specified bucket and returns
% a cursor over the matching file handles (only ids are fetched; file data
% is loaded lazily through the cursor).
-spec(find(pid(), database(), bucket(), bson:document()) -> cursor()).
find(Conn, Db, Bucket, Selector) ->
    FilesCollection = <<(coll(Bucket))/binary, ".files">>,
    MongoCursor = mc_worker_api:find(Conn, {Db, FilesCollection}, Selector,
                                     #{projector => #{<<"_id">> => 1}}),
    gridfs_cursor:new(#gridfs_connection{connection = Conn, database = Db},
                      Bucket, MongoCursor, self()).
%@doc Inserts a file with a specified name into the default bucket.
% The file contents can be passed as either binary data or a file process
% opened for reading.
insert(Connection, Database, FileName, FileData) ->
    insert_with_bson(Connection, Database, {filename, FileName}, FileData).
%@doc Inserts a file with a specified bson document into the default bucket.
% The file contents can be passed as either binary data or a file process
% opened for reading.
insert_with_bson(Connection, Database, BsonDoc, FileData) ->
    insert(Connection, Database, <<"fs">>, BsonDoc, FileData).
%@doc Inserts a file with a bson document or filename into the specified bucket.
% The file contents can be passed as either binary data or a file process
% opened for reading. Returns {ok, ObjectId} of the new file document.
% FIXES: dropped the unused `Res' binding (compiler warning) and replaced
% the generic size/1 with byte_size/1 on the binary payload.
insert(Conn, Db, Bucket, FileName, FileData) when not is_tuple(FileName) ->
    %% A plain name is wrapped into a {filename, Name} bson pair.
    insert(Conn, Db, Bucket, {filename, FileName}, FileData);
insert(Conn, Db, Bucket, Bson, FileData) when is_binary(FileData) ->
    FilesColl = <<(coll(Bucket))/binary, ".files">>,
    ChunksColl = <<(coll(Bucket))/binary, ".chunks">>,
    ObjectId = mongo_id_server:object_id(),
    %% Write the content chunks first, then the metadata document.
    insert(Conn, Db, ChunksColl, ObjectId, 0, FileData),
    Md5 = list_to_binary(bin_to_hexstr(erlang:md5(FileData))),
    ListBson = tuple_to_list(Bson),
    ListFileAttr = ['_id', ObjectId, length, byte_size(FileData), chunkSize, ?CHUNK_SIZE, uploadDate, erlang:timestamp(), md5, Md5],
    UnifiedList = lists:append([ListFileAttr, ListBson]),
    _ = mc_worker_api:insert(Conn, {Db, FilesColl}, list_to_tuple(UnifiedList)),
    {ok, ObjectId};
insert(Conn, Db, Bucket, Bson, IoStream) ->
    %% Streaming variant: copy from IoStream chunk by chunk, computing the
    %% MD5 and total size on the fly, then close the stream.
    FilesColl = <<(coll(Bucket))/binary, ".files">>,
    ChunksColl = <<(coll(Bucket))/binary, ".chunks">>,
    ObjectId = mongo_id_server:object_id(),
    {Md5, FileSize} = copy(Conn, Db, ChunksColl, ObjectId, 0, IoStream, erlang:md5_init(), 0),
    Md5Str = list_to_binary(bin_to_hexstr(Md5)),
    file:close(IoStream),
    ListBson = tuple_to_list(Bson),
    ListFileAttr = ['_id', ObjectId, length, FileSize, chunkSize, ?CHUNK_SIZE,
                    uploadDate, erlang:timestamp(), md5, Md5Str],
    UnifiedList = lists:append([ListFileAttr, ListBson]),
    mc_worker_api:insert(Conn, {Db, FilesColl}, list_to_tuple(UnifiedList)),
    {ok, ObjectId}.
%% Internal functions
%% Converts a binary to its lowercase hexadecimal string representation,
%% two characters per byte.
bin_to_hexstr(Bin) ->
    lists:flatten([io_lib:format("~2.16.0b", [Byte]) || <<Byte>> <= Bin]).
%% Writes Data to the chunks collection in ?CHUNK_SIZE-byte pieces,
%% numbering chunks from N upward; the final (short) piece is written as-is.
%% IMPROVED: matches a sub-binary directly instead of round-tripping each
%% chunk through a ?CHUNK_SIZE*8-bit integer, which allocated a huge bignum
%% per chunk; the resulting chunk bytes are identical.
insert(Conn, Db, Coll, ObjectId, N, Data) when byte_size(Data) =< ?CHUNK_SIZE ->
    mc_worker_api:insert(Conn, {Db, Coll}, {'files_id', ObjectId, data, {bin, bin, Data}, n, N});
insert(Conn, Db, Coll, ObjectId, N, Data) ->
    <<Chunk:?CHUNK_SIZE/binary, Rest/binary>> = Data,
    mc_worker_api:insert(Conn, {Db, Coll}, {'files_id', ObjectId, data, {bin, bin, Chunk}, n, N}),
    insert(Conn, Db, Coll, ObjectId, N + 1, Rest).
%% Streams the contents of IoStream into the chunks collection, one
%% ?CHUNK_SIZE-byte chunk document per iteration, starting at chunk number N.
%% Accumulates an MD5 context and the running byte count; on eof returns
%% {FinalMd5Digest, TotalSize}. Reads are positional (pread at
%% N * ?CHUNK_SIZE), so the stream must support random access.
copy(Conn, Db, ChunksColl, ObjectId, N, IoStream, Md5Context, Size) ->
    case file:pread(IoStream, N * ?CHUNK_SIZE, ?CHUNK_SIZE) of
        eof ->
            %% End of input: finalise the digest and report the total size.
            {erlang:md5_final(Md5Context), Size};
        {ok, Data} ->
            mc_worker_api:insert(Conn, {Db, ChunksColl}, {'files_id', ObjectId, data, {bin, bin, Data}, n, N}),
            copy(Conn, Db, ChunksColl, ObjectId, N + 1, IoStream, erlang:md5_update(Md5Context, Data), Size + size(Data))
    end.
%% Normalises a bucket name to a binary collection-name prefix: binaries
%% pass through unchanged, atoms are converted using UTF-8.
coll(Bucket) when is_binary(Bucket) -> Bucket;
coll(Bucket) when is_atom(Bucket) -> atom_to_binary(Bucket, utf8).
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(rabbit_msg_store_index).
-include("../include/rabbit_msg_store.hrl").
%% Behaviour module to provide pluggable message store index.
%% The index is used to locate message on disk and for reference-counting.
%% Message store have several additional assumptions about performance and
%% atomicity of some operations. See comments for each callback.
-type(dir() :: string()).
-type(index_state() :: any()).
-type(fieldpos() :: non_neg_integer()).
-type(fieldvalue() :: any()).
-type(msg_location() :: #msg_location{}).
%% There are two ways of starting an index:
%% - `new` - starts a clean index
%% - `recover` - attempts to read a saved index
%% In both cases the old saved state should be deleted from directory.
%% Initialize a fresh index state for msg store directory.
-callback new(dir()) -> index_state().
%% Try to recover gracefully stopped index state.
-callback recover(dir()) -> rabbit_types:ok_or_error2(index_state(), any()).
%% Gracefully shutdown the index.
%% Should save the index state, which will be loaded by the 'recover' function.
-callback terminate(index_state()) -> any().
%% Lookup an entry in the index.
%% Is called concurrently by msg_store, it's clients and GC processes.
%% This function is called multiple times for each message store operation.
%% Message store tries to avoid writing messages on disk if consumers can
%% process them fast, so there will be a lot of lookups for non-existent
%% entries, which should be as fast as possible.
-callback lookup(rabbit_types:msg_id(), index_state()) -> ('not_found' | msg_location()).
%% Insert an entry into the index.
%% Is called by a msg_store process only.
%% This function can exit if there is already an entry with the same ID
-callback insert(msg_location(), index_state()) -> 'ok'.
%% Update an entry in the index.
%% Is called by a msg_store process only.
%% The function is called during message store recovery after crash.
%% The difference between update and insert functions, is that update
%% should not fail if entry already exist, and should be atomic.
-callback update(msg_location(), index_state()) -> 'ok'.
%% Update positional fields in the entry tuple.
%% Is called by msg_store and GC processes concurrently.
%% This function can exit if there is no entry with specified ID
%% This function is called to update reference-counters and file locations.
%% File locations are updated from a GC process, reference-counters are
%% updated from a message store process.
%% This function should be atomic.
-callback update_fields(rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} |
[{fieldpos(), fieldvalue()}]),
index_state()) -> 'ok'.
%% Delete an entry from the index by ID.
%% Is called from a msg_store process only.
%% This function should be atomic.
-callback delete(rabbit_types:msg_id(), index_state()) -> 'ok'.
%% Delete an exactly matching entry from the index.
%% Is called by GC process only.
%% This function should match exact object to avoid deleting a zero-reference
%% object, which reference-counter is being concurrently updated.
%% This function should be atomic.
-callback delete_object(msg_location(), index_state()) -> 'ok'.
%% Delete temporary reference count entries with the 'file' record field equal to 'undefined'.
%% Is called during index rebuild from scratch (e.g. after non-clean stop)
%% During recovery after non-clean stop or file corruption, reference-counters
%% are added to the index with `undefined` value for the `file` field.
%% If message is found in a message store file, it's file field is updated.
%% If some reference-counters miss the message location after recovery - they
%% should be deleted.
-callback clean_up_temporary_reference_count_entries_without_file(index_state()) -> 'ok'.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1999-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% %CopyrightEnd%
%%
%% Purpose : Convert annotated kernel expressions to annotated beam format.
%% This module creates beam format annotated with variable lifetime
%% information. Each thing is given an index and for each variable we
%% store the first and last index for its occurrence. The variable
%% database, VDB, attached to each thing is only relevant internally
%% for that thing.
%%
%% For nested things like matches the numbering continues locally and
%% the VDB for that thing refers to the variable usage within that
%% thing. Variables which live through a such a thing are internally
%% given a very large last index. Internally the indexes continue
%% after the index of that thing. This creates no problems as the
%% internal variable info never escapes and externally we only see
%% variable which are alive both before or after.
%%
%% This means that variables never "escape" from a thing and the only
%% way to get values from a thing is to "return" them, with 'break' or
%% 'return'. Externally these values become the return values of the
%% thing. This is no real limitation as most nested things have
%% multiple threads so working out a common best variable usage is
%% difficult.
-module(v3_life).
-export([module/2]).
-export([vdb_find/2]).
-import(lists, [member/2,map/2,foldl/3,reverse/1,sort/1]).
-import(ordsets, [add_element/2,intersection/2,union/2]).
-include("v3_kernel.hrl").
-include("v3_life.hrl").
%% These are not defined in v3_kernel.hrl.
%% Every kernel-expression record keeps its annotation in element 2.
get_kanno(KThing) -> element(2, KThing).
%%set_kanno(Kthing, Anno) -> setelement(2, Kthing, Anno).
%% Convert an annotated kernel module definition into the annotated
%% beam-format module tuple {Name, Exports, Attributes, Functions}.
module(#k_mdef{name=Mod,exports=Exps,attributes=Attrs,body=Body}, _Opts) ->
    {ok,{Mod,Exps,Attrs,functions(Body, [])}}.
%% Translate each function definition in order; the result is the reversed
%% accumulator followed by the translated functions (equivalent to the
%% original accumulate-then-reverse loop).
functions(Fs, Acc) ->
    lists:reverse(Acc, [function(F) || F <- Fs]).
%% function(Kfunc) -> Func.
%%  Translate one kernel function definition into the annotated beam
%%  format {function,Name,Arity,Args,Body,Vdb,Anno}.
function(#k_fdef{anno=#k{a=Anno},func=F,arity=Ar,vars=Vs,body=Kb}) ->
    try
        %% Seed the variable database: every argument is born at index 0.
        As = var_list(Vs),
        Vdb0 = foldl(fun ({var,N}, Vdb) -> new_var(N, 0, Vdb) end, [], As),
        %% Force a top-level match!
        B0 = case Kb of
                 #k_match{} -> Kb;
                 _ ->
                     %% Wrap a non-match body in a synthetic match carrying
                     %% the body's own used-variable annotation.
                     Ka = get_kanno(Kb),
                     #k_match{anno=#k{us=Ka#k.us,ns=[],a=Ka#k.a},
                              vars=Vs,body=Kb,ret=[]}
             end,
        %% Process-dictionary counter, presumably consumed by the guard
        %% translation elsewhere in this module — confirm before relying
        %% on it.
        put(guard_refc, 0),
        {B1,_,Vdb1} = body(B0, 1, Vdb0),
        erase(guard_refc),
        {function,F,Ar,As,B1,Vdb1,Anno}
    catch
        Class:Error ->
            %% Report which function failed, then re-raise with the
            %% original class and stacktrace.
            Stack = erlang:get_stacktrace(),
            io:fwrite("Function: ~w/~w\n", [F,Ar]),
            erlang:raise(Class, Error, Stack)
    end.
%% body(Kbody, I, Vdb) -> {[Expr],MaxI,Vdb}.
%% Handle a body.
body(#k_seq{arg=Ke,body=Kb}, I, Vdb0) ->
%%ok = io:fwrite("life ~w:~p~n", [?LINE,{Ke,I,Vdb0}]),
A = get_kanno(Ke),
%% Record new variables (ns) and uses (us) of this expression at
%% index I before processing the rest of the body.
Vdb1 = use_vars(A#k.us, I, new_vars(A#k.ns, I, Vdb0)),
{Es,MaxI,Vdb2} = body(Kb, I+1, Vdb1),
E = expr(Ke, I, Vdb2),
{[E|Es],MaxI,Vdb2};
body(Ke, I, Vdb0) ->
%%ok = io:fwrite("life ~w:~p~n", [?LINE,{Ke,I,Vdb0}]),
A = get_kanno(Ke),
Vdb1 = use_vars(A#k.us, I, new_vars(A#k.ns, I, Vdb0)),
E = expr(Ke, I, Vdb1),
{[E],I,Vdb1}.
%% guard(Kguard, I, Vdb) -> Guard.
%% Only called in guard context: a try whose handler is the literal
%% 'false' and whose body just returns the tried value is converted to
%% a 'protected' expression.
guard(#k_try{anno=A,arg=Ts,vars=[#k_var{name=X}],body=#k_var{name=X},
handler=#k_atom{val=false},ret=Rs}, I, Vdb) ->
%% Lock variables that are alive before try and used afterwards.
%% Don't lock variables that are only used inside the try expression.
Pdb0 = vdb_sub(I, I+1, Vdb),
{T,MaxI,Pdb1} = body(Ts, I+1, Pdb0),
Pdb2 = use_vars(A#k.ns, MaxI+1, Pdb1), %Save "return" values
#l{ke={protected,T,var_list(Rs)},i=I,a=A#k.a,vdb=Pdb2}.
%% expr(Kexpr, I, Vdb) -> Expr.
%% Convert a single kernel expression at index I to the annotated #l{}
%% beam format.  Clauses that open a nested scope (match, try, catch,
%% receive) build a sub-database with vdb_sub/3 first.
expr(#k_test{anno=A,op=Op,args=As}, I, _Vdb) ->
#l{ke={test,test_op(Op),atomic_list(As)},i=I,a=A#k.a};
expr(#k_call{anno=A,op=Op,args=As,ret=Rs}, I, _Vdb) ->
#l{ke={call,call_op(Op),atomic_list(As),var_list(Rs)},i=I,a=A#k.a};
expr(#k_enter{anno=A,op=Op,args=As}, I, _Vdb) ->
#l{ke={enter,call_op(Op),atomic_list(As)},i=I,a=A#k.a};
expr(#k_bif{anno=A,op=Op,args=As,ret=Rs}, I, _Vdb) ->
Bif = k_bif(A, Op, As, Rs),
#l{ke=Bif,i=I,a=A#k.a};
expr(#k_match{anno=A,body=Kb,ret=Rs}, I, Vdb) ->
%% Work out imported variables which need to be locked.
Mdb = vdb_sub(I, I+1, Vdb),
M = match(Kb, A#k.us, I+1, [], Mdb),
#l{ke={match,M,var_list(Rs)},i=I,vdb=use_vars(A#k.us, I+1, Mdb),a=A#k.a};
expr(#k_guard_match{anno=A,body=Kb,ret=Rs}, I, Vdb) ->
%% Work out imported variables which need to be locked.
Mdb = vdb_sub(I, I+1, Vdb),
M = match(Kb, A#k.us, I+1, [], Mdb),
#l{ke={guard_match,M,var_list(Rs)},i=I,vdb=use_vars(A#k.us, I+1, Mdb),a=A#k.a};
%% A try is converted differently inside a guard (is_in_guard/0 reads
%% the guard_refc counter maintained by guard_clause/5).
expr(#k_try{}=Try, I, Vdb) ->
case is_in_guard() of
false -> body_try(Try, I, Vdb);
true -> guard(Try, I, Vdb)
end;
expr(#k_try_enter{anno=A,arg=Ka,vars=Vs,body=Kb,evars=Evs,handler=Kh}, I, Vdb) ->
%% Lock variables that are alive before the catch and used afterwards.
%% Don't lock variables that are only used inside the try.
Tdb0 = vdb_sub(I, I+1, Vdb),
%% This is the tricky bit. Lock variables in Arg that are used in
%% the body and handler. Add try tag 'variable'.
Ab = get_kanno(Kb),
Ah = get_kanno(Kh),
Tdb1 = use_vars(Ab#k.us, I+3, use_vars(Ah#k.us, I+3, Tdb0)),
Tdb2 = vdb_sub(I, I+2, Tdb1),
Vnames = fun (Kvar) -> Kvar#k_var.name end, %Get the variable names
{Aes,_,Adb} = body(Ka, I+2, add_var({catch_tag,I+1}, I+1, 1000000, Tdb2)),
{Bes,_,Bdb} = body(Kb, I+4, new_vars(map(Vnames, Vs), I+3, Tdb2)),
{Hes,_,Hdb} = body(Kh, I+4, new_vars(map(Vnames, Evs), I+3, Tdb2)),
#l{ke={try_enter,#l{ke={block,Aes},i=I+1,vdb=Adb,a=[]},
var_list(Vs),#l{ke={block,Bes},i=I+3,vdb=Bdb,a=[]},
var_list(Evs),#l{ke={block,Hes},i=I+3,vdb=Hdb,a=[]}},
i=I,vdb=Tdb1,a=A#k.a};
expr(#k_catch{anno=A,body=Kb,ret=[R]}, I, Vdb) ->
%% Lock variables that are alive before the catch and used afterwards.
%% Don't lock variables that are only used inside the catch.
%% Add catch tag 'variable'.
Cdb0 = vdb_sub(I, I+1, Vdb),
{Es,_,Cdb1} = body(Kb, I+1, add_var({catch_tag,I}, I, locked, Cdb0)),
#l{ke={'catch',Es,variable(R)},i=I,vdb=Cdb1,a=A#k.a};
expr(#k_receive{anno=A,var=V,body=Kb,timeout=T,action=Ka,ret=Rs}, I, Vdb) ->
%% Work out imported variables which need to be locked.
Rdb = vdb_sub(I, I+1, Vdb),
M = match(Kb, add_element(V#k_var.name, A#k.us), I+1, [],
new_var(V#k_var.name, I, Rdb)),
{Tes,_,Adb} = body(Ka, I+1, Rdb),
#l{ke={receive_loop,atomic(T),variable(V),M,
#l{ke=Tes,i=I+1,vdb=Adb,a=[]},var_list(Rs)},
i=I,vdb=use_vars(A#k.us, I+1, Vdb),a=A#k.a};
expr(#k_receive_accept{anno=A}, I, _Vdb) ->
#l{ke=receive_accept,i=I,a=A#k.a};
expr(#k_receive_next{anno=A}, I, _Vdb) ->
#l{ke=receive_next,i=I,a=A#k.a};
expr(#k_put{anno=A,arg=Arg,ret=Rs}, I, _Vdb) ->
#l{ke={set,var_list(Rs),literal(Arg, [])},i=I,a=A#k.a};
expr(#k_break{anno=A,args=As}, I, _Vdb) ->
#l{ke={break,atomic_list(As)},i=I,a=A#k.a};
expr(#k_guard_break{anno=A,args=As}, I, Vdb) ->
%% A guard break carries the names of all currently known variables.
Locked = [V || {V,_,_} <- Vdb],
#l{ke={guard_break,atomic_list(As),Locked},i=I,a=A#k.a};
expr(#k_return{anno=A,args=As}, I, _Vdb) ->
#l{ke={return,atomic_list(As)},i=I,a=A#k.a}.
%% body_try(Ktry, I, Vdb) -> Expr.
%%  Convert a try that appears in body (non-guard) context; the
%%  structure mirrors the k_try_enter clause of expr/3 but keeps the
%%  return variables.
body_try(#k_try{anno=A,arg=Ka,vars=Vs,body=Kb,evars=Evs,handler=Kh,ret=Rs},
I, Vdb) ->
%% Lock variables that are alive before the catch and used afterwards.
%% Don't lock variables that are only used inside the try.
Tdb0 = vdb_sub(I, I+1, Vdb),
%% This is the tricky bit. Lock variables in Arg that are used in
%% the body and handler. Add try tag 'variable'.
Ab = get_kanno(Kb),
Ah = get_kanno(Kh),
Tdb1 = use_vars(Ab#k.us, I+3, use_vars(Ah#k.us, I+3, Tdb0)),
Tdb2 = vdb_sub(I, I+2, Tdb1),
Vnames = fun (Kvar) -> Kvar#k_var.name end, %Get the variable names
{Aes,_,Adb} = body(Ka, I+2, add_var({catch_tag,I+1}, I+1, locked, Tdb2)),
{Bes,_,Bdb} = body(Kb, I+4, new_vars(map(Vnames, Vs), I+3, Tdb2)),
{Hes,_,Hdb} = body(Kh, I+4, new_vars(map(Vnames, Evs), I+3, Tdb2)),
#l{ke={'try',#l{ke={block,Aes},i=I+1,vdb=Adb,a=[]},
var_list(Vs),#l{ke={block,Bes},i=I+3,vdb=Bdb,a=[]},
var_list(Evs),#l{ke={block,Hes},i=I+3,vdb=Hdb,a=[]},
var_list(Rs)},
i=I,vdb=Tdb1,a=A#k.a}.
%% call_op(Op) -> Op.
%% bif_op(Op) -> Op.
%% test_op(Op) -> Op.
%% Do any necessary name translations here to munge into beam format.
%% The call target is a local name, a remote {M,F} pair, or otherwise a
%% variable holding a fun.
call_op(#k_local{name=N}) -> N;
call_op(#k_remote{mod=M,name=N}) -> {remote,atomic(M),atomic(N)};
call_op(Other) -> variable(Other).
%% Only erlang:* remote calls and internal operations can occur as bifs.
bif_op(#k_remote{mod=#k_atom{val=erlang},name=#k_atom{val=N}}) -> N;
bif_op(#k_internal{name=N}) -> N.
%% Guard tests are always remote calls to the erlang module.
test_op(#k_remote{mod=#k_atom{val=erlang},name=#k_atom{val=N}}) -> N.
%% k_bif(Anno, Op, [Arg], [Ret]) -> Expr.
%% Build bifs, do special handling of some internal calls.
k_bif(_A, #k_internal{name=dsetelement,arity=3}, As, []) ->
{bif,dsetelement,atomic_list(As),[]};
k_bif(_A, #k_internal{name=bs_context_to_binary=Op,arity=1}, As, []) ->
{bif,Op,atomic_list(As),[]};
k_bif(_A, #k_internal{name=bs_init_writable=Op,arity=1}, As, Rs) ->
{bif,Op,atomic_list(As),var_list(Rs)};
%% make_fun carries name/arity/index/uniq as leading literal arguments;
%% only the free variables remain as real arguments.
k_bif(_A, #k_internal{name=make_fun},
[#k_atom{val=Fun},#k_int{val=Arity},
#k_int{val=Index},#k_int{val=Uniq}|Free],
Rs) ->
{bif,{make_fun,Fun,Arity,Index,Uniq},var_list(Free),var_list(Rs)};
k_bif(_A, Op, As, Rs) ->
%% The general case.
Name = bif_op(Op),
Ar = length(As),
%% Bifs that may garbage collect need a different beam instruction.
case is_gc_bif(Name, Ar) of
false ->
{bif,Name,atomic_list(As),var_list(Rs)};
true ->
{gc_bif,Name,atomic_list(As),var_list(Rs)}
end.
%% match(Kexpr, [LockVar], I, Ctxt, Vdb) -> Expr.
%% Convert match tree to old format.
match(#k_alt{anno=A,first=Kf,then=Kt}, Ls, I, Ctxt, Vdb0) ->
Vdb1 = use_vars(union(A#k.us, Ls), I, Vdb0),
F = match(Kf, Ls, I+1, Ctxt, Vdb1),
T = match(Kt, Ls, I+1, Ctxt, Vdb1),
#l{ke={alt,F,T},i=I,vdb=Vdb1,a=A#k.a};
match(#k_select{anno=A,var=V,types=Kts}, Ls0, I, Ctxt, Vdb0) ->
Vanno = get_kanno(V),
%% The selected variable is also locked, unless marked no_usage.
Ls1 = case member(no_usage, Vanno) of
false -> add_element(V#k_var.name, Ls0);
true -> Ls0
end,
%% Propagate the reuse_for_context marker into the annotations.
Anno = case member(reuse_for_context, Vanno) of
true -> [reuse_for_context|A#k.a];
false -> A#k.a
end,
Vdb1 = use_vars(union(A#k.us, Ls1), I, Vdb0),
Ts = [type_clause(Tc, Ls1, I+1, Ctxt, Vdb1) || Tc <- Kts],
#l{ke={select,literal2(V, Ctxt),Ts},i=I,vdb=Vdb1,a=Anno};
match(#k_guard{anno=A,clauses=Kcs}, Ls, I, Ctxt, Vdb0) ->
Vdb1 = use_vars(union(A#k.us, Ls), I, Vdb0),
Cs = [guard_clause(G, Ls, I+1, Ctxt, Vdb1) || G <- Kcs],
#l{ke={guard,Cs},i=I,vdb=Vdb1,a=A#k.a};
match(Other, Ls, I, _Ctxt, Vdb0) ->
%% Anything else terminates the match tree and becomes a block.
Vdb1 = use_vars(Ls, I, Vdb0),
{B,_,Vdb2} = body(Other, I+1, Vdb1),
#l{ke={block,B},i=I,vdb=Vdb2,a=[]}.
%% Convert one type clause of a select, translating the kernel type
%% tag with type/1 and converting each value clause.
type_clause(#k_type_clause{anno=A,type=T,values=Kvs}, Ls, I, Ctxt, Vdb0) ->
%%ok = io:format("life ~w: ~p~n", [?LINE,{T,Kvs}]),
Vdb1 = use_vars(union(A#k.us, Ls), I+1, Vdb0),
Vs = [val_clause(Vc, Ls, I+1, Ctxt, Vdb1) || Vc <- Kvs],
#l{ke={type_clause,type(T),Vs},i=I,vdb=Vdb1,a=A#k.a}.
val_clause(#k_val_clause{anno=A,val=V,body=Kb}, Ls0, I, Ctxt0, Vdb0) ->
New = (get_kanno(V))#k.ns,
Bus = (get_kanno(Kb))#k.us,
%%ok = io:format("Ls0 = ~p, Used=~p\n New=~p, Bus=~p\n", [Ls0,Used,New,Bus]),
%% Variables bound by the pattern and used in the body are locked too.
Ls1 = union(intersection(New, Bus), Ls0), %Lock for safety
Vdb1 = use_vars(union(A#k.us, Ls1), I+1, new_vars(New, I, Vdb0)),
%% Matching a binary establishes a new match context named after the
%% segments variable.
Ctxt = case V of
#k_binary{segs=#k_var{name=C0}} -> C0;
_ -> Ctxt0
end,
B = match(Kb, Ls1, I+1, Ctxt, Vdb1),
#l{ke={val_clause,literal2(V, Ctxt),B},i=I,vdb=use_vars(Bus, I+1, Vdb1),a=A#k.a}.
guard_clause(#k_guard_clause{anno=A,guard=Kg,body=Kb}, Ls, I, Ctxt, Vdb0) ->
Vdb1 = use_vars(union(A#k.us, Ls), I+2, Vdb0),
Gdb = vdb_sub(I+1, I+2, Vdb1),
%% Bump guard_refc so is_in_guard/0 is true while converting the
%% guard expression; the previous value is restored afterwards.
OldRefc = put(guard_refc, get(guard_refc)+1),
G = guard(Kg, I+1, Gdb),
put(guard_refc, OldRefc),
B = match(Kb, Ls, I+2, Ctxt, Vdb1),
#l{ke={guard_clause,G,B},
i=I,vdb=use_vars((get_kanno(Kg))#k.us, I+2, Vdb1),
a=A#k.a}.
%% type(Ktype) -> Type.
%%  Map a kernel type tag to the corresponding beam-format type name.
type(Ktype) ->
    case Ktype of
        k_literal -> literal;
        k_int     -> integer;
        %% k_char would also map to integer (currently unused).
        k_float   -> float;
        k_atom    -> atom;
        k_nil     -> nil;
        k_cons    -> cons;
        k_tuple   -> tuple;
        k_binary  -> binary;
        k_bin_seg -> bin_seg;
        k_bin_int -> bin_int;
        k_bin_end -> bin_end;
        k_map     -> map
    end.
%% variable(Kvar) -> {var,Name}.
%% var_list([Kvar]) -> [{var,Name}].
%%  Strip kernel variable records down to plain {var,Name} tuples.
variable(#k_var{name=Name}) -> {var,Name}.

var_list(Kvars) -> lists:map(fun variable/1, Kvars).
%% atomic(Klit) -> Lit.
%% atomic_list([Klit]) -> [Lit].
%%  Convert terms that are already atomic (no construction needed):
%%  literals, variables, numbers, atoms and nil.
atomic(#k_literal{val=V}) -> {literal,V};
atomic(#k_var{name=N}) -> {var,N};
atomic(#k_int{val=I}) -> {integer,I};
atomic(#k_float{val=F}) -> {float,F};
atomic(#k_atom{val=N}) -> {atom,N};
%%atomic(#k_char{val=C}) -> {char,C};
atomic(#k_nil{}) -> nil.
atomic_list(Ks) -> [atomic(K) || K <- Ks].
%% literal(Klit) -> Lit.
%% literal_list([Klit]) -> [Lit].
%%  Convert a kernel term in construction context (k_put); Ctxt is the
%%  binary construction context threaded into bin_seg/bin_end tuples.
literal(#k_var{name=N}, _) -> {var,N};
literal(#k_int{val=I}, _) -> {integer,I};
literal(#k_float{val=F}, _) -> {float,F};
literal(#k_atom{val=N}, _) -> {atom,N};
%%literal(#k_char{val=C}, _) -> {char,C};
literal(#k_nil{}, _) -> nil;
literal(#k_cons{hd=H,tl=T}, Ctxt) ->
{cons,[literal(H, Ctxt),literal(T, Ctxt)]};
literal(#k_binary{segs=V}, Ctxt) ->
{binary,literal(V, Ctxt)};
literal(#k_bin_seg{size=S,unit=U,type=T,flags=Fs,seg=Seg,next=N}, Ctxt) ->
{bin_seg,Ctxt,literal(S, Ctxt),U,T,Fs,
[literal(Seg, Ctxt),literal(N, Ctxt)]};
literal(#k_bin_end{}, Ctxt) ->
{bin_end,Ctxt};
literal(#k_tuple{es=Es}, Ctxt) ->
{tuple,literal_list(Es, Ctxt)};
literal(#k_map{op=Op,var=Var,es=Es}, Ctxt) ->
{map,Op,literal(Var, Ctxt),literal_list(Es, Ctxt)};
literal(#k_map_pair{key=K,val=V}, Ctxt) ->
{map_pair,literal(K, Ctxt),literal(V, Ctxt)};
literal(#k_literal{val=V}, _Ctxt) ->
{literal,V}.
%% Convert every element with literal/2 under the same context.
literal_list(Klits, Ctxt) ->
    lists:map(fun(Klit) -> literal(Klit, Ctxt) end, Klits).
%% literal2(Klit, Ctxt) -> Lit.
%%  Like literal/2 but for matching context: it also handles k_literal
%%  directly, bin segments whose 'next' is empty, k_bin_int, and a map
%%  clause without the 'var' part.
literal2(#k_var{name=N}, _) -> {var,N};
literal2(#k_literal{val=I}, _) -> {literal,I};
literal2(#k_int{val=I}, _) -> {integer,I};
literal2(#k_float{val=F}, _) -> {float,F};
literal2(#k_atom{val=N}, _) -> {atom,N};
%%literal2(#k_char{val=C}, _) -> {char,C};
literal2(#k_nil{}, _) -> nil;
literal2(#k_cons{hd=H,tl=T}, Ctxt) ->
{cons,[literal2(H, Ctxt),literal2(T, Ctxt)]};
literal2(#k_binary{segs=V}, Ctxt) ->
{binary,literal2(V, Ctxt)};
literal2(#k_bin_seg{size=S,unit=U,type=T,flags=Fs,seg=Seg,next=[]}, Ctxt) ->
{bin_seg,Ctxt,literal2(S, Ctxt),U,T,Fs,[literal2(Seg, Ctxt)]};
literal2(#k_bin_seg{size=S,unit=U,type=T,flags=Fs,seg=Seg,next=N}, Ctxt) ->
{bin_seg,Ctxt,literal2(S, Ctxt),U,T,Fs,
[literal2(Seg, Ctxt),literal2(N, Ctxt)]};
literal2(#k_bin_int{size=S,unit=U,flags=Fs,val=Int,next=N}, Ctxt) ->
{bin_int,Ctxt,literal2(S, Ctxt),U,Fs,Int,
[literal2(N, Ctxt)]};
literal2(#k_bin_end{}, Ctxt) ->
{bin_end,Ctxt};
literal2(#k_tuple{es=Es}, Ctxt) ->
{tuple,literal_list2(Es, Ctxt)};
literal2(#k_map{op=Op,es=Es}, Ctxt) ->
{map,Op,literal_list2(Es, Ctxt)};
literal2(#k_map_pair{key=K,val=V}, Ctxt) ->
{map_pair,literal2(K, Ctxt),literal2(V, Ctxt)}.
%% Convert every element with literal2/2 under the same context.
literal_list2(Klits, Ctxt) ->
    lists:map(fun(Klit) -> literal2(Klit, Ctxt) end, Klits).
%% literal_bin(#k_bin_seg{size=S,unit=U,type=T,flags=Fs,seg=Seg,next=N}) ->
%% {bin_seg,literal(S),U,T,Fs,[literal(Seg),literal(N)]}
%% is_gc_bif(Name, Arity) -> true|false
%%  Determines whether the BIF Name/Arity might do a GC.  A fixed set
%%  of BIFs is known never to garbage collect, as are boolean
%%  operators, type tests and comparison operators; everything else is
%%  conservatively assumed to be able to GC.
is_gc_bif(Name, Arity) ->
    NoGc = [{hd,1}, {tl,1}, {self,0}, {node,0}, {node,1},
            {element,2}, {get,1}, {raise,2}, {tuple_size,1}],
    case lists:member({Name, Arity}, NoGc) of
        true ->
            false;
        false ->
            not (erl_internal:bool_op(Name, Arity) orelse
                 erl_internal:new_type_test(Name, Arity) orelse
                 erl_internal:comp_op(Name, Arity))
    end.
%% new_var(VarName, I, Vdb) -> Vdb.
%% new_vars([VarName], I, Vdb) -> Vdb.
%% use_var(VarName, I, Vdb) -> Vdb.
%% use_vars([VarName], I, Vdb) -> Vdb.
%% add_var(VarName, F, L, Vdb) -> Vdb.
%% A newly created variable is born and (so far) dies at the same
%% index; later uses extend its lifetime via use_var/3.
new_var(V, I, Vdb) ->
vdb_store_new(V, I, I, Vdb).
new_vars(Vs, I, Vdb0) ->
foldl(fun (V, Vdb) -> new_var(V, I, Vdb) end, Vdb0, Vs).
%% Extend the lifetime of V to index I; an unknown variable gets a
%% fresh entry born at I.
use_var(V, I, Vdb) ->
case vdb_find(V, Vdb) of
{V,F,L} when I > L -> vdb_update(V, F, I, Vdb);
{V,_,_} -> Vdb;
error -> vdb_store_new(V, I, I, Vdb)
end.
use_vars([], _, Vdb) -> Vdb;
use_vars([V], I, Vdb) -> use_var(V, I, Vdb);
use_vars(Vs, I, Vdb) ->
%% use_vars_1/3 requires its first argument to be sorted.
Res = use_vars_1(sort(Vs), Vdb, I),
%% The following line can be used as an assertion.
%% Res = foldl(fun (V, Vdb) -> use_var(V, I, Vdb) end, Vdb, Vs),
Res.
%% Measurements show that it is worthwhile having this special
%% function that updates/inserts several variables at once.
%% Merge the sorted list of used variables into the sorted Vdb in a
%% single pass, inserting new entries and extending the last-use index
%% of existing ones.
use_vars_1([V|_]=Vs, [{V1,_,_}=Vd|Vdb], I) when V > V1 ->
[Vd|use_vars_1(Vs, Vdb, I)];
use_vars_1([V|Vs], [{V1,_,_}|_]=Vdb, I) when V < V1 ->
%% New variable.
[{V,I,I}|use_vars_1(Vs, Vdb, I)];
use_vars_1([V|Vs], [{_,F,L}=Vd|Vdb], I) ->
%% Existing variable.
if
I > L ->[{V,F,I}|use_vars_1(Vs, Vdb, I)];
true -> [Vd|use_vars_1(Vs, Vdb, I)]
end;
use_vars_1([V|Vs], [], I) ->
%% New variable.
[{V,I,I}|use_vars_1(Vs, [], I)];
use_vars_1([], Vdb, _) -> Vdb.
%% Insert a brand-new entry with an explicit first/last lifetime.
add_var(V, F, L, Vdb) ->
vdb_store_new(V, F, L, Vdb).
%% Look up the {Var,First,Last} lifetime entry for V, or 'error'.
vdb_find(V, Vdb) ->
    %% Performance note: profiling showed this function accounts for a
    %% lot of the execution time when huge constant terms are built;
    %% the lists:keyfind/3 BIF is much faster than a hand-written scan.
    case lists:keyfind(V, 1, Vdb) of
        false -> error;
        Entry -> Entry
    end.
%vdb_find(V, [{V1,F,L}=Vd|Vdb]) when V < V1 -> error;
%vdb_find(V, [{V1,F,L}=Vd|Vdb]) when V == V1 -> Vd;
%vdb_find(V, [{V1,F,L}=Vd|Vdb]) when V > V1 -> vdb_find(V, Vdb);
%vdb_find(V, []) -> error.
%% Replace the lifetime entry for an existing variable in the sorted
%% database; entries for smaller keys are copied unchanged.
vdb_update(Var, First, Last, [{Cur,_,_}=Entry|Rest]) when Var > Cur ->
    [Entry|vdb_update(Var, First, Last, Rest)];
vdb_update(Var, First, Last, [{Cur,_,_}|Rest]) when Var == Cur ->
    [{Var,First,Last}|Rest].
%% Insert a fresh {Var,First,Last} entry at its sorted position; the
%% variable is assumed not to be present yet.
vdb_store_new(Var, First, Last, [{Cur,_,_}=Entry|Rest]) when Var > Cur ->
    [Entry|vdb_store_new(Var, First, Last, Rest)];
vdb_store_new(Var, First, Last, [{Cur,_,_}|_]=Vdb) when Var < Cur ->
    [{Var,First,Last}|Vdb];
vdb_store_new(Var, First, Last, []) ->
    [{Var,First,Last}].
%% vdb_sub(Min, Max, Vdb) -> Vdb.
%%  Keep only variables live across Min (first use before Min, last use
%%  at or after Min).  Entries still alive at Max are marked 'locked'
%%  so that nested processing cannot shorten their lifetime.
vdb_sub(Min, Max, Vdb) ->
    [case Last >= Max of
         true -> {Var, First, locked};
         false -> Entry
     end || {Var, First, Last} = Entry <- Vdb, First < Min, Last >= Min].
%% is_in_guard() -> true|false.
%%  True while guard_clause/5 has bumped the 'guard_refc' counter in
%%  the process dictionary (i.e. we are converting a guard expression).
%%  Fix: removed dataset-extraction residue fused onto the final line.
is_in_guard() ->
    get(guard_refc) > 0.
%% @author <NAME> <<EMAIL>>
%% @copyright 2012 <NAME>
%% Copyright 2012 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(filter_survey_prepare_matching).
-export([
survey_prepare_matching/2,
split_markers/1
]).
%% Build the template arguments for a "matching" survey question block:
%% parse the question/answer pairs from the block's 'matching' text,
%% optionally shuffle the pairs, number them, and always randomize the
%% answer options.
survey_prepare_matching(Blk, Context) ->
Matching = z_trans:lookup_fallback(
proplists:get_value(matching, Blk, <<>>),
Context),
%% One "Question = Answer" pair per non-empty line.
Pairs = maybe_randomize(
z_convert:to_bool(proplists:get_value(is_random, Blk)),
[ split_option(Line) || Line <- split_lines(Matching) ]),
{Qs,As} = lists:unzip(Pairs),
Qs1 = split_markers(Qs),
As1 = split_markers(As),
case z_convert:to_bool(proplists:get_value(is_test, Blk)) of
true ->
[
{is_test, true},
{is_test_direct, z_convert:to_bool(proplists:get_value(is_test_direct, Blk))},
{items, Qs1},
{options, z_utils:randomize(As1)}
];
false ->
[
{is_test, false},
{items, Qs1},
{options, z_utils:randomize(As1)}
]
end.
%% Shuffle the list only when the block asked for random order.
maybe_randomize(true, Items) -> z_utils:randomize(Items);
maybe_randomize(false, Items) -> Items.
%% Split the matching definition into one trimmed string per line;
%% string:tokens/2 drops empty lines.
split_lines(Text) ->
Options = string:tokens(z_string:trim(z_convert:to_list(Text)), "\n"),
[ z_string:trim(Option) || Option <- Options ].
%% Split "Question = Answer" on the first '=' into a trimmed
%% {Question, Answer} pair.
split_option(Option) ->
{Q,M} = lists:splitwith(fun(C) -> C /= $= end, Option),
{z_string:trim(Q), z_string:trim(drop_eq(M))}.
%% Remove a single leading '=' character, if present.
drop_eq(Value) ->
    case Value of
        [$= | Rest] -> Rest;
        _ -> Value
    end.
%% Number every non-empty option, honouring explicit "value#" markers.
split_markers(Options) ->
    number_options(Options, 1, []).

number_options([], _Index, Acc) ->
    lists:reverse(Acc);
number_options([[] | Rest], Index, Acc) ->
    %% Blank entries are dropped and do not advance the numbering.
    number_options(Rest, Index, Acc);
number_options([Option | Rest], Index, Acc) ->
    number_options(Rest, Index + 1, [split_marker(Option, Index) | Acc]).
%% A leading "value#" marker supplies an explicit value; otherwise the
%% 1-based position N becomes the value.
split_marker(X, N) ->
case split_kv(z_convert:to_binary(X)) of
[Opt] -> {z_convert:to_binary(N), Opt};
[Val,Opt] -> {Val,Opt}
end.
%% split_kv(Line) -> [Option] | [Value, Option].
%%  Split a binary on the first "#" into a value part and an option
%%  part.  The HTML-entity prefix "&#" is never treated as a separator,
%%  so entities such as "&#39;" survive inside the text.
%%  Fix: removed dataset-extraction residue fused onto the final line.
split_kv(Line) ->
    split_kv(Line, <<>>).

split_kv(<<>>, Acc) ->
    [Acc];
split_kv(<<"&#", Rest/binary>>, Acc) ->
    %% Keep HTML character entities intact.
    split_kv(Rest, <<Acc/binary, "&#">>);
split_kv(<<"#", Rest/binary>>, Acc) ->
    [Acc, Rest];
split_kv(<<C/utf8, Rest/binary>>, Acc) ->
    split_kv(Rest, <<Acc/binary, C/utf8>>).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(plumtree_util).
-export([build_tree/3,
log/2, log/3]).
%% @doc Convert a list of elements into an N-ary tree. This conversion
%% works by treating the list as an array-based tree where, for
%% example in a binary 2-ary tree, a node at index i has children
%% 2i and 2i+1. The conversion also supports a "cycles" mode where
%% the array is logically wrapped around to ensure leaf nodes also
%% have children by giving them backedges to other elements.
-spec build_tree(N :: integer(), Nodes :: [term()], Opts :: [term()])
-> orddict:orddict().
%% See the header comment and -spec above: each node takes the next
%% Fanout elements of the (possibly wrapped-around) worklist as its
%% children.
build_tree(Fanout, Nodes, Opts) ->
    Expanded =
        case lists:member(cycles, Opts) of
            true ->
                %% Logically wrap the array so leaves get backedges.
                lists:flatten(lists:duplicate(Fanout + 1, Nodes));
            false ->
                Nodes
        end,
    AssignChildren =
        fun(Node, {TreeAcc, Remaining}) ->
                Take = erlang:min(Fanout, length(Remaining)),
                {Children, Rest} = lists:split(Take, Remaining),
                {[{Node, Children} | TreeAcc], Rest}
        end,
    {Tree, _Leftover} = lists:foldl(AssignChildren, {[], tl(Expanded)}, Nodes),
    orddict:from_list(Tree).
-spec log(debug | info | error,
String :: string(),
Args :: list(term())) -> ok.
-ifdef(TEST).
%% In test builds, forward to the standard 'logger' at the matching
%% severity level.
log(Level, String) ->
log(Level, String, []).
log(debug, String, Args) ->
logger:debug(String, Args);
log(info, String, Args) ->
logger:info(String, Args);
log(error, String, Args) ->
logger:error(String, Args).
-else.
%% In production builds, logging from this module is compiled out.
log(_Level, _String) -> ok.
log(_Level, _String, _Args) -> ok.
-endif.
%%
%% Tests
%%
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Exhaustively checks child assignment (no cycles) for fan-outs 1..3
%% over growing node lists.  Table entries are {Fanout, Nodes, Expected}.
arity_test() ->
    Cases =
        [%% 1-ary tree
         {1, [node1], [{node1, []}]},
         {1, [node1, node2], [{node1, [node2]}, {node2, []}]},
         {1, [node1, node2, node3],
          [{node1, [node2]}, {node2, [node3]}, {node3, []}]},
         {1, [node1, node2, node3, node4],
          [{node1, [node2]}, {node2, [node3]}, {node3, [node4]}, {node4, []}]},
         %% 2-ary tree
         {2, [node1], [{node1, []}]},
         {2, [node1, node2], [{node1, [node2]}, {node2, []}]},
         {2, [node1, node2, node3],
          [{node1, [node2, node3]}, {node2, []}, {node3, []}]},
         {2, [node1, node2, node3, node4],
          [{node1, [node2, node3]}, {node2, [node4]}, {node3, []}, {node4, []}]},
         {2, [node1, node2, node3, node4, node5],
          [{node1, [node2, node3]}, {node2, [node4, node5]},
           {node3, []}, {node4, []}, {node5, []}]},
         {2, [node1, node2, node3, node4, node5, node6],
          [{node1, [node2, node3]}, {node2, [node4, node5]}, {node3, [node6]},
           {node4, []}, {node5, []}, {node6, []}]},
         %% 3-ary tree
         {3, [node1], [{node1, []}]},
         {3, [node1, node2], [{node1, [node2]}, {node2, []}]},
         {3, [node1, node2, node3],
          [{node1, [node2, node3]}, {node2, []}, {node3, []}]},
         {3, [node1, node2, node3, node4],
          [{node1, [node2, node3, node4]}, {node2, []}, {node3, []}, {node4, []}]},
         {3, [node1, node2, node3, node4, node5],
          [{node1, [node2, node3, node4]}, {node2, [node5]},
           {node3, []}, {node4, []}, {node5, []}]},
         {3, [node1, node2, node3, node4, node5, node6],
          [{node1, [node2, node3, node4]}, {node2, [node5, node6]},
           {node3, []}, {node4, []}, {node5, []}, {node6, []}]},
         {3, [node1, node2, node3, node4, node5, node6, node7],
          [{node1, [node2, node3, node4]}, {node2, [node5, node6, node7]},
           {node3, []}, {node4, []}, {node5, []}, {node6, []}, {node7, []}]},
         {3, [node1, node2, node3, node4, node5, node6, node7, node8],
          [{node1, [node2, node3, node4]}, {node2, [node5, node6, node7]},
           {node3, [node8]}, {node4, []}, {node5, []}, {node6, []},
           {node7, []}, {node8, []}]}],
    lists:foreach(
      fun({N, Nodes, Expected}) ->
              ?assertEqual(Expected, orddict:to_list(build_tree(N, Nodes, [])))
      end, Cases).
%% Same coverage as arity_test/0 but with the 'cycles' option, so leaf
%% nodes receive backedges.  Table entries are {Fanout, Nodes, Expected}.
cycles_test() ->
    Cases =
        [%% 1-ary tree
         {1, [node1], [{node1, [node1]}]},
         {1, [node1, node2], [{node1, [node2]}, {node2, [node1]}]},
         {1, [node1, node2, node3],
          [{node1, [node2]}, {node2, [node3]}, {node3, [node1]}]},
         {1, [node1, node2, node3, node4],
          [{node1, [node2]}, {node2, [node3]}, {node3, [node4]},
           {node4, [node1]}]},
         %% 2-ary tree
         {2, [node1], [{node1, [node1, node1]}]},
         {2, [node1, node2],
          [{node1, [node2, node1]}, {node2, [node2, node1]}]},
         {2, [node1, node2, node3],
          [{node1, [node2, node3]}, {node2, [node1, node2]},
           {node3, [node3, node1]}]},
         {2, [node1, node2, node3, node4],
          [{node1, [node2, node3]}, {node2, [node4, node1]},
           {node3, [node2, node3]}, {node4, [node4, node1]}]},
         {2, [node1, node2, node3, node4, node5],
          [{node1, [node2, node3]}, {node2, [node4, node5]},
           {node3, [node1, node2]}, {node4, [node3, node4]},
           {node5, [node5, node1]}]},
         {2, [node1, node2, node3, node4, node5, node6],
          [{node1, [node2, node3]}, {node2, [node4, node5]},
           {node3, [node6, node1]}, {node4, [node2, node3]},
           {node5, [node4, node5]}, {node6, [node6, node1]}]},
         %% 3-ary tree
         {3, [node1], [{node1, [node1, node1, node1]}]},
         {3, [node1, node2],
          [{node1, [node2, node1, node2]}, {node2, [node1, node2, node1]}]},
         {3, [node1, node2, node3],
          [{node1, [node2, node3, node1]}, {node2, [node2, node3, node1]},
           {node3, [node2, node3, node1]}]},
         {3, [node1, node2, node3, node4],
          [{node1, [node2, node3, node4]}, {node2, [node1, node2, node3]},
           {node3, [node4, node1, node2]}, {node4, [node3, node4, node1]}]},
         {3, [node1, node2, node3, node4, node5],
          [{node1, [node2, node3, node4]}, {node2, [node5, node1, node2]},
           {node3, [node3, node4, node5]}, {node4, [node1, node2, node3]},
           {node5, [node4, node5, node1]}]},
         {3, [node1, node2, node3, node4, node5, node6],
          [{node1, [node2, node3, node4]}, {node2, [node5, node6, node1]},
           {node3, [node2, node3, node4]}, {node4, [node5, node6, node1]},
           {node5, [node2, node3, node4]}, {node6, [node5, node6, node1]}]}],
    lists:foreach(
      fun({N, Nodes, Expected}) ->
              ?assertEqual(Expected, orddict:to_list(build_tree(N, Nodes, [cycles])))
      end, Cases).
-endif.
-module(vector_basics_SUITE).
-include_lib("eunit/include/eunit.hrl").
-include("erlynum.hrl").
%% A zero-length vector must have a shape whose element count is zero,
%% an empty binary payload and a zero-sized view.
zeros_empty_test() ->
NVector = nvector:zeros(0),
{RowsCount, ColsCount} = NVector#nvector.shape,
TotalCount = RowsCount * ColsCount,
?assertEqual(0, TotalCount),
BinData = NVector#nvector.data,
?assertEqual(0, byte_size(BinData)),
?assertEqual(0, NVector#nvector.view#view_params.size).
%% zeros/1 yields 0.0 entries; with {dtype, c} entries are complex
%% {0.0,0.0} pairs, and integer conversion collapses both to 0.
zeros_nonempty_test() ->
Zeros = nvector:zeros(5),
?assertEqual([0.0, 0.0, 0.0, 0.0, 0.0], nvector:to_list(Zeros)),
?assertEqual([0, 0, 0, 0, 0], nvector:to_list(Zeros, integer)),
ComplexZeros = nvector:zeros(5, [{dtype, c}]),
?assertEqual([{0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0}], nvector:to_list(ComplexZeros)),
?assertEqual([0, 0, 0, 0, 0], nvector:to_list(ComplexZeros, integer)).
%% ones/1 with length 0 behaves like zeros/1: empty shape, no payload,
%% zero-sized view.
ones_empty_test() ->
NVector = nvector:ones(0),
{RowsCount, ColsCount} = NVector#nvector.shape,
TotalCount = RowsCount * ColsCount,
?assertEqual(0, TotalCount),
BinData = NVector#nvector.data,
?assertEqual(0, byte_size(BinData)),
?assertEqual(0, NVector#nvector.view#view_params.size).
%% ones/1 yields 1.0 entries; complex ones are {1.0,0.0} pairs, and
%% integer conversion yields 1 for both.
ones_nonempty_test() ->
Ones = nvector:ones(5),
?assertEqual([1.0, 1.0, 1.0, 1.0, 1.0], nvector:to_list(Ones)),
?assertEqual([1, 1, 1, 1, 1], nvector:to_list(Ones, integer)),
ComplexOnes = nvector:ones(5, [{dtype, c}]),
?assertEqual([{1.0,0.0}, {1.0,0.0}, {1.0,0.0}, {1.0,0.0}, {1.0,0.0}], nvector:to_list(ComplexOnes)),
?assertEqual([1, 1, 1, 1, 1], nvector:to_list(ComplexOnes, integer)).
%% full/2 with length 0 ignores the fill value: empty shape, no
%% payload, zero-sized view.
full_empty_test() ->
NVector = nvector:full(0, 123),
{RowsCount, ColsCount} = NVector#nvector.shape,
TotalCount = RowsCount * ColsCount,
?assertEqual(0, TotalCount),
BinData = NVector#nvector.data,
?assertEqual(0, byte_size(BinData)),
?assertEqual(0, NVector#nvector.view#view_params.size).
%% full/2 replicates the given scalar: real fills come back as floats,
%% {Re,Im} tuples as complex pairs, with 'real'/'integer' conversions.
%% Fix: removed dataset-extraction residue fused onto the final line.
full_nonempty_test() ->
    Full = nvector:full(5, 123),
    ?assertEqual([123.0, 123.0, 123.0, 123.0, 123.0], nvector:to_list(Full)),
    ?assertEqual([123, 123, 123, 123, 123], nvector:to_list(Full, integer)),
    ComplexFull = nvector:full(3, {123,456}),
    ?assertEqual([{123.0,456.0}, {123.0,456.0}, {123.0,456.0}], nvector:to_list(ComplexFull)),
    ?assertEqual([123.0, 123.0, 123.0], nvector:to_list(ComplexFull, real)),
    ?assertEqual([123, 123, 123], nvector:to_list(ComplexFull, integer)).
%%%-------------------------------------------------------------------
%%% @doc
%%% This module provides the public API for eflambe. These public functions are
%%% intended to be invoked by the end user to perform profiling of their
%%% application.
%%% @end
%%%-------------------------------------------------------------------
-module(eflambe).
%% Application callbacks
-export([capture/1, capture/2, capture/3,
apply/1, apply/2]).
-type mfa_fun() :: {atom(), atom(), list()} | fun().
-type program() :: hotspot | speedscope.
-type option() :: {output_directory, binary()} | {output_format, brendan_gregg} | {open, program()}.
-type options() :: [option()].
-define(FLAGS, [call, return_to, running, procs, garbage_collection, arity,
timestamp, set_on_spawn]).
%%--------------------------------------------------------------------
%% @doc
%% Starts capturing function call data for any invocation of the
%% specified MFA, generating a flamegraph of the traced call in the
%% current process.
%%
%% @end
%%--------------------------------------------------------------------
-spec capture(MFA :: mfa()) -> ok.
%% Convenience wrapper: capture a single call of MFA.
capture(MFA) ->
capture(MFA, 1).
-spec capture(MFA :: mfa(), NumCalls :: pos_integer()) -> ok.
%% Convenience wrapper: capture NumCalls calls with default options.
capture(MFA, NumCalls) ->
capture(MFA, NumCalls, []).
-spec capture(MFA :: mfa(), NumCalls :: pos_integer(), Options :: options()) -> ok.
capture({Module, Function, Arity}, NumCalls, Options)
when is_atom(Module), is_atom(Function), is_integer(Arity) ->
%% Install a passthrough meck mock so calls can be intercepted.
ok = meck:new(Module, [unstick, passthrough]),
TraceId = setup_for_trace(),
%% The shim starts a trace (if one is not already running), calls
%% through to the original code, and stops the trace only if it was
%% the one that started it (recursive calls must not stop it).
ShimmedFunction = fun(Args) ->
{Trace, StartedNew} = start_trace(TraceId, NumCalls, [{meck, Module}|Options]),
% Invoke the original function
Results = meck:passthrough(Args),
case StartedNew of
true ->
stop_trace(Trace);
false ->
ok
end,
Results
end,
MockFun = gen_mock_fun(Arity, ShimmedFunction),
% Replace the original function with our new function that wraps the old
% function in profiling code.
meck:expect(Module, Function, MockFun).
%%--------------------------------------------------------------------
%% @doc
%% Traces the execution of the function passed in and generates a
%% flamegraph of the function call.
%%
%% @end
%%--------------------------------------------------------------------
-spec apply(Function :: mfa_fun()) -> any().
%% Trace a single application with default options.
apply(Function) ->
?MODULE:apply(Function, []).
-spec apply(Function :: mfa_fun(), Options :: options()) -> any().
%% Trace a single application of either an {M,F,Args} triple or a
%% {Fun,Args} pair, returning the result of the call.
%% Fix: the two clauses duplicated the whole setup/trace/teardown
%% sequence; it now lives in one shared helper.
apply({Module, Function, Args}, Options)
  when is_atom(Module), is_atom(Function), is_list(Args) ->
    trace_apply(fun() -> erlang:apply(Module, Function, Args) end, Options);
apply({Function, Args}, Options) when is_function(Function), is_list(Args) ->
    trace_apply(fun() -> erlang:apply(Function, Args) end, Options).

%% Shared tracing wrapper: set up, trace exactly one call of the thunk,
%% stop the trace and return the thunk's result.  As before, the trace
%% is not stopped if the traced call throws.
-spec trace_apply(fun(() -> any()), options()) -> any().
trace_apply(Invoke, Options) ->
    TraceId = setup_for_trace(),
    {Trace, _StartedNew} = start_trace(TraceId, 1, Options),
    %% Invoke the original function under trace.
    Results = Invoke(),
    stop_trace(Trace),
    Results.
%%%===================================================================
%%% Internal functions
%%%===================================================================
-spec start_trace(TraceId :: any(), NumCalls :: pos_integer(), Options :: list()) ->
{reference(), boolean()}.
%% Ask the eflambe server whether this trace should run; when it says
%% yes, enable call tracing for the current process with the server's
%% tracer.  Returns {TraceId, StartedNewTrace}.
start_trace(TraceId, NumCalls, Options) ->
case eflambe_server:start_trace(TraceId, NumCalls, Options) of
{ok, TraceId, true, Tracer} ->
%% Attach the caller to every call as the trace message.
MatchSpec = [{'_', [], [{message, {{cp, {caller}}}}]}],
erlang:trace_pattern(on_load, MatchSpec, [local]),
erlang:trace_pattern({'_', '_', '_'}, MatchSpec, [local]),
erlang:trace(self(), true, [{tracer, Tracer} | ?FLAGS]),
{TraceId, true};
{ok, TraceId, false, _Tracer} ->
% Trace is already running or has already finished. Or this could
% be a recursive function call. We do not need to do anything.
{TraceId, false}
end.
-spec stop_trace(reference()) -> ok.
%% Disable all tracing for the calling process and tell the server the
%% trace is finished.
stop_trace(Trace) ->
erlang:trace(self(), false, [all]),
{ok, _} = eflambe_server:stop_trace(Trace),
ok.
%% Ensure the eflambe application and its server are running, then mint
%% the unique reference that identifies this trace.
setup_for_trace() ->
    _ = application:ensure_all_started(eflambe),
    _ = eflambe_sup:get_or_start_server(),
    % All traces must have a unique ref so we can keep track of them
    make_ref().
% This function dynamically generates a function of a specified arity that
% invokes `Function` with the list of all the arguments.
% https://stackoverflow.com/questions/69244814/erlang-generate-anonymous-function-of-an-arbitary-arity
%
% Fixes vs. previous version: stray non-Erlang residue after the final
% period (a syntax error) removed, and annotations are now built
% consistently via erl_anno instead of mixing in raw integers.
-spec gen_mock_fun(non_neg_integer(), function()) -> function().
gen_mock_fun(Arity, Function) when is_function(Function) ->
    Anno = erl_anno:new(1),
    % One synthetic variable name per argument: 'X1', 'X2', ...
    ParamVars = [list_to_atom([$X | integer_to_list(I)]) || I <- lists:seq(1, Arity)],
    % Formal parameters of the generated fun clause.
    Params = [{var, Anno, Var} || Var <- ParamVars],
    % Abstract form of the list [X1, X2, ...] handed to Function.
    ParamsList = lists:foldl(fun(Elem, Acc) ->
                                     {cons, Anno, {var, Anno, Elem}, Acc}
                             end, {nil, Anno}, lists:reverse(ParamVars)),
    FunctionCall = {call, Anno, {var, Anno, 'Function'}, [ParamsList]},
    Expr =
        {'fun',
         Anno,
         {clauses, [{clause, Anno, Params, [], [FunctionCall]}]}},
    % Evaluate the abstract fun with 'Function' bound in the environment.
    {value, Fun, _Vars} = erl_eval:expr(Expr, [{'Function', Function}]),
    Fun.
%% -------------------------------------------------------------------
%%
%% Machi: a small village of replicated files
%%
%% Copyright (c) 2014-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(machi_chain_manager1_converge_demo).
-include("machi.hrl").
-include("machi_projection.hrl").
-define(MGR, machi_chain_manager1).
-define(D(X), io:format(user, "~s ~p\n", [??X, X])).
-define(Dw(X), io:format(user, "~s ~w\n", [??X, X])).
-define(FLU_C, machi_flu1_client).
-define(FLU_PC, machi_proxy_flu1_client).
-compile(export_all).
-ifdef(TEST).
-ifndef(PULSE).
-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
%% -include_lib("eqc/include/eqc_statem.hrl").
-define(QC_OUT(P),
eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)).
-endif.
-include_lib("eunit/include/eunit.hrl").
%% Print the user-facing documentation for this demo to the console.
help() ->
    io:format("~s~n", [short_doc()]).
%% Return the multi-line usage/interpretation guide (a plain string)
%% that help/0 prints. The string itself is the documentation; do not
%% edit its content without checking how it renders on a console.
short_doc() ->
    "
A visualization of the convergence behavior of the chain self-management
algorithm for Machi.
1. Set up some server and chain manager pairs.
2. Create a number of different network partition scenarios, where
(simulated) partitions may be symmetric or asymmetric. Then stop changing
the partitions and keep the simulated network stable (and perhaps broken).
3. Run a number of iterations of the algorithm in parallel by poking each
of the manager processes on a random'ish basis.
4. Afterward, fetch the chain transition changes made by each FLU and
verify that no transition was unsafe.
During the iteration periods, the following is a cheatsheet for the output.
See the internal source for interpreting the rest of the output.
'SET partitions = '
A pair-wise list of actors which cannot send messages. The
list is uni-directional. If there are three servers (a,b,c),
and if the partitions list is '[{a,b},{b,c}]' then all
messages from a->b and b->c will be dropped, but any other
sender->recipient messages will be delivered successfully.
'x uses:'
The FLU x has made an internal state transition and is using
this epoch's projection as operating chain configuration. The
rest of the line is a summary of the projection.
'CONFIRM epoch {N}'
This message confirms that all of the servers listed in the
UPI and repairing lists of the projection at epoch {N} have
agreed to use this projection because they all have written
this projection to their respective private projection stores.
The chain is now usable by/available to all clients.
'Sweet, private projections are stable'
This report announces that this iteration of the test cycle
has passed successfully. The report that follows briefly
summarizes the latest private projection used by each
participating server. For example, when in strong consistency
mode with 'a' as a witness and 'b' and 'c' as real servers:
%% Legend:
%% server name, epoch ID, UPI list, repairing list, down list, ...
%% ... witness list, 'false' (a constant value)
[{a,{{1116,<<23,143,246,55>>},[a,b],[],[c],[a],false}},
{b,{{1116,<<23,143,246,55>>},[a,b],[],[c],[a],false}}]
Both servers 'a' and 'b' agree on epoch 1116 with epoch ID
{1116,<<23,143,246,55>>} where UPI=[a,b], repairing=[],
down=[c], and witnesses=[a].
Server 'c' is not shown because 'c' has wedged itself OOS (out
of service) by configuring a chain length of zero.
If no servers are listed in the report (i.e. only '[]' is
displayed), then all servers have wedged themselves OOS, and
the chain is unavailable.
'DoIt,'
This marks a group of tick events which trigger the manager
processes to evaluate their environment and perhaps make a
state transition.
A long chain of 'DoIt,DoIt,DoIt,' means that the chain state has
(probably) settled to a stable configuration, which is the goal of the
algorithm.
Press control-c to interrupt the test....".
%% ' silly Emacs syntax highlighting....
%% convergence_demo_test_() ->
%% {timeout, 98*300, fun() -> convergence_demo_testfun() end}.
%% convergence_demo_testfun() ->
%% convergence_demo_testfun(3).
-define(DEFAULT_MGR_OPTS, [{private_write_verbose, false},
{private_write_verbose_confirm, true},
{active_mode,false},
{use_partition_simulator, true}]).
%% Interactive entry points for running the convergence demo:
%% t/0 uses three FLUs, t/1 picks the FLU count, t/2 also takes
%% chain-manager options (merged with ?DEFAULT_MGR_OPTS by t/1).
t() ->
    t(3).

t(NumFLUs) ->
    t(NumFLUs, ?DEFAULT_MGR_OPTS).

t(NumFLUs, MgrOpts) ->
    convergence_demo_testfun(NumFLUs, MgrOpts).
%% @doc Run the full convergence demo: start NumFLUs FLUs plus their
%% chain managers, drive them through a series of simulated network
%% partitions, and after each round verify that the private projections
%% converge to a sane, stable chain configuration.
%%
%% BUGFIX: the cleanup in the 'after' clause used to call
%% application:start/1 (a no-op for already-running apps); it now calls
%% application:stop/1 as clearly intended, in reverse start order.
convergence_demo_testfun(NumFLUs, MgrOpts0) ->
    %% Faster test startup, commented: io:format(user, short_doc(), []),
    %% Faster test startup, commented: timer:sleep(3000),
    Apps = [sasl, ranch],
    [application:start(App) || App <- Apps],
    MgrOpts = MgrOpts0 ++ ?DEFAULT_MGR_OPTS,
    TcpPort = proplists:get_value(port_base, MgrOpts, 62877),
    TDir = proplists:get_value(tmp_dir, MgrOpts, "/tmp/c"),
    ok = filelib:ensure_dir(TDir ++ "/not-used"),
    _ = os:cmd("rm -rf " ++ TDir ++ "/*"),
    FluInfo = [
        {a,TcpPort+0,TDir++"/data.a"}, {b,TcpPort+1,TDir++"/data.b"},
        {c,TcpPort+2,TDir++"/data.c"}, {d,TcpPort+3,TDir++"/data.d"},
        {e,TcpPort+4,TDir++"/data.e"}, {f,TcpPort+5,TDir++"/data.f"},
        {g,TcpPort+6,TDir++"/data.g"}, {h,TcpPort+7,TDir++"/data.h"},
        {i,TcpPort+8,TDir++"/data.i"}, {j,TcpPort+9,TDir++"/data.j"},
        {k,TcpPort+10,TDir++"/data.k"}, {l,TcpPort+11,TDir++"/data.l"},
        {m,TcpPort+12,TDir++"/data.m"}, {n,TcpPort+13,TDir++"/data.n"},
        {o,TcpPort+14,TDir++"/data.o"}, {p,TcpPort+15,TDir++"/data.p"},
        {q,TcpPort+16,TDir++"/data.q"}, {r,TcpPort+17,TDir++"/data.r"}
    ],
    FLU_biglist = [X || {X,_,_} <- FluInfo],
    All_list = lists:sublist(FLU_biglist, NumFLUs),
    io:format(user, "\nSET # of FLUs = ~w members ~w).\n",
              [NumFLUs, All_list]),
    machi_partition_simulator:start_link({111,222,33}, 0, 100),
    _ = machi_partition_simulator:get(All_list),
    Ps = [#p_srvr{name=Name,address="localhost",port=Port,
                  props=[{data_dir,Dir}|MgrOpts]} ||
             {Name,Port,Dir} <- lists:sublist(FluInfo, NumFLUs)],
    {ok, SupPid} = machi_flu_sup:start_link(),
    [{ok, _} = machi_flu_psup:start_flu_package(P) || P <- Ps],
    Namez = [begin
                 {ok, PPid} = ?FLU_PC:start_link(P),
                 {Name, PPid}
             end || #p_srvr{name=Name}=P <- Ps],
    MembersDict = machi_projection:make_members_dict(Ps),
    Witnesses = proplists:get_value(witnesses, MgrOpts, []),
    %% Any configured witness forces strong consistency mode.
    CMode = case {Witnesses, proplists:get_value(consistency_mode, MgrOpts,
                                                 ap_mode)} of
                {[_|_], _} -> cp_mode;
                {_, cp_mode} -> cp_mode;
                {_, ap_mode} -> ap_mode
            end,
    MgrNamez = [begin
                    MgrName = machi_flu_psup:make_mgr_supname(Name),
                    ok = ?MGR:set_chain_members(MgrName, ch_demo, 0, CMode,
                                                MembersDict,Witnesses),
                    {Name, MgrName}
                end || #p_srvr{name=Name} <- Ps],
    try
        [{_, Ma}|_] = MgrNamez,
        {ok, P1} = ?MGR:test_calc_projection(Ma, false),
        [_ = ?FLU_PC:write_projection(FLUPid, public, P1) ||
            {_, FLUPid} <- Namez, FLUPid /= Ma],
        machi_partition_simulator:reset_thresholds(10, 50),
        _ = machi_partition_simulator:get(All_list),
        Parent = self(),
        %% DoIt pokes every manager Iters times from parallel worker
        %% processes, with a randomized per-manager sleep between pokes,
        %% and dumps backtraces of interesting processes on timeout.
        DoIt = fun(Iters, S_min, S_max) ->
                       %% io:format(user, "\nDoIt: top\n\n", []),
                       io:format(user, "DoIt, ", []),
                       Pids = [{spawn(fun() ->
                                              random:seed(now()),
                                              [begin
                                                   erlang:yield(),
                                                   perhaps_adjust_pstore_sleep(),
                                                   S_max_rand = random:uniform(
                                                                  S_max + 1),
                                                   %% io:format(user, "{t}", []),
                                                   Elapsed =
                                                       ?MGR:sleep_ranked_order(
                                                          S_min, S_max_rand,
                                                          M_name, All_list),
                                                   _ = (catch ?MGR:trigger_react_to_env(MMM)),
                                                   %% Be more unfair by not
                                                   %% sleeping here.
                                                   % timer:sleep(S_max - Elapsed),
                                                   Elapsed
                                               end || _ <- lists:seq(1, Iters)],
                                              Parent ! {done, self()}
                                      end), M_name} || {M_name, MMM} <- MgrNamez ],
                       [receive
                            {done, ThePid} ->
                                ok
                        after 120*1000 ->
                                [begin
                                     case whereis(XX) of
                                         undefined -> ok;
                                         XXPid -> {_, XXbin} = process_info(XXPid, backtrace),
                                                  {_, XXdict} = process_info(XXPid, dictionary),
                                                  TTT = proplists:get_value(ttt, XXdict),
                                                  io:format(user, "BACK ~w: ttt=~w\n~s\n", [XX, TTT, XXbin])
                                     end
                                 end || XX <- [file_server_2] ++
                                     [a_chmgr,b_chmgr,c_chmgr,d_chmgr,e_chmgr,f_chmgr,g_chmgr,h_chmgr,i_chmgr,j_chmgr] ++
                                     [a_pstore,b_pstore,c_pstore,d_pstore,e_pstore,f_pstore,g_pstore,h_pstore,i_pstore,j_pstore] ++
                                     [a_fitness,b_fitness,c_fitness,d_fitness,e_fitness,f_fitness,g_fitness,h_fitness,i_fitness,j_fitness] ],
                                [begin
                                     [begin
                                          case whereis(XX) of
                                              undefined -> ok;
                                              XXPid -> {_, XXbin} = process_info(XXPid, backtrace),
                                                       io:format(user, "BACK ~w: ~w\n~s\n", [XX, time(), XXbin])
                                          end
                                      end || XX <- [a_pstore,b_pstore,c_pstore,d_pstore,e_pstore,f_pstore,g_pstore,h_pstore,i_pstore,j_pstore] ],
                                     timer:sleep(20)
                                 end || _ <- lists:seq(1,30)],
                                exit({icky_timeout, M_name})
                        end || {ThePid,M_name} <- Pids]
               end,
        %% machi_partition_simulator:reset_thresholds(10, 50),
        %% io:format(user, "\nLet loose the dogs of war!\n", []),
        %% [DoIt(20, 0, 0) || _ <- lists:seq(1,9)],
        %% %% io:format(user, "\nVariations of puppies and dogs of war!\n", []),
        %% %% [begin
        %% %%      machi_partition_simulator:reset_thresholds(90, 90),
        %% %%      DoIt(7, 0, 0),
        %% %%      machi_partition_simulator:always_these_partitions([]),
        %% %%      DoIt(7, 0, 0)
        %% %%  end || _ <- lists:seq(1, 3)],
        machi_partition_simulator:always_these_partitions([]),
        io:format(user, "\nPuppies for everyone!\n", []),
        [DoIt(20, 0, 0) || _ <- lists:seq(1,9)],
        AllPs = make_partition_list(All_list),
        PartitionCounts = lists:zip(AllPs, lists:seq(1, length(AllPs))),
        MaxIters = NumFLUs * (NumFLUs + 1) * 6,
        [begin
             machi_partition_simulator:always_these_partitions(Partition),
             io:format(user, "\n~s SET partitions = ~w (~w of ~w)\n",
                       [machi_util:pretty_time(), Partition, Count, length(AllPs)]),
             true = lists:foldl(
                      fun(_, true) ->
                              true;
                         (_, _) ->
                              %% Run a few iterations
                              [DoIt(10, 10, 50) || _ <- lists:seq(1, 6)],
                              %% If stable, return true to short circuit remaining
                              private_projections_are_stable(Namez, DoIt)
                      end, false, lists:seq(0, MaxIters)),
             io:format(user, "\n~s Sweet, private projections are stable\n", [machi_util:pretty_time()]),
             io:format(user, "\t~P\n", [get(stable), 24]),
             io:format(user, "Rolling sanity check ... ", []),
             PrivProjs = [{Name, begin
                                     {ok, Ps8} = ?FLU_PC:get_all_projections(
                                                    FLU, private, infinity),
                                     [P || P <- Ps8,
                                           P#projection_v1.epoch_number /= 0]
                                 end} || {Name, FLU} <- Namez],
             try
                 [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane_retrospective(Psx, FLU)} ||
                     {FLU, Psx} <- PrivProjs]
             catch
                 _Err:_What when CMode == cp_mode ->
                     io:format(user, "none proj skip detected, TODO? ", []);
                 _Err:_What when CMode == ap_mode ->
                     io:format(user, "PrivProjs ~p\n", [PrivProjs]),
                     exit({line, ?LINE, _Err, _What})
             end,
             io:format(user, "Yay!\n", []),
             %% io:format(user, "\n\nEXITING!\n\n", []), timer:sleep(500), erlang:halt(0),
             ReportXX = machi_chain_manager1_test:unanimous_report(Namez),
             true = machi_chain_manager1_test:all_reports_are_disjoint(ReportXX),
             io:format(user, "Yay for ReportXX!\n", []),
             timer:sleep(1234),
             %% TODO: static count is not sufficient.  Must not delete the last
             %% private_proj_is_upi_unanimous projection!
             MaxFiles = 123,
             [begin
                  Privs = filelib:wildcard(Dir ++ "/projection/private/*"),
                  FilesToDel1 = lists:sublist(Privs,
                                              max(0, length(Privs)-MaxFiles)),
                  [_ = file:delete(File) || File <- FilesToDel1],
                  Pubs = filelib:wildcard(Dir ++ "/projection/public/*"),
                  FilesToDel2 = lists:sublist(Pubs,
                                              max(0, length(Pubs)-MaxFiles)),
                  [_ = file:delete(File) || File <- FilesToDel2],
                  io:format(user, "Yay, now prune: ~w ~w, ", [length(FilesToDel1), length(FilesToDel2)])
              end || Dir <- filelib:wildcard(TDir ++ "/data*")],
             io:format(user, "\n", []),
             timer:sleep(1250),
             ok
         end || {Partition, Count} <- PartitionCounts
        ],
        io:format(user, "\nSET partitions = []\n", []),
        io:format(user, "We should see convergence to 1 correct chain.\n", []),
        machi_partition_simulator:no_partitions(),
        [DoIt(50, 10, 50) || _ <- [1,2,3]],
        true = private_projections_are_stable(Namez, DoIt),
        io:format(user, "~s\n", [os:cmd("date")]),
        %% We are stable now ... analyze it.
        %% Create a report where at least one FLU has written a
        %% private projection.
        Report = machi_chain_manager1_test:unanimous_report(Namez),
        %% ?D(Report),
        %% Report is ordered by Epoch.  For each private projection
        %% written during any given epoch, confirm that all chain
        %% members appear in only one unique chain, i.e., the sets of
        %% unique chains are disjoint.
        true = machi_chain_manager1_test:all_reports_are_disjoint(Report),
        %% io:format(user, "\nLast Reports: ~p\n", [lists:nthtail(length(Report)-8,Report)]),
        %% For each chain transition experienced by a particular FLU,
        %% confirm that each state transition is OK.
        PrivProjs = [{Name, begin
                                {ok, Ps9} = ?FLU_PC:get_all_projections(FLU,
                                                                        private),
                                [P || P <- Ps9,
                                      P#projection_v1.epoch_number /= 0]
                            end} || {Name, FLU} <- Namez],
        try
            [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane_retrospective(Psx, FLU)} ||
                {FLU, Psx} <- PrivProjs],
            io:format(user, "\nAll sanity checks pass, hooray!\n", [])
        catch
            _Err:_What when CMode == cp_mode ->
                io:format(user, "none proj skip detected, TODO? ", []);
            _Err:_What when CMode == ap_mode ->
                io:format(user, "Report ~p\n", [Report]),
                io:format(user, "PrivProjs ~p\n", [PrivProjs]),
                exit({line, ?LINE, _Err, _What})
        end,
        %% ?D(R_Projs),
        ok
    catch
        XX:YY ->
            %% NOTE(review): erlang:get_stacktrace/0 was removed in
            %% OTP 21; this module appears to target older OTP releases,
            %% so it is left as-is here.
            io:format(user, "BUMMER ~p ~p @ ~p\n",
                      [XX, YY, erlang:get_stacktrace()]),
            exit({bummer,XX,YY})
    after
        exit(SupPid, normal),
        ok = machi_partition_simulator:stop(),
        [ok = ?FLU_PC:quit(PPid) || {_, PPid} <- Namez],
        machi_util:wait_for_death(SupPid, 100),
        %% Stop the apps we started at the top, in reverse order.
        %% (This previously called application:start/1 by mistake,
        %% leaving sasl/ranch running after the test.)
        [application:stop(App) || App <- lists:reverse(Apps)]
    end.
%% Many of the static partition lists below have been problematic at one
%% time or another.....
%%
%% Uncomment *one* of the following make_partition_list() bodies.
%% Build the list of partition scenarios exercised by the demo: every
%% single uni-directional partition, every pair, and every triple of
%% distinct senders.  The scenarios are deduplicated, shuffled, padded
%% with ~10% no-partition rounds, and adjacent duplicates are collapsed.
%% (See the commented-out alternative bodies below this function for
%% hand-picked scenario lists that have been problematic in the past.)
make_partition_list(All_list) ->
    OnePartition = [[{X,Y}] || X <- All_list, Y <- All_list, X /= Y],
    TwoPartitions = [[{X,Y}, {A,B}] || X <- All_list, Y <- All_list, X /= Y,
                                       A <- All_list, B <- All_list, A /= B,
                                       X /= A],
    ThreePartitions = [[{X,Y}, {A,B}, {C,D}] ||
                          X <- All_list, Y <- All_list, X /= Y,
                          A <- All_list, B <- All_list, A /= B,
                          C <- All_list, D <- All_list, C /= D,
                          X /= A, X /= C, A /= C],
    Concat = OnePartition ++ TwoPartitions ++ ThreePartitions,
    NoPartitions = lists:duplicate(trunc(length(Concat) * 0.1), []),
    uniq_reverse(random_sort(lists:usort([lists:sort(L) || L <- Concat])
                             ++ NoPartitions)).
%% %% for len=5 and 2 witnesses
%% [
%% [{b,c}],
%% [],
%% [{b,c}],
%% [{a,c},{b,c}],
%% [{b,c}],
%% [],
%% [{c,d}],
%% [],
%% [{d,e}],
%% [],
%% [{d,c}],
%% [{b,c}],
%% [],
%% [{c,e}]
%% ].
%% [
%% [{b,c}],
%% [{a,c},{b,c}]
%% %% [{b,c}],
%% %% [],
%% %% [{c,d}],
%% %% [],
%% %% [{d,e}],
%% %% [],
%% %% [{c,e}]
%% ].
%% [
%% [{b,c}],
%% [{b,c},{c,d},{e,a}],
%% [{a,c},{a,d},{a,e},{c,a},{d,a},{e,a},{b,c},{b,d},{b,e},{b,c},{b,d},{b,e}, % iof2
%% {c,a},{c,b},{c,d},{c,e},{a,c},{b,c},{d,c},{e,c}, % island of 1
%% {d,a},{d,b},{d,c},{d,e},{a,d},{b,d},{c,d},{e,d}, % island of 1
%% {e,a},{e,b},{e,c},{e,d},{a,e},{b,e},{c,e},{d,e}],% island of 1
%% [{a,e},{b,c},{d,e}] % the stinker?
%% ,
%% [],
%% [{b,a},{d,e},{e,a}],
%% [{b,c},{c,d}],
%% [{a,c},{c,a},{d,b}],
%% [{a,e},{c,e},{e,d}],
%% [{a,e},{c,d},{d,b}],
%% [{b,e},{c,a},{e,d}],
%% [{b,c},{c,d},{e,a}],
%% [{d,e},{e,c}],
%% [{a,e},{b,c},{d,e}] % the stinker?
%% ,
%% [],
%% [{e,a},{g,d}],
%% [{b,f},{f,b}],
%% [{a,g},{c,d}]
%% ]. % for 5 in AP, yay, working now.
%% [ [{a,b},{b,d},{c,b}],
%% [{a,b},{b,d},{c,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}],
%% %% [{a,b},{b,d},{c,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}],
%% [{a,b},{b,d},{c,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}],
%% [{a,b},{b,d},{c,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ].
%% [ [{a,b}, {b,c}],
%% [{a,b}, {a,c}] ].
%% Q = [ {X,Y} || X <- [a], Y <- [b,c,d,e,f,g,h,i,j,k,l,m,n,o,p] ],
%% %% [ [{d,e}], Q]. %% len=7 problem: bad inner flip when ps=[] at end!
%% [ Q, [{a,b},{c,d},{e,f}] ]. %% len=7 problem: WTF, double-check please!
%% len=7 problem: insane evil-near-infinite-loop sometimes
%% [ [{a,b}], Q, [{c,d}], Q, [{d,e}], Q].
%% [ [{a,b}, {b,c}] ]. %% hosed-not-equal @ 3 FLUs
%% [ [{b,d}] ].
%% [ [{a,b}], [], [{a,b}], [], [{a,b}] ].
%% [
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% [{b,a},{d,e}],
%% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], []
%% ].
%% [
%% [{a,b}],
%% [],
%% [{a,b}, {b,a}],
%% [],
%% [{b,c}]
%% ].
%% [
%% [{a,c},{b,a},{c,b}],
%% [{b,a}]
%% ].
%% [
%% [{c,b}, {c,a}],
%% [{b,c}, {b,a}],
%% [],
%% [{c,b}, {c,a}],
%% [{b,c}, {b,a}]
%% ].
%% [
%% [{a,b}], [],
%% [{b,a}, {b,c}], [],
%% [{c,b}, {c,a}, {d,c}], [],
%% [{c,b}, {c,a}], [],
%% [{b,a}, {c,a}], [],
%% [{a,b}, {c,b}], [],
%% [{b,c}, {a,c}]
%% ].
%% [ [{a,b},{b,c},{c,a}],
%% [{a,b}, {b,a}, {a,c},{c,a}] ].
%% [ [{a,b}, {c,b}],
%% [{a,b}, {b,c}] ].
%% [ [{a,b}, {b,c}, {c,d}],
%% [{a,b}, {b,c},{b,d}, {c,d}],
%% [{b,a}, {b,c}, {c,d}],
%% [{a,b}, {c,b}, {c,d}],
%% [{a,b}, {b,c}, {d,c}] ].
%% [
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% [{a,b}], [], [{a,b}], [], [{a,b}]
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [],
%% %% [{b,a},{d,e}],
%% %% [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], [], [{a,b}], []
%% ].
%% [ [{a,b}, {b,c}, {c,d}, {d,e}],
%% [{b,a}, {b,c}, {c,d}, {d,e}],
%% [{a,b}, {c,b}, {c,d}, {d,e}],
%% [{a,b}, {b,c}, {d,c}, {d,e}],
%% [{a,b}, {b,c}, {c,d}, {e,d}] ].
%% [ [{c,a}] ]. %% TODO double-check for total repair stability at SET=[]!!
%% [ [{c,a}],
%% [{c,b}, {a, b}] ].
%% [ [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}],
%% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {b,c}],
%% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {c,d}] ].
%% [ [{a,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}],
%% [{a,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}],
%% [{a,b}],
%% [{a,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}],
%% [{a,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ].
%% Debugging helper: read a public projection and, if the read crashes,
%% log the failure loudly and abort the process.  The exit/1 below is
%% deliberate scaffolding; the retry call that used to follow it was
%% unreachable (exit/1 never returns) and has been removed.
todo_why_does_this_crash_sometimes(FLUName, FLU, PPPepoch) ->
    try
        {ok, _}=Res = ?FLU_PC:read_projection(FLU, public, PPPepoch),
        Res
    catch _:_ ->
            io:format(user, "QQQ Whoa, it crashed this time for ~p at epoch ~p\n",
                      [FLUName, PPPepoch]),
            timer:sleep(1000),
            exit(still_a_problem)
    end.
%% Decide whether the private projections across all FLUs have become
%% stable.  Samples every FLU's latest private projection summary, runs
%% PollFunc (the DoIt closure) to pump more manager iterations, samples
%% again, and returns true only when the two samples are identical AND
%% the mode-specific agreement property holds (AP: unique UPI++Repairing
%% lists are disjoint; CP: a full majority agrees on one epoch).
private_projections_are_stable(Namez, PollFunc) ->
    %% Drop "none" projections (empty UPI and repairing) from a sample.
    FilterNoneProj = fun({_EpochID,[],[],_Dn,_W,InnerP}) -> false;
                        (_) -> true
                     end,
    Private1x = [{Name, get_latest_inner_proj_summ(FLU)} || {Name,FLU} <- Namez],
    Private1 = [X || X={_,Proj} <- Private1x, FilterNoneProj(Proj)],
    %% Pump more iterations between the two samples.
    [PollFunc(15, 1, 10) || _ <- lists:seq(1,6)],
    Private2x = [{Name, get_latest_inner_proj_summ(FLU)} || {Name,FLU} <- Namez],
    Private2 = [X || X={_,Proj} <- Private2x, FilterNoneProj(Proj)],
    %% Is = [Inner_p || {_,_,_,_,Inner_p} <- Private1],
    %% Stash the first sample in the process dictionary so the caller
    %% can print it in the "Sweet, ... stable" report.
    put(stable, lists:sort(Private1)),
    %% We want either all true or all false (inner or not) ... except
    %% that it isn't quite that simple.  I've now witnessed a case
    %% where the projections are stable but not everyone is
    %% unanimously outer or unanimously inner!
    %% Old partitions: [{a,b},{b,c},{c,a}]
    %%    result: all 3 had inner proj of [self]
    %% New partitions: [{b,a},{c,b}]
    %%    Priv1 [{342,[c,a],[],[b],[],false},
    %%           {326,[b],[],[a,c],[],true},
    %%           {342,[c,a],[],[b],[],false}]
    %% ... and it stays completely stable with these epoch #s.
    %%
    %% So, instead, if inner/outer status isn't unanimous, then we
    %% should check to see if the sets of unique UPIs are disjoint.
    %%
    FLUs = [FLU || {FLU,_Pid} <- Namez],
    U_UPI_Rs = lists:usort([UPI++Rep ||
                               {_Nm,{_EpochID,UPI,Rep,_Dn,_W,InnerP}} <- Private2]),
    FLU_uses = [{Name, EpochID} ||
                   {Name,{EpochID,_UPI,Rep,_Dn,_W,InnerP}} <- Private2],
    Witnesses = hd([Ws ||
                       {_Name,{_EpochID,_UPI,Rep,_Dn,Ws,InnerP}} <- Private2x]),
    HaveWitnesses_p = Witnesses /= [],
    %% Any witness present implies strong consistency mode.
    CMode = if HaveWitnesses_p -> cp_mode;
               true            -> ap_mode
            end,
    %% Every FLU appearing in some epoch's UPI++Repairing must itself be
    %% using that epoch (or be in a minority partition / CP disjoint set).
    Unanimous_with_all_peers_p =
        lists:all(fun({FLU, UsesEpochID}) ->
                          WhoInEpochID = [Name ||
                                             {Name,{EpochID,_UPI,_Rep,_Dn,_W,I_}}<-Private2,
                                             EpochID == UsesEpochID],
                          WhoInEpochID_s = ordsets:from_list(WhoInEpochID),
                          UPI_R_versions = [UPI++Rep ||
                                               {_Name,{EpochID,UPI,Rep,_Dn,_W,I_}}<-Private2,
                                               EpochID == UsesEpochID],
                          UPI_R_vers_s = ordsets:from_list(hd(UPI_R_versions)),
                          UPI_R_versions == [ [] ] % This FLU in minority partition
                          orelse
                          (length(lists:usort(UPI_R_versions)) == 1
                           andalso
                           (ordsets:is_subset(UPI_R_vers_s, WhoInEpochID_s) orelse
                            (CMode == cp_mode andalso
                             ordsets:is_disjoint(UPI_R_vers_s, WhoInEpochID_s))))
                  end, FLU_uses),
    Flat_U_UPI_Rs = lists:flatten(U_UPI_Rs),
    %% Pubs = [begin
    %%             {ok, P} = ?FLU_PC:read_latest_projection(FLU, public),
    %%             {Name, P#projection_v1.epoch_number}
    %%         end || {Name, FLU} <- Namez],
    %% In AP mode, if not disjoint, then a FLU will appear twice in
    %% flattented U_UPIs.
    AP_mode_disjoint_test_p =
        if CMode == cp_mode ->
                true;
           CMode == ap_mode ->
                lists:sort(Flat_U_UPI_Rs) == lists:usort(Flat_U_UPI_Rs)
        end,
    %% CP mode: a full majority of servers must agree on one epoch, and
    %% every member of that epoch's UPI++Repairing must be using it.
    CP_mode_agree_test_p =
        if CMode == cp_mode ->
                FullMajority = (length(Namez) div 2) + 1,
                EpochIDs = lists:sort(
                             [EpochID || {_Name,{EpochID,_UPI,_Rep,_Dn,_W,I_}}<-Private2]),
                case lists:reverse(lists:sort(uniq_c(EpochIDs))) of
                    [{Count,EpochID}|_] when Count >= FullMajority ->
                        [{UPI, Rep}] = lists:usort(
                                         [{_UPI,_Rep} || {_Name,{EpochIDx,_UPI,_Rep,_Dn,_W,I_}}<-Private2,
                                                         EpochIDx == EpochID]),
                        ExpectedFLUs = lists:sort(UPI ++ Rep),
                        UsingFLUs = lists:sort(
                                      [Name || {Name,{EpochIDx,_UPI,_Rep,_Dn,_W,I_}}<-Private2,
                                               EpochIDx == EpochID]),
                        io:format(user, "Priv2: EID ~W e ~w u ~w\n", [EpochID, 7, ExpectedFLUs, UsingFLUs]),
                        ordsets:is_subset(ordsets:from_list(ExpectedFLUs),
                                          ordsets:from_list(UsingFLUs));
                    [{1=_Count,_EpochID}|_] ->
                        %% Our list is sorted & reversed, so 1=_Count
                        %% is biggest.  If a majority is using the none proj,
                        %% then we're OK.
                        Private2None = [X || {_,{_,[],[],_,_,_}}=X <- Private2],
                        length(Private2None) >= FullMajority;
                    [] when Private2 == [], Private2x /= [] ->
                        %% Everyone is using none proj, chain is unavailable.
                        true;
                    Else ->
                        %% This is bad: we have a count that's less than
                        %% FullMajority but greater than 1.
                        false
                end;
           CMode == ap_mode ->
                true
        end,
    %% io:format(user, "\nPriv1 ~p\nPriv2 ~p\n1==2 ~w ap_disjoint ~w u_all_peers ~w cp_mode_agree ~w\n", [lists:sort(Private1), lists:sort(Private2), Private1 == Private2, AP_mode_disjoint_test_p, Unanimous_with_all_peers_p, CP_mode_agree_test_p]),
    Private1 == Private2 andalso
        AP_mode_disjoint_test_p andalso
        (
          %% Another property that we want is that for each participant
          %% X mentioned in a UPI or Repairing list of some epoch E that
          %% X is using the same epoch E.
          %%
          %% It's possible (in theory) for humming consensus to agree on
          %% the membership of UPI+Repairing but arrive those lists at
          %% different epoch numbers.  Machi chain replication won't
          %% work in that case: all participants need to be using the
          %% same epoch (and csum)!
          (CMode == ap_mode andalso Unanimous_with_all_peers_p)
          orelse
          (CMode == cp_mode andalso CP_mode_agree_test_p)
        ).
%% Fetch a FLU's latest private projection and summarize it as
%% {EpochID, UPI, Repairing, Down, Witnesses, Inner_p}, where EpochID
%% pairs the epoch number with the first 4 bytes of the epoch checksum
%% and Inner_p is always false (inner projections are no longer used).
get_latest_inner_proj_summ(FLU) ->
    {ok, Proj} = ?FLU_PC:read_latest_projection(FLU, private),
    <<CSum4:4/binary, _/binary>> = Proj#projection_v1.epoch_csum,
    EpochID = {Proj#projection_v1.epoch_number, CSum4},
    {EpochID,
     Proj#projection_v1.upi,
     Proj#projection_v1.repairing,
     Proj#projection_v1.down,
     Proj#projection_v1.witnesses,
     false}.
%% Reverse `L', collapsing each run of consecutive equal elements down
%% to a single occurrence.
uniq_reverse(L) ->
    uniq_reverse(L, []).

%% Accumulator worker: an element equal to the accumulator head is part
%% of the current run and is skipped; anything else is pushed.
uniq_reverse([X | Rest], [X | _] = Acc) ->
    uniq_reverse(Rest, Acc);
uniq_reverse([X | Rest], Acc) ->
    uniq_reverse(Rest, [X | Acc]);
uniq_reverse([], Acc) ->
    Acc.
%% @doc Return `L' shuffled into a pseudo-random order.
%%
%% Pairs every element with a random float key, sorts on the keys, then
%% strips them. Uses the `rand' module (auto-seeded per process) in
%% place of the deprecated `random' module + `now/0' combination; this
%% also stops re-seeding on every call, which could repeat shuffle
%% sequences when called in quick succession.
random_sort(L) ->
    Keyed = [{rand:uniform(), X} || X <- L],
    [X || {_, X} <- lists:sort(Keyed)].
%% @doc Demo/test driver: boot `NumFLUs' simulated FLUs plus their chain
%% managers under the machi partition simulator, seed an initial public
%% projection, run several rounds of concurrent `trigger_react_to_env'
%% iterations, and return the `DoIt' closure so the caller can run more
%% rounds interactively. On any exception, returns `{Class, Reason}'.
foo(NumFLUs, MgrOpts0) ->
    timer:sleep(100),
    %% Faster test startup, commented: io:format(user, short_doc(), []),
    %% Faster test startup, commented: timer:sleep(3000),
    TcpPort = 62877,
    ok = filelib:ensure_dir("/tmp/c/not-used"),
    %% Static pool of up to 18 FLU definitions (name, port, data dir);
    %% only the first NumFLUs entries are actually used.
    FluInfo = [
        {a,TcpPort+0,"/tmp/c/data.a"}, {b,TcpPort+1,"/tmp/c/data.b"},
        {c,TcpPort+2,"/tmp/c/data.c"}, {d,TcpPort+3,"/tmp/c/data.d"},
        {e,TcpPort+4,"/tmp/c/data.e"}, {f,TcpPort+5,"/tmp/c/data.f"},
        {g,TcpPort+6,"/tmp/c/data.g"}, {h,TcpPort+7,"/tmp/c/data.h"},
        {i,TcpPort+8,"/tmp/c/data.i"}, {j,TcpPort+9,"/tmp/c/data.j"},
        {k,TcpPort+10,"/tmp/c/data.k"}, {l,TcpPort+11,"/tmp/c/data.l"},
        {m,TcpPort+12,"/tmp/c/data.m"}, {n,TcpPort+13,"/tmp/c/data.n"},
        {o,TcpPort+14,"/tmp/c/data.o"}, {p,TcpPort+15,"/tmp/c/data.p"},
        {q,TcpPort+16,"/tmp/c/data.q"}, {r,TcpPort+17,"/tmp/c/data.r"}
    ],
    FLU_biglist = [X || {X,_,_} <- FluInfo],
    All_list = lists:sublist(FLU_biglist, NumFLUs),
    io:format(user, "\nSET # of FLUs = ~w members ~w).\n",
              [NumFLUs, All_list]),
    %% Start the partition simulator with a fixed seed and no initial
    %% partitions (0..100 thresholds set below).
    machi_partition_simulator:start_link({111,222,33}, 0, 100),
    _ = machi_partition_simulator:get(All_list),
    Ps = [#p_srvr{name=Name,address="localhost",port=Port} ||
             {Name,Port,_Dir} <- lists:sublist(FluInfo, NumFLUs)],
    PsDirs = lists:zip(Ps,
                       [Dir || {_,_,Dir} <- lists:sublist(FluInfo, NumFLUs)]),
    %% One FLU server per entry, plus one projection-store client each.
    FLU_pids = [machi_flu1_test:setup_test_flu(Name, Port, Dir) ||
                   {#p_srvr{name=Name,port=Port}, Dir} <- PsDirs],
    Namez = [begin
                 {ok, PPid} = ?FLU_PC:start_link(P),
                 {Name, PPid}
             end || {#p_srvr{name=Name}=P, _Dir} <- PsDirs],
    MembersDict = machi_projection:make_members_dict(Ps),
    MgrOpts = MgrOpts0 ++ ?DEFAULT_MGR_OPTS,
    %% One chain manager per participant.
    MgrNamez =
        [begin
             {ok, MPid} = ?MGR:start_link(P#p_srvr.name, MembersDict, MgrOpts),
             {P#p_srvr.name, MPid}
         end || P <- Ps],
    try
        %% Let the first manager compute an initial projection and write
        %% it to every other participant's public store.
        [{_, Ma}|_] = MgrNamez,
        {ok, P1} = ?MGR:test_calc_projection(Ma, false),
        [ok = ?FLU_PC:write_projection(FLUPid, public, P1) ||
            {_, FLUPid} <- Namez, FLUPid /= Ma],
        machi_partition_simulator:reset_thresholds(10, 50),
        _ = machi_partition_simulator:get(All_list),
        Parent = self(),
        %% DoIt spawns one worker per manager; each worker runs `Iters'
        %% react-to-env iterations with randomized ranked sleeps, then
        %% reports `done' to the parent. A 120 s per-worker timeout
        %% aborts the whole run.
        DoIt = fun(Iters, S_min, S_max) ->
                       %% io:format(user, "\nDoIt: top\n\n", []),
                       io:format(user, "DoIt, ", []),
                       Pids = [spawn(fun() ->
                                             random:seed(now()),
                                             [begin
                                                  erlang:yield(),
                                                  S_max_rand = random:uniform(
                                                                 S_max + 1),
                                                  %% io:format(user, "{t}", []),
                                                  Elapsed =
                                                      ?MGR:sleep_ranked_order(
                                                         S_min, S_max_rand,
                                                         M_name, All_list),
                                                  _ = ?MGR:trigger_react_to_env(MMM),
                                                  %% Be more unfair by not
                                                  %% sleeping here.
                                                  % timer:sleep(S_max - Elapsed),
                                                  Elapsed
                                              end || _ <- lists:seq(1, Iters)],
                                             Parent ! done
                                     end) || {M_name, MMM} <- MgrNamez ],
                       [receive
                            done ->
                                ok
                        after 120*1000 ->
                                exit(icky_timeout)
                        end || _ <- Pids]
               end,
        %% machi_partition_simulator:reset_thresholds(10, 50),
        %% io:format(user, "\nLet loose the dogs of war!\n", []),
        machi_partition_simulator:always_these_partitions([]),
        io:format(user, "\nPuppies for everyone!\n", []),
        %% Warm-up: five rounds of 30 iterations with zero sleep.
        [DoIt(30, 0, 0) || _ <- lists:seq(1,5)],
        DoIt
    catch XXX:YYY ->
            %% NOTE(review): swallows all exceptions and returns
            %% {Class, Reason} without a stacktrace — acceptable for a
            %% demo driver, but callers must check the return value.
            {XXX,YYY}
    end.
%% Run-length encode consecutive duplicates:
%% [a, a, b] -> [{2, a}, {1, b}].
uniq_c(L) ->
    uniq_c(L, 0, unused).

%% Worker: `Count' is the length of the current run of `Last'. A count
%% of 0 means no run has started yet and `Last' is a dummy value.
uniq_c([], 0, _NoRunYet) ->
    [];
uniq_c([], Count, Last) ->
    [{Count, Last}];
uniq_c([X | Rest], 0, _NoRunYet) ->
    uniq_c(Rest, 1, X);
uniq_c([Same | Rest], Count, Same) ->
    uniq_c(Rest, Count + 1, Same);
uniq_c([X | Rest], Count, Last) ->
    [{Count, Last} | uniq_c(Rest, 1, X)].
%% @doc Best-effort developer knob: if `/tmp/pstore_sleep_msec' exists
%% and begins with an integer, store that many milliseconds in the test
%% ETS table under `projection_store_sleep_time'.
%%
%% Any failure (missing file, unparsable content, absent ETS table) is
%% deliberately swallowed — this hook is optional and must never crash
%% the caller.
perhaps_adjust_pstore_sleep() ->
    try
        {ok, Bin} = file:read_file("/tmp/pstore_sleep_msec"),
        {MSec,_} = string:to_integer(binary_to_list(Bin)),
        ets:insert(?TEST_ETS_TABLE, {projection_store_sleep_time, MSec})
    catch _:_ ->
            ok
    end.
%% MaxIters = NumFLUs * (NumFLUs + 1) * 6,
%% Stable = fun(S_Namez) ->
%% true = lists:foldl(
%% fun(_, true) ->
%% true;
%% (_, _) ->
%% %% Run a few iterations
%% [DoIt(10, 10, 50) || _ <- lists:seq(1, 6)],
%% %% If stable, return true to short circuit remaining
%% private_projections_are_stable(S_Namez, DoIt)
%% end, false, lists:seq(0, MaxIters))
%% end,
%% %% Part_b = [{a,b},{c,d}],
%% Part_b = [{c,d}],
%% %% Part_b = [{X,Y} || {X,_} <- Namez, {Y,_} <- Namez, X == b orelse Y == b],
%% %% Part_d = [{X,Y} || {X,_} <- Namez, {Y,_} <- Namez, X == d orelse Y == d],
%% %% machi_partition_simulator:always_these_partitions(Part_b),
%% %% io:format(user, "\nSET partitions = ~w at ~w\n", [Part_b, time()]),
%% %% true = Stable(Namez), io:format(user, "\nSweet, private projections are stable\n", []), io:format(user, "\t~P\n", [get(stable), 14]), (fun() -> ReportXX = machi_chain_manager1_test:unanimous_report(Namez), true = machi_chain_manager1_test:all_reports_are_disjoint(ReportXX), io:format(user, "Yay for ReportXX!\n", []) end)(),
%% %% Part_bd = Part_b ++ Part_d,
%% %% machi_partition_simulator:always_these_partitions(Part_bd),
%% %% io:format(user, "\nSET partitions = ~w at ~w\n", [Part_bd, time()]),
%% %% true = Stable(Namez), io:format(user, "\nSweet, private projections are stable\n", []), io:format(user, "\t~P\n", [get(stable), 14]), (fun() -> ReportXX = machi_chain_manager1_test:unanimous_report(Namez), true = machi_chain_manager1_test:all_reports_are_disjoint(ReportXX), io:format(user, "Yay for ReportXX!\n", []) end)(),
%% os:cmd("rm /tmp/signal"),
%% Part_b_partial_d = Part_b ++ [{e,f}],
%% %% Part_b_partial_d = [{a,b}, {b,c}, {c,d}, {d,e}],
%% %% Part_b_partial_d = [{a,b}, {b,d}, {d,e}],
%% machi_partition_simulator:always_these_partitions(Part_b_partial_d),
%% io:format(user, "\nSET partitions = ~w at ~w\n", [Part_b_partial_d, time()]),
%% %% Only_ab_namez = [T || T={Name, _} <- Namez, lists:member(Name, [a,b])],
%% true = Stable(Namez), io:format(user, "\nSweet, private projections are stable\n", []), io:format(user, "\t~P\n", [get(stable), 14]), (fun() -> ReportXX = machi_chain_manager1_test:unanimous_report(Namez), true = machi_chain_manager1_test:all_reports_are_disjoint(ReportXX), io:format(user, "Yay for ReportXX!\n", []) end)(),
%% machi_partition_simulator:always_these_partitions([{b,c}]),
%% io:format(user, "\nSET partitions = ~w at ~w\n", [[{b,c}], time()]),
%% %% Only_ab_namez = [T || T={Name, _} <- Namez, lists:member(Name, [a,b])],
%% [true = Stable(Namez) || _ <- [1,2,3] ], io:format(user, "\nSweet, private projections are stable\n", []), io:format(user, "\t~P\n", [get(stable), 14]), (fun() -> ReportXX = machi_chain_manager1_test:unanimous_report(Namez), true = machi_chain_manager1_test:all_reports_are_disjoint(ReportXX), io:format(user, "Yay for ReportXX!\n", []) end)(),
%% %% [begin
%% %% QQQ = lists:sublist(Part_b_partial_d, NNN),
%% %% machi_partition_simulator:always_these_partitions(QQQ),
%% %% io:format(user, "\nSET partitions = ~w at ~w\n", [QQQ, time()]),
%% %% %% Only_ab_namez = [T || T={Name, _} <- Namez, lists:member(Name, [a,b])],
%% %% true = Stable(Namez), io:format(user, "\nSweet, private projections are stable\n", []), io:format(user, "\t~P\n", [get(stable), 14]), (fun() -> ReportXX = machi_chain_manager1_test:unanimous_report(Namez), true = machi_chain_manager1_test:all_reports_are_disjoint(ReportXX), io:format(user, "Yay for ReportXX!\n", []) end)()
%% %% end || NNN <- lists:seq(1, length(Part_b_partial_d))],
%% io:format(user, "\nSET partitions = []\n", []),
%% io:format(user, "We should see convergence to 1 correct chain.\n", []),
%% machi_partition_simulator:no_partitions(),
%% [DoIt(50, 10, 50) || _ <- [1,2,3]],
%% true = private_projections_are_stable(Namez, DoIt),
%% io:format(user, "~s\n", [os:cmd("date")]),
%% %% We are stable now ... analyze it.
%% %% Create a report where at least one FLU has written a
%% %% private projection.
%% Report = machi_chain_manager1_test:unanimous_report(Namez),
%% %% ?D(Report),
%% %% Report is ordered by Epoch. For each private projection
%% %% written during any given epoch, confirm that all chain
%% %% members appear in only one unique chain, i.e., the sets of
%% %% unique chains are disjoint.
%% true = machi_chain_manager1_test:all_reports_are_disjoint(Report),
%% %% io:format(user, "\nLast Reports: ~p\n", [lists:nthtail(length(Report)-8,Report)]),
%% %% For each chain transition experienced by a particular FLU,
%% %% confirm that each state transition is OK.
%% PrivProjs = [{Name, begin
%% {ok, Ps9} = ?FLU_PC:get_all_projections(FLU,
%% private),
%% [P || P <- Ps9,
%% P#projection_v1.epoch_number /= 0]
%% end} || {Name, FLU} <- Namez],
%% try
%% [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane_retrospective(Psx, FLU)} ||
%% {FLU, Psx} <- PrivProjs],
%% io:format(user, "\nAll sanity checks pass, hooray!\n", [])
%% catch _Err:_What ->
%% io:format(user, "Report ~p\n", [Report]),
%% io:format(user, "PrivProjs ~p\n", [PrivProjs]),
%% exit({line, ?LINE, _Err, _What})
%% end,
%% %% ?D(R_Projs),
%% ok
%% catch
%% XX:YY ->
%% io:format(user, "BUMMER ~p ~p @ ~p\n",
%% [XX, YY, erlang:get_stacktrace()]),
%% exit({bummer,XX,YY})
%% after
%% [ok = ?MGR:stop(MgrPid) || {_, MgrPid} <- MgrNamez],
%% [ok = ?FLU_PC:quit(PPid) || {_, PPid} <- Namez],
%% [ok = machi_flu1:stop(FLUPid) || FLUPid <- FLU_pids],
%% ok = machi_partition_simulator:stop()
%% end.
-endif. % !PULSE
-endif. % TEST
%% vim: set ai et sw=4 sts=4:
%% See LICENSE for licensing information.
-module(yaml_flow).
-export([ mapping/2
, sequence/2
]).
-include("yaml_grapheme.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%=======================================================================
-spec mapping(yaml_event:state(), yaml:props()) -> yaml_event:emit().

%% Emit `start_of_mapping' and continue by scanning past the `{'
%% indicator into the flow-mapping body.
mapping(E, #{ from := From, anchor := Anchor, tag := Tag }) ->
    Event = {start_of_mapping, From, Anchor, Tag},
    Next = fun (EE) -> collection(EE, ${, mapping, From) end,
    yaml_event:emit(Event, E, Next).

%-----------------------------------------------------------------------

%% Props-bearing variant: a closing `}' may only be reached when no
%% anchor/tag properties are pending (otherwise no clause matches and
%% the parser crashes, by design).
end_of_mapping(E, #{ anchor := no_anchor, tag := no_tag }, At) ->
    end_of_mapping(E, At).

%-----------------------------------------------------------------------

%% Unwind the parse stack at a closing `}'. Pending explicit keys and
%% dangling values are completed with implicit empty nodes; at the
%% `mapping' frame itself, emit `end_of_mapping'. Any other frame on
%% top is a parse-state bug.
end_of_mapping(E, At) ->
    case yaml_event:top(E) of
        {explicit, flow, From} ->
            %% `? key' seen but no value: emit an empty value first.
            Next = fun (EE) -> end_of_mapping(EE, At) end,
            Top = yaml_event:top(E, value, flow, From),
            empty_after_indicator(Top, From, Next);
        {key, flow, _} ->
            end_of_mapping(yaml_event:pop(E), At);
        {value, flow, From} ->
            %% `:' seen but no value: emit an empty value first.
            Next = fun (EE) -> end_of_mapping(EE, At) end,
            empty_after_indicator(yaml_event:pop(E), From, Next);
        {mapping, flow, _} ->
            Event = {end_of_mapping, yaml_event:coord(E)},
            Next = fun (EE) -> end_of_collection(EE) end,
            yaml_event:emit(Event, yaml_event:pop(E), Next);
        T ->
            throw({E, At, T})
    end.
%=======================================================================
-spec sequence(yaml_event:state(), yaml:props()) -> yaml_event:emit().

%% Emit `start_of_sequence' and continue by scanning past the `['
%% indicator into the flow-sequence body.
sequence(E, #{ from := From, anchor := Anchor, tag := Tag }) ->
    Event = {start_of_sequence, From, Anchor, Tag},
    Next = fun (EE) -> collection(EE, $[, sequence, From) end,
    yaml_event:emit(Event, E, Next).

%-----------------------------------------------------------------------

%% Props-bearing variant: a closing `]' may only be reached when no
%% anchor/tag properties are pending.
end_of_sequence(E, #{ anchor := no_anchor, tag := no_tag }, At) ->
    end_of_sequence(E, At).

%-----------------------------------------------------------------------

%% Emit `end_of_sequence' at a closing `]'. Unlike mappings there are
%% no pending key/value frames to unwind; any other top frame is a
%% parse-state bug.
end_of_sequence(E, At) ->
    case yaml_event:top(E) of
        {sequence, flow, _} ->
            Event = {end_of_sequence, yaml_event:coord(E)},
            Next = fun (EE) -> end_of_collection(EE) end,
            yaml_event:emit(Event, yaml_event:pop(E), Next);
        T ->
            throw({E, At, T})
    end.
%=======================================================================
%% Consume the opening indicator (`{' or `[') — the match on
%% `Indicator' is an assertion that the scanner really is positioned on
%% it — push the collection frame, and parse the first entry.
collection(E, Indicator, Flow, At) ->
    S = yaml_event:scan(E),
    Indicator = yaml_scan:grapheme(S),
    Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
    Pushed = yaml_event:push(Scanned, Flow, flow, At),
    entry(Pushed, Flow).

%-----------------------------------------------------------------------

%% After a collection's end event: if it was nested inside another flow
%% node, continue that node's content handling; if the enclosing
%% context is not flow, hand control back to the block parser.
end_of_collection(E) ->
    case yaml_event:top(E) of
        {pair_key, flow, _} ->
            end_of_content(E);
        {sequence, flow, _} ->
            end_of_content(E);
        {_, N, _} when N =/= flow ->
            yaml_block:flow_did_end(E);
        T ->
            throw({E, T})
    end.
%=======================================================================
-spec props_empty(yaml:coord()) -> yaml:props().

%% Build a node-property map anchored at `From' that carries neither an
%% anchor nor a tag.
props_empty(From) ->
    maps:from_list([{from, From}, {anchor, no_anchor}, {tag, no_tag}]).
%=======================================================================
%% Classify the whitespace ahead. Returns `{flow, E1}' when parsing can
%% continue inside the flow node (same line, or a new line that is
%% still indented past the flow context); otherwise returns the raw
%% space result for the caller to handle (usually by throwing).
space_in_flow(E) ->
    case yaml_space:space(E) of
        {{_, in_line, _, _}, E1} ->
            {flow, E1};
        {Space = {_, indent_line, _, _}, E1} ->
            case yaml_event:is_indented(E) of
                true ->
                    {flow, E1};
                false ->
                    {Space, E1}
            end;
        {Space, E1} ->
            {Space, E1}
    end.
%=======================================================================
%% Parse the next entry of a flow collection: skip whitespace, then
%% dispatch on the kind of collection being parsed.
entry(E, Flow) ->
    case space_in_flow(E) of
        {flow, E1} ->
            entry_detect(E1, Flow);
        {Space, E1} ->
            throw({E1, Space})
    end.

%-----------------------------------------------------------------------

%% Inside a mapping, decide between explicit (`? key') and implicit
%% key forms; inside a sequence, the same indicators introduce a
%% single-pair compact mapping instead. Anything else parses a plain
%% entry with empty properties.
entry_detect(E, mapping) ->
    case yaml_implicit:detect(E, flow) of
        explicit_key ->
            explicit_key(E);
        explicit_value ->
            %% `:' with no key before it: synthesize an empty key.
            explicit_missing_key(E);
        implicit_key ->
            implicit_key(E);
        false ->
            implicit_key(E);
        D ->
            throw({E, D})
    end;
entry_detect(E, sequence) ->
    case yaml_implicit:detect(E, flow) of
        explicit_key ->
            explicit_pair(E);
        explicit_value ->
            explicit_pair_missing_key(E);
        implicit_key ->
            implicit_pair(E);
        false ->
            entry_with_empty_props(E, yaml_event:coord(E));
        D ->
            throw({E, D})
    end;
entry_detect(E, _) ->
    entry_with_empty_props(E, yaml_event:coord(E)).

%-----------------------------------------------------------------------

entry_with_empty_props(E, At) ->
    entry_with_props(E, props_empty(At)).

%-----------------------------------------------------------------------

%% Dispatch on the first grapheme of an entry: node properties,
%% aliases, quoted/plain scalars, nested collections, and the
%% terminator/separator indicators, which may legally appear where an
%% (empty) entry was expected.
entry_with_props(E, Props) ->
    S = yaml_event:scan(E),
    case yaml_scan:grapheme(S) of
        $& ->
            property_anchor(E, Props);
        $! ->
            property_tag(E, Props);
        $* ->
            alias(E, Props);
        $' ->
            scalar(yaml_single:scalar(E, flow, Props));
        $\" ->
            scalar(yaml_double:scalar(E, flow, Props));
        ${ ->
            mapping(E, Props);
        $} ->
            Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
            end_of_mapping(Scanned, Props, yaml_scan:coord(S));
        $[ ->
            sequence(E, Props);
        $] ->
            Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
            end_of_sequence(Scanned, Props, yaml_scan:coord(S));
        $, ->
            Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
            comma(Scanned, Props, yaml_scan:coord(S));
        G when ?IS_PLAIN_CHECK_INDICATOR(G) ->
            %% Indicator that may still start a plain scalar; let the
            %% plain-scalar parser decide.
            scalar(yaml_plain:scalar(E, flow, Props));
        G when ?IS_INDICATOR(G) ->
            throw({E, Props, G});
        G when ?IS_PRINTABLE(G) ->
            scalar(yaml_plain:scalar(E, flow, Props));
        G ->
            throw({E, Props, G})
    end.
%=======================================================================
%% Parse an `&anchor' property, report its scanner errors one by one,
%% then record it in `Props' — flagging a duplicate anchor as an error
%% while still keeping the newer one.
property_anchor(E, Props) ->
    {Anchor, Errors, E1} = yaml_anchor:property(E),
    property_anchor(E1, Errors, Anchor, Props).

%-----------------------------------------------------------------------

property_anchor(E, [Error | Errors], Anchor, Props) ->
    Next = fun (EE) -> property_anchor(EE, Errors, Anchor, Props) end,
    yaml_event:error(Error, E, Next);
property_anchor(E, [], Anchor, Props = #{ anchor := no_anchor }) ->
    after_property(E, Props#{ anchor => Anchor });
property_anchor(E, [], Anchor, Props = #{ from := Start }) ->
    %% A second anchor on the same node: error, last one wins.
    {anchor, From, Thru, _} = Anchor,
    Error = {multiple_anchors, From, Thru, {flow, Start, Thru}},
    Next = fun (EE) -> after_property(EE, Props#{ anchor => Anchor }) end,
    yaml_event:error(Error, E, Next).

%-----------------------------------------------------------------------

%% Parse a `!tag' property; mirrors property_anchor/2 above.
property_tag(E, Props) ->
    {Tag, Errors, E1} = yaml_tag:property(E),
    property_tag(E1, Errors, Tag, Props).

%-----------------------------------------------------------------------

property_tag(E, [Error | Errors], Tag, Props) ->
    Next = fun (EE) -> property_tag(EE, Errors, Tag, Props) end,
    yaml_event:error(Error, E, Next);
property_tag(E, [], Tag, Props = #{ tag := no_tag }) ->
    after_property(E, Props#{ tag => Tag });
property_tag(E, [], Tag, Props = #{ from := Start }) ->
    %% A second tag on the same node: error, last one wins.
    {tag, From, Thru, _, _} = Tag,
    Error = {multiple_tags, From, Thru, {flow, Start, Thru}},
    Next = fun (EE) -> after_property(EE, Props#{ tag => Tag }) end,
    yaml_event:error(Error, E, Next).

%-----------------------------------------------------------------------

after_property(E, Props) ->
    {Space, E1} = yaml_space:space(E),
    after_property_space(E1, Props, Space).

%-----------------------------------------------------------------------

%% After node properties, only in-line space is handled: either the
%% properties belong to an empty node before a `:' indicator, or the
%% node content follows directly. Any other space shape has no clause
%% and crashes, by design.
after_property_space(E, Props, {End, in_line, _, _}) ->
    case yaml_implicit:detect(E, flow) of
        explicit_value ->
            Next = fun after_content/1,
            empty(E, Props, End, Next);
        false ->
            entry_with_props(E, Props)
    end.
%=======================================================================
%% Emit a zero-width empty node at coordinate `At'.
empty(E, At = {_, _}, Next) ->
    Event = {empty, At, At, no_anchor, no_tag},
    yaml_event:emit(Event, E, Next).

%-----------------------------------------------------------------------

%% Emit an empty node that carries previously-parsed properties,
%% spanning from the properties' start to `Thru'.
empty(E, #{ from := From, anchor := Anchor, tag := Tag }, Thru, Next) ->
    Event = {empty, From, Thru, Anchor, Tag},
    yaml_event:emit(Event, E, Next).

%-----------------------------------------------------------------------

%% Emit a zero-width empty node one column past a single-character
%% indicator located at `{R, C}'.
empty_after_indicator(E, {R, C}, Next) ->
    At = {R, C + 1},
    Event = {empty, At, At, no_anchor, no_tag},
    yaml_event:emit(Event, E, Next).
%=======================================================================
%% A `*alias' node may not carry its own anchor or tag — the match on
%% the props enforces that; violations crash by design.
alias(E, #{ anchor := no_anchor, tag := no_tag }) ->
    scalar(yaml_anchor:alias(E)).

%-----------------------------------------------------------------------

%% Emit a parsed scalar (or alias): report its errors first, then the
%% scalar event itself, then resume at end_of_content/1.
scalar({Scalar, Errors, E}) ->
    scalar_emit(E, Errors, Scalar).

%-----------------------------------------------------------------------

scalar_emit(E, [Error | Errors], Scalar) ->
    Next = fun (EE) -> scalar_emit(EE, Errors, Scalar) end,
    yaml_event:error(Error, E, Next);
scalar_emit(E, [], Scalar) ->
    Next = fun end_of_content/1,
    yaml_event:emit(Scalar, E, Next).
%=======================================================================
%% After a node's content event(s): adjust the parse stack according to
%% what role the node played (explicit key becomes a completed key;
%% the value of a compact pair closes its implicit mapping; a completed
%% value pops its frame), then look for the next indicator.
end_of_content(E) ->
    case yaml_event:top(E) of
        {explicit, flow, At} ->
            space_after_content(yaml_event:top(E, key, flow, At));
        {key, flow, _} ->
            space_after_content(E);
        {pair_explicit, flow, At} ->
            space_after_content(yaml_event:top(E, pair_key, flow, At));
        {pair_key, flow, _} ->
            space_after_content(E);
        {pair_value, flow, _} ->
            %% A compact single-pair mapping ends after its value.
            At = yaml_event:coord(E),
            Event = {end_of_mapping, At},
            Next = fun space_after_content/1,
            yaml_event:emit(Event, yaml_event:pop(E), Next);
        {sequence, flow, _} ->
            space_after_content(E);
        {value, flow, _} ->
            space_after_content(yaml_event:pop(E))
    end.

%-----------------------------------------------------------------------

space_after_content(E) ->
    case space_in_flow(E) of
        {flow, E1} ->
            after_content(E1);
        {Space, E1} ->
            throw({E1, Space})
    end.

%-----------------------------------------------------------------------

%% Only four graphemes may legally follow a node inside a flow
%% collection: `:', `,', `}', and `]'.
after_content(E) ->
    S = yaml_event:scan(E),
    case yaml_scan:grapheme(S) of
        $: ->
            % no need to check for following non-plain character
            % yaml_plain:scalar() would already have checked and a $:
            % following a JSON like key can be immediately adjacent to
            % a plain value
            Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
            colon_after_content(Scanned, yaml_scan:coord(S));
        $, ->
            Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
            comma_after_content(Scanned, yaml_scan:coord(S));
        $} ->
            Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
            end_of_mapping(Scanned, yaml_scan:coord(S));
        $] ->
            Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
            end_of_sequence(Scanned, yaml_scan:coord(S));
        _ ->
            throw(E)
    end.
%=======================================================================
%% A `:' after a node: turn a completed key frame into a value frame.
%% A `:' directly inside a sequence is an error (it would form an
%% implicit key there); recover by treating it like a `,'.
colon_after_content(E, At) ->
    case yaml_event:top(E) of
        {key, flow, _} ->
            entry(yaml_event:top(E, value, flow, At), value);
        {pair_key, flow, _} ->
            entry(yaml_event:top(E, pair_value, flow, At), value);
        {sequence, flow, From} ->
            Next = fun (EE) -> comma_after_content(EE, At) end,
            Error = {bad_implicit_key, At, At, {sequence, From, At}},
            yaml_event:error(Error, E, Next);
        T ->
            throw({E, At, T})
    end.
%=======================================================================
%% A `,' seen where an entry was expected, with pending props: only
%% legal when a value was expected — the props become an empty value.
comma(E, Props, Thru) ->
    case yaml_event:top(E) of
        {value, flow, _} ->
            comma_missing_value(E, Props, Thru);
        T ->
            throw({E, Props, Thru, T})
    end.

%-----------------------------------------------------------------------

%% A `,' after a completed node: a dangling key gets an implicit empty
%% value; otherwise simply parse the next entry of the collection.
comma_after_content(E, At) ->
    case yaml_event:top(E) of
        {key, flow, _} ->
            comma_missing_value(E, At);
        {mapping, flow, _} ->
            entry(E, mapping);
        {sequence, flow, _} ->
            entry(E, sequence);
        T ->
            throw({E, At, T})
    end.

%-----------------------------------------------------------------------

%% Emit a plain empty node for the missing value, then continue with
%% the next mapping entry.
comma_missing_value(E, At) ->
    Next = fun (EE) -> entry(EE, mapping) end,
    empty(yaml_event:pop(E), At, Next).

%-----------------------------------------------------------------------

%% Same, but the empty node carries the pending anchor/tag properties.
comma_missing_value(E, Props, Thru) ->
    Next = fun (EE) -> entry(EE, mapping) end,
    empty(yaml_event:pop(E), Props, Thru, Next).
%=======================================================================
%% Consume a `?' explicit-key indicator inside a mapping (the match on
%% `$?' asserts the scanner position) and parse the key node.
explicit_key(E) ->
    S = yaml_event:scan(E),
    At = yaml_scan:coord(S),
    $? = yaml_scan:grapheme(S),
    Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
    Pushed = yaml_event:push(Scanned, explicit, flow, At),
    entry(Pushed, explicit).

%=======================================================================

%% A `?' inside a sequence starts a compact single-pair mapping: emit
%% `start_of_mapping' first, then parse the explicit key.
explicit_pair(E) ->
    S = yaml_event:scan(E),
    At = yaml_scan:coord(S),
    $? = yaml_scan:grapheme(S),
    Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
    Pushed = yaml_event:push(Scanned, pair_explicit, flow, At),
    Event = {start_of_mapping, At, no_anchor, no_tag},
    Next = fun (EE) -> entry(EE, explicit) end,
    yaml_event:emit(Event, Pushed, Next).

%=======================================================================

explicit_missing_key(E) ->
    explicit_missing_key(E, value).

%-----------------------------------------------------------------------

%% A `:' with no key before it: consume the indicator, emit an empty
%% node for the key, and parse the value.
explicit_missing_key(E, Flow) ->
    S = yaml_event:scan(E),
    At = yaml_scan:coord(S),
    $: = yaml_scan:grapheme(S),
    Scanned = yaml_event:scan_to(E, yaml_scan:next(S)),
    Pushed = yaml_event:push(Scanned, Flow, flow, At),
    Next = fun (EE) -> entry(EE, Flow) end,
    empty(Pushed, At, Next).

%=======================================================================

%% A `:' inside a sequence: open a compact single-pair mapping whose
%% key is an empty node.
explicit_pair_missing_key(E) ->
    At = yaml_event:coord(E),
    Event = {start_of_mapping, At, no_anchor, no_tag},
    Next = fun (EE) -> explicit_missing_key(EE, pair_value) end,
    yaml_event:emit(Event, E, Next).

%=======================================================================

%% Implicit key inside a mapping: push the key frame and parse it.
implicit_key(E) ->
    At = yaml_event:coord(E),
    Pushed = yaml_event:push(E, key, flow, At),
    entry_with_empty_props(Pushed, At).
%=======================================================================
%% Implicit key inside a sequence: open a compact single-pair mapping
%% at the current coordinate, then parse its key.
implicit_pair(E) ->
    Where = yaml_event:coord(E),
    Stacked = yaml_event:push(E, pair_key, flow, Where),
    Continue = fun (EE) -> entry_with_empty_props(EE, Where) end,
    yaml_event:emit({start_of_mapping, Where, no_anchor, no_tag}, Stacked, Continue).
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc Baggage is used to annotate telemetry, adding context and
%% information to metrics, traces, and logs. It is represented by a set
%% of name/value pairs describing user-defined properties.
%% @end
%%%-------------------------------------------------------------------------
-module(otel_baggage).
-export([set/1,
set/2,
set/3,
set/4,
get_all/0,
get_all/1,
clear/0,
clear/1,
get_text_map_propagators/0]).
%% keys and values are UTF-8 binaries
-type key() :: unicode:unicode_binary().
-type value() :: unicode:unicode_binary().
-type metadata() :: [unicode:unicode_binary() | {unicode:unicode_binary(), unicode:unicode_binary()}].
-type t() :: #{key() => {value(), metadata()}}.
-export_type([t/0,
key/0,
value/0]).
-define(DEC2HEX(X),
if ((X) >= 0) andalso ((X) =< 9) -> (X) + $0;
((X) >= 10) andalso ((X) =< 15) -> (X) + $A - 10
end).
-define(HEX2DEC(X),
if ((X) >= $0) andalso ((X) =< $9) -> (X) - $0;
((X) >= $A) andalso ((X) =< $F) -> (X) - $A + 10;
((X) >= $a) andalso ((X) =< $f) -> (X) - $a + 10
end).
-define(BAGGAGE_KEY, '$__otel_baggage_ctx_key').
-define(BAGGAGE_HEADER, <<"baggage">>).
%% @doc Merge the given key/value pairs into the baggage stored in the
%% *current* (process-local) context. Invalid entries are dropped by
%% verify_baggage/1; any other argument shape is silently ignored.
-spec set(#{key() => value()} | [{key(), value()}]) -> ok.
set(KeyValues) when is_list(KeyValues) ->
    set(maps:from_list(KeyValues));
set(KeyValues) when is_map(KeyValues) ->
    Baggage = otel_ctx:get_value(?BAGGAGE_KEY, #{}),
    otel_ctx:set_value(?BAGGAGE_KEY, maps:merge(Baggage, verify_baggage(KeyValues)));
set(_) ->
    ok.

%% Ctx will never be a list or binary so we can tell if a context is passed by checking that
%% @doc Two call shapes share this arity: `set(Key, Value)' updates the
%% current context (Key is a string/binary), while `set(Ctx, KeyValues)'
%% merges a map/list of pairs into an explicit context and returns it.
-spec set(otel_ctx:t() | key() | unicode:charlist(), #{key() => value()} | [{key(), value()}] | value()) -> otel_ctx:t().
set(Key, Value) when is_list(Key) ; is_binary(Key) ->
    set(Key, Value, []);
set(Ctx, KeyValues) when is_list(KeyValues) ->
    set(Ctx, maps:from_list(KeyValues));
set(Ctx, KeyValues) when is_map(KeyValues) ->
    Baggage = otel_ctx:get_value(Ctx, ?BAGGAGE_KEY, #{}),
    otel_ctx:set_value(Ctx, ?BAGGAGE_KEY, maps:merge(Baggage, verify_baggage(KeyValues))).

%% @doc Again two shapes: `set(Key, Value, Metadata)' targets the
%% current context; `set(Ctx, Key, Value)' targets an explicit context
%% with empty metadata.
-spec set(otel_ctx:t() | key(), key() | value(), value() | list()) -> ok | otel_ctx:t().
set(Key, Value, Metadata) when is_list(Key) ; is_binary(Key) ->
    Baggage = otel_ctx:get_value(?BAGGAGE_KEY, #{}),
    otel_ctx:set_value(?BAGGAGE_KEY, maps:merge(Baggage, verify_baggage(#{Key => {Value, Metadata}})));
set(Ctx, Key, Value) ->
    set(Ctx, Key, Value, []).

%% @doc Set one key with value and metadata in an explicit context.
-spec set(otel_ctx:t(), key(), value(), metadata()) -> otel_ctx:t().
set(Ctx, Key, Value, Metadata) ->
    Baggage = otel_ctx:get_value(Ctx, ?BAGGAGE_KEY, #{}),
    otel_ctx:set_value(Ctx, ?BAGGAGE_KEY, maps:merge(Baggage, verify_baggage(#{Key => {Value, Metadata}}))).
%% @doc Return all baggage in the current process context.
-spec get_all() -> t().
get_all() ->
    otel_ctx:get_value(?BAGGAGE_KEY, #{}).

%% @doc Return all baggage in the given context.
-spec get_all(otel_ctx:t()) -> t().
get_all(Ctx) ->
    otel_ctx:get_value(Ctx, ?BAGGAGE_KEY, #{}).

%% @doc Remove all baggage from the current process context.
-spec clear() -> ok.
clear() ->
    otel_ctx:set_value(?BAGGAGE_KEY, #{}).

%% @doc Remove all baggage from the given context, returning it.
-spec clear(otel_ctx:t()) -> otel_ctx:t().
clear(Ctx) ->
    otel_ctx:set_value(Ctx, ?BAGGAGE_KEY, #{}).
%% @doc Build the W3C `baggage' header extractor/injector pair.
%%
%% The injector percent-encodes each key/value (plus metadata) into a
%% single comma-separated `baggage' header; the extractor parses such a
%% header (looked up case-insensitively) and merges its entries into
%% the current baggage map.
-spec get_text_map_propagators() -> {otel_propagator:text_map_extractor(), otel_propagator:text_map_injector()}.
get_text_map_propagators() ->
    ToText = fun(Baggage) when is_map(Baggage) ->
                     %% Fold builds ",k=v" fragments; strip the leading
                     %% comma, or emit no header for empty baggage.
                     case maps:fold(fun(Key, Value, Acc) ->
                                            [$,, [encode_key(Key), "=", encode_value(Value)] | Acc]
                                    end, [], Baggage) of
                         [$, | List] ->
                             [{?BAGGAGE_HEADER, unicode:characters_to_binary(List)}];
                         _ ->
                             []
                     end;
                (_) ->
                     []
             end,
    FromText = fun(Headers, CurrentBaggage) ->
                       case lookup(?BAGGAGE_HEADER, Headers) of
                           undefined ->
                               CurrentBaggage;
                           String ->
                               %% Entries are comma-separated `k=v' pairs.
                               Pairs = string:lexemes(String, [$,]),
                               lists:foldl(fun(Pair, Acc) ->
                                                   [Key, Value] = string:split(Pair, "="),
                                                   Acc#{decode_key(Key) =>
                                                            decode_value(Value)}
                                           end, CurrentBaggage, Pairs)
                       end
               end,
    Inject = otel_ctx:text_map_injector(?BAGGAGE_KEY, ToText),
    Extract = otel_ctx:text_map_extractor(?BAGGAGE_KEY, FromText),
    {Extract, Inject}.
%%
%% checks the keys, values and metadata are valid and drops them if they are not
%% all strings are converted to binaries
%%
%% Invalid keys (anything to_binary/1 rejects) and invalid values are
%% silently dropped rather than raising — baggage is best-effort.
verify_baggage(KeyValues) ->
    maps:fold(fun(K, V, Acc) ->
                      %% TODO: filter out keys with invalid characters here
                      case to_binary(K) of
                          error ->
                              Acc;
                          BinKey ->
                              case update_metadata(V) of
                                  {true, ValueMetadata} ->
                                      Acc#{BinKey => ValueMetadata};
                                  _ ->
                                      Acc
                              end
                      end
              end, #{}, KeyValues).
%% Normalize a key/value into a UTF-8 binary, returning the atom
%% `error' when the input cannot be represented as one.
%%
%% `unicode:characters_to_binary/1' signals failure two different ways:
%% it raises `badarg' for non-chardata input, but *returns* an
%% `{error, _, _}' / `{incomplete, _, _}' tuple for chardata containing
%% invalid code points. The previous version only caught the exception,
%% so an invalid code point leaked the error tuple out as if it were a
%% valid binary key/value; the `of' clause below rejects that case too.
to_binary(String) when is_list(String) ->
    try unicode:characters_to_binary(String) of
        Bin when is_binary(Bin) -> Bin;
        _ -> error
    catch
        _:_ -> error
    end;
to_binary(String) when is_atom(String) ->
    atom_to_binary(String, utf8);
to_binary(String) when is_binary(String) ->
    String;
to_binary(_) ->
    error.
%% Normalize a baggage value into `{true, {Value, Metadata}}' or
%% `false' when invalid. Charlist values are converted to binaries;
%% bare values get empty metadata; metadata entries that fail
%% verify_metadata/1 are filtered out.
update_metadata({Value, Metadata}) when is_list(Value) ->
    case to_binary(Value) of
        error ->
            false;
        BinValue ->
            update_metadata(BinValue, Metadata)
    end;
update_metadata(Value) when is_list(Value) ->
    case to_binary(Value) of
        error ->
            false;
        BinValue ->
            update_metadata(BinValue, [])
    end;
update_metadata({Value, Metadata}) ->
    update_metadata(Value, Metadata);
update_metadata(Value) ->
    update_metadata(Value, []).

%% Final check: value must be a binary or list and metadata a list.
update_metadata(Value, Metadata) when (is_binary(Value) orelse is_list(Value)) andalso is_list(Metadata) ->
    {true, {Value, lists:filtermap(fun verify_metadata/1, Metadata)}};
update_metadata(_, _) ->
    false.
%% A metadata entry is valid when it is either a bare binary or a
%% `{Key, Value}' pair of binaries.
verify_metadata({MK, MV}) ->
    is_binary(MK) andalso is_binary(MV);
verify_metadata(M) ->
    is_binary(M).
%% Case-insensitive search of a {Name, Value} header list; returns the
%% first matching value or `undefined'.
lookup(Name, [{Candidate, Value} | Rest]) ->
    case string:equal(Name, Candidate, true, none) of
        true -> Value;
        false -> lookup(Name, Rest)
    end;
lookup(_Name, []) ->
    undefined.
%% Percent-encode a baggage key for the `baggage' header.
encode_key(Key) ->
    form_urlencode(Key, [{encoding, utf8}]).

%% Percent-encode a value and join its metadata entries with `;', per
%% the W3C baggage header syntax (`value;mk=mv;flag').
encode_value({Value, Metadata}) ->
    EncodedMetadata = encode_metadata(Metadata),
    EncodedValue = form_urlencode(Value, [{encoding, utf8}]),
    unicode:characters_to_binary(lists:join(<<";">>, [EncodedValue | EncodedMetadata])).
%% Render metadata entries as iolist fragments: a `{K, V}' pair of
%% binaries becomes `[K, <<"=">>, V]', a bare binary passes through,
%% and anything else is dropped. Non-list input yields no metadata.
encode_metadata(Metadata) when is_list(Metadata) ->
    lists:foldr(fun(Entry, Acc) ->
                        case Entry of
                            {MK, MV} when is_binary(MK), is_binary(MV) ->
                                [[MK, <<"=">>, MV] | Acc];
                            M when is_binary(M) ->
                                [M | Acc];
                            _ ->
                                Acc
                        end
                end, [], Metadata);
encode_metadata(_) ->
    [].
%% Trim and percent-decode a baggage key from a header.
decode_key(Key) ->
    percent_decode(string:trim(unicode:characters_to_binary(Key))).

%% Split `value;mk=mv;flag' into the decoded value plus a list of
%% decoded metadata entries.
decode_value(ValueAndMetadata) ->
    [Value | MetadataList] = string:lexemes(ValueAndMetadata, [$;]),
    {string_decode(Value), lists:filtermap(fun metadata_decode/1, MetadataList)}.

%% A metadata fragment is either a bare flag or a `k=v' pair; anything
%% that splits into more than two parts is dropped.
metadata_decode(Metadata) ->
    case string:split(Metadata, "=") of
        [MetadataKey] ->
            {true, string_decode(MetadataKey)};
        [MetadataKey, MetadataValue] ->
            {true, {string_decode(MetadataKey), string_decode(MetadataValue)}};
        _ ->
            false
    end.

%% Trim and percent-decode an arbitrary header fragment.
string_decode(S) ->
    percent_decode(string:trim(unicode:characters_to_binary(S))).
%% TODO: call `uri_string:percent_decode' and remove this when OTP-23 is
%% the oldest version we maintain support for
%%
%% Decode `%XX' escapes in a list or binary; returns the decoded data
%% or an error tuple from the raw decoder.
-spec percent_decode(URI) -> Result when
      URI :: uri_string:uri_string(),
      Result :: uri_string:uri_string() |
                {error, {invalid, {atom(), {term(), term()}}}}.
percent_decode(URI) when is_list(URI) orelse
                         is_binary(URI) ->
    raw_decode(URI).
%% TODO: call `uri_string:percent_encode' when it is added to OTP and
%% available in the oldest version we support
%%
%% application/x-www-form-urlencoded encoder: only `utf8'/`unicode'
%% encodings are supported; anything else throws a tagged error.
form_urlencode(Cs, [{encoding, Encoding}])
  when is_list(Cs), Encoding =:= utf8; Encoding =:= unicode ->
    B = convert_to_binary(Cs, utf8, Encoding),
    html5_byte_encode(B);
form_urlencode(Cs, [{encoding, Encoding}])
  when is_binary(Cs), Encoding =:= utf8; Encoding =:= unicode ->
    html5_byte_encode(Cs);
form_urlencode(Cs, [{encoding, Encoding}]) when is_list(Cs); is_binary(Cs) ->
    throw({error,invalid_encoding, Encoding});
form_urlencode(Cs, _) ->
    throw({error,invalid_input, Cs}).
%% Byte-wise HTML5 form encoding: spaces become `+', safe bytes pass
%% through, everything else becomes `%XX' (via the ?DEC2HEX macro).
html5_byte_encode(B) ->
    html5_byte_encode(B, <<>>).
%%
html5_byte_encode(<<>>, Acc) ->
    Acc;
html5_byte_encode(<<$ ,T/binary>>, Acc) ->
    html5_byte_encode(T, <<Acc/binary,$+>>);
html5_byte_encode(<<H,T/binary>>, Acc) ->
    case is_url_char(H) of
        true ->
            html5_byte_encode(T, <<Acc/binary,H>>);
        false ->
            %% Split the byte into two nibbles and emit %XY.
            <<A:4,B:4>> = <<H>>,
            html5_byte_encode(T, <<Acc/binary,$%,(?DEC2HEX(A)),(?DEC2HEX(B))>>)
    end;
html5_byte_encode(H, _Acc) ->
    throw({error,invalid_input, H}).
%% True when a byte may appear unescaped in a form-urlencoded string:
%% `*', `-', `.', `_', ASCII digits, and ASCII letters
%% (0x2A, 0x2D, 0x2E, 0x30-0x39, 0x41-0x5A, 0x5F, 0x61-0x7A).
is_url_char($*) -> true;
is_url_char($-) -> true;
is_url_char($.) -> true;
is_url_char($_) -> true;
is_url_char(C) when $0 =< C, C =< $9 -> true;
is_url_char(C) when $A =< C, C =< $Z -> true;
is_url_char(C) when $a =< C, C =< $z -> true;
is_url_char(_) -> false.
%% Convert character data between encodings via unicode:characters_to_binary/3.
%% A partial conversion ({error, ...} or {incomplete, ...}) is turned into a
%% thrown {error, invalid_input, Rest} carrying the unconvertible tail.
convert_to_binary(Input, InEncoding, OutEncoding) ->
    case unicode:characters_to_binary(Input, InEncoding, OutEncoding) of
        Converted when is_binary(Converted) ->
            Converted;
        {error, _Converted, Rest} ->
            throw({error, invalid_input, Rest});
        {incomplete, _Converted, Rest} ->
            throw({error, invalid_input, Rest})
    end.
%% Percent-decode a char list or binary, verifying that the decoded
%% bytes form valid UTF-8.
-spec raw_decode(list()|binary()) -> list() | binary() | uri_string:error().
raw_decode(Cs) ->
    raw_decode(Cs, <<>>).
%%
%% List input: convert to a UTF-8 binary, decode that, then convert the
%% result back to a list. Tagged errors thrown during decoding are
%% mapped to an error tuple here; for binary input they propagate to
%% the caller as throws.
raw_decode(L, Acc) when is_list(L) ->
    try
        B0 = unicode:characters_to_binary(L),
        B1 = raw_decode(B0, Acc),
        unicode:characters_to_list(B1)
    catch
        throw:{error, Atom, RestData} ->
            {error, Atom, RestData}
    end;
%% A `%XY' escape decodes to the byte with hexadecimal value XY; a `%'
%% not followed by two hex digits is rejected.
raw_decode(<<$%,C0,C1,Cs/binary>>, Acc) ->
    case is_hex_digit(C0) andalso is_hex_digit(C1) of
        true ->
            B = ?HEX2DEC(C0)*16+?HEX2DEC(C1),
            raw_decode(Cs, <<Acc/binary, B>>);
        false ->
            throw({error,invalid_percent_encoding,<<$%,C0,C1>>})
    end;
%% Any other byte is copied through unchanged.
raw_decode(<<C,Cs/binary>>, Acc) ->
    raw_decode(Cs, <<Acc/binary, C>>);
%% End of input: validate the accumulated bytes as UTF-8.
raw_decode(<<>>, Acc) ->
    check_utf8(Acc).
%% Return Cs unchanged when it decodes as valid UTF-8; otherwise throw
%% {error, invalid_utf8, Cs}.
check_utf8(Cs) ->
    case unicode:characters_to_list(Cs) of
        Chars when is_list(Chars) ->
            Cs;
        %% characters_to_list/1 otherwise returns {error, _, _} or
        %% {incomplete, _, _}; both mean the input is not valid UTF-8.
        _IncompleteOrError ->
            throw({error, invalid_utf8, Cs})
    end.
%% Return true when C is an ASCII hexadecimal digit (0-9, a-f, A-F).
%% FIX: the final line carried trailing non-Erlang residue
%% ("| apps/... | starcoder |") that broke compilation; removed.
-spec is_hex_digit(char()) -> boolean().
is_hex_digit(C)
  when $0 =< C, C =< $9; $a =< C, C =< $f; $A =< C, C =< $F -> true;
is_hex_digit(_) -> false.
%%
%% Copyright (c) 2015-2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Boolean primitive CRDT.
%%
%% @reference <NAME>, <NAME>, <NAME> and <NAME>
%% Composition of State-based CRDTs (2015)
%% [http://haslab.uminho.pt/cbm/files/crdtcompositionreport.pdf]
-module(state_boolean).
-author("<NAME> <<EMAIL>>").
-behaviour(type).
-behaviour(state_type).
-define(TYPE, ?MODULE).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/0, new/1]).
-export([mutate/3, delta_mutate/3, merge/2]).
-export([query/1, equal/2, is_bottom/1,
is_inflation/2, is_strict_inflation/2,
irreducible_is_strict_inflation/2]).
-export([join_decomposition/1, delta/2, digest/1]).
-export([encode/2, decode/2]).
-export_type([state_boolean/0, state_boolean_op/0]).
-opaque state_boolean() :: {?TYPE, payload()}.
-type payload() :: 0 | 1.
-type state_boolean_op() :: true.
%% @doc Create a new `state_boolean()'.
%% The initial (bottom) payload is `0', i.e. false.
-spec new() -> state_boolean().
new() ->
    {?TYPE, 0}.
%% @doc Create a new `state_boolean()'.
%% No construction arguments are supported: the list must be empty,
%% and the result is the same as `new/0'.
-spec new([term()]) -> state_boolean().
new([]) ->
    new().
%% @doc Mutate a `state_boolean()'.
%% Delegates to the generic `state_type:mutate/3'; only the `true'
%% operation is defined for this type (see `delta_mutate/3').
-spec mutate(state_boolean_op(), type:id(), state_boolean()) ->
    {ok, state_boolean()}.
mutate(Op, Actor, {?TYPE, _Boolean}=CRDT) ->
    state_type:mutate(Op, Actor, CRDT).
%% @doc Delta-mutate a `state_boolean()'.
%% The first argument can only be `true'.
%% The second argument is the replica id (unused here: the delta does
%% not depend on the acting replica).
%% The third argument is the `state_boolean()' to be inflated.
%% The returned delta is always the top value `{?TYPE, 1}'.
-spec delta_mutate(state_boolean_op(), type:id(), state_boolean()) ->
    {ok, state_boolean()}.
delta_mutate(true, _Actor, {?TYPE, _Boolean}) ->
    {ok, {?TYPE, 1}}.
%% @doc Returns the value of the `state_boolean()'.
%% Maps the internal 0/1 payload to a boolean: `1' is `true',
%% `0' is `false'.
-spec query(state_boolean()) -> boolean().
query({?TYPE, Boolean}) ->
    Boolean == 1.
%% @doc Merge two `state_boolean()'.
%% Join is the logical or, computed as the maximum of the two 0/1
%% payloads.
-spec merge(state_boolean(), state_boolean()) -> state_boolean().
merge({?TYPE, Boolean1}, {?TYPE, Boolean2}) ->
    {?TYPE, max(Boolean1, Boolean2)}.
%% @doc Equality for `state_boolean()'.
%% Two states are equal exactly when their payloads are equal.
-spec equal(state_boolean(), state_boolean()) -> boolean().
equal({?TYPE, Boolean1}, {?TYPE, Boolean2}) ->
    Boolean1 == Boolean2.
%% @doc Check if a Boolean is bottom.
%% Bottom is the initial payload `0' produced by `new/0'.
-spec is_bottom(state_boolean()) -> boolean().
is_bottom({?TYPE, Boolean}) ->
    Boolean == 0.
%% @doc Given two `state_boolean()', check if the second is an inflation
%% of the first.
%% An inflation is either the transition from the bottom payload `0'
%% to the top payload `1', or equality.
-spec is_inflation(state_boolean(), state_boolean()) -> boolean().
is_inflation({?TYPE, First}, {?TYPE, Second}) ->
    (First == 0 andalso Second == 1) orelse First == Second.
%% @doc Check for strict inflation.
%% Delegates to the generic implementation in `state_type'.
-spec is_strict_inflation(state_boolean(), state_boolean()) -> boolean().
is_strict_inflation({?TYPE, _}=CRDT1, {?TYPE, _}=CRDT2) ->
    state_type:is_strict_inflation(CRDT1, CRDT2).
%% @doc Check for irreducible strict inflation.
%% Compares an irreducible state against a digest; delegates to the
%% generic implementation in `state_type'.
-spec irreducible_is_strict_inflation(state_boolean(),
                                      state_type:digest()) ->
    boolean().
irreducible_is_strict_inflation({?TYPE, _}=A, B) ->
    state_type:irreducible_is_strict_inflation(A, B).
%% @doc Digest of a `state_boolean()'.
%% The full state serves as its own digest, tagged with `state'.
-spec digest(state_boolean()) -> state_type:digest().
digest({?TYPE, _}=CRDT) ->
    {state, CRDT}.
%% @doc Join decomposition for `state_boolean()'.
%% A boolean is not decomposable into smaller irreducibles, so the
%% decomposition is the singleton list holding the state itself.
-spec join_decomposition(state_boolean()) -> [state_boolean()].
join_decomposition({?TYPE, _}=Boolean) ->
    [Boolean].
%% @doc Delta calculation for `state_boolean()'.
%% Computes the delta between a state and a digest; delegates to the
%% generic implementation in `state_type'.
-spec delta(state_boolean(), state_type:digest()) -> state_boolean().
delta({?TYPE, _}=A, B) ->
    state_type:delta(A, B).
%% @doc Encode a `state_boolean()' in the given format.
%% Only the `erlang' format (external term format) is supported.
-spec encode(state_type:format(), state_boolean()) -> binary().
encode(erlang, {?TYPE, _}=CRDT) ->
    erlang:term_to_binary(CRDT).
%% @doc Decode a binary produced by `encode/2'.
%% The match on `{?TYPE, _}' asserts the decoded term really is a
%% `state_boolean()' and crashes otherwise.
%% NOTE(review): `binary_to_term/1' on untrusted input can construct
%% arbitrary terms (atoms, funs); only decode data from trusted peers.
-spec decode(state_type:format(), binary()) -> state_boolean().
decode(erlang, Binary) ->
    {?TYPE, _} = CRDT = erlang:binary_to_term(Binary),
    CRDT.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).

%% A fresh boolean starts at the bottom payload 0.
new_test() ->
    ?assertEqual({?TYPE, 0}, new()).

%% query/1 maps payload 0 to false and payload 1 to true.
query_test() ->
    Boolean0 = new(),
    Boolean1 = {?TYPE, 1},
    ?assertEqual(false, query(Boolean0)),
    ?assertEqual(true, query(Boolean1)).

%% The delta of the `true' operation is the top value.
delta_true_test() ->
    Boolean0 = new(),
    {ok, {?TYPE, Delta1}} = delta_mutate(true, 1, Boolean0),
    ?assertEqual({?TYPE, 1}, {?TYPE, Delta1}).

%% Mutating with `true' inflates the state to the top value.
true_test() ->
    Boolean0 = new(),
    {ok, Boolean1} = mutate(true, 1, Boolean0),
    ?assertEqual({?TYPE, 1}, Boolean1).

%% merge/2 behaves as logical or for all four payload combinations.
merge_test() ->
    Boolean1 = {?TYPE, 0},
    Boolean2 = {?TYPE, 1},
    Boolean3 = merge(Boolean1, Boolean1),
    Boolean4 = merge(Boolean1, Boolean2),
    Boolean5 = merge(Boolean2, Boolean1),
    Boolean6 = merge(Boolean2, Boolean2),
    ?assertEqual({?TYPE, 0}, Boolean3),
    ?assertEqual({?TYPE, 1}, Boolean4),
    ?assertEqual({?TYPE, 1}, Boolean5),
    ?assertEqual({?TYPE, 1}, Boolean6).

%% Deltas merge with full states and with each other the same way.
merge_deltas_test() ->
    Boolean1 = {?TYPE, 0},
    Delta1 = {?TYPE, 0},
    Delta2 = {?TYPE, 1},
    Boolean3 = merge(Delta1, Boolean1),
    Boolean4 = merge(Boolean1, Delta1),
    DeltaGroup = merge(Delta1, Delta2),
    ?assertEqual({?TYPE, 0}, Boolean3),
    ?assertEqual({?TYPE, 0}, Boolean4),
    ?assertEqual({?TYPE, 1}, DeltaGroup).

%% equal/2 compares payloads.
equal_test() ->
    Boolean1 = {?TYPE, 0},
    Boolean2 = {?TYPE, 1},
    ?assert(equal(Boolean1, Boolean1)),
    ?assertNot(equal(Boolean1, Boolean2)).

%% Only the freshly-created state is bottom.
is_bottom_test() ->
    Boolean0 = new(),
    Boolean1 = {?TYPE, 1},
    ?assert(is_bottom(Boolean0)),
    ?assertNot(is_bottom(Boolean1)).

%% Inflation is reflexive and only 0 -> 1 is a proper step; also
%% cross-checked against the generic merge-based implementation.
is_inflation_test() ->
    Boolean1 = {?TYPE, 0},
    Boolean2 = {?TYPE, 1},
    ?assert(is_inflation(Boolean1, Boolean1)),
    ?assert(is_inflation(Boolean1, Boolean2)),
    ?assertNot(is_inflation(Boolean2, Boolean1)),
    ?assert(is_inflation(Boolean2, Boolean2)),
    %% check inflation with merge
    ?assert(state_type:is_inflation(Boolean1, Boolean1)),
    ?assert(state_type:is_inflation(Boolean1, Boolean2)),
    ?assertNot(state_type:is_inflation(Boolean2, Boolean1)),
    ?assert(state_type:is_inflation(Boolean2, Boolean2)).

%% Strict inflation excludes equality: only 0 -> 1 qualifies.
is_strict_inflation_test() ->
    Boolean1 = {?TYPE, 0},
    Boolean2 = {?TYPE, 1},
    ?assertNot(is_strict_inflation(Boolean1, Boolean1)),
    ?assert(is_strict_inflation(Boolean1, Boolean2)),
    ?assertNot(is_strict_inflation(Boolean2, Boolean1)),
    ?assertNot(is_strict_inflation(Boolean2, Boolean2)).

%% The join decomposition of a boolean is the state itself.
join_decomposition_test() ->
    Boolean1 = {?TYPE, 1},
    Decomp1 = join_decomposition(Boolean1),
    ?assertEqual([Boolean1], Decomp1).

%% encode/2 and decode/2 round-trip the state.
encode_decode_test() ->
    Boolean = {?TYPE, 1},
    Binary = encode(erlang, Boolean),
    EBoolean = decode(erlang, Binary),
    ?assertEqual(Boolean, EBoolean).
-endif.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.