code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
% INSTRUCTIONS: Copy this module and modify as appropriate
% for the function this block will perform.
% Comments marked "INSTRUCTIONS:" may be deleted
% Instructions: Follow the formatting of the @doc comment block exactly as shown
% It is used for generating LinkBlox web pages
%%% @doc
%%% BLOCKTYPE
%%% INSTRUCTIONS: Insert a one line description of the block here
%%% DESCRIPTION
%%% INSTRUCTIONS: Insert description of block operation here
%%% Use as many lines as necessary
%%% LINKS
%%% INSTRUCTIONS: Insert URL(s) here, (one per line)
%%% for links to external resources related to this block
%%% such as hardware data sheets, etc.
%%% @end
% INSTRUCTIONS: Modify to match new block type module name
% All block type module names must begin with "lblx_"
% to uniquely identify them as LinkBlox block type modules.
%
% To allow the user to identify and create blocks, in each language module used:
% add a <module name => block type name string> pair
% to the block_type_names() map in each Language Module,
% Add a <module name => block type description> string pair
% to the block_type_descrs() map in each Language Module,
-module(lblx_template).
-author("<NAME>").
% INSTRUCTIONS: Adjust path to hrl file as needed
-include("../block_state.hrl").
%% ====================================================================
%% API functions
%% ====================================================================
% INSTRUCTIONS: The following 10 functions must be implemented and exported by all block types
-export([groups/0, version/0]).
-export([create/2, create/4, create/5, upgrade/1, initialize/1, execute/2, delete/1]).
% INSTRUCTIONS: Optional custom message handling functions.
% If the block type needs to handle custom messages, not currently handled by block_server(),
% export the necessary function(s) here, and create the function(s) below.
% Otherwise, delete this line.
-export([handle_call/3, handle_cast/2, handle_info/2]).
% INSTRUCTIONS: Classify block type, by assigning it to one or more groups
%% @doc List of group atoms this block type belongs to; used by the
%% language/UI modules to categorize block types. `none' means unclassified.
groups() -> [none].
% INSTRUCTIONS: Add a block type description string to each Language Module,
% INSTRUCTIONS: Set block type version number.
% Use pattern: Major.Minor.Patch
% When a block is created, the Config version attribute value
% is set to this version.
% When a block is loaded from a config file, the version attribute value
% is compared to this.
% If the versions are different, the upgrade() function is called.
%% @doc Current code version of this block type, stored into each
%% block's Config on creation and compared on load (see upgrade/1).
version() -> "0.1.0".
%% Merge the block type specific, Config, Input, and Output attributes
%% with the common Config, Input, and Output attributes, that all block types have
%% Returns the full default Config attribute list for a new block:
%% the common config attributes (name, module, version, description)
%% overlaid with this block type's own config attributes.
-spec default_configs(BlockName :: block_name(),
Description :: string()) -> config_attribs().
default_configs(BlockName, Description) ->
attrib_utils:merge_attribute_lists(
block_common:configs(BlockName, ?MODULE, version(), Description),
[
% INTRUCTIONS: Insert block type specific config attribute tuples here
% Config attribute tuples consist of a value name and a value
% Example: {gpio_pin, {0}}
% Array Example: {start_rows, [{1}, {2}]}
% The block is (re) initialized, when any config value is modified.
{config1, {"Example Value"}}
]).
%% Full default Input attribute list: the common inputs merged with this
%% block type's own inputs. Each input tuple is {Name, {Value, {Default}}}.
-spec default_inputs() -> input_attribs().
default_inputs() ->
attrib_utils:merge_attribute_lists(
block_common:inputs(),
[
% INTRUCTIONS: Insert block type specific input attribute tuples here
% Input attribute tuples consist of a value name, a value, and a default value
% Example: {hi_limit, {100, {100}}}
% Array Example: {inputs, [{empty, {empty}}, {empty, {empty}}]}
{input1, {"Example Input Value", {"Example Input Value"}}}
]).
%% Full default Output attribute list: the common outputs merged with this
%% block type's own outputs. Output tuples are {Name, {Value, Links}} and
%% always start as {null, []}.
-spec default_outputs() -> output_attribs().
default_outputs() ->
attrib_utils:merge_attribute_lists(
block_common:outputs(),
[
% INTRUCTIONS: Insert block type specific output attribute tuples here
% Output attribute tuples consist of a value name, a calculated value,
% and a list of links to block input values
% Output values are always set to 'null' and empty link list on creation
% Example: {dwell, {null, []}}
% Array Example: {digit, [{null, []}, {null, []}]}
{output1, {null, []}}
]).
%%
%% Create a set of block attributes for this block type.
%% Init attributes are used to override the default attribute values
%% and to add attributes to the lists of default attributes
%%
%% Convenience wrapper: create a block with all-default attributes.
-spec create(BlockName :: block_name(),
Description :: string()) -> block_defn().
create(BlockName, Description) ->
create(BlockName, Description, [], [], []).
%% Convenience wrapper: create a block with initial Config and Input
%% overrides but default Outputs.
-spec create(BlockName :: block_name(),
Description :: string(),
InitConfig :: config_attribs(),
InitInputs :: input_attribs()) -> block_defn().
create(BlockName, Description, InitConfig, InitInputs) ->
create(BlockName, Description, InitConfig, InitInputs, []).
%% Build the block definition tuple {Config, Inputs, Outputs} by overlaying
%% the caller-supplied initial attribute values on top of the defaults.
%% Any initial attribute that does not already exist in the default lists
%% is created by merge_attribute_lists/2.
-spec create(BlockName :: block_name(),
             Description :: string(),
             InitConfig :: config_attribs(),
             InitInputs :: input_attribs(),
             InitOutputs :: output_attribs()) -> block_defn().
create(BlockName, Description, InitConfig, InitInputs, InitOutputs) ->
    MergedConfig = attrib_utils:merge_attribute_lists(
        default_configs(BlockName, Description), InitConfig),
    MergedInputs = attrib_utils:merge_attribute_lists(default_inputs(), InitInputs),
    MergedOutputs = attrib_utils:merge_attribute_lists(default_outputs(), InitOutputs),
    %% The block definition tuple.
    {MergedConfig, MergedInputs, MergedOutputs}.
%%
%% Upgrade block attribute values, when block code and block data versions are different
%%
%% Called on block creation when the module's version() does not match the
%% version stored in the block's Config. Returns {ok, UpdatedDefn} on
%% success, or {error, Reason} if the Config version attribute cannot be set.
-spec upgrade(BlockDefn :: block_defn()) -> {ok, block_defn()} | {error, atom()}.
upgrade({Config, Inputs, Outputs}) ->
    % INSTRUCTIONS: Depending on the version(s) perform any necessary
    % adjustments to the block's attributes, to make it compatible with the
    % current block type's code. If upgrading the attributes is not
    % possible, return an error and reason.
    ModuleVer = version(),
    {BlockName, BlockModule, ConfigVer} = config_utils:name_module_version(Config),
    BlockType = type_utils:type_name(BlockModule),
    %% Reuse the already-bound ModuleVer rather than calling version() a
    %% second time, so the stored and the logged version cannot diverge.
    case attrib_utils:set_value(Config, version, ModuleVer) of
        {ok, UpdConfig} ->
            m_logger:info(block_type_upgraded_from_ver_to,
                          [BlockName, BlockType, ConfigVer, ModuleVer]),
            {ok, {UpdConfig, Inputs, Outputs}};
        {error, Reason} ->
            m_logger:error(err_upgrading_block_type_from_ver_to,
                           [Reason, BlockName, BlockType, ConfigVer, ModuleVer]),
            {error, Reason}
    end.
%%
%% Initialize block values
%% Perform any setup here as needed before starting execution
%%
%% Sets the status output to 'initialed' with a null value and returns the
%% (possibly extended) block state. Template leaves Private unchanged.
-spec initialize(BlockState :: block_state()) -> block_state().
initialize({Config, Inputs, Outputs, Private}) ->
% INSTRUCTIONS: Perform block type specific initializations here
% Add and intialize private attributes here
Outputs1 = output_utils:set_value_status(Outputs, null, initialed),
% Placeholder: no private attributes are added by the template.
Private1 = Private,
% This is the block state
{Config, Inputs, Outputs1, Private1}.
%%
%% Execute the block specific functionality
%%
%% Two clauses: the 'disable' clause runs when the block's disable input
%% becomes true; the general clause runs for every other execution method.
%% Only the Outputs (and Private) elements of the state may be modified.
-spec execute(BlockState :: block_state(),
              ExecMethod :: exec_method()) -> block_state().
execute({Config, Inputs, Outputs, Private}, disable) ->
    %% Null all outputs and mark the block disabled. If a block type
    %% controls external resources (hardware, external apps), disable or
    %% default those resources here as well.
    DisabledOutputs = output_utils:update_all_outputs(Outputs, null, disabled),
    {Config, Inputs, DisabledOutputs, Private};
execute({Config, Inputs, Outputs, Private}, _ExecMethod) ->
    %% Example behavior: copy input1 to the value output with status
    %% 'normal', or null the value with status 'input_err' if input1
    %% cannot be read.
    NewOutputs =
        case input_utils:get_any_type(Inputs, input1) of
            {ok, InputVal} ->
                output_utils:set_value_normal(Outputs, InputVal);
            {error, _Reason} ->
                output_utils:set_value_status(Outputs, null, input_err)
        end,
    %% Return updated block state
    {Config, Inputs, NewOutputs, Private}.
%%
%% Delete the block
%%
%% Returns the block definition (state minus Private values) so the caller
%% may reuse it. Private values were created in initialize/1, so they are
%% dropped here.
-spec delete(BlockState :: block_state()) -> block_defn().
delete({Config, Inputs, Outputs, _Private}) ->
% INSTRUCTIONS: Perform any block type specific delete functionality here
% Return block definition, (Block state less Private values)
% in case calling function wants to reuse them.
%
% Private values are created in the block initialization routine
% So they should be deleted here
{Config, Inputs, Outputs}.
% INSTRUCTIONS: If needed, implement block type specific message handling functions here.
% These function(s) are typically requrired for block types that
% interact with 3rd party libraries, external hardware, blocks on other nodes, or timers.
% See: lblx_mqtt_pub_sub and lblx_gpio_di block types for examples
% If all of the messages are already handled in the block_server module,
% these handle_*() functions are not required, and may be deleted.
%%
%% Handle block type specific call message(s)
%%
%% Template fallback: logs any unexpected synchronous request and replies
%% 'ok' without modifying the block state.
-spec handle_call(Request :: term(),
From :: {pid(), Tag :: term()},
BlockState :: block_state()) -> {reply, ok, block_state()}.
handle_call(Request, From, BlockState) ->
{BlockName, BlockModule} = config_utils:name_module(BlockState),
m_logger:warning(block_type_name_unknown_call_msg_from, [BlockModule, BlockName, Request, From]),
{reply, ok, BlockState}.
%%
%% Handle block type specific cast message(s)
%%
%% Template fallback: logs any unexpected asynchronous message and leaves
%% the block state unchanged.
-spec handle_cast(Msg :: term(),
BlockState :: block_state()) -> {noreply, block_state()}.
handle_cast(Msg, BlockState) ->
{BlockName, BlockModule} = config_utils:name_module(BlockState),
m_logger:warning(block_type_name_unknown_cast_msg, [BlockModule, BlockName, Msg]),
{noreply, BlockState}.
%%
%% Handle block type specific info message(s)
%%
%% Template fallback: logs any unexpected raw message (timer expiry, port
%% data, etc.) and leaves the block state unchanged.
-spec handle_info(Info :: term(),
BlockState :: block_state()) -> {noreply, block_state()}.
handle_info(Info, BlockState) ->
{BlockName, BlockModule} = config_utils:name_module(BlockState),
m_logger:warning(block_type_name_unknown_info_msg, [BlockModule, BlockName, Info]),
{noreply, BlockState}.
%% ====================================================================
%% Internal functions
%% ====================================================================
%% ====================================================================
%% Tests
%% ====================================================================
% INSTRUCTIONS: Create unit tests here.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
% INSTRUCTIONS: include this test fixture to exercise the block's input to output functionality
-include("block_io_test_gen.hrl").
% INSTRUCTIONS:
% test_sets() is a list of tuples.
% Each tuple defines one I/O test, and contains an atom and 3 lists.
%
% The atom is the execution method, default value input_cos. (See the block_state.hrl module for a list of execution methods)
%
% The first list contains {config value id, value} tuples.
%
% The second list contains {input value id, value} tuples, that the inputs of the block
% will be set to before executing the block.
%
% The third list contains {output value id, value} tuples, that represent
% the expected output values after executing the block
%
% Each tuple exec_method / config / input / output values tuple represents a test.
%
% A test consists of setting the input values to the input values of the list,
% executing the block, and comparing the actual output values to the expected output values list
%
% The block state is preserved between each test, and used in the subsequent test.
% Any input value IDs not specified in the input list, will not be modified before the test
% Any output value IDs not specified in the output list, will not be compared after the test
%
% Execution method, config value list, and input value list are optional.
% The expected output values list is mandatory
% If the config values list is included, there must be and input values list too.
% Empty lists are allowed
%
%% I/O unit-test table consumed by the block_io_test_gen.hrl fixture.
%% Each tuple: {ExecMethod, ConfigValues, InputValues, ExpectedOutputs};
%% block state is carried over from one test tuple to the next.
test_sets() ->
[
{input_cos, [{config1, 123}], [{input1, "I/O Unit Test 1"}], [{status, normal}, {value, "I/O Unit Test 1"}]},
{input_cos, [], [{input1, "I/O Unit Test 2"}], [{status, normal}, {value, "I/O Unit Test 2"}]}
].
-endif. | src/block_types/lblx_template.erl | 0.5 | 0.521167 | lblx_template.erl | starcoder |
-module(f64_SUITE).
-compile(export_all).
-include_lib("stdlib/include/assert.hrl").
%% Common Test entry point: the list of test cases to run.
%% NOTE(review): g_warm_up/1 is defined below but not listed here, so it
%% never runs — confirm whether that is intentional.
all() ->
[
format_neg_zero,
g_big_pos_float,
g_small_neg_float,
g_close_to_zero,
g_denormalized,
g_normalized,
g_choice,
g_ryu,
g_anomalous,
g_misc
].
%% Negative zero (sign bit set, all other bits clear) must format as "-0.0".
format_neg_zero(_Config) ->
<<NegZero/float>> = <<16#8000000000000000:64>>,
?assertEqual("-0.0", binary_to_list(iolist_to_binary(ken_ryu_f64:fwrite_g(<<NegZero/float>>)))).
-define(ONE(N), 1 bsl N - 1).
-define(ALL_ONES, ((1 bsl 52) - 1)).
%% Smoke test over a handful of hand-picked doubles.
%% NOTE(review): not referenced from all/0, so common_test will not run
%% this case — confirm whether it should be added to the suite list.
g_warm_up(_Config) ->
g_t(0.5),
g_t(-0.5),
g_t((1 bsl 55) * 0.5),
g_t(-(1 bsl 55) * 0.5),
g_t(1.6799127650033296e+308),
g_t(pack(1, 0, 2#1010101010001100101000010111100101000000101100110001)),
g_t(pack(1, 0, 2#0001010000001001110100000101010101001110010001010110)),
g_t(234324324.23432432432432),
ok.
%% Exercise the greatest positive finite double and its 100 nearest
%% smaller neighbours (ft/1 walks the neighbours via dec/1).
g_big_pos_float(_Config) ->
%% The greatest positive float:
ft({{0, 2046, ?ONE(52)}, 100, 0}),
ok.
%% Exercise the least negative finite double and its 100 nearest
%% greater neighbours (ft/1 walks the neighbours via inc/1).
g_small_neg_float(_Config) ->
%% The least negative float:
ft({{1, 2046, ?ONE(52)}, 0, 100}),
ok.
%% Exercise denormalized doubles straddling zero, plus negative zero.
g_close_to_zero(_Config) ->
%% A few denormalized floats close to zero:
ft({{0, 0, 0}, 100, 100}),
% -0.0
g_t(pack(1, 0, 0)),
ok.
%% Denormalized doubles (exponent 0) with all-ones mantissas of every
%% width 0..52, both signs; D neighbours on each side per value.
g_denormalized(_Config) ->
%% Denormalized floats (mantissa carry):
%% D = 5,
%% Faster:
D = 1,
[ft({{S, 0, ?ONE(N)}, D, D}) || S <- [0, 1], N <- lists:seq(0, 52)],
ok.
%% Normalized doubles with a full mantissa at every finite exponent,
%% both signs; D neighbours on each side per value (exponent carry path).
g_normalized(_Config) ->
%% Normalized floats (exponent carry):
%% D = 5,
%% Faster:
D = 1,
[ft({{S, E, ?ONE(52)}, D, D}) || S <- [0, 1], E <- lists:seq(0, 2045)],
ok.
%% Spot-check that exponent notation is chosen exactly when it yields a
%% shorter string (g_choice_1/1, called from g_t/1, does the checking).
g_choice(_Config) ->
%% Exponent should be used when and only when the string is shorter.
%% (g_misc/0 checks this too, and probably more throughly).
L = [
0.0003,
3.0e-5,
3.3e-5,
3.3e-4,
314.0,
314.1,
310.0,
3.1e6,
-100.0,
3.34e4,
3.0e3,
3.34333e9,
3.3433323e10,
33433323700.0,
0.00197963,
1.97963e-4
],
lists:foreach(fun(V) -> g_t(V) end, L),
ok.
%% Exhaustive lists of "anomalous" doubles whose shortest round-trip
%% representation differs (by 1 ulp of the last printed digit) from the
%% nearest-decimal rounding; the values are regression data and must not
%% be edited. See the linked references for derivation.
g_anomalous(_Config) ->
%% These test cases come from https://github.com/microsoft/STL/blob/f1515e04fd00876137e762c08b90d9aa450859e0/tests/std/tests/P0067R5_charconv/double_to_chars_test_cases.hpp
%% https://www.exploringbinary.com/the-shortest-decimal-string-that-round-trips-may-not-be-the-nearest/
%% This is an exhaustive list of anomalous values
%% Because math, these values have shortest-round-trip decimal representations containing 16 significant digits,
%% but those decimal digits aren't what would be printed by "%.15e". For ordinary values, shortest-round-trip
%% behaves as if it can magically pick a precision for "%.*e", finding the smallest precision that round-trips.
%% (That is, start with the exact decimal representation, and then round it as much as possible.) These anomalous
%% values demonstrate an exception to that mental model. They aren't being "incorrectly rounded"; instead, having
%% the shortest output that round-trips is being prioritized. (This differs by 1 in the least significant decimal
%% digit printed, so it's a very small difference.)
L_anom =
[
6.386688990511104e+293,
5.282945311356653e+269,
6.150157786156811e+259,
5.334411546303884e+241,
5.386379163185535e+213,
6.483618076376552e+178,
6.183260036827614e+172,
5.896816288783659e+166,
5.758609657015292e+163,
5.623642243178996e+160,
6.243497100631985e+144,
8.263199609878108e+121,
6.455624695217272e+119,
6.156563468186638e+113,
7.167183174968974e+103,
6.518515124270356e+91,
6.070840288205404e+82,
6.129982163463556e+54,
5.986310706507379e+51,
5.444517870735016e+39,
5.316911983139664e+36,
6.189700196426902e+26,
5.960464477539063e-08,
5.684341886080802e-14,
6.617444900424222e-24,
6.310887241768095e-30,
7.174648137343064e-43,
7.854549544476363e-90,
6.653062250012736e-111,
5.075883674631299e-116,
6.256509672447191e-148,
4.887898181599368e-150,
5.966672584960166e-154,
5.426657103235053e-166,
5.351097043477547e-197,
5.225680706521042e-200,
6.083493012144512e-210,
5.940911144672375e-213,
6.290184345309701e-235,
6.142758149716505e-238,
7.678447687145631e-239,
5.858190679279809e-244,
5.641232424577593e-278,
8.209073602596753e-289,
7.291122019556398e-304,
7.120236347223045e-307
],
lists:foreach(fun(V) -> g_t(V) end, L_anom),
%% This is an exhaustive list of almost-but-not-quite-anomalous values.
L_quasi_anom =
[
6.237000967296e+290,
6.090821257125e+287,
8.25460204899477e+267,
5.78358058743443e+222,
7.1362384635298e+44,
6.10987272699921e-151,
5.17526350329881e-172,
6.84940421565126e-195
],
lists:foreach(fun(V) -> g_t(V) end, L_quasi_anom),
ok.
%% White-box tests that target specific edge cases of the Ryu
%% shortest-round-trip algorithm; values are regression data and must not
%% be edited. See:
%% https://github.com/ulfjack/ryu/blob/master/ryu/tests/d2s_test.cc
g_ryu(_Config) ->
    %% Regression values from the ryu C reference implementation.
    L_regression =
        [
            -2.109808898695963e16,
            4.940656e-318,
            1.18575755E-316,
            2.989102097996e-312,
            9.0608011534336e15,
            4.708356024711512e18,
            9.409340012568248e18,
            1.2345678
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_regression),
    %% These numbers have a mantissa that is a multiple of the largest power of 5 that fits,
    %% and an exponent that causes the computation for q to result in 22, which is a corner
    %% case for Ryu.
    L_pow5 = [16#4830F0CF064DD592, 16#4840F0CF064DD592, 16#4850F0CF064DD592],
    lists:foreach(fun(V) -> g_t(i_2_d(V)) end, L_pow5),
    %% Test 32-bit chunking, 2^32 +- 1/2.
    %% (FIX: the original contained this section twice, verbatim; the second
    %% copy merely re-matched L_32bits against the identical list and re-ran
    %% the same tests, so the duplicate has been removed.)
    L_32bits = [4.294967294, 4.294967295, 4.294967296, 4.294967297, 4.294967298],
    lists:foreach(fun(V) -> g_t(V) end, L_32bits),
    %% Increasing number of significant digits, 2..16.
    L = [
        1.2e+1,
        1.23e+2,
        1.234e+3,
        1.2345e+4,
        1.23456e+5,
        1.234567e+6,
        1.2345678e+7,
        1.23456789e+8,
        1.23456789e+9,
        1.234567895e+9,
        1.2345678901e+10,
        1.23456789012e+11,
        1.234567890123e+12,
        1.2345678901234e+13,
        1.23456789012345e+14,
        1.234567890123456e+15
    ],
    lists:foreach(fun(V) -> g_t(V) end, L),
    %% power of 2
    L_pow2 =
        [
            8.0,
            64.0,
            512.0,
            8192.0,
            65536.0,
            524288.0,
            8388608.0,
            67108864.0,
            536870912.0,
            8589934592.0,
            68719476736.0,
            549755813888.0,
            8796093022208.0,
            70368744177664.0,
            562949953421312.0,
            9007199254740992.0
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_pow2),
    %% 1000 * power of 2
    L_pow2_1000 =
        [
            8.0e+3,
            64.0e+3,
            512.0e+3,
            8192.0e+3,
            65536.0e+3,
            524288.0e+3,
            8388608.0e+3,
            67108864.0e+3,
            536870912.0e+3,
            8589934592.0e+3,
            68719476736.0e+3,
            549755813888.0e+3,
            8796093022208.0e+3
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_pow2_1000),
    %% 10^15 + 10^i
    L_pow10_plus =
        [
            1.0e+15 + 1.0e+0,
            1.0e+15 + 1.0e+1,
            1.0e+15 + 1.0e+2,
            1.0e+15 + 1.0e+3,
            1.0e+15 + 1.0e+4,
            1.0e+15 + 1.0e+5,
            1.0e+15 + 1.0e+6,
            1.0e+15 + 1.0e+7,
            1.0e+15 + 1.0e+8,
            1.0e+15 + 1.0e+9,
            1.0e+15 + 1.0e+10,
            1.0e+15 + 1.0e+11,
            1.0e+15 + 1.0e+12,
            1.0e+15 + 1.0e+13,
            1.0e+15 + 1.0e+14
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_pow10_plus),
    %% min and max
    g_t(i_2_d(1)),
    g_t(i_2_d(16#7fefffffffffffff)),
    %% lots of trailing zeroes
    g_t(2.98023223876953125e-8),
    %% Switch to Subnormal
    g_t(2.2250738585072014e-308),
    %% special case to check for the shift to the right by 128
    L_shift =
        [
            parts_2_f(0, 4, 0),
            parts_2_f(0, 6, 1 bsl 53 - 1),
            parts_2_f(0, 41, 0),
            parts_2_f(0, 40, 1 bsl 53 - 1),
            parts_2_f(0, 1077, 0),
            parts_2_f(0, 1076, 1 bsl 53 - 1),
            parts_2_f(0, 307, 0),
            parts_2_f(0, 306, 1 bsl 53 - 1),
            parts_2_f(0, 934, 16#000FA7161A4D6E0C)
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_shift),
    %% following test cases come from https://github.com/microsoft/STL/blob/f1515e04fd00876137e762c08b90d9aa450859e0/tests/std/tests/P0067R5_charconv/double_to_chars_test_cases.hpp
    %% These numbers have odd mantissas (unaffected by shifting)
    %% that are barely within the "max shifted mantissa" limit.
    L_mantissas_within_limit =
        [
            1801439850948197.0e1,
            360287970189639.0e2,
            72057594037927.0e3,
            14411518807585.0e4,
            2882303761517.0e5,
            576460752303.0e6,
            115292150459.0e7,
            23058430091.0e8,
            4611686017.0e9,
            922337203.0e10,
            184467439.0e11,
            36893487.0e12,
            7378697.0e13,
            1475739.0e14,
            295147.0e15,
            59029.0e16,
            11805.0e17,
            2361.0e18,
            471.0e19,
            93.0e20,
            17.0e21,
            3.0e22
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_mantissas_within_limit),
    %% These numbers have odd mantissas (unaffected by shifting)
    %% that are barely above the "max shifted mantissa" limit.
    L_mantissas_above_limit =
        [
            1801439850948199.0e1,
            360287970189641.0e2,
            72057594037929.0e3,
            14411518807587.0e4,
            2882303761519.0e5,
            576460752305.0e6,
            115292150461.0e7,
            23058430093.0e8,
            4611686019.0e9,
            922337205.0e10,
            184467441.0e11,
            36893489.0e12,
            7378699.0e13,
            1475741.0e14,
            295149.0e15,
            59031.0e16,
            11807.0e17,
            2363.0e18,
            473.0e19,
            95.0e20,
            19.0e21,
            5.0e22
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_mantissas_above_limit),
    %% Combined within/above-limit values plus four extra chunking cases,
    %% kept as given by the upstream test data.
    L_switch =
        [
            1801439850948197.0e1,
            360287970189639.0e2,
            72057594037927.0e3,
            14411518807585.0e4,
            2882303761517.0e5,
            576460752303.0e6,
            115292150459.0e7,
            23058430091.0e8,
            4611686017.0e9,
            922337203.0e10,
            184467439.0e11,
            36893487.0e12,
            7378697.0e13,
            1475739.0e14,
            295147.0e15,
            59029.0e16,
            11805.0e17,
            2361.0e18,
            471.0e19,
            93.0e20,
            17.0e21,
            3.0e22,
            1801439850948199.0e1,
            360287970189641.0e2,
            72057594037929.0e3,
            14411518807587.0e4,
            2882303761519.0e5,
            576460752305.0e6,
            115292150461.0e7,
            23058430093.0e8,
            4611686019.0e9,
            922337205.0e10,
            184467441.0e11,
            36893489.0e12,
            7378699.0e13,
            1475741.0e14,
            295149.0e15,
            59031.0e16,
            11807.0e17,
            2363.0e18,
            473.0e19,
            95.0e20,
            19.0e21,
            5.0e22,
            302230528.0e15,
            302232576.0e15,
            81123342286848.0e18,
            81192061763584.0e18
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_switch),
    L_edge =
        [
            123456789012345683968.0,
            1.9156918820264798e-56,
            6.6564021122018745e+264,
            4.91e-6,
            5.547e-6
        ],
    lists:foreach(fun(V) -> g_t(V) end, L_edge),
    ok.
%% Broad sweeps over signed powers of ten scaled by small digits, and over
%% long strings of zeros around a single digit, built with list_to_float/1.
g_misc(_Config) ->
L_0_308 = lists:seq(0, 308),
L_0_307 = lists:seq(0, 307),
%% Faster:
L_1_9 = [1, 5, 9],
L_0_9 = [0, 1, 5, 9],
%% 1.0,10.0, ... 2.0,20.0, ... 9.0,90.0, ... -1,-10, ... -2.0,-20.0...
[g_t(S * T * pow10(N)) || S <- [1.0, -1.0], T <- L_1_9, N <- L_0_307],
%% 1.0,1.0/10,1.0/100,... 2.0,2.0/10,2.0/100, ... 9.0,9.0/10,9.0/100,
%% -1.0,-1.0/10,... -9.0,-9.0/10...
[g_t(S * T / pow10(N)) || S <- [1.0, -1.0], T <- L_1_9, N <- L_0_308],
%% 0.0,1.0,2.0,...,9.0, 0.0,10.0,20.0,...,90.0,...
%% 0.0,-1.0,-2.0,...,-9.0, 0.0,-10.0,-20.0,...,-90.0,...
[
g_t(S * list_to_float([D + $0] ++ lists:duplicate(N, $0) ++ ".0"))
|| S <- [1.0, -1.0], N <- lists:seq(0, 300), D <- L_0_9
],
%% 0.0,0.1,0.2,...0,9, 0.0,0.01,0.02,...,0.09,
%% 0.0,-0.1,-0.2,...-0,9, 0.0,-0.01,-0.02,...,-0.09,
[
g_t(S * list_to_float("0." ++ lists:duplicate(N, $0) ++ [D + $0]))
|| S <- [1.0, -1.0], N <- lists:seq(0, 300), D <- L_0_9
],
ok.
%% ft({Float-or-Parts, Less, Greater}): test the value itself, then its
%% `Greater' successors (via inc/1) and `Less' predecessors (via dec/1).
ft({{S, E, M}, L, G}) ->
ft({pack(S, E, M), L, G});
ft({V, Less, Greater}) when is_float(V) ->
_ = g_t(V),
ft(V, fun inc/1, Greater),
ft(V, fun dec/1, Less).
%% ft/3: apply step function F to V, I times, testing each stepped value.
ft(V0, F, I) when I > 0, is_float(V0) ->
V = F(V0),
_ = g_t(V),
ft(V, F, I - 1);
ft(V, _F, 0) when is_float(V) ->
ok.
%% Format V with the function under test and validate the resulting
%% string with g_t/2; returns the string for callers that want it.
g_t(V) when is_float(V) ->
Io = ken_ryu_f64:fwrite_g(<<V/float>>),
Sv = binary_to_list(iolist_to_binary(Io)),
ok = g_t(V, Sv),
Sv.
%% -> ok | THROW
%% Checks that Sv is the shortest, correctly rounded string that
%% converts to V when read back with list_to_float/1.
%% Note: in a few cases the least significant digit has been
%% incremented by one, namely when the correctly rounded string
%% converts to another floating point number.
g_t(V, Sv) when V > 0.0; V < 0.0 ->
try
g_t_1(V, Sv)
catch
Reason ->
throw({Reason, V, Sv})
end;
%% Zero clause: distinguish +0.0 from -0.0 by the sign bit, since the
%% guards above do not match either zero.
g_t(Zero, Format) ->
case <<Zero/float>> of
<<1:1, _:63>> ->
"-0.0" = Format,
ok;
<<0:1, _:63>> ->
"0.0" = Format,
ok
end.
%% Core validation of a formatted string Sv against the exact value V,
%% using exact rational arithmetic (s2r/f2r). Verifies, in order:
%% 1) the last digit is correctly rounded, 2) Sv is closer to V than to
%% V's floating-point neighbours, 3) no shorter prefix round-trips,
%% 4) Sv round-trips exactly, 5) the exponent-notation choice is optimal.
g_t_1(V, Sv) ->
%% Check that the least significant digit is correct.
%% If Sv is "3.14" then Sv- is "3.13" and Sv+ is "3.15".
%% Check that |V - Sv| =< (V - Sv-) and
%% that |V - Sv| =< (Sv+ - V)
Times = least_significant_digit(Sv),
?assertNotEqual(0, Times),
S =
if
V < 0 ->
-1;
true ->
1
end,
SvMinus = incr_lsd(Sv, -S),
SvPlus = incr_lsd(Sv, S),
Svr = s2r(Sv),
Svminusr = s2r(SvMinus),
Svplusr = s2r(SvPlus),
Vr = f2r(V),
Abs_Sv_Vr = rat_abs(rat_minus(Svr, Vr)),
Svminus_Vr = rat_minus(Vr, Svminusr),
Svplus_Vr = rat_minus(Svplusr, Vr),
%% The are 45 (negative) floats where SvMinus (SvPlus) is closer
%% to V than Sv, but such that when reading SvMinus (SvPlus) wrong
%% float would be returned.
case rat_lte(Abs_Sv_Vr, Svminus_Vr) of
true ->
?assertEqual(ok, ok);
false ->
try list_to_float(SvMinus) of
VMinus -> ?assertNotEqual(V, VMinus)
catch
error:badarg ->
ok
end
end,
case rat_lte(Abs_Sv_Vr, Svplus_Vr) of
true ->
?assertEqual(ok, ok);
false ->
try list_to_float(SvPlus) of
VPlus -> ?assertNotEqual(V, VPlus)
catch
error:badarg ->
ok
end
end,
%% Check that Sv is closer to V than to V- and V+.
%% Check that |V - Sv| =< (V - V-) and
%% that |V - Sv| =< (V+ - V)
%% (An alternative is V- + V =< 2*Sv =< V + V+.)
case inc(V) of
inf ->
ok;
Vplus ->
Vplusr = f2r(Vplus),
V_Vplusr = rat_minus(Vplusr, Vr),
?assert(rat_lte(Abs_Sv_Vr, V_Vplusr))
end,
case dec(V) of
'-inf' ->
ok;
Vminus ->
Vminusr = f2r(Vminus),
V_Vminusr = rat_minus(Vr, Vminusr),
?assert(rat_lte(Abs_Sv_Vr, V_Vminusr))
end,
%% Check that no prefix of Sv yields V.
%% If Sv is "3.14" then Svlow is "3.1" and Svhigh is "3.2".
%%
%% This is just one way of getting Svlow and Svhigh:
if
V < 0 ->
SvHigh = step_lsd(Sv, -Times),
SvLow = step_lsd(Sv, 10 - Times);
true ->
SvHigh = step_lsd(Sv, 10 - Times),
SvLow = step_lsd(Sv, -Times)
end,
try list_to_float(SvHigh) of
VHigh -> ?assertNotEqual(V, VHigh)
catch
error:badarg ->
ok
end,
try list_to_float(SvLow) of
VLow -> ?assertNotEqual(V, VLow)
catch
error:badarg ->
ok
end,
%% Check that Sv has enough digits.
?assertEqual(V, list_to_float(Sv)),
g_choice_1(Sv),
ok.
%% Reinterpret a 64-bit unsigned integer's bit pattern as an IEEE-754
%% binary64 float.
i_2_d(Bits) ->
    <<Float:64/float>> = <<Bits:64/unsigned-integer>>,
    Float.
%% Assemble an IEEE-754 binary64 float from its sign bit, 11-bit biased
%% exponent and 52-bit mantissa fields.
parts_2_f(Sign, BiasedExp, Mantissa) ->
    <<Float:64/float>> = <<Sign:1, BiasedExp:11, Mantissa:52>>,
    Float.
%% Same operation as parts_2_f/3: build a binary64 float from sign,
%% biased exponent and fraction fields.
pack(Sign, Exp, Frac) ->
<<Float:64/float>> = <<Sign:1, Exp:11, Frac:52>>,
Float.
%% Exact integer power of ten (X >= 0).
pow10(X) ->
int_pow(10, X).
%% Exact integer exponentiation X^N (N >= 0) by binary
%% exponentiation (square-and-multiply).
int_pow(X, 0) when is_integer(X) ->
    1;
int_pow(X, N) when is_integer(X), is_integer(N), N > 0 ->
    int_pow(X, N, 1).

%% int_pow/3: Acc accumulates the product of the squarings selected by
%% the set bits of N; the N < 2 clause folds in the final factor.
int_pow(X, N, Acc) when N < 2 ->
    Acc * X;
int_pow(X, N, Acc) ->
    NextAcc =
        case N band 1 of
            1 -> Acc * X;
            0 -> Acc
        end,
    int_pow(X * X, N bsr 1, NextAcc).
%% Step a float down to its immediate IEEE-754 predecessor; returns
%% '-inf' at the most negative finite value. Asserts F1 < F before
%% returning.
dec(F) when is_float(F) ->
<<S:1, BE:11, M:52>> = <<F:64/float>>,
dec({S, BE, M});
dec({1, 2046, ?ALL_ONES}) ->
'-inf';
dec({S, BE, M}) when
0 =< S,
S =< 1,
0 =< BE,
BE =< 2046,
0 =< M,
M =< ?ALL_ONES
->
{S1, BE1, M1} = dec1(S, BE, M),
<<F:64/float>> = <<S:1, BE:11, M:52>>,
<<F1:64/float>> = <<S1:1, BE1:11, M1:52>>,
true = F1 < F,
F1.
%% dec1: field-level stepping. +0 steps to -0's successor path; positive
%% values borrow across the mantissa/exponent boundary; negative values
%% move away from zero (carry into the exponent at an all-ones mantissa).
dec1(0, 0, 0) ->
dec1(1, 0, 0);
dec1(0, BE, 0) ->
{0, BE - 1, ?ALL_ONES};
dec1(0, BE, M) ->
{0, BE, M - 1};
dec1(1, BE, ?ALL_ONES) ->
{1, BE + 1, 0};
dec1(1, BE, M) ->
{1, BE, M + 1}.
%% Step a float up to its immediate IEEE-754 successor; returns 'inf' at
%% the largest finite value. Asserts F1 > F before returning. Mirror
%% image of dec/1.
inc(F) when is_float(F) ->
<<S:1, BE:11, M:52>> = <<F:64/float>>,
inc({S, BE, M});
inc({0, 2046, ?ALL_ONES}) ->
inf;
inc({S, BE, M}) when
0 =< S,
S =< 1,
0 =< BE,
BE =< 2046,
0 =< M,
M =< ?ALL_ONES
->
{S1, BE1, M1} = inc1(S, BE, M),
<<F:64/float>> = <<S:1, BE:11, M:52>>,
<<F1:64/float>> = <<S1:1, BE1:11, M1:52>>,
true = F1 > F,
F1.
%% inc1: field-level stepping, with carry into the exponent at an
%% all-ones mantissa; -0 steps onto the positive path.
inc1(0, BE, ?ALL_ONES) ->
{0, BE + 1, 0};
inc1(0, BE, M) ->
{0, BE, M + 1};
inc1(1, 0, 0) ->
inc1(0, 0, 0);
inc1(1, BE, 0) ->
{1, BE - 1, ?ALL_ONES};
inc1(1, BE, M) ->
{1, BE, M - 1}.
%% Return the value (0..9) of the least significant mantissa digit of a
%% formatted float string, ignoring sign and any exponent part.
least_significant_digit("-" ++ Ds) ->
least_significant_digit(Ds);
least_significant_digit("+" ++ Ds) ->
least_significant_digit(Ds);
least_significant_digit(Ds) ->
[MS | _E] = string:tokens(Ds, "eE"),
lsd0(lists:reverse(MS)) - $0.
%% lsd0/lsd1 scan the reversed mantissa: skip a trailing ".0" and any
%% trailing zeros, then return the first significant digit character.
lsd0("0." ++ Ds) ->
lsd1(Ds);
lsd0([D | _Ds]) ->
D.
lsd1("0" ++ Ds) ->
lsd1(Ds);
lsd1([D | _Ds]) ->
D.
%% Assumes Ds represents some other number than zero.
%% Increments or decrements the least significant digit.
%% I must be +1 or -1; carries/borrows propagate through 9s/0s in the
%% helper clauses below, which work on the reversed mantissa.
incr_lsd("-" ++ Ds, I) ->
"-" ++ incr_lsd(Ds, I);
incr_lsd(Ds, I) when I =:= 1; I =:= -1 ->
[MS | E] = string:tokens(Ds, "eE"),
X = ["e" || true <- [E =/= []]],
lists:flatten([incr_lsd0(lists:reverse(MS), I, []), X, E]).
%% Skip a trailing ".0" unchanged before reaching a significant digit.
incr_lsd0("0." ++ Ds, C, L) ->
incr_lsd1(Ds, C, [$., $0 | L]);
incr_lsd0(Ds, C, L) ->
incr_lsd2(Ds, C, L).
%% Skip trailing zeros unchanged.
incr_lsd1("0" ++ Ds, C, L) ->
incr_lsd1(Ds, C, [$0 | L]);
incr_lsd1(Ds, C, L) ->
incr_lsd2(Ds, C, L).
%% Apply the +/-1 with carry/borrow; a 9 (when adding) or 0 (when
%% subtracting) wraps and the operation propagates to the next digit.
incr_lsd2([], C, L) ->
[C + $0 | L];
incr_lsd2("." ++ Ds, C, L) ->
incr_lsd2(Ds, C, [$. | L]);
incr_lsd2("9" ++ Ds, 1 = C, L) ->
incr_lsd2(Ds, C, [$0 | L]);
incr_lsd2("0" ++ Ds, -1 = C, L) ->
incr_lsd2(Ds, C, [$9 | L]);
incr_lsd2([D | Ds], C, L) ->
lists:reverse(Ds, [D + C | L]).
%% Convert a formatted float string (optionally with an e/E exponent)
%% into an exact rational {Numerator, Denominator}.
s2r(S) when is_list(S) ->
    case string:tokens(S, "eE") of
        [Mantissa] ->
            s10(Mantissa);
        [Mantissa, ExpStr] ->
            Mr = s10(Mantissa),
            case list_to_integer(ExpStr) of
                E when E < 0 ->
                    rat_multiply(Mr, {1, pow10(-E)});
                E ->
                    rat_multiply(Mr, {pow10(E), 1})
            end
    end.
%%% Rational numbers (very scetchy).
%%% Representation: {Numerator, Denominator}, Denominator =/= 0.
%% Absolute value of a rational.
rat_abs({A, B}) when A < 0 ->
{-A, B};
rat_abs({A, B}) ->
{A, B}.
%% A/B =< C/D via cross-multiplication.
%% NOTE(review): assumes both denominators are positive (as produced by
%% rat_normalize/1); a negative denominator would flip the inequality.
rat_lte({A, B}, {C, D}) when B =/= 0, D =/= 0 ->
A * D =< C * B.
%% Subtraction, implemented as addition of the negation.
rat_minus({A, B}, {C, D}) ->
rat_plus({A, B}, {-C, D}).
%% Addition with normalization of the result.
rat_plus({A, B}, {C, D}) when B =/= 0, D =/= 0 ->
rat_normalize({A * D + B * C, B * D}).
%% Multiplication with normalization of the result.
rat_multiply({A, B}, {C, D}) when B =/= 0, D =/= 0 ->
rat_normalize({A * C, B * D}).
%% Reduce {T, N} to lowest terms with a positive denominator.
%% gcd/2 may return a negative G (its sign follows its arguments), which
%% can flip the signs of both T2 and N2, so the denominator sign is
%% re-checked after division.
%% FIX: the original branched on T2 < 0 with two byte-identical branches;
%% the dead duplication is collapsed into the single denominator check.
rat_normalize({T, N}) when N =/= 0 ->
    G = gcd(T, N),
    T2 = T div G,
    N2 = N div G,
    if
        N2 < 0 -> {-T2, -N2};
        true -> {T2, N2}
    end.
%% Euclid's algorithm; result sign follows the arguments (not forced
%% positive), which rat_normalize/1 compensates for.
gcd(A, 0) -> A;
gcd(A, B) -> gcd(B, A rem B).
%% Check that there is an exponent if and only if characters are saved
%% when abs(list_to_float(S)) < float(1 bsl 53) and that there is an
%% exponent when abs(list_to_float(S)) >= float(1 bsl 53).
g_choice_1(S) when is_list(S) ->
ShouldAlwaysHaveExponent = abs(list_to_float(S)) >= float(1 bsl 53),
HasExponent = lists:member($e, S) orelse lists:member($E, S),
case ShouldAlwaysHaveExponent of
true ->
?assert(HasExponent);
false ->
%% Below 2^53 the choice is about string length; check it in detail.
g_choice_small(S)
end.
%% Check that there is an exponent if and only if characters are
%% saved. Note: this assumes floating point numbers "Erlang style"
%% (with a single zero before and after the dot, and no extra leading
%% zero in the exponent).
%% The four `if' branches classify the string's shape (trailing zeros,
%% leading zeros, exponent form, plain form) and compare the character
%% Cost of switching representation against the Saving it would bring.
g_choice_small(S) when is_list(S) ->
[MS | ES0] = string:tokens(S, "eE"),
[IS, FS] = string:tokens(MS, "."),
Il = length(IS),
Fl = length(FS),
Pre = z(MS),
Post = z(lists:reverse(MS)),
ES = lists:append(ES0),
El = length(ES),
I = list_to_integer(IS),
?assertNot((El =/= 0) and ((I > 9) or (I < -9))),
?assertNot((El =/= 0) and (I =:= 0)),
% DDDD0000.0
if
Pre =:= 0, Post > 0, El =:= 0 ->
Saving =
if
I < 0, Il =:= Post + 2 ->
Post;
I > 0, Il =:= Post + 1 ->
Post;
I =/= 0, true ->
Post + 1
end,
Cost = 1 + length(integer_to_list(Il - 1)),
?assertNot(Cost < Saving);
% 0.000DDDD
Pre > 0, Post =:= 0, El =:= 0 ->
Saving =
if
Fl =:= Pre + 1 ->
Pre;
true ->
Pre + 1
end,
Cost = 2 + length(integer_to_list(Pre + 1)),
?assertNot(Cost < Saving);
% D.DDDeDD
Pre =:= 0, Post =:= 0, El > 0 ->
E = list_to_integer(ES),
if
E >= 0 ->
Cost = E - (Fl - 1);
E < 0 ->
Cost = -E
end,
Saving = length(ES) + 1,
?assertNotEqual(Cost, Saving),
?assertNot(Cost < Saving);
% DDD.DDD
Pre =:= 0, Post =:= 0, El =:= 0 ->
?assert(true);
true ->
throw(badly_formed_floating_point_string)
end.
%% Number of leading zeros after "0." (returns 0 for any other shape).
%% Applied to a reversed mantissa it counts trailing zeros before ".0".
z("0." ++ Ds) ->
length(lists:takewhile(fun(D) -> D =:= $0 end, Ds));
z(_Ds) ->
0.
%% Convert a binary64 float (or its {Sign, BiasedExp, Mantissa} fields)
%% into an exact rational {T, N}. As a sanity check, T/N is recomputed in
%% floating point and compared to the original; the division may overflow
%% for huge ratios, in which case the check is skipped.
f2r(F) when is_float(F) ->
    <<S:1, BE:11, M:52>> = <<F:64/float>>,
    f2r({S, BE, M});
f2r({S, BE, M}) when
    0 =< S,
    S =< 1,
    0 =< BE,
    BE =< 2046,
    0 =< M,
    M =< ?ALL_ONES
->
    Vr = {T, N} = f2r1(S, BE, M),
    <<F:64/float>> = <<S:1, BE:11, M:52>>,
    %% FIX: old-style `catch T / N` (which conflates throws/exits/errors
    %% and loses the error class) replaced with try/of/catch scoped to
    %% just the division.
    try T / N of
        TN -> true = F =:= TN
    catch
        error:_ -> ok
    end,
    Vr.
%% f2r1: subnormals scale M by 2^-1074; otherwise the implicit leading
%% bit is added and the unbiased exponent BE - 1075 decides whether the
%% power of two multiplies the numerator or the denominator.
f2r1(S, 0, M) ->
    rat_multiply({sign(S), 1}, {M, 1 bsl 1074});
f2r1(S, BE, M) when BE - 1075 >= 0 ->
    rat_multiply({sign(S), 1}, {((1 bsl 52) + M) * (1 bsl (BE - 1075)), 1});
f2r1(S, BE, M) ->
    rat_multiply({sign(S), 1}, {(1 bsl 52) + M, 1 bsl (1075 - BE)}).
%% Map an IEEE 754 sign bit to its multiplicative sign factor.
sign(SignBit) ->
    case SignBit of
        0 -> 1;
        1 -> -1
    end.
%% Apply incr_lsd/2 to Ds |N| times, with step +1 when N is positive and
%% -1 when N is negative (incr_lsd/2 defines the per-step semantics).
step_lsd(Ds, 0) ->
    Ds;
step_lsd(Ds, N) ->
    Step =
        if
            N > 0 -> 1;
            N < 0 -> -1
        end,
    step_lsd(incr_lsd(Ds, Step), N - Step).
%% Parse a plain decimal string "DDD.DDD" (optionally negative) into a
%% rational number: A.B with Sc fraction digits becomes
%% (A * 10^Sc + B) / 10^Sc.
s10("-" ++ S) ->
    rat_multiply({-1, 1}, s10(S));
s10(S) ->
    [AS, BS] = string:tokens(S, "."),
    Sc = length(BS),    % number of fraction digits
    A = list_to_integer(AS),
    B = list_to_integer(BS),
    F = pow10(Sc),
    rat_multiply({1, 1}, {A * F + B, F}). | test/f64_SUITE.erl | 0.552781 | 0.522141 | f64_SUITE.erl | starcoder
%% @author <NAME>
-module(euclid).
%% API exports
-export([is_even/1, is_odd/1, floor/1, ceiling/1, is_power_of_two/1]).
-export([is_perfect_square/1, modulus/2, remainder/2, power_of_two/1]).
-export([ceiling_to_power_of_two/1, gcd/1, gcd/2, lcm/1, lcm/2]).
%%====================================================================
%% API functions
%%====================================================================
%% True when X is an even integer.
%% Guard fixed from is_number/1 to is_integer/1: `band' is only defined
%% for integers, so a float argument used to slip past the guard and
%% crash with badarith inside the body instead of a clean function_clause.
is_even(X) when is_integer(X) ->
    (X band 1) =:= 0.
%% True when X is an odd integer.
%% Guard fixed from is_number/1 to is_integer/1: `band' is only defined
%% for integers, so a float argument used to crash with badarith inside
%% the body instead of a clean function_clause.
is_odd(X) when is_integer(X) ->
    (X band 1) =:= 1.
%% Erlang (pre-OTP 20) does not provide floor/ceiling functions; they
%% can be emulated with trunc/1, which rounds toward zero.
%% https://erlangcentral.org/wiki/index.php?title=Floating_Point_Rounding
%% floor/1: largest integer not greater than X.
floor(X) ->
    Truncated = trunc(X),
    case X < Truncated of
        true -> Truncated - 1;
        false -> Truncated
    end.
%% ceiling/1: smallest integer not less than X (counterpart of floor/1).
ceiling(X) ->
    Truncated = trunc(X),
    case X > Truncated of
        true -> Truncated + 1;
        false -> Truncated
    end.
% Check if the given positive integer is a perfect power of two.
% A power of two has exactly one bit set, so clearing the lowest set
% bit with X band (X - 1) must leave zero.
is_power_of_two(X) when X > 0 ->
    X band (X - 1) =:= 0.
% Check if the given integer value is a perfect square.
% Fast reject first: a square can only be congruent to 0, 1, 4 or 9
% modulo 16, so most non-squares never reach math:sqrt/1. Candidates
% are confirmed by rounding the square root to the nearest integer and
% squaring it back.
is_perfect_square(X) when X < 0 ->
    false;
is_perfect_square(X) ->
    case X band 16#F of
        Residue when Residue =:= 0; Residue =:= 1; Residue =:= 4; Residue =:= 9 ->
            Root = trunc(math:sqrt(X) + 0.5),
            Root * Root =:= X;
        _ ->
            false
    end.
%% Congruent Modulo
%% Floored modulo: the result always takes the sign of Divisor, unlike
%% the built-in `rem' which takes the sign of Dividend.
%% http://math.stackexchange.com/questions/2991/not-understanding-simple-modulus-congruency
%% https://blogs.msdn.microsoft.com/ericlippert/2011/12/05/whats-the-difference-remainder-vs-modulus/
%% Guard fixed from is_number/1 to is_integer/1: `rem' is only defined
%% for integers, so floats previously crashed with badarith in the body.
modulus(Dividend, Divisor) when is_integer(Dividend), is_integer(Divisor) ->
    ((Dividend rem Divisor) + Divisor) rem Divisor.
%% Remainder of integer division; the result takes the sign of Dividend
%% (plain `rem' semantics, in contrast to modulus/2 above).
%% Guard fixed from is_number/1 to is_integer/1: `rem' is only defined
%% for integers, so floats previously crashed with badarith in the body.
remainder(Dividend, Divisor) when is_integer(Dividend), is_integer(Divisor) ->
    Dividend rem Divisor.
% Raises two to the given non-negative exponent.
% Guard relaxed from X > 0 to X >= 0: 2^0 is 1, but the old guard made
% power_of_two(0) crash with function_clause.
power_of_two(X) when X >= 0 ->
    1 bsl X.
% Get the closest larger power of two.
% Rounds X up to the nearest power of two (X itself when it already is
% one) via the bit-smearing helper to_power_of_two/2.
% NOTE(review): for X =< 0 the result is not meaningful (e.g. 0 maps to
% 0) -- confirm callers only pass positive integers.
ceiling_to_power_of_two(X) ->
    to_power_of_two(X-1, 1).
%% Greatest Common Divisor
%% gcd/2: Euclid's algorithm; the result is always non-negative.
gcd(X, Y) when Y == 0 ->
    abs(X);
gcd(X, Y) ->
    gcd(Y, X rem Y).

%% gcd/1: GCD of a whole list; [] yields 0 (the identity of gcd).
%% Rewritten as a fold over gcd/2. The previous helper-based version
%% crashed with function_clause whenever the running gcd reached 0 or 1
%% with elements remaining (e.g. gcd([1, 2]) or gcd([0, 0])), because
%% the helper only accepted accumulators greater than 1.
gcd(Nums) when is_list(Nums) ->
    lists:foldl(fun(N, Acc) -> gcd(N, Acc) end, 0, Nums).
%% Least Common Multiple
%% lcm/2: via the identity |X * Y| = gcd(X, Y) * lcm(X, Y); the LCM of
%% 0 with anything is 0.
lcm(X, Y) when X == 0; Y == 0 ->
    0;
lcm(X, Y) ->
    abs((X div gcd(X, Y))*Y).

%% lcm/1: LCM of a whole list; [] yields 1 (the identity of lcm).
%% The previous empty-list guard used size/1, which is only defined for
%% tuples and binaries -- on a list the guard could never succeed, so
%% lcm([]) fell through to hd([]) and crashed with badarg. Pattern-match
%% the empty list instead, and fold lcm/2 over the rest.
lcm([]) ->
    1;
lcm(Nums) when is_list(Nums) ->
    lists:foldl(fun(N, Acc) -> lcm(N, Acc) end, 1, Nums).
%%====================================================================
%% Internal functions
%%====================================================================
%% Fold gcd/2 over the remaining numbers (third argument unused, kept
%% for arity compatibility).
%% Fixed: the old clauses were non-exhaustive -- they required either an
%% empty list or an accumulator greater than 1, so a running gcd of 0 or
%% 1 with elements left raised function_clause. A gcd of 1 can never
%% change again, so it is returned immediately; a gcd of 0 keeps
%% folding. Pattern matching also avoids the O(n) length/1 guard that
%% was evaluated on every step.
gcd([], Gcd, _) ->
    Gcd;
gcd(_Nums, 1, _) ->
    1;
gcd([H | T], Gcd, _) ->
    gcd(T, gcd(Gcd, H), 0).
%% Fold lcm/2 over the remaining numbers (third argument unused, kept
%% for arity compatibility).
%% Pattern matching replaces the length/1 guards, which cost O(n) on
%% every recursion step (quadratic over the whole list).
lcm([], Lcm, _) ->
    Lcm;
lcm([H | T], Lcm, _) ->
    lcm(T, lcm(Lcm, H), 0).
% Smear the highest set bit of E into every lower bit position by
% repeated shift-or with doubling shift distances (1, 2, 4, ...), then
% add 1 to obtain the next power of two. Stopping once the distance
% exceeds 32 covers integers up to 2^64.
to_power_of_two(E, P) when P =< 32->
    to_power_of_two(E bor (E bsr P), P+P);
to_power_of_two(E, P) when P > 32 ->
    E + 1. | src/euclid.erl | 0.551091 | 0.534916 | euclid.erl | starcoder
%% Copyright (c) 2020-2021 <NAME> <<EMAIL>>.
%%
%% Permission to use, copy, modify, and/or distribute this software for any
%% purpose with or without fee is hereby granted, provided that the above
%% copyright notice and this permission notice appear in all copies.
%%
%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
%% SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
%% IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-module(json_parser).
-export([parse/2]).
%% States of the parse/6 pushdown automaton; see that function's clauses
%% for the transitions.
-type state() :: initial
               | whitespace
               | value
               | null
               | true
               | false
               | string
               | array_start
               | array_element_or_end
               | array_separator_or_end
               | array_element
               | array_end
               | object_start
               | object_key_or_end
               | object_key
               | object_key_value_separator
               | object_value
               | object_member_separator_or_end
               | object_member_separator
               | object_end
               | number
               | final.

%% One entry of the parser stack: a finished JSON value, or an object
%% member being built as {KeyPosition, Key, Value}, where `undefined'
%% marks the parts that have not been parsed yet.
-type stack_element() :: json:value()
                       | {json:position(), undefined, undefined}
                       | {json:position(), binary(), undefined}
                       | {json:position(), binary(), json:value()}.

-type stack() :: [stack_element()].
-spec parse(binary(), json:parsing_options()) ->
        {ok, json:value()} | {error, json:error()}.
%% Parse a complete JSON document. The automaton starts at row 1,
%% column 1; a successful parse must consume the entire input.
parse(Data, Options) ->
    Result = parse(initial, [value, final], Data, [], {1,1}, Options),
    case Result of
        {ok, Value, <<>>, _Pos} ->
            {ok, Value};
        {ok, _Value, Trailing, Pos} ->
            {error, #{reason => {unexpected_trailing_data, Trailing},
                      position => Pos}};
        {error, _} = Error ->
            Error
    end.
-spec parse(Current :: state(), Nexts :: [state()], binary(), stack(),
            json:position(), json:parsing_options()) ->
        {ok, json:value(), binary(), json:position()} | {error, json:error()}.
%% The parser proper: a pushdown automaton. `Current' is the state being
%% handled, `Nexts' is the stack of states to enter afterwards, and the
%% value stack accumulates partially built values (see stack_element()).
%% Positions are {Row, Column}, both 1-based.
%% Initial state.
parse(initial, Nexts, Data, Stack, Pos, Options) ->
    parse(whitespace, Nexts, Data, Stack, Pos, Options);
%% Whitespace
parse(whitespace, Nexts, <<$\n, Data/binary>>, Stack, {R, _}, Options) ->
    %% A newline moves to the next row and resets the column.
    parse(whitespace, Nexts, Data, Stack, {R+1,1}, Options);
parse(whitespace, Nexts, <<B, Data/binary>>, Stack, {R,C}, Options) when
    B =:= $\s; B =:= $\t; B =:= $\r ->
    parse(whitespace, Nexts, Data, Stack, {R,C+1}, Options);
parse(whitespace, [Next | Nexts], Data, Stack, Pos, Options) ->
    %% No more whitespace: pop and enter the next queued state.
    parse(Next, Nexts, Data, Stack, Pos, Options);
%% Main value dispatch: the first byte decides the value type.
parse(value, Next, Data = <<$n, _/binary>>, Stack, Pos, Options) ->
    parse(null, Next, Data, Stack, Pos, Options);
parse(value, Next, Data = <<$t, _/binary>>, Stack, Pos, Options) ->
    parse(true, Next, Data, Stack, Pos, Options);
parse(value, Next, Data = <<$f, _/binary>>, Stack, Pos, Options) ->
    parse(false, Next, Data, Stack, Pos, Options);
parse(value, Next, Data = <<$", _/binary>>, Stack, Pos, Options) ->
    parse(string, Next, Data, Stack, Pos, Options);
parse(value, Next, Data = <<$[, _/binary>>, Stack, Pos, Options) ->
    parse(array_start, Next, Data, Stack, Pos, Options);
parse(value, Next, Data = <<${, _/binary>>, Stack, Pos, Options) ->
    parse(object_start, Next, Data, Stack, Pos, Options);
parse(value, Next, Data = <<B, _/binary>>, Stack, Pos, Options) when
    B =:= $-; B >= $0, B =< $9 ->
    parse(number, Next, Data, Stack, Pos, Options);
parse(value, _, <<B/utf8, _/binary>>, _, Pos, _) ->
    {error, #{reason => {unexpected_character, B}, position => Pos}};
parse(value, _, <<_, _/binary>>, _, Pos, _) ->
    {error, #{reason => invalid_utf8_sequence, position => Pos}};
%% Truncated input: the top of the value stack tells which container
%% (if any) was left open.
parse(value, _, <<>>, [], Pos, _) ->
    {error, #{reason => no_value, position => Pos}};
parse(value, _, <<>>, [Value | _], Pos, _) when is_list(Value) ->
    {error, #{reason => truncated_array, position => Pos}};
parse(value, _, <<>>, [Value | _], Pos, _) when is_tuple(Value) ->
    {error, #{reason => truncated_object, position => Pos}};
parse(value, _, <<>>, [Value | _], Pos, _) when is_map(Value) ->
    {error, #{reason => truncated_object, position => Pos}};
%% Null
parse(null, Nexts, <<"null", Data/binary>>, Stack, {R,C}, Options) ->
    Stack2 = stack_merge(null, Stack),
    parse(whitespace, Nexts, Data, Stack2, {R,C+4}, Options);
parse(null, _, _, _, Pos, _) ->
    {error, #{reason => invalid_element, position => Pos}};
%% True
parse(true, Nexts, <<"true", Data/binary>>, Stack, {R,C}, Options) ->
    Stack2 = stack_merge(true, Stack),
    parse(whitespace, Nexts, Data, Stack2, {R,C+4}, Options);
parse(true, _, _, _, Pos, _) ->
    {error, #{reason => invalid_element, position => Pos}};
%% False
parse(false, Nexts, <<"false", Data/binary>>, Stack, {R,C}, Options) ->
    Stack2 = stack_merge(false, Stack),
    parse(whitespace, Nexts, Data, Stack2, {R,C+5}, Options);
parse(false, _, _, _, Pos, _) ->
    {error, #{reason => invalid_element, position => Pos}};
%% String
parse(string, Nexts, Data = <<$", _/binary>>, Stack, {R,C}, Options) ->
    case parse_string(Data) of
        {ok, String, N, Rest} ->
            Stack2 = stack_merge(String, Stack),
            parse(whitespace, Nexts, Rest, Stack2, {R,C+N}, Options);
        {error, Reason, N} ->
            {error, #{reason => Reason, position => {R,C+N}}}
    end;
%% Array: elements accumulate (reversed) in a list pushed on the stack.
parse(array_start, Nexts, <<$[, Data/binary>>, Stack, {R,C}, Options) ->
    parse(whitespace, [array_element_or_end | Nexts],
          Data, [[] | Stack], {R,C+1}, Options);
parse(array_element_or_end, Nexts, Data = <<$], _/binary>>,
      Stack, Pos, Options) ->
    parse(array_end, Nexts, Data, Stack, Pos, Options);
parse(array_element_or_end, Nexts, Data, Stack, Pos, Options) ->
    parse(array_element, Nexts, Data, Stack, Pos, Options);
parse(array_element, Nexts, Data, Stack, Pos, Options) ->
    parse(value, [array_separator_or_end | Nexts], Data, Stack, Pos, Options);
parse(array_separator_or_end, Nexts, Data = <<$], _/binary>>,
      Stack, Pos, Options) ->
    parse(array_end, Nexts, Data, Stack, Pos, Options);
parse(array_separator_or_end, Nexts, Data = <<$,, _/binary>>,
      Stack, Pos, Options) ->
    parse(array_separator, Nexts, Data, Stack, Pos, Options);
parse(array_separator_or_end, _, <<B/utf8, _/binary>>, _, Pos, _) ->
    {error, #{reason => {unexpected_character, B}, position => Pos}};
parse(array_separator_or_end, _, <<_, _/binary>>, _, Pos, _) ->
    {error, #{reason => invalid_utf8_sequence, position => Pos}};
parse(array_separator_or_end, _, <<>>, _, Pos, _) ->
    {error, #{reason => truncated_array, position => Pos}};
parse(array_separator, Nexts, <<$,, Data/binary>>, Stack, {R,C}, Options) ->
    parse(whitespace, [array_element | Nexts], Data, Stack, {R,C+1}, Options);
parse(array_end, Nexts, <<$], Data/binary>>,
      [Value | Stack], {R,C}, Options) ->
    %% Elements were prepended, so reverse to restore document order.
    Stack2 = stack_merge(lists:reverse(Value), Stack),
    parse(whitespace, Nexts, Data, Stack2, {R,C+1}, Options);
%% Object: members accumulate in a map pushed on the stack.
parse(object_start, Nexts, <<${, Data/binary>>, Stack, {R,C}, Options) ->
    parse(whitespace, [object_key_or_end | Nexts],
          Data, [#{} | Stack], {R,C+1}, Options);
parse(object_key_or_end, Nexts, Data = <<$}, _/binary>>,
      Stack, Pos, Options) ->
    parse(object_end, Nexts, Data, Stack, Pos, Options);
parse(object_key_or_end, Nexts, Data, Stack, Pos, Options) ->
    parse(object_key, Nexts, Data, Stack, Pos, Options);
parse(object_key, Nexts, Data, Stack, Pos, Options) ->
    %% Push a member placeholder; Pos is kept for error reporting.
    Stack2 = [{Pos, undefined, undefined} | Stack],
    parse(value, [object_key_value_separator | Nexts],
          Data, Stack2, Pos, Options);
parse(object_key_value_separator, Nexts, <<$:, Data/binary>>,
      Stack, {R,C}, Options) ->
    parse(whitespace, [object_value | Nexts], Data, Stack, {R,C+1}, Options);
parse(object_key_value_separator, _, <<B/utf8, _/binary>>, _, Pos, _) ->
    {error, #{reason => {unexpected_character, B}, position => Pos}};
parse(object_key_value_separator, _, <<_, _/binary>>, _, Pos, _) ->
    {error, #{reason => invalid_utf8_sequence, position => Pos}};
parse(object_key_value_separator, _, <<>>, _, Pos, _) ->
    {error, #{reason => truncated_object, position => Pos}};
parse(object_value, Nexts, Data, Stack, Pos, Options) ->
    parse(value, [object_member_separator_or_end | Nexts],
          Data, Stack, Pos, Options);
parse(object_member_separator_or_end, Nexts,
      Data = <<$}, _/binary>>, [Value | Stack], Pos, Options) ->
    case stack_merge_member(Value, Stack, Options) of
        {ok, Stack2} ->
            parse(object_end, Nexts, Data, Stack2, Pos, Options);
        {error, Reason} ->
            {KeyPos, _, _} = Value,
            {error, #{reason => Reason, position => KeyPos}}
    end;
parse(object_member_separator_or_end, Nexts,
      Data = <<$,, _/binary>>, [Value | Stack], Pos, Options) ->
    case stack_merge_member(Value, Stack, Options) of
        {ok, Stack2} ->
            parse(object_member_separator, Nexts, Data, Stack2, Pos, Options);
        {error, Reason} ->
            {KeyPos, _, _} = Value,
            {error, #{reason => Reason, position => KeyPos}}
    end;
parse(object_member_separator_or_end, _, <<>>, _, Pos, _) ->
    {error, #{reason => truncated_object, position => Pos}};
parse(object_member_separator, Nexts, <<$,, Data/binary>>,
      Stack, {R,C}, Options) ->
    parse(whitespace, [object_key | Nexts], Data, Stack, {R,C+1}, Options);
parse(object_end, Nexts, <<$}, Data/binary>>,
      [Value | Stack], {R,C}, Options) ->
    Stack2 = stack_merge(Value, Stack),
    parse(whitespace, Nexts, Data, Stack2, {R,C+1}, Options);
%% Number
parse(number, Nexts, Data, Stack, {R,C}, Options) ->
    case parse_number(Data) of
        {ok, Number, N, Rest} ->
            Stack2 = stack_merge(Number, Stack),
            parse(whitespace, Nexts, Rest, Stack2, {R,C+N}, Options);
        {error, Reason, N} ->
            {error, #{reason => Reason, position => {R,C+N}}}
    end;
%% Final state: exactly one fully built value remains on the stack.
parse(final, [], Data, [Value], Pos, _) ->
    {ok, Value, Data, Pos}.
-spec parse_string(binary()) ->
        {ok, binary(), non_neg_integer(), binary()} |
        {error, json:error_reason(), non_neg_integer()}.
%% Parse a double-quoted JSON string at the head of the input. Returns
%% the decoded string, the number of bytes consumed (including both
%% quotes) and the remaining input; error offsets are byte offsets from
%% the opening quote.
parse_string(<<$", Data/binary>>) ->
    parse_string(Data, 1, []).

-spec parse_string(binary(), non_neg_integer(), iodata()) ->
        {ok, binary(), non_neg_integer(), binary()} |
        {error, json:error_reason(), non_neg_integer()}.
%% Accumulate alternating runs of plain characters and decoded escape
%% sequences, reversed; iolist_to_string/1 flattens them at the end.
parse_string(Data, N, Acc) ->
    case read_non_escaped_characters(Data) of
        {ok, Data2 = <<$", Rest/binary>>, N2} ->
            %% Closing quote: everything before it is literal text.
            Acc2 = binary:part(Data, {0, byte_size(Data) - byte_size(Data2)}),
            String = iolist_to_string(lists:reverse([Acc2 | Acc])),
            {ok, String, N+N2+1, Rest};
        {ok, Data2 = <<$\\, _/binary>>, N2} ->
            %% Escape sequence: stash the literal prefix, then decode it.
            Acc2 = binary:part(Data, {0, byte_size(Data) - byte_size(Data2)}),
            case parse_escape_sequence(Data2) of
                {ok, Code, Length, Data3} ->
                    parse_string(Data3, N+N2+Length, [Code | [Acc2 | Acc]]);
                {error, Reason, Offset} ->
                    {error, Reason, N+N2+Offset}
            end;
        {error, Reason, Offset} ->
            {error, Reason, N+Offset}
    end.
-spec read_non_escaped_characters(binary()) ->
        {ok, binary(), non_neg_integer()} |
        {error, json:error_reason(), non_neg_integer()}.
%% Scan forward until a quote, a backslash, an invalid byte or the end
%% of input; return the remaining binary and the number of characters
%% skipped.
read_non_escaped_characters(Bin) ->
    read_non_escaped_characters(Bin, 0).

-spec read_non_escaped_characters(binary(), non_neg_integer()) ->
        {ok, binary(), non_neg_integer()} |
        {error, json:error_reason(), non_neg_integer()}.
read_non_escaped_characters(<<>>, Count) ->
    %% End of input before the closing quote; point at the last character.
    {error, truncated_string, Count - 1};
read_non_escaped_characters(<<$", _/binary>> = Bin, Count) ->
    {ok, Bin, Count};
read_non_escaped_characters(<<$\\, _/binary>> = Bin, Count) ->
    {ok, Bin, Count};
read_non_escaped_characters(<<Byte, _/binary>>, Count) when Byte =< 16#1f ->
    %% Control characters must be escaped in JSON strings.
    {error, {invalid_string_character, Byte}, Count};
read_non_escaped_characters(<<_/utf8, Tail/binary>>, Count) ->
    read_non_escaped_characters(Tail, Count + 1);
read_non_escaped_characters(<<_, _/binary>>, Count) ->
    {error, invalid_utf8_sequence, Count}.
-spec parse_escape_sequence(binary()) ->
        {ok, non_neg_integer(), non_neg_integer(), binary()} |
        {error, json:error_reason(), non_neg_integer()}.
%% Decode one backslash escape at the head of the binary. Returns the
%% resulting code point, the number of input bytes consumed and the
%% remaining input; error offsets are relative to the backslash.
parse_escape_sequence(<<$\\, $\", Data/binary>>) ->
    {ok, $", 2, Data};
parse_escape_sequence(<<$\\, $\\, Data/binary>>) ->
    {ok, $\\, 2, Data};
parse_escape_sequence(<<$\\, $\/, Data/binary>>) ->
    {ok, $/, 2, Data};
parse_escape_sequence(<<$\\, $b, Data/binary>>) ->
    {ok, $\b, 2, Data};
parse_escape_sequence(<<$\\, $f, Data/binary>>) ->
    {ok, $\f, 2, Data};
parse_escape_sequence(<<$\\, $r, Data/binary>>) ->
    {ok, $\r, 2, Data};
parse_escape_sequence(<<$\\, $n, Data/binary>>) ->
    {ok, $\n, 2, Data};
parse_escape_sequence(<<$\\, $t, Data/binary>>) ->
    {ok, $\t, 2, Data};
%% \uXXXX; a leading high surrogate must be followed by a second \uXXXX
%% escape forming a UTF-16 surrogate pair.
parse_escape_sequence(Data = <<$\\, B, _/binary>>) when B =:= $u; B =:= $U ->
    case parse_unicode_escape_sequence(Data) of
        {ok, Code1, Data2} when Code1 >= 16#d800, Code1 =< 16#dbff ->
            case parse_unicode_escape_sequence(Data2) of
                {ok, Code2, Data3} ->
                    %% Combine the pair: 0x10000 + (Hi - 0xD800) * 2^10
                    %% + (Lo - 0xDC00). `-' and `bsl' share precedence and
                    %% associate left, so the expression below parses as
                    %% ((Code1 - 16#d800) bsl 10), which is correct.
                    %% NOTE(review): Code2 is not checked to be a low
                    %% surrogate (16#dc00..16#dfff); an invalid pair can
                    %% produce a bogus code point -- confirm intended.
                    Code = 16#10_000 + (Code1 - 16#d800 bsl 10) + (Code2 - 16#dc00),
                    {ok, Code, 12, Data3};
                {error, Reason} ->
                    %% Error is in the second escape, 6 bytes in.
                    {error, Reason, 6}
            end;
        {ok, Code1, Data2} ->
            {ok, Code1, 6, Data2};
        {error, Reason} ->
            {error, Reason, 0}
    end;
parse_escape_sequence(<<$\\, _, _/binary>>) ->
    {error, invalid_escape_sequence, 0};
parse_escape_sequence(<<$\\>>) ->
    {error, truncated_escape_sequence, 0}.
-spec parse_unicode_escape_sequence(binary()) ->
        {ok, non_neg_integer(), binary()} | {error, json:error_reason()}.
%% Decode a single \uXXXX escape into its 16-bit code unit.
%% Fixed: the digits were previously decoded with binary_to_integer/2,
%% which also accepts a leading sign, so input such as <<"\\u-1ff">>
%% yielded a negative "code point". The four digits are now validated
%% to be plain hexadecimal characters.
parse_unicode_escape_sequence(<<$\\, B, HexDigits:4/binary,
                                Data/binary>>) when
    B =:= $u; B =:= $U ->
    case decode_hex4(HexDigits) of
        {ok, Code} ->
            {ok, Code, Data};
        error ->
            {error, invalid_escape_sequence}
    end;
parse_unicode_escape_sequence(<<$\\, B, _/binary>>) when
    B =:= $u; B =:= $U ->
    {error, truncated_escape_sequence};
parse_unicode_escape_sequence(<<"\\", _, _/binary>>) ->
    {error, invalid_escape_sequence};
parse_unicode_escape_sequence(<<"\\", _/binary>>) ->
    {error, truncated_escape_sequence};
parse_unicode_escape_sequence(<<_/binary>>) ->
    {error, truncated_utf16_surrogate_pair}.

%% @private Decode exactly four case-insensitive hex digits, rejecting
%% signs and anything else binary_to_integer/2 would tolerate.
-spec decode_hex4(binary()) -> {ok, 0..16#ffff} | error.
decode_hex4(<<A, B, C, D>>) ->
    case {hex_value(A), hex_value(B), hex_value(C), hex_value(D)} of
        {HA, HB, HC, HD} when
            HA =/= error, HB =/= error, HC =/= error, HD =/= error ->
            {ok, ((HA * 16 + HB) * 16 + HC) * 16 + HD};
        _ ->
            error
    end.

%% @private Value of one hex digit, or `error' for any other byte.
-spec hex_value(byte()) -> 0..15 | error.
hex_value(C) when C >= $0, C =< $9 -> C - $0;
hex_value(C) when C >= $a, C =< $f -> C - $a + 10;
hex_value(C) when C >= $A, C =< $F -> C - $A + 10;
hex_value(_) -> error.
-spec parse_number(binary()) ->
        {ok, number(), non_neg_integer(), binary()} |
        {error, json:error_reason(), non_neg_integer()}.
%% Parse a JSON number prefix of Data. Returns an integer when there is
%% neither a fraction nor an exponent, a float otherwise, plus the
%% number of bytes consumed and the remaining input.
parse_number(Data) ->
    parse_number(Data, 0, sign, {1, undefined, 1, undefined}).

-spec parse_number(binary(), non_neg_integer(),
                   State :: sign
                          | integer_part
                          | fractional_part
                          | exponent | exponent_sign | exponent_part
                          | final,
                   {Sign :: -1 | 1,
                    Base :: undefined
                          | {integer, integer()}
                          | {float, float()},
                    ExponentSign :: -1 | 1,
                    Exponent :: undefined | integer()}) ->
        {ok, number(), non_neg_integer(), binary()} |
        {error, json:error_reason(), non_neg_integer()}.
%% Small state machine over the grammar -?DIGITS(.DIGITS)?([eE][+-]?DIGITS)?,
%% threading {Sign, Base, ExponentSign, Exponent} as the accumulator.
%% Sign
parse_number(<<$-, Data/binary>>, N, sign, {_, B, ES, E}) ->
    parse_number(Data, N+1, integer_part, {-1, B, ES, E});
parse_number(Data, P, sign, Acc) ->
    parse_number(Data, P, integer_part, Acc);
%% Integer part
parse_number(Data = <<C, _/binary>>, N, integer_part, {S, _, ES, E}) when
    C >= $0, C =< $9 ->
    {I, N2, Rest} = parse_integer(Data),
    parse_number(Rest, N+N2, fractional_part, {S, {integer, I}, ES, E});
parse_number(_, N, integer_part, _Acc) ->
    {error, invalid_number, N};
%% Fractional part (a dot must be followed by at least one digit)
parse_number(<<$., Data/binary>>, N, fractional_part, {S, B, ES, E}) ->
    case Data of
        <<C, _/binary>> when C >= $0, C =< $9 ->
            {F0, N2, Rest} = parse_integer(Data),
            %% Scale the fraction digits down by their count.
            F = F0 / math:pow(10, N2),
            I = case B of
                    undefined -> 0;
                    {integer, I2} -> I2
                end,
            parse_number(Rest, N+N2+1, exponent, {S, {float, I + F}, ES, E});
        _ ->
            {error, invalid_number, N}
    end;
parse_number(Data, N, fractional_part, Acc) ->
    parse_number(Data, N, exponent, Acc);
%% Exponent
parse_number(<<C, Data/binary>>, N, exponent, Acc) when C =:= $e; C =:= $E ->
    parse_number(Data, N+1, exponent_sign, Acc);
parse_number(Data, N, exponent, Acc) ->
    parse_number(Data, N, final, Acc);
%% Exponent sign
parse_number(<<$-, Data/binary>>, N, exponent_sign, {S, B, _, E}) ->
    parse_number(Data, N+1, exponent_part, {S, B, -1, E});
parse_number(<<$+, Data/binary>>, N, exponent_sign, {S, B, _, E}) ->
    parse_number(Data, N+1, exponent_part, {S, B, 1, E});
parse_number(Data, N, exponent_sign, Acc) ->
    parse_number(Data, N, exponent_part, Acc);
%% Exponent part
parse_number(Data = <<C, _/binary>>, N, exponent_part, {S, B, ES, _}) when
    C >= $0, C =< $9 ->
    {E, N2, Rest} = parse_integer(Data),
    parse_number(Rest, N+N2, final, {S, B, ES, E});
parse_number(_, N, exponent_part, _Acc) ->
    {error, invalid_number, N};
%% Final state: combine sign, base and exponent. An exponent always
%% yields a float (math:pow/2 returns floats).
parse_number(Data, N, final, {S, {integer, I}, _, undefined}) ->
    {ok, S * I, N, Data};
parse_number(Data, N, final, {S, {integer, I}, ES, E}) ->
    {ok, S * I* math:pow(10, E * ES), N, Data};
parse_number(Data, N, final, {S, {float, F}, _, undefined}) ->
    {ok, S * F, N, Data};
parse_number(Data, N, final, {S, {float, F}, ES, E}) ->
    {ok, S * F * math:pow(10, E * ES), N, Data}.
-spec parse_integer(binary()) ->
        {integer(), non_neg_integer(), binary()}.
%% Read a run of leading decimal digits. Returns the decoded value (0
%% when there are none), the number of digits consumed and the rest of
%% the input.
parse_integer(Bin) ->
    parse_integer(Bin, 0, 0).

-spec parse_integer(binary(), non_neg_integer(), integer()) ->
        {integer(), non_neg_integer(), binary()}.
parse_integer(Bin = <<C, Rest/binary>>, Count, Acc) ->
    case is_digit(C) of
        true -> parse_integer(Rest, Count + 1, Acc * 10 + (C - $0));
        false -> {Acc, Count, Bin}
    end;
parse_integer(<<>>, Count, Acc) ->
    {Acc, Count, <<>>}.

-spec is_digit(integer()) -> boolean().
%% True for the ASCII characters '0'..'9'.
is_digit(C) when C >= $0, C =< $9 ->
    true;
is_digit(_) ->
    false.
-spec iolist_to_string(iolist()) -> binary().
%% Flatten accumulated chardata into a UTF-8 binary. The parser
%% validates characters as it accumulates them, so a conversion failure
%% indicates a bug and crashes loudly.
iolist_to_string(Chardata) ->
    case unicode:characters_to_binary(Chardata) of
        Bin when is_binary(Bin) ->
            Bin;
        _ErrorOrIncomplete ->
            error({invalid_unicode_data, Chardata})
    end.
-spec stack_merge(stack_element(), stack()) -> stack().
%% Fold a finished value into the top of the parser stack:
%% - empty stack: the value is the document root;
%% - member placeholder without a key: the value is the member's key;
%% - member with a key but no value: the value completes the member;
%% - list on top: prepend the value as the next array element.
stack_merge(Term, []) ->
    [Term];
stack_merge(Term, [Top | Rest]) ->
    case Top of
        {Pos, undefined, undefined} ->
            [{Pos, Term, undefined} | Rest];
        {Pos, Key, undefined} ->
            [{Pos, Key, Term} | Rest];
        Elements when is_list(Elements) ->
            [[Term | Elements] | Rest]
    end.
-spec stack_merge_member(stack_element(), stack(), json:parsing_options()) ->
        {ok, stack()} | {error, json:error_reason()}.
%% Fold a finished object member {KeyPos, Key, Value} into the object
%% map on top of the stack, applying the configured duplicate-key
%% policy (`last' when unset).
stack_merge_member({_, Key, Value}, [Parent | Values], Options) when
    is_binary(Key) ->
    case maps:get(duplicate_key_handling, Options, last) of
        last ->
            %% Later members overwrite earlier ones.
            {ok, [Parent#{Key => Value} | Values]};
        first ->
            %% Keep the first occurrence, silently dropping later ones.
            case maps:is_key(Key, Parent) of
                true ->
                    {ok, [Parent | Values]};
                false ->
                    {ok, [Parent#{Key => Value} | Values]}
            end;
        error ->
            %% Reject duplicate keys outright.
            case maps:is_key(Key, Parent) of
                true ->
                    {error, {duplicate_key, Key}};
                false ->
                    {ok, [Parent#{Key => Value} | Values]}
            end
    end;
%% A non-binary key means the "key" position held a non-string value.
stack_merge_member({_, Key, _}, _, _) ->
    {error, {invalid_key, Key}}. | src/json_parser.erl | 0.521471 | 0.54056 | json_parser.erl | starcoder
%%%--------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2017 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%--------------------------------------------------------------------
%%% @doc
%%% Histogram in which each counter corresponds to a time slot. Each increment
%%% has to provide current timestamp, it shifts the histogram if the time since
%%% last update is longer than defined time window.
%%%
%%% NOTE: As clocks may warp backward, timestamps taken consecutively may not be
%%% monotonic, which could break the prepend-only histograms used by this
%%% module. For that reason, all timestamps are checked against the last update
%%% value and, if needed, rounded up artificially to ensure monotonicity. This
%%% may cause spikes in specific histogram windows, but ensures their integrity.
%%% @end
%%%--------------------------------------------------------------------
-module(time_slot_histogram).
-author("<NAME>").
-record(time_slot_histogram, {
    % timestamp the histogram starts at (first argument of new/5)
    start_time :: timestamp(),
    % timestamp of the most recent update; when a later update arrives,
    % the histogram is shifted by whole elapsed time windows
    last_update_time :: timestamp(),
    % length of a single time slot
    time_window :: window(),
    % per-slot counters (histogram or cumulative_histogram values)
    values :: values(),
    % number of slots; kept equal to length(values)
    size :: non_neg_integer(),
    % selects which histogram module manages `values'
    type :: type()
}).

-type type() :: normal | cumulative.
% Time unit can be arbitrary, depending on the module that uses the histogram.
% Note that the time window and consecutive update times must use the same unit.
-type timestamp() :: non_neg_integer().
% See the module description for details on monotonic timestamps;
% values are produced by ensure_monotonic_timestamp/2.
-opaque monotonic_timestamp() :: {monotonic, timestamp()}.
% length of one time window
-type window() :: pos_integer().
-type histogram() :: #time_slot_histogram{}.
-type values() :: histogram:histogram() | cumulative_histogram:histogram().

-export_type([histogram/0, monotonic_timestamp/0]).
%% API
-export([
new/2, new/3, new_cumulative/3,
ensure_monotonic_timestamp/2,
increment/2, increment/3,
decrement/2, decrement/3,
get_histogram_values/1, get_last_update/1,
get_sum/1, get_average/1, get_size/1, reset_cumulative/2]).
%%%===================================================================
%%% API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Returns a new empty time_slot_histogram (of the `normal' type) with
%% the given TimeWindow and Size. LastUpdate and StartTime are set to 0.
%% @end
%%--------------------------------------------------------------------
-spec new(window(), histogram:size()) -> histogram().
new(TimeWindow, Size) ->
    new(0, 0, TimeWindow, Size, normal).
%%--------------------------------------------------------------------
%% @doc
%% Returns a `normal' time_slot_histogram with the given LastUpdateTime
%% and either the provided slot values (when a list is passed) or a new
%% empty histogram of the given size. StartTime is set to LastUpdate.
%% @end
%%--------------------------------------------------------------------
-spec new(LastUpdate :: timestamp(), window(), histogram:size() | values()) -> histogram().
new(LastUpdate, TimeWindow, HistogramValues) when is_list(HistogramValues) ->
    new(LastUpdate, LastUpdate, TimeWindow, HistogramValues, normal);
new(LastUpdate, TimeWindow, Size) when is_integer(Size) ->
    new(LastUpdate, TimeWindow, histogram:new(Size)).
%%--------------------------------------------------------------------
%% @doc
%% Returns a `cumulative' time_slot_histogram with the given
%% LastUpdateTime and either the provided slot values (when a list is
%% passed) or a new empty cumulative histogram of the given size.
%% StartTime is set to LastUpdate.
%% @end
%%--------------------------------------------------------------------
-spec new_cumulative(LastUpdate :: timestamp(), window(), histogram:size() | values()) -> histogram().
new_cumulative(LastUpdate, TimeWindow, HistogramValues) when is_list(HistogramValues) ->
    new(LastUpdate, LastUpdate, TimeWindow, HistogramValues, cumulative);
new_cumulative(LastUpdate, TimeWindow, Size) when is_integer(Size) ->
    new_cumulative(LastUpdate, TimeWindow, cumulative_histogram:new(Size)).
%% @private
%% Common constructor: wraps ready-made slot values, or allocates fresh
%% ones of the requested Size using the module that matches Type
%% (histogram for `normal', cumulative_histogram for `cumulative').
-spec new(StartTime :: timestamp(), LastUpdate :: timestamp(),
    window(), histogram:size() | values(), type()) -> histogram().
new(StartTime, LastUpdate, TimeWindow, HistogramValues, Type) when is_list(HistogramValues) ->
    #time_slot_histogram{
        start_time = StartTime,
        last_update_time = LastUpdate,
        time_window = TimeWindow,
        values = HistogramValues,
        % cache the slot count so it does not have to be recomputed
        size = length(HistogramValues),
        type = Type
    };
new(StartTime, LastUpdate, TimeWindow, Size, Type = normal) when is_integer(Size) ->
    new(StartTime, LastUpdate, TimeWindow, histogram:new(Size), Type);
new(StartTime, LastUpdate, TimeWindow, Size, Type = cumulative) when is_integer(Size) ->
    new(StartTime, LastUpdate, TimeWindow, cumulative_histogram:new(Size), Type).
%%-------------------------------------------------------------------
%% @doc
%% Ensures that the given timestamp is not smaller than the last update.
%% Must be called before performing any modification on time slot histograms.
%% @end
%%-------------------------------------------------------------------
-spec ensure_monotonic_timestamp(histogram() | timestamp(), timestamp()) -> monotonic_timestamp().
ensure_monotonic_timestamp(#time_slot_histogram{last_update_time = LastUpdate}, Current) ->
    ensure_monotonic_timestamp(LastUpdate, Current);
ensure_monotonic_timestamp(LastUpdate, Current) ->
    %% Tag the value so update functions can pattern-match on the guarantee
    %% that the timestamp is not older than the last recorded update.
    {monotonic, max(LastUpdate, Current)}.

%%--------------------------------------------------------------------
%% @equiv update(Histogram, CurrentMonotonicTime, 1).
%% @end
%%--------------------------------------------------------------------
-spec increment(histogram(), CurrentMonotonicTime :: monotonic_timestamp()) -> histogram().
increment(Histogram, CurrentMonotonicTime) ->
    increment(Histogram, CurrentMonotonicTime, 1).

%%--------------------------------------------------------------------
%% @doc
%% Increments newest time window by N. The function shifts time slots
%% if the difference between provided CurrentMonotonicTime and LastUpdate
%% is greater than TimeWindow.
%% @end
%%--------------------------------------------------------------------
-spec increment(histogram(), CurrentMonotonicTime :: monotonic_timestamp(), N :: non_neg_integer()) ->
    histogram().
increment(TSH = #time_slot_histogram{type = Type}, CurrentMonotonicTime, N) ->
    %% Rotate out stale slots first, then bump the newest slot.
    ShiftSize = calc_shift_size(TSH, CurrentMonotonicTime),
    ShiftedHistogram = shift_values(TSH, ShiftSize),
    {monotonic, LastUpdate} = CurrentMonotonicTime,
    TSH#time_slot_histogram{
        last_update_time = LastUpdate,
        values = increment_by_type(ShiftedHistogram, Type, N)
    }.

%%--------------------------------------------------------------------
%% @doc
%% This function shifts cumulative histogram and fills shifted slots
%% with zeros.
%% NOTE!!! This function must be used carefully as it's unusual for
%% cumulative histogram to fill new slots with zeros instead of previous
%% values.
%% e.g. it can be used to reset cumulative histograms after restarts
%% @end
%%--------------------------------------------------------------------
-spec reset_cumulative(histogram(), CurrentMonotonicTime :: monotonic_timestamp()) ->
    histogram().
reset_cumulative(TSH = #time_slot_histogram{type = cumulative}, CurrentMonotonicTime) ->
    %% Temporarily treating the histogram as `normal' makes the shift fill
    %% fresh slots with zeros; incrementing by 0 leaves the values untouched.
    TSH2 = increment(TSH#time_slot_histogram{type = normal}, CurrentMonotonicTime, 0),
    TSH2#time_slot_histogram{type = cumulative}.

%%--------------------------------------------------------------------
%% @equiv decrement(Histogram, CurrentMonotonicTime, 1).
%% @end
%%--------------------------------------------------------------------
-spec decrement(histogram(), CurrentMonotonicTime :: monotonic_timestamp()) -> histogram().
decrement(Histogram, CurrentMonotonicTime) ->
    decrement(Histogram, CurrentMonotonicTime, 1).

%%--------------------------------------------------------------------
%% @doc
%% Decrements newest time window by N. The function shifts time slots
%% if the difference between provided CurrentMonotonicTime and LastUpdate
%% is greater than the TimeWindow.
%% @end
%%--------------------------------------------------------------------
-spec decrement(histogram(), CurrentMonotonicTime :: monotonic_timestamp(), N :: non_neg_integer()) ->
    histogram().
decrement(TSH = #time_slot_histogram{type = Type}, CurrentMonotonicTime, N) ->
    ShiftSize = calc_shift_size(TSH, CurrentMonotonicTime),
    ShiftedHistogram = shift_values(TSH, ShiftSize),
    {monotonic, LastUpdate} = CurrentMonotonicTime,
    TSH#time_slot_histogram{
        last_update_time = LastUpdate,
        values = decrement_by_type(ShiftedHistogram, Type, N)
    }.

%% @doc Returns the raw per-slot values list.
-spec get_histogram_values(histogram()) -> histogram:histogram().
get_histogram_values(#time_slot_histogram{values = Histogram}) ->
    Histogram.

%% @doc Returns the timestamp of the last recorded modification.
-spec get_last_update(histogram()) -> timestamp().
get_last_update(#time_slot_histogram{last_update_time = LastUpdate}) ->
    LastUpdate.
%% @doc Returns the number of slots in the histogram (the length of the
%% values list, as set on construction in new/5).
%% The spec previously declared `timestamp()' as the return type, which was
%% incorrect: the `size' field holds a slot count, not a point in time.
-spec get_size(histogram()) -> non_neg_integer().
get_size(#time_slot_histogram{size = Size}) ->
    Size.
%% @doc Sum of all slot values.
-spec get_sum(histogram()) -> non_neg_integer().
get_sum(TimeSlotHistogram) ->
    lists:sum(get_histogram_values(TimeSlotHistogram)).

%% @doc Arithmetic mean of the slot values (sum divided by slot count).
%% NOTE(review): divides by the histogram size; presumably histograms are
%% always created with a positive size -- confirm at call sites.
-spec get_average(histogram()) -> number().
get_average(TimeSlotHistogram) ->
    get_sum(TimeSlotHistogram) / get_size(TimeSlotHistogram).

%%%===================================================================
%%% Internal functions
%%%===================================================================

%% Number of whole time windows crossed between the last update and the
%% current time, both measured relative to the histogram's start time.
%% Clamped at 0 (the monotonic tag guarantees CurrentTime >= LastUpdate).
-spec calc_shift_size(histogram(), monotonic_timestamp()) -> non_neg_integer().
calc_shift_size(#time_slot_histogram{
    start_time = StartTime,
    last_update_time = LastUpdate,
    time_window = TimeWindow
}, {monotonic, CurrentTime}) ->
    max(0, (CurrentTime - StartTime) div TimeWindow - (LastUpdate - StartTime) div TimeWindow).

%% Shift slots using the strategy matching the histogram type.
-spec shift_values(histogram(), non_neg_integer()) -> values().
shift_values(#time_slot_histogram{type = normal, values = Histogram}, ShiftSize) ->
    histogram:shift(Histogram, ShiftSize);
shift_values(#time_slot_histogram{type = cumulative, values = Histogram}, ShiftSize) ->
    cumulative_histogram:shift(Histogram, ShiftSize).

%% Increment the newest slot using the strategy matching the histogram type.
-spec increment_by_type(values(), type(), non_neg_integer()) -> values().
increment_by_type(HistogramValues, normal, N) ->
    histogram:increment(HistogramValues, N);
increment_by_type(HistogramValues, cumulative, N) ->
    cumulative_histogram:increment(HistogramValues, N).
%% Decrement the newest slot using the strategy matching the histogram type.
%% (Removed trailing extraction garbage that followed the final period and
%% made the module unparsable.)
-spec decrement_by_type(values(), type(), non_neg_integer()) -> values().
decrement_by_type(HistogramValues, normal, N) ->
    histogram:decrement(HistogramValues, N);
decrement_by_type(HistogramValues, cumulative, N) ->
    cumulative_histogram:decrement(HistogramValues, N).
%% Adapter that forwards woody/hackney client metrics to hay_metrics,
%% filtering and remapping metric keys first (see update_metric/3 below).
-module(woody_client_metrics).
% api
-export([new/2]).
-export([delete/1]).
-export([increment_counter/1]).
-export([increment_counter/2]).
-export([decrement_counter/1]).
-export([decrement_counter/2]).
-export([update_histogram/2]).
-export([update_gauge/2]).
-export([update_meter/2]).
% -type metric() :: metrics:metric().
-type metric() :: counter | histogram | gauge | meter.
%% Metric registration and removal are no-ops: metrics are pushed on update.
-spec new(metric(), any()) -> ok | {error, term()}.
new(_, _) ->
    ok.
-spec delete(any()) -> ok.
delete(_) ->
    ok.
-spec increment_counter(any()) -> ok | {error, term()}.
increment_counter(Key) ->
    increment_counter(Key, 1).
-spec increment_counter(any(), number()) -> ok | {error, term()}.
increment_counter([hackney, _Host, _], _) ->
    % we don't need per host metrics
    ok;
increment_counter(Key, Value) ->
    update_metric(counter, Key, Value).
-spec decrement_counter(any()) -> ok | {error, term()}.
decrement_counter(Key) ->
    decrement_counter(Key, 1).
-spec decrement_counter(any(), number()) -> ok | {error, term()}.
decrement_counter(Key, Value) ->
    % a decrement is just a negative increment
    increment_counter(Key, -Value).
-spec update_histogram(any(), number() | function()) -> ok | {error, term()}.
update_histogram(Key, Value) ->
    update_metric(histogram, Key, Value).
-spec update_gauge(any(), number()) -> ok | {error, term()}.
update_gauge(Key, Value) ->
    update_metric(gauge, Key, Value).
-spec update_meter(any(), number()) -> ok | {error, term()}.
update_meter(Key, Value) ->
    update_metric(meter, Key, Value).
%% internals

%% Central dispatch: rejects unsupported metric kinds, represents histograms
%% as gauges, then filters/remaps the key and pushes to hay_metrics.
-spec update_metric(metric(), any(), number()) -> ok | {error, term()}.
update_metric(meter, _, _) ->
    %% meters are not supported
    {error, not_allowed};
update_metric(histogram, _, Value) when is_function(Value) ->
    %% lazily-computed histogram values are not supported
    {error, not_allowed};
update_metric(histogram, Key, Value) ->
    %% histograms are pushed as gauges
    update_metric(gauge, Key, Value);
update_metric(Type, Key0, Value) ->
    case is_allowed_metric(Key0) of
        true ->
            Key = map_key(Key0),
            hay_metrics:push(hay_metrics:construct(Type, tag_key(Key), Value));
        false ->
            {error, not_allowed}
    end.

%% Prefix every key with the [woody, client] namespace.
-spec tag_key(list()) -> list().
tag_key(Key) when is_list(Key) ->
    [woody, client | Key].

%% Whitelist check: pool metrics by metric name, request metrics by full key.
-spec is_allowed_metric(any()) -> boolean().
is_allowed_metric([hackney_pool, _, Metric]) ->
    lists:member(Metric, get_allowed_pool_metrics());
is_allowed_metric(Key) ->
    lists:member(Key, get_allowed_request_metrics()).

%% Apply the configured key remapping; keys without a mapping pass through.
%% Using the key itself as the maps:get/3 default collapses the previous
%% case-on-undefined round trip and no longer confuses an absent mapping
%% with one explicitly set to `undefined'.
-spec map_key(any()) -> any().
map_key(Key) ->
    maps:get(Key, get_key_mapping(), Key).

% gets
-spec get_key_mapping() -> map().
get_key_mapping() ->
    maps:get(metric_key_mapping, get_options(), #{}).

-spec get_allowed_request_metrics() -> [list()].
get_allowed_request_metrics() ->
    [
        [hackney, total_requests],
        [hackney, finished_requests],
        [hackney, nb_requests]
    ].

-spec get_allowed_pool_metrics() -> [atom()].
get_allowed_pool_metrics() ->
    [free_count, no_socket, in_use_count, queue_counter].

-spec get_options() -> map().
get_options() ->
    genlib_app:env(woody, woody_client_metrics_options, #{}).
%% -------------------------------------------------------------------
%% Copyright <2020> <
%% Technische Universität Kaiserslautern, Germany
%% Université Pierre et Marie Curie / Sorbonne-Université, France
%% Universidade NOVA de Lisboa, Portugal
%% Université catholique de Louvain (UCL), Belgique
%% INESC TEC, Portugal
%% >
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either expressed or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% List of the contributors to the development of Antidote: see AUTHORS file.
%% Description and complete License: see LICENSE file.
%% -------------------------------------------------------------------
%% antidote_crdt_secure_counter_pn: A convergent, replicated, operation
%% based secure (using the Paillier cryptosystem) PN-Counter.
-module(antidote_crdt_secure_counter_pn).
-behaviour(antidote_crdt).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([
new/0,
value/1,
downstream/2,
update/2,
equal/2,
to_binary/1,
from_binary/1,
is_operation/1,
require_state_downstream/1
]).
-type freshness() :: fresh | spoiled.
-type delta() :: integer().
-type nsquare() :: pos_integer().
-type state() :: {freshness(), integer()}.
-type op() :: {increment, {delta(), nsquare()}}.
-type effect() :: {delta(), nsquare()}.
%% @doc Create a new, empty 'antidote_crdt_secure_counter_pn'.
%%
%% The first element of the state tuple indicates whether the counter
%% is newly created (no increments done yet) or not. The second element
%% represents the encrypted (as specified by the Paillier scheme) total
%% value of the counter.
-spec new() -> state().
new() ->
    {fresh, 0}.

%% @doc Returns the encrypted (as specified by the Paillier scheme) total
%% value of a secure pn-counter.
-spec value(state()) -> integer().
value({_, Value}) when is_integer(Value) ->
    Value.

%% @doc Generate a downstream operation.
%%
%% The first parameter is a tuple of the form `{increment, {integer(), pos_integer()}}'.
%% Where the first integer represents the encrypted delta value by which the counter
%% will be incremented. The second integer represents the N squared value calculated
%% during the key generation phase of the Paillier cryptosystem.
%%
%% The value of N is part of the user's public key, it is ok for the server to know this
%% value. Invalid `NSquare' values (less than or equal to zero) are rejected, and a
%% downstream effect is not generated.
%%
%% The second parameter is the secure pn-counter, this parameter is not actually used.
-spec downstream(op(), state()) -> {ok, effect()}.
downstream({increment, {Delta, NSquare}}, _SecurePNCounter) when
    is_integer(Delta) and is_integer(NSquare) and (NSquare > 0)
->
    {ok, {Delta, NSquare}}.

%% @doc Updates a given secure pn-counter, incrementing it by a given
%% encrypted `Delta'. By incrementing we mean performing the homomorphic
%% addition of the counter value with the given delta. As described by
%% the Paillier cryptosystem, the homomorphic addition of two plaintexts
%% translates to the product of two ciphertexts modulo N squared.
%%
%% Returns the updated secure pn-counter.
-spec update(effect(), state()) -> {ok, state()}.
update({Delta, _NSquare}, {fresh, _Value}) ->
    %% A fresh counter holds the placeholder 0, which is not a ciphertext;
    %% the first delta replaces it outright (multiplying by 0 would always
    %% yield 0).
    {ok, {spoiled, Delta}};
update({Delta, NSquare}, {spoiled, Value}) ->
    {ok, {spoiled, (Value * Delta) rem NSquare}}.

%% @doc Compare if two secure counters are equal.
-spec equal(state(), state()) -> boolean().
equal(SecurePNCounter1, SecurePNCounter2) ->
    SecurePNCounter1 =:= SecurePNCounter2.

-spec to_binary(state()) -> binary().
to_binary(SecurePNCounter) ->
    term_to_binary(SecurePNCounter).

-spec from_binary(binary()) -> {ok, state()}.
from_binary(Bin) ->
    {ok, binary_to_term(Bin)}.

%% @doc The following function verifies that a given operation is supported by
%% this particular CRDT.
-spec is_operation(term()) -> boolean().
is_operation({increment, {Delta, NSquare}}) when
    is_integer(Delta) and is_integer(NSquare) and (NSquare > 0)
->
    true;
is_operation(_) ->
    false.
%% @doc Returns true if ?MODULE:downstream/2 needs the state of crdt
%% to generate downstream effect.
%% Added the `-spec' for consistency with every other exported function in
%% this module.
-spec require_state_downstream(term()) -> boolean().
require_state_downstream(_) ->
    false.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).

%% Test helper: run downstream/2 and apply the resulting effect via update/2.
prepare_and_effect(Op, PNCounter) ->
    {ok, Downstream} = downstream(Op, PNCounter),
    update(Downstream, PNCounter).

new_test() ->
    ?assertEqual({fresh, 0}, new()).

value_test() ->
    ?assertEqual(0, value({fresh, 0})),
    ?assertEqual(4, value({spoiled, 4})).

update_test() ->
    Counter = new(),
    % Fresh counter becomes spoiled.
    {ok, Counter1} = prepare_and_effect({increment, {2, 1}}, Counter),
    ?assertEqual({spoiled, 2}, Counter1),
    % Spoiled counter stays spoiled and correctly updates value.
    {ok, Counter2} = prepare_and_effect({increment, {3, 36}}, Counter1),
    ?assertEqual({spoiled, 6}, Counter2).

%% NSquare must be a positive integer; both the operation predicate and the
%% downstream guard must reject non-positive values.
reject_invalid_nsquare_test() ->
    Counter = new(),
    Operation1 = {increment, {1, 0}},
    Operation2 = {increment, {1, -1}},
    ?assertNot(is_operation(Operation1)),
    ?assertNot(is_operation(Operation2)),
    ?assertError(function_clause, downstream(Operation1, Counter)),
    ?assertError(function_clause, downstream(Operation2, Counter)).

equal_test() ->
    Counter1 = {fresh, 4},
    Counter2 = {fresh, 2},
    Counter3 = {fresh, 2},
    ?assertNot(equal(Counter1, Counter2)),
    ?assert(equal(Counter2, Counter3)),
    Counter4 = {spoiled, 5},
    Counter5 = {spoiled, 3},
    Counter6 = {spoiled, 3},
    ?assertNot(equal(Counter4, Counter5)),
    ?assert(equal(Counter5, Counter6)),
    ?assertNot(equal(Counter1, Counter4)).
%% Round-trip through to_binary/from_binary must preserve the state.
%% The previous fixture `{spoiled, 4, 5}' was a 3-tuple, which does not
%% inhabit state() ({freshness(), integer()}); use a valid 2-tuple instead.
%% (Also removed trailing extraction garbage that followed `-endif.'.)
binary_test() ->
    Counter1 = {spoiled, 4},
    BinaryCounter1 = to_binary(Counter1),
    {ok, Counter2} = from_binary(BinaryCounter1),
    ?assert(equal(Counter1, Counter2)).
-endif.
%% Advent of Code 2016, day 1: taxicab navigation on a grid.
-module(problem2016_01).
-export([solve1/1, solve2/1]).
-type turn() :: left | right.
-type dir() :: north | west | south | east.
-type coord() :: { integer(), integer() }.
-type pos() :: { coord(), dir() }.
-type steps() :: non_neg_integer().
-type steps_list() :: [ coord() ].
-type instruction() :: { turn(), steps() }.
-type line() :: { coord(), coord() }.
-type lines() :: [ line() ].
%% Parse a single "L<n>" / "R<n>" token into a {Turn, Steps} pair.
-spec parse_instruction( [ char() ] ) -> instruction().
parse_instruction( [ $L | Steps ] ) ->
    { left, list_to_integer( Steps ) };
parse_instruction( [ $R | Steps ] ) ->
    { right, list_to_integer( Steps ) }.
%% @doc Rotate the current heading 90 degrees in the requested direction.
-spec do_turn( turn(), dir() ) -> dir().
do_turn( left, Facing ) -> counter_clockwise( Facing );
do_turn( right, Facing ) -> clockwise( Facing ).

%% One step clockwise around the compass.
clockwise( north ) -> east;
clockwise( east ) -> south;
clockwise( south ) -> west;
clockwise( west ) -> north.

%% One step counter-clockwise around the compass.
counter_clockwise( north ) -> west;
counter_clockwise( west ) -> south;
counter_clockwise( south ) -> east;
counter_clockwise( east ) -> north.
%% @doc Advance Steps cells from the given position along the current
%% heading. Uses a screen-style Y axis: north decreases Y, south increases Y.
-spec do_steps( pos(), steps() ) -> coord().
do_steps( { { X, Y }, Facing }, Steps ) ->
    { DX, DY } = step_vector( Facing ),
    { X + DX * Steps, Y + DY * Steps }.

%% Unit displacement for a single step in each heading.
step_vector( north ) -> { 0, -1 };
step_vector( east ) -> { 1, 0 };
step_vector( south ) -> { 0, 1 };
step_vector( west ) -> { -1, 0 }.
%% @doc Coordinates visited while walking Steps cells from Pos, one entry
%% per step, in walking order.
-spec get_steps_list( pos(), steps() ) -> steps_list().
get_steps_list( Pos, Steps ) ->
    [ do_steps( Pos, S ) || S <- lists:seq( 1, Steps ) ].
%% Apply one instruction: turn first, then walk the given number of steps.
-spec get_next_pos( instruction(), pos() ) -> pos().
get_next_pos( { Turn, Steps }, { Pos, Face } ) ->
    NewFace = do_turn( Turn, Face ),
    { NewX, NewY } = do_steps( { Pos, NewFace }, Steps ),
    { { NewX, NewY }, NewFace }.
%% Split the puzzle input on spaces and commas and parse each token.
parse_instructions( Input ) ->
    lists:map( fun parse_instruction/1, string:tokens( Input, " ," ) ).
%% Part 1: follow all instructions from the origin facing north and return
%% the taxicab distance of the final position from the origin.
-spec solve1( string() ) -> non_neg_integer().
solve1( Input ) ->
    Instructions = parse_instructions( Input ),
    StartPos = { { 0, 0 }, north },
    { { X, Y }, _ } = lists:foldl( fun get_next_pos/2,
                                   StartPos,
                                   Instructions ),
    abs( X ) + abs( Y ).
%% @doc True when X lies in the closed interval between A and B,
%% regardless of which endpoint is smaller.
-spec belongs( integer(), integer(), integer() ) -> boolean().
belongs( X, A, B ) when A =< B ->
    ( A =< X ) andalso ( X =< B );
belongs( X, A, B ) ->
    %% Endpoints given in descending order: normalise and retry.
    belongs( X, B, A ).
%% True when point {X, Y} lies on the given axis-aligned segment.
-spec intersects( coord(), line() ) -> boolean().
intersects( { X, Y }, { { X1, Y1 }, { X2, Y2 } } ) ->
    belongs( X, X1, X2 ) andalso belongs( Y, Y1, Y2 ).
%% First coordinate of StepsList lying on any previously walked segment,
%% or the atom not_found.
-spec find_if_already_visited( steps_list(), lines() ) -> coord() | not_found.
find_if_already_visited( StepsList, PrevLines ) ->
    listz:find( fun( Step ) ->
                    lists:any( fun( Line ) ->
                                   intersects( Step, Line )
                               end, PrevLines)
                end,
                StepsList ).
%% Walk instruction by instruction, remembering every walked segment, until
%% some step lands on an already-visited segment; return that coordinate.
%% The first clause handles zero-step instructions, which only change the
%% heading and produce no segment.
-spec find_first_intersection( [ instruction() ], pos(), lines() ) -> coord().
find_first_intersection( [ { _, 0 } = Instruction | Rest ], CurrentPos, PrevLines ) ->
    find_first_intersection( Rest, get_next_pos( Instruction, CurrentPos ), PrevLines );
find_first_intersection( [ { Turn, Steps } | Rest ], { Pos, Face }, PrevLines ) ->
    NewFace = do_turn( Turn, Face ),
    StepsList = get_steps_list( { Pos, NewFace }, Steps ),
    case find_if_already_visited( StepsList, PrevLines ) of
        not_found ->
            FirstStep = erlang:hd( StepsList ),
            LastStep = lists:last( StepsList ),
            NextPos = { LastStep, NewFace },
            NewLine = { FirstStep, LastStep },
            find_first_intersection( Rest, NextPos, [ NewLine | PrevLines ] );
        VisitedPos -> VisitedPos
    end.
%% Part 2: taxicab distance to the first location visited twice. The origin
%% is seeded as a degenerate segment so that returning to it counts.
-spec solve2( string() ) -> non_neg_integer().
solve2( Input ) ->
    Instructions = parse_instructions( Input ),
    { X, Y } = find_first_intersection( Instructions, { { 0, 0 }, north }, [ { { 0, 0 }, { 0, 0 } } ] ),
    abs( X ) + abs( Y ).
-include_lib("eunit/include/eunit.hrl").
%% Part 1 examples from the puzzle statement.
solve1_test_() ->
    [ ?_assertEqual( 5, solve1( "R2, L3" ) ),
      ?_assertEqual( 2, solve1( "R2, R2, R2" ) ),
      ?_assertEqual( 12, solve1( "R5, L5, R5, R3" ) ) ].
%% Part 2 example from the puzzle statement.
solve2_test_() ->
    [ ?_assertEqual( 4, solve2( "R8, R4, R4, R8" ) ) ].
%% Unit checks for the geometric helpers (belongs/intersects/get_steps_list/
%% find_if_already_visited). Removed trailing extraction garbage that
%% followed the closing `].' and made the module unparsable.
aux_test_() ->
    [ ?_assert( belongs( 0, -1, 1 ) ),
      ?_assert( belongs( 2, 0, 2 ) ),
      ?_assert( belongs( 2, 2, 0 ) ),
      ?_assertNot( belongs( 3, 0, 2 ) ),
      ?_assertNot( belongs( 3, 2, 0 ) ),
      ?_assert( intersects( { 2, 2 }, { { 0, 2 }, { 4, 2 } } ) ),
      ?_assert( intersects( { 2, 2 }, { { 2, 0 }, { 2, 4 } } ) ),
      ?_assert( intersects( { 0, 2 }, { { 0, 2 }, { 4, 2 } } ) ),
      ?_assert( intersects( { 4, 2 }, { { 0, 2 }, { 4, 2 } } ) ),
      ?_assertNot(intersects( { 3, 3 }, { { 0, 2 }, { 4, 2 } } ) ),
      ?_assertEqual( [ { 2, 1 }, { 3, 1 }, { 4, 1 } ], get_steps_list( { { 1, 1 }, east }, 3 ) ),
      ?_assertEqual( { 1, 0 }, find_if_already_visited( [ { 0, 0 }, { 1, 0 }, { 2, 0 } ], [ { { 5, 5 }, { 5, 10 } }, { { 1, -2 }, { 1, 2 } } ] ) )
    ].
%% =============================================================================
%% bondy_auth_wamp_cryptosign.erl -
%%
%% Copyright (c) 2016-2022 Leapsight. All rights reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% =============================================================================
%% -----------------------------------------------------------------------------
%% @doc
%% ## References
%% * [BrowserAuth](http://www.browserauth.net)
%% * [Binding Security Tokens to TLS Channels](https://www.ietf.org/proceedings/90/slides/slides-90-uta-0.pdf)
%% @end
%% -----------------------------------------------------------------------------
-module(bondy_auth_wamp_cryptosign).
-behaviour(bondy_auth).
-include("bondy_security.hrl").
-type state() :: map().
-type challenge_error() :: missing_pubkey | no_matching_pubkey.
%% BONDY_AUTH CALLBACKS
-export([init/1]).
-export([requirements/0]).
-export([challenge/3]).
-export([authenticate/4]).
%% =============================================================================
%% BONDY_AUTH CALLBACKS
%% =============================================================================

%% -----------------------------------------------------------------------------
%% @doc
%% Initialises the cryptosign method for the session in `Ctxt'. Succeeds only
%% when the context resolves to a known user that has authorized public keys
%% registered; otherwise returns `{error, invalid_context}'.
%% @end
%% -----------------------------------------------------------------------------
-spec init(bondy_auth:context()) ->
    {ok, State :: state()} | {error, Reason :: any()}.
init(Ctxt) ->
    try
        User = bondy_auth:user(Ctxt),
        %% Cryptosign is unusable without a user that has at least one
        %% registered authorized key.
        User =/= undefined
            andalso true == bondy_rbac_user:has_authorized_keys(User)
            orelse throw(invalid_context),
        {ok, maps:new()}
    catch
        throw:Reason ->
            {error, Reason}
    end.
%% -----------------------------------------------------------------------------
%% @doc Declares what this authentication method requires: an identified
%% user with stored authorized keys, and no password.
%% @end
%% -----------------------------------------------------------------------------
-spec requirements() -> map().
requirements() ->
    #{
        identification => true,
        %% Cryptosign is public-key based, so no password is required.
        %% NOTE(review): the original line contained the anonymization
        %% placeholder `<PASSWORD>' (not valid Erlang); `false' restores the
        %% evident intent -- confirm against the upstream repository.
        password => false,
        authorized_keys => true
    }.
%% -----------------------------------------------------------------------------
%% @doc
%% Generates the cryptosign challenge. The client must announce its public
%% key under `authextra.pubkey' (hex encoded); it is accepted only when it
%% matches one of the user's stored authorized keys. On success returns a
%% random 32-byte challenge (hex encoded in `Extra') and stashes the raw
%% key and challenge in the method state for authenticate/4.
%% @end
%% -----------------------------------------------------------------------------
-spec challenge(
    Details :: map(), AuthCtxt :: bondy_auth:context(), State :: state()) ->
    {true, Extra :: map(), NewState :: state()}
    | {error, challenge_error(), NewState :: state()}.
challenge(Details, Ctxt, State) ->
    try
        HexKey = maps_utils:get_path(
            [authextra, <<"pubkey">>], Details, undefined
        ),
        HexKey =/= undefined orelse throw(missing_pubkey),
        Key = decode_hex(HexKey),
        %% The stored keys are hex formatted so that we can easily compare here
        Keys = bondy_rbac_user:authorized_keys(bondy_auth:user(Ctxt)),
        case lists:member(Key, Keys) of
            true ->
                Challenge = enacl:randombytes(32),
                NewState = State#{
                    pubkey => Key,
                    challenge => Challenge
                },
                Extra = #{
                    challenge => encode_hex(Challenge),
                    channel_binding => undefined %% TODO
                },
                {true, Extra, NewState};
            false ->
                {error, no_matching_pubkey, State}
        end
    catch
        throw:Reason ->
            {error, Reason, State}
    end.
%% -----------------------------------------------------------------------------
%% @doc
%% Verifies the client's hex-encoded Ed25519 signature over the challenge
%% issued by challenge/3, using the public key stashed in the method state.
%% All failure modes are collapsed into `{error, invalid_signature, State}'.
%%
%% FIX: the catch clause for a failed normalise_signature/2 previously
%% matched class `error' (`error:invalid_signature'), but that helper raises
%% via throw/1, so the exception escaped uncaught. It now matches
%% `throw:invalid_signature'.
%% @end
%% -----------------------------------------------------------------------------
-spec authenticate(
    Signature :: binary(),
    DataIn :: map(),
    Ctxt :: bondy_auth:context(),
    CBState :: state()) ->
    {ok, DataOut :: map(), CBState :: state()}
    | {error, Reason :: any(), CBState :: state()}.
authenticate(EncSignature, _, _, #{pubkey := PK} = State)
when is_binary(EncSignature) ->
    try
        Challenge = maps:get(challenge, State),
        Signature0 = decode_hex(EncSignature),
        Signature = normalise_signature(Signature0, Challenge),
        %% Verify that the Challenge was signed using the Ed25519 key
        case enacl:sign_verify_detached(Signature, Challenge, PK) of
            true ->
                {ok, #{}, State};
            false ->
                %% Challenge does not match the expected
                {error, invalid_signature, State}
        end
    catch
        error:badarg ->
            %% enacl failed
            {error, invalid_signature, State};
        throw:invalid_signature ->
            %% normalise_signature/2 rejected the signature shape
            {error, invalid_signature, State};
        throw:invalid_hex_encoding ->
            %% decode_hex/1 rejected the encoding
            {error, invalid_signature, State}
    end.
%% =============================================================================
%% PRIVATE
%% =============================================================================
%% @private
%% @doc Decodes a hex-encoded string/binary into raw bytes via hex_utils.
%% Throws `invalid_hex_encoding' when the input is not valid hex. Any throw
%% raised inside hex_utils propagates unchanged, exactly as before: the old
%% `throw:Reason -> throw(Reason)' clause was a no-op rethrow and has been
%% dropped, together with a stale commented-out length check.
decode_hex(HexString) ->
    try
        hex_utils:hexstr_to_bin(HexString)
    catch
        error:_ ->
            throw(invalid_hex_encoding)
    end.
%% @private
%% @doc Encodes raw bytes as a hex binary (delegates to
%% hex_utils:bin_to_hexstr/1 and converts the resulting string to binary).
encode_hex(Bin) when is_binary(Bin) ->
    list_to_binary(hex_utils:bin_to_hexstr(Bin)).
%% @private
%% @doc As the cryptosign spec is not formal some clients e.g. Python
%% return Signature(64) ++ Challenge(32) while others e.g. JS return just the
%% Signature(64). Accepts both shapes and returns the bare 64-byte
%% signature; throws `invalid_signature' for any other size, and for a
%% 96-byte blob whose trailing 32 bytes are not exactly the challenge.
%% (Removed trailing extraction garbage that followed the final period.)
normalise_signature(Signature, _) when byte_size(Signature) == 64 ->
    Signature;
normalise_signature(Signature, Challenge) when byte_size(Signature) == 96 ->
    case binary:match(Signature, Challenge) of
        {64, 32} ->
            %% Challenge found exactly at the tail: strip it.
            binary:part(Signature, {0, 64});
        _ ->
            throw(invalid_signature)
    end;
normalise_signature(_, _) ->
    throw(invalid_signature).
%%
%% Copyright (c) 2015-2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc 2PSet CRDT: two-phased set.
%% Once removed, elements cannot be added again.
%% Also, this is not an observed removed variant.
%% This means elements can be removed before being
%% in the set.
%%
%% @reference <NAME>, <NAME>, and <NAME>
%% Delta State Replicated Data Types (2016)
%% [http://arxiv.org/pdf/1603.01529v1.pdf]
%%
%% @reference <NAME>
%% delta-enabled-crdts C++ library
%% [https://github.com/CBaquero/delta-enabled-crdts]
-module(state_twopset).
-author("<NAME> <<EMAIL>>").
-behaviour(type).
-behaviour(state_type).
-define(TYPE, ?MODULE).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/0, new/1]).
-export([mutate/3, delta_mutate/3, merge/2]).
-export([query/1, equal/2, is_bottom/1, is_inflation/2,
is_strict_inflation/2,
irreducible_is_strict_inflation/2]).
-export([join_decomposition/1, delta/2, digest/1]).
-export([encode/2, decode/2]).
-export_type([state_twopset/0, state_twopset_op/0]).
-opaque state_twopset() :: {?TYPE, payload()}.
-type payload() :: {ordsets:ordset(any()), ordsets:ordset(any())}.
-type element() :: term().
-type state_twopset_op() :: {add, element()} |
{rmv, element()}.
%% @doc Create a new, empty `state_twopset()'
-spec new() -> state_twopset().
new() ->
    %% Payload is a pair of ordsets: {Added, Removed}.
    {?TYPE, {ordsets:new(), ordsets:new()}}.

%% @doc Create a new, empty `state_twopset()'
-spec new([term()]) -> state_twopset().
new([]) ->
    new().

%% @doc Apply an operation through the generic state_type:mutate/3 path.
-spec mutate(state_twopset_op(), type:id(), state_twopset()) ->
    {ok, state_twopset()}.
mutate(Op, Actor, {?TYPE, _TwoPSet}=CRDT) ->
    state_type:mutate(Op, Actor, CRDT).

%% @doc Delta-mutate a `state_twopset()'.
%% The first argument can be `{add, element()}' or
%% `{rmv, element()}'.
-spec delta_mutate(state_twopset_op(), type:id(), state_twopset()) ->
    {ok, state_twopset()}.
delta_mutate({add, Elem}, _Actor, {?TYPE, {Added, _Removed}}) ->
    %% The delta touches only the add component.
    Delta = minimum_delta(Elem, Added),
    {ok, {?TYPE, {Delta, ordsets:new()}}};
delta_mutate({rmv, Elem}, _Actor, {?TYPE, {_Added, Removed}}) ->
    %% Removals are recorded even for elements never added
    %% (this is not an observed-removed variant; see module doc).
    Delta = minimum_delta(Elem, Removed),
    {ok, {?TYPE, {ordsets:new(), Delta}}}.
%% @doc Smallest delta for adding `Elem' to `Set': the singleton ordset
%% containing `Elem' when it is not yet a member, otherwise the empty
%% ordset (the addition would be a no-op).
-spec minimum_delta(term(), ordsets:ordset(term())) -> ordsets:ordset(term()).
minimum_delta(Elem, Set) ->
    %% Whatever adding Elem actually adds is, by definition, the minimum delta.
    WithElem = ordsets:add_element(Elem, Set),
    ordsets:subtract(WithElem, Set).
%% @doc Returns the value of the `state_twopset()'.
%% This value is a set with added elements minus
%% the removed elements.
-spec query(state_twopset()) -> sets:set(element()).
query({?TYPE, {Added, Removed}}) ->
    sets:from_list(ordsets:subtract(Added, Removed)).

%% @doc Merge two `state_twopset()'.
%% The result is the component wise set union.
-spec merge(state_twopset(), state_twopset()) -> state_twopset().
merge({?TYPE, {Added1, Removed1}}, {?TYPE, {Added2, Removed2}}) ->
    Added = ordsets:union(Added1, Added2),
    Removed = ordsets:union(Removed1, Removed2),
    {?TYPE, {Added, Removed}}.

%% @doc Equality for `state_twopset()'.
-spec equal(state_twopset(), state_twopset()) -> boolean().
equal({?TYPE, {Added1, Removed1}}, {?TYPE, {Added2, Removed2}}) ->
    ordsets_ext:equal(Added1, Added2) andalso
        ordsets_ext:equal(Removed1, Removed2).
%% @doc Check if a TwoPSet is bottom, i.e. no element was ever added or
%% removed. The previous implementation called orddict:is_empty/1 on the
%% payload components, which are ordsets, not orddicts; that only worked
%% incidentally because both are represented as lists. Compare against the
%% empty ordset instead.
-spec is_bottom(state_twopset()) -> boolean().
is_bottom({?TYPE, {Added, Removed}}) ->
    Added =:= ordsets:new() andalso
        Removed =:= ordsets:new().
%% @doc Given two `state_twopset()', check if the second is an inflation
%% of the first.
%% The second `state_twopset()' is an inflation if the first set
%% with adds is a subset of the second with adds.
%% Vice versa for the sets with removes.
-spec is_inflation(state_twopset(), state_twopset()) -> boolean().
is_inflation({?TYPE, {Added1, Removed1}}, {?TYPE, {Added2, Removed2}}) ->
    ordsets:is_subset(Added1, Added2) andalso
        ordsets:is_subset(Removed1, Removed2);
%% @todo get back here later
%% Cardinality threshold variant: the visible set has at least Value elements.
is_inflation({cardinality, Value}, {?TYPE, _}=CRDT) ->
    sets:size(query(CRDT)) >= Value.

%% @doc Check for strict inflation.
-spec is_strict_inflation(state_twopset(), state_twopset()) -> boolean().
is_strict_inflation({?TYPE, _}=CRDT1, {?TYPE, _}=CRDT2) ->
    state_type:is_strict_inflation(CRDT1, CRDT2);
%% @todo get back here later
%% Cardinality threshold variant: the visible set exceeds Value elements.
is_strict_inflation({cardinality, Value}, {?TYPE, _}=CRDT) ->
    sets:size(query(CRDT)) > Value.

%% @doc Check for irreducible strict inflation.
-spec irreducible_is_strict_inflation(state_twopset(),
                                      state_type:digest()) ->
    boolean().
irreducible_is_strict_inflation({?TYPE, _}=A, B) ->
    state_type:irreducible_is_strict_inflation(A, B).

%% @doc Digest is simply the full state.
-spec digest(state_twopset()) -> state_type:digest().
digest({?TYPE, _}=CRDT) ->
    {state, CRDT}.
%% @doc Join decomposition for `state_twopset()': one single-element
%% irreducible state per added element, followed by one per removed
%% element. The elements within each group appear in reverse ordset
%% order, matching the original fold-based construction.
-spec join_decomposition(state_twopset()) -> [state_twopset()].
join_decomposition({?TYPE, {Added, Removed}}) ->
    AddStates = lists:reverse([{?TYPE, {[E], []}} || E <- Added]),
    RmvStates = lists:reverse([{?TYPE, {[], [E]}} || E <- Removed]),
    AddStates ++ RmvStates.
%% @doc Delta calculation for `state_twopset()'.
-spec delta(state_twopset(), state_type:digest()) -> state_twopset().
delta({?TYPE, _}=A, B) ->
    state_type:delta(A, B).

%% @doc Serialise using the Erlang external term format.
-spec encode(state_type:format(), state_twopset()) -> binary().
encode(erlang, {?TYPE, _}=CRDT) ->
    erlang:term_to_binary(CRDT).

%% @doc Deserialise; asserts the decoded term is tagged with ?TYPE.
-spec decode(state_type:format(), binary()) -> state_twopset().
decode(erlang, Binary) ->
    {?TYPE, _} = CRDT = erlang:binary_to_term(Binary),
    CRDT.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).

new_test() ->
    ?assertEqual({?TYPE, {ordsets:new(), ordsets:new()}}, new()).

query_test() ->
    Set0 = new(),
    Set1 = {?TYPE, {[<<"a">>], []}},
    %% A removed element hides a matching add; removes without adds are legal.
    Set2 = {?TYPE, {[<<"a">>, <<"c">>], [<<"a">>, <<"b">>]}},
    ?assertEqual(sets:new(), query(Set0)),
    ?assertEqual(sets:from_list([<<"a">>]), query(Set1)),
    ?assertEqual(sets:from_list([<<"c">>]), query(Set2)).

delta_add_test() ->
    Actor = 1,
    Set0 = new(),
    {ok, {?TYPE, Delta1}} = delta_mutate({add, <<"a">>}, Actor, Set0),
    Set1 = merge({?TYPE, Delta1}, Set0),
    %% Re-adding an existing element yields an empty (minimum) delta.
    {ok, {?TYPE, Delta2}} = delta_mutate({add, <<"a">>}, Actor, Set1),
    Set2 = merge({?TYPE, Delta2}, Set1),
    {ok, {?TYPE, Delta3}} = delta_mutate({add, <<"b">>}, Actor, Set2),
    Set3 = merge({?TYPE, Delta3}, Set2),
    ?assertEqual({?TYPE, {[<<"a">>], []}}, {?TYPE, Delta1}),
    ?assertEqual({?TYPE, {[<<"a">>], []}}, Set1),
    ?assertEqual({?TYPE, {[], []}}, {?TYPE, Delta2}),
    ?assertEqual({?TYPE, {[<<"a">>], []}}, Set2),
    ?assertEqual({?TYPE, {[<<"b">>], []}}, {?TYPE, Delta3}),
    ?assertEqual({?TYPE, {[<<"a">>, <<"b">>], []}}, Set3).

add_test() ->
    Actor = 1,
    Set0 = new(),
    {ok, Set1} = mutate({add, <<"a">>}, Actor, Set0),
    {ok, Set2} = mutate({add, <<"b">>}, Actor, Set1),
    ?assertEqual({?TYPE, {[<<"a">>], []}}, Set1),
    ?assertEqual({?TYPE, {[<<"a">>, <<"b">>], []}}, Set2).

rmv_test() ->
    Actor = 1,
    Set0 = new(),
    {ok, Set1} = mutate({add, <<"a">>}, Actor, Set0),
    {ok, Set2} = mutate({add, <<"b">>}, Actor, Set1),
    %% Removal keeps the element in the add-set; it is masked, not deleted.
    {ok, Set3} = mutate({rmv, <<"b">>}, Actor, Set2),
    ?assertEqual({?TYPE, {[<<"a">>], []}}, Set1),
    ?assertEqual({?TYPE, {[<<"a">>, <<"b">>], []}}, Set2),
    ?assertEqual({?TYPE, {[<<"a">>, <<"b">>], [<<"b">>]}}, Set3).

merge_deltas_test() ->
    Set1 = {?TYPE, {[<<"a">>], []}},
    Delta1 = {?TYPE, {[], [<<"a">>, <<"b">>]}},
    Delta2 = {?TYPE, {[<<"c">>], []}},
    Set2 = merge(Delta1, Set1),
    %% merge/2 is commutative.
    Set3 = merge(Set1, Delta1),
    Delta<roup = merge(Delta1, Delta2),
    ?assertEqual({?TYPE, {[<<"a">>], [<<"a">>, <<"b">>]}}, Set2),
    ?assertEqual({?TYPE, {[<<"a">>], [<<"a">>, <<"b">>]}}, Set3),
    ?assertEqual({?TYPE, {[<<"c">>], [<<"a">>, <<"b">>]}}, DeltaGroup).

equal_test() ->
    Set1 = {?TYPE, {[<<"a">>], []}},
    Set2 = {?TYPE, {[<<"a">>], [<<"a">>]}},
    ?assert(equal(Set1, Set1)),
    ?assert(equal(Set2, Set2)),
    ?assertNot(equal(Set1, Set2)).

is_bottom_test() ->
    Set0 = new(),
    Set1 = {?TYPE, {[<<"a">>], []}},
    ?assert(is_bottom(Set0)),
    ?assertNot(is_bottom(Set1)).
is_inflation_test() ->
Set1 = {?TYPE, {[<<"a">>], []}},
Set2 = {?TYPE, {[<<"a">>], [<<"b">>]}},
Set3 = {?TYPE, {[<<"a">>, <<"b">>], []}},
?assert(is_inflation(Set1, Set1)),
?assert(is_inflation(Set1, Set2)),
?assertNot(is_inflation(Set2, Set1)),
?assert(is_inflation(Set1, Set3)),
?assertNot(is_inflation(Set2, Set3)),
%% check inflation with merge
?assert(state_type:is_inflation(Set1, Set1)),
?assert(state_type:is_inflation(Set1, Set2)),
?assertNot(state_type:is_inflation(Set2, Set1)),
?assert(state_type:is_inflation(Set1, Set3)),
?assertNot(state_type:is_inflation(Set2, Set3)).
is_strict_inflation_test() ->
Set1 = {?TYPE, {[<<"a">>], []}},
Set2 = {?TYPE, {[<<"a">>], [<<"b">>]}},
Set3 = {?TYPE, {[<<"a">>, <<"b">>], []}},
?assertNot(is_strict_inflation(Set1, Set1)),
?assert(is_strict_inflation(Set1, Set2)),
?assertNot(is_strict_inflation(Set2, Set1)),
?assert(is_strict_inflation(Set1, Set3)),
?assertNot(is_strict_inflation(Set2, Set3)).
join_decomposition_test() ->
Set1 = {?TYPE, {[<<"a">>, <<"b">>], [<<"b">>, <<"c">>]}},
Decomp1 = join_decomposition(Set1),
List = [{?TYPE, {[<<"a">>], []}},
{?TYPE, {[<<"b">>], []}},
{?TYPE, {[], [<<"b">>]}},
{?TYPE, {[], [<<"c">>]}}],
?assertEqual(lists:sort(List), lists:sort(Decomp1)).
encode_decode_test() ->
Set = {?TYPE, {[<<"a">>, <<"b">>], [<<"b">>, <<"c">>]}},
Binary = encode(erlang, Set),
ESet = decode(erlang, Binary),
?assertEqual(Set, ESet).
-endif.
-module(rfc3339).
%% Types
-export_type([year/0, month/0, day/0, date/0,
hour/0, minute/0, second/0, time/0,
fraction_unit/0, fraction/0,
datetime/0, offset/0,
error/0]).
%% API
-export([format_datetime/1, format_datetime/2,
format_local_datetime/2, format_local_datetime/3,
format_date/1,
format_time/1]).
%% API
-export([parse_datetime/1,
parse_local_datetime/1,
parse_date/1,
parse_time/1]).
-ifndef(pre18).
%% API
-export([format_system_time/1, format_system_time/2,
parse_system_time/1, parse_system_time/2]).
-endif.
-define(IS_DIGITS(A), A >= $0, A =< $9).
-define(IS_DIGITS(A, B), ?IS_DIGITS(A), ?IS_DIGITS(B)).
-define(IS_DIGITS(A, B, C, D), ?IS_DIGITS(A, B), ?IS_DIGITS(C, D)).
-compile({inline, [digits_to_integer/1, digits_to_integer/2, digits_to_integer/4]}).
%%%===================================================================
%%% Types
%%%===================================================================
-type year() :: 0..9999.
-type month() :: 1..12.
-type day() :: 1..31.
-type hour() :: 0..23.
-type minute() :: 0..59.
-type second() :: 0..59.
-type fraction_unit() ::
{0..999, millisecond}
| {0..999999, microsecond}
| {0..999999999, nanosecond}.
-type fraction() :: 0..999999 | fraction_unit().
-type date() :: {year(), month(), day()}.
-type time() :: {hour(), minute(), second()}.
-type datetime() :: {date(), time()}.
-type offset() :: {-23..23, minute()}.
-type error() :: badarg | baddate | badtime | badfrac | badoffset.
%%%===================================================================
%%% API
%%%===================================================================
%% @equiv format_local_datetime(DateTime, _Offset = {0, 0})
-spec format_datetime(datetime()) -> iodata().
format_datetime(DateTime) ->
format_local_datetime(DateTime, _Offset = {0, 0}).
%%--------------------------------------------------------------------
%% @equiv format_local_datetime(DateTime, _Offset = {0, 0}, Frac)
-spec format_datetime(datetime(), fraction()) -> iodata().
format_datetime(DateTime, Frac) ->
format_local_datetime(DateTime, _Offset = {0, 0}, Frac).
%%--------------------------------------------------------------------
%% @doc
%% Format timestamp with UTC offset. Function behaviour is undefined
%% for invalid input values.
%% @end
-spec format_local_datetime(datetime(), offset() | undefined) -> iodata().
format_local_datetime(_DateTime = {Date, Time}, Offset) ->
[format_date(Date), $T, format_time(Time), format_offset(Offset)].
%%--------------------------------------------------------------------
%% @doc
%% Format timestamp with UTC offset and fraction of a second. Function
%% behaviour is undefined for invalid input values.
%% @end
-spec format_local_datetime(datetime(), offset() | undefined, fraction()) -> iodata().
format_local_datetime(_DateTime = {Date, Time}, Offset, Frac) ->
[format_date(Date), $T, format_time(Time, Frac), format_offset(Offset)].
%%--------------------------------------------------------------------
%% @doc
%% Format date. E.g., `iolist_to_binary({@module}:format_date({2016,
%% 6, 20}))' will result in `<<"2016-06-20">>'. Year value must be
%% less than 10000. Function behaviour is undefined for invalid input.
%% @end
-spec format_date(date()) -> iodata().
format_date(_Date = {Year, Month, Day}) ->
[format4(Year), $-, format2(Month), $-, format2(Day)].
%%--------------------------------------------------------------------
%% @doc
%% Format time of day. E.g.,
%% `iolist_to_binary({@module}:format_time({9, 38, 14}))' will result
%% in `<<"09:38:14">>' binary string. Function behaviour is undefined
%% for invalid input values.
%% @end
-spec format_time(time()) -> iodata().
format_time(_Time = {Hour, Minute, Second}) ->
[format2(Hour), $:, format2(Minute), $:, format2(Second)].
%%--------------------------------------------------------------------
-spec format_time(time(), fraction()) -> iodata().
format_time(Time, Frac) ->
[format_time(Time), $., format_fraction(Frac)].
%%%===================================================================
%%% API
%%%===================================================================
%% @doc
%% Parse timestamp and convert it to UTC. Parsing with this function
%% is equivalent to parsing with parse_local_datetime/1 and then
%% compensating for UTC offset.
%%
%% For timestamps with unknown UTC offset (offset is "-00:00" in the
%% source text, parsed as `undefined') conversion isn't possible and
%% `badoffset' atom will be thrown.
%% @end
%% @throws error()
%% @see parse_local_datetime/1
-spec parse_datetime(iodata()) -> {datetime(), fraction() | undefined}.
%% Parse a timestamp and normalize it to UTC. A timestamp whose UTC
%% offset is unknown ("-00:00" in the source, parsed as 'undefined')
%% cannot be normalized, so 'badoffset' is thrown for it.
parse_datetime(Str) ->
    {Local, Offset, Frac} = parse_local_datetime(Str),
    case Offset of
        undefined -> throw(badoffset);
        _Known -> {remove_offset(Local, Offset), Frac}
    end.
%%--------------------------------------------------------------------
%% @doc
%% Parse timestamp with UTC offset.
%%
%% Date part of the timestamp must represent a valid date, or
%% `baddate' atom will be thrown.
%%
%% Symbols "T", "t" and " " (space) are allowed as separators between
%% date and time of day parts of the timestamp.
%%
%% For invalid time of day value atom `badtime' will be thrown. Leap
%% seconds are allowed in the time of day part, along with any valid
%% hour and minute and will be mapped to second 59. According to UTC,
%% leap seconds are allowed only at end of month, but it seems
%% impractical to implement such a check, and leap second at any hour
%% and minute will be parsed and remapped to second 59.
%%
%% Fraction of a second is parsed with minimal fraction unit
%% possible. E.g., fraction "52" will be parsed as `{520,
%% millisecond}', fraction "5234" will be parsed as `{523400,
%% microsecond}'. Sub-nanosecond fractions aren't supported for now
%% and atom `badfrac' will be thrown. If timestamp doesn't have
%% fraction of a second in the source text, fraction will be parsed as
%% atom `undefined'.
%%
%% UTC offsets from "-23:59" to "+23:59" all considered to be valid and
%% will be parsed. UTC offset "Z" (or "z") is parsed as zero offset
%% `{0, 0}'. Unknown UTC offset ("-00:00" in the source text) is parsed
%% as atom `undefined'.
%% @end
%% @throws error()
-spec parse_local_datetime(iodata()) ->
                                  {datetime(), offset() | undefined,
                                   fraction() | undefined}.
%% Parse "<date>T<time>[.frac]<offset>" step by step. The original nested
%% two single-clause `case' expressions and duplicated the whole
%% offset-parsing arm for the with-fraction and without-fraction paths;
%% this version parses linearly with assertive matches, preserving the
%% exact throw behavior (badarg on any syntax error, plus whatever the
%% part parsers throw).
parse_local_datetime(Str) when is_binary(Str) ->
    {Date, Rest0} = parse_date_part(Str),
    %% Date and time must be separated by "T", "t" or a single space.
    TimeStr =
        case Rest0 of
            <<Sep, T/bytes>> when Sep =:= $T; Sep =:= $t; Sep =:= $\s -> T;
            _BadSep -> throw(badarg)
        end,
    {Time, Rest1} = parse_time_part(TimeStr),
    %% Optional fraction of a second, introduced by ".".
    {Frac, OffsetStr} =
        case Rest1 of
            <<$., FracStr/bytes>> -> parse_frac_part(FracStr);
            _NoFrac -> {undefined, Rest1}
        end,
    %% The offset must consume the remainder of the input exactly.
    case parse_offset_part(OffsetStr) of
        {Offset, <<>>} -> {{Date, Time}, Offset, Frac};
        {_Offset, _Trailing} -> throw(badarg)
    end;
parse_local_datetime(Str) when is_list(Str) ->
    parse_local_datetime(iolist_to_binary(Str)).
%%--------------------------------------------------------------------
%% @doc
%% Parse date. E.g., `{@module}:parse_date(<<"2016-06-20">>)' will
%% result in `{2016, 6, 20}' term. Function throws atom `badarg' for
%% syntactically incorrect input and throws `baddate' when invalid
%% date value parsed.
%% @end
%% @throws badarg | baddate
-spec parse_date(iodata()) -> date().
parse_date(Str) when is_binary(Str) ->
case parse_date_part(Str) of
{Date, <<>>} -> Date;
{_Date, _StrLeft} -> throw(badarg)
end;
parse_date(Str) when is_list(Str) -> parse_date(iolist_to_binary(Str)).
%%--------------------------------------------------------------------
%% @doc
%% Parse time of day. E.g., `{@module}:parse_time(<<"09:38:14">>)'
%% will result in `{9, 38, 14}' term. Function throws atom `badarg'
%% for syntactically incorrect input and throws `badtime' when invalid
%% time value parsed.
%%
%% Leap seconds are allowed along with any valid hour and minute and
%% will be mapped to second 59. E.g., term `{9, 38, 59}' will be
%% result of `{@module}:parse_time(<<"09:38:60">>)'.
%% @end
%% @throws badarg | badtime
-spec parse_time(iodata()) -> time().
parse_time(Str) when is_binary(Str) ->
case parse_time_part(Str) of
{Time, <<>>} -> Time;
{_Time, _StrLeft} -> throw(badarg)
end;
parse_time(Str) when is_list(Str) -> parse_time(iolist_to_binary(Str)).
%%%===================================================================
%%% API
%%%===================================================================
-ifndef(pre18).
%% @equiv format_system_time(SysTime, _Unit = native)
-spec format_system_time(non_neg_integer()) -> iodata().
format_system_time(SysTime) -> format_system_time(SysTime, native).
%%--------------------------------------------------------------------
%% @doc
%% Format Erlang system time.
%% @end
-spec format_system_time(non_neg_integer(), erlang:time_unit()) -> iodata().
format_system_time(SysTime, 1) ->
format_datetime(system_seconds_to_datetime(SysTime));
format_system_time(SysTime, 1000) ->
format_datetime(system_seconds_to_datetime(SysTime div 1000),
_Frac = {SysTime rem 1000, millisecond});
format_system_time(SysTime, 1000000) ->
format_datetime(system_seconds_to_datetime(SysTime div 1000000),
_Frac = {SysTime rem 1000000, microsecond});
format_system_time(SysTime, 1000000000) ->
format_datetime(system_seconds_to_datetime(SysTime div 1000000000),
_Frac = {SysTime rem 1000000000, nanosecond});
format_system_time(SysTime, native) ->
format_system_time(SysTime, erlang:convert_time_unit(1, seconds, native));
format_system_time(SysTime, Unit)
when Unit =:= second;
Unit =:= seconds ->
format_system_time(SysTime, 1);
format_system_time(SysTime, Unit)
when Unit =:= millisecond;
Unit =:= milli_seconds ->
format_system_time(SysTime, 1000);
format_system_time(SysTime, Unit)
when Unit =:= microsecond;
Unit =:= micro_seconds ->
format_system_time(SysTime, 1000000);
format_system_time(SysTime, Unit)
when Unit =:= nanosecond;
Unit =:= nano_seconds ->
format_system_time(SysTime, 1000000000);
format_system_time(SysTime, PartsPerSecond) when PartsPerSecond < 1000 ->
format_system_time(erlang:convert_time_unit(SysTime, PartsPerSecond, 1000), 1000);
format_system_time(SysTime, PartsPerSecond) when PartsPerSecond < 1000000 ->
format_system_time(erlang:convert_time_unit(SysTime, PartsPerSecond, 1000000), 1000000);
format_system_time(SysTime, PartsPerSecond) when PartsPerSecond < 1000000000 ->
format_system_time(erlang:convert_time_unit(SysTime, PartsPerSecond, 1000000000), 1000000000);
format_system_time(_SysTime, _Unit) -> error(badarg).
%%--------------------------------------------------------------------
%% @equiv parse_system_time(Str, _Unit = native)
-spec parse_system_time(iodata()) -> non_neg_integer().
parse_system_time(Str) -> parse_system_time(Str, native).
%%--------------------------------------------------------------------
%% @doc
%% Parse Erlang system time. For timestamps prior to
%% "1970-01-01T00:00:00Z" error atom `baddate' will be thrown, as
%% there's no way to represent such timestamp as POSIX time.
%% Conversion to requested time unit may result in accuracy
%% loss. E.g., timestamp has nanosecond fraction and conversion to
%% seconds was requested.
%% @end
%% @throws error()
-spec parse_system_time(iodata(), erlang:time_unit()) -> non_neg_integer().
parse_system_time(Str, Unit) ->
{SysTime, FromUnit} =
case parse_datetime(Str) of
{{Date, Time}, Frac} when Date >= {1970, 1, 1} ->
Seconds = datetime_to_system_seconds({Date, Time}),
case Frac of
undefined -> {Seconds, seconds};
{N, millisecond} -> {Seconds * 1000 + N, milli_seconds};
{N, microsecond} -> {Seconds * 1000000 + N, micro_seconds};
{N, nanosecond} -> {Seconds * 1000000000 + N, nano_seconds}
end;
_PreEpoch -> throw(baddate)
end,
erlang:convert_time_unit(SysTime, FromUnit, Unit).
-endif. % pre18
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% Zero-pad an integer to 2 ASCII digits.
-spec format2(0..99) -> iodata().
format2(N) ->
<<(N div 10 + $0),
(N rem 10 + $0)>>.
%%--------------------------------------------------------------------
%% Zero-pad an integer to 3 ASCII digits.
-spec format3(0..999) -> iodata().
format3(N) ->
<<(N div 100 + $0),
(N rem 100 div 10 + $0),
(N rem 10 + $0)>>.
%%--------------------------------------------------------------------
%% Zero-pad an integer to 4 ASCII digits.
-spec format4(0..9999) -> iodata().
format4(N) ->
<<(N div 1000 + $0),
(N rem 1000 div 100 + $0),
(N rem 100 div 10 + $0),
(N rem 10 + $0)>>.
%%--------------------------------------------------------------------
%% Zero-pad to 6 digits by combining the 4- and 2-digit formatters.
-spec format6(0..999999) -> iodata().
format6(N) ->
[format4(N div 100),
format2(N rem 100)].
%%--------------------------------------------------------------------
%% Zero-pad to 9 digits by combining the 6- and 3-digit formatters.
-spec format9(0..999999999) -> iodata().
format9(N) ->
[format6(N div 1000),
format3(N rem 1000)].
%%--------------------------------------------------------------------
%% Format a fraction of a second. Tagged tuples are rendered at their
%% declared precision (3/6/9 digits); a bare integer is treated as
%% microseconds (6 digits). Out-of-range values are a caller error.
-spec format_fraction(fraction()) -> iodata().
format_fraction({N, millisecond}) when N < 1000 -> format3(N);
format_fraction({N, microsecond}) when N < 1000000 -> format6(N);
format_fraction({N, nanosecond}) when N < 1000000000 -> format9(N);
format_fraction(N) when N >= 0, N < 1000000 -> format6(N);
format_fraction(_Frac) -> error(badarg).
%%--------------------------------------------------------------------
-spec format_offset(offset() | undefined) -> iodata().
%% Render a UTC offset: 'undefined' (unknown offset) as "-00:00", zero as
%% "Z", otherwise a signed "hh:mm" pair. Produces the same bytes as the
%% original iolist once flattened.
format_offset(undefined) ->
    <<"-00:00">>;
format_offset({0, 0}) ->
    <<"Z">>;
format_offset({Hours, Minutes}) when Hours < 0 ->
    [$-, format2(-Hours), $:, format2(Minutes)];
format_offset({Hours, Minutes}) ->
    [$+, format2(Hours), $:, format2(Minutes)].
%%%===================================================================
%%% Internal functions
%%%===================================================================
-spec digits_to_integer(48..57) -> 0..9.
digits_to_integer(A) -> A - $0.
%%--------------------------------------------------------------------
-spec digits_to_integer(48..57, 48..57) -> 0..99.
digits_to_integer(A, B) ->
digits_to_integer(A) * 10 + digits_to_integer(B).
%%--------------------------------------------------------------------
-spec digits_to_integer(48..57, 48..57, 48..57, 48..57) -> 0..9999.
digits_to_integer(A, B, C, D) ->
digits_to_integer(A) * 1000 + digits_to_integer(B) * 100 +
digits_to_integer(C) * 10 + digits_to_integer(D).
%%--------------------------------------------------------------------
%% Parse a "YYYY-MM-DD" prefix of a binary; returns the date and the
%% unconsumed rest. Throws badarg on a syntax error and baddate when the
%% digits do not form a valid calendar date.
-spec parse_date_part(binary()) -> {date(), binary()}.
parse_date_part(<<Y3, Y2, Y1, Y0, $-,
M1, M0, $-,
D1, D0,
Str/bytes>>)
when ?IS_DIGITS(Y3, Y2, Y1, Y0),
?IS_DIGITS(M1, M0),
?IS_DIGITS(D1, D0) ->
Date = {digits_to_integer(Y3, Y2, Y1, Y0),
digits_to_integer(M1, M0),
digits_to_integer(D1, D0)},
%% Validate against the calendar (month lengths, leap years).
case calendar:valid_date(Date) of
true -> {Date, Str};
false -> throw(baddate)
end;
parse_date_part(_BadStr) -> throw(badarg).
%%--------------------------------------------------------------------
%% Parse an "hh:mm:ss" prefix; returns the time and the unconsumed rest.
%% A leap second (ss = 60) is accepted for any hour/minute and remapped
%% to second 59. Throws badarg on syntax errors, badtime on out-of-range
%% values.
-spec parse_time_part(binary()) -> {time(), binary()}.
parse_time_part(<<H1, H0, $:,
M1, M0, $:,
S1, S0,
Str/bytes>>)
when ?IS_DIGITS(H1, H0),
?IS_DIGITS(M1, M0),
?IS_DIGITS(S1, S0) ->
case {digits_to_integer(H1, H0),
digits_to_integer(M1, M0),
digits_to_integer(S1, S0)} of
%% Leap second: clamp to 59.
{Hour, Minute, 60}
when Hour =< 23, Minute =< 59 -> {{Hour, Minute, 59}, Str};
Time = {Hour, Minute, Second}
when Hour =< 23, Minute =< 59, Second =< 59 -> {Time, Str};
_BadTime -> throw(badtime)
end;
parse_time_part(_BadStr) -> throw(badarg).
%%--------------------------------------------------------------------
%% Parse the digits of a second fraction (the "." has already been
%% consumed by the caller). The smallest unit that fits the digit count
%% is chosen: 1-3 digits -> millisecond, 4-6 -> microsecond,
%% 7-9 -> nanosecond; the value is scaled up to fill that unit.
-spec parse_frac_part(binary()) -> {fraction(), binary()}.
parse_frac_part(Str) -> parse_frac_part(Str, _Frac = 0, _FracLen = 0).
%%--------------------------------------------------------------------
%% Accumulate digits into Frac while counting them in FracLen.
-spec parse_frac_part(binary(), non_neg_integer(), non_neg_integer()) ->
{fraction(), binary()}.
parse_frac_part(<<D, Str/bytes>>, Frac, FracLen) when ?IS_DIGITS(D) ->
parse_frac_part(Str, 10 * Frac + (D - $0), FracLen + 1);
parse_frac_part(Str, Frac, FracLen) when FracLen > 0 ->
{case FracLen of
1 -> {Frac * 100, millisecond};
2 -> {Frac * 10, millisecond};
3 -> {Frac, millisecond};
4 -> {Frac * 100, microsecond};
5 -> {Frac * 10, microsecond};
6 -> {Frac, microsecond};
7 -> {Frac * 100, nanosecond};
8 -> {Frac * 10, nanosecond};
9 -> {Frac, nanosecond};
%% Sub-nanosecond precision is not supported.
_ -> throw(badfrac)
end, Str};
%% No digits at all after the dot is a syntax error.
parse_frac_part(_Str, _Frac, _FracLen) -> throw(badarg).
%%--------------------------------------------------------------------
%% Parse a UTC offset suffix: "Z"/"z" is the zero offset, "-00:00" is the
%% unknown offset (returned as 'undefined'), otherwise a signed "hh:mm"
%% pair. Throws badarg on syntax errors, badoffset on out-of-range values.
-spec parse_offset_part(binary()) -> {offset() | undefined, binary()}.
parse_offset_part(<<Z, Str/bytes>>) when Z =:= $Z; Z =:= $z -> {{0, 0}, Str};
parse_offset_part(<<"-00:00", Str/bytes>>) -> {undefined, Str};
parse_offset_part(<<Sign, H1, H0, $:, M1, M0, Str/bytes>>)
when ?IS_DIGITS(H1, H0),
?IS_DIGITS(M1, M0) ->
case {digits_to_integer(H1, H0),
digits_to_integer(M1, M0)} of
{Hour, Minute}
when Hour =< 23, Minute =< 59 ->
%% Only the hour carries the sign in the returned tuple; the minute
%% stays non-negative and implicitly shares the hour's sign.
case Sign of
$- -> {{-Hour, Minute}, Str};
$+ -> {{Hour, Minute}, Str};
_ -> throw(badarg)
end;
_BadOffset -> throw(badoffset)
end;
parse_offset_part(_BadStr) -> throw(badarg).
%%--------------------------------------------------------------------
%% Convert a local datetime to UTC by subtracting its UTC offset.
%% The minute component of an offset tuple always carries the sign of
%% the hour component (parse_offset_part returns {-8, 30} for "-08:30",
%% an offset of minus 8 hours 30 minutes), so for negative offsets the
%% minutes must be subtracted together with the hours. The original
%% `Hours * 3600 + Minutes * 60' treated {-8, 30} as minus 7:30.
-spec remove_offset(calendar:datetime(), {-23..23, 0..59}) -> calendar:datetime().
remove_offset(DateTime, {0, 0}) -> DateTime;
remove_offset(DateTime, {Hours, Minutes}) ->
    OffsetSeconds =
        case Hours < 0 of
            true -> Hours * 3600 - Minutes * 60;
            false -> Hours * 3600 + Minutes * 60
        end,
    calendar:gregorian_seconds_to_datetime(
      calendar:datetime_to_gregorian_seconds(DateTime) - OffsetSeconds
    ).
%%%===================================================================
%%% Internal functions
%%%===================================================================
-ifndef(pre18).
-spec datetime_to_system_seconds(calendar:datetime1970()) -> non_neg_integer().
datetime_to_system_seconds(DateTime) ->
Epoch = {{1970, 1, 1}, {0, 0, 0}},
calendar:datetime_to_gregorian_seconds(DateTime) -
calendar:datetime_to_gregorian_seconds(Epoch).
%%--------------------------------------------------------------------
-spec system_seconds_to_datetime(non_neg_integer()) -> calendar:datetime1970().
system_seconds_to_datetime(Seconds) ->
Epoch = {{1970, 1, 1}, {0, 0, 0}},
calendar:gregorian_seconds_to_datetime(
calendar:datetime_to_gregorian_seconds(Epoch) + Seconds
).
-endif. % pre18
%%%===================================================================
%%% Tests
%%%===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
format_datetime_1_test_() ->
[ ?_assertEqual(<<"1995-11-13T13:27:08Z">>,
iolist_to_binary(format_datetime({{1995, 11, 13}, {13, 27, 8}})))
].
format_datetime_2_test_() ->
[ ?_assertEqual(<<"1970-01-01T00:00:00.000001Z">>,
iolist_to_binary(format_datetime({{1970, 1, 1}, {0, 0, 0}}, 1))),
?_assertError(badarg, format_datetime({{1970, 1, 1}, {0, 0, 0}}, 1000000))
].
format_datetime_3_test_() ->
[ ?_assertEqual(<<"1970-01-01T00:00:00.001Z">>,
iolist_to_binary(format_datetime({{1970, 1, 1}, {0, 0, 0}}, {1, millisecond}))),
?_assertEqual(<<"1970-01-01T00:00:00.000001Z">>,
iolist_to_binary(format_datetime({{1970, 1, 1}, {0, 0, 0}}, {1, microsecond}))),
?_assertEqual(<<"1970-01-01T00:00:00.000000001Z">>,
iolist_to_binary(format_datetime({{1970, 1, 1}, {0, 0, 0}}, {1, nanosecond})))
].
format_local_datetime_2_test_() ->
[ ?_assertEqual(<<"2016-06-20T09:38:15+03:00">>,
iolist_to_binary(format_local_datetime(
{{2016, 06, 20}, {09, 38, 15}}, {3, 0}))),
?_assertEqual(<<"2016-02-12T11:02:09-00:00">>,
iolist_to_binary(format_local_datetime(
{{2016, 02, 12}, {11, 02, 09}}, undefined))),
?_assertEqual(<<"2016-02-12T01:02:03Z">>,
iolist_to_binary(format_local_datetime(
{{2016, 02, 12}, {01, 02, 03}}, {0, 0}))),
?_assertEqual(<<"9999-12-31T23:59:59+23:59">>,
iolist_to_binary(format_local_datetime(
{{9999, 12, 31}, {23, 59, 59}}, {23, 59})))
].
format_local_datetime_3_test_() ->
[ ?_assertEqual(<<"1999-09-15T16:47:01.004567-11:30">>,
iolist_to_binary(format_local_datetime({{1999, 09, 15}, {16, 47, 1}},
{-11, 30}, 4567)))
].
format_date_1_test_() ->
[ ?_assertEqual(<<"0000-01-01">>, iolist_to_binary(format_date({0, 1, 1}))),
?_assertEqual(<<"9999-12-31">>, iolist_to_binary(format_date({9999, 12, 31}))),
?_assertEqual(<<"2017-02-29">>, iolist_to_binary(format_date({2017, 02, 29}))),
?_assertEqual(<<"2016-06-20">>, iolist_to_binary(format_date({2016, 06, 20}))) ].
format_time_1_test_() ->
[ ?_assertEqual(<<"00:00:00">>, iolist_to_binary(format_time({0, 0, 0}))),
?_assertEqual(<<"23:59:59">>, iolist_to_binary(format_time({23, 59, 59}))),
?_assertEqual(<<"09:38:15">>, iolist_to_binary(format_time({09, 38, 15}))) ].
parse_date_1_test_() ->
[ ?_assertThrow(badarg, parse_date(<<>>)),
?_assertThrow(badarg, parse_date(<<"1970-01-XX">>)),
?_assertThrow(badarg, parse_date(<<"1970.01.01">>)),
?_assertThrow(badarg, parse_date(<<"+970-+1-+1">>)),
?_assertThrow(baddate, parse_date(<<"2017-02-29">>)),
?_assertEqual({1970, 1, 1}, parse_date(<<"1970-01-01">>)),
?_assertEqual({1970, 1, 1}, parse_date(["1970", $-, "01", $- | "01"]))
].
parse_time_1_test_() ->
[ ?_assertThrow(badarg, parse_time(<<>>)),
?_assertThrow(badarg, parse_time(<<"22:13:??">>)),
?_assertThrow(badarg, parse_time(<<"22-13-57">>)),
?_assertThrow(badtime, parse_time(<<"22:13:75">>)),
?_assertThrow(badarg, parse_time(<<"+2:+3:+7">>)),
?_assertEqual({22, 13, 57}, parse_time(<<"22:13:57">>)),
?_assertEqual({22, 13, 57}, parse_time(["22", $:, <<"13:">> | "57"])),
?_assertEqual({23, 59, 59}, parse_time(<<"23:59:60">>))
].
parse_datetime_1_test_() ->
DateTime = {{2017, 3, 3}, {13, 31, 37}},
Ans = {DateTime, {523800, microsecond}},
[ ?_assertMatch(Ans, parse_datetime(<<"2017-03-03T16:31:37.5238+03:00">>)),
?_assertMatch(Ans, parse_datetime(<<"2017-03-03t16:31:37.5238+03:00">>)),
?_assertMatch(Ans, parse_datetime(<<"2017-03-03 16:31:37.5238+03:00">>)),
?_assertThrow(badoffset, parse_datetime(<<"2017-03-03T16:31:37.5238-00:00">>)),
?_assertThrow(badarg, parse_datetime(<<"2017-03-03T16:31:37.5238?00:00">>)),
?_assertMatch(Ans, parse_datetime(<<"2017-03-03T13:31:37.5238Z">>)),
?_assertMatch(Ans, parse_datetime(<<"2017-03-03t13:31:37.5238z">>)),
?_assertMatch({DateTime, undefined},
parse_datetime(<<"2017-03-03T13:31:37Z">>)),
?_assertMatch({DateTime, {100, millisecond}},
parse_datetime(<<"2017-03-03T13:31:37.1Z">>)),
?_assertMatch({DateTime, {120, millisecond}},
parse_datetime(<<"2017-03-03T13:31:37.12Z">>)),
?_assertMatch({DateTime, {123, millisecond}},
parse_datetime(<<"2017-03-03T13:31:37.123Z">>)),
?_assertMatch({DateTime, {123400, microsecond}},
parse_datetime(<<"2017-03-03T13:31:37.1234Z">>)),
?_assertMatch({DateTime, {123450, microsecond}},
parse_datetime(<<"2017-03-03T13:31:37.12345Z">>)),
?_assertMatch({DateTime, {123456, microsecond}},
parse_datetime(<<"2017-03-03T13:31:37.123456Z">>)),
?_assertMatch({DateTime, {123456700, nanosecond}},
parse_datetime(<<"2017-03-03T13:31:37.1234567Z">>)),
?_assertMatch({DateTime, {123456780, nanosecond}},
parse_datetime(<<"2017-03-03T13:31:37.12345678Z">>)),
?_assertMatch({DateTime, {123456789, nanosecond}},
parse_datetime(<<"2017-03-03T13:31:37.123456789Z">>)),
?_assertThrow(badfrac, parse_datetime(<<"2017-03-03T13:31:37.1234567890Z">>)),
?_assertThrow(badarg, parse_datetime(<<"2017-03-03T13:31:37.Z">>)),
?_assertThrow(badarg, parse_datetime(<<"2017-03-03T13:31:37.+123Z">>)),
?_assertThrow(badarg, parse_datetime(<<"2017-03-03T13:31:37.-123Z">>)),
?_assertMatch({{{1996, 12, 20}, {0, 39, 57}}, undefined},
parse_datetime(<<"1996-12-19T16:39:57-08:00">>))
].
parse_local_datetime_1_test_() ->
DateTime = {{2017, 3, 3}, {16, 31, 37}},
Offset = {3, 0},
Frac = {523800, microsecond},
[ ?_assertMatch({DateTime, Offset, Frac},
parse_local_datetime(<<"2017-03-03T16:31:37.5238+03:00">>)),
?_assertMatch({DateTime, Offset, undefined},
parse_local_datetime(<<"2017-03-03T16:31:37+03:00">>)),
?_assertMatch({DateTime, undefined, Frac},
parse_local_datetime(<<"2017-03-03T16:31:37.5238-00:00">>)),
?_assertThrow(badarg, parse_local_datetime(<<"2017-03-03T16:31:37+03:+4">>)),
?_assertThrow(badarg, parse_local_datetime(<<"2017-03-03T16:31:37+03:XX">>)),
?_assertThrow(badarg, parse_local_datetime(<<"2017-03-03T16:31:37-+3:00">>)),
?_assertThrow(badarg, parse_local_datetime(<<"2017-03-03T16:31:37--3:00">>)),
?_assertThrow(badoffset, parse_local_datetime(<<"2017-03-03T16:31:37+42:00">>)),
?_assertMatch({{{1985, 4, 12}, {23, 20, 50}}, {0, 0}, {520, millisecond}},
parse_local_datetime(<<"1985-04-12T23:20:50.52Z">>))
].
-ifndef(pre18).
format_system_time_2_test_() ->
[ ?_assertEqual(<<"1970-01-01T00:02:03Z">>,
iolist_to_binary(format_system_time(123, seconds))),
?_assertEqual(<<"1970-01-01T00:00:00.123Z">>,
iolist_to_binary(format_system_time(123, milli_seconds))),
?_assertEqual(<<"1970-01-01T00:00:00.000123Z">>,
iolist_to_binary(format_system_time(123, micro_seconds))),
?_assertEqual(<<"1970-01-01T00:00:00.000000123Z">>,
iolist_to_binary(format_system_time(123, nano_seconds)))
].
parse_system_time_1_test_() ->
[ ?_assertEqual(123, parse_system_time(<<"1970-01-01T00:02:03Z">>, seconds)),
?_assertEqual(123, parse_system_time(<<"1970-01-01T00:00:00.123Z">>, milli_seconds)),
?_assertEqual(123, parse_system_time(<<"1970-01-01T00:00:00.000123Z">>, micro_seconds)),
?_assertEqual(123, parse_system_time(<<"1970-01-01T00:00:00.000000123Z">>, nano_seconds))
].
-endif. % pre18
-endif.
-module(pso).
-export([run/5]).
-type position() :: particle:position().
-type value() :: particle:value().
%% @doc
%%
%% Run `Iterations' iterations of the PSO algorithm. The position and
%% value of the best solution is returned.
%%
%% @end
-spec run(ObjectiveFun :: fun((position()) -> value()),
          InitialPositions :: [position()],
          InitialVelocities :: [particle:velocity()],
          Network :: network:network(),
          Iterations :: pos_integer()) -> particle:state().
run(ObjectiveFun, InitialPositions, InitialVelocities, Network, Iterations) ->
    %% Start one particle server per network node, paired positionally
    %% with its initial position and velocity. Matching {ok, Pid} asserts
    %% successful startup; the original element(2, ...) would silently
    %% accept {error, Reason} and store the reason as if it were a pid.
    Particles = maps:from_list(
                  [begin
                       {ok, Pid} =
                           particle_server:start_link(Pos, Vel, ObjectiveFun, []),
                       {N, Pid}
                   end
                   || {N, Pos, Vel} <- lists:zip3(network:all_nodes(Network),
                                                  InitialPositions,
                                                  InitialVelocities)]),
    %% Wire up each particle with the server pids of its network neighbors.
    maps_foreach(
      fun (N, ParticleServer) ->
              particle_server:set_neighbors(
                ParticleServer,
                [maps:get(Neighbor, Particles)
                 || Neighbor <- network:neighbors(N, Network)])
      end, Particles),
    %% Kick off the iteration on every particle.
    maps_foreach(
      fun (_, ParticleServer) ->
              particle_server:eval(ParticleServer, Iterations)
      end, Particles),
    %% Collect each particle's state after `Iterations' rounds and pick
    %% the best (presumably state/2 blocks until that iteration is
    %% reached — confirm in particle_server).
    Best = best_solution(maps:values(Particles), Iterations),
    %% Stop the servers before returning the winning state.
    lists:foreach(fun particle_server:stop/1, maps:values(Particles)),
    Best.
-spec maps_foreach(Fun :: fun((Key, Value) -> any()),
                   Map :: #{Key := Value}) -> any().
%% Apply Fun to every key/value pair of Map, for its side effects only;
%% the return value is not meaningful.
maps_foreach(Fun, Map) ->
    lists:foldl(fun({Key, Value}, _Ignored) -> Fun(Key, Value) end,
                nil,
                maps:to_list(Map)).
-spec best_solution(Particles :: [pid()], After :: pos_integer())
                   -> particle:state().
%% Fetch every particle's state after iteration `After' and return the
%% state with the smallest value (element 2 of the state tuple).
best_solution(Particles, After) ->
    States = [particle_server:state(Particle, After) || Particle <- Particles],
    [Best | _Rest] = lists:keysort(2, States),
    Best.
% Copyright (c) 2014-2015, <NAME> <<EMAIL>>
%
% Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted,
% provided that the above copyright notice and this permission notice appear in all copies.
%
% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
% WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
% DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
% NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
%
% @author <NAME> <<EMAIL>>
% @copyright {@years} <NAME>
% @version {@version}
% @doc The `noesis_polyline' module provides functions for working with
% <a href="http://en.wikipedia.org/wiki/Polygonal_chain">polylines</a>.
-module(noesis_polyline).
% Types
-type line() :: binary().
-export_type([
line/0
]).
% API
-export([
encode/1,
decode/1
]).
% API
% @doc Takes a {@link noesis_geometry:path()} and returns a {@link line()}.<br /><br />
% Based on the algorithm described <a href="https://developers.google.com/maps/documentation/utilities/polylinealgorithm">here</a>.
-spec encode(noesis_geometry:path()) -> line().
encode(Coordinates) -> encode_acc(Coordinates, 0, 0, []).

% @doc Takes a {@link line()} and returns a {@link noesis_geometry:path()}.<br /><br />
% Based on the algorithm described <a href="https://developers.google.com/maps/documentation/utilities/polylinealgorithm">here</a>.
-spec decode(line()) -> noesis_geometry:path().
decode(Encoded) -> decode_acc(Encoded, 0, 0, []).
% Private
% Walks the path accumulating one encoded chunk per point. Each point is
% scaled to 1e-5 precision and encoded as a delta from the previous one.
-spec encode_acc(noesis_geometry:path(), noesis_geometry:latitude(), noesis_geometry:longitude(), iolist()) -> line().
encode_acc([], _PrevLat, _PrevLng, Acc) ->
  iolist_to_binary(lists:reverse(Acc));
encode_acc([{Lng, Lat} | Points], PrevLat, PrevLng, Acc) ->
  ScaledLat = round(Lat * 1.0e5),
  ScaledLng = round(Lng * 1.0e5),
  Chunk = [encode_part(encode_sign(ScaledLat - PrevLat), []),
           encode_part(encode_sign(ScaledLng - PrevLng), [])],
  encode_acc(Points, ScaledLat, ScaledLng, [Chunk | Acc]).

% Left-shifts the value and flips all bits for negatives, so the sign
% ends up in the lowest bit (zig-zag encoding).
-spec encode_sign(integer()) -> integer().
encode_sign(Num) ->
  Shifted = Num bsl 1,
  case Num < 0 of
    true -> bnot Shifted;
    false -> Shifted
  end.

% Emits the value as 5-bit groups, least significant first; every group
% except the last sets the continuation bit (0x20) before adding 63.
-spec encode_part(integer(), iolist()) -> iolist().
encode_part(Num, Acc) when Num >= 32 ->
  Chunk = (32 bor (Num band 31)) + 63,
  encode_part(Num bsr 5, [Acc, Chunk]);
encode_part(Num, Acc) ->
  [Acc, Num + 63].
% Decodes the delta-encoded latitude/longitude pairs, keeping running
% absolute coordinates, and returns the path in input order.
-spec decode_acc(line(), noesis_geometry:latitude(), noesis_geometry:longitude(), noesis_geometry:path()) -> noesis_geometry:path().
decode_acc(<<>>, _Lat, _Lng, Acc) ->
  lists:reverse(Acc);
decode_acc(Bin, Lat0, Lng0, Acc) ->
  {DeltaLat, AfterLat} = decode_part(Bin, 32, 0, 0),
  {DeltaLng, AfterLng} = decode_part(AfterLat, 32, 0, 0),
  Lat = Lat0 + DeltaLat,
  Lng = Lng0 + DeltaLng,
  decode_acc(AfterLng, Lat, Lng, [{Lng / 1.0e5, Lat / 1.0e5} | Acc]).

% Reassembles one signed value from its 5-bit groups. The second
% argument starts at the sentinel 32 so at least one byte is consumed;
% a group below 32 (no continuation bit) terminates the value, which is
% then zig-zag decoded via its lowest bit.
-spec decode_part(line(), non_neg_integer(), non_neg_integer(), integer()) -> {integer(), line()}.
decode_part(Rest, B, _Shift, Acc) when B < 32 ->
  Decoded = case Acc band 1 of
              0 -> Acc bsr 1;
              _ -> bnot (Acc bsr 1)
            end,
  {Decoded, Rest};
decode_part(<<Char:8, Rest/binary>>, _PrevB, Shift, Acc) ->
  Chunk = Char - 63,
  decode_part(Rest, Chunk, Shift + 5, Acc bor ((Chunk band 31) bsl Shift)).
%% Benchmark harness (horse framework); compiled in only when the PERF
%% flag is set. Fix: removed trailing dataset-metadata residue that
%% followed -endif. and broke compilation.
-ifdef(PERF).
horse_encode() ->
  horse:repeat(100000,
    encode([{-120.2, 38.5}, {-120.95, 40.7}, {-126.453, 43.252}])).
-endif.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Copyright (c) 2022 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% -----------------------------------------------------------------------------
% @author <NAME> <<EMAIL>>
% @copyright 2022 <NAME>
% @doc
% Checks if two words are formed by rearranging each other's letters.
% Not the fastest way to compare. Can be slow for long strings
% because big numbers arithmetic is slow, though
% <i>eqp</i> looks fast enough for me.
% Just for fun - I tried to use only arithmetic operations.
% @end
% -----------------------------------------------------------------------------
- module(anagram).
-export([eq/2, eq/4]).
-export([as_sum/1, as_sum/3]).
-export([as_prod/1, as_prod/2]).
-export([as_prime_prod/1, as_prime_prod/2]).
-export([prime/1]).
-export([eqp/2, eqp/3]).
-define(ASCII_PRINTABLE_SIZE, 96). % number of printable ASCII characters: 126-31+1
-define(ASCII_PRINTABLE_BASE, ?ASCII_PRINTABLE_SIZE).
-define(ASCII_PRINTABLE_START, 31). % <i>space</i> (code 32) is mapped to 1
% -----------------------------------------------------------------------------
-spec eq(string(), string()) -> boolean().
% @doc Checks that two strings consist of the same multiset of letters.
% Same as <i>eq(A, B, ?ASCII_PRINTABLE_START, ?ASCII_PRINTABLE_BASE)</i>
eq(A, B) -> eq(A, B, ?ASCII_PRINTABLE_START, ?ASCII_PRINTABLE_BASE).

-spec eq(string(), string(), pos_integer(), pos_integer()) -> boolean().
% @doc Checks that two strings consist of the same multiset of letters.
% Length and plain character-sum comparisons are cheap necessary
% conditions that reject most non-anagrams early; the as_sum multiset
% fingerprint comparison is the decisive test.
eq(A, B, Start, Base) ->
  length(A) == length(B) andalso
    lists:sum(A) == lists:sum(B) andalso % cheap early rejection
    as_sum(A, Start, Base) == as_sum(B, Start, Base).
% -----------------------------------------------------------------------------
-spec as_sum(string()) -> non_neg_integer().
% @doc Calculates a multiset fingerprint for a string.
% Same as <i>as_sum(A, ?ASCII_PRINTABLE_START, ?ASCII_PRINTABLE_BASE)</i>
as_sum(A) -> as_sum(A, ?ASCII_PRINTABLE_START, ?ASCII_PRINTABLE_BASE).

-spec as_sum(string(), pos_integer(), pos_integer()) -> non_neg_integer().
% @doc Calculates a multiset fingerprint: the string is viewed as a
% vector of per-character counts and that vector is read as a numeral
% in base P, where P = Base * length(A) + 1 strictly exceeds any
% possible count, so no digit can carry over. Strings of equal length
% therefore get equal fingerprints iff they are anagrams.
% Fix: the previous formula summed (X-Start)*P, which collapses to
% P * sum(X-Start) - a plain scaled sum that could not distinguish
% e.g. "ad" from "bc", so eq/4 accepted non-anagrams.
as_sum(A, Start, Base) ->
  P = Base * length(A) + 1, % strictly greater than any character count
  lists:foldl(fun(X, S) -> ipow(P, X - Start) + S end, 0, A).

% Integer exponentiation by squaring (bignum-safe, exponent >= 0).
ipow(_Base, 0) -> 1;
ipow(Base, Exp) when Exp > 0, Exp band 1 =:= 0 ->
  Half = ipow(Base, Exp bsr 1),
  Half * Half;
ipow(Base, Exp) when Exp > 0 ->
  Base * ipow(Base, Exp - 1).
% -----------------------------------------------------------------------------
-spec as_prod(string()) -> pos_integer().
% @doc Calculates the product of all characters, each shifted down by
% the default printable-ASCII offset.
% Same as <i>as_prod(A, ?ASCII_PRINTABLE_START)</i>
as_prod(A) -> as_prod(A, ?ASCII_PRINTABLE_START).

-spec as_prod(string(), pos_integer()) -> pos_integer().
% @doc Calculates the product of all characters shifted down by Start;
% returns 1 for the empty string.
as_prod(A, Start) ->
  lists:foldl(fun(Char, Product) -> Product * (Char - Start) end, 1, A).
% -----------------------------------------------------------------------------
-spec eqp(string(), string()) -> boolean().
% @doc Checks that two strings consist of the same multiset of letters,
% comparing products of per-character prime numbers.
% Same as <i>eqp(A, B, ?ASCII_PRINTABLE_START)</i>
eqp(A, B) -> eqp(A, B, ?ASCII_PRINTABLE_START).

-spec eqp(string(), string(), pos_integer()) -> boolean().
% @doc Checks that two strings consist of the same multiset of letters.
% Prime factorisation is unique, so equal prime products imply equal
% character multisets; the length check is a cheap optimisation that
% rejects most mismatches before any bignum multiplication.
eqp(A, B, Start) ->
  case length(A) == length(B) of
    true -> as_prime_prod(A, Start) == as_prime_prod(B, Start);
    false -> false
  end.
% -----------------------------------------------------------------------------
-spec as_prime_prod(string()) -> pos_integer().
% @doc Calculates the product of the prime numbers assigned to each
% character.
% Same as <i>as_prime_prod(A, ?ASCII_PRINTABLE_START)</i>
as_prime_prod(A) -> as_prime_prod(A, ?ASCII_PRINTABLE_START).

-spec as_prime_prod(string(), pos_integer()) -> pos_integer().
% @doc Calculates the product of the primes assigned to each character
% (offset by Start); returns 1 for the empty string.
as_prime_prod(A, Start) ->
  lists:foldl(fun(Char, Product) -> Product * prime(Char - Start) end, 1, A).
% -----------------------------------------------------------------------------
-spec prime(pos_integer()) -> pos_integer().
% @doc Returns the N-th prime number (1-based, N in 1..100).
% Numbers taken from <i>Wikipedia</i>
% <a href="https://en.wikipedia.org/wiki/List_of_prime_numbers">
% <i>"The first 100 prime numbers"</i></a>.
% Rewritten from 100 separate clauses into a constant-tuple lookup;
% out-of-range or non-integer arguments still fail with function_clause
% (the guard rejects them), matching the old clause-based behaviour.
% Fix: removed trailing dataset-metadata residue after the final clause
% that broke compilation.
prime(N) when is_integer(N), N >= 1, N =< 100 ->
  element(N, {  2,   3,   5,   7,  11,  13,  17,  19,  23,  29,
               31,  37,  41,  43,  47,  53,  59,  61,  67,  71,
               73,  79,  83,  89,  97, 101, 103, 107, 109, 113,
              127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
              179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
              233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
              283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
              353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
              419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
              467, 479, 487, 491, 499, 503, 509, 521, 523, 541}).
%%%=============================================================================
%% Copyright 2012- Klarna AB
%% Copyright 2015- AUTHORS
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc Json schema validation module.
%%
%% This module is the core of jesse, it implements the validation functionality
%% according to the standard.
%% @end
%%%=============================================================================
-module(jesse_lib).
%% API
-export([ empty_if_not_found/1
, is_array/1
, is_json_object/1
, is_null/1
, re_run/2
, re_options/0
]).
%% Includes
-include("jesse_schema_validator.hrl").
%%% API
%% @doc Normalizes a lookup miss: maps the ?not_found marker to an
%% empty list and passes every other value through unchanged.
-spec empty_if_not_found(Value :: any()) -> any().
empty_if_not_found(Value) ->
  case Value of
    ?not_found -> [];
    Other -> Other
  end.
%% @doc Checks if the given value is json `array'.
%% This check is needed since objects in `jsx' are lists (proplists),
%% so a list only counts as an array when it is not a json object.
-spec is_array(Value :: any()) -> boolean().
is_array(Value) ->
  is_list(Value) andalso not is_json_object(Value).
%% @doc A naive check if the given data is a json object.
%% Supports the main formats of json representation:
%% 1) mochijson2 format (`{struct, proplist()}')
%% 2) jiffy format (`{proplist()}')
%% 3) jsx format (`[{binary() | atom(), any()}]')
%% 4) maps, on OTP releases where they are available (see ?IF_MAPS)
%% Returns `true' if the given data is an object, otherwise `false' is returned.
-spec is_json_object(Value :: any()) -> boolean().
%% The map clause is conditionally compiled: ?IF_MAPS expands to its
%% argument only when the runtime supports maps.
?IF_MAPS(
is_json_object(Map)
  when erlang:is_map(Map) ->
  true;
)
%% mochijson2: {struct, Proplist}
is_json_object({struct, Value})
  when is_list(Value) ->
  true;
%% jiffy: {Proplist}
is_json_object({Value})
  when is_list(Value) ->
  true;
%% handle `jsx' empty objects
is_json_object([{}]) ->
  true;
%% very naive check. checks only the first element.
%% NOTE: andalso binds tighter than orelse, so this guard accepts any
%% binary key, or any atom key other than 'struct'.
is_json_object([{Key, _Value} | _])
  when is_binary(Key) orelse is_atom(Key)
       andalso Key =/= struct ->
  true;
is_json_object(_) ->
  false.
%% @doc Checks if the given value is json `null' (the atom `null').
-spec is_null(Value :: any()) -> boolean().
is_null(Value) ->
  Value =:= null.
%% @doc Run the RE against the subject using the `re_options' from the jesse
%% application environment. `{capture, none}' is always used, so only
%% `match' or `nomatch' is returned and no capture groups are extracted.
%% Fix: removed trailing dataset-metadata residue after re_options/0
%% that broke compilation.
-spec re_run( Subject :: iodata() | unicode:charlist()
            , RE :: iodata() | unicode:charlist()
            ) -> match
               | nomatch.
re_run(Subject, RE) ->
  re:run(Subject, RE, [{capture, none} | re_options()]).

%% @doc Returns the base re options from jesse environment which will be used
%% when running client-provided patterns. By default, that is `[unicode, ucp]'
%% for the fullest compatibility matching unicode code points beyond ISO Latin-1
%% in character classes like `\w', `\s', and `\d'. Use only `[unicode]' instead
%% (without `ucp') for better performance at the expense of full non-ISO Latin-1
%% compatibility in character classes. See also notes on the `ucp' option at
%% [https://www.erlang.org/doc/man/re.html#compile-2 re:compile/2].
-spec re_options() -> list().
re_options() ->
  application:get_env(jesse, re_options, [unicode, ucp]).
%%
%% @doc Problem: given a sequence `Xs = [X1, X2,..., Xn]' of natural numbers,
%% find a linear-time algorithm to build a tree with fringe `Xs' that
%% minimises {@link cost/1. cost} function.
%%
%% The fringe of a tree is the list of labels at the leaves in left-to-right order.
%%
%% The presented {@link mincost_tree/1. mincost_tree} algorithm can be used
%% to solve the following problem:
%%
%% Given an arbitrary list of trees `Ts = [T1, T2,..., Tn]' together with their
%% heights `Hs = [H1, H2,..., Hn]', find a linear time algorithm to combine
%% them into a single tree `T' of minimum height.
%% Trees `Ts' should appear as subtrees of the tree `T' in the same order
%% as they appear in the list `Ts'.
%%
%% The solution is: run {@link mincost_tree/1. mincost_tree} algorithm for
%% fringe `Hs'. In the resulting tree replace all leaves `Hi' with the
%% corresponding tree `Ti'.
%%
%% @reference [B1] Chapter 7, pp. 41–49
%%
-module(mintrees).
-author("<NAME> <<EMAIL>>").
-export([cost/1]).
-export([mincost_tree/1, trees/1]).
-export([mincost_tree2/1, trees2/1]).
-import(core, [foldl1/2]).
-import(lists, [concat/1, foldl/3, map/2]).
%% =============================================================================
%% Types
%% =============================================================================
-type tree(Type) :: {leaf, Type} | {fork, tree(Type), tree(Type)}.
-type forest(Type) :: [tree(Type)].
%%
%% @doc Defines cost of the integer tree: a leaf costs its own label,
%% a fork costs one more than its more expensive subtree.
%%
-spec cost(Tree :: tree(integer())) -> integer().
cost({fork, Left, Right}) -> 1 + max(cost(Left), cost(Right));
cost({leaf, Label}) -> Label.
%%
%% @doc General right fold on nonempty lists: G maps the final element,
%% F combines each earlier element with the folded tail.
%%
-spec foldrn(F, G, [A]) -> B when
    F :: fun((A, B) -> B),
    G :: fun((A) -> B),
    A :: term(),
    B :: term().
foldrn(_F, G, [Last]) -> G(Last);
foldrn(F, G, [X | Rest]) -> F(X, foldrn(F, G, Rest)).
%% =============================================================================
%% Exponential algorithms
%% =============================================================================
%%
%% @doc Returns a minimal cost tree for the given fringe by exhaustive
%% search over every candidate tree.
%% @see trees2/1
%%
-spec mincost_tree2(Fringe :: [integer()]) -> tree(integer()).
mincost_tree2(Fringe) -> core:min_by(fun cost/1, trees2(Fringe)).
%%
%% @doc Returns all trees with the given fringe. Intuitive algorithm:
%% each element is prefixed, in every possible way, onto each tree
%% built from the rest of the fringe.
%%
-spec trees2(Fringe :: [integer()]) -> [tree(integer())].
trees2(Fringe) ->
  Extend = fun(X, Ts) -> lists:append([prefixes2(X, T) || T <- Ts]) end,
  Single = fun(X) -> [{leaf, X}] end,
  foldrn(Extend, Single, Fringe).
%% All ways of adding X as the leftmost leaf of T: either fork X with
%% the whole tree, or push it further down the left spine.
prefixes2(X, {leaf, _} = T) -> [{fork, {leaf, X}, T}];
prefixes2(X, {fork, U, V} = T) ->
  Deeper = [{fork, NewLeft, V} || NewLeft <- prefixes2(X, U)],
  [{fork, {leaf, X}, T} | Deeper].
%%
%% @doc Returns all trees with the given fringe. Alternative algorithm:
%% builds forests (lists of subtrees) and rolls each forest up into a
%% single left-leaning tree.
%%
-spec trees(Fringe :: [integer()]) -> [tree(integer())].
trees(Fringe) -> [rollup(Forest) || Forest <- forests(Fringe)].

-spec forests(Fringe :: [integer()]) -> [forest(integer())].
forests([Last]) -> [[{leaf, Last}]];
forests([X | Rest]) ->
  lists:append([prefixes(X, Forest) || Forest <- forests(Rest)]).

%% All ways of pushing X onto a forest: roll the first K trees up into
%% one and put a fresh leaf for X in front, for every K in 1..length.
-spec prefixes(X :: integer(), Ts :: forest(integer())) -> [forest(integer())].
prefixes(X, Forest) ->
  [[{leaf, X} | split_and_rollup(K, Forest)] || K <- lists:seq(1, length(Forest))].

split_and_rollup(K, Forest) ->
  {Front, Back} = lists:split(K, Forest),
  [rollup(Front) | Back].
%%
%% @doc Left fold a (nonempty) forest into a single left-associated
%% tree: [T1, T2, T3] becomes {fork, {fork, T1, T2}, T3}.
%%
-spec rollup(Forest :: forest(T)) -> tree(T).
rollup([First | Rest]) ->
  lists:foldl(fun(Tree, Acc) -> {fork, Acc, Tree} end, First, Rest).
%% =============================================================================
%% Linear algorithm
%% =============================================================================
%%
%% @doc Returns a minimal cost tree for the given fringe in linear time
%% (the algorithm of [B1], Ch. 7): a spine of cost-annotated subtrees
%% is maintained while folding the fringe from the right, and the spine
%% is finally rolled up into one left-associated tree.
%%
-spec mincost_tree(Fringe :: [integer()]) -> tree(integer()).
mincost_tree(Fringe) ->
  [{_Cost, First} | Rest] = build_spine(Fringe),
  lists:foldl(fun({_C, Tree}, Acc) -> {fork, Acc, Tree} end, First, Rest).

%% Right fold of insert/2 over the (nonempty) fringe; the last element
%% seeds the spine with a single annotated leaf.
build_spine([Last]) -> [leaf(Last)];
build_spine([X | Rest]) -> insert(X, build_spine(Rest)).

insert(X, Spine) -> [leaf(X) | split(X, Spine)].

%% Merge leading spine trees for as long as doing so does not make the
%% tree that X is about to be prefixed to costlier than its neighbour.
split(_X, [U]) -> [U];
split(X, [{CostU, _} = U, {CostV, _} = V | Rest] = Spine) ->
  case max(X, CostU) < CostV of
    true -> Spine;
    false -> split(X, [fork(U, V) | Rest])
  end.

%% Smart constructors pairing each tree node with its cost.
leaf(X) -> {X, {leaf, X}}.
fork({CostU, U}, {CostV, V}) -> {1 + max(CostU, CostV), {fork, U, V}}.
%% =============================================================================
%% Unit tests
%% =============================================================================
%% Unit tests (compiled only under the TEST flag).
%% Fix: removed trailing dataset-metadata residue after -endif. that
%% broke compilation.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% Both generators must enumerate all binary trees over a 5-element
%% fringe: the Catalan number C(4) = 14.
trees_test_() ->
  [
    ?_assertEqual(14, length(trees([1, 2, 3, 4, 5])))
  ].

foldrn_test_() ->
  F = fun(X, Y) -> X + Y end,
  G = fun(X) -> X + 1 end,
  [
    ?_assertEqual(16, foldrn(F, G, [1, 2, 3, 4, 5]))
  ].

trees2_test_() ->
  [
    ?_assertEqual(14, length(trees2([1, 2, 3, 4, 5])))
  ].

%% The linear algorithm must agree with the exhaustive-search version.
mincost_tree_test_() ->
  [
    ?_assertEqual(
      {fork,
        {fork,
          {fork,
            {fork,
              {leaf, 1},
              {leaf, 2}
            },
            {leaf, 3}
          },
          {leaf, 4}
        },
        {leaf, 5}
      },
      mincost_tree2([1, 2, 3, 4, 5])
    ),
    ?_assertEqual(
      mincost_tree2([1, 2, 3, 4, 5]),
      mincost_tree([1, 2, 3, 4, 5])
    )
  ].
-endif.
%%%-------------------------------------------------------------------
%%% @doc
%%% State Handler - This module encapsulates the `shards' state.
%%%
%%% There are different properties that have to be stored somewhere so
%%% `shards' can work properly. Remember, `shards' performs a logic on
%%% top of `ETS', for example, compute the shard and/or node based on
%%% the `Key' where the action will be applied. To do so, it needs the
%%% number of shards or partitions, the function to pick the shard
%%% and/or node (in case of global scope), the table type and
%%% of course, the module to use depending on the scope;
%%% `shards_local' or `shards_dist'.
%%%
%%% Because of that, when a new table is created using `shards',
%%% a new supervision tree is created as well to represent that table.
%%% The supervisor is `shards_owner_sup' and it has a control ETS
%%% table to save the `state' so it can be fetched later at any time.
%%% @end
%%%-------------------------------------------------------------------
-module(shards_state).
%% API
-export([
new/0,
new/1,
new/2,
new/3,
new/5,
new/6,
get/1,
is_state/1,
from_map/1,
to_map/1
]).
%% API – Getters & Setters
-export([
module/1,
module/2,
sup_name/1,
sup_name/2,
keypos/1,
keypos/2,
n_shards/1,
n_shards/2,
pick_shard_fun/1,
pick_shard_fun/2,
pick_node_fun/1,
pick_node_fun/2,
scope/1,
eval_pick_shard/2,
eval_pick_shard/3
]).
%%%===================================================================
%%% Types & Macros
%%%===================================================================
%% Default number of shards
-define(N_SHARDS, erlang:system_info(schedulers_online)).
%% @type op() = r | w | d.
%%
%% Defines operation type.
%% <li>`r': Read operations.</li>
%% <li>`w': Write operation.</li>
%% <li>`d': Delete operations.</li>
-type op() :: r | w | d.
%% @type key() = term().
%%
%% Defines key type.
-type key() :: term().
%% @type n_shards() = pos_integer().
%%
%% Defines number of shards.
-type n_shards() :: pos_integer().
%% @type keypos() = pos_integer().
%%
%% The key position in the tuple.
-type keypos() :: pos_integer().
%% @type range() = pos_integer().
%%
%% Defines the range or set – `range > 0'.
-type range() :: pos_integer().
%% @type pick_fun() = fun((key(), range(), op()) -> non_neg_integer() | any).
%%
%% Defines spec function to pick or compute the shard and/or node.
%% The function returns a value for `Key' within the range 0..Range-1.
-type pick_fun() :: fun((key(), range(), op()) -> non_neg_integer() | any).
%% @type scope() = l | g.
%%
%% Defines the scope, if it is local `l' or global `g'.
-type scope() :: l | g.
%% State definition
-record(state, {
module = shards_local :: module(),
sup_name = shards_sup :: atom(),
n_shards = ?N_SHARDS :: pos_integer(),
pick_shard_fun = fun shards_lib:pick/3 :: pick_fun(),
pick_node_fun = fun shards_lib:pick/3 :: pick_fun(),
keypos = 1 :: pos_integer()
}).
%% @type state() = #state{}.
%%
%% Defines `shards' state.
-type state() :: #state{}.
%% @type state_map() = #{
%% module => module(),
%% sup_name => atom(),
%% n_shards => pos_integer(),
%% pick_shard_fun => pick_fun(),
%% pick_node_fun => pick_fun(),
%% keypos => pos_integer()
%% }.
%%
%% Defines the map representation of the `shards' state:
%% <ul>
%% <li>`module': Module to be used depending on the `scope':
%% `shards_local' or `shards_dist'.</li>
%% <li>`sup_name': Registered name for `shards_sup'.</li>
%% <li>`n_shards': Number of ETS shards/fragments.</li>
%% <li>`pick_shard_fun': Function callback to pick/compute the shard.</li>
%% <li>`pick_node_fun': Function callback to pick/compute the node.</li>
%% <li>`keypos': Position of the key in the tuple.</li>
%% </ul>
-type state_map() :: #{
module => module(),
sup_name => atom(),
n_shards => pos_integer(),
pick_shard_fun => pick_fun(),
pick_node_fun => pick_fun(),
keypos => pos_integer()
}.
%% Exported types
-export_type([
op/0,
key/0,
n_shards/0,
keypos/0,
range/0,
pick_fun/0,
scope/0,
state/0,
state_map/0
]).
%%%===================================================================
%%% API
%%%===================================================================
-spec new() -> state().
new() ->
#state{}.
-spec new(pos_integer()) -> state().
new(Shards) ->
#state{n_shards = Shards}.
-spec new(pos_integer(), module()) -> state().
new(Shards, Module) ->
#state{n_shards = Shards, module = Module}.
-spec new(pos_integer(), module(), atom()) -> state().
new(Shards, Module, SupName) ->
#state{n_shards = Shards, module = Module, sup_name = SupName}.
-spec new(pos_integer(), module(), atom(), pick_fun(), pos_integer()) -> state().
new(Shards, Module, SupName, PickShardFun, KeyPos) ->
#state{
n_shards = Shards,
module = Module,
sup_name = SupName,
pick_shard_fun = PickShardFun,
keypos = KeyPos
}.
-spec new(pos_integer(), module(), atom(), pick_fun(), pos_integer(), pick_fun()) -> state().
new(Shards, Module, SupName, PickShardFun, KeyPos, PickNodeFun) ->
#state{
n_shards = Shards,
module = Module,
sup_name = SupName,
pick_shard_fun = PickShardFun,
pick_node_fun = PickNodeFun,
keypos = KeyPos
}.
%% @doc
%% Returns the `state' for the given table `Tab'.
%% @end
-spec get(Tab :: atom()) -> state().
get(Tab) when is_atom(Tab) ->
case ets:lookup(Tab, state) of
[State] -> State;
_ -> error(badarg)
end.
%% @doc
%% Returns `true' in the given argument is a valid state, otherwise
%% `false' is returned.
%% @end
-spec is_state(any()) -> boolean().
is_state(#state{}) ->
true;
is_state(_) ->
false.
%% @doc
%% Builds a new `state' from the given `Map'.
%% @end
-spec from_map(map()) -> state().
from_map(Map) ->
#state{
module = maps:get(module, Map, shards_local),
sup_name = maps:get(sup_name, Map, shards_sup),
n_shards = maps:get(n_shards, Map, ?N_SHARDS),
pick_shard_fun = maps:get(pick_shard_fun, Map, fun shards_lib:pick/3),
pick_node_fun = maps:get(pick_node_fun, Map, fun shards_lib:pick/3),
keypos = maps:get(keypos, Map, 1)
}.
%% @doc
%% Converts the given `state' into a `map'.
%% @end
-spec to_map(state()) -> state_map().
to_map(State) ->
#{
module => State#state.module,
sup_name => State#state.sup_name,
n_shards => State#state.n_shards,
pick_shard_fun => State#state.pick_shard_fun,
pick_node_fun => State#state.pick_node_fun,
keypos => State#state.keypos
}.
%%%===================================================================
%%% API
%%%===================================================================
-spec module(state() | atom()) -> module().
module(#state{module = Module}) ->
Module;
module(Tab) when is_atom(Tab) ->
module(?MODULE:get(Tab)).
-spec module(module(), state()) -> state().
module(Module, #state{} = State) when is_atom(Module) ->
State#state{module = Module}.
-spec sup_name(state() | atom()) -> atom().
sup_name(#state{sup_name = SupName}) ->
SupName;
sup_name(Tab) when is_atom(Tab) ->
sup_name(?MODULE:get(Tab)).
-spec sup_name(atom(), state()) -> state().
sup_name(SupName, #state{} = State) when is_atom(SupName) ->
State#state{sup_name = SupName}.
-spec n_shards(state() | atom()) -> pos_integer().
n_shards(#state{n_shards = Shards}) ->
Shards;
n_shards(Tab) when is_atom(Tab) ->
n_shards(?MODULE:get(Tab)).
-spec n_shards(pos_integer(), state()) -> state().
n_shards(Shards, #state{} = State) when is_integer(Shards), Shards > 0 ->
State#state{n_shards = Shards}.
-spec keypos(state() | atom()) -> pos_integer().
keypos(#state{keypos = KeyPos}) ->
KeyPos;
keypos(Tab) when is_atom(Tab) ->
keypos(?MODULE:get(Tab)).
-spec keypos(pos_integer(), state()) -> state().
keypos(KeyPos, #state{} = State) when is_integer(KeyPos), KeyPos > 0 ->
State#state{keypos = KeyPos}.
-spec pick_shard_fun(state() | atom()) -> pick_fun().
pick_shard_fun(#state{pick_shard_fun = PickShardFun}) ->
PickShardFun;
pick_shard_fun(Tab) when is_atom(Tab) ->
pick_shard_fun(?MODULE:get(Tab)).
-spec pick_shard_fun(pick_fun(), state()) -> state().
pick_shard_fun(Fun, #state{} = State) when is_function(Fun, 3) ->
State#state{pick_shard_fun = Fun}.
-spec pick_node_fun(state() | atom()) -> pick_fun().
pick_node_fun(#state{pick_node_fun = PickNodeFun}) ->
PickNodeFun;
pick_node_fun(Tab) when is_atom(Tab) ->
pick_node_fun(?MODULE:get(Tab)).
-spec pick_node_fun(pick_fun(), state()) -> state().
pick_node_fun(Fun, #state{} = State) when is_function(Fun, 3) ->
State#state{pick_node_fun = Fun}.
-spec scope(state() | atom()) -> scope().
scope(#state{module = shards_local}) ->
l;
scope(#state{module = shards_dist}) ->
g;
scope(Tab) when is_atom(Tab) ->
scope(?MODULE:get(Tab)).
%% @equiv eval_pick_shard(Key, w, State)
eval_pick_shard(Key, State) ->
eval_pick_shard(Key, w, State).
-spec eval_pick_shard(key(), op(), state()) -> non_neg_integer() | any.
eval_pick_shard(Key, Op, #state{pick_shard_fun = PickShardFun, n_shards = Shards}) ->
PickShardFun(Key, Shards, Op). | src/shards_state.erl | 0.615203 | 0.472562 | shards_state.erl | starcoder |
%% @author Couchbase <<EMAIL>>
%% @copyright 2017-2020 Couchbase, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% High Level Steps:
%% 1. For each couchstore bucket, get the EP-engine disk failure stats
%% from the stats archiver.
%% 2. Compare each stat sample with its previous value and
%% count the # of times the stat has incremented during the user configured
%% time period.
%% If the above count is over some threshold, then it indicates sustained
%% failure.
%% 3. If any of the stats show sustained failure then KV stats monitor
%% will report I/O error for the corresponding bucket.
%%
%% Since we are looking for sustained failure, we are not interested
%% in the value of the stat itself but rather the number of samples
%% where the stat has increased. The threshold is for the number of samples.
%% E.g. A timePeriod of 100s has 100 stat samples (one per second). If 60
%% of those samples show an increment over the previous sample then that
%% is considered a sustained failure.
%% EP engine retry policy for write failure is to retry the write every second
%% and indefinitely. As long as the disk failure continues to exist,
%% the write related failure stat will continue to increase. This is
%% irrespective of whether the client continues to perform writes or not.
%% As a result, more or less every sample of the write related failure stats
%% should show an increment over the previous one.
%% EP engine's retry policy for reads is different. It does not retry reads
%% on read failure. The read related failure stat will continue to increase
%% as long as the client is performing read ops and the disk failure
%% continues to exist.
%%
-module(kv_stats_monitor).
-behaviour(gen_server).
-include("ns_common.hrl").
%% Frequency at which stats are checked
-define(REFRESH_INTERVAL, ?get_param(refresh_interval, 2000)). % 2 seconds
%% Percentage threshold
-define(DISK_ISSUE_THRESHOLD, ?get_param(disk_issue_threshold, 60)).
-export([start_link/0]).
-export([get_buckets/0,
get_reason/1,
analyze_status/1,
is_failure/1]).
-export([init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3]).
-record(state, {
buckets :: dict:dict(),
%% Monitor disk failure stats only if auto-failover on
%% disk issues is enabled.
enabled = false :: boolean(),
%% Number of stats samples to monitor - depends on timePeriod
%% set by the user and the REFRESH_INTERVAL.
numSamples = nil :: nil | integer(),
refresh_timer_ref = undefined,
stats_collector = undefined,
latest_stats = {undefined, dict:new()}
}).
%% Starts the monitor as a locally registered singleton gen_server.
start_link() ->
    ServerName = {local, ?MODULE},
    gen_server:start_link(ServerName, ?MODULE, [], []).
%% gen_server callbacks
init([]) ->
    Self = self(),
    %% Schedule the first stats check as soon as the server is up.
    Self ! refresh,
    %% Get notified (as handle_info {event, _} messages) whenever the
    %% auto-failover settings or the bucket configuration change.
    chronicle_compat:subscribe_to_key_change(
      fun (auto_failover_cfg) ->
              Self ! {event, auto_failover_cfg};
          (Key) ->
              case ns_bucket:buckets_change(Key) of
                  false ->
                      ok;
                  true ->
                      Self ! {event, buckets}
              end
      end),
    %% Stats are only monitored when auto-failover on disk issues is
    %% enabled; NumSamples is derived from the user-configured time
    %% period (see get_failover_on_disk_issues/1).
    {Enabled, NumSamples} = get_failover_on_disk_issues(
                              auto_failover:get_cfg()),
    {ok, maybe_spawn_stats_collector(#state{buckets = reset_bucket_info(),
                                            enabled = Enabled,
                                            numSamples = NumSamples})}.
%% get_buckets: flatten the internal dict into a [{Bucket, Status}]
%% proplist; any other call is logged and nacked.
handle_call(get_buckets, _From, #state{buckets = Buckets} = State) ->
    Reply = dict:fold(
              fun(Bucket, {Status, _History}, Acc) ->
                      [{Bucket, Status} | Acc]
              end, [], Buckets),
    {reply, Reply, State};
handle_call(Call, From, State) ->
    ?log_warning("Unexpected call ~p from ~p when in state:~n~p",
                 [Call, From, State]),
    {reply, nack, State}.
%% No casts are part of this server's protocol; log and ignore so a
%% stray message cannot crash the monitor.
handle_cast(Cast, State) ->
    ?log_warning("Unexpected cast ~p when in state:~n~p", [Cast, State]),
    {noreply, State}.
%% Monitoring disabled: ignore the periodic refresh entirely (the loop
%% is restarted from the auto_failover_cfg handler when re-enabled).
handle_info(refresh, #state{enabled = false} = State) ->
    {noreply, State};
%% Periodic refresh: fold the latest stats snapshot into the per-bucket
%% failure histories, clear the snapshot, kick off a new collection and
%% re-arm the refresh timer.
handle_info(refresh, #state{buckets = Buckets,
                            numSamples = NumSamples,
                            latest_stats = {TS, Stats}} = State) ->
    NewBuckets = check_for_disk_issues(Buckets, TS, Stats, NumSamples),
    NewState = maybe_spawn_stats_collector(
                 State#state{buckets = NewBuckets,
                             latest_stats = {undefined, dict:new()}}),
    {noreply, resend_refresh_msg(NewState)};
%% Bucket config changed: diff the tracked buckets against the current
%% persistent buckets; drop entries for deleted buckets and start new
%% ones as {active, []} (no failure history yet).
handle_info({event, buckets}, #state{buckets = Dict} = State) ->
    NewBuckets0 = ns_bucket:get_bucket_names_of_type(persistent),
    NewBuckets = lists:sort(NewBuckets0),
    KnownBuckets = lists:sort(dict:fetch_keys(Dict)),
    ToRemove = KnownBuckets -- NewBuckets,
    ToAdd = NewBuckets -- KnownBuckets,
    NewDict0 = lists:foldl(
                 fun (Bucket, Acc) ->
                         dict:erase(Bucket, Acc)
                 end, Dict, ToRemove),
    NewDict = lists:foldl(
                fun (Bucket, Acc) ->
                        dict:store(Bucket, {active, []}, Acc)
                end, NewDict0, ToAdd),
    {noreply, State#state{buckets = NewDict}};
%% Auto-failover config changed: re-read the enabled flag and sample
%% count. NOTE: OldEnabled is already bound, so the first case branch
%% matches only when the flag is unchanged (keep state as-is); on
%% disable the histories are reset, on enable the refresh loop is
%% restarted via resend_refresh_msg/1.
handle_info({event, auto_failover_cfg},
            #state{enabled = OldEnabled} = State) ->
    {Enabled, NumSamples} =
        get_failover_on_disk_issues(auto_failover:get_cfg()),
    NewState = case Enabled of
                   OldEnabled -> State;
                   false -> State#state{buckets = reset_bucket_info()};
                   true -> resend_refresh_msg(State)
               end,
    ?log_debug("auto_failover_cfg change enabled:~p numSamples:~p ",
               [Enabled, NumSamples]),
    {noreply, NewState#state{enabled = Enabled, numSamples = NumSamples}};
%% Result from the stats collector process we spawned (matched by Pid):
%% timestamp it and stash it for the next refresh tick to evaluate.
handle_info({Pid, BucketStats}, #state{stats_collector = Pid} = State) ->
    TS = os:system_time(millisecond),
    {noreply, State#state{stats_collector = undefined,
                          latest_stats = {TS, BucketStats}}};
%% Anything else is unexpected: log and drain it from the mailbox.
handle_info(Info, State) ->
    ?log_warning("Unexpected message ~p when in state:~n~p", [Info, State]),
    {noreply, State}.
%% @doc gen_server terminate callback; nothing to clean up.
terminate(_Reason, _State) ->
ok.
%% @doc gen_server code_change callback; state format is unchanged.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% APIs
%% @doc Return [{BucketName, Status}] for all monitored buckets.
get_buckets() ->
gen_server:call(?MODULE, get_buckets).
%% @doc Translate a failure classification plus its affected buckets
%% into {HumanReadableMessage, FailureAtom}. The message lists the
%% bucket names comma-separated.
get_reason({io_failed, Buckets}) ->
    Msg = lists:flatten(["Disk reads and writes failed on following buckets: ",
                         string:join(Buckets, ", "), "."]),
    {Msg, io_failed};
get_reason({read_failed, Buckets}) ->
    Msg = lists:flatten(["Disk reads failed on following buckets: ",
                         string:join(Buckets, ", "), "."]),
    {Msg, read_failed};
get_reason({write_failed, Buckets}) ->
    Msg = lists:flatten(["Disk writes failed on following buckets: ",
                         string:join(Buckets, ", "), "."]),
    {Msg, write_failed}.
%% @doc True if Failure is one of the disk-failure classifications this
%% monitor can report (io_failed plus the per-operation failures).
is_failure(Failure) ->
lists:member(Failure, get_errors()).
%% @doc Group buckets by their disk-failure status. Takes
%% [{Bucket, Status}] and returns a proplist [{Status, [Bucket]}];
%% buckets whose status is not a failure classification are dropped.
analyze_status(Buckets) ->
    FailureKinds = get_errors(),
    lists:foldl(
      fun ({Bucket, Status}, Groups) ->
              case lists:member(Status, FailureKinds) of
                  false ->
                      Groups;
                  true ->
                      case lists:keyfind(Status, 1, Groups) of
                          false ->
                              [{Status, [Bucket]} | Groups];
                          {Status, Members} ->
                              lists:keyreplace(Status, 1, Groups,
                                               {Status, [Bucket | Members]})
                      end
              end
      end, [], Buckets).
%% Internal functions
%% @doc All failure classifications this monitor can report: the
%% per-operation failures from failure_stats/0 plus combined io_failed.
get_errors() ->
    Specific = lists:map(fun ({_Stat, Err}) -> Err end, failure_stats()),
    [io_failed | Specific].
%% @doc Build a fresh bucket dict for this node's persistent buckets:
%% every bucket starts as 'active' with an empty stats history.
reset_bucket_info() ->
    Buckets = ns_bucket:node_bucket_names_of_type(node(), persistent),
    dict:from_list([{Bucket, {active, []}} || Bucket <- Buckets]).
%% @doc Mapping from a memcached "disk-failures" stat name to the
%% failure classification it indicates.
failure_stats() ->
[{ep_data_read_failed, read_failed},
{ep_data_write_failed, write_failed}].
%% @doc Fetch the "disk-failures" stat group for Bucket from memcached.
%% Returns [{StatName :: atom(), Count :: integer()}]. Any error return
%% or exception is logged and mapped to [] so one misbehaving bucket
%% cannot take the monitor down.
get_latest_stats(Bucket) ->
try ns_memcached:stats(Bucket, <<"disk-failures">>) of
{ok, RawStats} ->
[{binary_to_atom(K, latin1), binary_to_integer(V)}
|| {K, V} <- RawStats];
Err ->
?log_debug("Error ~p while trying to read disk-failures stats for "
"bucket ~p", [Err, Bucket]),
[]
catch
_:E ->
?log_debug("Exception ~p while trying to read disk-failures stats "
"for bucket ~p", [E, Bucket]),
[]
end.
%% @doc Update every monitored bucket with the stats collected at TS.
%% Buckets for which no fresh stats were collected keep their previous
%% info unchanged.
check_for_disk_issues(Buckets, TS, LatestStats, NumSamples) ->
    Update =
        fun (Bucket, Info) ->
                case dict:find(Bucket, LatestStats) of
                    error ->
                        Info;
                    {ok, Stats} ->
                        check_for_disk_issues_stats(TS, Stats, Info, NumSamples)
                end
        end,
    dict:map(Update, Buckets).
%% @doc Merge freshly collected stat values into the per-stat history,
%% then classify the bucket.
%% Vals is of the form: [{stat1, CurrVal1}, {stat2, CurrVal2}, ...]
%% PastInfo is of the form:
%%   [{stat1, {PrevVal1, PrevTS1, BitString}},
%%    {stat2, {PrevVal2, PrevTS2, BitString}}, ...]
%% If the current value of a stat is greater than its previous value a
%% "1" bit is appended to its bit string, otherwise a "0" bit.
check_for_disk_issues_stats(CurrTS, Vals, {_Status, PastInfo}, NumSamples) ->
    UpdateStat =
        fun ({Stat, CurrVal}) ->
                case lists:keyfind(Stat, 1, PastInfo) of
                    false ->
                        %% First sample ever seen for this stat.
                        {Stat, {CurrVal, CurrTS, <<0:1>>}};
                    {Stat, PrevInfo} ->
                        {Stat, process_stat(CurrVal, CurrTS, PrevInfo,
                                            NumSamples)}
                end
        end,
    check_for_disk_issues_stats_inner(lists:map(UpdateStat, Vals), NumSamples).
%% @doc Classify a bucket from its per-stat bit windows. A stat counts
%% as failing when its window shows at least Threshold "increased"
%% samples. No failing stat => active; exactly one => that specific
%% failure; several => combined io_failed.
check_for_disk_issues_stats_inner(StatsInfo, NumSamples) ->
    Threshold = round(NumSamples * ?DISK_ISSUE_THRESHOLD / 100),
    IsFailing =
        fun ({Stat, {_Val, _TS, Bits}}) ->
                case is_stat_increasing(Bits, Threshold) of
                    false ->
                        false;
                    true ->
                        {true, proplists:get_value(Stat, failure_stats())}
                end
        end,
    Status =
        case lists:filtermap(IsFailing, StatsInfo) of
            [] ->
                active;
            [SingleErr] ->
                SingleErr;
            [_, _ | _] ->
                io_failed
        end,
    {Status, StatsInfo}.
%% @doc Fold one new counter sample into a per-stat history tuple
%% {Value, Timestamp, BitWindow}. An unchanged timestamp means no fresh
%% sample was collected, which is recorded as "not increasing" (0 bit);
%% otherwise a 1 bit is recorded iff the counter grew. The window is
%% then trimmed to the newest NumSamples bits.
process_stat(CurrVal, CurrTS, {PrevVal, PrevTS, Bits}, NumSamples) ->
    {Val, TS, Window} =
        case CurrTS of
            PrevTS ->
                {PrevVal, PrevTS, <<Bits/bits, 0:1>>};
            _ ->
                Bit = case CurrVal > PrevVal of
                          true -> 1;
                          false -> 0
                      end,
                {CurrVal, CurrTS, <<Bits/bits, Bit:1>>}
        end,
    {Val, TS, remove_old_entries(Window, NumSamples)}.
%% @doc Trim a bit window to at most NumSamples bits by dropping the
%% oldest (leading) bits; shorter windows are returned unchanged.
remove_old_entries(Bits, NumSamples) ->
    case bit_size(Bits) - NumSamples of
        Excess when Excess > 0 ->
            <<_Dropped:Excess/bits, Kept/bits>> = Bits,
            Kept;
        _ ->
            Bits
    end.
%% @doc Decide whether a failure counter is "increasing": its sample
%% window must not be all zeros, must span at least Threshold samples,
%% and must either be all ones or contain at least Threshold one-bits.
is_stat_increasing(Bits, Threshold) ->
    Size = bit_size(Bits),
    AllZeros = <<0:Size>>,
    if
        Bits =:= AllZeros ->
            false;
        Size < Threshold ->
            %% Auto-failover on disk issues is disabled by default.
            %% When the user turns it ON or increases the time period,
            %% there is a short period before the window size catches
            %% up with the Threshold.
            false;
        true ->
            AllOnes = << <<1:1>> || _ <- lists:seq(1, Size) >>,
            (Bits =:= AllOnes) orelse over_threshold(Bits, Threshold)
    end.
%% @doc True if the bitstring contains at least Threshold one-bits.
%% Counts down the threshold while walking the bits; reaching zero
%% means enough ones were found.
over_threshold(_Bits, 0) ->
    true;
over_threshold(<<Bit:1, Rest/bits>>, Threshold) ->
    over_threshold(Rest, Threshold - Bit);
over_threshold(<<>>, _Threshold) ->
    false.
%% @doc Read the failover-on-disk-issues part of the auto-failover
%% configuration. Returns {Enabled, NumSamples}, where NumSamples is
%% the number of refresh intervals covering the configured time period
%% (TimePeriod is multiplied by 1000 — presumably seconds vs the
%% millisecond refresh interval; confirm against the config schema).
%% Returns {false, nil} when the feature is not configured.
get_failover_on_disk_issues(Config) ->
    case menelaus_web_auto_failover:get_failover_on_disk_issues(Config) of
        undefined ->
            {false, nil};
        {Enabled, TimePeriod} ->
            {Enabled, round((TimePeriod * 1000) / ?REFRESH_INTERVAL)}
    end.
%% @doc (Re)arm the periodic refresh timer. An already-armed timer is
%% cancelled first so that at most one refresh message is in flight.
resend_refresh_msg(#state{refresh_timer_ref = undefined} = State) ->
    TimerRef = erlang:send_after(?REFRESH_INTERVAL, self(), refresh),
    State#state{refresh_timer_ref = TimerRef};
resend_refresh_msg(#state{refresh_timer_ref = OldRef} = State) ->
    _ = erlang:cancel_timer(OldRef),
    resend_refresh_msg(State#state{refresh_timer_ref = undefined}).
%% @doc Spawn a linked worker that fetches disk-failure stats for every
%% monitored bucket and reports back as {WorkerPid, ResultsDict}. If a
%% collector from a previous refresh is still running, no new one is
%% started and a warning is logged.
%% (Fix: removed trailing dataset-artifact garbage that made the final
%% line invalid Erlang.)
maybe_spawn_stats_collector(#state{stats_collector = undefined,
                                   buckets = Buckets} = State) ->
    Self = self(),
    Pid = proc_lib:spawn_link(
            fun () ->
                    Res = dict:map(fun (Bucket, _Info) ->
                                           get_latest_stats(Bucket)
                                   end, Buckets),
                    %% Tagged with self() so handle_info can match the
                    %% reply against the stored collector pid.
                    Self ! {self(), Res}
            end),
    State#state{stats_collector = Pid};
maybe_spawn_stats_collector(#state{stats_collector = Pid} = State) ->
    ?log_warning("Ignoring start of stats collector as the previous one "
                 "haven't finished yet: ~p", [Pid]),
    State.
-module(day5).
-behavior(aoc).
-export([input_type/0, parse_input/1, p1/1, p2/1]).

%% A decoded boarding pass: its row, its column within the row, and the
%% seat ID (Row * 8 + Col).
-type seat() :: {
        Row :: non_neg_integer(),
        Col :: non_neg_integer(),
        Id :: non_neg_integer()
       }.

%% @doc The puzzle input is consumed one line per boarding pass.
input_type() -> lines.

%% @doc Decode every boarding-pass line into a seat() triple.
parse_input(Lines) -> lists:map(fun seat/1, Lines).

-spec p1(Seats :: list(seat())) -> non_neg_integer().
%% @doc Return the highest seat ID in the manifest.
%%
%% Seat assignments are binary partition addresses: seven row
%% characters (`F' = front/0, `B' = back/1) followed by three column
%% characters (`L' = left/0, `R' = right/1), e.g.
%% ```
%%         --- column bits
%% FBFBBFFRLR
%% -------    row bits
%% '''
%% The seat ID is the value of the full binary address, Row * 8 + Col.
p1(Seats) ->
    lists:foldl(fun ({_Row, _Col, Id}, Max) -> max(Max, Id) end, 0, Seats).

%% NOTE: the original spec declared list(binary()) here, but p2/1 is
%% fed the already-parsed seat list produced by parse_input/1.
-spec p2(Seats :: list(seat())) -> list(non_neg_integer()).
%% @doc Given a manifest of seat assignments, find un-filled seat IDs.
p2(Seats0) ->
    ById = fun ({_, _, Id1}, {_, _, Id2}) -> Id1 =< Id2 end,
    missing_seats(lists:sort(ById, Seats0)).

%% Internal functions

%% Decode one 10-character boarding pass into {Row, Col, SeatId}.
seat(<<R6, R5, R4, R3, R2, R1, R0, C2, C1, C0>>) ->
    Row = num_val([R0, R1, R2, R3, R4, R5, R6]),
    Col = num_val([C0, C1, C2]),
    {Row, Col, Row * 8 + Col}.

missing_seats([First | Seats]) ->
    missing_seats(Seats, First, []).

%% Walk the ID-sorted seats; a gap of exactly two IDs marks one missing
%% seat in between. A larger gap crashes (case_clause) — the puzzle
%% guarantees at most one consecutive empty seat, so let it crash.
missing_seats([], _LastSeat, Missing) -> Missing;
missing_seats([{_, _, Id} = Seat | Seats], {_, _, PrevId}, Missing) ->
    case Id - PrevId of
        1 -> missing_seats(Seats, Seat, Missing);
        2 -> missing_seats(Seats, Seat, [Id - 1 | Missing])
    end.

%% Interpret a list of partition characters, least significant first.
num_val(Bits) when is_list(Bits) ->
    num_val(Bits, 1, 0).

num_val([], _Multiplier, Value) -> Value;
num_val([Bit | Bits], Multiplier, Value) ->
    num_val(Bits, Multiplier * 2, Value + binary(Bit) * Multiplier).

%% Bit value of a single partition character.
binary($F) -> 0;
binary($L) -> 0;
binary($B) -> 1;
binary($R) -> 1.
%% @doc Nitrogen element implementing 960.gs grid layout: containers,
%% grid cells and clearing divs. Grid-like records are rendered as
%% nested panels carrying the appropriate 960.gs CSS classes.
%% (Fix: removed trailing dataset-artifact garbage that made the final
%% line invalid Erlang.)
-module(element_grid).
-compile(export_all).
-include_lib("wf.hrl").

reflect() -> record_info(fields, grid).

%% A #grid_clear{} renders as a clearing div; any grid-like record
%% renders as a panel whose class list positions it on the grid.
render_element(#grid_clear {}) ->
    "<div class='clear'></div>\n";
render_element(Record0) ->
    Record = to_grid_record(Record0),
    Body = rewrite_body(lists:flatten([Record#grid.body])),
    element_panel:render_element(#panel {
        class=to_classes(Record),
        body=case Record#grid.type of
                 container ->
                     Body;
                 grid ->
                     %% User id/class/style go on an inner panel so the
                     %% outer one only carries positioning classes.
                     #panel {
                        id=Record#grid.id,
                        anchor=Record#grid.anchor,
                        class=[grid, Record#grid.class],
                        style=Record#grid.style,
                        body=Body
                       }
             end
    }).

%% Re-tag any grid-like record as #grid{}; the grid record variants
%% share the same field layout, so only the tag needs to change.
to_grid_record(X) ->
    setelement(1, X, grid).

%% - Add alpha to the first and omega to the last grid element.
%% - Add a clear statement at the end of the body.
%% Bodies that are not made up purely of grid elements pass through
%% unchanged.
rewrite_body(Body) ->
    case is_grid_body(Body) of
        true ->
            %% Add alpha to first grid element.
            [First|L1] = Body,
            First1 = to_grid_record(First),
            Body1 = [First1#grid {alpha = true }|L1],

            %% Add omega to last element, and add a clear div.
            {L2, Last} = lists:split(length(Body1) - 1, Body1),
            Last1 = to_grid_record(hd(Last)),
            Body2 = L2 ++ [Last1#grid { omega = true }, #grid_clear {}],
            Body2;
        false ->
            Body
    end.

%% Return true if the body is non-empty and all elements are grid
%% elements (identified by this module's name in the element record).
is_grid_body(Body) ->
    F = fun(X) ->
                is_tuple(X) andalso size(X) > 3 andalso element(3, X) == ?MODULE
        end,
    Body /= [] andalso lists:all(F, Body).

%% Given a grid record, create the list of 960.gs classes that
%% positions this grid: container_N/grid_N plus any of
%% alpha/omega/prefix_N/suffix_N/push_N/pull_N that are set.
to_classes(Record) ->
    C = case Record#grid.type of
            container ->
                %% Construct the container_N class, and add any other
                %% user defined classes.
                ContainerClass = "container_" ++ integer_to_list(Record#grid.columns),
                [ContainerClass, Record#grid.class];
            grid ->
                %% Just construct the grid_N class. User defined
                %% classes are added to the inner panel.
                GridClass = "grid_" ++ integer_to_list(Record#grid.columns),
                [GridClass]
        end,

    %% Check for alpha...
    C1 = case Record#grid.alpha of
             true -> [alpha|C];
             _ -> C
         end,

    %% Check for omega...
    C2 = case Record#grid.omega of
             true -> [omega|C1];
             _ -> C1
         end,

    %% Check for prefix...
    C3 = case Record#grid.prefix of
             undefined -> C2;
             Prefix -> ["prefix_" ++ integer_to_list(Prefix)|C2]
         end,

    %% Check for suffix...
    C4 = case Record#grid.suffix of
             undefined -> C3;
             Suffix -> ["suffix_" ++ integer_to_list(Suffix)|C3]
         end,

    %% Check for push...
    C5 = case Record#grid.push of
             undefined -> C4;
             Push -> ["push_" ++ integer_to_list(Push)|C4]
         end,

    %% Check for pull...
    C6 = case Record#grid.pull of
             undefined -> C5;
             Pull -> ["pull_" ++ integer_to_list(Pull)|C5]
         end,
    C6.
%%% Copyright 2022 Nomasystems, S.L. http://www.nomasystems.com
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.

%% @doc Common Test suite for the nmaglev consistent-hashing library.
%% (Fixes: removed trailing dataset-artifact garbage that made the
%% final line invalid Erlang; replaced a confusing already-bound `Acc'
%% case pattern with `_'.)
-module(nmaglev_SUITE).

-include_lib("stdlib/include/assert.hrl").

%%% EXTERNAL EXPORTS
-compile([nowarn_export_all, export_all]).

%%%-----------------------------------------------------------------------------
%%% EXTERNAL EXPORTS
%%%-----------------------------------------------------------------------------
all() ->
    [usage, consistency, distribution].

%%%-----------------------------------------------------------------------------
%%% INIT SUITE EXPORTS
%%%-----------------------------------------------------------------------------
init_per_suite(Conf) ->
    nct_util:setup_suite(Conf).

%%%-----------------------------------------------------------------------------
%%% END SUITE EXPORTS
%%%-----------------------------------------------------------------------------
end_per_suite(Conf) ->
    nct_util:teardown_suite(Conf).

%%%-----------------------------------------------------------------------------
%%% INIT CASE EXPORTS
%%%-----------------------------------------------------------------------------
init_per_testcase(Case, Conf) ->
    ct:print("Starting test case ~p", [Case]),
    nct_util:init_traces(Case),
    Conf.

%%%-----------------------------------------------------------------------------
%%% END CASE EXPORTS
%%%-----------------------------------------------------------------------------
end_per_testcase(Case, Conf) ->
    nct_util:end_traces(Case),
    ct:print("Test case ~p completed", [Case]),
    Conf.

%%%-----------------------------------------------------------------------------
%%% TEST CASES
%%%-----------------------------------------------------------------------------
usage() ->
    [{userdata, [{doc, "Tests the usage of the library"}]}].

%% Lookups must be stable: removing one node only remaps the keys that
%% pointed at that node.
usage(_Conf) ->
    Nodes = ["node" ++ erlang:integer_to_list(N) || N <- lists:seq(0, 50)],
    MaglevTable = nmaglev:create(Nodes),
    NodeForKey1 = nmaglev:get(<<"key1">>, MaglevTable),
    NodeForKey2 = nmaglev:get(<<"key2">>, MaglevTable),
    NodeForKey3 = nmaglev:get(<<"key3">>, MaglevTable),
    ?assertEqual(NodeForKey1, nmaglev:get(<<"key1">>, MaglevTable)),
    ?assertEqual(NodeForKey2, nmaglev:get(<<"key2">>, MaglevTable)),
    ?assertEqual(NodeForKey3, nmaglev:get(<<"key3">>, MaglevTable)),
    NewNodes = lists:delete(NodeForKey1, Nodes),
    NewMaglevTable = nmaglev:create(NewNodes),
    ?assertNotEqual(NodeForKey1, nmaglev:get(<<"key1">>, NewMaglevTable)),
    ?assertEqual(NodeForKey2, nmaglev:get(<<"key2">>, NewMaglevTable)),
    ?assertEqual(NodeForKey3, nmaglev:get(<<"key3">>, NewMaglevTable)),
    ok.

distribution() ->
    [{userdata, [{doc, "Tests the distribution of the maglev hashing algorithm"}]}].

%% Every node's share of lookup-table slots must be within 10% of every
%% other node's share.
distribution(_Conf) ->
    StartTime = erlang:timestamp(),
    Nodes = ["node" ++ erlang:integer_to_list(N) || N <- lists:seq(0, 50)],
    MaglevMap = nmaglev:create(Nodes),
    Distribution = lists:foldl(
        fun({_Permutation, Node}, Acc) ->
                case Acc of
                    #{Node := Count} ->
                        Acc#{Node => Count + 1};
                    _ ->
                        Acc#{Node => 1}
                end
        end,
        #{},
        maps:to_list(MaglevMap)
    ),
    ct:print("Maglev distribution: ~p", [Distribution]),
    DistributionList = maps:to_list(Distribution),
    CheckDeviation = fun(NodeCount) ->
        Fun = fun({_OtherNode, OtherNodeCount}) ->
            Deviation = (erlang:abs((NodeCount - OtherNodeCount) / NodeCount) * 100),
            Deviation < 10
        end,
        lists:all(Fun, DistributionList)
    end,
    [true = CheckDeviation(NodeCount) || {_Node, NodeCount} <- DistributionList],
    Time = timer:now_diff(erlang:timestamp(), StartTime),
    ct:print("Distribution test elapsed time: ~p ms", [(Time / 1000)]),
    ok.

consistency() ->
    [{userdata, [{doc, "Tests the consistency of the maglev hashing algorithm"}]}].

consistency(_Conf) ->
    StartTime = erlang:timestamp(),
    [ok = consistency_test(NodesNum) || NodesNum <- lists:seq(2, 200)],
    Time = timer:now_diff(erlang:timestamp(), StartTime),
    ct:print("Consistency test elapsed time: ~p ms", [(Time / 1000)]),
    ok.

%%%-----------------------------------------------------------------------------
%%% INTERNAL FUNCTIONS
%%%-----------------------------------------------------------------------------
%% For a ring of NodesNum nodes: removing an unrelated node must not
%% remap this process's key, and adding one node may only remap the key
%% to that new node.
consistency_test(NodesNum) ->
    Nodes = ["node" ++ erlang:integer_to_list(N) || N <- lists:seq(0, NodesNum)],
    MaglevMap = nmaglev:create(Nodes, 997),
    Node = nmaglev:get(self(), MaglevMap),
    NodesWithoutOtherNode = remove_other_node(Nodes, Node),
    MaglevMapWithoutOneNode = nmaglev:create(NodesWithoutOtherNode, 997),
    true = Node == nmaglev:get(self(), MaglevMapWithoutOneNode),
    NewNode = "node" ++ erlang:integer_to_list(NodesNum + 1),
    MaglevMapPlusOneNode = nmaglev:create(Nodes ++ [NewNode], 997),
    ok =
        case nmaglev:get(self(), MaglevMapPlusOneNode) of
            Node ->
                ok;
            NewNode ->
                ok;
            Other ->
                {ko, Other}
        end,
    ok.

%% Drop one of the first two nodes, making sure NodeToKeep survives.
remove_other_node([Node1, Node2 | Rest], NodeToKeep) ->
    case NodeToKeep of
        Node1 ->
            [Node1 | Rest];
        _ ->
            [Node2 | Rest]
    end.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.

%% @doc Remove all entries belonging to given partitions from a vtree.
%% (Fix: removed trailing dataset-artifact garbage that made the final
%% line invalid Erlang.)
-module(vtree_cleanup).

-include("vtree.hrl").
-include("couch_db.hrl").

-export([cleanup/2]).

-ifdef(makecheck).
-compile(nowarn_export_all).
-compile(export_all).
-endif.

% Nodes get cleaned up by partition ID, nothing else matters.
-spec cleanup(Vt :: #vtree{}, Nodes :: [#kv_node{}]) -> #vtree{}.
cleanup(Vt, []) ->
    Vt;
cleanup(#vtree{root=nil}=Vt, _Nodes) ->
    Vt;
cleanup(Vt, Nodes) ->
    T1 = erlang:monotonic_time(),
    Root = Vt#vtree.root,
    PartitionedNodes = [Nodes],
    KpNodes = cleanup_multiple(Vt, PartitionedNodes, [Root]),
    %% An empty result means everything below the root was removed.
    NewRoot = case KpNodes of
                  [] -> nil;
                  KpNodes ->
                      vtree_modify:write_new_root(Vt, KpNodes)
              end,
    couch_log:debug("Cleanup took: ~ps~n",
                    [erlang:convert_time_unit(erlang:monotonic_time() - T1, native, microsecond)/1000000]),
    Vt#vtree{root=NewRoot}.

%% Run the generic modify machinery with cleanup-specific callbacks.
-spec cleanup_multiple(Vt :: #vtree{}, ToCleanup :: [#kv_node{}],
                       Existing :: [#kp_node{}]) -> [#kp_node{}].
cleanup_multiple(Vt, ToCleanup, Existing) ->
    ModifyFuns = {fun cleanup_nodes/2, fun partition_nodes/3},
    vtree_modify:modify_multiple(Vt, ModifyFuns, ToCleanup, Existing, []).

%% Keep only the existing children that are NOT scheduled for cleanup.
-spec cleanup_nodes(ToCleanup :: [#kv_node{}], Existing :: [#kv_node{}]) ->
                           [#kv_node{}].
cleanup_nodes(ToCleanup, Existing) ->
    % Filter out all children that should be deleted
    [E || E <- Existing, not(member_of_nodes(E, ToCleanup))].

% Returns true if a given KV-node is member of a list of KV-nodes.
% The `partition` is used to determine whether it is a member or not.
-spec member_of_nodes(Node :: #kv_node{}, Nodes :: [#kv_node{}]) -> boolean().
member_of_nodes(_A, []) ->
    false;
member_of_nodes(A, [B|_]) when A#kv_node.partition == B#kv_node.partition ->
    true;
member_of_nodes(A, [_B|Rest]) ->
    member_of_nodes(A, Rest).

% NOTE vmx 2014-08-05: This isn't efficient for the cleanup. But for now it's
% the easist possible way with maximum code re-use. The cleanup will be moved
% to C in one point anyway.
-spec partition_nodes(ToPartition :: [#kv_node{}], KpNodes :: [#kp_node{}],
                      Less :: lessfun()) -> [[#kv_node{}]].
partition_nodes(ToPartition, KpNodes, _Less) ->
    % Put the node into every partition as we want to search the full tree
    lists:map(fun(_) -> ToPartition end, KpNodes).
%% @doc Expectation-test framework: tests live in an ".expect" file as
%% an association list from expressions to their last accepted values.
%% Running the tests rewrites the file with the freshly computed
%% values; a VCS diff of that file then shows any change of meaning.
-module(expect).
-export([load_form/1, update_form/3, update_form/2, save_form/2,
print_diff/2, diff_form/3, run_form/2]).
%% Inspired by: https://blog.janestreet.com/testing-with-expectations/
%% Main idea:
%% - Make it trivial to add a test
%% - Diff of the expect file indicates change of meaning / error
%% - A committed diff indicates accepted change of meaning
%% See readme_expect.expect and readme_expect.erl for an example.
%% Syntax: single thunk, containing a single clause, containing a
%% single Term which is an assoc list from expressions to terms.
%% Read an .expect file and return {FunName, [{ExprForm, Value}]}.
%% Throws {expect_load_form, FileName, Error} when the file cannot be
%% read; scan/parse failures crash via badmatch.
load_form(FileName) ->
case file:read_file(FileName) of
{ok, Bin} ->
Str = tools:format("~s",[Bin]),
{ok, Toks, _} = erl_scan:string(Str),
{ok, Form} = erl_parse:parse_form(Toks),
unpack(Form);
Error ->
throw({expect_load_form, FileName, Error})
end.
%% Some ad-hoc formatting. Can't figure out how to have
%% erl_prettypr:format display strings and binaries in a readable way.
%% Write {FunName, [{Form, OldVal, NewVal}]} back out as an .expect
%% file, one pretty-printed test entry per assoc-list element.
save_form(FileName, {FunName, Triplets}) ->
ok = file:write_file(
FileName,
["%% -*- erlang -*-\n",
atom_to_list(FunName),"() ->\n[\n",
join(",\n", [format_test(T) || T <- Triplets]),
"].\n"]).
%% Render one test entry. When old and new values agree a single
%% "=>" comment is emitted; otherwise both expected and found values
%% appear, which is what shows up in the diff.
format_test({Form,OldVal,NewVal}) ->
Inner =
case OldVal == NewVal of
true ->
[", %% =>\n", format_val(NewVal)];
false ->
[", %% expected =>\n", format_val(OldVal), "\n",
", %% found =>\n", format_val(NewVal)]
end,
["{ ",
["%" || _ <- lists:seq(1,78)],
"\n",
erl_prettypr:format(Form),
"\n",
Inner,
"\n}\n"].
%% Compat with older version.
%% join(Lists,Sep) -> lists:join(Lists,Sep).
%% Interleave Sep between list elements (local stand-in for
%% lists:join/2 to stay compatible with older OTP releases).
join(_, []) -> [];
join(Sep, [First | Els]) -> [First, [[Sep,El] || El <- Els]].
%% Value needs to be parsable, e.g. Can't have #Fun<...>.
%% See type_base.erl for similar code.
%% Format a value so it can be read back; values that do not survive a
%% print/parse round-trip are emitted as comments plus 'not_printable'.
format_val(Val) ->
ValFmt = tools:format_binary("~70p",[Val]),
try
Val = type_base:decode({pterm, ValFmt}),
ValFmt
catch
_:_ ->
[[["%% ", Line, "\n"] || Line <- re:split(ValFmt,"\n")],
"not_printable"]
end.
%% save_form(FileName, Form) ->
%% Str = erl_prettypr:format(pack(Form)),
%% ok = file:write_file(
%% FileName,
%% ["%% -*- erlang -*-\n", Str]).
%% Full file.
%% Destructure the abstract form of the zero-arity test function into
%% {FunName, AssocList}.
unpack(
{function,_,FunName,0,
[{clause,_,[],[],
[Term]}]}) ->
{FunName, unpack_list(Term)}.
%% Unpack the assoc list, parsing the second element in the pair but
%% leaving the first intact. Third and subsequent tuple elements are
%% ignored. The third element is used to store error messages in case
%% a test fails.
unpack_list({nil,_}) -> [];
unpack_list({cons,_,{tuple,_,[Expr,Term|_]},Tail}) ->
[{Expr,erl_parse:normalise(Term)} | unpack_list(Tail)].
%% pack({FunName,List}) ->
%% {function,0,FunName,0,
%% [{clause,0,[],[],
%% [pack_list(List)]}]}.
%% pack_list([]) -> {nil,0};
%% pack_list([{Expr,Term}|Tail]) ->
%% {cons,0,{tuple,0,[Expr,erl_parse:abstract(Term)]},
%% pack_list(Tail)}.
%% Check an evaluated form with the previous values, and write it
%% back.
%% TestPairsOrTriplets supplies one thunk per stored test (extra tuple
%% elements are ignored); each thunk is evaluated (exceptions are
%% captured via catch) and the results are written to FileOut.
update_form(FileIn,
FileOut,
TestPairsOrTriplets) ->
{Name, Old} = load_form(FileIn),
{Forms, OldVals} = lists:unzip(Old),
Thunks =
lists:map(
fun({Thunk,_,_}) -> Thunk;
({Thunk,_}) -> Thunk end,
TestPairsOrTriplets),
NewVals = [catch Thunk() || Thunk <- Thunks],
New = lists:zip3(Forms, OldVals, NewVals),
save_form(FileOut, {Name, New}),
{Forms,NewVals,OldVals}.
%% Update the expect file in place, print any mismatches and throw
%% {expect_failed, BaseName} when at least one test changed meaning.
run_form(FileName, TestThunk) ->
{Forms,NewVals,OldVals} = update_form(FileName, TestThunk()),
Diff = expect:diff_form(Forms, OldVals, NewVals),
expect:print_diff(FileName, Diff),
case Diff of
[] -> ok;
_ -> throw({expect_failed,
filename:basename(FileName)})
end.
%% Return the entries whose new value differs from the stored one, as
%% [{Form, {OldVal, NewVal}}].
diff_form(Forms, OldVals, NewVals) ->
%% Return diff.
lists:append(
lists:map(
fun({_,{OldVal,NewVal}}=Test) ->
case NewVal of
OldVal -> [];
_ -> [Test]
end
end,
lists:zip(Forms, lists:zip(OldVals, NewVals)))).
%% Print a compiler-style "-expected / +found" report for each diff
%% entry, using the form's source position.
print_diff(FileName, Diff) ->
lists:foreach(
fun({Form,{Old,New}}) ->
io:format(
"~s:~p: ~s~n- ~p~n+ ~p~n",
[FileName,
erl_syntax:get_pos(Form),
erl_prettypr:format(Form),
Old,
New])
end,
Diff).
%% Convenience wrapper: write the updated file next to the input as
%% "<FileIn>.new".
update_form(FileIn, TestResults) ->
update_form(FileIn, FileIn ++ ".new", TestResults).
%% NOTE(review): the trailing example comment in the original source was
%% truncated/garbled; it appeared to show a call along the lines of
%% expect:run_form/check_form on a scope_display expect file.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2001-2020. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
-module(cover).
%%
%% This module implements the Erlang coverage tool.
%%
%% ARCHITECTURE
%%
%% The coverage tool consists of one process on each node involved in
%% coverage analysis. The process is registered as 'cover_server'
%% (?SERVER). The cover_server on the 'main' node is in charge, and
%% it monitors the cover_servers on all remote nodes. When it gets a
%% 'DOWN' message for another cover_server, it marks the node as
%% 'lost'. If a nodeup is received for a lost node the main node
%% ensures that the cover compiled modules are loaded again. If the
%% remote node was alive during the disconnected period, cover data
%% for this period will also be included in the analysis.
%%
%% The cover_server process on the main node is implemented by the
%% functions init_main/1 and main_process_loop/1. The cover_server on
%% the remote nodes are implemented by the functions init_remote/2 and
%% remote_process_loop/1.
%%
%% COUNTERS
%%
%% The 'counters' modules is used for counting how many time each line
%% executed. Each cover-compiled module will have its own array of
%% counters.
%%
%% The counter reference for module Module is stored in a persistent
%% term with the key {cover,Module}.
%%
%% When the cover:local_only/0 function has been called, the reference
%% for the counter array will be compiled into each cover-compiled
%% module directly (instead of retrieving it from a persistent term).
%% That will be faster, but the resulting code can be only be used on
%% the main node.
%%
%% TABLES
%%
%% Each node has two tables: ?COVER_MAPPING_TABLE and ?COVER_CLAUSE_TABLE.
%% ?COVER_MAPPING_TABLE maps from a #bump{} record to an index in the
%% counter array for the module. It is used both during instrumentation
%% of cover-compiled modules and when collecting the counter values.
%%
%% ?COVER_CLAUSE_TABLE contains information about which clauses in which modules
%% cover is currently collecting statistics.
%%
%% The main node owns the tables ?COLLECTION_TABLE and
%% ?COLLECTION_CLAUSE_TABLE. The counter data is consolidated into those
%% tables from the counters on both the main node and from remote nodes.
%% This consolidation is done when a remote node is stopped with
%% cover:stop/1 or just before starting an analysis.
%%
%% The main node also has a table named ?BINARY_TABLE. This table
%% contains the abstract code code for each cover-compiled
%% module. This is necessary so that the code can be loaded on remote
%% nodes that are started after the compilation.
%%
%% PARALLELISM
%%
%% To take advantage of SMP when doing the cover analysis both the data
%% collection and analysis has been parallelized. One process is spawned for
%% each node when collecting data, and on the remote node when collecting data
%% one process is spawned per module.
%%
%% When analyzing data it is possible to issue multiple
%% analyse(_to_file)/X calls at once. They are, however, all calls
%% (for backwards compatibility reasons), so the user of cover will
%% have to spawn several processes to to the calls (or use
%% async_analyse_to_file/X).
%%
%% External exports
-export([start/0, start/1,
compile/1, compile/2, compile_module/1, compile_module/2,
compile_directory/0, compile_directory/1, compile_directory/2,
compile_beam/1, compile_beam_directory/0, compile_beam_directory/1,
analyse/0, analyse/1, analyse/2, analyse/3,
analyze/0, analyze/1, analyze/2, analyze/3,
analyse_to_file/0,
analyse_to_file/1, analyse_to_file/2, analyse_to_file/3,
analyze_to_file/0,
analyze_to_file/1, analyze_to_file/2, analyze_to_file/3,
async_analyse_to_file/1,async_analyse_to_file/2,
async_analyse_to_file/3, async_analyze_to_file/1,
async_analyze_to_file/2, async_analyze_to_file/3,
export/1, export/2, import/1,
modules/0, imported/0, imported_modules/0, which_nodes/0, is_compiled/1,
reset/1, reset/0,
flush/1,
stop/0, stop/1,
local_only/0]).
-export([remote_start/1,get_main_node/0]).
%% Used internally to ensure we upgrade the code to the latest version.
-export([main_process_loop/1,remote_process_loop/1]).
-record(main_state, {compiled=[], % [{Module,File}]
imported=[], % [{Module,File,ImportFile}]
stopper, % undefined | pid()
local_only=false, % true | false
nodes=[], % [Node]
lost_nodes=[]}). % [Node]
-record(remote_data, {module,
file,
code,
mapping,
clauses}).
-record(remote_state, {compiled=[], % [{Module,File}]
main_node}). % atom()
-record(bump, {module = '_', % atom()
function = '_', % atom()
arity = '_', % integer()
clause = '_', % integer()
line = '_' % integer()
}).
-define(BUMP_REC_NAME,bump).
-define(CHUNK_SIZE, 20000).
-record(vars, {module, % atom() Module name
init_info=[], % [{M,F,A,C,L}]
function, % atom()
arity, % int()
clause, % int()
lines, % [int()]
no_bump_lines, % [int()]
depth, % int()
is_guard=false % boolean
}).
-define(COVER_MAPPING_TABLE, 'cover_internal_mapping_table').
-define(COVER_CLAUSE_TABLE, 'cover_internal_clause_table').
-define(BINARY_TABLE, 'cover_binary_code_table').
-define(COLLECTION_TABLE, 'cover_collected_remote_data_table').
-define(COLLECTION_CLAUSE_TABLE, 'cover_collected_remote_clause_table').
-define(TAG, cover_compiled).
-define(SERVER, cover_server).
%% Line doesn't matter.
-define(BLOCK(Expr), {block,erl_anno:new(0),[Expr]}).
-define(BLOCK1(Expr),
if
element(1, Expr) =:= block ->
Expr;
true -> ?BLOCK(Expr)
end).
-define(SPAWN_DBG(Tag,Value),put(Tag,Value)).
-define(STYLESHEET, "styles.css").
-define(TOOLS_APP, tools).
-include_lib("stdlib/include/ms_transform.hrl").
%%%----------------------------------------------------------------------
%%% External exports
%%%----------------------------------------------------------------------
-spec start() -> {'ok', pid()} | {'error', Reason} when
Reason :: {'already_started', pid()}
| term().
%% Start the cover server on the local node, unless it is already
%% running. The server process is spawned and registers itself in
%% init_main/1; we monitor it while waiting for the start
%% acknowledgement so a crash during startup becomes {error,Info}
%% instead of a hang.
start() ->
    case whereis(?SERVER) of
        undefined ->
            Starter = self(),
            Pid = spawn(fun() ->
                                ?SPAWN_DBG(start,[]),
                                init_main(Starter)
                        end),
            Ref = erlang:monitor(process,Pid),
            Return =
                receive
                    {?SERVER,started} ->
                        {ok,Pid};
                    {?SERVER,{error,Error}} ->
                        {error,Error};
                    {'DOWN', Ref, _Type, _Object, Info} ->
                        {error,Info}
                end,
            %% Use [flush] so that a 'DOWN' message that raced with the
            %% start acknowledgement does not linger in the caller's
            %% mailbox after we demonitor.
            erlang:demonitor(Ref, [flush]),
            Return;
        Pid ->
            {error,{already_started,Pid}}
    end.
-spec start(Nodes) -> {'ok', StartedNodes}
                      | {'error', 'not_main_node'}
                      | {'error', 'local_only'} when
      Nodes :: node() | [node()],
      StartedNodes :: [node()].
%% Start cover servers on the given remote node(s). The local node is
%% silently removed from the list; fails if local_only/0 was called.
start(Node) when is_atom(Node) ->
    start([Node]);
start(Nodes) ->
    call({start_nodes,remove_myself(Nodes,[])}).
-spec local_only() -> 'ok' | {'error', 'too_late'}.
%% Restrict cover to the local node only. Must be called before any
%% module is cover-compiled or any remote node is added.
local_only() ->
    call(local_only).
-type compile_result() :: {'ok', Module :: module()}
| {'error', file:filename()}
| {'error', 'not_main_node'}.
-type mod_file() :: (Module :: module()) | (File :: file:filename()).
-type mod_files() :: mod_file() | [mod_file()].
-type option() :: {'i', Dir :: file:filename()}
| {'d', Macro :: atom()}
| {'d', Macro :: atom(), Value :: term()}
| 'export_all'.
-spec compile(ModFiles) -> Result | [Result] when
ModFiles :: mod_files(),
Result :: compile_result().
%% Cover-compile module(s) from source; alias for compile_module/1.
compile(ModFile) ->
    compile_module(ModFile, []).
-spec compile(ModFiles, Options) -> Result | [Result] when
      ModFiles :: mod_files(),
      Options :: [option()],
      Result :: compile_result().
%% Cover-compile module(s) from source with compiler options; alias
%% for compile_module/2.
compile(ModFile, Options) ->
    compile_module(ModFile, Options).
-spec compile_module(ModFiles) -> Result | [Result] when
      ModFiles :: mod_files(),
      Result :: compile_result().
%% Cover-compile module(s) from source without extra compiler options.
compile_module(ModFile) when is_atom(ModFile);
                             is_list(ModFile) ->
    compile_module(ModFile, []).
-spec compile_module(ModFiles, Options) -> Result | [Result] when
ModFiles :: mod_files(),
Options :: [option()],
Result :: compile_result().
%% Cover-compile one module (given as an atom or a source file name),
%% or a list of them, from source. A single module/file argument gives
%% a single result term; a list argument gives a list of results.
compile_module(ModFile, Options) when is_atom(ModFile);
                                      is_list(ModFile), is_integer(hd(ModFile)) ->
    [R] = compile_module([ModFile], Options),
    R;
compile_module(ModFiles, Options) when is_list(Options) ->
    AbsFiles = [absolute_erl_source(MF) || MF <- ModFiles],
    compile_modules(AbsFiles, Options).

%% Normalize a module name or source file name into an absolute path
%% ending in ".erl".
absolute_erl_source(ModFile) ->
    Name = case ModFile of
               _ when is_atom(ModFile) -> atom_to_list(ModFile);
               _ when is_list(ModFile) -> ModFile
           end,
    WithExt = case filename:extension(Name) of
                  ".erl" -> Name;
                  _ -> Name ++ ".erl"
              end,
    filename:absname(WithExt).
-type file_error() :: 'eacces' | 'enoent'.
-spec compile_directory() -> [Result] | {'error', Reason} when
      Reason :: file_error(),
      Result :: compile_result().
%% Cover-compile all .erl files in the current working directory.
compile_directory() ->
    case file:get_cwd() of
        {ok, Dir} ->
            compile_directory(Dir, []);
        Error ->
            %% {error,Reason} from file:get_cwd/0
            Error
    end.
-spec compile_directory(Dir) -> [Result] | {'error', Reason} when
      Dir :: file:filename(),
      Reason :: file_error(),
      Result :: compile_result().
%% Cover-compile all .erl files in Dir without extra compiler options.
compile_directory(Dir) when is_list(Dir) ->
    compile_directory(Dir, []).
-spec compile_directory(Dir, Options) -> [Result] | {'error', Reason} when
Dir :: file:filename(),
Options :: [option()],
Reason :: file_error(),
Result :: compile_result().
%% Cover-compile every ".erl" file found in Dir, using the given
%% compiler options. Returns the list of per-file results, or the
%% error from listing the directory.
compile_directory(Dir, Options) when is_list(Dir), is_list(Options) ->
    case file:list_dir(Dir) of
        {ok, Entries} ->
            Sources = [filename:join(Dir, Entry)
                       || Entry <- Entries,
                          filename:extension(Entry) =:= ".erl"],
            compile_modules(Sources, Options);
        {error, _} = Error ->
            Error
    end.
%% Ask the cover server to compile the given source files, after
%% dropping any options that are not safe to pass through.
compile_modules(Files,Options) ->
    Options2 = filter_options(Options),
    call({compile, Files, Options2}).
%% Keep only the compiler options that cover supports: include dirs,
%% macro definitions and export_all. Everything else is dropped
%% silently.
filter_options(Options) ->
    [Opt || Opt <- Options, is_supported_option(Opt)].

%% True for the option forms cover is willing to forward to the
%% compiler.
is_supported_option({i, Dir}) when is_list(Dir) -> true;
is_supported_option({d, _Macro}) -> true;
is_supported_option({d, _Macro, _Value}) -> true;
is_supported_option(export_all) -> true;
is_supported_option(_) -> false.
-type beam_mod_file() :: (Module :: module()) | (BeamFile :: file:filename()).
-type beam_mod_files() :: beam_mod_file() | [beam_mod_file()].
-type compile_beam_rsn() ::
'non_existing'
| {'no_abstract_code', BeamFile :: file:filename()}
| {'encrypted_abstract_code', BeamFile :: file:filename()}
| {'already_cover_compiled', 'no_beam_found', module()}
| {'no_file_attribute', BeamFile :: file:filename()}
| 'not_main_node'.
-type compile_beam_result() :: {'ok', module()}
| {'error', BeamFile :: file:filename()}
| {'error', Reason :: compile_beam_rsn()}.
-spec compile_beam(ModFiles) -> Result | [Result] when
      ModFiles :: beam_mod_files(),
      Result :: compile_beam_result().
%% Cover-compile module(s) from existing .beam files (must contain
%% abstract code). A single module/file gives a single result term.
compile_beam(ModFile0) when is_atom(ModFile0);
                            is_list(ModFile0), is_integer(hd(ModFile0)) ->
    case compile_beams([ModFile0]) of
        [{error,{non_existing,_}}] ->
            %% Backwards compatibility: old API returned a bare reason
            {error,non_existing};
        [Result] ->
            Result
    end;
compile_beam(ModFiles) when is_list(ModFiles) ->
    compile_beams(ModFiles).
-spec compile_beam_directory() -> [Result] | {'error', Reason} when
      Reason :: file_error(),
      Result :: compile_beam_result().
%% Cover-compile all .beam files in the current working directory.
compile_beam_directory() ->
    case file:get_cwd() of
        {ok, Dir} ->
            compile_beam_directory(Dir);
        Error ->
            %% {error,Reason} from file:get_cwd/0
            Error
    end.
-spec compile_beam_directory(Dir) ->
[Result] | {'error', Reason} when
Dir :: file:filename(),
Reason :: file_error(),
Result :: compile_beam_result().
%% Cover-compile every ".beam" file found in Dir. Returns the list of
%% per-file results, or the error from listing the directory.
compile_beam_directory(Dir) when is_list(Dir) ->
    case file:list_dir(Dir) of
        {ok, Entries} ->
            Beams = [filename:join(Dir, Entry)
                     || Entry <- Entries,
                        filename:extension(Entry) =:= ".beam"],
            compile_beams(Beams);
        {error, _} = Error ->
            Error
    end.
%% Normalize the module/file list and hand it to the cover server for
%% beam compilation.
compile_beams(ModFiles0) ->
    ModFiles = get_mods_and_beams(ModFiles0,[]),
    call({compile_beams,ModFiles}).
%% Normalize a mixed list of module atoms, .beam file names and
%% {Module,File} pairs into tagged tuples, resolving atoms via the
%% code path, making file names absolute, and flagging non-existing
%% and duplicate entries. Result order follows the input order.
get_mods_and_beams([Module | Rest], Acc) when is_atom(Module) ->
    %% Resolve a module atom to its beam file via the code path.
    case code:which(Module) of
        non_existing ->
            get_mods_and_beams(Rest, [{error, {non_existing, Module}} | Acc]);
        BeamFile ->
            get_mods_and_beams([{Module, BeamFile} | Rest], Acc)
    end;
get_mods_and_beams([File | Rest], Acc) when is_list(File) ->
    %% A file name, with or without the ".beam" extension.
    {WithExt, WithoutExt} =
        case filename:rootname(File, ".beam") of
            File ->
                {File ++ ".beam", File};
            Root ->
                {File, Root}
        end,
    Module = list_to_atom(filename:basename(WithoutExt)),
    get_mods_and_beams([{Module, filename:absname(WithExt)} | Rest], Acc);
get_mods_and_beams([{Module, File} | Rest], Acc) ->
    %% Check for duplicates among the already accepted entries.
    case lists:keyfind(Module, 2, Acc) of
        {ok, Module, File} ->
            %% Duplicate, but same file, so just ignore it.
            get_mods_and_beams(Rest, Acc);
        {ok, Module, _DifferentFile} ->
            %% Duplicate with a different file - report an error.
            get_mods_and_beams(Rest, [{error, {duplicate, Module}} | Acc]);
        _ ->
            get_mods_and_beams(Rest, [{ok, Module, File} | Acc])
    end;
get_mods_and_beams([], Acc) ->
    lists:reverse(Acc).
-type analyse_item() ::
(Line :: {M :: module(), N :: non_neg_integer()})
| (Clause :: {M :: module(), F :: atom(), A :: arity(),
C :: non_neg_integer()})
| (Function :: {M :: module(), F :: atom(), A :: arity()}). % mfa()
-type analyse_value() :: {Cov :: non_neg_integer(), NotCov :: non_neg_integer()}
| Calls :: non_neg_integer().
-type analyse_ok() :: [{Module :: module(), Value :: analyse_value()}]
| [{Item :: analyse_item(), Value :: analyse_value()}].
-type analyse_fail() :: [{'not_cover_compiled', module()}].
-type analysis() :: 'coverage' | 'calls'.
-type level() :: 'line' | 'clause' | 'function' | 'module'.
-type modules() :: module() | [module()].
-type one_result() ::
{'ok', {Module :: module(), Value :: analyse_value()}}
| {'ok', [{Item :: analyse_item(), Value :: analyse_value()}]}
| {'error', {'not_cover_compiled', module()}}.
-define(is_analysis(__A__),
(__A__=:=coverage orelse __A__=:=calls)).
-define(is_level(__L__),
(__L__=:=line orelse __L__=:=clause orelse
__L__=:=function orelse __L__=:=module)).
-spec analyse() -> {'result', analyse_ok(), analyse_fail()} |
                   {'error', 'not_main_node'}.
%% Analyse all cover-compiled modules (coverage, function level).
analyse() ->
    analyse('_').
-dialyzer({no_contracts, analyse/1}).
%% modules() :: module() | [module()]. module() is an alias for
%% atom(), which overlaps with analysis() and level(). That is,
%% modules named 'calls' &c must be placed in a list.
-spec analyse(Analysis) -> {'result', analyse_ok(), analyse_fail()} |
                           {'error', 'not_main_node'} when
      Analysis :: analysis();
             (Level) -> {'result', analyse_ok(), analyse_fail()} |
                        {'error', 'not_main_node'} when
      Level :: level();
             (Modules) -> OneResult |
                          {'result', analyse_ok(), analyse_fail()} |
                          {'error', 'not_main_node'} when
      Modules :: modules(),
      OneResult :: one_result().
%% Single-argument analyse: the argument may be an analysis kind, a
%% level, or module(s) - disambiguated by the guard macros below.
analyse(Analysis) when ?is_analysis(Analysis) ->
    analyse('_', Analysis);
analyse(Level) when ?is_level(Level) ->
    analyse('_', Level);
analyse(Module) ->
    analyse(Module, coverage).
-dialyzer({no_contracts,analyse/2}). %% See comment analyse/1.
-spec analyse(Analysis, Level) -> {'result', analyse_ok(), analyse_fail()} |
                                  {'error', 'not_main_node'} when
      Analysis :: analysis(),
      Level :: level();
             (Modules, Analysis) -> OneResult |
                                    {'result', analyse_ok(), analyse_fail()} |
                                    {'error', 'not_main_node'} when
      Analysis :: analysis(),
      Modules :: modules(),
      OneResult :: one_result();
             (Modules, Level) -> OneResult |
                                 {'result', analyse_ok(), analyse_fail()} |
                                 {'error', 'not_main_node'} when
      Level :: level(),
      Modules :: modules(),
      OneResult :: one_result().
%% Two-argument analyse: fill in the missing dimension (modules,
%% analysis kind or level) with its default.
analyse(Analysis, Level) when ?is_analysis(Analysis) andalso
                              ?is_level(Level) ->
    analyse('_', Analysis, Level);
analyse(Module, Analysis) when ?is_analysis(Analysis) ->
    analyse(Module, Analysis, function);
analyse(Module, Level) when ?is_level(Level) ->
    analyse(Module, coverage, Level).
-spec analyse(Modules, Analysis, Level) ->
                     OneResult |
                     {'result', analyse_ok(), analyse_fail()} |
                     {'error', 'not_main_node'} when
      Analysis :: analysis(),
      Level :: level(),
      Modules :: modules(),
      OneResult :: one_result().
%% Fully specified analyse - forwarded to the cover server.
analyse(Module, Analysis, Level) when ?is_analysis(Analysis),
                                      ?is_level(Level) ->
    call({{analyse, Analysis, Level}, Module}).
%% American-spelling aliases for analyse/0..3.
analyze() -> analyse( ).
analyze(Module) -> analyse(Module).
analyze(Module, Analysis) -> analyse(Module, Analysis).
analyze(Module, Analysis, Level) -> analyse(Module, Analysis, Level).
%% Kept for backwards compatibility:
%% analyse_to_file(Modules, OutFile) ->
%% analyse_to_file(Modules, OutFile, Options) -> {ok,OutFile} | {error,Error}
-spec analyse_to_file() -> {'result', analyse_file_ok(), analyse_file_fail()} |
                           {'error', 'not_main_node'}.
%% Write analysis output files for all cover-compiled modules.
analyse_to_file() ->
    analyse_to_file('_').
-type analyse_option() :: 'html'
| {'outfile', OutFile :: file:filename()}
| {'outdir', OutDir :: file:filename()}.
-type analyse_answer() :: {'ok', OutFile :: file:filename()} |
{'error', analyse_rsn()}.
-type analyse_file_ok() :: [OutFile :: file:filename()].
-type analyse_file_fail() :: [analyse_rsn()].
-type analyse_rsn() :: {'not_cover_compiled', Module :: module()} |
{'file', File :: file:filename(), Reason :: term()} |
{'no_source_code_found', Module :: module()}.
-dialyzer({no_contracts, analyse_to_file/1}).
%% The option list [html] overlaps with module list [html].
-spec analyse_to_file(Modules) -> Answer |
{'result',
analyse_file_ok(), analyse_file_fail()} |
{'error', 'not_main_node'} when
Modules :: modules(),
Answer :: analyse_answer();
(Options) -> {'result',
analyse_file_ok(), analyse_file_fail()} |
{'error', 'not_main_node'} when
Options :: [analyse_option()].
%% Single-argument analyse_to_file: the argument is either an option
%% list or module(s); is_options/1 makes the (heuristic) distinction.
analyse_to_file(Arg) ->
    {Modules, Opts} =
        case is_options(Arg) of
            true -> {'_', Arg};
            false -> {Arg, []}
        end,
    analyse_to_file(Modules, Opts).
-spec analyse_to_file(Modules, Options) ->
Answer |
{'result',
analyse_file_ok(), analyse_file_fail()} |
{'error', 'not_main_node'} when
Modules :: modules(),
Options :: [analyse_option()],
Answer :: analyse_answer().
%% Two-argument form: a string second argument is treated as an output
%% file name (old API); a list of tuples/atoms is an option list.
analyse_to_file(Module, OutFile) when is_list(OutFile), is_integer(hd(OutFile)) ->
    %% Kept for backwards compatibility
    analyse_to_file(Module, [{outfile,OutFile}]);
analyse_to_file(Module, Options) when is_list(Options) ->
    call({{analyse_to_file, Options}, Module}).
%% Three-argument form, kept for backwards compatibility.
analyse_to_file(Module, OutFile, Options) when is_list(OutFile) ->
    %% Kept for backwards compatibility
    analyse_to_file(Module,[{outfile,OutFile}|Options]).
%% American-spelling aliases.
analyze_to_file() -> analyse_to_file().
analyze_to_file(Module) -> analyse_to_file(Module).
analyze_to_file(Module, OptOrOut) -> analyse_to_file(Module, OptOrOut).
analyze_to_file(Module, OutFile, Options) ->
    analyse_to_file(Module, OutFile, Options).
-spec async_analyse_to_file(Module) -> pid() when
      Module :: module().
%% Run analyse_to_file/1 in a linked process; the caller gets the pid
%% and the process exits with the error reason on failure.
async_analyse_to_file(Module) ->
    do_spawn(?MODULE, analyse_to_file, [Module]).
-dialyzer({no_contracts, async_analyse_to_file/2}).
%% The types file:filename() (string()) and ['html'] has something in
%% common, namely [].
-spec async_analyse_to_file(Module, OutFile) -> pid() when
      Module :: module(),
      OutFile :: file:filename();
                           (Module, Options) -> pid() when
      Module :: module(),
      Options :: [Option],
      Option :: 'html'.
%% As async_analyse_to_file/1, with an output file or option list.
async_analyse_to_file(Module, OutFileOrOpts) ->
    do_spawn(?MODULE, analyse_to_file, [Module, OutFileOrOpts]).
-spec async_analyse_to_file(Module, OutFile, Options) -> pid() when
      Module :: module(),
      OutFile :: file:filename(),
      Options :: [Option],
      Option :: 'html'.
%% As async_analyse_to_file/2, with both output file and options.
async_analyse_to_file(Module, OutFile, Options) ->
    do_spawn(?MODULE, analyse_to_file, [Module, OutFile, Options]).
%% Heuristically decide whether a term is an analyse_to_file option
%% list rather than a module list. A list containing only 'html'
%% and/or {outfile,_}/{outdir,_} tuples is taken to be options.
is_options([html]) ->
    %% Not 100% safe - could be a list with a module named 'html'...
    true;
is_options([html | Rest]) ->
    is_options(Rest);
is_options([{outfile, _} | _]) ->
    true;
is_options([{outdir, _} | _]) ->
    true;
is_options(_) ->
    false.
%% Run apply(M,F,A) in a linked process. A {ok,_} result ends the
%% process normally; {error,Reason} makes it exit with Reason, which
%% propagates to the caller through the link.
do_spawn(M, F, A) ->
    Work = fun() ->
                   case apply(M, F, A) of
                       {ok, _Res} ->
                           ok;
                       {error, Reason} ->
                           exit(Reason)
                   end
           end,
    spawn_link(Work).
%% American-spelling aliases for async_analyse_to_file/1..3.
async_analyze_to_file(Module) ->
    async_analyse_to_file(Module).
async_analyze_to_file(Module, OutFileOrOpts) ->
    async_analyse_to_file(Module, OutFileOrOpts).
async_analyze_to_file(Module, OutFile, Options) ->
    async_analyse_to_file(Module, OutFile, Options).
%% Build the output file name for a module's analysis result. With an
%% output directory the name is joined onto it; 'undefined' means the
%% bare file name is used.
outfilename(undefined, Module, HTML) ->
    outfilename(Module, HTML);
outfilename(OutDir, Module, HTML) ->
    filename:join(OutDir, outfilename(Module, HTML)).

%% "Module.COVER.html" for HTML output, "Module.COVER.out" otherwise.
outfilename(Module, HTML) ->
    Extension = case HTML of
                    true -> ".COVER.html";
                    false -> ".COVER.out"
                end,
    atom_to_list(Module) ++ Extension.
-type export_reason() :: {'not_cover_compiled', Module :: module()} |
{'cant_open_file',
ExportFile :: file:filename(), FileReason :: term()} |
'not_main_node'.
-spec export(File) -> 'ok' | {'error', Reason} when
      File :: file:filename(),
      Reason :: export_reason().
%% Export coverage data for all modules to File.
export(File) ->
    export(File, '_').
-spec export(File, Module) -> 'ok' | {'error', Reason} when
      File :: file:filename(),
      Module :: module(),
      Reason :: export_reason().
%% Export coverage data for Module ('_' = all modules) to File.
export(File, Module) ->
    call({export,File,Module}).
-spec import(ExportFile) -> 'ok' | {'error', Reason} when
      ExportFile :: file:filename(),
      Reason :: {'cant_open_file', ExportFile, FileReason :: term()} |
                'not_main_node'.
%% Import previously exported coverage data.
import(File) ->
    call({import,File}).
-spec modules() -> [module()] | {'error', 'not_main_node'}.
%% List all cover-compiled modules that are still loaded.
modules() ->
    call(modules).
-spec imported_modules() -> [module()] | {'error', 'not_main_node'}.
%% List all modules that have imported coverage data.
imported_modules() ->
    call(imported_modules).
-spec imported() -> [file:filename()] | {'error', 'not_main_node'}.
%% List all files that coverage data has been imported from.
imported() ->
    call(imported).
-spec which_nodes() -> [node()].
%% List all remote nodes currently participating in coverage.
which_nodes() ->
    call(which_nodes).
-spec is_compiled(Module) -> {'file', File :: file:filename()} |
                             'false' |
                             {'error', 'not_main_node'} when
      Module :: module().
%% Check whether Module is cover-compiled; returns its source file if so.
is_compiled(Module) when is_atom(Module) ->
    call({is_compiled, Module}).
-spec reset(Module) -> 'ok' |
                       {'error', 'not_main_node'} |
                       {'error', 'not_cover_compiled', Module} when
      Module :: module().
%% Reset collected coverage data for one module (on all nodes).
reset(Module) when is_atom(Module) ->
    call({reset, Module}).
-spec reset() -> 'ok' | {'error', 'not_main_node'}.
%% Reset collected coverage data for all modules, and forget imports.
reset() ->
    call(reset).
-spec stop() -> 'ok' | {'error', 'not_main_node'}.
%% Stop the cover server (local and remote) and restore original code.
stop() ->
    call(stop).
-spec stop(Nodes) -> 'ok' | {'error', 'not_main_node'} when
      Nodes :: node() | [node()].
%% Stop cover on the given remote node(s), collecting their data first.
stop(Node) when is_atom(Node) ->
    stop([Node]);
stop(Nodes) ->
    call({stop,remove_myself(Nodes,[])}).
-spec flush(Nodes) -> 'ok' | {'error', 'not_main_node'} when
      Nodes :: node() | [node()].
%% Collect data from the given remote node(s) without stopping them.
flush(Node) when is_atom(Node) ->
    flush([Node]);
flush(Nodes) ->
    call({flush,remove_myself(Nodes,[])}).
%% Used by test_server only. Not documented.
get_main_node() ->
    call(get_main_node).
%% Send a request to the local cover server and wait for the reply.
%% The monitor + 'receive ... after 0' trick detects a missing server
%% synchronously: if the server is not running, the monitor delivers
%% {'DOWN',...,noproc} immediately, and we start the server and retry.
%% Otherwise a 'DOWN' while waiting propagates the server's exit
%% reason to the caller.
call(Request) ->
    Ref = erlang:monitor(process,?SERVER),
    receive {'DOWN', Ref, _Type, _Object, noproc} ->
            erlang:demonitor(Ref),
            {ok,_} = start(),
            call(Request)
    after 0 ->
            ?SERVER ! {self(),Request},
            Return =
                receive
                    {'DOWN', Ref, _Type, _Object, Info} ->
                        exit(Info);
                    {?SERVER,Reply} ->
                        Reply
                end,
            erlang:demonitor(Ref, [flush]),
            Return
    end.
%% Send a reply from the cover server back to a calling process.
reply(From, Reply) ->
    From ! {?SERVER,Reply},
    ok.
%% True if a received term looks like a caller (pid) rather than a
%% protocol atom - used when draining unexpected messages.
is_from(From) ->
    is_pid(From).
%% Send a request to the cover server on a remote node and wait for
%% the reply. Uses the same monitor + 'after 0' trick as call/1 to
%% detect a dead remote server synchronously. A 'DOWN' while waiting
%% is treated as success for {remote,stop} (the server is expected to
%% die) and as {error,node_dead} otherwise.
remote_call(Node,Request) ->
    Ref = erlang:monitor(process,{?SERVER,Node}),
    receive {'DOWN', Ref, _Type, _Object, noproc} ->
            erlang:demonitor(Ref),
            {error,node_dead}
    after 0 ->
            {?SERVER,Node} ! Request,
            Return =
                receive
                    {'DOWN', Ref, _Type, _Object, _Info} ->
                        case Request of
                            {remote,stop} -> ok;
                            _ -> {error,node_dead}
                        end;
                    {?SERVER,Reply} ->
                        Reply
                end,
            erlang:demonitor(Ref, [flush]),
            Return
    end.
%% Reply from a remote cover server: either directly to a pid, or to
%% the registered cover server on the main node.
remote_reply(Proc,Reply) when is_pid(Proc) ->
    Proc ! {?SERVER,Reply},
    ok;
remote_reply(MainNode,Reply) ->
    {?SERVER,MainNode} ! {?SERVER,Reply},
    ok.
%%%----------------------------------------------------------------------
%%% cover_server on main node
%%%----------------------------------------------------------------------
%% Initialize the cover server on the main node: register, create the
%% ETS tables, subscribe to node up/down events and enter the main
%% loop. Registration doubles as a race check - if another process
%% registered first, report {already_started,Pid} (or retry if it
%% died in between).
init_main(Starter) ->
    try register(?SERVER,self()) of
        true ->
            %% Public named tables shared with the instrumented code
            %% and the analysis worker processes.
            ?COVER_MAPPING_TABLE = ets:new(?COVER_MAPPING_TABLE,
                                           [ordered_set, public, named_table]),
            ?COVER_CLAUSE_TABLE = ets:new(?COVER_CLAUSE_TABLE, [set, public,
                                                                named_table]),
            ?BINARY_TABLE = ets:new(?BINARY_TABLE, [set, public, named_table]),
            ?COLLECTION_TABLE = ets:new(?COLLECTION_TABLE, [set, public,
                                                            named_table]),
            ?COLLECTION_CLAUSE_TABLE = ets:new(?COLLECTION_CLAUSE_TABLE,
                                               [set, public, named_table]),
            ok = net_kernel:monitor_nodes(true),
            Starter ! {?SERVER,started},
            main_process_loop(#main_state{})
    catch
        error:badarg ->
            %% The server's already registered; either report that it's already
            %% started or try again if it died before we could find its pid.
            case whereis(?SERVER) of
                undefined ->
                    init_main(Starter);
                Pid ->
                    Starter ! {?SERVER, {error, {already_started, Pid}}}
            end
    end.
%% Main receive loop of the cover server on the main node. Requests
%% from the API arrive as {From,Request}; node up/down events arrive
%% from net_kernel. Compile requests re-enter the loop with a fully
%% qualified call so that a reloaded cover module takes effect.
main_process_loop(State) ->
    receive
        {From, local_only} ->
            %% Only allowed before anything is compiled or distributed.
            case State of
                #main_state{compiled=[],nodes=[]} ->
                    reply(From, ok),
                    main_process_loop(State#main_state{local_only=true});
                #main_state{} ->
                    reply(From, {error,too_late}),
                    main_process_loop(State)
            end;
        {From, {start_nodes,Nodes}} ->
            case State#main_state.local_only of
                false ->
                    {StartedNodes,State1} = do_start_nodes(Nodes, State),
                    reply(From, {ok,StartedNodes}),
                    main_process_loop(State1);
                true ->
                    reply(From, {error,local_only}),
                    main_process_loop(State)
            end;
        {From, {compile, Files, Options}} ->
            {R,S} = do_compile(Files, Options, State),
            reply(From,R),
            %% This module (cover) could have been reloaded. Make
            %% sure we run the new code.
            ?MODULE:main_process_loop(S);
        {From, {compile_beams, ModsAndFiles}} ->
            {R,S} = do_compile_beams(ModsAndFiles,State),
            reply(From,R),
            %% This module (cover) could have been reloaded. Make
            %% sure we run the new code.
            ?MODULE:main_process_loop(S);
        {From, {export,OutFile,Module}} ->
            %% Exported in a separate process so the server stays
            %% responsive; the worker replies to From itself.
            spawn(fun() ->
                          ?SPAWN_DBG(export,{OutFile, Module}),
                          do_export(Module, OutFile, From, State)
                  end),
            main_process_loop(State);
        {From, {import,File}} ->
            case file:open(File,[read,binary,raw]) of
                {ok,Fd} ->
                    Imported = do_import_to_table(Fd,File,
                                                  State#main_state.imported),
                    reply(From, ok),
                    ok = file:close(Fd),
                    main_process_loop(State#main_state{imported=Imported});
                {error,Reason} ->
                    reply(From, {error, {cant_open_file,File,Reason}}),
                    main_process_loop(State)
            end;
        {From, modules} ->
            %% Get all compiled modules which are still loaded
            {LoadedModules,Compiled} =
                get_compiled_still_loaded(State#main_state.nodes,
                                          State#main_state.compiled),
            reply(From, LoadedModules),
            main_process_loop(State#main_state{compiled=Compiled});
        {From, imported_modules} ->
            %% Get all modules with imported data
            ImportedModules = lists:map(fun({Mod,_File,_ImportFile}) -> Mod end,
                                        State#main_state.imported),
            reply(From, ImportedModules),
            main_process_loop(State);
        {From, imported} ->
            %% List all imported files
            reply(From, get_all_importfiles(State#main_state.imported,[])),
            main_process_loop(State);
        {From, which_nodes} ->
            %% List all remote nodes
            reply(From, State#main_state.nodes),
            main_process_loop(State);
        {From, reset} ->
            lists:foreach(
              fun({Module,_File}) ->
                      do_reset_main_node(Module,State#main_state.nodes)
              end,
              State#main_state.compiled),
            reply(From, ok),
            main_process_loop(State#main_state{imported=[]});
        {From, {stop,Nodes}} ->
            %% Collect all remaining data from the nodes, then drop them.
            remote_collect('_',Nodes,true),
            reply(From, ok),
            Nodes1 = State#main_state.nodes--Nodes,
            LostNodes1 = State#main_state.lost_nodes--Nodes,
            main_process_loop(State#main_state{nodes=Nodes1,
                                               lost_nodes=LostNodes1});
        {From, {flush,Nodes}} ->
            remote_collect('_',Nodes,false),
            reply(From, ok),
            main_process_loop(State);
        {From, stop} ->
            %% Stop remote servers, restore original code and tear down
            %% all tables before unregistering.
            lists:foreach(
              fun(Node) ->
                      remote_call(Node,{remote,stop})
              end,
              State#main_state.nodes),
            reload_originals(State#main_state.compiled),
            ets:delete(?COVER_MAPPING_TABLE),
            ets:delete(?COVER_CLAUSE_TABLE),
            ets:delete(?BINARY_TABLE),
            ets:delete(?COLLECTION_TABLE),
            ets:delete(?COLLECTION_CLAUSE_TABLE),
            delete_all_counters(),
            unregister(?SERVER),
            reply(From, ok);
        {From, {{analyse, Analysis, Level}, '_'}} ->
            R = analyse_all(Analysis, Level, State),
            reply(From, R),
            main_process_loop(State);
        {From, {{analyse, Analysis, Level}, Modules}} when is_list(Modules) ->
            R = analyse_list(Modules, Analysis, Level, State),
            reply(From, R),
            main_process_loop(State);
        {From, {{analyse, Analysis, Level}, Module}} ->
            %% Single-module analysis runs in its own process; the
            %% worker replies to From itself.
            S = try
                    Loaded = is_loaded(Module, State),
                    spawn(fun() ->
                                  ?SPAWN_DBG(analyse,{Module,Analysis, Level}),
                                  do_parallel_analysis(
                                    Module, Analysis, Level,
                                    Loaded, From, State)
                          end),
                    State
                catch throw:Reason ->
                        reply(From,{error, {not_cover_compiled,Module}}),
                        not_loaded(Module, Reason, State)
                end,
            main_process_loop(S);
        {From, {{analyse_to_file, Opts},'_'}} ->
            R = analyse_all_to_file(Opts, State),
            reply(From,R),
            main_process_loop(State);
        {From, {{analyse_to_file, Opts},Modules}} when is_list(Modules) ->
            R = analyse_list_to_file(Modules, Opts, State),
            reply(From,R),
            main_process_loop(State);
        {From, {{analyse_to_file, Opts},Module}} ->
            S = try
                    Loaded = is_loaded(Module, State),
                    spawn_link(fun() ->
                                       ?SPAWN_DBG(analyse_to_file,{Module,Opts}),
                                       do_parallel_analysis_to_file(
                                         Module, Opts, Loaded, From, State)
                               end),
                    State
                catch throw:Reason ->
                        reply(From,{error, {not_cover_compiled,Module}}),
                        not_loaded(Module, Reason, State)
                end,
            main_process_loop(S);
        {From, {is_compiled, Module}} ->
            S = try is_loaded(Module, State) of
                    {loaded, File} ->
                        reply(From,{file, File}),
                        State;
                    {imported,_File,_ImportFiles} ->
                        reply(From,false),
                        State
                catch throw:Reason ->
                        reply(From,false),
                        not_loaded(Module, Reason, State)
                end,
            main_process_loop(S);
        {From, {reset, Module}} ->
            S = try
                    Loaded = is_loaded(Module,State),
                    R = case Loaded of
                            {loaded, _File} ->
                                do_reset_main_node(
                                  Module, State#main_state.nodes);
                            {imported, _File, _} ->
                                do_reset_collection_table(Module)
                        end,
                    Imported =
                        remove_imported(Module,
                                        State#main_state.imported),
                    reply(From, R),
                    State#main_state{imported=Imported}
                catch throw:Reason ->
                        reply(From,{error, {not_cover_compiled,Module}}),
                        not_loaded(Module, Reason, State)
                end,
            main_process_loop(S);
        {'DOWN', _MRef, process, {?SERVER,Node}, _Info} ->
            %% A remote cover_server is down, mark as lost
            {Nodes,Lost} =
                case lists:member(Node,State#main_state.nodes) of
                    true ->
                        N = State#main_state.nodes--[Node],
                        L = [Node|State#main_state.lost_nodes],
                        {N,L};
                    false -> % node stopped
                        {State#main_state.nodes,State#main_state.lost_nodes}
                end,
            main_process_loop(State#main_state{nodes=Nodes,lost_nodes=Lost});
        {nodeup,Node} ->
            %% A previously lost node came back - re-synchronize it.
            State1 =
                case lists:member(Node,State#main_state.lost_nodes) of
                    true ->
                        sync_compiled(Node,State);
                    false ->
                        State
                end,
            main_process_loop(State1);
        {nodedown,_} ->
            %% Will be taken care of when 'DOWN' message arrives
            main_process_loop(State);
        {From, get_main_node} ->
            reply(From, node()),
            main_process_loop(State);
        get_status ->
            %% Debug aid: print the current server state.
            io:format("~tp~n",[State]),
            main_process_loop(State)
    end.
%%%----------------------------------------------------------------------
%%% cover_server on remote node
%%%----------------------------------------------------------------------
%% Initialize the cover server on a remote node: register, create the
%% local ETS tables, acknowledge the starter and enter the remote loop.
init_remote(Starter,MainNode) ->
    register(?SERVER,self()),
    ?COVER_MAPPING_TABLE = ets:new(?COVER_MAPPING_TABLE,
                                   [ordered_set, public, named_table]),
    ?COVER_CLAUSE_TABLE = ets:new(?COVER_CLAUSE_TABLE, [set, public,
                                                        named_table]),
    Starter ! {self(),started},
    remote_process_loop(#remote_state{main_node=MainNode}).
%% Main receive loop of the cover server on a remote node. Requests
%% come from the main node's cover server and are acknowledged with
%% remote_reply/2. Unknown messages are logged and, when they look
%% like API calls, rejected with {error,not_main_node}.
remote_process_loop(State) ->
    receive
        {remote,load_compiled,Compiled} ->
            Compiled1 = load_compiled(Compiled,State#remote_state.compiled),
            remote_reply(State#remote_state.main_node, ok),
            %% Fully qualified call so a reloaded cover module takes effect.
            ?MODULE:remote_process_loop(State#remote_state{compiled=Compiled1});
        {remote,unload,UnloadedModules} ->
            unload(UnloadedModules),
            Compiled =
                update_compiled(UnloadedModules, State#remote_state.compiled),
            remote_reply(State#remote_state.main_node, ok),
            remote_process_loop(State#remote_state{compiled=Compiled});
        {remote,reset,Module} ->
            reset_counters(Module),
            remote_reply(State#remote_state.main_node, ok),
            remote_process_loop(State);
        {remote,collect,Module,CollectorPid} ->
            %% Old message format: convert to the new format and handle
            %% it in the clause below. Bug fix: the loop must be
            %% re-entered here - previously the server fell out of the
            %% receive and terminated, losing the converted message.
            self() ! {remote,collect,Module,CollectorPid, ?SERVER},
            remote_process_loop(State);
        {remote,collect,Modules0,CollectorPid,From} ->
            Modules = case Modules0 of
                          '_' -> [M || {M,_} <- State#remote_state.compiled];
                          _ -> Modules0
                      end,
            %% Collect in a separate process so the server stays responsive.
            spawn(fun() ->
                          ?SPAWN_DBG(remote_collect,
                                     {Modules, CollectorPid, From}),
                          do_collect(Modules, CollectorPid, From)
                  end),
            remote_process_loop(State);
        {remote,stop} ->
            reload_originals(State#remote_state.compiled),
            ets:delete(?COVER_MAPPING_TABLE),
            ets:delete(?COVER_CLAUSE_TABLE),
            delete_all_counters(),
            unregister(?SERVER),
            ok; % not replying since 'DOWN' message will be received anyway
        {remote,get_compiled} ->
            remote_reply(State#remote_state.main_node,
                         State#remote_state.compiled),
            remote_process_loop(State);
        {From, get_main_node} ->
            remote_reply(From, State#remote_state.main_node),
            remote_process_loop(State);
        get_status ->
            %% Debug aid: print the current server state.
            io:format("~tp~n",[State]),
            remote_process_loop(State);
        M ->
            io:format("WARNING: remote cover_server received\n~p\n",[M]),
            case M of
                {From,_} ->
                    case is_from(From) of
                        true ->
                            reply(From,{error,not_main_node});
                        false ->
                            ok
                    end;
                _ ->
                    ok
            end,
            remote_process_loop(State)
    end.
%% Send counter data for all given modules (one worker per module) to
%% the collector process, then tell it we are done and acknowledge
%% the request.
do_collect(Modules, CollectorPid, From) ->
    _ = pmap(fun(Module) ->
                     send_counters(Module, CollectorPid)
             end, Modules),
    CollectorPid ! done,
    remote_reply(From, ok).
%% Send one chunk of counter data to the collector and wait for its
%% 'continue' ack - simple flow control so the collector is not
%% flooded with large chunks.
send_chunk(CollectorPid,Chunk) ->
    CollectorPid ! {chunk,Chunk,self()},
    receive continue -> ok end.
%% Wait for 'DOWN' messages for all {Pid,Ref} monitor pairs in Mons.
%% A 'DOWN' that does not belong to Mons is re-sent to self so its
%% real handler can pick it up later.
get_downs([]) ->
    ok;
get_downs(Mons) ->
    receive
        {'DOWN', Ref, _Type, Pid, _Reason} = Down ->
            case lists:member({Pid,Ref},Mons) of
                true ->
                    get_downs(lists:delete({Pid,Ref},Mons));
                false ->
                    %% This should be handled somewhere else
                    self() ! Down,
                    get_downs(Mons)
            end
    end.
%% Restore the original (non-instrumented) code for all cover-compiled
%% modules, in parallel.
reload_originals(Compiled) ->
    _ = pmap(fun do_reload_original/1, [M || {M,_} <- Compiled]),
    ok.
%% Replace the cover-compiled code for Module with the original beam
%% from the code path. Only acts if the currently loaded code is
%% cover-compiled (code:which/1 returns ?TAG).
do_reload_original(Module) ->
    case code:which(Module) of
        ?TAG ->
            _ = code:purge(Module),     % remove code marked as 'old'
            _ = code:delete(Module),    % mark cover compiled code as 'old'
            %% Note: original beam code must be loaded before the cover
            %% compiled code is purged, in order for references to
            %% 'fun M:F/A' and 'fun F/A' funs to be correct (they
            %% refer to (M:)F/A in the *latest* version of the module)
            _ = code:load_file(Module), % load original code
            _ = code:purge(Module);     % remove cover compiled code
        _ ->
            ignore
    end.
%% Load cover-compiled code received from the main node on this
%% (remote) node, accumulating the successfully loaded {Module,File}
%% pairs. Sticky modules are temporarily unstuck around the load.
load_compiled([Data|Compiled],Acc) ->
    %% Make sure the #bump{} records and counters are available *before*
    %% compiling and loading the code.
    #remote_data{module=Module,file=File,code=Beam,
                 mapping=InitialMapping,clauses=InitialClauses} = Data,
    ets:insert(?COVER_MAPPING_TABLE, InitialMapping),
    ets:insert(?COVER_CLAUSE_TABLE, InitialClauses),
    maybe_create_counters(Module, true),
    Sticky = case code:is_sticky(Module) of
                 true ->
                     code:unstick_mod(Module),
                     true;
                 false ->
                     false
             end,
    NewAcc = case code:load_binary(Module, ?TAG, Beam) of
                 {module,Module} ->
                     add_compiled(Module, File, Acc);
                 _ ->
                     %% Load failed - clean up the tables/counters again.
                     do_clear(Module),
                     Acc
             end,
    case Sticky of
        true -> code:stick_mod(Module);
        false -> ok
    end,
    load_compiled(Compiled,NewAcc);
load_compiled([],Acc) ->
    Acc.
%% Unload cover instrumentation for the given modules: clear their
%% bookkeeping data and reload the original code for each.
unload(Modules) ->
    lists:foreach(fun(Module) ->
                          do_clear(Module),
                          do_reload_original(Module)
                  end,
                  Modules).
%%%----------------------------------------------------------------------
%%% Internal functions
%%%----------------------------------------------------------------------
%%%--Handling of remote nodes--------------------------------------------
%% Start cover servers on the given nodes via rpc, monitor each
%% started server, push the currently compiled modules to them, and
%% extend the state with the nodes that actually started.
do_start_nodes(Nodes, State) ->
    ThisNode = node(),
    StartedNodes =
        lists:foldl(
          fun(Node,Acc) ->
                  case rpc:call(Node,cover,remote_start,[ThisNode]) of
                      {ok,_RPid} ->
                          erlang:monitor(process,{?SERVER,Node}),
                          [Node|Acc];
                      Error ->
                          io:format("Could not start cover on ~w: ~tp\n",
                                    [Node,Error]),
                          Acc
                  end
          end,
          [],
          Nodes),
    %% In case some of the compiled modules have been unloaded they
    %% should not be loaded on the new node.
    {_LoadedModules,Compiled} =
        get_compiled_still_loaded(State#main_state.nodes,
                                  State#main_state.compiled),
    remote_load_compiled(StartedNodes,Compiled),
    State1 =
        State#main_state{nodes = State#main_state.nodes ++ StartedNodes,
                         compiled = Compiled},
    {StartedNodes, State1}.
%% start the cover_server on a remote node
%% Start the cover server on this (remote) node on behalf of MainNode,
%% unless it is already running. Mirrors start/0: monitor the spawned
%% server while waiting for the start acknowledgement.
remote_start(MainNode) ->
    case whereis(?SERVER) of
        undefined ->
            Starter = self(),
            Pid = spawn(fun() ->
                                ?SPAWN_DBG(remote_start,{MainNode}),
                                init_remote(Starter,MainNode)
                        end),
            Ref = erlang:monitor(process,Pid),
            Return =
                receive
                    {Pid,started} ->
                        {ok,Pid};
                    {'DOWN', Ref, _Type, _Object, Info} ->
                        {error,Info}
                end,
            %% Use [flush] so a racing 'DOWN' message does not stay in
            %% the mailbox after we demonitor.
            erlang:demonitor(Ref, [flush]),
            Return;
        Pid ->
            {error,{already_started,Pid}}
    end.
%% If a lost node comes back, ensure that main and remote node has the
%% same cover compiled modules. Note that no action is taken if the
%% same {Mod,File} exists on both, i.e. code change is not handled!
%% Re-synchronize a previously lost node: unload modules it has that
%% we no longer have, push modules it is missing, and move it from
%% the lost list back to the active node list.
sync_compiled(Node,State) ->
    #main_state{compiled=Compiled0,nodes=Nodes,lost_nodes=Lost}=State,
    State1 =
        case remote_call(Node,{remote,get_compiled}) of
            {error,node_dead} ->
                %% The server died again - restart it from scratch.
                {_,S} = do_start_nodes([Node],State),
                S;
            {error,_} ->
                State;
            RemoteCompiled ->
                {_,Compiled} = get_compiled_still_loaded(Nodes,Compiled0),
                Unload = [UM || {UM,_}=U <- RemoteCompiled,
                                false == lists:member(U,Compiled)],
                remote_unload([Node],Unload),
                Load = [L || L <- Compiled,
                             false == lists:member(L,RemoteCompiled)],
                remote_load_compiled([Node],Load),
                State#main_state{compiled=Compiled, nodes=[Node|Nodes]}
        end,
    State1#main_state{lost_nodes=Lost--[Node]}.
%% Load a set of cover compiled modules on remote nodes,
%% We do it ?MAX_MODS modules at a time so that we don't
%% run out of memory on the cover_server node.
-define(MAX_MODS, 10).
%% Load cover-compiled modules on the given remote nodes, batching
%% ?MAX_MODS modules at a time to bound memory use while the beam
%% binaries are collected.
remote_load_compiled(Nodes,Compiled) ->
    remote_load_compiled(Nodes, Compiled, [], 0).
%% Nothing left to collect or send - done.
remote_load_compiled(_Nodes, [], [], _ModNum) ->
    ok;
%% A full batch (or the final partial batch): wait for the data
%% collector workers and push the batch to every node.
remote_load_compiled(Nodes, Compiled, Acc, ModNum)
  when Compiled == []; ModNum == ?MAX_MODS ->
    RemoteLoadData = get_downs_r(Acc),
    lists:foreach(
      fun(Node) ->
              remote_call(Node,{remote,load_compiled,RemoteLoadData})
      end,
      Nodes),
    remote_load_compiled(Nodes, Compiled, [], 0);
%% Spawn a worker per module to gather the data needed for loading.
remote_load_compiled(Nodes, [MF | Rest], Acc, ModNum) ->
    remote_load_compiled(
      Nodes, Rest,
      [spawn_job_r(fun() -> get_data_for_remote_loading(MF) end) | Acc],
      ModNum + 1).
%% Spawn a monitored worker whose result is delivered as its exit
%% reason (collected by get_downs_r/1).
spawn_job_r(Fun) ->
    spawn_monitor(fun() -> exit(Fun()) end).
%% Collect the #remote_data{} results from the workers spawned by
%% spawn_job_r/1 (delivered as 'DOWN' exit reasons). Any other exit
%% reason from one of our workers is a real failure and is re-raised;
%% foreign 'DOWN' messages are re-sent to self for their own handler.
get_downs_r([]) ->
    [];
get_downs_r(Mons) ->
    receive
        {'DOWN', Ref, _Type, Pid, #remote_data{}=R} ->
            [R|get_downs_r(lists:delete({Pid,Ref},Mons))];
        {'DOWN', Ref, _Type, Pid, Reason} = Down ->
            case lists:member({Pid,Ref},Mons) of
                true ->
                    %% Something went really wrong - don't hang!
                    exit(Reason);
                false ->
                    %% This should be handled somewhere else
                    self() ! Down,
                    get_downs_r(Mons)
            end
    end.
%% Read all data needed for loading a cover compiled module on a remote
%% node: the beam code from ?BINARY_TABLE plus the initial counter
%% mapping and clause data to seed the remote node's tables with.
get_data_for_remote_loading({Module,File}) ->
    [{Module,Code}] = ets:lookup(?BINARY_TABLE, Module),
    %%! The mapping list will be long if the module is big - what to do??
    Mapping = counters_mapping_table(Module),
    InitialClauses = ets:lookup(?COVER_CLAUSE_TABLE,Module),
    #remote_data{module=Module,file=File,code=Code,
                 mapping=Mapping,clauses=InitialClauses}.
%% Tell every node in Nodes to unload the given cover-compiled modules.
remote_unload(Nodes, UnloadedModules) ->
    _ = [remote_call(Node, {remote,unload,UnloadedModules}) || Node <- Nodes],
    ok.
%% Ask every node in Nodes to reset coverage data for Module
%% (or for all modules, depending on what Module denotes upstream).
remote_reset(Module, Nodes) ->
    _ = [remote_call(Node, {remote,reset,Module}) || Node <- Nodes],
    ok.
%% Collect data from remote nodes - used for analyse or stop(Node)
%% One collection per node, run in parallel via pmap/2.
remote_collect(Modules,Nodes,Stop) ->
    _ = pmap(
          fun(Node) ->
                  ?SPAWN_DBG(remote_collect,
                             {Modules, Nodes, Stop}),
                  do_collection(Node, Modules, Stop)
          end, Nodes),
    ok.

%% Run one collection against Node. A local collector process receives
%% the data chunks; when Stop is true the remote cover server is also
%% stopped after a successful collection.
do_collection(Node, Module, Stop) ->
    CollectorPid = spawn(fun collector_proc/0),
    case remote_call(Node,{remote,collect,Module,CollectorPid, self()}) of
        {error,node_dead} ->
            %% Nothing to collect; just shut down the collector.
            CollectorPid ! done,
            ok;
        ok when Stop ->
            remote_call(Node,{remote,stop});
        ok ->
            ok
    end.

%% Process which receives chunks of data from remote nodes - either when
%% analysing or when stopping cover on the remote nodes.
%% Every chunk is acknowledged with 'continue' so the sender can
%% throttle; 'done' terminates the collector.
collector_proc() ->
    ?SPAWN_DBG(collector_proc, []),
    receive
        {chunk,Chunk,From} ->
            insert_in_collection_table(Chunk),
            From ! continue,
            collector_proc();
        done ->
            ok
    end.
%% Store a chunk of {Key,Val} counter pairs in the collection table.
insert_in_collection_table(Chunk) ->
    lists:foreach(fun({Key, Val}) ->
                          insert_in_collection_table(Key, Val)
                  end,
                  Chunk).
%% Add Val to the counter stored under Key in ?COLLECTION_TABLE,
%% creating the entry the first time Key is seen.
insert_in_collection_table(Key,Val) ->
    case ets:member(?COLLECTION_TABLE,Key) of
        true ->
            _ = ets:update_counter(?COLLECTION_TABLE, Key,Val),
            ok;
        false ->
            %% Make sure that there are no race conditions from ets:member
            case ets:insert_new(?COLLECTION_TABLE,{Key,Val}) of
                false ->
                    %% Key appeared between member/2 and insert_new/2 -
                    %% retry, which will take the update_counter path.
                    insert_in_collection_table(Key,Val);
                _ ->
                    ok
            end
    end.
%% Drop the local node from a node list. Note: like the original
%% accumulator loop, the result comes out in reverse order of Nodes.
remove_myself(Nodes, Acc0) ->
    Self = node(),
    lists:foldl(fun(Node, Acc) ->
                        case Node =:= Self of
                            true -> Acc;
                            false -> [Node | Acc]
                        end
                end,
                Acc0, Nodes).
%%%--Handling of modules state data--------------------------------------

%% Print a notice if the analysis of Module includes data that was
%% imported from files; silent when nothing was imported.
analyse_info(_Module,[]) ->
    ok;
analyse_info(Module,Imported) ->
    imported_info("Analysis",Module,Imported).
%% Deliberately a no-op: we do not print that the export includes
%% imported modules.
export_info(_Module, _Imported) ->
    ok.
%% Deliberately a no-op: we do not print that the export includes
%% imported modules.
export_info(_Imported) ->
    ok.
%% Gather the union of all import-file names mentioned in Imported,
%% a list of {Module, File, ImportFiles} entries, on top of Acc0.
get_all_importfiles(Imported, Acc0) ->
    lists:foldl(fun({_M, _F, ImportFiles}, Acc) ->
                        do_get_all_importfiles(ImportFiles, Acc)
                end,
                Acc0, Imported).
%% Prepend each import file to Acc unless it is already present,
%% preserving the original left-to-right accumulation order.
do_get_all_importfiles(ImportFiles, Acc0) ->
    lists:foldl(fun(ImportFile, Acc) ->
                        case lists:member(ImportFile, Acc) of
                            true -> Acc;
                            false -> [ImportFile | Acc]
                        end
                end,
                Acc0, ImportFiles).
%% Print which import files contributed data for Module, if any.
%% Text names the operation being reported, e.g. "Analysis".
imported_info(Text,Module,Imported) ->
    case lists:keysearch(Module,1,Imported) of
        {value,{Module,_File,ImportFiles}} ->
            io:format("~ts includes data from imported files\n~tp\n",
                      [Text,ImportFiles]);
        false ->
            ok
    end.
%% Record that data for Module (source File) was imported from
%% ImportFile. Returns {ok,NewImported}, or dont_import (with a
%% warning) if this exact file was already imported for the module.
add_imported(Module, File, ImportFile, Imported) ->
    add_imported(Module, File, filename:absname(ImportFile), Imported, []).

%% Scan Imported for an existing entry for M, keeping the already
%% scanned prefix reversed in Acc so list order is preserved.
add_imported(M, F1, ImportFile, [{M,_F2,ImportFiles}|Imported], Acc) ->
    case lists:member(ImportFile,ImportFiles) of
        true ->
            io:fwrite("WARNING: Module ~w already imported from ~tp~n"
                      "Not importing again!~n",[M,ImportFile]),
            dont_import;
        false ->
            NewEntry = {M, F1, [ImportFile | ImportFiles]},
            {ok, lists:reverse([NewEntry | Acc]) ++ Imported}
    end;
add_imported(M, F, ImportFile, [H|Imported], Acc) ->
    add_imported(M, F, ImportFile, Imported, [H|Acc]);
add_imported(M, F, ImportFile, [], Acc) ->
    %% No existing entry - append a fresh one at the end.
    {ok, lists:reverse([{M, F, [ImportFile]} | Acc])}.

%% Removes a module from the list of imported modules and writes a warning
%% This is done when a module is compiled.
remove_imported(Module,Imported) ->
    case lists:keysearch(Module,1,Imported) of
        {value,{Module,_,ImportFiles}} ->
            io:fwrite("WARNING: Deleting data for module ~w imported from~n"
                      "~tp~n",[Module,ImportFiles]),
            lists:keydelete(Module,1,Imported);
        false ->
            Imported
    end.
%% Adds information to the list of compiled modules, preserving time order
%% and without adding duplicate entries: an existing entry for Module is
%% replaced in place, otherwise the new entry is appended last.
add_compiled(Module, File, Compiled) ->
    case lists:keymember(Module, 1, Compiled) of
        true ->
            lists:keyreplace(Module, 1, Compiled, {Module, File});
        false ->
            Compiled ++ [{Module, File}]
    end.
%% Partition Modules into {Loaded, Imported, Error}: Loaded are
%% cover-compiled and still loaded, Imported only have imported data,
%% and Error entries are {not_cover_compiled,Module} (produced when
%% is_loaded/2 throws).
are_loaded([Module|Modules], State, Loaded, Imported, Error) ->
    try is_loaded(Module,State) of
        {loaded,File} ->
            are_loaded(Modules, State, [{Module,File}|Loaded], Imported, Error);
        {imported,File,_} ->
            are_loaded(Modules, State, Loaded, [{Module,File}|Imported], Error)
    catch throw:_ ->
            are_loaded(Modules, State, Loaded, Imported,
                       [{not_cover_compiled,Module}|Error])
    end;
are_loaded([], _State, Loaded, Imported, Error) ->
    {Loaded, Imported, Error}.

%% Classify one module: {loaded,File} when it is in the compiled list
%% and the currently loaded code is the cover version (code:which/1
%% returns ?TAG); {imported,File,ImportFiles} when only imported data
%% exists. Throws 'unloaded' or 'not_loaded' otherwise.
is_loaded(Module, State) ->
    case get_file(Module, State#main_state.compiled) of
        {ok, File} ->
            case code:which(Module) of
                ?TAG -> {loaded, File};
                _ -> throw(unloaded)
            end;
        false ->
            case get_file(Module,State#main_state.imported) of
                {ok,File,ImportFiles} ->
                    {imported, File, ImportFiles};
                false ->
                    throw(not_loaded)
            end
    end.
%% Look up Module in either a compiled list ({Module,File}) or an
%% imported list ({Module,File,ImportFiles}). Returns the matching
%% {ok,...} tuple, or false when the module is absent.
get_file(_Module, []) ->
    false;
get_file(Module, [Entry | Rest]) ->
    case Entry of
        {Module, File} ->
            {ok, File};
        {Module, File, ImportFiles} ->
            {ok, File, ImportFiles};
        _Other ->
            get_file(Module, Rest)
    end.
%% Find the .beam file for Module. When code:which/1 gave ?TAG the
%% module is cover-compiled, so the real file is looked up in the
%% Compiled list; a .erl entry there means the module was compiled
%% from source and no beam file exists. Otherwise BeamFile itself is
%% the answer.
get_beam_file(Module,?TAG,Compiled) ->
    {value,{Module,File}} = lists:keysearch(Module,1,Compiled),
    case filename:extension(File) of
        ".erl" -> {error,no_beam};
        ".beam" -> {ok,File}
    end;
get_beam_file(_Module,BeamFile,_Compiled) ->
    {ok,BeamFile}.
%% Extract the module names from a list of {Module, File} pairs,
%% preserving order.
get_modules(Compiled) ->
    {Modules, _Files} = lists:unzip(Compiled),
    Modules.
%% Remove the entries for Modules from the Compiled list.
%% NOTE(review): the first clause only fires when the head of Modules
%% is also the head of Compiled, i.e. the two lists appear to be
%% assumed to share relative order - confirm with callers before
%% restructuring.
update_compiled([Module|Modules], [{Module,_File}|Compiled]) ->
    update_compiled(Modules, Compiled);
update_compiled(Modules, [H|Compiled]) ->
    [H|update_compiled(Modules, Compiled)];
update_compiled(_Modules, []) ->
    [].

%% Get all compiled modules which are still loaded, and possibly an
%% updated version of the Compiled list.
get_compiled_still_loaded(Nodes,Compiled0) ->
    %% Find all Cover compiled modules which are still loaded
    CompiledModules = get_modules(Compiled0),
    LoadedModules = lists:filter(fun(Module) ->
                                         case code:which(Module) of
                                             ?TAG -> true;
                                             _ -> false
                                         end
                                 end,
                                 CompiledModules),
    %% If some Cover compiled modules have been unloaded, update the database.
    UnloadedModules = CompiledModules--LoadedModules,
    Compiled =
        case UnloadedModules of
            [] ->
                Compiled0;
            _ ->
                %% Clear local counter data and tell remote nodes to
                %% unload before dropping the bookkeeping entries.
                lists:foreach(fun(Module) -> do_clear(Module) end,
                              UnloadedModules),
                remote_unload(Nodes,UnloadedModules),
                update_compiled(UnloadedModules, Compiled0)
        end,
    {LoadedModules,Compiled}.
%%%--Compilation---------------------------------------------------------

%% Cover-compile a list of {ok,Module,File} entries in parallel
%% (error tuples are passed through untouched), then load the
%% successful results on all known remote nodes.
do_compile_beams(ModsAndFiles, State) ->
    Result0 = pmap(fun({ok,Module,File}) ->
                           do_compile_beam(Module, File, State);
                      (Error) ->
                           Error
                   end,
                   ModsAndFiles),
    Compiled = [{M,F} || {ok,M,F} <- Result0],
    remote_load_compiled(State#main_state.nodes,Compiled),
    fix_state_and_result(Result0,State,[]).

%% Cover-compile one module from its .beam file.
do_compile_beam(Module,BeamFile0,State) ->
    case get_beam_file(Module,BeamFile0,State#main_state.compiled) of
        {ok,BeamFile} ->
            LocalOnly = State#main_state.local_only,
            UserOptions = get_compile_options(Module,BeamFile),
            case do_compile_beam1(Module,BeamFile,
                                  UserOptions,LocalOnly) of
                {ok, Module} ->
                    {ok,Module,BeamFile};
                error ->
                    {error, BeamFile};
                {error,Reason} -> % no abstract code or no 'file' attribute
                    {error, {Reason, BeamFile}}
            end;
        {error,no_beam} ->
            %% The module has first been compiled from .erl, and now
            %% someone tries to compile it from .beam
            {error,{already_cover_compiled,no_beam_found,Module}}
    end.

%% Fold the per-module compile results into the server state:
%% each successful module is added to 'compiled' and removed from
%% 'imported'. Returns {ResultsForCaller, NewState} with results in
%% the original order.
fix_state_and_result([{ok,Module,BeamFile}|Rest],State,Acc) ->
    Compiled = add_compiled(Module,BeamFile,State#main_state.compiled),
    Imported = remove_imported(Module,State#main_state.imported),
    NewState = State#main_state{compiled=Compiled,imported=Imported},
    fix_state_and_result(Rest,NewState,[{ok,Module}|Acc]);
fix_state_and_result([Error|Rest],State,Acc) ->
    fix_state_and_result(Rest,State,[Error|Acc]);
fix_state_and_result([],State,Acc) ->
    {lists:reverse(Acc),State}.

%% Cover-compile a list of .erl files in parallel and load the
%% successful results on all known remote nodes.
do_compile(Files, Options, State) ->
    LocalOnly = State#main_state.local_only,
    Result0 = pmap(fun(File) ->
                           do_compile1(File, Options, LocalOnly)
                   end,
                   Files),
    Compiled = [{M,F} || {ok,M,F} <- Result0],
    remote_load_compiled(State#main_state.nodes,Compiled),
    fix_state_and_result(Result0,State,[]).

%% Tag the compile result with the source file name.
do_compile1(File, Options, LocalOnly) ->
    case do_compile2(File, Options, LocalOnly) of
        {ok, Module} ->
            {ok,Module,File};
        error ->
            {error,File}
    end.

%% do_compile2(File, Options) -> {ok,Module} | error
%% Compile the source to a binary (debug_info is required so the
%% abstract code can be extracted), then instrument it.
do_compile2(File, UserOptions, LocalOnly) ->
    Options = [debug_info,binary,report_errors,report_warnings] ++ UserOptions,
    case compile:file(File, Options) of
        {ok, Module, Binary} ->
            do_compile_beam1(Module,Binary,UserOptions,LocalOnly);
        error ->
            error
    end.

%% Beam is a binary or a .beam file name
do_compile_beam1(Module,Beam,UserOptions,LocalOnly) ->
    %% Clear database
    do_clear(Module),
    %% Extract the abstract format.
    case get_abstract_code(Module, Beam) of
        no_abstract_code=E ->
            {error,E};
        encrypted_abstract_code=E ->
            {error,E};
        {raw_abstract_v1,Code} ->
            Forms0 = epp:interpret_file_attribute(Code),
            case find_main_filename(Forms0) of
                {ok,MainFile} ->
                    do_compile_beam2(Module,Beam,UserOptions,
                                     Forms0,MainFile,LocalOnly);
                Error ->
                    Error
            end;
        {_VSN,_Code} ->
            %% Wrong version of abstract code. Just report that there
            %% is no abstract code.
            {error,no_abstract_code}
    end.

%% Fetch the abstract_code chunk from a beam file or binary.
get_abstract_code(Module, Beam) ->
    case beam_lib:chunks(Beam, [abstract_code]) of
        {ok, {Module, [{abstract_code, AbstractCode}]}} ->
            AbstractCode;
        {error,beam_lib,{key_missing_or_invalid,_,_}} ->
            encrypted_abstract_code;
        Error -> Error
    end.
%% Instrument the abstract code, compile it, and load the resulting
%% binary under ?TAG. On success the clause info and the binary are
%% stored in ETS so remote nodes can be loaded later.
do_compile_beam2(Module,Beam,UserOptions,Forms0,MainFile,LocalOnly) ->
    init_counter_mapping(Module),
    %% Instrument the abstract code by inserting
    %% calls to update the counters.
    {Forms,Vars} = transform(Forms0, Module, MainFile, LocalOnly),
    %% Create counters.
    maybe_create_counters(Module, not LocalOnly),
    %% We need to recover the source from the compilation
    %% info otherwise the newly compiled module will have
    %% source pointing to the current directory
    SourceInfo = get_source_info(Module, Beam),
    %% Compile and load the result.
    %% It's necessary to check the result of loading since it may
    %% fail, for example if Module resides in a sticky directory.
    Options = SourceInfo ++ UserOptions,
    {ok, Module, Binary} = compile:forms(Forms, Options),
    case code:load_binary(Module, ?TAG, Binary) of
        {module, Module} ->
            %% Store info about all function clauses in database.
            InitInfo = lists:reverse(Vars#vars.init_info),
            ets:insert(?COVER_CLAUSE_TABLE, {Module, InitInfo}),
            %% Store binary code so it can be loaded on remote nodes.
            ets:insert(?BINARY_TABLE, {Module, Binary}),
            {ok, Module};
        _Error ->
            %% Load failed (e.g. sticky directory) - undo the clear/insert.
            do_clear(Module),
            error
    end.
%% Return [{source,Path}] from the module's compile info, or [] when
%% no source location is recorded.
get_source_info(Module, Beam) ->
    CompileInfo = get_compile_info(Module, Beam),
    case lists:keyfind(source, 1, CompileInfo) of
        false ->
            [];
        {source, _} = SourceTuple ->
            [SourceTuple]
    end.
%% Return the (filtered) compile options recorded in the beam file,
%% or [] when none are recorded.
get_compile_options(Module, Beam) ->
    Compile = get_compile_info(Module, Beam),
    case lists:keyfind(options, 1, Compile) of
        {options, Options } -> filter_options(Options);
        false -> []
    end.

%% Fetch the compile_info chunk from a beam file or binary;
%% returns [] on any failure.
get_compile_info(Module, Beam) ->
    case beam_lib:chunks(Beam, [compile_info]) of
        {ok, {Module, [{compile_info, Compile}]}} ->
            Compile;
        _ ->
            []
    end.
%% Munge the abstract code: insert 'BUMP' placeholders for each
%% executable line (transform_2), then patch the placeholders into
%% real counter updates (patch_code). Returns the munged forms and
%% the collected #vars{} bookkeeping.
transform(Code, Module, MainFile, LocalOnly) ->
    Vars0 = #vars{module=Module},
    {ok,MungedForms0,Vars} = transform_2(Code, [], Vars0, MainFile, on),
    MungedForms = patch_code(Module, MungedForms0, LocalOnly),
    {MungedForms,Vars}.
%% Helpfunction which returns the first found file-attribute, which can
%% be interpreted as the name of the main erlang source file.
find_main_filename(Forms) ->
    IsFileAttr = fun({attribute,_,file,{_File,_Line}}) -> true;
                    (_) -> false
                 end,
    case lists:search(IsFileAttr, Forms) of
        {value, {attribute,_,file,{MainFile,_}}} ->
            {ok, MainFile};
        false ->
            {error, no_file_attribute}
    end.
%% Walk the forms, munging each one. Switch is 'on' while inside the
%% main file and 'off' inside included files (decided by munge/4).
transform_2([Form0|Forms],MungedForms,Vars,MainFile,Switch) ->
    Form = expand(Form0),
    case munge(Form,Vars,MainFile,Switch) of
        ignore ->
            transform_2(Forms,MungedForms,Vars,MainFile,Switch);
        {MungedForm,Vars2,NewSwitch} ->
            transform_2(Forms,[MungedForm|MungedForms],Vars2,MainFile,NewSwitch)
    end;
transform_2([],MungedForms,Vars,_,_) ->
    {ok, lists:reverse(MungedForms), Vars}.

%% Expand short-circuit Boolean expressions.
%% AllVars is the set of variable names already used in the form, so
%% freshly invented auxiliary variables cannot clash with them.
expand(Expr) ->
    AllVars = sets:from_list(ordsets:to_list(vars([], Expr))),
    {Expr1,_} = expand(Expr, AllVars, 1),
    Expr1.

%% Rewrite 'andalso'/'orelse' into explicit case switches (via
%% bool_switch/5) so each branch can be counted separately. N numbers
%% the auxiliary variables.
expand({clause,Line,Pattern,Guards,Body}, Vs, N) ->
    %% Only the clause body is expanded; guards are left untouched.
    {ExpandedBody,N2} = expand(Body, Vs, N),
    {{clause,Line,Pattern,Guards,ExpandedBody},N2};
expand({op,_Line,'andalso',ExprL,ExprR}, Vs, N) ->
    {ExpandedExprL,N2} = expand(ExprL, Vs, N),
    {ExpandedExprR,N3} = expand(ExprR, Vs, N2),
    Anno = element(2, ExpandedExprL),
    {bool_switch(ExpandedExprL,
                 ExpandedExprR,
                 {atom,Anno,false},
                 Vs, N3),
     N3 + 1};
expand({op,_Line,'orelse',ExprL,ExprR}, Vs, N) ->
    {ExpandedExprL,N2} = expand(ExprL, Vs, N),
    {ExpandedExprR,N3} = expand(ExprR, Vs, N2),
    Anno = element(2, ExpandedExprL),
    {bool_switch(ExpandedExprL,
                 {atom,Anno,true},
                 ExpandedExprR,
                 Vs, N3),
     N3 + 1};
expand(T, Vs, N) when is_tuple(T) ->
    %% Generic traversal: recurse through tuples and lists.
    {TL,N2} = expand(tuple_to_list(T), Vs, N),
    {list_to_tuple(TL),N2};
expand([E|Es], Vs, N) ->
    {E2,N2} = expand(E, Vs, N),
    {Es2,N3} = expand(Es, Vs, N2),
    {[E2|Es2],N3};
expand(T, _Vs, N) ->
    {T,N}.
%% Collect (prepended to Acc) the names of all variables occurring
%% anywhere in an abstract-code term, skipping the wildcard '_'.
vars(Acc, {var, _Anno, Name}) when Name =/= '_' ->
    [Name | Acc];
vars(Acc, Tuple) when is_tuple(Tuple) ->
    vars(Acc, tuple_to_list(Tuple));
vars(Acc, [Head | Tail]) ->
    vars(vars(Acc, Head), Tail);
vars(Acc, _Other) ->
    Acc.
%% Build the case expression that replaces an 'andalso'/'orelse':
%%   case E of true -> T; false -> F; Aux -> erlang:error({badarg,Aux}) end
%% AuxVar is a fresh variable name (not clashing with AllVars), used in
%% the catch-all clause that raises badarg for a non-boolean operand.
bool_switch(E, T, F, AllVars, AuxVarN) ->
    Line = element(2, E),
    AuxVar = {var,Line,aux_var(AllVars, AuxVarN)},
    {'case',Line,E,
     [{clause,Line,[{atom,Line,true}],[],[T]},
      {clause,Line,[{atom,Line,false}],[],[F]},
      %% Mark the next clause as compiler-generated to suppress
      %% a warning if the case expression is an obvious boolean
      %% value.
      {clause,erl_anno:set_generated(true, Line),[AuxVar],[],
       [{call,Line,
         {remote,Line,{atom,Line,erlang},{atom,Line,error}},
         [{tuple,Line,[{atom,Line,badarg},AuxVar]}]}]}]}.
%% Pick a fresh auxiliary variable name '_<N>' that does not collide
%% with any name in Vars (a set of atoms), bumping N until free.
aux_var(Vars, N) ->
    Candidate = list_to_atom("_" ++ integer_to_list(N)),
    case sets:is_element(Candidate, Vars) of
        false -> Candidate;
        true -> aux_var(Vars, N + 1)
    end.
%% This code traverses the abstract code, stored as the abstract_code
%% chunk in the BEAM file, as described in absform(3).
%% The switch is turned off when we encounter other files than the main file.
%% This way we will be able to exclude functions defined in include files.
munge({function,Line,Function,Arity,Clauses},Vars,_MainFile,on) ->
    %% Reset the per-function bookkeeping before munging its clauses.
    Vars2 = Vars#vars{function=Function,
                      arity=Arity,
                      clause=1,
                      lines=[],
                      no_bump_lines=[],
                      depth=1},
    {MungedClauses, Vars3} = munge_clauses(Clauses, Vars2),
    {{function,Line,Function,Arity,MungedClauses},Vars3,on};
munge(Form={attribute,_,file,{MainFile,_}},Vars,MainFile,_Switch) ->
    {Form,Vars,on};                     % Switch on transformation!
munge(Form={attribute,_,file,{_InclFile,_}},Vars,_MainFile,_Switch) ->
    {Form,Vars,off};                    % Switch off transformation!
munge({attribute,_,compile,{parse_transform,_}},_Vars,_MainFile,_Switch) ->
    %% Don't want to run parse transforms more than once.
    ignore;
munge(Form,Vars,_MainFile,Switch) ->    % Other attributes and skipped includes.
    {Form,Vars,Switch}.

munge_clauses(Clauses, Vars) ->
    munge_clauses(Clauses, Vars, Vars#vars.lines, []).

%% Munge each clause of a function (depth 1) or of a nested construct
%% (depth 2: receive/case/if/try). Lines accumulates every line that
%% got a bump in any of the clauses.
munge_clauses([Clause|Clauses], Vars, Lines, MClauses) ->
    {clause,Line,Pattern,Guards,Body} = Clause,
    {MungedGuards, _Vars} = munge_exprs(Guards, Vars#vars{is_guard=true},[]),
    case Vars#vars.depth of
        1 -> % function clause
            {MungedBody, Vars2} = munge_body(Body, Vars#vars{depth=2}),
            %% Record this clause in init_info and reset per-clause state.
            ClauseInfo = {Vars2#vars.module,
                          Vars2#vars.function,
                          Vars2#vars.arity,
                          Vars2#vars.clause,
                          length(Vars2#vars.lines)}, % Not used?
            InitInfo = [ClauseInfo | Vars2#vars.init_info],
            Vars3 = Vars2#vars{init_info=InitInfo,
                               clause=(Vars2#vars.clause)+1,
                               lines=[],
                               no_bump_lines=[],
                               depth=1},
            NewBumps = Vars2#vars.lines,
            NewLines = NewBumps ++ Lines,
            munge_clauses(Clauses, Vars3, NewLines,
                          [{clause,Line,Pattern,MungedGuards,MungedBody}|
                           MClauses]);
        2 -> % receive-, case-, if-, or try-clause
            %% Each sibling clause starts from the same 'lines' state
            %% (Lines0); bumps from one clause must not suppress bumps
            %% in the next.
            Lines0 = Vars#vars.lines,
            {MungedBody, Vars2} = munge_body(Body, Vars),
            NewBumps = new_bumps(Vars2, Vars),
            NewLines = NewBumps ++ Lines,
            munge_clauses(Clauses, Vars2#vars{lines=Lines0},
                          NewLines,
                          [{clause,Line,Pattern,MungedGuards,MungedBody}|
                           MClauses])
    end;
munge_clauses([], Vars, Lines, MungedClauses) ->
    {lists:reverse(MungedClauses), Vars#vars{lines = Lines}}.

munge_body(Expr, Vars) ->
    munge_body(Expr, Vars, [], []).

%% Munge the expressions of a body, inserting a bump placeholder in
%% front of the first expression on each not-yet-bumped line.
%% LastExprBumpLines holds the lines bumped by the previous top-level
%% expression, needed for the OTP-8188 last-expression fix below.
munge_body([Expr|Body], Vars, MungedBody, LastExprBumpLines) ->
    %% Here is the place to add a call to cover:bump/6!
    Line = erl_anno:line(element(2, Expr)),
    Lines = Vars#vars.lines,
    case lists:member(Line,Lines) of
        true -> % already a bump at this line
            {MungedExpr, Vars2} = munge_expr(Expr, Vars),
            NewBumps = new_bumps(Vars2, Vars),
            NoBumpLines = [Line|Vars#vars.no_bump_lines],
            Vars3 = Vars2#vars{no_bump_lines = NoBumpLines},
            MungedBody1 =
                maybe_fix_last_expr(MungedBody, Vars3, LastExprBumpLines),
            MungedExprs1 = [MungedExpr|MungedBody1],
            munge_body(Body, Vars3, MungedExprs1, NewBumps);
        false ->
            Bump = bump_call(Vars, Line),
            Lines2 = [Line|Lines],
            {MungedExpr, Vars2} = munge_expr(Expr, Vars#vars{lines=Lines2}),
            NewBumps = new_bumps(Vars2, Vars),
            NoBumpLines = subtract(Vars2#vars.no_bump_lines, NewBumps),
            Vars3 = Vars2#vars{no_bump_lines = NoBumpLines},
            MungedBody1 =
                maybe_fix_last_expr(MungedBody, Vars3, LastExprBumpLines),
            MungedExprs1 = [MungedExpr,Bump|MungedBody1],
            munge_body(Body, Vars3, MungedExprs1, NewBumps)
    end;
munge_body([], Vars, MungedBody, _LastExprBumpLines) ->
    {lists:reverse(MungedBody), Vars}.
%%% Fix last expression (OTP-8188). A typical example:
%%%
%%%  3:   case X of
%%%  4:       1 -> a;  % Bump line 5 after "a" has been evaluated!
%%%  5:       2 -> b; 3 -> c end, F()
%%%
%%% Line 5 wasn't bumped just before "F()" since it was already bumped
%%% before "b" (and before "c") (one mustn't bump a line more than
%%% once in a single "evaluation"). The expression "case X ... end" is
%%% now traversed again ("fixed"), this time adding bumps of line 5
%%% where appropriate, in this case when X matches 1.
%%%
%%% This doesn't solve all problems with expressions on the same line,
%%% though. 'case' and 'try' are tricky. An example:
%%%
%%% 7:    case case X of 1 -> foo(); % ?
%%% 8:                   2 -> bar() end of a -> 1;
%%% 9:                                    b -> 2 end.
%%%
%%% If X matches 1 and foo() evaluates to a then line 8 should be
%%% bumped, but not if foo() evaluates to b. In other words, line 8
%%% cannot be bumped after "foo()" on line 7, so one has to bump line
%%% 8 before "begin 1 end". But if X matches 2 and bar evaluates to a
%%% then line 8 would be bumped twice (there has to be a bump before
%%% "bar()". It is like one would have to have two copies of the inner
%%% clauses, one for each outer clause. Maybe the munging should be
%%% done on some of the compiler's "lower level" format.
%%%
%%% 'fun' is also problematic since a bump inside the body "shadows"
%%% the rest of the line.

%% Apply the OTP-8188 fix to the previous expression when needed.
maybe_fix_last_expr(MungedExprs, Vars, LastExprBumpLines) ->
    case last_expr_needs_fixing(Vars, LastExprBumpLines) of
        {yes, Line} ->
            fix_last_expr(MungedExprs, Line, Vars);
        no ->
            MungedExprs
    end.

%% The last expression needs fixing when exactly one line is both
%% un-bumped in the current body and bumped by the last expression.
last_expr_needs_fixing(Vars, LastExprBumpLines) ->
    case common_elems(Vars#vars.no_bump_lines, LastExprBumpLines) of
        [Line] ->
            {yes, Line};
        _ ->
            no
    end.

%% Re-traverse the head of the munged body (the previous expression)
%% and add bumps of Line in the clauses where it would be missed.
fix_last_expr([MungedExpr|MungedExprs], Line, Vars) ->
    %% No need to update ?COVER_TABLE.
    Bump = bump_call(Vars, Line),
    [fix_expr(MungedExpr, Line, Bump)|MungedExprs].

%% Walk an already-munged expression, fixing the clause lists of the
%% constructs that can end an "evaluation" (if/case/receive/try) and
%% recursing generically through everything else.
fix_expr({'if',L,Clauses}, Line, Bump) ->
    FixedClauses = fix_clauses(Clauses, Line, Bump),
    {'if',L,FixedClauses};
fix_expr({'case',L,Expr,Clauses}, Line, Bump) ->
    FixedExpr = fix_expr(Expr, Line, Bump),
    FixedClauses = fix_clauses(Clauses, Line, Bump),
    {'case',L,FixedExpr,FixedClauses};
fix_expr({'receive',L,Clauses}, Line, Bump) ->
    FixedClauses = fix_clauses(Clauses, Line, Bump),
    {'receive',L,FixedClauses};
fix_expr({'receive',L,Clauses,Expr,Body}, Line, Bump) ->
    FixedClauses = fix_clauses(Clauses, Line, Bump),
    FixedExpr = fix_expr(Expr, Line, Bump),
    FixedBody = fix_expr(Body, Line, Bump),
    {'receive',L,FixedClauses,FixedExpr,FixedBody};
fix_expr({'try',L,Exprs,Clauses,CatchClauses,After}, Line, Bump) ->
    FixedExprs = fix_expr(Exprs, Line, Bump),
    FixedClauses = fix_clauses(Clauses, Line, Bump),
    FixedCatchClauses = fix_clauses(CatchClauses, Line, Bump),
    FixedAfter = fix_expr(After, Line, Bump),
    {'try',L,FixedExprs,FixedClauses,FixedCatchClauses,FixedAfter};
fix_expr([E | Es], Line, Bump) ->
    [fix_expr(E, Line, Bump) | fix_expr(Es, Line, Bump)];
fix_expr(T, Line, Bump) when is_tuple(T) ->
    list_to_tuple(fix_expr(tuple_to_list(T), Line, Bump));
fix_expr(E, _Line, _Bump) ->
    E.

%% Only fix the clauses when the last one bumps Line - otherwise this
%% construct is not the one that bumped Line and is left untouched.
fix_clauses([], _Line, _Bump) ->
    [];
fix_clauses(Cs, Line, Bump) ->
    case bumps_line(lists:last(Cs), Line) of
        true ->
            fix_cls(Cs, Line, Bump);
        false ->
            Cs
    end.

%% A clause that already bumps Line (and all following clauses) is
%% fixed recursively. A clause that does not gets an explicit bump
%% appended: its last expression is bound to a unique variable, the
%% bump is executed, and the variable is returned so the clause value
%% is unchanged.
fix_cls([], _Line, _Bump) ->
    [];
fix_cls([Cl | Cls], Line, Bump) ->
    case bumps_line(Cl, Line) of
        true ->
            [fix_expr(C, Line, Bump) || C <- [Cl | Cls]];
        false ->
            {clause,CL,P,G,Body} = Cl,
            UniqueVarName = list_to_atom(lists:concat(["$cover$ ",Line])),
            A = erl_anno:new(0),
            V = {var,A,UniqueVarName},
            [Last|Rest] = lists:reverse(Body),
            Body1 = lists:reverse(Rest, [{match,A,V,Last},Bump,V]),
            [{clause,CL,P,G,Body1} | fix_cls(Cls, Line, Bump)]
    end.

%% True if the term contains a 'BUMP' placeholder for line L.
bumps_line(E, L) ->
    try bumps_line1(E, L) catch true -> true end.

%% Throws 'true' as soon as a matching 'BUMP' is found; returns false
%% if the traversal completes without finding one.
bumps_line1({'BUMP',Line,_}, Line) ->
    throw(true);
bumps_line1([E | Es], Line) ->
    bumps_line1(E, Line),
    bumps_line1(Es, Line);
bumps_line1(T, Line) when is_tuple(T) ->
    bumps_line1(tuple_to_list(T), Line);
bumps_line1(_, _) ->
    false.

%% Insert a place holder for the call to counters:add/3 in the
%% abstract code.
bump_call(Vars, Line) ->
    {'BUMP',Line,counter_index(Vars, Line)}.
%%% End of fix of last expression.

%% Munge one expression: recursively munge its sub-expressions,
%% threading the #vars{} bookkeeping. Bodies of nested constructs go
%% through munge_body/munge_clauses so their lines get bumps; leaf
%% forms fall through the final catch-all clause unchanged.
munge_expr({match,Line,ExprL,ExprR}, Vars) ->
    {MungedExprL, Vars2} = munge_expr(ExprL, Vars),
    {MungedExprR, Vars3} = munge_expr(ExprR, Vars2),
    {{match,Line,MungedExprL,MungedExprR}, Vars3};
munge_expr({tuple,Line,Exprs}, Vars) ->
    {MungedExprs, Vars2} = munge_exprs(Exprs, Vars, []),
    {{tuple,Line,MungedExprs}, Vars2};
munge_expr({record,Line,Name,Exprs}, Vars) ->
    {MungedExprFields, Vars2} = munge_exprs(Exprs, Vars, []),
    {{record,Line,Name,MungedExprFields}, Vars2};
munge_expr({record,Line,Arg,Name,Exprs}, Vars) ->
    {MungedArg, Vars2} = munge_expr(Arg, Vars),
    {MungedExprFields, Vars3} = munge_exprs(Exprs, Vars2, []),
    {{record,Line,MungedArg,Name,MungedExprFields}, Vars3};
munge_expr({record_field,Line,ExprL,ExprR}, Vars) ->
    %% Only the value side is munged; the field name is left as is.
    {MungedExprR, Vars2} = munge_expr(ExprR, Vars),
    {{record_field,Line,ExprL,MungedExprR}, Vars2};
munge_expr({map,Line,Fields}, Vars) ->
    %% EEP 43
    {MungedFields, Vars2} = munge_exprs(Fields, Vars, []),
    {{map,Line,MungedFields}, Vars2};
munge_expr({map,Line,Arg,Fields}, Vars) ->
    %% EEP 43
    {MungedArg, Vars2} = munge_expr(Arg, Vars),
    {MungedFields, Vars3} = munge_exprs(Fields, Vars2, []),
    {{map,Line,MungedArg,MungedFields}, Vars3};
munge_expr({map_field_assoc,Line,Name,Value}, Vars) ->
    %% EEP 43
    {MungedName, Vars2} = munge_expr(Name, Vars),
    {MungedValue, Vars3} = munge_expr(Value, Vars2),
    {{map_field_assoc,Line,MungedName,MungedValue}, Vars3};
munge_expr({map_field_exact,Line,Name,Value}, Vars) ->
    %% EEP 43
    {MungedName, Vars2} = munge_expr(Name, Vars),
    {MungedValue, Vars3} = munge_expr(Value, Vars2),
    {{map_field_exact,Line,MungedName,MungedValue}, Vars3};
munge_expr({cons,Line,ExprH,ExprT}, Vars) ->
    {MungedExprH, Vars2} = munge_expr(ExprH, Vars),
    {MungedExprT, Vars3} = munge_expr(ExprT, Vars2),
    {{cons,Line,MungedExprH,MungedExprT}, Vars3};
munge_expr({op,Line,Op,ExprL,ExprR}, Vars) ->
    {MungedExprL, Vars2} = munge_expr(ExprL, Vars),
    {MungedExprR, Vars3} = munge_expr(ExprR, Vars2),
    {{op,Line,Op,MungedExprL,MungedExprR}, Vars3};
munge_expr({op,Line,Op,Expr}, Vars) ->
    {MungedExpr, Vars2} = munge_expr(Expr, Vars),
    {{op,Line,Op,MungedExpr}, Vars2};
munge_expr({'catch',Line,Expr}, Vars) ->
    {MungedExpr, Vars2} = munge_expr(Expr, Vars),
    {{'catch',Line,MungedExpr}, Vars2};
munge_expr({call,Line1,{remote,Line2,ExprM,ExprF},Exprs},
           Vars) ->
    {MungedExprM, Vars2} = munge_expr(ExprM, Vars),
    {MungedExprF, Vars3} = munge_expr(ExprF, Vars2),
    {MungedExprs, Vars4} = munge_exprs(Exprs, Vars3, []),
    {{call,Line1,{remote,Line2,MungedExprM,MungedExprF},MungedExprs}, Vars4};
munge_expr({call,Line,Expr,Exprs}, Vars) ->
    {MungedExpr, Vars2} = munge_expr(Expr, Vars),
    {MungedExprs, Vars3} = munge_exprs(Exprs, Vars2, []),
    {{call,Line,MungedExpr,MungedExprs}, Vars3};
munge_expr({lc,Line,Expr,Qs}, Vars) ->
    %% The template is wrapped in a block (?BLOCK1) so it can carry a
    %% bump of its own; qualifiers are handled by munge_qualifiers.
    {MungedExpr, Vars2} = munge_expr(?BLOCK1(Expr), Vars),
    {MungedQs, Vars3} = munge_qualifiers(Qs, Vars2),
    {{lc,Line,MungedExpr,MungedQs}, Vars3};
munge_expr({bc,Line,Expr,Qs}, Vars) ->
    {MungedExpr,Vars2} = munge_expr(?BLOCK1(Expr), Vars),
    {MungedQs, Vars3} = munge_qualifiers(Qs, Vars2),
    {{bc,Line,MungedExpr,MungedQs}, Vars3};
munge_expr({block,Line,Body}, Vars) ->
    {MungedBody, Vars2} = munge_body(Body, Vars),
    {{block,Line,MungedBody}, Vars2};
munge_expr({'if',Line,Clauses}, Vars) ->
    {MungedClauses,Vars2} = munge_clauses(Clauses, Vars),
    {{'if',Line,MungedClauses}, Vars2};
munge_expr({'case',Line,Expr,Clauses}, Vars) ->
    {MungedExpr,Vars2} = munge_expr(Expr, Vars),
    {MungedClauses,Vars3} = munge_clauses(Clauses, Vars2),
    {{'case',Line,MungedExpr,MungedClauses}, Vars3};
munge_expr({'receive',Line,Clauses}, Vars) ->
    {MungedClauses,Vars2} = munge_clauses(Clauses, Vars),
    {{'receive',Line,MungedClauses}, Vars2};
munge_expr({'receive',Line,Clauses,Expr,Body}, Vars) ->
    %% receive with 'after': the after-body is munged with the lines
    %% state from before the clauses, and its new bumps are then added
    %% on top of the clauses' lines.
    {MungedExpr, Vars1} = munge_expr(Expr, Vars),
    {MungedClauses,Vars2} = munge_clauses(Clauses, Vars1),
    {MungedBody,Vars3} =
        munge_body(Body, Vars2#vars{lines = Vars1#vars.lines}),
    Vars4 = Vars3#vars{lines = Vars2#vars.lines ++ new_bumps(Vars3, Vars2)},
    {{'receive',Line,MungedClauses,MungedExpr,MungedBody}, Vars4};
munge_expr({'try',Line,Body,Clauses,CatchClauses,After}, Vars) ->
    {MungedBody, Vars1} = munge_body(Body, Vars),
    {MungedClauses, Vars2} = munge_clauses(Clauses, Vars1),
    {MungedCatchClauses, Vars3} = munge_clauses(CatchClauses, Vars2),
    {MungedAfter, Vars4} = munge_body(After, Vars3),
    {{'try',Line,MungedBody,MungedClauses,MungedCatchClauses,MungedAfter},
     Vars4};
munge_expr({'fun',Line,{clauses,Clauses}}, Vars) ->
    {MungedClauses,Vars2}=munge_clauses(Clauses, Vars),
    {{'fun',Line,{clauses,MungedClauses}}, Vars2};
munge_expr({named_fun,Line,Name,Clauses}, Vars) ->
    {MungedClauses,Vars2}=munge_clauses(Clauses, Vars),
    {{named_fun,Line,Name,MungedClauses}, Vars2};
munge_expr({bin,Line,BinElements}, Vars) ->
    {MungedBinElements,Vars2} = munge_exprs(BinElements, Vars, []),
    {{bin,Line,MungedBinElements}, Vars2};
munge_expr({bin_element,Line,Value,Size,TypeSpecifierList}, Vars) ->
    {MungedValue,Vars2} = munge_expr(Value, Vars),
    {MungedSize,Vars3} = munge_expr(Size, Vars2),
    {{bin_element,Line,MungedValue,MungedSize,TypeSpecifierList},Vars3};
munge_expr(Form, Vars) ->
    %% Literals, variables, and any other leaf form: unchanged.
    {Form, Vars}.
%% Munge a list of expressions, threading Vars. Inside guards an
%% element may itself be a list (one guard sequence); it is munged
%% with a throw-away Vars copy (the updated Vars is discarded).
munge_exprs([Expr|Exprs], Vars, MungedExprs) when Vars#vars.is_guard=:=true,
                                                  is_list(Expr) ->
    {MungedExpr, _Vars} = munge_exprs(Expr, Vars, []),
    munge_exprs(Exprs, Vars, [MungedExpr|MungedExprs]);
munge_exprs([Expr|Exprs], Vars, MungedExprs) ->
    {MungedExpr, Vars2} = munge_expr(Expr, Vars),
    munge_exprs(Exprs, Vars2, [MungedExpr|MungedExprs]);
munge_exprs([], Vars, MungedExprs) ->
    {lists:reverse(MungedExprs), Vars}.

%% Every qualifier is decorated with a counter.
munge_qualifiers(Qualifiers, Vars) ->
    munge_qs(Qualifiers, Vars, []).

%% Munge each list/binary generator or filter expression, delegating
%% the bump-accounting decision to munge_qs1/6.
munge_qs([{generate,Line,Pattern,Expr}|Qs], Vars, MQs) ->
    L = element(2, Expr),
    {MungedExpr, Vars2} = munge_expr(Expr, Vars),
    munge_qs1(Qs, L, {generate,Line,Pattern,MungedExpr}, Vars, Vars2, MQs);
munge_qs([{b_generate,Line,Pattern,Expr}|Qs], Vars, MQs) ->
    L = element(2, Expr),
    {MExpr, Vars2} = munge_expr(Expr, Vars),
    munge_qs1(Qs, L, {b_generate,Line,Pattern,MExpr}, Vars, Vars2, MQs);
munge_qs([Expr|Qs], Vars, MQs) ->
    L = element(2, Expr),
    {MungedExpr, Vars2} = munge_expr(Expr, Vars),
    munge_qs1(Qs, L, MungedExpr, Vars, Vars2, MQs);
munge_qs([], Vars, MQs) ->
    {lists:reverse(MQs), Vars}.

%% If munging the qualifier added exactly one new bump it is already
%% counted; otherwise insert a munged 'true' block before the
%% qualifier so its line gets counted on each evaluation.
munge_qs1(Qs, Line, NQ, Vars, Vars2, MQs) ->
    case new_bumps(Vars2, Vars) of
        [_] ->
            munge_qs(Qs, Vars2, [NQ | MQs]);
        _ ->
            {MungedTrue, Vars3} = munge_expr(?BLOCK({atom,Line,true}), Vars2),
            munge_qs(Qs, Vars3, [NQ, MungedTrue | MQs])
    end.

%% Lines bumped in New but not in Old.
new_bumps(#vars{lines = New}, #vars{lines = Old}) ->
    subtract(New, Old).
%% Elements of L1 that do not occur in L2 (order of L1 preserved).
subtract(L1, L2) ->
    lists:filter(fun(E) -> not lists:member(E, L2) end, L1).
%% Elements of L1 that also occur in L2 (order of L1 preserved).
common_elems(L1, L2) ->
    lists:filter(fun(E) -> lists:member(E, L2) end, L1).
%%%--Counters------------------------------------------------------------

%% Register Mod in the mapping table with a zero high-water counter
%% index. Fails (badmatch) if the module is already registered.
init_counter_mapping(Mod) ->
    true = ets:insert_new(?COVER_MAPPING_TABLE, {Mod,0}),
    ok.

%% Return the counter index for Line within the current
%% module/function/arity/clause, allocating a fresh index (by bumping
%% the per-module counter entry) the first time the line is seen.
counter_index(Vars, Line) ->
    #vars{module=Mod,function=F,arity=A,clause=C} = Vars,
    Key = #bump{module=Mod,function=F,arity=A,
                clause=C,line=Line},
    case ets:lookup(?COVER_MAPPING_TABLE, Key) of
        [] ->
            %% First occurrence: allocate the next index for Mod.
            Index = ets:update_counter(?COVER_MAPPING_TABLE,
                                       Mod, {2,1}),
            true = ets:insert(?COVER_MAPPING_TABLE, {Key,Index}),
            Index;
        [{Key,Index}] ->
            Index
    end.

%% Create the counter array and store as a persistent term.
maybe_create_counters(Mod, true) ->
    Cref = create_counters(Mod),
    Key = {?MODULE,Mod},
    persistent_term:put(Key, Cref),
    ok;
maybe_create_counters(_Mod, false) ->
    ok.

%% Allocate a counters array sized by the number of indices allocated
%% for Mod and remember the reference in the mapping table.
create_counters(Mod) ->
    Size0 = ets:lookup_element(?COVER_MAPPING_TABLE, Mod, 2),
    Size = max(1, Size0),                       %Size must not be 0.
    Cref = counters:new(Size, [write_concurrency]),
    ets:insert(?COVER_MAPPING_TABLE, {{counters,Mod},Cref}),
    Cref.
%% Rewrite the abstract code so each 'BUMP' placeholder becomes a real
%% call to counters:add/3. In local-only mode the counter reference is
%% embedded as a literal; otherwise the key is embedded and the
%% reference is fetched at runtime from persistent_term, so the patched
%% code also works on other nodes.
patch_code(Mod, Forms, false) ->
    A = erl_anno:new(0),
    AbstrKey = {tuple,A,[{atom,A,?MODULE},{atom,A,Mod}]},
    patch_code1(Forms, {distributed,AbstrKey});
patch_code(Mod, Forms, true) ->
    Cref = create_counters(Mod),
    AbstrCref = cid_to_abstract(Cref),
    patch_code1(Forms, {local_only,AbstrCref}).

%% Go through the abstract code and replace 'BUMP' forms
%% with the actual code to increment the counters.
patch_code1({'BUMP',_Line,Index}, {distributed,AbstrKey}) ->
    %% Replace with counters:add(persistent_term:get(Key), Index, 1).
    %% This code will work on any node.
    A = element(2, AbstrKey),
    GetCref = {call,A,{remote,A,{atom,A,persistent_term},{atom,A,get}},
               [AbstrKey]},
    {call,A,{remote,A,{atom,A,counters},{atom,A,add}},
     [GetCref,{integer,A,Index},{integer,A,1}]};
patch_code1({'BUMP',_Line,Index}, {local_only,AbstrCref}) ->
    %% Replace with counters:add(Cref, Index, 1). This code
    %% will only work on the local node.
    A = element(2, AbstrCref),
    {call,A,{remote,A,{atom,A,counters},{atom,A,add}},
     [AbstrCref,{integer,A,Index},{integer,A,1}]};
patch_code1({clauses,Cs}, Key) ->
    {clauses,[patch_code1(El, Key) || El <- Cs]};
patch_code1([_|_]=List, Key) ->
    [patch_code1(El, Key) || El <- List];
patch_code1(Tuple, Key) when tuple_size(Tuple) >= 3 ->
    %% Generic abstract-format node: keep tag and annotation, recurse
    %% into the remaining elements.
    Acc = [element(2, Tuple),element(1, Tuple)],
    patch_code_tuple(3, tuple_size(Tuple), Tuple, Key, Acc);
patch_code1(Other, _Key) ->
    Other.

%% Patch elements I..Size of Tuple, accumulating the result in reverse.
patch_code_tuple(I, Size, Tuple, Key, Acc) when I =< Size ->
    El = patch_code1(element(I, Tuple), Key),
    patch_code_tuple(I + 1, Size, Tuple, Key, [El|Acc]);
patch_code_tuple(_I, _Size, _Tuple, _Key, Acc) ->
    list_to_tuple(lists:reverse(Acc)).
%% Don't try this at home! Assumes knowledge of the internal
%% representation of a counter ref.
%% Convert a counters reference to abstract code so it can be embedded
%% as a literal in the patched module (local-only mode).
cid_to_abstract(Cref0) ->
    A = erl_anno:new(0),
    %% Disable dialyzer warning for breaking opacity.
    Cref = binary_to_term(term_to_binary(Cref0)),
    {write_concurrency,Ref} = Cref,
    {tuple,A,[{atom,A,write_concurrency},{integer,A,Ref}]}.
%% Called on the remote node. Collect and send counters to
%% the main node. Also zero the counters.
send_counters(Mod, CollectorPid) ->
    Process = fun(Chunk) -> send_chunk(CollectorPid, Chunk) end,
    move_counters(Mod, Process).

%% Called on the main node. Collect the counters and consolidate
%% them into the collection table. Also zero the counters.
move_counters(Mod) ->
    move_counters(Mod, fun insert_in_collection_table/1).

%% Walk all bump entries for Mod in chunks, read-and-zero each counter
%% and hand the resulting {Key,Count} pairs to the Process fun.
move_counters(Mod, Process) ->
    Pattern = {#bump{module=Mod,_='_'},'_'},
    Matches = ets:match_object(?COVER_MAPPING_TABLE, Pattern, ?CHUNK_SIZE),
    Cref = get_counters_ref(Mod),
    move_counters1(Matches, Cref, Process).

move_counters1({Mappings,Continuation}, Cref, Process) ->
    Move = fun({Key,Index}) ->
                   Count = counters:get(Cref, Index),
                   %% Subtracting the value just read zeroes the
                   %% counter without losing concurrent increments.
                   ok = counters:sub(Cref, Index, Count),
                   {Key,Count}
           end,
    Process(lists:map(Move, Mappings)),
    move_counters1(ets:match_object(Continuation), Cref, Process);
move_counters1('$end_of_table', _Cref, _Process) ->
    ok.
%% Data needed to recreate the counter mapping for Mod elsewhere:
%% the {Mod,Size} entry followed by all bump->index pairs.
counters_mapping_table(Mod) ->
    Mapping = counters_mapping(Mod),
    Cref = get_counters_ref(Mod),
    #{size:=Size} = counters:info(Cref),
    [{Mod,Size}|Mapping].

%% Fetch the counters array reference stored for Mod.
get_counters_ref(Mod) ->
    ets:lookup_element(?COVER_MAPPING_TABLE, {counters,Mod}, 2).

%% All bump->index mapping entries for Mod.
counters_mapping(Mod) ->
    Pattern = {#bump{module=Mod,_='_'},'_'},
    ets:match_object(?COVER_MAPPING_TABLE, Pattern).

%% Forget everything known about Mod's counters: the persistent term,
%% the size entry and all mapping entries.
clear_counters(Mod) ->
    _ = persistent_term:erase({?MODULE,Mod}),
    ets:delete(?COVER_MAPPING_TABLE, Mod),
    Pattern = {#bump{module=Mod,_='_'},'_'},
    _ = ets:match_delete(?COVER_MAPPING_TABLE, Pattern),
    ok.

%% Reset counters (set counters to 0).
reset_counters(Mod) ->
    Pattern = {#bump{module=Mod,_='_'},'$1'},
    MatchSpec = [{Pattern,[],['$1']}],
    Matches = ets:select(?COVER_MAPPING_TABLE,
                         MatchSpec, ?CHUNK_SIZE),
    Cref = get_counters_ref(Mod),
    reset_counters1(Matches, Cref).

reset_counters1({Indices,Continuation}, Cref) ->
    _ = [counters:put(Cref, N, 0) || N <- Indices],
    reset_counters1(ets:select(Continuation), Cref);
reset_counters1('$end_of_table', _Cref) ->
    ok.
%% Erase every counter reference this module stored in persistent_term.
%% persistent_term:get/0 returns {Key,Value} pairs and our keys have
%% the form {?MODULE,Mod}, so the generator must match the *pair* and
%% extract the key. (The previous pattern matched the bare pair
%% against {?MODULE,_}, which never succeeds for {{?MODULE,Mod},Cref}
%% entries, so nothing was ever erased and the terms leaked.)
delete_all_counters() ->
    _ = [persistent_term:erase(Key) ||
            {{?MODULE,_}=Key,_} <- persistent_term:get()],
    ok.
%%%--Analysis------------------------------------------------------------

%% Collect data for all modules
collect(Nodes) ->
    %% local node
    AllClauses = ets:tab2list(?COVER_CLAUSE_TABLE),
    Mon1 = spawn_monitor(fun() -> pmap(fun move_modules/1,AllClauses) end),
    %% remote nodes
    Mon2 = spawn_monitor(fun() -> remote_collect('_',Nodes,false) end),
    get_downs([Mon1,Mon2]).

%% Collect data for a list of modules
collect(Modules,Nodes) ->
    MS = [{{'$1','_'},[{'==','$1',M}],['$_']} || M <- Modules],
    Clauses = ets:select(?COVER_CLAUSE_TABLE,MS),
    Mon1 = spawn_monitor(fun() -> pmap(fun move_modules/1,Clauses) end),
    %% remote nodes
    Mon2 = spawn_monitor(fun() -> remote_collect('_',Nodes,false) end),
    get_downs([Mon1,Mon2]).

%% Collect data for one module
collect(Module,Clauses,Nodes) ->
    %% local node
    move_modules({Module,Clauses}),
    %% remote nodes
    remote_collect([Module],Nodes,false).

%% When analysing, the data from the local ?COVER_TABLE is moved to the
%% ?COLLECTION_TABLE. Resetting data in ?COVER_TABLE
move_modules({Module,Clauses}) ->
    ets:insert(?COLLECTION_CLAUSE_TABLE,{Module,Clauses}),
    move_counters(Module).
%% Given a .beam file, find the .erl file. Look first in same directory as
%% the .beam file, then in ../src, then in compile info.
%%
%% Control flow is throw-based: the first existing candidate is thrown
%% (by throw_file/1) and caught at the bottom. If nothing matches,
%% {beam,File0} is returned so the caller can report missing source.
find_source(Module, File0) ->
    try
        Root = filename:rootname(File0, ".beam"),
        Root == File0 andalso throw(File0), %% not .beam
        %% Look for .erl in pwd.
        File = Root ++ ".erl",
        throw_file(File),
        %% Not in pwd: look in ../src.
        BeamDir = filename:dirname(File),
        Base = filename:basename(File),
        throw_file(filename:join([BeamDir, "..", "src", Base])),
        %% Not in ../src: look for source path in compile info, but
        %% first look relative the beam directory.
        Info =
            try lists:keyfind(source, 1, Module:module_info(compile))
            catch error:undef ->
                    %% The module might have been imported
                    %% and the beam not available
                    throw({beam, File0})
            end,
        false == Info andalso throw({beam, File0}), %% stripped
        {source, SrcFile} = Info,
        throw_file(splice(BeamDir, SrcFile)), %% below ../src
        throw_file(SrcFile), %% or absolute
        %% No success means that source is either not under ../src or
        %% its relative path differs from that of compile info. (For
        %% example, compiled under src/x but installed under src/y.)
        %% An option to specify an arbitrary source path explicitly is
        %% probably a better solution than either more heuristics or a
        %% potentially slow filesystem search.
        {beam, File0}
    catch
        Path -> Path
    end.
%% Non-local return used by find_source/2: throw Path when it names an
%% existing file (or directory); otherwise return false so the caller
%% falls through to the next candidate.
throw_file(Path) ->
    case Path =/= false andalso filelib:is_file(Path) of
        true -> throw(Path);
        false -> false
    end.
%% Splice the tail of a source path, starting from the last "src"
%% component, onto the parent of a beam directory, or return false if
%% no "src" component is found.
%%
%% Eg. splice("/path/to/app-1.0/ebin", "/compiled/path/to/app/src/x/y.erl")
%% --> "/path/to/app-1.0/ebin/../src/x/y.erl"
%%
%% This handles the case of source in subdirectories of ../src with
%% beams that have moved since compilation.
splice(BeamDir, SrcFile) ->
    {BelowSrc, FromSrc} =
        lists:splitwith(fun(Comp) -> Comp =/= "src" end, revsplit(SrcFile)),
    case FromSrc of
        [_|_] ->
            %% Found a "src" component; BelowSrc is the reversed path
            %% below it.
            filename:join([BeamDir, "..", "src" | lists:reverse(BelowSrc)]);
        [] ->
            false
    end.
%% Path components in reverse order (innermost first).
revsplit(Path) ->
    Components = filename:split(Path),
    lists:reverse(Components).
%% Analyse a list of modules: collect counter data for the loaded
%% ones, then run do_analyse/4 over every loaded or imported module's
%% clause info in parallel.
analyse_list(Modules, Analysis, Level, State) ->
    {LoadedMF, ImportedMF, Error} = are_loaded(Modules, State, [], [], []),
    Loaded = [M || {M,_} <- LoadedMF],
    Imported = [M || {M,_} <- ImportedMF],
    collect(Loaded, State#main_state.nodes),
    MS = [{{'$1','_'},[{'==','$1',M}],['$_']} || M <- Loaded ++ Imported],
    AllClauses = ets:select(?COLLECTION_CLAUSE_TABLE,MS),
    Fun = fun({Module,Clauses}) ->
                  do_analyse(Module, Analysis, Level, Clauses)
          end,
    {result, lists:flatten(pmap(Fun, AllClauses)), Error}.

%% Analyse every module known to cover.
analyse_all(Analysis, Level, State) ->
    collect(State#main_state.nodes),
    AllClauses = ets:tab2list(?COLLECTION_CLAUSE_TABLE),
    Fun = fun({Module,Clauses}) ->
                  do_analyse(Module, Analysis, Level, Clauses)
          end,
    {result, lists:flatten(pmap(Fun, AllClauses)), []}.

%% Analyse one module in a spawned worker and reply to From. A loaded
%% module's data is collected first; an imported one is analysed from
%% the collection table as is.
do_parallel_analysis(Module, Analysis, Level, Loaded, From, State) ->
    analyse_info(Module,State#main_state.imported),
    C = case Loaded of
            {loaded, _File} ->
                [{Module,Clauses}] =
                    ets:lookup(?COVER_CLAUSE_TABLE,Module),
                collect(Module,Clauses,State#main_state.nodes),
                Clauses;
            _ ->
                [{Module,Clauses}] =
                    ets:lookup(?COLLECTION_CLAUSE_TABLE,Module),
                Clauses
        end,
    R = do_analyse(Module, Analysis, Level, C),
    reply(From, {ok,R}).
%% do_analyse(Module, Analysis, Level, Clauses)-> {ok,Answer} | {error,Error}
%%   Clauses = [{Module,Function,Arity,Clause,Lines}]
do_analyse(Module, Analysis, line, _Clauses) ->
    Pattern = {#bump{module=Module},'_'},
    Bumps = ets:match_object(?COLLECTION_TABLE, Pattern),
    Fun = case Analysis of
              coverage ->
                  %% {Covered,NotCovered} flag pair per line.
                  fun({#bump{line=L}, 0}) ->
                          {{Module,L}, {0,1}};
                     ({#bump{line=L}, _N}) ->
                          {{Module,L}, {1,0}}
                  end;
              calls ->
                  %% Raw execution count per line.
                  fun({#bump{line=L}, N}) ->
                          {{Module,L}, N}
                  end
          end,
    lists:keysort(1, lists:map(Fun, Bumps));
do_analyse(Module, Analysis, clause, _Clauses) ->
    Pattern = {#bump{module=Module},'_'},
    Bumps = lists:keysort(1,ets:match_object(?COLLECTION_TABLE, Pattern)),
    analyse_clause(Analysis,Bumps);
do_analyse(Module, Analysis, function, Clauses) ->
    %% Function level = clause level folded per {M,F,A}.
    ClauseResult = do_analyse(Module, Analysis, clause, Clauses),
    merge_clauses(ClauseResult, merge_fun(Analysis));
do_analyse(Module, Analysis, module, Clauses) ->
    %% Module level = function level folded once more.
    FunctionResult = do_analyse(Module, Analysis, function, Clauses),
    Result = merge_functions(FunctionResult, merge_fun(Analysis)),
    {Module,Result}.

%% Fold sorted bump entries into per-clause results.
analyse_clause(_,[]) ->
    [];
analyse_clause(coverage,
               [{#bump{module=M,function=F,arity=A,clause=C},_}|_]=Bumps) ->
    analyse_clause_cov(Bumps,{M,F,A,C},0,0,[]);
analyse_clause(calls,Bumps) ->
    analyse_clause_calls(Bumps,{x,x,x,x},[]).

%% Per-clause coverage: count lines (Ls) and uncovered lines (NotCov)
%% while successive bumps belong to the same clause; emit the
%% {Clause,{Covered,NotCovered}} result when the clause changes.
analyse_clause_cov([{#bump{module=M,function=F,arity=A,clause=C},N}|Bumps],
                   {M,F,A,C}=Clause,Ls,NotCov,Acc) ->
    analyse_clause_cov(Bumps,Clause,Ls+1,if N==0->NotCov+1; true->NotCov end,Acc);
analyse_clause_cov([{#bump{module=M1,function=F1,arity=A1,clause=C1},_}|_]=Bumps,
                   Clause,Ls,NotCov,Acc) ->
    analyse_clause_cov(Bumps,{M1,F1,A1,C1},0,0,[{Clause,{Ls-NotCov,NotCov}}|Acc]);
analyse_clause_cov([],Clause,Ls,NotCov,Acc) ->
    lists:reverse(Acc,[{Clause,{Ls-NotCov,NotCov}}]).

%% Per-clause call counts: the first bump seen for each clause carries
%% its count; subsequent bumps of the same clause are skipped.
analyse_clause_calls([{#bump{module=M,function=F,arity=A,clause=C},_}|Bumps],
                     {M,F,A,C}=Clause,Acc) ->
    analyse_clause_calls(Bumps,Clause,Acc);
analyse_clause_calls([{#bump{module=M1,function=F1,arity=A1,clause=C1},N}|Bumps],
                     _Clause,Acc) ->
    analyse_clause_calls(Bumps,{M1,F1,A1,C1},[{{M1,F1,A1,C1},N}|Acc]);
analyse_clause_calls([],_Clause,Acc) ->
    lists:reverse(Acc).
%% Combiner used when folding clause/function results together:
%% coverage results are {Covered,NotCovered} pairs added pairwise,
%% call results are plain counts.
merge_fun(coverage) ->
    fun({CovA,NotCovA}, {CovB,NotCovB}) ->
            {CovA+CovB, NotCovA+NotCovB}
    end;
merge_fun(calls) ->
    fun(CallsA, CallsB) ->
            CallsA+CallsB
    end.
%% Collapse consecutive per-clause results belonging to the same
%% {M,F,A} into a single per-function result; the input must be sorted
%% so all clauses of a function are adjacent.
merge_clauses(Clauses, MFun) ->
    merge_clauses(Clauses, MFun, []).

merge_clauses([{{M,F,A,_C1},R1},{{M,F,A,C2},R2}|Rest], MFun, Acc) ->
    %% Two adjacent clauses of the same function: combine and repeat.
    merge_clauses([{{M,F,A,C2},MFun(R1,R2)}|Rest], MFun, Acc);
merge_clauses([{{M,F,A,_C},R}|Rest], MFun, Acc) ->
    %% Last clause of this function: emit the {M,F,A} result.
    merge_clauses(Rest, MFun, [{{M,F,A},R}|Acc]);
merge_clauses([], _MFun, Acc) ->
    lists:reverse(Acc).
%% Fold per-function results into a single result for the module.
%% With no functions at all the (coverage) result is {0,0}.
merge_functions([{_MFA,First}|Rest], MFun) ->
    lists:foldl(fun({_MFA1,R}, Acc) -> MFun(Acc, R) end, First, Rest);
merge_functions([], _MFun) ->                   % There are no clauses.
    {0,0}.                                      % No function can be covered or notcov.
%% Analyse a list of modules to output files (HTML or plain text),
%% one worker per module.
analyse_list_to_file(Modules, Opts, State) ->
    {LoadedMF, ImportedMF, Error} = are_loaded(Modules, State, [], [], []),
    collect([M || {M,_} <- LoadedMF], State#main_state.nodes),
    OutDir = proplists:get_value(outdir,Opts),
    HTML = lists:member(html,Opts),
    Fun = fun({Module,File}) ->
                  OutFile = outfilename(OutDir,Module,HTML),
                  do_analyse_to_file(Module,File,OutFile,HTML,State)
          end,
    {Ok,Error1} = split_ok_error(pmap(Fun, LoadedMF++ImportedMF),[],[]),
    {result,Ok,Error ++ Error1}.

%% Analyse every known module to output files.
analyse_all_to_file(Opts, State) ->
    collect(State#main_state.nodes),
    AllModules = get_all_modules(State),
    OutDir = proplists:get_value(outdir,Opts),
    HTML = lists:member(html,Opts),
    Fun = fun({Module,File}) ->
                  OutFile = outfilename(OutDir,Module,HTML),
                  do_analyse_to_file(Module,File,OutFile,HTML,State)
          end,
    {Ok,Error} = split_ok_error(pmap(Fun, AllModules),[],[]),
    {result,Ok,Error}.

%% All compiled and imported modules as {Module,File}; compiled
%% entries (2-tuples, listed first) win over imported ones (3-tuples).
get_all_modules(State) ->
    get_all_modules(State#main_state.compiled ++ State#main_state.imported,[]).

get_all_modules([{Module,File}|Rest],Acc) ->
    get_all_modules(Rest,[{Module,File}|Acc]);
get_all_modules([{Module,File,_}|Rest],Acc) ->
    case lists:keymember(Module,1,Acc) of
        true -> get_all_modules(Rest,Acc);
        false -> get_all_modules(Rest,[{Module,File}|Acc])
    end;
get_all_modules([],Acc) ->
    Acc.
%% Partition a list of {ok,R}/{error,R} results into two lists,
%% each accumulated in reverse order of the input.
split_ok_error(Results, Ok0, Error0) ->
    lists:foldl(fun({ok, R}, {Ok, Error}) -> {[R | Ok], Error};
                   ({error, R}, {Ok, Error}) -> {Ok, [R | Error]}
                end,
                {Ok0, Error0}, Results).
%% Analyse one module to file in a spawned worker and reply to From.
%% A loaded module's counter data is collected first.
do_parallel_analysis_to_file(Module, Opts, Loaded, From, State) ->
    File = case Loaded of
               {loaded, File0} ->
                   [{Module,Clauses}] =
                       ets:lookup(?COVER_CLAUSE_TABLE,Module),
                   collect(Module, Clauses,
                           State#main_state.nodes),
                   File0;
               {imported, File0, _} ->
                   File0
           end,
    HTML = lists:member(html,Opts),
    %% An explicit outfile option wins over the generated name.
    OutFile =
        case proplists:get_value(outfile,Opts) of
            undefined ->
                outfilename(proplists:get_value(outdir,Opts),Module,HTML);
            F ->
                F
        end,
    reply(From, do_analyse_to_file(Module,File,OutFile,HTML,State)).

%% Locate the module's source file and write the annotated listing.
do_analyse_to_file(Module,File,OutFile,HTML,State) ->
    case find_source(Module, File) of
        {beam,_BeamFile} ->
            {error,{no_source_code_found,Module}};
        ErlFile ->
            analyse_info(Module,State#main_state.imported),
            do_analyse_to_file1(Module,OutFile,ErlFile,HTML)
    end.
%% do_analyse_to_file1(Module,OutFile,ErlFile) -> {ok,OutFile} | {error,Error}
%%   Module = atom()
%%   OutFile = ErlFile = string()
%% Write the annotated listing: optional HTML header, a timestamped
%% info section, then the source annotated with merged hit counts.
do_analyse_to_file1(Module, OutFile, ErlFile, HTML) ->
    case file:open(ErlFile, [read,raw,read_ahead]) of
        {ok, InFd} ->
            case file:open(OutFile, [write,raw,delayed_write]) of
                {ok, OutFd} ->
                    Enc = encoding(ErlFile),
                    if HTML ->
                            Header = create_header(OutFile, Enc),
                            H1Bin = unicode:characters_to_binary(Header,Enc,Enc),
                            ok = file:write(OutFd,H1Bin);
                       true -> ok
                    end,
                    %% Write some initial information to the output file
                    {{Y,Mo,D},{H,Mi,S}} = calendar:local_time(),
                    %% Zero-pad all date/time fields to two digits.
                    Timestamp =
                        io_lib:format("~p-~s-~s at ~s:~s:~s",
                                      [Y,
                                       string:pad(integer_to_list(Mo), 2, leading, $0),
                                       string:pad(integer_to_list(D), 2, leading, $0),
                                       string:pad(integer_to_list(H), 2, leading, $0),
                                       string:pad(integer_to_list(Mi), 2, leading, $0),
                                       string:pad(integer_to_list(S), 2, leading, $0)]),
                    OutFileInfo =
                        if HTML ->
                                create_footer(ErlFile, Timestamp);
                           true ->
                                ["File generated from ",ErlFile," by COVER ",
                                 Timestamp, "\n\n",
                                 "**************************************"
                                 "**************************************"
                                 "\n\n"]
                        end,
                    H2Bin = unicode:characters_to_binary(OutFileInfo,Enc,Enc),
                    ok = file:write(OutFd, H2Bin),
                    %% Select {Line,Count} for all positive line numbers
                    %% of this module, sorted; duplicate line entries
                    %% are merged by summing their counts.
                    Pattern = {#bump{module=Module,line='$1',_='_'},'$2'},
                    MS = [{Pattern,[{is_integer,'$1'},{'>','$1',0}],[{{'$1','$2'}}]}],
                    CovLines0 =
                        lists:keysort(1, ets:select(?COLLECTION_TABLE, MS)),
                    CovLines = merge_dup_lines(CovLines0),
                    print_lines(Module, CovLines, InFd, OutFd, 1, HTML),
                    if HTML ->
                            ok = file:write(OutFd, close_html());
                       true -> ok
                    end,
                    ok = file:close(OutFd),
                    ok = file:close(InFd),
                    {ok, OutFile};
                {error, Reason} ->
                    {error, {file, OutFile, Reason}}
            end;
        {error, Reason} ->
            {error, {file, ErlFile, Reason}}
    end.
%% Sum the counts of adjacent entries that refer to the same line
%% number (the input is sorted, so duplicates are adjacent).
merge_dup_lines(CovLines) ->
    merge_dup_lines(CovLines, []).

merge_dup_lines([{Line, Count} | Rest], [{Line, Sum} | Acc]) ->
    merge_dup_lines(Rest, [{Line, Sum + Count} | Acc]);
merge_dup_lines([Entry | Rest], Acc) ->
    merge_dup_lines(Rest, [Entry | Acc]);
merge_dup_lines([], Acc) ->
    lists:reverse(Acc).
%% Copy the source file line by line to the output, prefixing each
%% executable line with its (already merged) execution count.
%% CovLines is sorted by line number, so its head either matches the
%% current line L or belongs to a later line.
print_lines(Module, CovLines, InFd, OutFd, L, HTML) ->
    case file:read_line(InFd) of
        eof ->
            ignore;
        {ok,RawLine} ->
            Line = escape_lt_and_gt(RawLine,HTML),
            case CovLines of
                [{L,N}|CovLines1] ->
                    if N=:=0, HTML=:=true ->
                            MissedLine = table_row("miss", Line, L, N),
                            ok = file:write(OutFd, MissedLine);
                       HTML=:=true ->
                            HitLine = table_row("hit", Line, L, N),
                            ok = file:write(OutFd, HitLine);
                       N < 1000000 ->
                            %% Pad the count to six columns.
                            Str = string:pad(integer_to_list(N), 6, leading, $\s),
                            ok = file:write(OutFd, [Str,fill1(),Line]);
                       N < 10000000 ->
                            Str = integer_to_list(N),
                            ok = file:write(OutFd, [Str,fill2(),Line]);
                       true ->
                            Str = integer_to_list(N),
                            ok = file:write(OutFd, [Str,fill3(),Line])
                    end,
                    print_lines(Module, CovLines1, InFd, OutFd, L+1, HTML);
                _ ->                            %Including comment lines
                    NonCoveredContent =
                        if HTML -> table_row(Line, L);
                           true -> [tab(),Line]
                        end,
                    ok = file:write(OutFd, NonCoveredContent),
                    print_lines(Module, CovLines, InFd, OutFd, L+1, HTML)
            end
    end.

%% Separators for the plain-text listing; the shrinking fills keep
%% wide counts aligned with the six-column padding above.
tab() -> " | ".
fill1() -> "..| ".
fill2() -> ".| ".
fill3() -> "| ".
%% HTML sections

%% HTML prologue (with inlined stylesheet) up to the page heading.
create_header(OutFile, Enc) ->
    ["<!doctype html>\n"
     "<html>\n"
     "<head>\n"
     "<meta charset=\"",html_encoding(Enc),"\">\n"
     "<title>",OutFile,"</title>\n"
     "<style>"] ++
        read_stylesheet() ++
        ["</style>\n",
         "</head>\n"
         "<body>\n"
         "<h1><code>",OutFile,"</code></h1>\n"].

%% Provenance footer plus the opening tags of the listing table.
create_footer(ErlFile, Timestamp) ->
    ["<footer><p>File generated from <code>",ErlFile,
     "</code> by <a href=\"http://erlang.org/doc/man/cover.html\">cover</a> at ",
     Timestamp,"</p></footer>\n<table>\n<tbody>\n"].

%% Close the listing table (header emitted after the body) and the
%% document.
close_html() ->
    ["</tbody>\n",
     "<thead>\n",
     "<tr>\n",
     "<th>Line</th>\n",
     "<th>Hits</th>\n",
     "<th>Source</th>\n",
     "</tr>\n",
     "</thead>\n",
     "</table>\n",
     "</body>\n"
     "</html>\n"].

%% A listing row for an executable line, CSS-classed "miss" or "hit".
table_row(CssClass, Line, L, N) ->
    ["<tr class=\"",CssClass,"\">\n", table_data(Line, L, N)].
%% A listing row for a non-executable line (no hit count).
table_row(Line, L) ->
    ["<tr>\n", table_data(Line, L, "")].

%% The three cells of a listing row: anchored line number, hit count
%% and the source text (newline removed).
table_data(Line, L, N) ->
    LineNoNL = Line -- "\n",
    ["<td class=\"line\" id=\"L",integer_to_list(L),"\">",
     "<a href=\"#L",integer_to_list(L),"\">",
     integer_to_list(L),
     "</a></td>\n",
     "<td class=\"hits\">",maybe_integer_to_list(N),"</td>\n",
     "<td class=\"source\"><code>",LineNoNL,"</code></td>\n</tr>\n"].
%% Render a hit count for the "hits" cell: 0 becomes a sad-face
%% marker, other integers are printed, anything else (the "" used for
%% non-executable lines) renders empty.
maybe_integer_to_list(N) when is_integer(N) ->
    case N of
        0 -> "<pre style=\"display: inline;\">:-(</pre>";
        _ -> integer_to_list(N)
    end;
maybe_integer_to_list(_Other) ->
    "".
%% Read the stylesheet shipped in the tools application's priv dir.
read_stylesheet() ->
    PrivDir = code:priv_dir(?TOOLS_APP),
    {ok, Css} = file:read_file(filename:join(PrivDir, ?STYLESHEET)),
    [Css].
%%%--Export--------------------------------------------------------------

%% Export cover data for one module ('_' means all modules) to OutFile
%% in the length-prefixed term format read by do_import_to_table/3.
do_export(Module, OutFile, From, State) ->
    case file:open(OutFile,[write,binary,raw,delayed_write]) of
        {ok,Fd} ->
            Reply =
                case Module of
                    '_' ->
                        export_info(State#main_state.imported),
                        collect(State#main_state.nodes),
                        do_export_table(State#main_state.compiled,
                                        State#main_state.imported,
                                        Fd);
                    _ ->
                        export_info(Module,State#main_state.imported),
                        try is_loaded(Module, State) of
                            {loaded, File} ->
                                [{Module,Clauses}] =
                                    ets:lookup(?COVER_CLAUSE_TABLE,Module),
                                collect(Module, Clauses,
                                        State#main_state.nodes),
                                do_export_table([{Module,File}],[],Fd);
                            {imported, File, ImportFiles} ->
                                %% don't know if I should allow this -
                                %% export a module which is only imported
                                Imported = [{Module,File,ImportFiles}],
                                do_export_table([],Imported,Fd)
                        catch throw:_ ->
                                {error,{not_cover_compiled,Module}}
                        end
                end,
            ok = file:close(Fd),
            reply(From, Reply);
        {error,Reason} ->
            reply(From, {error, {cant_open_file,OutFile,Reason}})
    end.
%% Write data for the compiled modules plus those imported modules
%% that are not also compiled.
do_export_table(Compiled, Imported, Fd) ->
    ModList = merge(Imported,Compiled),
    write_module_data(ModList,Fd).
%% Add imported modules ({Module,File,ImportFiles} tuples) to the list
%% of compiled {Module,File} pairs, skipping any module that is
%% already present (compiled data wins over imported data).
merge([{Module,File,_ImportFiles}|Imported], ModuleList) ->
    NewList = case lists:keymember(Module, 1, ModuleList) of
                  true -> ModuleList;
                  false -> [{Module,File}|ModuleList]
              end,
    merge(Imported, NewList);
merge([], ModuleList) ->
    ModuleList.
%% For each module write a {file,Module,File} header, the clause info
%% and every bump entry to the export file.
write_module_data([{Module,File}|ModList],Fd) ->
    write({file,Module,File},Fd),
    %% lookup returns [{Module,Clauses}]; the whole pair is written,
    %% matching the {Module,Clauses} case in do_import_to_table/4.
    [Clauses] = ets:lookup(?COLLECTION_CLAUSE_TABLE,Module),
    write(Clauses,Fd),
    ModuleData = ets:match_object(?COLLECTION_TABLE,{#bump{module=Module},'_'}),
    do_write_module_data(ModuleData,Fd),
    write_module_data(ModList,Fd);
write_module_data([],_Fd) ->
    ok.

do_write_module_data([H|T],Fd) ->
    write(H,Fd),
    do_write_module_data(T,Fd);
do_write_module_data([],_Fd) ->
    ok.

%% Write one term, length-prefixed: a single size byte for terms up to
%% 255 bytes, otherwise a {'$size',Size} marker term is written first.
write(Element,Fd) ->
    Bin = term_to_binary(Element,[compressed]),
    case byte_size(Bin) of
        Size when Size > 255 ->
            SizeBin = term_to_binary({'$size',Size}),
            ok = file:write(Fd, <<(byte_size(SizeBin)):8,SizeBin/binary,Bin/binary>>);
        Size ->
            ok = file:write(Fd,<<Size:8,Bin/binary>>)
    end,
    ok.
%%%--Import--------------------------------------------------------------

do_import_to_table(Fd,ImportFile,Imported) ->
    do_import_to_table(Fd,ImportFile,Imported,[]).

%% Read terms from an export file and load them into the collection
%% tables. Modules rejected by add_imported/4 are remembered in
%% DontImport and their subsequent data records are skipped.
do_import_to_table(Fd,ImportFile,Imported,DontImport) ->
    case get_term(Fd) of
        {file,Module,File} ->
            case add_imported(Module, File, ImportFile, Imported) of
                {ok,NewImported} ->
                    do_import_to_table(Fd,ImportFile,NewImported,DontImport);
                dont_import ->
                    do_import_to_table(Fd,ImportFile,Imported,
                                       [Module|DontImport])
            end;
        {Key=#bump{module=Module},Val} ->
            case lists:member(Module,DontImport) of
                false ->
                    insert_in_collection_table(Key,Val);
                true ->
                    ok
            end,
            do_import_to_table(Fd,ImportFile,Imported,DontImport);
        {Module,Clauses} ->
            case lists:member(Module,DontImport) of
                false ->
                    ets:insert(?COLLECTION_CLAUSE_TABLE,{Module,Clauses});
                true ->
                    ok
            end,
            do_import_to_table(Fd,ImportFile,Imported,DontImport);
        eof ->
            Imported
    end.

%% Read one length-prefixed term; a {'$size',Size} marker means the
%% real (large) term follows with the given size.
get_term(Fd) ->
    case file:read(Fd,1) of
        {ok,<<Size1:8>>} ->
            {ok,Bin1} = file:read(Fd,Size1),
            case binary_to_term(Bin1) of
                {'$size',Size2} ->
                    {ok,Bin2} = file:read(Fd,Size2),
                    binary_to_term(Bin2);
                Term ->
                    Term
            end;
        eof ->
            eof
    end.
%%%--Reset---------------------------------------------------------------

%% Reset main node and all remote nodes
do_reset_main_node(Module,Nodes) ->
    reset_counters(Module),
    do_reset_collection_table(Module),
    remote_reset(Module,Nodes).

%% Remove all collected data for Module on the main node.
do_reset_collection_table(Module) ->
    ets:delete(?COLLECTION_CLAUSE_TABLE,Module),
    ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'}).

%% Forget everything about Module: clause info, counters and (on the
%% main node only) collected data.
do_clear(Module) ->
    ets:match_delete(?COVER_CLAUSE_TABLE, {Module,'_'}),
    clear_counters(Module),
    case lists:member(?COLLECTION_TABLE, ets:all()) of
        true ->
            %% We're on the main node
            ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'});
        false ->
            ok
    end.

%% Called when a cover-compiled module turns out not to be loaded:
%% purge its data everywhere and drop it from the compiled list.
not_loaded(Module, unloaded, State) ->
    do_clear(Module),
    remote_unload(State#main_state.nodes,[Module]),
    Compiled = update_compiled([Module],
                               State#main_state.compiled),
    State#main_state{ compiled = Compiled };
not_loaded(_Module,_Else, State) ->
    State.
%%%--Div-----------------------------------------------------------------

%% Escape '<', '>' and '&' for HTML output; when the HTML argument is
%% not true the line is passed through untouched.
escape_lt_and_gt(Rawline, true) ->
    escape_lt_and_gt1(Rawline, []);
escape_lt_and_gt(Rawline, _HTML) ->
    Rawline.

%% Accumulate the escaped line in reverse.
escape_lt_and_gt1([C | T], Acc) ->
    Escaped = case C of
                  $< -> lists:reverse("&lt;");
                  $> -> lists:reverse("&gt;");
                  $& -> lists:reverse("&amp;");
                  _ -> [C]
              end,
    escape_lt_and_gt1(T, Escaped ++ Acc);
escape_lt_and_gt1([], Acc) ->
    lists:reverse(Acc).
%%%--Internal functions for parallelization------------------------------

%% Parallel map: split List into chunks (about two per scheduler), run
%% Fun over each chunk in a monitored process, and gather the results.
pmap(Fun,List) ->
    NTot = length(List),
    NProcs = erlang:system_info(schedulers) * 2,
    NPerProc = (NTot div NProcs) + 1,
    Mons = pmap_spawn(Fun,NPerProc,List,[]),
    pmap_collect(Mons,[]).

pmap_spawn(_,_,[],Mons) ->
    Mons;
pmap_spawn(Fun,NPerProc,List,Mons) ->
    {L1,L2} = if length(List)>=NPerProc -> lists:split(NPerProc,List);
                 true -> {List,[]}              % last chunk
              end,
    Mon =
        spawn_monitor(
          fun() ->
                  %% The chunk result travels back in the exit reason.
                  exit({pmap_done,lists:map(Fun,L1)})
          end),
    pmap_spawn(Fun,NPerProc,L2,[Mon|Mons]).

%% Wait for all workers; a worker dying with any reason other than
%% {pmap_done,_} aborts the whole operation.
pmap_collect([],Acc) ->
    lists:append(Acc);
pmap_collect(Mons,Acc) ->
    receive
        {'DOWN', Ref, process, Pid, {pmap_done,Result}} ->
            pmap_collect(lists:delete({Pid,Ref},Mons),[Result|Acc]);
        {'DOWN', Ref, process, Pid, Reason} = Down ->
            case lists:member({Pid,Ref},Mons) of
                true ->
                    %% Something went really wrong - don't hang!
                    exit(Reason);
                false ->
                    %% This should be handled somewhere else
                    self() ! Down,
                    pmap_collect(Mons,Acc)
            end
    end.
%%%-----------------------------------------------------------------
%%% Decide which encoding to use when analyzing to file.
%%% The target file contains the file path, so if either the file name
%%% encoding or the encoding of the source file is utf8, then we need
%%% to use utf8.
encoding(File) ->
    case file:native_name_encoding() of
        latin1 ->
            case epp:read_encoding(File) of
                none ->
                    %% No coding comment in the file: use epp's default.
                    epp:default_encoding();
                E ->
                    E
            end;
        utf8 ->
            utf8
    end.
%% Map a source encoding to the corresponding HTML charset name.
%% (The dataset-metadata garbage that was fused onto the final line of
%% this file has been removed; it broke compilation.)
html_encoding(latin1) ->
    "iso-8859-1";
html_encoding(utf8) ->
    "utf-8".
%%% SERESYE, an ERlang Expert SYstem Engine
%%%
%%% Copyright (c) 2005-2010, <NAME>, <NAME>
%%% All rights reserved.
%%%
%%% You may use this file under the terms of the BSD License. See the
%%% license distributed with this project or
%%% http://www.opensource.org/licenses/bsd-license.php
%%% ======================================================
%%% Automotive Expert System
%%%
%%% This expert system diagnoses some simple
%%% problems with a car.
%%%
%%% It is a bare translation of the same example
%%% provided in CLIPS Version 6.0
%%%
%%% To execute, type 'auto:start().'
%%% ======================================================
-module(seresyee_auto).
-export([determine_battery_state/2,
determine_conductivity_test/4, determine_engine_state/2,
determine_gas_level/3, determine_knocking/2,
determine_low_output/2, determine_misfiring/2,
determine_point_surface_state_1/3,
determine_point_surface_state_2/2,
determine_rotation_state/2, determine_sluggishness/2,
no_repairs/2, normal_engine_state_conclusions/2,
print_repair/3, start/0,
unsatisfactory_engine_state_conclusions/2]).
-neg_rule({determine_engine_state, [{'working-state', engine, '__IGNORE_UNDERSCORE__'},
{repair, '__IGNORE_UNDERSCORE__'}]}).
-include_lib("seresye/include/seresye.hrl").
%% **********************
%% * ENGINE STATE RULES *
%% **********************

%% A normally working engine implies: no repair needed, normal spark,
%% charged battery and a rotating engine.
normal_engine_state_conclusions(Engine,
                                {'working-state', engine, normal}) ->
    seresye_engine:assert(Engine,
                          [{repair, "No repair needed."},
                           {'spark-state', engine, normal},
                           {'charge-state', battery, charged},
                           {'rotation-state', engine, rotates}]).

%% An unsatisfactory (but starting) engine still implies a charged
%% battery and a rotating engine.
unsatisfactory_engine_state_conclusions(Engine,
                                        {'working-state', engine,
                                         unsatisfactory}) ->
    seresye_engine:assert(Engine,
                          [{'charge-state', battery, charged},
                           {'rotation-state', engine, rotates}]).
%% ***************
%% * QUERY RULES *
%% ***************
%% NOTE: the "when not {rule, ...}" guards below are SERESYE rule
%% conditions (presumably negated working-memory patterns handled by
%% the rule compiler), not ordinary Erlang guards.

%% Ask whether the engine starts and runs normally; assert the
%% resulting working-state fact.
determine_engine_state(Engine, {start, _})
  when not {rule, [{'working-state', engine, _}, {repair, _}]} ->
    case ask_yn('Does the engine start (yes/no)? ') of
        true ->
            case ask_yn('Does the engine run normally (yes/no)? ')
            of
                true ->
                    seresye_engine:assert(Engine,
                                          {'working-state', engine, normal});
                _ ->
                    seresye_engine:assert(Engine,
                                          {'working-state', engine, unsatisfactory})
            end;
        _ ->
            seresye_engine:assert(Engine,
                                  {'working-state', engine, 'does-not-start'})
    end.

%% For a non-starting engine, ask whether it rotates and derive the
%% rotation-state and spark-state facts.
determine_rotation_state(Engine,
                         {'working-state', engine, 'does-not-start'})
  when not
       {rule, [{'rotation-state', engine, _}, {repair, _}]} ->
    case ask_yn('Does the engine rotate (yes/no)? ') of
        true ->
            seresye_engine:assert(Engine,
                                  [{'rotation-state', engine, rotates},
                                   {'spark-state', engine, 'irregular-spark'}]);
        _ ->
            seresye_engine:assert(Engine,
                                  [{'rotation-state', engine, 'does-not-rotate'},
                                   {'spark-state', engine, 'does-not-spark'}])
    end.
%% Sluggish engine -> clean the fuel line.
determine_sluggishness(Engine,
                       {'working-state', engine, unsatisfactory})
  when not {rule, [{repair, _}]} ->
    case ask_yn('Is the engine sluggish (yes/no)? ') of
        true ->
            seresye_engine:assert(Engine, {repair, "Clean the fuel line."});
        _ -> Engine
    end.

%% Misfiring engine -> point gap adjustment (and irregular spark).
determine_misfiring(Engine,
                    {'working-state', engine, unsatisfactory})
  when not {rule, [{repair, _}]} ->
    case ask_yn('Does the engine misfire (yes/no)? ') of
        true ->
            seresye_engine:assert(Engine,
                                  [{repair, "Point gap adjustment."},
                                   {'spark-state', engine, 'irregular-spark'}]);
        _ -> Engine
    end.

%% Knocking engine -> timing adjustment.
determine_knocking(E,
                   {'working-state', engine, unsatisfactory})
  when not {rule, [{repair, _}]} ->
    case ask_yn('Does the engine knock (yes/no)? ') of
        true ->
            seresye_engine:assert(E, {repair, "Timing adjustment."});
        _ -> E
    end.
%% Record whether the engine output is low.
determine_low_output(E,
                     {'working-state', engine, unsatisfactory})
  when not {rule, [{symptom, engine, _}, {repair, _}]} ->
    case
        ask_yn('Is the output of the engine low (yes/no)? ')
    of
        true ->
            seresye_engine:assert(E, {symptom, engine, 'low-output'});
        _ ->
            seresye_engine:assert(E, {symptom, engine, 'not-low-output'})
    end.

%% Non-starting but rotating engine: check for an empty tank.
determine_gas_level(E,
                    {'working-state', engine, 'does-not-start'},
                    {'rotation-state', engine, rotates})
  when not {rule, [{repair, _}]} ->
    case
        ask_yn('Does the tank have any gas in it (yes/no)? ')
    of
        false -> seresye_engine:assert(E, {repair, "Add gas."});
        _ -> E
    end.

%% Non-rotating engine: check the battery charge.
determine_battery_state(E,
                        {'rotation-state', engine, 'does-not-rotate'})
  when not
       {rule, [{'charge-state', battery, _}, {repair, _}]} ->
    case ask_yn('Is the battery charged (yes/no)? ') of
        true ->
            seresye_engine:assert(E, {'charge-state', battery, charged});
        _ ->
            seresye_engine:assert(E,
                                  [{repair, "Charge the battery."},
                                   {'charge-state', battery, dead}])
    end.
%% Irregular spark on a non-starting engine: inspect the points.
determine_point_surface_state_1(E,
                                {'working-state', engine, 'does-not-start'},
                                {'spark-state', engine, 'irregular-spark'})
  when not {rule, [{repair, _}]} ->
    dpss(E).

%% Low engine output: also inspect the points.
determine_point_surface_state_2(E,
                                {symptom, engine, 'low-output'})
  when not {rule, [{repair, _}]} ->
    dpss(E).

%% Shared point-surface dialogue. The character lists spell the
%% answer prefixes "burned" and "contaminated".
dpss(E) ->
    case
        ask_question('What is the surface state of the points (normal/burned/contaminated)? ')
    of
        [$b, $u, $r, $n, $e, $d | _] ->
            seresye_engine:assert(E, {repair, "Replace the points."});
        [$c, $o, $n, $t, $a, $m, $i, $n, $a, $t, $e, $d | _] ->
            seresye_engine:assert(E, {repair, "Clean the points."});
        _ -> E
    end.
%% Charged battery but no spark: test the ignition coil.
%% NOTE(review): this guard ends with "; true", which (read as plain
%% Erlang) makes the guard sequence always succeed, unlike the sibling
%% rules that use "not {rule,...}" alone or with ", true". Confirm
%% with the SERESYE rule compiler whether this is intended.
determine_conductivity_test(E,
                            {'working-state', engine, 'does-not-start'},
                            {'spark-state', engine, 'does-not-spark'},
                            {'charge-state', battery, charged})
  when not {rule, [{repair, _}]}; true ->
    case
        ask_yn('Is the conductivity test for the ignition coil positive (yes/no)? ')
    of
        true ->
            seresye_engine:assert(E,
                                  {repair, "Repair the distributor lead wire."});
        _ ->
            seresye_engine:assert(E, {repair, "Replace the ignition coil."})
    end.
%% Fallback rule (installed at low priority by start/0): if no rule
%% produced a repair, suggest a mechanic.
no_repairs(E, {start, _})
  when not {rule, [{repair, _}]}, true ->
    seresye_engine:assert(E,
                          {repair, "Take your car to a mechanic."}).

%% Print whatever repair was concluded; returns the engine unchanged.
print_repair(E, {repair, X}, {start, _}) ->
    io:format("Suggested Repair: ~p~n", [X]), E.
%% Prompt the user and interpret an answer starting with 'y' as yes.
%% (Crashes on eof/empty input, matching the original head-match.)
ask_yn(Prompt) ->
    [First | _] = io:get_line(Prompt),
    First =:= $y.

%% Prompt the user and return the raw input line.
ask_question(Prompt) ->
    io:get_line(Prompt).
%% Build a SERESYE engine, install all diagnosis rules with their
%% priorities (conclusions/printing high, query rules normal, the
%% no_repairs fallback low) and assert the initial {start, ok} fact to
%% kick off the inference. (The dataset-metadata garbage that was
%% fused onto the final line of this file has been removed; it broke
%% compilation.)
start() ->
    Engine0 = seresye_engine:new(),
    %% Rules with high priority (10)
    Engine2 = lists:foldl(fun (Rule, Engine1) ->
                                  seresye_engine:add_rule(Engine1, {?MODULE, Rule}, 10)
                          end,
                          Engine0,
                          [normal_engine_state_conclusions,
                           unsatisfactory_engine_state_conclusions,
                           print_repair]),
    %% Rules with normal priority (0)
    Engine3 = lists:foldl(fun (Rule, Engine1) ->
                                  seresye_engine:add_rule(Engine1, {?MODULE, Rule})
                          end,
                          Engine2,
                          [determine_engine_state, determine_rotation_state,
                           determine_sluggishness, determine_misfiring,
                           determine_knocking, determine_low_output,
                           determine_gas_level, determine_battery_state,
                           determine_point_surface_state_1,
                           determine_point_surface_state_2,
                           determine_conductivity_test]),
    %% Rules with low priority (-10)
    Engine4 = seresye_engine:add_rule(Engine3,
                                      {?MODULE, no_repairs}, -10),
    seresye_engine:assert(Engine4, {start, ok}).
%%-------------------------------------------------------------------
%%
%% Copyright (c) 2016, <NAME> <<EMAIL>>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%%-------------------------------------------------------------------
-module(sbroker_fq_queue).
-behaviour(sbroker_queue).
-behaviour(sbroker_fair_queue).
-export([init/3]).
-export([handle_in/5]).
-export([handle_out/2]).
-export([handle_fq_out/2]).
-export([handle_timeout/2]).
-export([handle_cancel/3]).
-export([handle_info/3]).
-export([code_change/4]).
-export([config_change/3]).
-export([len/1]).
-export([send_time/1]).
-export([terminate/2]).
-type action() :: {Drops :: non_neg_integer(),
TimeoutIncrement :: timeout()}.
-record(state, {config :: [action()],
actions :: [action()],
queue :: sbroker_queue:internal_queue()}).
%% This sbroker_queue module takes a list of tuples. The first element is a
%% non_neg_integer() that drops the integer at head of the list (or the whole
%% queue if the queue length is lower). The tail is kept and used for the next
%% call. Once the list is emptied the original list is used in its place. The
%% second element is a timeout increment which is added to the current time to
%% generate the next timeout time.
%%
%% Intended only for testing the sbroker_fair_queue

%% Initialise the queue state with the configured action list and run
%% the first action immediately.
init(Q, Time, Actions) ->
    handle_timeout(Time, #state{config=Actions, actions=Actions, queue=Q}).

%% Enqueue a request, monitoring the caller so abandoned requests can
%% be dropped later, then apply the next drop/timeout action.
handle_in(SendTime, {Pid, _} = From, Value, Time, #state{queue=Q} = State) ->
    Ref = monitor(process, Pid),
    NState = State#state{queue=queue:in({SendTime, From, Value, Ref}, Q)},
    handle_timeout(Time, NState).

%% Apply the next action, then dequeue the head item (if any).
handle_out(Time, State) ->
    {#state{queue=Q} = NState, Timeout} = handle_timeout(Time, State),
    case queue:out(Q) of
        {empty, NQ} ->
            {empty, NState#state{queue=NQ}};
        {{value, {SendTime, From, Value, Ref}}, NQ} ->
            {SendTime, From, Value, Ref, NState#state{queue=NQ}, Timeout}
    end.
%% sbroker_fair_queue callback: like handle_out/2 but an empty queue
%% also reports its next timeout (infinity).
handle_fq_out(Time, State) ->
    case handle_out(Time, State) of
        {_, _, _, _, _, _} = Out ->
            Out;
        {empty, NState} ->
            {empty, NState, infinity}
    end.

%% Apply the next scripted action: drop the configured number of items
%% from the head of the queue and compute the next timeout time. When
%% the action list runs out it is refilled from the original config
%% (an empty config means no actions at all).
handle_timeout(_, #state{config=[], actions=[]} = State) ->
    {State, infinity};
handle_timeout(Time, #state{config=Actions, actions=[]} = State) ->
    handle_timeout(Time, State#state{actions=Actions});
handle_timeout(Time,
               #state{actions=[{Drops, TimeoutIncr} | Actions],
                      queue=Q} = State) ->
    %% Never drop more items than are queued.
    Drops2 = min(Drops, queue:len(Q)),
    {DropQ, NQ} = queue:split(Drops2, Q),
    drop_queue(Time, DropQ),
    NState = State#state{actions=Actions, queue=NQ},
    case TimeoutIncr of
        infinity ->
            {NState, infinity};
        _ ->
            {NState, Time+TimeoutIncr}
    end.
%% Remove all queued requests whose tag matches Tag, demonitoring each one as
%% a side effect of the filter. Returns false when nothing was cancelled,
%% otherwise the number of entries removed.
handle_cancel(Tag, Time, #state{queue=Q} = State) ->
    Len = queue:len(Q),
    Cancel = fun({_, {_, Tag2}, _, Ref}) when Tag2 =:= Tag ->
                     demonitor(Ref, [flush]),
                     false;
                (_) ->
                     true
             end,
    NQ = queue:filter(Cancel, Q),
    {NState, TimeoutNext} = handle_timeout(Time, State#state{queue=NQ}),
    case queue:len(NQ) of
        Len ->
            %% Length unchanged: no entry matched Tag.
            {false, NState, TimeoutNext};
        NLen ->
            {Len - NLen, NState, TimeoutNext}
    end.

%% A monitored caller died: drop its entry from the queue. Any other message
%% is ignored apart from re-running the timeout logic.
handle_info({'DOWN', Ref, _, _, _}, Time, #state{queue=Q} = State) ->
    NQ = queue:filter(fun({_, _, _, Ref2}) -> Ref2 =/= Ref end, Q),
    handle_timeout(Time, State#state{queue=NQ});
handle_info(_, Time, State) ->
    handle_timeout(Time, State).
%% No state migration is needed across code changes; just refresh the timeout.
code_change(_, Time, State, _) ->
    handle_timeout(Time, State).

%% Adopt a new action configuration. An unchanged config keeps the current
%% position in the working action list; a changed one restarts from the new
%% list.
config_change(Config, Time, #state{config=Config} = State) ->
    handle_timeout(Time, State);
config_change(Config, Time, State) ->
    handle_timeout(Time, State#state{config=Config, actions=Config}).

%% Current queue length.
len(#state{queue=Q}) ->
    queue:len(Q).

%% Send time of the oldest queued request, or empty when the queue is empty.
send_time(#state{queue=Q}) ->
    case queue:peek(Q) of
        {value, {SendTime, _, _, _}} ->
            SendTime;
        empty ->
            empty
    end.

%% Hand the internal queue back to the broker on termination.
terminate(_, #state{queue=Q}) ->
    Q.
%% Internal

%% Drop every item in Q, notifying each caller via sbroker_queue:drop/3.
%% (Fixed: the closing line previously carried fused dataset-extraction
%% residue after the final '.', which is a syntax error.)
drop_queue(Time, Q) ->
    _ = [drop_item(Time, Item) || Item <- queue:to_list(Q)],
    ok.

%% Demonitor a dropped item's caller and send it the drop notification.
drop_item(Time, {SendTime, From, _, Ref}) ->
    demonitor(Ref, [flush]),
    sbroker_queue:drop(From, SendTime, Time).
%%%----------------------------------------------------------------------------
%%% Copyright (c) 2011-2012 <NAME>
%%% Licensed under MIT license. See LICENSE file for details.
%%%
%%% File : mmtr_lib.erl
%%% Author : <NAME> <<EMAIL>>
%%% Purpose : Collection of miscellaneous helper functions used throughout the
%%% simulation.
%%%----------------------------------------------------------------------------
-module(mmtr_lib).
-export([random_symbol/0
,random_price/0
,choice/1
,atoms_sequence/4
,timestamp/0
]).
-include("mmtr_config.hrl").
-include("mmtr_types.hrl").
%%-----------------------------------------------------------------------------
%% Function : random_symbol/0
%% Purpose  : Generates a random stock symbol.
%% Returns  : A string of 1-4 random uppercase letters.
%%-----------------------------------------------------------------------------
random_symbol() ->
    CharPool = "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    LenPool = [1, 2, 3, 4],
    Symbol = [choice(CharPool) || _ <- lists:seq(1, choice(LenPool))],
    Symbol.

%%-----------------------------------------------------------------------------
%% Function : random_price/0
%% Purpose  : Generates a random price.
%% Returns  : A float with 1-3 dollar digits and exactly two cent digits,
%%            built as a "D...D.CC" string and converted with list_to_float/1.
%%-----------------------------------------------------------------------------
random_price() ->
    DigPool = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
    LenPool = [1, 2, 3],
    Length = choice(LenPool),
    Dollars = [integer_to_list(choice(DigPool)) || _ <- lists:seq(1, Length)],
    Cents = [integer_to_list(choice(DigPool)) || _ <- lists:seq(1, 2)],
    Price = string:join(
        [string:join(Dollars, ""), string:join(Cents, "")],
        "."
    ),
    list_to_float(Price).
%%-----------------------------------------------------------------------------
%% Function : choice/1
%% Purpose  : Pick and return a random element from a given (non-empty) list.
%%-----------------------------------------------------------------------------
%% Fixed: the previous implementation used the 'random' module (deprecated in
%% OTP 18, removed in OTP 24) and reseeded it on every call from a time-based
%% hash, which is both slow and unnecessary. The 'rand' module seeds itself
%% automatically per process and needs no explicit reseeding.
choice(List) ->
    Index = rand:uniform(length(List)),
    lists:nth(Index, List).
%%-----------------------------------------------------------------------------
%% Function : reseed/0
%% Purpose  : Reseed pseudorandom number generator.
%%-----------------------------------------------------------------------------
%% NOTE(review): the 'random' module is deprecated (removed in OTP 24); this
%% should migrate to 'rand', which seeds itself and needs no explicit reseed.
reseed() ->
    random:seed(timehash(), timehash(), timehash()).
%%-----------------------------------------------------------------------------
%% Function : timestamp/0
%% Purpose  : Generates a Unix timestamp float (seconds with microsecond
%%            fraction).
%%-----------------------------------------------------------------------------
%% Fixed: the previous implementation concatenated the decimal text of the
%% MegaSecs/Secs/MicroSecs fields without zero-padding, so values such as
%% Secs = 42 or Micro = 7 produced numerically wrong timestamps. Computing the
%% value arithmetically avoids the padding problem entirely.
%% NOTE(review): now/0 is deprecated in modern OTP; erlang:system_time/1 is
%% the replacement, but now/0 is kept to match the rest of this module.
timestamp() ->
    {Mega, Sec, Micro} = now(),
    Mega * 1000000 + Sec + Micro / 1000000.
%%-----------------------------------------------------------------------------
%% Function : timehash/0
%% Purpose  : Generates a cryptographically unique integer based on current
%%            time.
%%-----------------------------------------------------------------------------
%% Fixed: crypto:sha/1 was removed from OTP (crypto:hash(sha, Data) is the
%% supported API). The digest is now converted to an integer directly with
%% binary:decode_unsigned/1 instead of concatenating each byte's decimal
%% text, which could map distinct digests to the same integer (for example
%% byte sequences [1,23] and [12,3] both yield "123").
timehash() ->
    Timestamp = float_to_list(timestamp()),
    HashBin = crypto:hash(sha, Timestamp),
    binary:decode_unsigned(HashBin).
%%-----------------------------------------------------------------------------
%% Function : atoms_sequence/4
%% Purpose  : Generate a list of numerically sequential atoms:
%%            [atom_1, atom_2, ...]
%%-----------------------------------------------------------------------------
%% Fixed: the closing line previously carried fused dataset-extraction residue
%% after the final '.', which is a syntax error.
%% NOTE: atoms are never garbage-collected; only call this with bounded,
%% trusted inputs.
atoms_sequence(String, Separator, FromNum, ToNum) ->
    [
        list_to_atom(
            string:join([String, integer_to_list(I)], Separator)
        ) ||
        I <- lists:seq(FromNum, ToNum)
    ].
%% @doc Interleaving composition and decomposition of protocols
-module(interleave).
-compile(export_all).
-compile(nowarn_export_all).
%% @doc Protocol format
%% Notes:
%% - Recursion variables are represented as string()
%% - Actions, labels, and assertion names are represented as atom()
%% - Control branches are represented as a list of atom/protocol pairs
%% - The atom endP is used to end a protocol because "end" is a reserved keyword.
-type protocol () :: {'act', atom(), protocol()}
| {'branch', [ {atom(), protocol()} ]}
| {'assert', atom(), protocol()}
| {'require', atom(), protocol()}
| {'consume', atom(), protocol()}
| {'rec', string(), protocol()}
| {'rvar', string()}
| 'endP'.
% # Examples
%% Single action.
e1() ->
    {act, n, endP}.
%% Requires n before acting; ill-asserted on its own.
e2() ->
    {require, n, {act, x, endP}}.
%% Asserts n then acts.
e3() ->
    {assert, n, {act, y, endP}}.
%% Two branches, each asserting n after an action.
e4() ->
    {branch, [{l, {act, b, {assert, n, endP}}} ,{r, {act, c, {assert, n, endP}}}]}.
%% Three branches, all asserting n.
e5() ->
    {branch, [{l, {assert, n, endP}} ,{r, {assert, n, endP}}, {m, {assert, n, endP}}]}.
%% Mixed branches: require / plain act / assert.
e6() ->
    {branch, [{l, {require, n, endP}} ,{r, {act, c, endP}}, {m, {assert, n, endP}}]}.
%% Password check: assert n only on the ok branch.
e7() ->
    {act, r_pwd, {branch, [{ok, {assert, n, endP}},{fail, endP}]}}.
%% Guarded action: needs n asserted elsewhere.
e8() ->
    {require, n, {act, do_banking, endP}}.
%% Simple recursion.
e9() ->
    {rec, "x", {act, a, {act, b, {rvar, "x"}}}}.
%% Recursion with a branch; the l branch requires n.
e10() ->
    {rec, "y", {act, a, {branch, [{l, {act, b, {require, n, endP}}}
                                 ,{r, {rvar, "y"}}]}}}.

%% Banking session: requires pin, loops over statement/payment/logout;
%% payment asserts pay and consumes a tan, logout consumes the pin.
bank() ->
    {require, pin, {rec, t, {branch, [{statement, {act, s_statement, {rvar, t}}},
                                      {payment, {assert, pay, {consume, tan, {act, r_details, {rvar, t}}}}},
                                      {logout, {consume, pin, endP}}]
                            }
                   }
    }.

%% PIN entry: asserts pin on success, then loops generating tans (ctan/0).
pintan() ->
    {act, r_pin, {branch, [
                           {ok, {assert, pin, {rec, r, {consume, pay, ctan()}}}},
                           {fail, endP}]
                 }
    }.

%% TAN generation step used by pintan/0; asserts tan on the tok branch.
ctan() ->
    {act, s_id, {act, r_tan, {branch, [{tok, {assert, tan, {rvar, r}}},
                                       {tfail, {rvar, r}}]
                             }
                }
    }.
%% A fully composed bank-with-authentication protocol (the kind of result
%% interleave/2 produces from bank/0 and pintan/0-style components).
bankauth() ->
    {act,r_pin,
     {branch,
      [{ok,
        {assert,pin,
         {require,pin,
          {rec,t,
           {require, keyp,
            {branch,
             [{payment,
               {assert,pay,
                {consume,pay,
                 {act,s_id,
                  {act,r_tan,
                   {branch,
                    [{tok,{assert,tan,{consume,tan,{act,r_details,{rvar,t}}}}},
                     {tfail,{rvar,t}}]}}}}}},
              {statement,{act,s_statement,{rvar,t}}},
              {logout,{consume,pin,endP}}]}}}}}},
       {fail,endP}]}
    }.

%% A simplified composed variant: payment needs keyp/tb, no pin handling.
bankauthsimple() ->
    {rec,t,
     {branch,
      [ {payment, {assert, keyp, {require, tb, {act,s_id, {act,r_tan, {branch,
            [ {tok,{assert,tan,{consume,tan,{act,r_details,{rvar,t}}}}},
              {tfail,{rvar,t}}
            ]}}
          }}
        }},
        {statement,{act,s_statement,{rvar,t}}},
        {logout,endP}
      ]
     }
    }.

%% Keycard component: repeatedly requires keyp; the tan branch asserts tb.
keycard() -> {rec, y, {require, keyp, {branch, [{tan, {assert, tb, {rvar, y}}},
                                                {keycard, {rvar, y}}
                                               ]
                                      }}
             }.

%% PIN component on its own: asserts pin only on the ok branch.
pin() ->
    {act, r_pin, {branch, [{ok, {assert, pin, endP}},
                           {fail, endP}]
                 }
    }.

%% TAN component: requires pin, then loops asserting tan on success.
tan() ->
    {require, pin, {rec, r, {act, s_id, {act, r_tan, {branch, [{ok, {assert, tan, {rvar, r}}},
                                                               {fail, {rvar, r}}]
                                                     }
                                        }
                            }
                   }
    }.

%% User<->agent component: each branch asserts the request kind (set/get/close).
agent1() -> {branch, [{r_ua_set_ua_set, {assert, n, {assert, set, {act, r_ua_coord, {assert, coord, {act, s_au_state, endP}}}}}},
                      {r_ua_get, {assert, n, {assert, get,{act, s_au_snap, {assert, snap, endP}}}}},
                      {r_ua_close,{assert, n, {assert, close, endP}}}]
            }.

%% Agent<->instrument component: consumes what agent1/0 asserted.
agent2() -> {consume, n, {branch, [{s_ai_set, {consume, set, {act, s_ai_coord, {consume, coord, {act, r_ia_state, endP}}}}},
                                   {s_ai_get, {consume, get, {act, r_ia_snap, {consume, snap, endP}}}},
                                   {s_ai_close, {consume, close, endP}}]
                         }}.

%% Recursive user-agent component (set/get/close loop).
userAgent() -> {rec, r, {branch, [ {ua_r_set, {act, ua_r_coord, {assert, set, {rvar, r}}}},
                                   {ua_r_get, {assert, get, {consume, snap, {act, au_s_snap, {rvar, r}}}}},
                                   {ua_r_close, {assert, close, endP}}
                                 ]}}.

%% Recursive agent-instrument component matching userAgent/0.
agentInstrument() -> {rec, t, {branch, [ {ai_s_set, {consume, set, {act, ai_s_coord, {rvar, t}}}},
                                         {ai_s_get, {consume, get, {act, ai_r_snap, {assert, snap, {rvar, t}}}}},
                                         {ui_s_close, {consume, close, endP}}
                                       ]}}.
%% @doc Pretty print protocols as a compact textual form, e.g.
%% "assert(n).a.{l : end; r : end}" and "nu x . (...)" for recursion.
-spec pprint(protocol()) -> string().
pprint({act, Act, P}) ->
    atom_to_list(Act) ++ "." ++ pprint(P);
pprint({branch, Branches}) ->
    "{" ++ pprintBranches(Branches) ++ "}";
pprint({assert, N, P}) ->
    "assert(" ++ atom_to_list(N) ++ ")." ++ pprint(P);
pprint({require, N, P}) ->
    "require(" ++ atom_to_list(N) ++ ")." ++ pprint(P);
pprint({consume, N, P}) ->
    "consume(" ++ atom_to_list(N) ++ ")." ++ pprint(P);
pprint({rec, BoundVar, P}) ->
    "nu " ++ BoundVar ++ " . (" ++ pprint(P) ++ ")";
pprint({rvar, Var}) ->
    Var;
pprint(endP) ->
    "end".
%% Power set of a list. For each element, subsets containing it are listed
%% before the subsets that omit it, so e.g. power([1,2]) is
%% [[1,2],[1],[2],[]].
power(List) ->
    lists:foldr(fun(Elem, Subsets) ->
                        [[Elem | S] || S <- Subsets] ++ Subsets
                end, [[]], List).
%% Drop empty lists from Data, preserving the order of the rest.
filterSet(Data) when is_list(Data) ->
    [Elem || Elem <- Data, Elem =/= []].
% Finds the subsets of J, excluding the empty set.
jBranch(J) -> filterSet(power(J)).

% Returns true if a bad combo, i.e. at least one branch body is an empty
% branch list.
badJCombo1(A) ->
    Results = for(A, fun({_,{branch, Si}}) ->
                             case Si of
                                 [] -> true;
                                 _ -> false
                             end
                     end),
    lists:member(true, Results).

% Returns true if a bad combo, i.e. there is an element of I that does not
% appear in any of the inner branch label sets Ji.
badJCombo2(A, I) ->
    Indices = for(A, fun({_,{branch, Js}}) ->
                             for(Js, fun({J, _}) -> J
                                     end)
                     end),
    %% usort both sides so the comparison is order/duplicate insensitive.
    case lists:usort(lists:flatten(Indices)) =:= lists:usort(lists:flatten(I)) of
        true -> false;
        _ -> true
    end.

%% Small fixtures for exercising the correlating-branch machinery.
com1() -> {branch, [{a, {consume, a, endP}}, {b, {consume, b, endP}}, {c, endP} ] }.
com2() -> {branch, [{aa, {assert, a, endP}}, {bb, {assert, b, endP}}] }.
%% Ad-hoc helper: all non-empty label subsets of a branch.
test({branch, LiSi2}) -> jBranch(LiSi2).
%% @doc Strip assertions: remove all assert/require/consume wrappers, leaving
%% only the pure communication structure.
-spec strip(protocol()) -> protocol().
strip({act, N, P}) -> {act, N, strip(P)};
strip({assert, _, P}) -> strip(P);
strip({require, _, P}) -> strip(P);
strip({consume, _, P}) -> strip(P);
strip({branch, LiSi}) ->
    {branch, for(LiSi, fun({Li, Si}) -> {Li, strip(Si)} end)};
strip({rec, BV3, P}) -> {rec, BV3, strip(P)};
%% rvar and endP pass through unchanged.
strip(P) -> P.

%% Strip every protocol in a list.
stripSet([]) -> [];
stripSet([X|XX]) -> [strip(X)] ++ stripSet(XX).

%% @doc Substitution of recursion variables: replace {rvar, BV1} with
%% {rvar, BV2}, except where BV1 has been rebound underneath (tracked via the
%% accumulator A of shadowing binders).
-spec subst(protocol(), string(), string(), [string()]) -> protocol().
subst({act, Act, P}, BV1, BV2, A) -> {act, Act, subst(P, BV1, BV2, A)};
subst({assert, N, P}, BV1, BV2, A) -> {assert, N, subst(P, BV1, BV2, A)};
subst({require, N, P}, BV1, BV2, A) -> {require, N, subst(P, BV1, BV2, A)};
subst({consume, N, P}, BV1, BV2, A) -> {consume, N, subst(P, BV1, BV2, A)};
subst({branch, LiSi}, BV1, BV2, A) ->
    {branch, for(LiSi, fun({Li, Si}) -> {Li, subst(Si, BV1, BV2, A)} end)};
subst({rec, BV3, P}, BV1, BV2, A) ->
    %% Record binders encountered so an inner rebinding of BV1 shadows the
    %% substitution.
    case lists:member(BV1, A) of
        true -> {rec, BV3, subst(P, BV1, BV2, A)};
        false -> {rec, BV3, subst(P, BV1, BV2, A ++ [BV3])}
    end;
subst({rvar, BV1}, BV1, BV2, A) ->
    case lists:member(BV1, A) of
        true -> {rvar, BV1};
        false -> {rvar, BV2}
    end;
subst({rvar, BV3}, _, _, _) -> {rvar, BV3};
subst(endP, _ , _ , _ ) -> endP.
%% @doc Auxiliary printers
%% Prints a single labelled branch as "label : protocol".
pprintBranch({Label, P}) -> atom_to_list(Label) ++ " : " ++ pprint(P).
% Prints a list of branches separated by "; ".
pprintBranches([]) -> "";
pprintBranches([B]) -> pprintBranch(B);
pprintBranches([B|BS]) -> pprintBranch(B) ++ "; " ++ pprintBranches(BS).
%% @doc Assertedness: given the set A of currently asserted names, compute
%% the set of names guaranteed asserted after the protocol, or 'illAsserted'
%% when a require/consume refers to a name not in A.
-spec asserted([atom()], protocol()) -> [atom()] | 'illAsserted'.
asserted(A , endP) -> A;
asserted(A, {rvar, _}) -> A;
asserted(A, {act, _, P}) -> asserted(A, P);
asserted(A, {branch, LiSi}) ->
    %% Every branch must be well-asserted; the result is the intersection of
    %% the per-branch guarantees.
    Abranches = for(LiSi, fun({_,Si}) -> asserted(A, Si) end),
    case listAsserted(Abranches) of
        true -> listIntersect(Abranches);
        false -> 'illAsserted'
    end;
asserted(A, {require, N, P}) ->
    %% require reads N but does not remove it.
    case lists:member(N, A) of
        true -> asserted(A, P);
        false -> 'illAsserted'
    end;
asserted(A, {consume, N, P}) ->
    %% consume removes N from the asserted set.
    case lists:member(N, A) of
        true -> asserted(lists:delete(N,A), P);
        false -> 'illAsserted'
    end;
asserted(A, {assert, N, P}) ->
    case lists:member(N, A) of
        true -> asserted(A, P);
        false -> asserted(A ++ [N], P)
    end;
asserted(A, {rec, _, P}) ->
    case asserted(A, P) of
        illAsserted -> illAsserted;
        B -> lists:usort(B ++ A)
    end.

%% Boolean wrapper: true iff the protocol is well-asserted under A.
wellAsserted(A, PS) ->
    case asserted(A, PS) of
        illAsserted -> false;
        _ -> true
    end.
%% @doc Helper functions for assertedness
%% True iff no element of the list is 'illAsserted' (vacuously true for []).
listAsserted(Results) ->
    lists:all(fun(R) -> R =/= illAsserted end, Results).
%% Intersection of a list of lists, returned as a list of unique elements
%% (element order unspecified, as produced by sets:to_list/1).
listIntersect(A) ->
    sets:to_list(sets:intersection(for(A, fun(X) -> sets:from_list(X) end))).
%% @doc Helper functions of binders
%% Predicate: true iff protocol P has all its free recursion variables in
%% environment N (i.e. P is closed relative to N).
bound(P, N) ->
    case P of
        {act, _, R} -> bound(R,N);
        {assert, _, R} -> bound(R,N);
        {require, _, R} -> bound(R,N);
        {consume, _, R} -> bound(R,N);
        {branch, LiSi} -> lists:all(fun(X) -> X end, for(LiSi, fun({_, Si})-> bound(Si,N) end) );
        %% A rec binder extends the environment for its body.
        {rec, T, R} -> bound(R,N ++ [T]);
        {rvar, T} -> lists:member(T,N);
        endP -> true
    end.
%% @doc Interleaving
%% Helper for plumbing non-deterministic results (represented as lists)
%% into functions which are non-deterministic (return a list of results):
%% the monadic bind for the list monad (concat-map).
-spec bind([A], fun((A) -> [B])) -> [B].
bind(Xs, F) ->
    lists:append([F(X) || X <- Xs]).
%% @doc Basically just flip map: apply F to every element of Xs.
-spec for([A], fun((A) -> B)) -> [B].
for(Xs, F) -> [F(X) || X <- Xs].
%% @doc Remove duplicate elements, keeping the first occurrence of each in
%% its original position.
-spec nub([A]) -> [A].
nub(X) -> nub(X, []).

%% Accumulate unique elements in reverse and flip once at the end. The
%% previous version appended with 'Clean ++ [X]' on every step, which copies
%% the whole accumulator each time (O(n^2) overall).
nub([], Seen) -> lists:reverse(Seen);
nub([X|Xs], Seen) ->
    case lists:member(X, Seen) of
        true -> nub(Xs, Seen);
        false -> nub(Xs, [X | Seen])
    end.
%% @doc Compute a covering of a set (with two partitions): all ways of
%% splitting the list into an ordered pair of sublists {XS, YS} such that
%% every element lands in exactly one side. Note twoCovering([]) is [] here,
%% not [{[],[]}].
twoCovering([]) -> [];
twoCovering([A]) -> [{[A], []}, {[], [A]}];
twoCovering([A|AS]) ->
    %% Each smaller covering extends two ways: A on the left or on the right.
    bind(twoCovering(AS), fun({XS, YS}) -> [{[A|XS], YS}, {XS, [A|YS]}] end).
%% @doc Take the largest list in a list of lists; on equal lengths the later
%% list wins (the original used '>=', replacing the current maximum on ties).
maximalPossibility(XS) -> maximalPoss(XS, []).

%% Arity-2 entry kept for compatibility; delegates with the seed's length.
maximalPoss(XSS, Max) -> maximalPoss(XSS, Max, length(Max)).

%% Track the current maximum's length so length/1 (which is O(n)) is not
%% recomputed for the same list on every comparison.
maximalPoss([], Max, _MaxLen) -> Max;
maximalPoss([XS|Rest], Max, MaxLen) ->
    case length(XS) of
        Len when Len >= MaxLen -> maximalPoss(Rest, XS, Len);
        _ -> maximalPoss(Rest, Max, MaxLen)
    end.
% Top-level entry points: each fixes a weakening mode, starts with empty
% binder environments and an empty asserted set, and deduplicates results.
-spec interleave(protocol(), protocol()) -> [protocol ()].
%% @doc Wraps the main function and passes in empty environments (strong mode).
interleave(S1, S2) -> nub(interleaveTop(strong, [], [], [], S1, S2)).

-spec interleaveWeak(protocol(), protocol()) -> [protocol ()].
%% @doc Wraps the main function and passes in empty environments (weak mode).
interleaveWeak(S1, S2) -> nub(interleaveTop(weak, [], [], [], S1, S2)).

-spec interleaveAll(protocol(), protocol()) -> [protocol ()].
%% @doc Wraps the main function and passes in empty environments (all modes).
interleaveAll(S1, S2) ->
    nub(interleaveTop(all, [], [], [], S1, S2)).

-spec interleaveCorrelating(protocol(), protocol()) -> [protocol ()].
%% @doc Wraps the main function and passes in empty environments
%% (strong + correlating modes).
interleaveCorrelating(S1, S2) ->
    nub(interleaveTop(correlating, [], [], [], S1, S2)).
%% @doc n-way Cartesian product: one combination per way of choosing one
%% element from each inner list, outer lists varying slowest.
-spec nCartesian([[A]]) -> [[A]].
nCartesian([]) -> [];
%% A single pool: each element becomes a singleton combination.
nCartesian([Pool]) -> [[Choice] || Choice <- Pool];
nCartesian([Pool | Pools]) ->
    [[Choice | Combo] || Choice <- Pool, Combo <- nCartesian(Pools)].
%% @doc Takes
%%  - a list TL of recursion variables [string()] bound on the left
%%  - a list TR of recursion variables [string()] bound on the right
%%  - a list of atoms for the asserted names
%%  - left protocol
%%  - right protocol
%% This function should be used in all recursive calls since it also implements
%% the symmetry rule, whereas interleaveMain does the main, asymmetrical work.
-spec interleaveTop(atom(), [string()], [string()], [atom()], protocol(), protocol()) -> [protocol()].
%% [sym] rule: try both orientations and concatenate the results.
interleaveTop(WeakFlag, TL, TR, A, S1, S2) ->
    interleaveMain(WeakFlag, TL, TR, A, S1, S2) ++
        interleaveMain(WeakFlag, TR, TL, A, S2, S1).
%% @doc Asymmetrical (left-biased) rules. Each clause implements one rule of
%% the interleaving relation; symmetry is provided by interleaveTop/6.
-spec interleaveMain(atom(), [string()], [string()], [atom()], protocol(), protocol()) -> [protocol()].
%% [end] rule
interleaveMain(_, _, _, _, endP, endP) -> [endP];
%% [act] rule: prefix every interleaving of the continuations with the action.
interleaveMain(WeakFlag, TL, TR, A, {act, P, S1}, S2) ->
    for(interleaveTop(WeakFlag, TL, TR, A, S1, S2), fun(S) -> {act, P, S} end);
%% [require] rule: only applicable when N is currently asserted.
interleaveMain(WeakFlag, TL, TR, A, {require, N, S1}, S2) ->
    case lists:member(N, A) of
        true ->
            % Induct
            for(interleaveTop(WeakFlag, TL, TR, A, S1, S2)
               , fun(S) -> {require, N, S} end);
        false -> [] % Fail
    end;
%% [consume] rule: like require, but N is removed from the asserted set.
interleaveMain(WeakFlag, TL, TR, A, {consume, N, S1}, S2) ->
    case lists:member(N, A) of
        true ->
            % Induct
            for(interleaveTop(WeakFlag, TL, TR, lists:delete(N, A), S1, S2)
               , fun(S) -> {consume, N, S} end);
        false -> [] % Fail
    end;
%% [assert] rule: P becomes available to the continuations.
interleaveMain(WeakFlag, TL, TR, A, {assert, P, S1}, S2) ->
    for(interleaveTop(WeakFlag, TL, TR, [P|A], S1, S2)
       , fun(S) -> {assert, P, S} end);
%% [bra] rule
%% if for branches S0, S1, S2 we get the following possible interleavings with S2
%%   S0'_0, S0'_1
%%   S1'_0, S1'_1, S1'_2
%%   S2'_0, S2'_1, S3'_2
%% then nCartesian takes all possible combinations
%% LiSi is the list of label-protocol pairs
interleaveMain(_, _, _, _, {branch, []}, _) -> errorEmptyBranch;
%% Branch vs branch: dispatch on the weakening mode, combining rule variants.
interleaveMain(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}) ->
    case WeakFlag of
        strong -> lists:usort(intStrong(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}));
        weak -> lists:usort(intWeak(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}));
        correlating -> lists:usort(intStrong(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}) ++ intCorrelating(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}));
        all -> lists:usort(intStrong(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}) ++ intCorrelating(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}) ++ intWeak(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}))
    end;
%% Branch vs non-branch: the correlating rule needs a branch on both sides,
%% so only strong/weak variants apply here.
interleaveMain(WeakFlag, TL, TR, A, {branch, LiSi1}, S2) ->
    case WeakFlag of
        strong -> lists:usort(intStrong(WeakFlag, TL, TR, A, {branch, LiSi1}, S2));
        weak -> lists:usort(intWeak(WeakFlag, TL, TR, A, {branch, LiSi1}, S2));
        correlating -> lists:usort(intStrong(WeakFlag, TL, TR, A, {branch, LiSi1}, S2));
        all -> lists:usort(intStrong(WeakFlag, TL, TR, A, {branch, LiSi1}, S2) ++ intWeak(WeakFlag, TL, TR, A, {branch, LiSi1}, S2))
    end;
%% [rec1]: two recursion binders; keep the left binder and require the
%% resulting body to be well-asserted.
interleaveMain(WeakFlag, TL, TR, A, {rec, BV1, S1}, {rec, BV2, S2}) ->
    % Top(S1) not a recursion
    case S1 of
        {rec, _, _} -> [];
        _ -> for(
               interleaveTop(WeakFlag, TL ++ [BV1], TR, A, S1, {rec, BV2, S2})
              , fun(S) ->
                        case wellAsserted(A, {rec, BV1, S}) of
                            true -> {rec, BV1, S};
                            false -> []
                        end
                end)
    end;
%% [rec3]: recursion against end; accept the recursion as-is when it is
%% well-asserted and closed.
interleaveMain(_, _, _, A, {rec, BV1, S1}, endP) ->
    case wellAsserted(A, {rec, BV1, S1}) and bound({rec, BV1, S1},[]) of
        true -> [{rec, BV1, S1}];
        false -> []
    end;
%% [rec2]: unfold the left recursion by substituting each right-bound
%% variable for BV1.
interleaveMain(WeakFlag, TL, TR, A, {rec, BV1, S1}, S2) ->
    case S1 of
        % TOP check
        {rec, _, _} -> [];
        _ -> lists:append(for(TR, fun(S)->
                 interleaveTop(WeakFlag, TL, TR, A, subst(S1, BV1, S, []), S2) end))
    end;
%% [call]: matching recursion variables, bound on either side.
interleaveMain(_, TL, TR , _, {rvar, BV1}, {rvar, BV1}) ->
    case lists:member(BV1, TL) or lists:member(BV1, TR) of
        true -> [{rvar, BV1}];
        false -> []
    end;
%% Catch-all: no rule applies (check top and well assertedness failed).
interleaveMain(_, _, _, _, _, _) -> [].
%% [bra] strong branch rule: every left branch must compose with S2.
%% NOTE(review): Covering is the single trivial partition {all, none}, so the
%% fold below effectively just interleaves each branch body with S2; the
%% partition machinery mirrors intWeak/6 but is degenerate here.
intStrong(WeakFlag, TL, TR, A, {branch, LiSi}, S2) ->
    Covering = [{LiSi, []}],
    Possibilities = for(Covering,
        fun ({Ia, Ib}) ->
            % Good partition if all Sb are well asserted
            case lists:all(fun ({_, Sib}) -> wellAsserted(A, Sib) end, Ib) of
                % Good partition
                true -> AllCombinations = nCartesian(for(Ia,
                            fun ({Li, Si}) ->
                                % Find all interleavings for Si with S2 - put with its label
                                % with possible weakening modes
                                for(interleaveTop(WeakFlag, TL, TR, A, Si, S2),
                                    fun(Sip) -> {Li, Sip} end)
                            end)),
                        for(AllCombinations, fun(LiSip) -> {branch, LiSip ++ Ib} end);
                % Bad partition: Ib is not all well-asserted
                false -> []
            end
        end),
    lists:usort(lists:concat(Possibilities)).

%% [wbra] weak branch rule: some branches (Ia) are interleaved with S2 while
%% the rest (Ib) are kept as-is, provided they are well-asserted on their
%% own. The last (empty-Ia) covering is dropped, and only the largest set of
%% possibilities is returned.
intWeak(WeakFlag, TL, TR, A, {branch, LiSi}, S2) ->
    Covering = lists:droplast(twoCovering(LiSi)),
    Possibilities = for(Covering,
        fun ({Ia, Ib}) ->
            % Good partition if all Sb are well asserted
            case lists:all(fun ({_, Sib}) -> wellAsserted(A, Sib) end, Ib) of
                % Good partition
                true -> AllCombinations = nCartesian(for(Ia,
                            fun ({Li, Si}) ->
                                % Find all interleavings for Si with S2 - put with its label
                                % with possible weakening modes
                                for(interleaveTop(WeakFlag, TL, TR, A, Si, S2),
                                    fun(Sip) -> {Li, Sip} end)
                            end)),
                        for(AllCombinations, fun(LiSip) -> {branch, LiSip ++ Ib} end);
                % Bad partition: Ib is not all well-asserted
                false -> []
            end
        end),
    maximalPossibility(Possibilities).

%% [cbra] correlating branch rule: each left branch Si is paired with a
%% non-empty subset of the right branches, producing nested branching;
%% combinations with an empty inner branch, or that fail to cover every
%% right-hand label, are rejected.
intCorrelating(WeakFlag, TL, TR, A, {branch, LiSi1}, {branch, LiSi2}) ->
    I = for(LiSi2, fun({Li, _}) -> Li end),
    RightSubsets = jBranch(LiSi2),
    % Meat
    LeftAndRightSubsetCombos =
        % For each {li : Si}
        for(LiSi1, fun ({Li, Si}) ->
            % For each subset of the {lj , Sj} branches
            for(RightSubsets, fun (Subset) ->
                % associate with Li a branch...
                {Li, {branch,
                      %... all the possible unique {lj, Sj} pairs where Si and Sj compose
                      nub([{Lj, S} || {Lj, Sj} <- Subset, S <- interleaveMain(WeakFlag, TL, TR, A, Si, Sj)])}}
            end)
        end),
    % Now choose all combinations across branches
    Results = for(nCartesian(LeftAndRightSubsetCombos), fun (Branches) ->
        % check that inner branching is non empty for all Li (in the paper Ji =\= 0 )
        case badJCombo1(Branches) of
            true -> [];
            % check all branches of J are covered in the I branches overall (in the paper U_{j\in J} = J)
            false -> case badJCombo2(Branches, I) of
                         true -> [];
                         false -> {branch, Branches}
                     end
        end
    end),
    %remove empty list
    lists:filter(fun(X) -> X /= [] end, Results).
% Factorization - ongoing work: given a composed protocol and one component,
% peel the component off to recover the other factor.
%[Fprex1] matching prefixes on both sides cancel out.
fact({act, A, S1}, {act, A, S2}) ->
    fact(S1,S2);
fact({assert, A, S1}, {assert, A, S2}) ->
    fact(S1,S2);
fact({consume, A, S1}, {consume, A, S2}) ->
    fact(S1,S2);
fact({require, A, S1}, {require, A, S2}) ->
    fact(S1,S2);
%[Fprex2] a prefix only on the left is kept in the factor.
fact({act, A, S1}, S2) ->
    {act, A, fact(S1,S2)};
fact({assert, A, S1}, S2) ->
    {assert, A, fact(S1,S2)};
fact({consume, A, S1}, S2) ->
    {consume, A, fact(S1,S2)};
fact({require, A, S1}, S2) ->
    {require, A, fact(S1,S2)};
%[Fbra1] with I = J: factor branch-wise; if every branch factors to the same
% protocol, collapse to that single protocol.
fact({branch, LiSi } , {branch, RiSi}) ->
    L = bramatch(LiSi,RiSi),
    S = lists:last(L),
    case lists:all(fun(X) -> (X == S) end, L) of
        true -> S;
        false -> L
    end;
%[Fbra2] branch only on the left: factor each branch body against S.
fact({branch, LiSi } , S) ->
    {branch , for(LiSi, fun({A,R}) -> {A,fact(R,S)} end) };
%% Recursion: identical binders cancel; differing binders are aligned by
%% substitution before factoring.
fact({rec, T1, S1}, {rec, T1, S2}) -> fact(S1,S2);
fact({rec, T1, S1}, {rec, T2, S2}) -> fact(S1,subst(S2, T1, T2, []));
fact({rec, T, S}, _) -> {rec, T, S};
fact({rvar, T1}, {rvar, T1}) -> {rvar, T1};
fact(S, {rvar, _}) -> S;
fact(endP, _) -> endP;
fact(_, endP) -> endP.
%% Pairwise-factor two branch lists whose labels match position by position;
%% returns noP when the label lists do not line up.
%% (Fixed: the closing line previously carried fused dataset-extraction
%% residue after the final '.', which is a syntax error.)
bramatch([{A, S}], [{A, T}]) -> [fact(S, T)];
bramatch([{A, S} | B1], [{A, T} | B2]) -> [fact(S, T)] ++ bramatch(B1, B2);
bramatch(_, _) -> noP.
%% -------------------------------------------------------------------
%%
%% riak_core: Core Riak Application
%%
%% Copyright (c) 2007-2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc
%% This module implements a cluster capability system that tracks the modes
%% supported by different nodes in the cluster and automatically determines
%% the most preferred mode for each capability that is supported by all nodes.
%% The primary use of this system is to support seamless transitions between
%% different node versions during a rolling upgrade -- such as speaking an
%% old protocol while the cluster still contains older nodes, and then
%% switching to a newer protocol after all nodes have been upgraded.
%%
%% The capability system exposes a simple `register' and `get' API, that
%% allows applications to register a given capability and set of supported
%% modes, and then retrieve the current mode that has been safely negotiated
%% across the cluster. The system also allows overriding negotiation through
%% application environment variables (eg. in app.config).
%%
%% To register a capability and set of supported modes:
%% Use {@link register/3} or {@link register/4}
%%
%% To query the current negotiated capability:
%% Use {@link get/1} or {@link get/2}
%%
%% The capability system implements implicit mode preference. When registering
%% modes, the modes listed earlier in the list are preferred over modes listed
%% later in the list.
%%
%% Users can override capabilities by setting the `override_capability' app
%% variable for the appropriate application. For example, to override the
%% `{riak_core, vnode_routing}' capability, the user could add the following
%% to `riak_core' section of `app.config':
%%
%% {override_capability,
%% [{vnode_routing,
%% [{use, some_mode},
%% {prefer, some_other_mode}]
%% }]
%% }
%%
%% The two override parameters are `use' and `prefer'. The `use' parameter
%% specifies a mode that will always be used for the given capability,
%% ignoring negotiation. It is a forced override. The `prefer' parameter
%% specifies a mode that will be used if safe across the entire cluster.
%% This overrides the built-in mode preference, but still only selects the
%% mode if safe. When both `use' and `prefer' are specified, `use' takes
%% precedence.
%%
%% There is no inherent upgrading/downgrading of protocols in this system.
%% The system is designed with the assumption that all supported modes can
%% be used at any time (even concurrently), and is concerned solely with
%% selecting the most preferred mode common across the cluster at a given
%% point in time.
-module(riak_core_capability).
-behaviour(gen_server).
%% API
-export([start_link/0,
register/4,
register/3,
get/1,
get/2,
all/0,
update_ring/1]).
-export([make_capability/4,
preferred_modes/4]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-type capability() :: atom() | {atom(), atom()}.
-type mode() :: term().
-record(capability, {supported :: [mode()],
default :: mode(),
legacy}).
-type registered() :: [{capability(), #capability{}}].
-record(state, {registered :: registered(),
last_ring_id :: term(),
supported :: [{node(), [{capability(), [mode()]}]}],
unknown :: [node()],
negotiated :: [{capability(), mode()}]
}).
-define(ETS, riak_capability_ets).
-define(CAPS, '$riak_capabilities').
-ifdef(TEST).
-compile(export_all).
-type state() :: #state{}.
-export_type([state/0]).
-endif.
%%%===================================================================
%%% API
%%%===================================================================
%% Start the capability server registered locally under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Register a new capability providing a list of supported modes, the
%% default mode, and an optional mapping of how a legacy application variable
%% maps to different modes. The order of modes in `Supported' determines the
%% mode preference -- modes listed earlier are more preferred.
register(Capability, Supported, Default, LegacyVar) ->
    Info = capability_info(Supported, Default, LegacyVar),
    %% Synchronous with infinity timeout: registration must not be lost.
    gen_server:call(?MODULE, {register, Capability, Info}, infinity),
    ok.

%% @doc Register a new capability providing a list of supported modes as well
%% as the default value. The order of modes in `Supported' determines the mode
%% preference -- modes listed earlier are more preferred.
register(Capability, Supported, Default) ->
    register(Capability, Supported, Default, undefined).

%% @doc Query the current negotiated mode for a given capability, throwing an
%% exception if the capability is unknown or the capability system is
%% unavailable.
get(Capability) ->
    case get(Capability, '$unknown') of
        '$unknown' ->
            throw({unknown_capability, Capability});
        Result ->
            Result
    end.

%% @doc Query the current negotiated mode for a given capability, returning
%% `Default' if the capability system is unavailable.
get(Capability, Default) ->
    %% The catch-all is deliberate: the ETS table may not exist yet (server
    %% still starting or crashed), in which case the caller gets Default.
    try
        case ets:lookup(?ETS, Capability) of
            [] ->
                Default;
            [{Capability, Choice}] ->
                Choice
        end
    catch
        _:_ ->
            Default
    end.

-ifdef(TEST).
%% @doc Exported for testing - takes opaque state record and returns negotiated
get_negotiated(State) ->
    State#state.negotiated.
-endif.

%% @doc Return a list of all negotiated capabilities
all() ->
    ets:tab2list(?ETS).

%% @doc Add the local node's supported capabilities to the given
%% ring. Currently used during the `riak-admin join' process
update_ring(Ring) ->
    %% If a join occurs immediately after a node has started, it is
    %% possible that the ETS table does not yet exist, or that the
    %% '$supported' key has not yet been written. Therefore, we catch
    %% any errors and return an unmodified ring.
    Supported = try
                    [{_, Sup}] = ets:lookup(?ETS, '$supported'),
                    Sup
                catch
                    _:_ ->
                        error
                end,
    case Supported of
        error ->
            {false, Ring};
        _ ->
            add_supported_to_ring(node(), Supported, Ring)
    end.

%% @doc
%% Make a capability from a capability atom, a list of supported modes,
%% the default mode, and a mapping from a legacy var to its capabilities.
-spec make_capability(capability(), [mode()], mode(), term())
                     -> {capability(), #capability{}}.
make_capability(Capability, Supported, Default, Legacy) ->
    {Capability, capability_info(Supported, Default, Legacy)}.
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================

%% Create the (publicly readable) capability ETS table, start the periodic
%% tick, and rebuild state from any previously saved registrations.
init([]) ->
    ?ETS = ets:new(?ETS, [named_table, {read_concurrency, true}]),
    schedule_tick(),
    Registered = load_registered(),
    State = init_state(Registered),
    State2 = reload(State),
    {ok, State2}.

%% Fresh server state with the given registered capabilities and no
%% per-node/negotiation information yet.
init_state(Registered) ->
    #state{registered=Registered,
           supported=[],
           unknown=[],
           negotiated=[]}.

%% Register a capability: record it, refresh the cluster view, renegotiate,
%% then publish/cache/persist the results before replying.
handle_call({register, Capability, Info}, _From, State) ->
    State2 = register_capability(node(), Capability, Info, State),
    State3 = update_supported(State2),
    State4 = renegotiate_capabilities(State3),
    publish_supported(State4),
    update_local_cache(State4),
    save_registered(State4#state.registered),
    {reply, ok, State4}.

%% No casts are used; ignore them.
handle_cast(_Msg, State) ->
    {noreply, State}.

%% Periodic tick: re-read the ring if it changed, retry nodes whose
%% capabilities are still unknown, and renegotiate.
handle_info(tick, State) ->
    schedule_tick(),
    State2 = maybe_update_supported(State),
    State3 =
        lists:foldl(fun(Node, StateAcc) ->
                            add_node(Node, [], StateAcc)
                    end, State2, State2#state.unknown),
    State4 = renegotiate_capabilities(State3),
    {noreply, State4};
%% Drain any other message.
handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
maybe_update_supported(State=#state{last_ring_id=LastID}) ->
case riak_core_ring_manager:get_ring_id() of
LastID ->
State;
RingID ->
{ok, Ring} = riak_core_ring_manager:get_my_ring(),
State2 = update_supported(Ring, State),
State2#state{last_ring_id=RingID}
end.
%% Construct a #capability{} record from its three components.
capability_info(SupportedModes, DefaultMode, LegacyMapping) ->
    #capability{supported = SupportedModes,
                default   = DefaultMode,
                legacy    = LegacyMapping}.
%% Arm the periodic tick timer. The interval (milliseconds) is taken from
%% the riak_core 'capability_tick' app env, defaulting to 10 seconds.
schedule_tick() ->
    Interval = application:get_env(riak_core, capability_tick, 10000),
    erlang:send_after(Interval, ?MODULE, tick).
%% Capabilities are re-initialized if riak_core_capability server crashes
%% Re-register every persisted capability for the local node, rebuild the
%% supported view from the current ring, refresh the ETS cache, and
%% re-persist. A no-op when nothing had been registered (fresh start).
reload(State=#state{registered=[]}) ->
State;
reload(State) ->
logger:info("Reloading capabilities"),
State2 =
orddict:fold(
fun(Capability, Info, S) ->
S2 = add_registered(Capability, Info, S),
S3 = add_supported(node(), Capability,
Info#capability.supported, S2),
S3
end, State, State#state.registered),
{ok, Ring} = riak_core_ring_manager:get_my_ring(),
State3 = update_supported(Ring, State2),
update_local_cache(State3),
save_registered(State3#state.registered),
State3.
%% Refresh the supported-capability view from the current raw ring.
update_supported(State) ->
    {ok, CurrentRing} = riak_core_ring_manager:get_raw_ring(),
    update_supported(CurrentRing, State).
%% Update this node's view of cluster capabilities based on a received
%% ring. Members no longer in the ring are dropped, then every remote
%% member whose published capabilities differ from what we already know
%% (or which published nothing) is (re)queried via add_node/3. The
%% original version had two case clauses for the empty-published case
%% that were exact duplicates of the catch-all; they are merged here into
%% a single guarded "already known and unchanged" skip branch.
update_supported(Ring, State) ->
    AllSupported = get_supported_from_ring(Ring),
    State2 = remove_members(Ring, State),
    State3 =
        lists:foldl(
          fun({Node, _}, StateAcc) when Node == node() ->
                  %% Local node's capabilities come from registration,
                  %% not from the ring.
                  StateAcc;
             ({Node, Supported}, StateAcc) ->
                  case get_supported(Node, StateAcc) of
                      Supported when Supported =/= [] ->
                          %% Already known and unchanged; nothing to do.
                          StateAcc;
                      _Different ->
                          add_node(Node, Supported, StateAcc)
                  end
          end, State2, AllSupported),
    renegotiate_capabilities(State3).
%% Record a newly registered capability for Node: remember its
%% registration info and mark its supported modes in the per-node view.
register_capability(Node, Capability, Info, State) ->
    WithRegistration = add_registered(Capability, Info, State),
    add_supported(Node, Capability, Info#capability.supported,
                  WithRegistration).
%% Insert or replace Capability's registration info in the registered
%% orddict carried by the server state.
add_registered(Capability, Info, State = #state{registered = Registered0}) ->
    State#state{registered = orddict:store(Capability, Info, Registered0)}.
%% Look up the capabilities known for Node; an unknown node is treated as
%% having an empty capability orddict.
get_supported(Node, #state{supported = Supported}) ->
    case orddict:find(Node, Supported) of
        {ok, NodeCaps} -> NodeCaps;
        error          -> orddict:new()
    end.
%% Record that Node supports the given modes for Capability, replacing
%% any previously known modes for that capability on that node.
add_supported(Node, Capability, Modes, State) ->
    NodeCaps = orddict:store(Capability, Modes, get_supported(Node, State)),
    State#state{supported =
                    orddict:store(Node, NodeCaps, State#state.supported)}.
%% Clear any capability information associated with nodes that are no
%% longer members of the cluster.
remove_members(Ring, State = #state{supported = Supported0}) ->
    Members = riak_core_ring:all_members(Ring),
    StillMember = fun(Node, _Caps) -> lists:member(Node, Members) end,
    State#state{supported = orddict:filter(StillMember, Supported0)}.
%% Add another member to the local view of cluster capabilities. If the node
%% has published capability information in the ring, use it. Otherwise, try
%% to determine capabilities through RPC to the node. If RPC fails, use
%% default values. However, unresolved nodes will be marked as such and RPC
%% re-attempted at the next server tick.
add_node(Node, [], State=#state{unknown=Unknown}) ->
{Capabilities, Resolved} = query_capabilities(Node, State),
%% Track the node in the 'unknown' ordset until every capability query
%% for it has been resolved (see the tick handler for the retry).
Unknown2 = case Resolved of
true ->
ordsets:del_element(Node, Unknown);
false ->
ordsets:add_element(Node, Unknown)
end,
State2 = State#state{unknown=Unknown2},
add_node_capabilities(Node, Capabilities, State2);
%% Published (non-empty) capabilities: use them directly.
add_node(Node, Capabilities, State) ->
add_node_capabilities(Node, Capabilities, State).
%% Fold a list of {Capability, SupportedModes} pairs for Node into the
%% server state's per-node supported view.
add_node_capabilities(Node, Capabilities, State0) ->
    Record = fun({Capability, Modes}, StateAcc) ->
                     add_supported(Node, Capability, Modes, StateAcc)
             end,
    lists:foldl(Record, State0, Capabilities).
%% We maintain a cached-copy of the local node's supported capabilities
%% in our existing capability ETS table. This allows update_ring/1
%% to update rings without going through the capability server.
update_local_cache(State) ->
Supported = get_supported(node(), State),
%% '$supported' is a reserved key; it cannot collide with capability
%% keys, which are {App, Name} tuples.
ets:insert(?ETS, {'$supported', Supported}),
ok.
%% Publish the local node's supported modes in the ring
publish_supported(State) ->
Node = node(),
Supported = get_supported(Node, State),
%% ring_trans applies F transactionally; returning 'ignore' when the
%% metadata is already current avoids a gratuitous ring update.
F = fun(Ring, _) ->
{Changed, Ring2} =
add_supported_to_ring(Node, Supported, Ring),
case Changed of
true ->
{new_ring, Ring2};
false ->
ignore
end
end,
riak_core_ring_manager:ring_trans(F, ok),
ok.
%% Record Node's supported capabilities in the ring member metadata,
%% returning {Changed, Ring2} where Changed tells whether the ring was
%% actually modified (false when the metadata was already up to date).
add_supported_to_ring(Node, Supported, Ring) ->
    case riak_core_ring:get_member_meta(Ring, Node, ?CAPS) of
        Supported ->
            {false, Ring};
        _Stale ->
            NewRing = riak_core_ring:update_member_meta(Node, Ring, Node,
                                                        ?CAPS, Supported),
            {true, NewRing}
    end.
%% @doc
%% Given my node's capabilities, my node's registered default modes, the
%% list of application env overrides, and the current view of all nodes'
%% supported capabilities, determine the most preferred mode for each
%% capability that is supported by all nodes.
-spec preferred_modes([{capability(), [mode()]}],
                      [{node(), [{capability(), [mode()]}]}],
                      registered(),
                      [{capability(), [mode()]}])
                     -> [{capability(), mode()}].
preferred_modes(MyCaps, Capabilities, Registered, Override) ->
    ByCapability = reformat_capabilities(Registered, Capabilities),
    CommonModes = intersect_capabilities(ByCapability),
    Ordered = order_by_preference(MyCaps, CommonModes),
    Overridden = override_capabilities(Ordered, Override),
    %% The head of each ordered mode list is the negotiated choice.
    [{Cap, hd(Modes)} || {Cap, Modes} <- Overridden].
%% Given the current view of each node's supported capabilities, determine
%% the most preferred mode for each capability that is supported by all
%% nodes in the cluster. If Node has no recorded capabilities yet, leave
%% the negotiated set untouched.
negotiate_capabilities(Node, Override,
                       State = #state{registered = Registered,
                                      supported  = Capabilities}) ->
    case orddict:find(Node, Capabilities) of
        {ok, MyCaps} ->
            Negotiated =
                preferred_modes(MyCaps, Capabilities, Registered, Override),
            State#state{negotiated = Negotiated};
        error ->
            State
    end.
%% Re-run mode negotiation for the local node and apply any resulting
%% changes (ETS cache + logging). Skipped when no capabilities are known
%% yet. Note the diff is taken between the OLD state's negotiated set and
%% the new one, so only actual changes are processed.
renegotiate_capabilities(State=#state{supported=[]}) ->
State;
renegotiate_capabilities(State) ->
Caps = orddict:fetch(node(), State#state.supported),
Overrides = get_overrides(Caps),
State2 = negotiate_capabilities(node(), Overrides, State),
process_capability_changes(State#state.negotiated,
State2#state.negotiated),
State2.
%% Known capabilities are tracked based on node:
%%
%% [{Node1, [{capability1, [x,y,z]},
%% {capability2, [x,y,z]}]},
%% {Node2, [{capability1, [a,b,z]}]}].
%%
%% Here we convert this data into a capability-centric structure:
%%
%% [{capability1, [{Node1, [x,y,z,default]}, {Node2, [a,b,c,default]}]},
%% {capability2, [{Node1, [x,y,z,default]}, {Node2, [default]}]}]
%%
-spec reformat_capabilities(registered(),
[{node(), [{capability(), [mode()]}]}])
-> [{capability(), [{node(), [mode()]}]}].
reformat_capabilities(Registered, Capabilities) ->
%% Every node implicitly supports each registered capability's default
%% mode; extend each node's list with those defaults before pivoting.
DefaultsL = [{Cap, [Info#capability.default]} || {Cap,Info} <- Registered],
Defaults = orddict:from_list(DefaultsL),
lists:foldl(fun({Node, NodeCaps}, Acc) ->
update_capability(Node, NodeCaps, Defaults, Acc)
end, orddict:new(), Capabilities).
%% Pivot one node's capability list into the capability-centric orddict,
%% after extending it with the registered default modes.
update_capability(Node, NodeCaps, Defaults, Acc0) ->
    Extended = extend(orddict:from_list(NodeCaps), Defaults),
    Append = fun({Cap, Supported}, Acc) ->
                     ModeSet = ordsets:from_list(Supported),
                     orddict:append(Cap, {Node, ModeSet}, Acc)
             end,
    lists:foldl(Append, Acc0, Extended).
%% Merge two orddicts of lists; on a key collision the values from B come
%% first, followed by those from A.
extend(A, B) ->
    Merge = fun(_Key, FromA, FromB) -> FromB ++ FromA end,
    orddict:merge(Merge, A, B).
%% For each capability, determine the modes supported by all nodes
-spec intersect_capabilities([{capability(), [{node(), [mode()]}]}])
-> [{capability(), [mode()]}].
intersect_capabilities(Capabilities) ->
    [intersect_supported(Entry) || Entry <- Capabilities].
%% Intersect the per-node mode sets for one capability, yielding the
%% modes supported by every node in the list.
intersect_supported({Capability, [{_FirstNode, FirstModes} | Rest]}) ->
    Intersect = fun({_Node, Modes}, Acc) ->
                        ordsets:intersection(Acc, Modes)
                end,
    {Capability, lists:foldl(Intersect, FirstModes, Rest)}.
%% For each capability, re-order the computed mode list by local
%% preference: an order-sensitive intersection between the local node's
%% supported modes and the cluster-common modes.
order_by_preference(MyCapabilities, Common) ->
    [order_by_preference(Cap, Preferred, Common)
     || {Cap, Preferred} <- MyCapabilities].

order_by_preference(Capability, Preferred, Common) ->
    Available = orddict:fetch(Capability, Common),
    Kept = lists:filter(fun(Mode) -> lists:member(Mode, Available) end,
                        Preferred),
    {Capability, Kept}.
%% Override computed capability modes based on app.config settings.
override_capabilities(Caps, AppOver) ->
    [override_capability(Cap, Modes, AppOver) || {Cap, Modes} <- Caps].

%% Apply any per-capability override options registered for the owning
%% application; absent options leave the modes unchanged.
override_capability({App, CapName} = Capability, Modes, AppOver) ->
    AppOpts = orddict:fetch(App, AppOver),
    case orddict:find(CapName, AppOpts) of
        {ok, Opts} ->
            {Capability, override_capability(Opts, Modes)};
        error ->
            {Capability, Modes}
    end.

%% 'use' forces a single mode unconditionally; 'prefer' forces a mode
%% only when it is actually among the common modes; otherwise the modes
%% are returned unchanged.
override_capability(Opts, Modes) ->
    case proplists:get_value(use, Opts) of
        undefined ->
            case proplists:get_value(prefer, Opts) of
                undefined ->
                    Modes;
                Preferred ->
                    case lists:member(Preferred, Modes) of
                        true  -> [Preferred];
                        false -> Modes
                    end
            end;
        Forced ->
            [Forced]
    end.
%% Collect the app.config override settings for every application that
%% owns at least one of the given capabilities.
get_overrides(Caps) ->
    Apps = lists:usort([App || {{App, _Name}, _Modes} <- Caps]),
    [{App, get_app_overrides(App)} || App <- Apps].
%% Fetch the 'override_capability' app env for App as an orddict; an
%% unset env means no overrides.
get_app_overrides(App) ->
    case application:get_env(App, override_capability) of
        {ok, Overrides} ->
            orddict:from_list(Overrides);
        undefined ->
            []
    end.
%% Log capability changes as well as update the capability ETS table.
%% The ETS table allows other processes to query current capabilities
%% without going through the capability server.
process_capability_changes(OldModes, NewModes) ->
%% orddict_delta yields {'$none', New} for additions, {Old, '$none'}
%% for removals, and {Old, New} for changed entries.
Diff = riak_core_util:orddict_delta(OldModes, NewModes),
orddict:fold(fun(Capability, {'$none', New}, _) ->
ets:insert(?ETS, {Capability, New}),
logger:info("New capability: ~p = ~p", [Capability, New]);
(Capability, {Old, '$none'}, _) ->
ets:delete(?ETS, Capability),
logger:info("Removed capability ~p (previously: ~p)",
[Capability, Old]);
(Capability, {Old, New}, _) ->
ets:insert(?ETS, {Capability, New}),
logger:info("Capability changed: ~p / ~p -> ~p",
[Capability, Old, New])
end, ok, Diff).
%% Determine the capabilities supported by each cluster member based on
%% the information published in the ring. Members with no published
%% metadata are reported as supporting nothing.
get_supported_from_ring(Ring) ->
    Members = riak_core_ring:all_members(Ring),
    [case riak_core_ring:get_member_meta(Ring, Member, ?CAPS) of
         undefined -> {Member, []};
         Caps      -> {Member, Caps}
     end || Member <- Members].
%% Determine capabilities of legacy nodes based on app.config settings and
%% the provided app-var -> mode mapping associated with capabilities when
%% registered.
%% Returns {Capabilities, Resolved} where Resolved is false if any single
%% query failed (e.g. RPC error) and must be retried on the next tick.
query_capabilities(Node, State=#state{registered=Registered}) ->
%% Only query results we do not already have local knowledge of
Known = dict:from_list(get_supported(Node, State)),
lists:mapfoldl(fun({Capability, Info}, ResolvedAcc) ->
{Resv, Cap} = query_capability(Node,
Known,
Capability,
Info#capability.default,
Info#capability.legacy),
{Cap, ResolvedAcc and Resv}
end, true, Registered).
%% Answer a capability query from local knowledge when possible, falling
%% back to remote/legacy discovery otherwise. Returns {Resolved, Answer}.
query_capability(Node, Known, Capability, DefaultSup, LegacyVar) ->
    case dict:find(Capability, Known) of
        {ok, Supported} ->
            {true, {Capability, Supported}};
        error ->
            query_capability(Node, Capability, DefaultSup, LegacyVar)
    end.

%% With no legacy app-var mapping, assume only the default mode.
query_capability(_Node, Capability, DefaultSup, undefined) ->
    {true, {Capability, [DefaultSup]}};
%% Otherwise consult the legacy app env on the remote node; an RPC
%% failure marks the answer as unresolved (false) so it is retried.
query_capability(Node, Capability, DefaultSup, {App, Var, Map}) ->
    Fallback = {Capability, [DefaultSup]},
    case riak_core_util:safe_rpc(Node, application, get_env, [App, Var]) of
        {badrpc, _Reason} ->
            {false, Fallback};
        undefined ->
            {true, Fallback};
        {ok, Value} ->
            case lists:keyfind(Value, 1, Map) of
                {Value, Supported} ->
                    {true, {Capability, [Supported]}};
                false ->
                    {true, Fallback}
            end
    end.
%% Persist the registered capabilities in the riak_core app env so they
%% can be recovered after a crash/restart of this server.
save_registered(Registered) ->
    application:set_env(riak_core, registered_capabilities, Registered).
%% Recover the persisted registered capabilities from the riak_core app
%% env; an unset env means nothing was ever registered.
load_registered() ->
    case application:get_env(riak_core, registered_capabilities) of
        {ok, Caps} -> Caps;
        undefined  -> []
    end.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% End-to-end negotiation over hand-built state (no processes):
%% exercises preference ordering, 'prefer' and 'use' overrides, and the
%% fallback to the default mode when a node supports a disjoint set.
basic_test() ->
S1 = init_state([]),
%% n1 supports [x,a,c,y] (preference order), default y.
S2 = register_capability(n1,
{riak_core, test},
capability_info([x,a,c,y], y, []),
S1),
%% n2 supports [a,b,c,y]; common with n1 is {a,c,y} -> n1 prefers a.
S3 = add_node_capabilities(n2,
[{{riak_core, test}, [a,b,c,y]}],
S2),
S4 = negotiate_capabilities(n1, [{riak_core, []}], S3),
?assertEqual([{{riak_core, test}, a}], S4#state.negotiated),
%% 'prefer' override picks c (still common to both nodes).
S5 = negotiate_capabilities(n1,
[{riak_core, [{test, [{prefer, c}]}]}],
S4),
?assertEqual([{{riak_core, test}, c}], S5#state.negotiated),
%% n3 only supports [b]; the default y (implicit everywhere) wins.
S6 = add_node_capabilities(n3,
[{{riak_core, test}, [b]}],
S5),
S7 = negotiate_capabilities(n1, [{riak_core, []}], S6),
?assertEqual([{{riak_core, test}, y}], S7#state.negotiated),
%% 'use' override forces x unconditionally.
S8 = negotiate_capabilities(n1,
[{riak_core, [{test, [{use, x}]}]}],
S7),
?assertEqual([{{riak_core, test}, x}], S8#state.negotiated),
ok.
-endif. | src/riak_core_capability.erl | 0.832713 | 0.46478 | riak_core_capability.erl | starcoder |
%% Copyright (c) 2011-2012 Bash<NAME>, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%% @doc The parse transform used for lager messages.
%% This parse transform rewrites functions calls to lager:Severity/1,2 into
%% a more complicated function that captures module, function, line, pid and
%% time as well. The entire function call is then wrapped in a case that
%% checks the lager_config 'loglevel' value, so the code isn't executed if
%% nothing wishes to consume the message.
-module(lager_transform).
-include("lager.hrl").
-export([parse_transform/2]).
%% @private
%% Compiler entry point. Stashes the truncation size and an empty record
%% list in the process dictionary, tries to determine the containing OTP
%% application (used for the 'application' metadata attribute on each
%% rewritten log call), then walks and rewrites the AST.
parse_transform(AST, Options) ->
TruncSize = proplists:get_value(lager_truncation_size, Options, ?DEFAULT_TRUNCATION),
put(truncation_size, TruncSize),
erlang:put(records, []),
%% .app file should either be in the outdir, or the same dir as the source file
guess_application(proplists:get_value(outdir, Options), hd(AST)),
walk_ast([], AST).
%% Walk the module forms, remembering the current module/function names
%% (in the process dictionary, for log metadata), collecting record
%% definitions, and rewriting every function body. The accumulator is
%% built in reverse; insert_record_attribute/1 restores the order.
walk_ast(Acc, []) ->
insert_record_attribute(Acc);
walk_ast(Acc, [{attribute, _, module, {Module, _PmodArgs}}=H|T]) ->
%% A wild parameterized module appears!
put(module, Module),
walk_ast([H|Acc], T);
walk_ast(Acc, [{attribute, _, module, Module}=H|T]) ->
put(module, Module),
walk_ast([H|Acc], T);
walk_ast(Acc, [{function, Line, Name, Arity, Clauses}|T]) ->
put(function, Name),
walk_ast([{function, Line, Name, Arity,
walk_clauses([], Clauses)}|Acc], T);
%% Record declarations are stashed so the lager_records attribute can be
%% emitted at the end of the walk.
walk_ast(Acc, [{attribute, _, record, {Name, Fields}}=H|T]) ->
FieldNames = lists:map(fun({record_field, _, {atom, _, FieldName}}) ->
FieldName;
({record_field, _, {atom, _, FieldName}, _Default}) ->
FieldName
end, Fields),
stash_record({Name, FieldNames}),
walk_ast([H|Acc], T);
%% All other forms pass through unchanged.
walk_ast(Acc, [H|T]) ->
walk_ast([H|Acc], T).
%% Rewrite each function clause's body, preserving clause order.
walk_clauses(Acc, []) ->
    lists:reverse(Acc);
walk_clauses(Acc, [{clause, Line, Args, Guards, Body} | Rest]) ->
    Rewritten = {clause, Line, Args, Guards, walk_body([], Body)},
    walk_clauses([Rewritten | Acc], Rest).
%% Rewrite every statement of a clause body, preserving statement order.
walk_body(Acc, []) ->
    lists:reverse(Acc);
walk_body(Acc, [Stmt | Rest]) ->
    walk_body([transform_statement(Stmt) | Acc], Rest).
%% Rewrite lager:Severity(...) calls into lager:dispatch_log/5 calls
%% carrying module/function/line/pid/node (and application, when known)
%% metadata; all other expressions are traversed recursively so nested
%% lager calls are also rewritten.
transform_statement({call, Line, {remote, _Line1, {atom, _Line2, lager},
{atom, _Line3, Severity}}, Arguments0} = Stmt) ->
case lists:member(Severity, ?LEVELS) of
true ->
%% Default metadata AST:
%% [{module,M},{function,F},{line,L},{pid,P},{node,N}]
DefaultAttrs0 = {cons, Line, {tuple, Line, [
{atom, Line, module}, {atom, Line, get(module)}]},
{cons, Line, {tuple, Line, [
{atom, Line, function}, {atom, Line, get(function)}]},
{cons, Line, {tuple, Line, [
{atom, Line, line},
{integer, Line, Line}]},
{cons, Line, {tuple, Line, [
{atom, Line, pid},
{call, Line, {atom, Line, pid_to_list}, [
{call, Line, {atom, Line ,self}, []}]}]},
{cons, Line, {tuple, Line, [
{atom, Line, node},
{call, Line, {atom, Line, node}, []}]},
{nil, Line}}}}}},
DefaultAttrs = case erlang:get(application) of
undefined ->
DefaultAttrs0;
App ->
%% stick the application in the attribute list
concat_lists({cons, Line, {tuple, Line, [
{atom, Line, application},
{atom, Line, App}]},
{nil, Line}}, DefaultAttrs0)
end,
{Traces, Message, Arguments} = case Arguments0 of
[Format] ->
{DefaultAttrs, Format, {atom, Line, none}};
[Arg1, Arg2] ->
%% some ambiguity here, figure out if these arguments are
%% [Format, Args] or [Attr, Format].
%% The trace attributes will be a list of tuples, so check
%% for that.
case Arg1 of
{cons, _, {tuple, _, _}, _} ->
{concat_lists(Arg1, DefaultAttrs),
Arg2, {atom, Line, none}};
_ ->
{DefaultAttrs, Arg1, Arg2}
end;
[Attrs, Format, Args] ->
{concat_lists(Attrs, DefaultAttrs), Format, Args}
end,
%% Emit lager:dispatch_log(Severity, Traces, Msg, Args, TruncSize).
{call, Line, {remote, Line, {atom,Line,lager},{atom,Line,dispatch_log}},
[
{atom,Line,Severity},
Traces,
Message,
Arguments,
{integer, Line, get(truncation_size)}
]
};
false ->
%% Not a known severity; leave the call untouched.
Stmt
end;
%% Easter egg: boston_lager calls are rewritten with 'r' -> 'h' in the
%% format string literal, then treated as ordinary lager calls.
transform_statement({call, Line, {remote, Line1, {atom, Line2, boston_lager},
{atom, Line3, Severity}}, Arguments}) ->
NewArgs = case Arguments of
[{string, L, Msg}] -> [{string, L, re:replace(Msg, "r", "h", [{return, list}, global])}];
[{string, L, Format}, Args] -> [{string, L, re:replace(Format, "r", "h", [{return, list}, global])}, Args];
Other -> Other
end,
transform_statement({call, Line, {remote, Line1, {atom, Line2, lager},
{atom, Line3, Severity}}, NewArgs});
%% Generic traversal: descend into tuples and lists; leave leaves alone.
transform_statement(Stmt) when is_tuple(Stmt) ->
list_to_tuple(transform_statement(tuple_to_list(Stmt)));
transform_statement(Stmt) when is_list(Stmt) ->
[transform_statement(S) || S <- Stmt];
transform_statement(Stmt) ->
Stmt.
%% Concatenate two list ASTs by splicing B in place of A's terminating
%% nil node.
concat_lists({nil, _Line}, B) ->
    B;
concat_lists({cons, Line, Head, Tail}, B) ->
    {cons, Line, Head, concat_lists(Tail, B)}.
%% Prepend a {Name, FieldNames} record description to the per-process
%% list accumulated over this parse-transform run.
stash_record(Record) ->
    Existing = case erlang:get(records) of
                   undefined -> [];
                   List      -> List
               end,
    erlang:put(records, [Record | Existing]).
%% Splice an {attribute, _, lager_records, Records} attribute immediately
%% after the module attribute. NOTE: the input comes from walk_ast/2 in
%% reverse order, and this foldl (which prepends) restores the original
%% order as a deliberate side effect.
insert_record_attribute(AST) ->
lists:foldl(fun({attribute, Line, module, _}=E, Acc) ->
[E, {attribute, Line, lager_records, erlang:get(records)}|Acc];
(E, Acc) ->
[E|Acc]
end, [], AST).
%% Try to determine the OTP application this module belongs to, first by
%% looking for a .app/.app.src file in the compiler's outdir, then by
%% looking next to the source file (via the 'file' attribute). A found
%% app name is stored in the process dictionary by find_app_file/1.
guess_application(Dirname, Attr) when Dirname /= undefined ->
case find_app_file(Dirname) of
no_idea ->
%% try it based on source file directory (app.src most likely)
guess_application(undefined, Attr);
_ ->
ok
end;
guess_application(undefined, {attribute, _, file, {Filename, _}}) ->
Dir = filename:dirname(Filename),
find_app_file(Dir);
guess_application(_, _) ->
ok.
%% Look for exactly one .app/.app.src file in Dir and, if it parses as an
%% application resource, record the app name in the process dictionary.
%% Zero or multiple candidate files (ambiguous) yield 'no_idea'.
find_app_file(Dir) ->
case filelib:wildcard(Dir++"/*.{app,app.src}") of
[] ->
no_idea;
[File] ->
case file:consult(File) of
{ok, [{application, Appname, _Attributes}|_]} ->
erlang:put(application, Appname);
_ ->
no_idea
end;
_ ->
%% multiple files, uh oh
no_idea
end.
%%% @copyright (C) 2016, AdRoll
%%% @doc
%%%
%%% KCL MultiLangDaemon worker (record processor) behavior.
%%%
%%% A worker has the following lifecycle:
%%%
%%% INITIALIZE -> PROCESSING -> SHUTDOWN
%%%
%%% When a shard lease has been obtained, a worker is initialized to process records
%%% appearing on that shard. It is provided the opaque data which was supplied to
%%% erlmld_sup, the shard name, and initial sequence number(s) (may be undefined for
%%% new shards or if using V1 protocol), and returns an opaque worker_state() value
%%% which is passed to process_records/2 and shutdown/2.
%%%
%%% As records are read from the stream, they are b64decoded and passed to
%%% process_record/2. If a record was put on the stream using KPL aggregation, it is
%%% also deaggregated, with each sub-record provided to the worker as a single record
%%% along with a subsequence number.
%%%
%%% After processing each record, a worker returns an updated worker_state(). It may
%%% also return a checkpoint() (not necessarily the latest) containing a
%%% sequence_number() from that record or a previous record, which will result in an
%%% attempt to checkpoint the stream at the associated sequence number. If the
%%% supplied checkpoint() has an undefined sequence number, the stream is checkpointed
%%% at the most recent sequence number.
%%%
%%% Before starting to process each batch of records, a worker's ready/1 callback is
%%% called, which should return a possibly-updated worker state and possibly a
%%% checkpoint. This can be useful when a record processor is using a watchdog timer
%%% and is far behind on a stream (and so won't receive any actual records for a
%%% while), or if a stream has very low volume (records seen less frequently than
%%% desired checkpoint or flush intervals).
%%%
%%% When a shard lease has been lost or a shard has been completely processed, a worker
%%% will be shut down. If the lease was lost, the worker will receive a reason of
%%% 'zombie', and it should not checkpoint (and any checkpoint response is in error).
%%% If the shard was closed, the reason will be 'terminate' and the worker should
%%% return a checkpoint response. That checkpoint should either have an undefined
%%% sequence number, or it should be the most recent sequence number which was provided
%%% to process_record/2.
%%%
%%% If a worker returns an error response, it is fatal.
%%%
%%% See also: https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocol.java
%%%
%%% @end
%%% Created : 18 Nov 2016 by <NAME> <<EMAIL>>
-module(erlmld_worker).
-include("erlmld.hrl").
%% Called once when a shard lease is obtained: given the opaque data
%% supplied to erlmld_sup, the shard id, and the initial sequence number
%% (may be undefined), returns the opaque worker state.
-callback initialize(term(), shard_id(), sequence_number() | undefined) ->
{ok, worker_state()}
| {error, term()}.
%% Called before each batch of records; may return an updated state and
%% optionally a checkpoint (useful for low-volume streams).
-callback ready(worker_state()) ->
{ok, worker_state()}
| {ok, worker_state(), checkpoint()}
| {error, term()}.
%% Called for each (de-aggregated, b64-decoded) record; may return a
%% checkpoint along with the updated state.
-callback process_record(worker_state(), stream_record()) ->
{ok, worker_state()}
| {ok, worker_state(), checkpoint()}
| {error, term()}.
%% Notification that a previously requested checkpoint succeeded at the
%% given sequence number.
-callback checkpointed(worker_state(), sequence_number(), checkpoint()) ->
{ok, worker_state()}
| {error, term()}.
%% Called on lease loss ('zombie': must not checkpoint) or shard close
%% ('terminate': should return a checkpoint). An error return is fatal.
-callback shutdown(worker_state(), shutdown_reason()) ->
ok
| {ok, checkpoint()}
| {error, term()}.
%%
%% Join Query Node processes
%%
%% @copyright 2014-2019 UP FAMNIT and Yahoo Japan Corporation
%% @version 0.3
%% @since May, 2014
%% @author <NAME> <<EMAIL>>
%% @author <NAME> <<EMAIL>>
%%
%% @doc Join query node is implemented as independent gen_process. Join query node is
%% a state-machine realizing protocol that has incoming and outcoming messages.
%% Each message is implemented as co-routine.
%%
%% State-machine has the following states: inactive, active, wait_next_outer, wait_next_inner,
%% and eos. Message start set state of protocol to active. Message eval moves state
%% to wait_next_outer. After this, state alternates between wait_next_outer and wait_next_inner.
%% State moves to eos after end of outer streams is detected.
%%
%% Communication with other query nodes forming query tree is realized by input and output
%% streams. Join query node has input queues for inner and outer query nodes, and, input and
%% output queues to comminicate with parent query node. Input queue stores messages from
%% a child query node to be processed by join query node. Output queue stores messages to
%% be sent to the parent. Detailed presentation of protocol used between two query nodes is
%% given in {@link query_node}.
%%
%% <table bgcolor="lemonchiffon">
%% <tr><th>Section Index</th></tr>
%% <tr><td>{@section join algorithm}</td></tr>
%% <tr><td>{@section property list}</td></tr>
%% <tr><td>{@section handle_call (synchronous) message API}</td></tr>
%% <tr><td>{@section handle_cast (asynchronous) message API}</td></tr>
%% </table>
%%
%% == join algorithm ==
%%
%% (LINK: {@section join algorithm})
%%
%% Join query node implements <i>join method</i> which is a variant of indexed nested-loop
%% join algorithm. Join query node is independent gen_server process that can have multiple
%% outer query nodes as well as multiple inner query nodes--each of them is implemented as
%% separate gen_server process.
%% Since we suppose that every local triple-store indexes triple table on all possible
%% subsets of SPO, all possible access methods are supported by indexes.
%%
%% Algorithm of join method is defined as follows. Each graph
%% obtained from outer query nodes causes initialization of inner query nodes using message
%% eval. Inicialization of inner query nodes uses the values of join variables obtained
%% from outer graph. Only those graphs are retrieved from inner query nodes that match previously
%% obtained outer graph. Each outer and inner graphs are merged into one graph which is
%% then sent to parent query node.
%%
%% Join algorithm is implemented as join protocol that can be best descibed in terms of
%% main loops of protocol. It includes three main loops: outer-loop, inner-loop and
%% transport-loop. All three loops are triggered by appropriate message: data-outer, data-inner,
%% and empty messages, respectively. Each of the loops is composed of three main phases:
%% (1) storing the message in appropriate queue, (2) processing the message, and (3)
%% checking the conditions and moving control to other loops if needed.
%%
%% == property list ==
%%
%% (LINK: {@section property list})
%%
%% The gen_server process uses following properties holded by {@link
%% jqn_state()}.
%%
%% <table border="3">
%% <tr><th>Name</th><th>Type</th><th>Description</th></tr>
%%
%% <tr> <td>created</td> <td>boolean()</td> <td>true denotes that
%% process dictionary was created and used. false denotes that
%% completely new process.</td> </tr>
%%
%% <tr> <td>id</td> <td>string()</td> <td>query node identifier</td> </tr>
%%
%% <tr> <td>pid</td> <td>pid()</td> <td>process id</td> </tr>
%%
%% <tr> <td>state</td> <td>atom()</td> <td>active | inactive | wait_next_outer |
%% wait_next_inner | eos</td> </tr>
%%
%% <tr> <td>gp</td> <td>maps:map()</td> <td>graph represented as
%% mapping from {@type query_node:qn_id()} to {@type query_node:qn_triple_pattern()}</td> </tr>
%%
%% <tr> <td>select_pred</td> <td>query_node:qn_select_predicate()</td> <td>selection
%% predicate in the form of abstract syntax tree of type {@type query_node:qn_select_predicate()}
%% </td> </tr>
%%
%% <tr> <td>project_list</td> <td>query_node:qn_project_list()</td> <td>list of
%% variables to be projected</td> </tr>
%%
%% <tr> <td>project_out</td> <td>[query_node::qn_id()]</td> <td>list of
%% query node id-s identifying triples to be projected out of resulting graph
%% </td> </tr>
%%
%% <tr> <td>column_row</td> <td>{@link
%% triple_distributor:td_node_location()}</td> <td>location of query
%% node process</td> </tr>
%%
%% <tr> <td>parent</td> <td>pid()</td> <td>process id of parent query
%% node</td> </tr>
%%
%% <tr> <td>outer</td> <td>[pid()]</td> <td>process ids of outer
%% children query nodes</td> </tr>
%%
%% <tr> <td>inner</td> <td>[pid()]</td> <td>process ids of inner
%% children query nodes</td> </tr>
%%
%% <tr> <td>join_vars</td> <td>[{@link query_node:qn_var()}]</td> <td>List of
%% variables used for joining.</td> </tr>
%%
%% <tr> <td>vars_pos</td> <td>maps:map()</td> <td>mapping from {@link
%% query_node:qn_var()} to {@link jqn_var_position()}</td> </tr>
%%
%% <tr> <td>vars_values</td> <td>maps:map()</td> <td>mapping from
%% {@link query_node:qn_var()} to string() (not used)</td> </tr>
%%
%% <tr> <td>wait</td> <td>boolean()</td> <td>indicate whether the
%% process is in wait state or not.</td> </tr>
%%
%% <tr> <td>inner_outer</td> <td>inner | outer</td> <td> Position to
%% its parent query node.</td> </tr>
%%
%% <tr> <td>inner_graph</td> <td>{@link query_node:qn_graph()}</td> <td>current
%% graph data from inner child</td> </tr>
%%
%% <tr> <td>outer_graph</td> <td>{@link query_node:qn_graph()}</td> <td>current
%% graph data from outer child</td> </tr>
%%
%% <tr> <td>state_of_outer_streams</td> <td>maps:map()</td> <td> Map
%% structure from outer child pid() to atom() (alive | eos).</td> </tr>
%%
%% <tr> <td>empty_outer_sent</td> <td>boolean()</td> <td>N empty messages
%% are sent to each of outer processes when eval message of join_query_node
%% is processed.</td> </tr>
%%
%% <tr> <td>state_of_inner_streams</td> <td>maps:map()</td> <td> Map
%% structure from inner child pid() to atom() (alive | eos).</td> </tr>
%%
%% <tr> <td>empty_inner_sent</td> <td>boolean()</td> <td>N empty messages
%% are sent to each of inner processes after first eval message is sent
%% to them.
%% </td> </tr>
%%
%% <tr> <td>queue_from_outer</td> <td>queue:queue()</td> <td> Queue storing
%% graphs from outer child query node while processing one of previous
%% outer graphs.</td> </tr>
%%
%% <tr> <td>queue_from_parent</td> <td>queue:queue()</td> <td> Queue storing
%% empty messages from parent when graph to be sent to parent is not
%% yet available.</td> </tr>
%%
%% <tr> <td>queue_to_parent</td> <td>queue:queue()</td> <td> Queue storing
%% graphs (complete messages) to be sent to parent but there is no empty message
%% available.</td> </tr>
%%
%% <tr> <td>pause</td> <td>boolean()</td> <td>query stops evaluating
%% if true and evaluates normally if false</td> </tr>
%%
%% <tr> <td>start_date_time</td> <td>calendar:datetime()</td>
%% <td>started date and time of the process.</td> </tr>
%%
%% <tr> <td>b3s_state_pid</td> <td>{@type node_state:ns_pid()}</td>
%% <td>process id of b3s_state.</td> </tr>
%%
%% <tr> <td>benchmark_task_pid</td> <td>{@type
%% node_state:ns_pid()}</td> <td>process id of executing benchmark
%% task.</td> </tr>
%%
%% <tr> <td>result_record_max</td> <td>integer()</td> <td>Max number
%% of records to be reported.</td> </tr>
%%
%% </table>
%%
%% == handle_call (synchronous) message API ==
%%
%% (LINK: {@section handle_call (synchronous) message API})
%%
%% === {start, QueryNodeId, QueryId, SessionId, Self, GraphPattern, SelectPred, ProjectList, ParentPid, OuterPids, InnerPids, VarsPositions, JoinVars} ===
%%
%% Initialization of join query node process. All parameters are
%% saved to process dictionary.
%% (LINK: {@section @{start, QueryNodeId, QueryId, SessionId, Self, GraphPattern, SelectPred, ProjectList, ParentPid, OuterPids, InnerPids, VarsPositions, JoinVars@}})
%%
%% QueryNodeId is {@link query_node:qn_id()}, QueryId is string(), SessionId is string(),
%% Self is {@link node_state:ns_pid()}, GraphPattern is {@link query_node:qn_graph_pattern()},
%% SelectPred is {@link query_node:qn_select_predicate()},
%% ProjectList is {@link query_node:qn_project_list()}, ParentPid is pid(),
%% OuterPids is [pid()], InnerPids is [pid()], VarsPositions is {@link
%% jqn_var_position()}, JoinVars is [{@link query_node:qn_var()}].
%%
%% This request is implemented by {@link hc_start/10}.
%%
%% === {eval, VarsValues} ===
%%
%% Initiate evaluation of query node. The state of
%% query node must be either active or eos so that eval message is executed.
%%
%% Firstly, initiate evaluation in all children, and, then send N empty
%% messages to each child so that they can begin sending results.
%% Note than message passing for eval message is synchronous. This means that
%% complete query tree is locked while evaluation is initiated.
%%
%% VarsValues is query_node:qn_var_val_map().
%% It includes variables and values to be set in graph pattern
%% of query node. In the case value of VarsValues is [] then graph pattern
%% of query node is not changed.
%%
%% Message eval can be sent to query node multiple times. In each instance,
%% process dictionary is initialized to the initial state. After eval is executed
%% query node can expect empty messages from parent.
%% (LINK: {@section @{eval, VarsValues@}})
%%
%% VarsValues is {@link query_node:qn_var_val_map()}. This request is implemented by {@link hc_eval/1}.
%%
%% === {get_property, Name} ===
%%
%% Return the value of specified property name. Variable Name is an
%% atom(). This request is implemented by {@link
%% hc_get_property/2}.
%%
%% == handle_cast (asynchronous) message API ==
%%
%% (LINK: {@section handle_cast (asynchronous) message API})
%%
%% === {data_outer, ParentPid, Graph} ===
%%
%% Processing data message from outer child. In the case join query node is in state
%% wait_next_inner, complete data message is stored in queue_from_outer to be processed
%% later.
%%
%% In the case join query node is in state wait_next_outer then data message from outer
%% children is set as current outer message. Inner query nodes are reset using join values
%% of common variables that are set in graph-pattern of outer query nodes.
%%
%% When all outer query nodes are in state eos (end of stream) then end_of_stream is sent
%% to parent and state of this query node is set to eos.
%% (LINK: {@section @{data_outer, Pid, Graph@}})
%%
%% Pid is pid() and Graph is query_node:qn_graph(). This request is implemented by {@link hc_data_outer/1}.
%%
%% === {data_inner, Pid, Graph} ===
%%
%% Processing data message from inner children. Inner graph is joined with
%% current outer graph stored as outer_graph in process dictionary. Resulted graph
%% is sent to parent as outer data message. While inner graphs are coming from
%% inner children, query node is in state wait_next_inner.
%%
%% More graphs from outer child may be stored in queue_from_outer. State may change to
%% wait_next_outer in the case all inner streams are terminated and queue_from_outer
%% is not empty. In this case function hc_data_outer is called (from hc_data_inner)
%% for outer graph from queue. (LINK: {@section @{data_inner, Pid, Graph@}})
%%
%% Pid is pid(), Graph is query_node:qn_graph(). This request is implemented by {@link hc_data_inner/1}.
%%
%% === {empty, ParentPid} ===
%%
%% (LINK: {@section @{empty, ParentPid@}}).
%%
%% Processing empty message from parent. If state of query node is
%% eos or inactive then simply ignore empty message. If queue_to_parent
%% does not include any data message prepared for parent then empty
%% message is stored in queue_from_parent and used later. Finally, if
%% there is a message in queue_to_parent then send it to parent.
%%
%% ParentPid is pid().
%%
%% This request is implemented by {@link hc_empty/1}.
%%
%% @type jqn_state() = maps:map(). Map
%% structure that manages properties for operating the gen_server
%% process.
%%
%% @type jqn_var_position() = maps:map(). Mapping from {@link query_node:qn_var()}
%% to [{{@link query_node:qn_id()}, integer()}]. List of pairs represent
%% positions of some variable in a triple pattern of given query. Pairs include
%% query node id of triple pattern, and, position of variable in triple pattern
%% (1: id, 2:sub, 3:prd, 4:obj).
%%
-module(join_query_node).
-behavior(gen_server).
-export(
[
child_spec/1, spawn_process/2, receive_empty/0, hcst_sne/0,
init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3
]).
-include_lib("stdlib/include/qlc.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("record.hrl").
%% ======================================================================
%%
%% gen_server behavior
%%
%%
%% init/1
%%
%% @doc Initialize a join_query_node process.
%%
%% @spec init([]) -> {ok, jqn_state()}
%%
init([]) ->
process_flag(trap_exit, true),
%% set main pd keys
put(wait, true),
put(pid, self()),
put(start_date_time, calendar:local_time()),
put(mq_debug, gen_server:call(node_state, {get, mq_debug})),
%% init queues
query_node:queue_init(from_parent, plain, empty),
query_node:queue_init(to_parent, output, data_outer),
query_node:queue_init(from_inner, input, data_inner),
query_node:queue_init(from_outer, input, data_outer),
info_msg(init, [{state,hc_save_pd()}], done, -1),
{ok, hc_save_pd()}.
%%
%% handle_call/3
%%
%% @doc Handle synchronous query requests.
%%
%% @spec handle_call(term(), {pid(), term()}, jqn_state()) -> {reply, term(), jqn_state()}
%%
handle_call({start, QueryNodeId, QueryId, SessionId, Self, GraphPattern, SelectPred, ProjectList, ParentPid, OuterPid, InnerPid,
VarsPositions, JoinVars}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [Self, {message,start}, {all,get()}, get(state)], message_received, 10),
hc_start(QueryNodeId, QueryId, SessionId, Self, GraphPattern, SelectPred, ProjectList, ParentPid, OuterPid, InnerPid, VarsPositions, JoinVars),
{reply, ok, hc_save_pd()};
handle_call({get_property, all}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,get_property}, {name,all}, {value,get()}, get(state)], message_received, 10),
{reply, get(), hc_save_pd()};
handle_call({get_property, Name}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,get_property}, {name,Name}, {value,get(Name)}, get(state)], message_received, 10),
{reply, get(Name), hc_save_pd()};
handle_call({get, Name}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,get}, {name,Name}, {value,get(Name)}, get(state)], message_received, 10),
{reply, get(Name), hc_save_pd()};
handle_call({eval, VarsValues}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,eval}, {vars_values,VarsValues}, {all,get()}, get(state)], message_received, 10),
hc_eval(VarsValues, get(state)),
{reply, ok, hc_save_pd()};
%% default
handle_call(Request, From, State) ->
R = {unknown_request, Request},
error_msg(handle_call, [get(self), Request, From, get()], R),
{reply, R, State}.
%%
%% handle_cast/2
%%
%% @doc Handle asynchronous query requests.
%%
%% @spec handle_cast(term(), jqn_state()) -> {noreply, jqn_state()}
%%
handle_cast({empty, From}, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
%% insert into queue
query_node:queue_write(from_parent, {empty, From}),
%% process empty message
info_msg(handle_cast, [get(self), {message,empty}, {from,From}, {queue_from_parent,get(queue_from_parent)}, get(state)], message_received, 30),
hc_empty(get(state)),
{noreply, hc_save_pd()};
handle_cast({data_inner, From, Block}, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_cast, [get(self), {message,data_inner}, {from,From}, {block,Block}, get(state)], message_received, 30),
%% insert into queue
query_node:queue_write(from_inner, {data_inner, From, Block}),
hc_data_inner(get(state)),
{noreply, hc_save_pd()};
handle_cast({data_outer, From, Block}, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_cast, [get(self), {message,data_outer}, {from,From}, {block,Block}, get(state)], message_received, 30),
%% insert into queue
query_node:queue_write(from_outer, {data_outer, From, Block}),
%% process outer block
hc_data_outer(get(state)),
{noreply, hc_save_pd()};
handle_cast({stop, From}, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_cast, [get(self), {message,stop}, {from,From}, get(state)], message_received, 10),
%% erase complete PD
erase(),
{noreply, hc_save_pd()};
%% default
handle_cast(Request, State) ->
R = {unknown_request, Request},
error_msg(handle_cast, [get(self), {request,Request}, {state,State}, get()], R),
{noreply, hc_save_pd()}.
%%
%% @doc Restore process dictionaries from state map structure.
%%
%% @spec hc_restore_pd([{atom(), term()}] | undefined, jqn_state()) -> ok
%%
hc_restore_pd(undefined, State) ->
hc_restore_pd_1(maps:to_list(State));
hc_restore_pd(_, _) ->
ok.
hc_restore_pd_1([]) ->
ok;
hc_restore_pd_1([{K, V} | T]) ->
put(K, V),
hc_restore_pd_1(T).
%%
%% @doc Save process all dictionary contents into state map structure.
%%
%% @spec hc_save_pd() -> jqn_state()
%%
hc_save_pd() ->
maps:from_list(get()).
%%
%% handle_info/2
%%
%% @doc Handle exceptional query requests.
%%
%% @spec handle_info(term(), jqn_state()) -> {noreply, jqn_state()}
%%
handle_info(_Info, State) ->
{noreply, State}.
%%
%% terminate/2
%%
%% @doc Process termination.
%%
%% @spec terminate(term(), jqn_state()) -> none()
%%
terminate(Reason, State) ->
P = pid_to_list(self()),
info_msg(terminate, [get(self), {reason,Reason}, {state,State}, {pid,P}, get(state)], done, -1),
ok.
%%
%% code_change/3
%%
%% @doc Process code change action.
%%
%% @spec code_change(term(), jqn_state(), term()) -> {ok, jqn_state()}
%%
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% ======================================================================
%%
%% utility
%%
%%
%% @doc Report an error issue to the error_logger.
%%
%% @spec error_msg(atom(), term(), term()) -> ok
%%
error_msg(FunName, Argument, Result) ->
node_state:error_msg(?MODULE, FunName, Argument, Result).
%%
%% @doc Report an information issue to the error_logger if current
%% debug level is greater than ThresholdDL.
%%
%% @spec info_msg(atom(), term(), term(), integer()) -> ok
%%
info_msg(FunName, Argument, Result, ThresholdDL) ->
node_state:info_msg(?MODULE, FunName, Argument, Result, ThresholdDL).
%% ======================================================================
%%
%% api
%%
%%
%% @doc Return child spec for this process. It can be used in
%% supervisor:init/0 callback implementation.
%%
%% @spec child_spec(Id::atom()) -> supervisor:child_spec()
%%
child_spec(Id) ->
GSOpt = [{local, Id}, join_query_node, [], []],
StartFunc = {gen_server, start_link, GSOpt},
Restart = permanent,
Shutdwon = 1000,
Type = worker,
Modules = [join_query_node],
{Id, StartFunc, Restart, Shutdwon, Type, Modules}.
%%
%% @doc Spawn join_query_node process with given local identifier at given node.
%%
%% @spec spawn_process( Id::atom(), Node::node() ) -> node_state:ns_pid()
%%
spawn_process(Id, Node ) ->
ChildSpec = join_query_node:child_spec(Id),
supervisor:start_child({b3s, Node}, ChildSpec),
{Id, Node}.
%% ======================================================================
%%
%% handle call/cast implementation
%%
%%
%% hc_start/12
%%
%% @doc Initialize join query node process.
%%
%% @spec hc_start(query_node:qn_id(), string(), string(), node_state:ns_pid(), [query_node:qn_triple_pattern()],
%% query_node:qn_select_predicate(), query_node:qn_project_list(),
%% node_state:ns_pid(), [node_state:ns_pid()], [node_state:ns_pid()],
%% jqn_var_position(), [query_node:qn_var()]) -> ok
%%
hc_start(QueryNodeId, QueryId, SessionId, Self, GraphPattern, SelectPred, ProjectList, ParentPid, OuterPids, InnerPids, VarsPositions, JoinVars) ->
put(created, true),
put(qnode, join),
put(node_id, QueryNodeId),
put(query_id, QueryId),
put(session_id, SessionId),
put(self, Self),
put(state, active),
put(gp, GraphPattern),
put(select_pred, SelectPred),
put(project_list, ProjectList),
put(parent, ParentPid),
put(outer, OuterPids),
put(inner, InnerPids),
put(vars_pos, VarsPositions),
put(join_vars, JoinVars),
put(empty_outer_sent, false),
put(empty_inner_sent, false),
put(wait, false),
put(pause, false),
erase(sid_table_name),
erase(sid_max_id),
erase(di_cursor__),
erase(di_ets__),
%% benchmark stuff
BSP = b3s_state_pid,
BMT = benchmark_task,
BTP = benchmark_task_pid,
put(BSP, gen_server:call(node_state, {get, BSP})),
{_, FSN} = get(BSP),
put(BTP, {gen_server:call(get(BSP), {get, BMT}), FSN}),
%% store num-of-empty-msgs in PD
{ok, N} = application:get_env(b3s, num_of_empty_msgs),
put(num_of_empty_msgs, N),
%% store block-size in PD
BSZ = block_size,
put(BSZ, gen_server:call(get(BSP), {get, BSZ})).
%% @doc Send N empty messages to Pid. N is stored in config.
send_N_empty(Pid) ->
N = get(num_of_empty_msgs),
send_N_empty_1(Pid, N),
info_msg(send_N_empty, [get(self), {send_to, Pid}, {num, N}], done, 50).
send_N_empty_1(_, 0) ->
ok;
send_N_empty_1(Pid, N) ->
gen_server:cast(Pid, {empty, get(self)}),
info_msg(send_cast, [get(self), {message,empty}, {to,Pid}, {invoker,send_N_empty}, get(state)], message_sent, 30),
send_N_empty_1(Pid, N-1).
receive_empty() ->
receive
{_, M} -> M
end,
info_msg(receive_empty, [get(self), {message, M}], done, 50),
M.
%%
%% @doc Test function for hc_start.
%%
hc_start_test_() ->
b3s:start(),
b3s:stop(),
b3s:start(),
b3s:bootstrap(),
{inorder,
[
% ?_assertMatch(ok, b3s:start()),
% {generator, fun()-> hcst_sne() end},
{generator, fun()-> hcst_q01() end},
?_assertMatch(ok, b3s:stop())
]}.
hcst_sne() ->
info_msg(hcst_sne, [get(self)], start, 50),
S = self(),
{inorder,
[
?_assertMatch(ok, send_N_empty(S)),
?_assertMatch({empty, S}, receive_empty()),
?_assertMatch({empty, S}, receive_empty())
]}.
hcst_q01() ->
info_msg(hcst_q01, [get(self)], start, 50),
QueryNodeId = "3",
QueryId = "1",
SessionId = "1",
Id = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId),
JQN1 = join_query_node:spawn_process(Id, node()),
QNState = active,
GraphPattern = #{"1" => {"?id1", "<Japanese>", "?prd", "?obj1"},
"2" => {"?id2", "<Slovenian>", "?prd", "?obj2"}},
SelectPred = none,
ProjectList = none,
ParentPid = self(),
OuterPids = [self()],
InnerPids = [self()],
VarsPositions = #{"?id1" => [{"1", 1}],
"?id2" => [{"2", 1}],
"?prd" => [{"1", 3}, {"2", 3}],
"?obj1" => [{"1", 4}],
"?obj2" => [{"2", 4}]},
JoinVars = ["?prd"],
M1 = {start, QueryNodeId, QueryId, SessionId, JQN1,
GraphPattern, SelectPred, ProjectList,
ParentPid, OuterPids, InnerPids, VarsPositions, JoinVars},
GP = get_property,
{inorder,
[
?_assertMatch(true, gen_server:call(JQN1, {GP, wait})),
?_assertMatch(ok, gen_server:call(JQN1, M1)),
?_assertMatch(QueryNodeId, gen_server:call(JQN1, {GP, node_id})),
?_assertMatch(GraphPattern, gen_server:call(JQN1, {GP, gp})),
?_assertMatch(QNState, gen_server:call(JQN1, {GP, state})),
?_assertMatch(ParentPid, gen_server:call(JQN1, {GP, parent})),
?_assertMatch(OuterPids, gen_server:call(JQN1, {GP, outer})),
?_assertMatch(InnerPids, gen_server:call(JQN1, {GP, inner})),
?_assertMatch(VarsPositions, gen_server:call(JQN1, {GP, vars_pos})),
?_assertMatch(JoinVars, gen_server:call(JQN1, {GP, join_vars})),
?_assertMatch(false, gen_server:call(JQN1, {GP, wait})),
?_assertMatch(undefined, gen_server:call(JQN1, {GP, inner_outer}))
]}.
%%
%% hc_eval/2
%%
%% @doc Initiate evaluation of join query node.
%%
%% @spec hc_eval(jqn_var_position(), atom()) -> ok
%%
% ignoring VarsValues since we have currently only left-deep trees [TODO]
hc_eval(_, State)
when (State =:= eos) or (State =:= active) ->
%% send eval to each pid in outer children and mark state of outer stream 'alive'.
put(state_of_outer_streams, #{}),
F1 = fun (Pid) ->
gen_server:call(Pid, {eval, []}),
info_msg(send_call, [get(self), {message,eval}, {to,Pid}, {gp,get(gp)}, {vars_values,[]}, {invoker,hc_eval}, get(state)], message_sent, 30),
M = get(state_of_outer_streams),
put(state_of_outer_streams, maps:put(Pid, alive, M))
end,
lists:map(F1, get(outer)),
%% send empty messages
case get(empty_outer_sent) of
false -> lists:map(fun send_N_empty/1, get(outer));
true -> ok
end,
put(empty_outer_sent, true),
%% compute list of qn id-s to be projected out
query_node:project_prepare(get(project_list)),
%% update parameters
BSP = b3s_state_pid,
BSZ = block_size,
put(BSZ, gen_server:call(get(BSP), {get, BSZ})),
%% waiting for data from outer
put(state, wait_next_outer);
hc_eval(_, State) ->
error_msg(hc_eval, [get(self), {all,get()}, State], wrong_state),
ok.
%%
%% hc_empty/1
%%
%% @doc Co-routine for processing empty message from parent. It is expected that there is
%% at least one empty message in queue from_parent, either the one that has just arrived
%% as message, or, some other procedure has checked that the message is in the queue.
%%
%% @spec hc_empty( State::atom() ) -> ok
%%
hc_empty(undefined) ->
%% leave empty message in queue from parent
info_msg(hc_empty, [get(self), {from,get(parent)}, get(state)], empty_before_start, 50);
hc_empty(active) ->
%% leave empty message in queue from parent
info_msg(hc_empty, [get(self), {from,get(parent)}, get(state)], empty_before_eval, 50);
hc_empty(State)
when (State =:= wait_next_outer) or (State =:= wait_next_inner) or
(State =:= eos) ->
%% check if there are messages to parent and send data message to parent
case query_node:queue_prepared(to_parent) of
true ->
%% read empty message from queue (there must be at least one msg)
{empty, _} = query_node:queue_read(from_parent),
%% get data message from queue_to_parent and send it
Msg = query_node:queue_read(to_parent),
gen_server:cast(get(parent), Msg),
info_msg(send_cast, [get(self), {message,Msg}, {to,get(parent)}, {invoker,hc_empty}, get(state)], message_sent, 30);
false ->
info_msg(hc_empty, [get(self), {to,get(parent)}, get(state)], no_messages_to_parent, 50)
end,
%% next actions to be done
%% state==wait_next_outer
QEFO = query_node:queue_empty(from_outer),
case {get(state), QEFO} of
{wait_next_outer, false} ->
%% if state is wait_next_outer and there are messages waiting from outer query nodes
%% than empty message wakes up processing of outer graph
hc_data_outer(get(state));
_ -> ok
end,
%% state==wait_next_inner
QEFI = query_node:queue_empty(from_inner),
case {get(state), QEFI} of
{wait_next_inner, false} ->
%% if state is wait_next_inner and there are messages waiting from inner query nodes
%% than empty message wakes up processing of inner graphs
hc_data_inner(get(state));
_ -> ok
end,
%% if there is another pair of empty-data messages run hc_empty again
case {query_node:queue_prepared(from_parent), query_node:queue_prepared(to_parent)} of
{true,true} -> hc_empty(get(state));
_ -> ok
end;
hc_empty(State) ->
error_msg(hc_empty, [get(self), {all,get()}, State], wrong_state).
%%
%% hc_data_outer/1
%%
%% @doc Co-routine for processing data message from outer child.
%%
%% @spec hc_data_outer(atom()) -> ok
%%
hc_data_outer(State) when State == wait_next_inner ->
%% data message left in queue from_outer to be processed later
info_msg(hc_data_outer, [get(self), get(state)], leaving_data_outer_in_queue, 50);
hc_data_outer(State) when State == wait_next_outer ->
%% get outer graph from queue_from_outer
{From, Graph} = query_node:queue_get(from_outer),
%% get status for end of block
BE = query_node:queue_block_end(from_outer),
%% send empty if at the end of block
case BE of
true -> %% send empty message back to outer
EMsg = {empty, get(self)},
gen_server:cast(From, EMsg),
info_msg(send_cast, [get(self), {message,EMsg}, {to,From}, {invoker,hc_data_outer}, get(state)], message_sent, 30);
_ -> ok
end,
%% store graph for inner loop
put(outer_graph, Graph),
%% outer loop actions for Fraph read from outer queue
case Graph of
end_of_stream ->
hcdo_process_eos(From),
info_msg(hc_data_outer, [get(self), {from,From}, {graph,Graph}, get(state)], outer_eos_processed, 50);
_ -> hcdo_process_graph(Graph),
info_msg(hc_data_outer, [get(self), {from,From}, {graph,Graph}, get(state)], outer_graph_processed, 50)
end,
%% next protocol actions
%% state = wait_next_inner?
QPFP = query_node:queue_prepared(from_parent),
QEFI = query_node:queue_empty(from_inner),
case {get(state), QPFP, QEFI} of
{wait_next_inner, true, false} ->
%% state changed from wait_next_outer to wait_next_inner.
%% outer graph has been processed and eval has been initiated for all inner qns.
%% QEFI=false should not happen since inner messages could not be received. (?)
hc_data_inner(get(state)),
info_msg(hc_data_outer, [get(self), get(state)], call_inner_loop, 50);
_ -> ok
end,
%% state = wait_next_outer?
QEFO = query_node:queue_empty(from_outer),
case {get(state), QEFO} of
{wait_next_outer, false} ->
%% eos has been processed but not all streams are finished.
%% therefore, state did not change (from wait_next_outer) and next outer
%% message needs to be processed. we do not wait for empty message since inner
%% streams have to be processed before.
hc_data_outer(get(state)),
info_msg(hc_data_outer, [get(self), get(state)], call_outer_loop, 50);
_ -> ok
end;
hc_data_outer(State) ->
error_msg(hc_data_outer, [get(self), {all,get()}, {state,State}, get(state)], wrong_state ).
hcdo_process_eos(From) ->
%% mark end_of_stream of outer process
M = get(state_of_outer_streams),
put(state_of_outer_streams, maps:put(From, eos, M)),
%% count finished streams and send eos to parent if 0
F3 = fun (alive) -> true;
(eos) -> false
end,
NumAlive = length(lists:filter(F3, maps:values(get(state_of_outer_streams)))),
case NumAlive of
0 -> %% send parent eos and set state eos
hcdo_send_parent_eos(),
info_msg(hc_data_outer, [get(self), get(state)], query_evaluation_completed, 50);
_ -> ok
end.
hcdo_process_graph(Graph) ->
%% make qn_var_val_map()
F1 = fun (V) ->
{V, hce_get_var_value(V, Graph)}
end,
JoinVarValues = lists:map(F1, get(join_vars)),
%%info_msg(hcdo_process_graph, [get(self), {graph,Graph}, {join_vars,get(join_vars)}, {join_var_values,JoinVarValues}, get(state)], debug_join_var_values, 50),
%% send eval and empty messages to inner nodes and update state of inner streams
put(state_of_inner_streams, #{}),
F2 = fun (Pid) ->
%% reset inner child
gen_server:call(Pid, {eval, JoinVarValues}),
info_msg(send_call, [get(self), {message,eval}, {invoker,hcdo_process_outer}, {to,Pid}, {gp,get(gp)},
{join_var_values,JoinVarValues}, get(state)], message_sent, 30),
%% first time? send empty messages
case get(empty_inner_sent) of
false -> send_N_empty(Pid);
true -> ok
end,
%% remember state of inner child
M = get(state_of_inner_streams),
put(state_of_inner_streams, maps:put(Pid, alive, M))
end,
lists:map(F2, get(inner)),
put(empty_inner_sent, true),
% mark state as waiting for inner messages
put(state, wait_next_inner).
hcdo_send_parent_eos() ->
%% store eos in queue to_parent and flush it
query_node:queue_put(to_parent, end_of_stream),
query_node:queue_flush(to_parent),
% check if queue_from_parent includes empty messages
case query_node:queue_prepared(from_parent) of
true -> %% there is empty message from parent
{empty, _} = query_node:queue_read(from_parent),
Msg = query_node:queue_read(to_parent),
%% send parent last block of to_parent
gen_server:cast(get(parent), Msg),
info_msg(send_cast, [get(self), {message,Msg}, {to,get(parent)}, {invoker,hcdo_send_parent_eos}, get(state)], message_sent, 30);
false -> %% empty queue from_parent, so leave message in queue to_parent.
%% msg will be processed when the first empty message comes from_parent.
ok
end,
%% move state to eos
put(state, eos).
hce_get_var_value(Variable, Graph) ->
VP = get(vars_pos),
%% get position and tuple
%% LVP is [{NodeId, Pos}|_]
LVP = maps:get(Variable, VP),
{NodeId,Pos} = hce_get_node_id(LVP, Graph),
Tuple = maps:get(NodeId, Graph),
%% Pos+1 since first component is table-name
element(Pos+1, Tuple).
hce_get_node_id([{NID,Pos}|Rest], Graph) ->
case maps:is_key(NID, Graph) of
true -> {NID,Pos};
false -> hce_get_node_id(Rest, Graph)
end;
hce_get_node_id([], Graph) ->
error_msg(hce_get_node_id, [get(self), {graph,Graph}, {all,get()}, get(state)], cant_find_var_val).
%%
%% hc_data_inner/1
%%
%% @doc Co-routine for processing data block from inner children.
%%
%% @spec hc_data_inner(State::atom()) -> ok|fail
%%
hc_data_inner(State) when State =/= wait_next_inner ->
error_msg(hc_data_inner, {all,get()}, wrong_state);
hc_data_inner(State) when State =:= wait_next_inner ->
%% retrieve inner graph from queue from_inner and save it in PD
{From, Graph} = query_node:queue_get(from_inner),
put(inner_graph, Graph),
%% get status for end of block
BE = query_node:queue_block_end(from_inner),
%% send empty if at the end of block
case BE of
true -> %% send empty message back to inner
EMsg = {empty, get(self)},
gen_server:cast(From, EMsg),
info_msg(send_cast, [get(self), {message,EMsg}, {to,From}, {invoker,hc_data_inner}, get(state)], message_sent, 30);
_ -> ok
end,
%% do action for outer graph read from queue
case Graph of
end_of_stream ->
%% we are at end of some inner stream
hcdi_process_eos(From);
%%info_msg(hc_data_inner, [get(self), {from,From}, {graph,Graph}, get(state)], inner_eos_processed, 50)
_ -> %% join inner graph with outer and send it to parent
hcdi_process_graph(Graph)
%%info_msg(hc_data_inner, [get(self), {from,From}, {graph,Graph}, get(state)], inner_graph_processedd, 50)
end,
%% next protocol actions for inner loop
%% state = wait_next_inner?
QPFP = query_node:queue_prepared(from_parent),
QEFI = query_node:queue_empty(from_inner),
case {get(state), QPFP, QEFI} of
{wait_next_inner, true, false} ->
%% continue inner loop if we stayed in wait_next_inner and
%% there is another inner graph to process and there is empty message
%% to be used
hc_data_inner(get(state)),
info_msg(hc_data_inner, [get(self), get(state)], recursive_loop_inner, 50);
_ -> ok
end,
%% state = wait_net_outer?
QEFO = query_node:queue_empty(from_outer),
%% do we need to restrict calling outer loop only when empty message is ready (?)
%%case {get(state), QPFP, QEFO} of
case {get(state), QEFO} of
%%{wait_next_outer, true, false} ->
{wait_next_outer, false} ->
%% end of all inner streams detected (and we moved to wait_next_outer)
%% and queue from outer includes some messages. we don't wait for empty message
%% since inner stream has to be started before.
hc_data_outer(get(state)),
info_msg(hc_data_inner, [get(self), get(state)], recursive_loop_outer, 50);
_ -> ok
end.
hcdi_process_eos(From) ->
%% mark inner stream not alive
M = get(state_of_inner_streams),
put(state_of_inner_streams, maps:put(From, eos, M)),
%% check if all inner streams are dead
F1 = fun (alive) -> true;
(eos) -> false
end,
NumAlive = length(lists:filter(F1, maps:values(get(state_of_inner_streams)))),
%% all inner streams dead?
if NumAlive == 0 ->
put(state, wait_next_outer);
true -> ok
end.
%%info_msg(hc_data_inner, [get(self), {from,From}, {graph,Graph}, {numAlive,NumAlive}, get(state)], eos_inner_processed, 50).
hcdi_process_graph(Graph) ->
%% compute join
OG = get(outer_graph),
G = maps:merge(Graph, OG),
%%info_msg(hc_data_inner, [get(self), Graph, OG, G, get(state)], join_computed, 50),
%% set current graph G and compute val of select predicate
put(gp_val, G),
SP = query_node:eval_select(get(select_pred)),
info_msg(hc_data_inner, [get(self), {graph,G}, {select_pred,get(select_pred)}, {select_pred_value,SP}, get(state)], select_pred_computed, 50),
%% skip graph G if SP==false
case SP of
true ->
%% compute projection and put in queue to_parent
query_node:eval_project(get(project_list)),
G1 = get(gp_val),
query_node:queue_put(to_parent, G1),
info_msg(hc_data_inner, [get(self), {graph_in,G}, {project_list,get(project_list)}, {graph_out,G1}, get(state)], project_computed, 50),
%% send block to parent if evrythng preped
case query_node:queue_prepared(from_parent) and query_node:queue_prepared(to_parent) of
true ->
%% get empty message from queue from_parent
{empty, _} = query_node:queue_read(from_parent),
%% get block and create message
Msg = query_node:queue_read(to_parent),
%% send it to parent
gen_server:cast(get(parent), Msg),
info_msg(send_cast, [get(self), {message,Msg}, {to,get(parent)}, {invoker,hcdi_process_graph}, get(state)], message_sent, 30);
false-> ok
end;
false -> ok
end.
%%
%% hc_eval_test_/0
%%
%% @doc Main test function of module.
%%
hc_eval_test_() ->
hcet_site(b3s_state:get(test_mode)).
hcet_site(local1) ->
Attrs = {attributes, record_info(fields, triple_store)},
TabDef = [Attrs, {disc_copies, [node()]}],
info_msg(hcet_load_db, [get(self), TabDef], display_table, 50),
NDS = node(),
BSS = {b3s_state, NDS},
CRC = clm_row_conf,
RMS = #{1 => NDS},
CM1 = #{1 => RMS, 2 => RMS},
R01 = [NDS], %%, NDC],
put(self, {'1-1-1', node()}),
{inorder,
[
?_assertMatch(ok, b3s:start()),
?_assertMatch(ok, b3s:bootstrap()),
?_assertMatch(ok, gen_server:call(BSS, {put, CRC, CM1})),
?_assertMatch(R01, gen_server:call(BSS, propagate)),
{generator, fun()-> tp_query_node:hcet_load_db() end},
{generator, fun()-> hcet_q02() end},
?_assertMatch(ok, b3s:stop()),
?_assertMatch(ok, b3s:start()),
?_assertMatch(ok, b3s:bootstrap()),
?_assertMatch(ok, gen_server:call(BSS, {put, CRC, CM1})),
?_assertMatch(R01, gen_server:call(BSS, propagate)),
{generator, fun()-> hcet_load_db() end},
{generator, fun()-> hcet_q03() end},
{generator, fun()-> hcet_q05() end},
{generator, fun()-> hcet_q06() end},
%% finish
?_assertMatch(ok, timer:sleep(1000)),
?_assertMatch(stopped, mnesia:stop()),
?_assertMatch(ok, b3s:stop())
% ?_assertMatch(ok, mnesia:start()),
% ?_assertMatch({atomic, ok}, mnesia:create_table(triple_store, TabDef)),
% ?_assertMatch({atomic, ok}, mnesia:delete_table(triple_store)),
% ?_assertMatch(stopped, mnesia:stop())
]};
hcet_site(local_two) ->
[];
hcet_site(_) ->
[].
hcet_q02() ->
info_msg(hcet_q02, [get(self)], start, 50),
BS = gen_server:call(node_state, {get, b3s_state_pid}),
Tab = gen_server:call(BS, {get, name_of_triple_table}),
QueryNodeId = "3",
QueryId = "2",
SessionId = "1",
Id3 = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId),
JQN3 = join_query_node:spawn_process(Id3, node()),
TPQN1 = hcet_tpqn1(JQN3),
TPQN2 = hcet_tpqn2(JQN3),
GraphPattern = maps:from_list(
[{"1", {"?id1", eI("<Japanese>"), "?prd", "?obj1"}},
{"2", {"?id2", eI("<Slovenian>"), "?prd", "?obj2"}}]),
SelectPred = none,
ProjectList = none,
ParentPid = self(),
OuterPids = [TPQN1],
InnerPids = [TPQN2],
VarsPositions = #{"?id1" => [{"1", 1}],
"?id2" => [{"2", 1}],
"?prd" => [{"1", 3}, {"2", 3}],
"?obj1" => [{"1", 4}],
"?obj2" => [{"2", 4}]},
JoinVars = ["?prd"],
GP = get_property,
DFO = data_outer,
EOS = end_of_stream,
T1 = eT({Tab,"<triple_id_0002>","<Japanese>","<eat>","<fishes>"}),
T2 = eT({Tab,"<triple_id_0003>","<Slovenian>","<eat>","<potatoes>"}),
TP1 = {"?id2", eI("<Slovenian>"), eI("<eat>"), "?obj2"},
M1 = {start, QueryNodeId, QueryId, SessionId, JQN3, GraphPattern,
SelectPred, ProjectList, ParentPid, OuterPids, InnerPids,
VarsPositions, JoinVars},
M2 = {eval, []},
R1Map = maps:put("1", T1, maps:new()),
R2Map = maps:put("2", T2, R1Map),
R = {DFO, JQN3, [R2Map,EOS]},
{inorder,
[
?_assertMatch(true, gen_server:call(JQN3, {GP, wait})),
?_assertMatch(ok, gen_server:call(JQN3, M1)),
?_assertMatch(false, gen_server:call(JQN3, {GP, wait})),
?_assertMatch(undefined, gen_server:call(JQN3, {GP, inner_outer})),
?_assertMatch(ok, gen_server:call(JQN3, M2)),
?_assertMatch({_, R}, hcet_send_empty(JQN3, R)),
?_assertMatch(TP1, gen_server:call(TPQN2, {GP, tp})),
?_assertMatch(OuterPids, gen_server:call(JQN3, {GP, outer})),
?_assertMatch(InnerPids, gen_server:call(JQN3, {GP, inner})),
?_assertMatch(GraphPattern, gen_server:call(JQN3, {GP, gp})),
?_assertMatch(ok, gen_server:cast(TPQN1, {stop, self()})),
?_assertMatch(ok, gen_server:cast(TPQN2, {stop, self()})),
?_assertMatch(ok, gen_server:cast(JQN3, {stop, self()}))
]}.
hcet_tpqn1(Pid) ->
QueryNodeId = "1",
QueryId = "2",
SessionId = "1",
Id = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId),
TPQN1 = tp_query_node:spawn_process(Id, node()),
TriplePattern = {"?id1", eI("<Japanese>"), "?prd", "?obj1"},
SelectPred = none,
ProjectList = none,
ParentPid = Pid,
VarsPositions = #{"?id1" => 1, "?prd" => 3, "?obj1" => 4},
M = {start, QueryNodeId, QueryId, SessionId, TPQN1,
TriplePattern, SelectPred, ProjectList,
ParentPid, VarsPositions, outer},
gen_server:call(TPQN1, M),
TPQN1.
hcet_tpqn2(Pid) ->
QueryNodeId = "2",
QueryId = "2",
SessionId = "1",
Id = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId),
TPQN2 = tp_query_node:spawn_process(Id, node()),
TriplePattern = {"?id2", eI("<Slovenian>"), "?prd", "?obj2"},
SelectPred = none,
ProjectList = none,
ParentPid = Pid,
VarsPositions = #{"?id2" => 1, "?prd" => 3, "?obj2" => 4},
M = {start, QueryNodeId, QueryId, SessionId, TPQN2,
TriplePattern, SelectPred, ProjectList,
ParentPid, VarsPositions, inner},
gen_server:call(TPQN2, M),
TPQN2.
%%
%% @doc Creation of triple-store used in examples.
%%
example_table() ->
%% Each row is {triple_store, TripleId, Subject, Predicate, Object};
%% rows are encoded via eT/1 before being written to the store.
[%% country
{triple_store, "id1", "japan", "type", "country"},
{triple_store, "id2", "slovenia", "type", "country"},
%% cities
{triple_store, "id3", "koper", "type", "city"},
{triple_store, "id4", "ljubljana","type", "city"},
{triple_store, "id5", "tokyo", "type", "city"},
{triple_store, "id6", "kyoto", "type", "city"},
{triple_store, "id7", "osaka", "type", "city"},
%% organizations
{triple_store, "id8", "up", "type", "university"}, % uni primorska
{triple_store, "id9", "ul", "type", "university"}, % uni ljubljana
{triple_store, "id10", "ijs", "type", "institute"}, % institute jozef stefan
{triple_store, "id11", "yj", "type", "corporation"}, % yahoo ! japan
{triple_store, "id12", "tu", "type", "university"}, % tokyo uni
{triple_store, "id13", "ku", "type", "university"}, % kyoto uni
{triple_store, "id14", "ou", "type", "university"}, % osaka uni
%% persons
{triple_store, "id15", "shou", "type", "person"},
{triple_store, "id16", "yoshio", "type", "person"},
{triple_store, "id17", "sakura", "type", "person"},
{triple_store, "id18", "luka", "type", "person"},
{triple_store, "id19", "jan", "type", "person"},
{triple_store, "id20", "nika", "type", "person"},
%% note: "id57" is out of sequence here; ids 21..56 follow below
{triple_store, "id57", "marko", "type", "person"},
%% hasCapital
{triple_store, "id21", "japan", "hasCapital", "tokyo"},
{triple_store, "id22", "slovenia", "hasCapital", "ljubljana"},
%% isLocatedIn
{triple_store, "id23", "tokyo", "isLocatedIn", "japan"},
{triple_store, "id24", "kyoto", "isLocatedIn", "japan"},
{triple_store, "id25", "osaka", "isLocatedIn", "japan"},
{triple_store, "id26", "koper", "isLocatedIn", "slovenia"},
{triple_store, "id27", "ljubljana","isLocatedIn", "slovenia"},
{triple_store, "id28", "up", "isLocatedIn", "koper"},
{triple_store, "id29", "ul", "isLocatedIn", "ljubljana"},
{triple_store, "id30", "ijs", "isLocatedIn", "ljubljana"},
{triple_store, "id31", "yj", "isLocatedIn", "tokyo"},
{triple_store, "id32", "ku", "isLocatedIn", "kyoto"},
{triple_store, "id33", "ou", "isLocatedIn", "osaka"},
{triple_store, "id34", "tu", "isLocatedIn", "tokyo"},
%% livesIn
{triple_store, "id35", "shou", "livesIn", "tokyo"},
{triple_store, "id36", "yoshio", "livesIn", "tokyo"},
{triple_store, "id37", "sakura", "livesIn", "kyoto"},
{triple_store, "id38", "luka", "livesIn", "ljubljana"},
{triple_store, "id39", "jan", "livesIn", "koper"},
{triple_store, "id40", "nika", "livesIn", "ljubljana"},
{triple_store, "id41", "marko", "livesIn", "ljubljana"},
%% worksAt
{triple_store, "id42", "shou", "worksAt", "yj"},
{triple_store, "id43", "shou", "worksAt", "ku"},
{triple_store, "id44", "yoshio", "worksAt", "yj"},
{triple_store, "id45", "sakura", "worksAt", "ku"},
{triple_store, "id46", "luka", "worksAt", "up"},
{triple_store, "id47", "luka", "worksAt", "ijs"},
{triple_store, "id48", "jan", "worksAt", "up"},
{triple_store, "id49", "nika", "worksAt", "ijs"},
{triple_store, "id50", "marko", "worksAt", "ijs"},
%% graduatedFrom
{triple_store, "id51", "shou", "graduatedFrom", "ou"},
{triple_store, "id52", "yoshio", "graduatedFrom", "tu"},
{triple_store, "id53", "sakura", "graduatedFrom", "ku"},
{triple_store, "id54", "luka", "graduatedFrom", "ul"},
{triple_store, "id55", "jan", "graduatedFrom", "up"},
{triple_store, "id56", "nika", "graduatedFrom", "ul"},
%% age
{triple_store, "id58", "shou", "age", "25"},
{triple_store, "id59", "yoshio", "age", "36"},
{triple_store, "id60", "sakura", "age", "27"},
{triple_store, "id61", "luka", "age", "38"},
{triple_store, "id62", "jan", "age", "45"},
{triple_store, "id63", "nika", "age", "22"},
{triple_store, "id64", "marko", "age", "30"}].
%% @doc Loads the example triple table into the database backend.
%% The original body selected the backend through a hard-coded
%% `case 2 of 2 -> ...' switch with a single reachable branch; that
%% degenerate case is removed, the behaviour is unchanged (only the
%% postgres loader was ever invoked).
hcet_load_db() ->
    hcet_load_db_postgres().
%% @doc Rebuilds the string_id table, loads the encoded example triples
%% into the postgres-backed store, builds the index, and returns an
%% {inorder, ...} eunit generator that spot-checks three triples by id.
hcet_load_db_postgres() ->
info_msg(hcet_load_db_postgres, [], {start, get(self)}, 50),
BS = gen_server:call(node_state, {get, b3s_state_pid}),
Tab = db_interface:dot_get_tn(),
%% Writer fun: encode one raw example row and persist it.
F1 = fun (X) ->
{_, Tid, Sbj, Prd, Obj} = X,
D = eT({Tab, Tid, Sbj, Prd, Obj}),
db_interface:db_write(D)
end,
SI = string_id,
SIT = gen_server:call(BS, {get, name_of_string_id_table}),
%% Reset and recreate the string->id mapping table from scratch.
gen_server:call(SI, {put, sid_table_name, SIT}),
gen_server:call(SI, {put, di_cursor__, undefined}),
gen_server:call(SI, delete_table),
gen_server:call(SI, {create_table, SIT}),
gen_server:call(SI, make_index),
%% Drop cached table name / max id from the process dictionary so they
%% are re-fetched against the freshly created table.
erase(sid_table_name),
erase(sid_max_id),
%% ok = db_interface:db_close(),
ok = db_interface:db_init(),
info_msg(hcet_load_db_postgres, [], {di_cursor__, get(di_cursor__)}, 50),
%% ok = db_interface:db_close(),
ok = lists:foreach(F1, example_table()),
ok = db_interface:db_add_index(),
%% ok = db_interface:db_close(),
%% Encoded triple patterns and the rows they are expected to return.
TP01 = eTP({"id1", "?s", "?p", "?o"}),
TP02 = eTP({"id11", "?s", "?p", "?o"}),
TP03 = eTP({"id56", "?s", "?p", "?o"}),
R01 = eT({Tab, "id1", "japan", "type", "country"}),
R02 = eT({Tab, "id11", "yj", "type", "corporation"}),
R03 = eT({Tab, "id56", "nika", "graduatedFrom", "ul"}),
EOS = end_of_stream,
{inorder,
[
%% ?_assertMatch(ok, db_interface:db_close()),
%% ?_assertMatch(ok, db_interface:db_init()),
%% ?_assertMatch(ok, lists:foreach(F1, example_table())),
%% ?_assertMatch(ok, db_interface:db_add_index()),
%% ?_assertMatch(ok, db_interface:db_close()),
?_assertMatch(ok, db_interface:db_open_tp(TP01)),
?_assertMatch(R01, db_interface:db_next()),
%% ?_assertMatch(EOS, db_interface:db_next()),
?_assertMatch(ok, db_interface:db_open_tp(TP02)),
?_assertMatch(R02, db_interface:db_next()),
?_assertMatch(EOS, db_interface:db_next()),
?_assertMatch(ok, db_interface:db_open_tp(TP03)),
?_assertMatch(R03, db_interface:db_next()),
?_assertMatch(EOS, db_interface:db_next()),
?_assertMatch(ok, db_interface:db_close())
]}.
%% @doc Casts {empty, self()} to query node QN and blocks until any
%% message arrives back; logs and returns the received message M.
%% R is the message the caller expects -- it is used only for logging
%% here, the actual comparison happens in the caller's assert.
%% NOTE(review): the receive has no `after' timeout, so a silent query
%% node blocks this helper (and the whole eunit run) indefinitely --
%% confirm that is acceptable for these tests.
hcet_send_empty(QN, R) ->
gen_server:cast(QN, {empty, self()}),
receive
M -> M
end,
info_msg(hcet_send_empty, [get(self), {from,QN}, {received, M}, {expected,R}, get(state)], data_received, 50),
M.
%% @doc Fetches the complete property dictionary of query node QN via a
%% synchronous {get_property, all} call, logs it (including its length,
%% which the callers assert on), and returns it.
hcet_get_PD(QN) ->
M = gen_server:call(QN, {get_property, all}),
info_msg(hcet_get_PD, [get(self), {pid,QN}, {all,M}, length(M)], response_property_all_received, 50),
M.
%% Encodes a string constant into its integer id via the string_id server.
eI(X) -> string_id:get_id(X).
%% Encodes a {Table, Id, Subject, Predicate, Object} row: the four
%% string fields are mapped to ids by string_id:encode_triple/1 and the
%% table tag is re-attached in front.
eT({Tab, Id, Sbj, Prd, Obj}) ->
    Encoded = tuple_to_list(string_id:encode_triple({Id, Sbj, Prd, Obj})),
    list_to_tuple([Tab | Encoded]).
%% Encodes a triple pattern (constants to ids, variables kept as-is).
eTP(X) -> string_id:encode_triple_pattern(X).
%% @doc Integration test for the 3-pattern query below, using one tp
%% query node per pattern and two cascaded joins: JQN3 joins patterns
%% 1 and 2 on ?x, JQN5 joins that result with pattern 4 on ?y.  JQN5
%% filters out ?y == luka via its select predicate and projects ?y, so
%% only the nika and marko bindings survive.
hcet_q03() ->
%%
%% query: using single tp query nodes
%%
%% slovenia hasCapital ?x
%% ?y livesIn ?x
%% ?y worksAt ijs
%%
info_msg(hcet_q03, [get(self)], start, 50),
Tab = db_interface:dot_get_tn(),
%% creating processes
QueryNodeId3 = "3",
QueryId = "3",
SessionId = "1",
Id3 = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId3),
JQN3 = join_query_node:spawn_process(Id3, node()),
QueryNodeId5 = "5",
Id5 = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId5),
JQN5 = join_query_node:spawn_process(Id5, node()),
TPQN1 = hcet_tpqn3(JQN3),
TPQN2 = hcet_tpqn4(JQN3),
TPQN4 = hcet_tpqn5(JQN5),
%% first join
GraphPattern3 = maps:from_list(
[{"1", {"?i1", eI("slovenia"), eI("hasCapital"), "?x"}},
{"2", {"?i2", "?y", eI("livesIn"), "?x"}}]),
SelectPred3 = none,
ProjectList3 = none,
ParentPid3 = JQN5,
OuterPids3 = [TPQN1],
InnerPids3 = [TPQN2],
%% NOTE(review): "?i4" below refers to pattern "4", which is not part
%% of GraphPattern3 -- presumably ignored by JQN3, confirm intended.
VarsPositions3 = #{"?i1" => [{"1", 1}],
"?i2" => [{"2", 1}],
"?i4" => [{"4", 1}],
"?x" => [{"1", 4}, {"2", 4}],
"?y" => [{"2", 2}]},
JoinVars3 = ["?x"],
%% second join
GraphPattern5 = maps:from_list(
[{"1", {"?i1", eI("slovenia"), eI("hasCapital"), "?x"}},
{"2", {"?i2", "?y", eI("livesIn"), "?x"}},
{"4", {"?i4", "?y", eI("worksAt"), eI("ijs")}}]),
SelectPred5 = {lnot, {"?y", equal, eI("luka")}},
ProjectList5 = ["?y"],
ParentPid5 = self(),
OuterPids5 = [JQN3],
InnerPids5 = [TPQN4],
VarsPositions5 = #{"?i1" => [{"1", 1}],
"?i2" => [{"2", 1}],
"?i4" => [{"4", 1}],
"?x" => [{"1", 4}, {"2", 4}],
"?y" => [{"2", 2}, {"4", 2}]},
JoinVars5 = ["?y"],
%% data to be returned
T4 = eT({Tab, "id40", "nika", "livesIn", "ljubljana"}),
T5 = eT({Tab, "id49", "nika", "worksAt", "ijs"}),
T6 = eT({Tab, "id41", "marko", "livesIn", "ljubljana"}),
T7 = eT({Tab, "id50", "marko", "worksAt", "ijs"}),
%% messages for JQN3 and JQN5
DFO = data_outer,
EOS = end_of_stream,
S3 = {start, QueryNodeId3, QueryId, SessionId, JQN3, GraphPattern3,
SelectPred3, ProjectList3, ParentPid3, OuterPids3, InnerPids3,
VarsPositions3, JoinVars3},
S5 = {start, QueryNodeId5, QueryId, SessionId, JQN5, GraphPattern5,
SelectPred5, ProjectList5, ParentPid5, OuterPids5, InnerPids5,
VarsPositions5, JoinVars5},
E5 = {eval, []},
%% tuples to be returned
R2Map = maps:put("4", T5, maps:put("2", T4, maps:new())),
R3Map = maps:put("4", T7, maps:put("2", T6, maps:new())),
R1 = {DFO, JQN5, [R3Map,R2Map,EOS]},
% info_msg(hcet_q03, [get(self), R1, RE], before_tests, 50),
{inorder,
[
?_assertMatch(ok, mnesia:start()),
?_assertMatch(ok, timer:sleep(1000)),
?_assertMatch(ok, gen_server:call(JQN3, S3)),
?_assertMatch(ok, gen_server:call(JQN5, S5)),
%% check state of qn-s
?_assertMatch(35, length(hcet_get_PD(TPQN1))),
?_assertMatch(35, length(hcet_get_PD(TPQN2))),
?_assertMatch(42, length(hcet_get_PD(JQN3))),
?_assertMatch(35, length(hcet_get_PD(TPQN4))),
?_assertMatch(42, length(hcet_get_PD(JQN5))),
%% start evaluation
?_assertMatch(ok, gen_server:call(JQN5, E5)),
%% send empty messages to JQN5
?_assertMatch({'$gen_cast', R1}, hcet_send_empty(JQN5, R1))
]}.
%% @doc Spawns tp query node "1" of query "3" (outer side of the first
%% join): pattern {?i1, slovenia, hasCapital, ?x}.  Returns its pid.
hcet_tpqn3(ParentPid) ->
    NodeId = "1",
    QId = "3",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i1", eI("slovenia"), eI("hasCapital"), "?x"},
                none, none, ParentPid,
                #{"?i1" => 1, "?x" => 4}, outer},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Spawns tp query node "2" of query "3" (inner side of the first
%% join): pattern {?i2, ?y, livesIn, ?x}.  Returns its pid.
hcet_tpqn4(ParentPid) ->
    NodeId = "2",
    QId = "3",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i2", "?y", eI("livesIn"), "?x"},
                none, none, ParentPid,
                #{"?i2" => 1, "?y" => 2, "?x" => 4}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Spawns tp query node "4" of query "3" (inner side of the second
%% join): pattern {?i4, ?y, worksAt, ijs}.  Returns its pid.
hcet_tpqn5(ParentPid) ->
    NodeId = "4",
    QId = "3",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i4", "?y", eI("worksAt"), eI("ijs")},
                none, none, ParentPid,
                #{"?i4" => 1, "?y" => 2}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Same 3-pattern query as hcet_q03 (query id "5"), but every
%% triple pattern is served by TWO redundant tp query nodes, so each
%% solution arrives twice and results are streamed in blocks of five
%% maps (see block_size remark below).
hcet_q05() ->
%%
%% query: using two query nodes for each tp
%%
%% slovenia hasCapital ?x
%% ?y livesIn ?x
%% ?y worksAt ijs
%%
info_msg(hcet_q05, [get(self)], start, 50),
Tab = db_interface:dot_get_tn(),
%% creating processes
QueryNodeId3 = "3",
QueryId = "5",
SessionId = "1",
Id3 = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId3),
JQN3 = join_query_node:spawn_process(Id3, node()),
QueryNodeId5 = "5",
Id5 = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId5),
JQN5 = join_query_node:spawn_process(Id5, node()),
TPQN1 = hcet_tp5qn3(JQN3),
TPQN1a = hcet_tp5qn3a(JQN3),
TPQN2 = hcet_tp5qn4(JQN3),
TPQN2a = hcet_tp5qn4a(JQN3),
TPQN4 = hcet_tp5qn5(JQN5),
TPQN4a = hcet_tp5qn5a(JQN5),
%% first join
GraphPattern3 = maps:from_list(
[{"1", {"?i1", eI("slovenia"), eI("hasCapital"), "?x"}},
{"2", {"?i2", "?y", eI("livesIn"), "?x"}}]),
SelectPred = none,
ProjectList = none,
ParentPid3 = JQN5,
OuterPids3 = [TPQN1,TPQN1a],
InnerPids3 = [TPQN2,TPQN2a],
VarsPositions3 = #{"?i1" => [{"1", 1}],
"?i2" => [{"2", 1}],
"?i4" => [{"4", 1}],
"?x" => [{"1", 4}, {"2", 4}],
"?y" => [{"2", 2}]},
JoinVars3 = ["?x"],
%% second join
GraphPattern5 = maps:from_list(
[{"1", {"?i1", eI("slovenia"), eI("hasCapital"), "?x"}},
{"2", {"?i2", "?y", eI("livesIn"), "?x"}},
{"4", {"?i4", "?y", eI("worksAt"), eI("ijs")}}]),
ParentPid5 = self(),
OuterPids5 = [JQN3],
InnerPids5 = [TPQN4,TPQN4a],
VarsPositions5 = #{"?i1" => [{"1", 1}],
"?i2" => [{"2", 1}],
"?i4" => [{"4", 1}],
"?x" => [{"1", 4}, {"2", 4}],
"?y" => [{"2", 2}, {"4", 2}]},
JoinVars5 = ["?y"],
%% data to be returned
T1 = eT({Tab, "id22", "slovenia", "hasCapital", "ljubljana"}),
T2 = eT({Tab, "id38", "luka", "livesIn", "ljubljana"}),
T3 = eT({Tab, "id47", "luka", "worksAt", "ijs"}),
T4 = eT({Tab, "id40", "nika", "livesIn", "ljubljana"}),
T5 = eT({Tab, "id49", "nika", "worksAt", "ijs"}),
T6 = eT({Tab, "id41", "marko", "livesIn", "ljubljana"}),
T7 = eT({Tab, "id50", "marko", "worksAt", "ijs"}),
%% messages for JQN3 and JQN5
DFO = data_outer,
EOS = end_of_stream,
S3 = {start, QueryNodeId3, QueryId, SessionId, JQN3, GraphPattern3,
SelectPred, ProjectList, ParentPid3, OuterPids3, InnerPids3,
VarsPositions3, JoinVars3},
S5 = {start, QueryNodeId5, QueryId, SessionId, JQN5, GraphPattern5,
SelectPred, ProjectList, ParentPid5, OuterPids5, InnerPids5,
VarsPositions5, JoinVars5},
E5 = {eval, []},
%% tuples to be returned
%% R1Map/R2Map/R3Map are the luka, nika and marko solutions; with two
%% nodes per pattern every solution appears four times, streamed in
%% fixed-size blocks R1..R5 (last block padded with end_of_stream).
R1Map = maps:put("4", T3, maps:put("2", T2, maps:put("1", T1, maps:new()))),
R2Map = maps:put("4", T5, maps:put("2", T4, maps:put("1", T1, maps:new()))),
R3Map = maps:put("4", T7, maps:put("2", T6, maps:put("1", T1, maps:new()))),
R1 = {DFO, JQN5, [R3Map,R3Map,R2Map,R2Map,R1Map]},
R2 = {DFO, JQN5, [R1Map,R3Map,R3Map,R2Map,R2Map]},
R3 = {DFO, JQN5, [R1Map,R1Map,R3Map,R3Map,R2Map]},
R4 = {DFO, JQN5, [R2Map,R1Map,R1Map,R3Map,R3Map]},
R5 = {DFO, JQN5, [R2Map,R2Map,R1Map,R1Map,EOS]},
% info_msg(hcet_q05, [get(self), R1, R2, RE], before_tests, 50),
{inorder,
[
?_assertMatch(ok, mnesia:start()),
?_assertMatch(ok, timer:sleep(1000)),
?_assertMatch(ok, gen_server:call(JQN3, S3)),
?_assertMatch(ok, gen_server:call(JQN5, S5)),
%% check state of qn-s
?_assertMatch(35, length(hcet_get_PD(TPQN1))),
?_assertMatch(35, length(hcet_get_PD(TPQN2))),
?_assertMatch(42, length(hcet_get_PD(JQN3))),
?_assertMatch(35, length(hcet_get_PD(TPQN4))),
?_assertMatch(42, length(hcet_get_PD(JQN5))),
%% start evaluation
?_assertMatch(ok, gen_server:call(JQN5, E5)),
%% send empty messages to JQN5
%% works only with block_size=5 !!! (iztok,2016/01/31)
?_assertMatch({'$gen_cast', R1}, hcet_send_empty(JQN5, R1)),
?_assertMatch({'$gen_cast', R2}, hcet_send_empty(JQN5, R2)),
?_assertMatch({'$gen_cast', R3}, hcet_send_empty(JQN5, R3)),
?_assertMatch({'$gen_cast', R4}, hcet_send_empty(JQN5, R4)),
?_assertMatch({'$gen_cast', R5}, hcet_send_empty(JQN5, R5))
]}.
%% @doc Spawns tp query node "1" of query "5" (outer side of the first
%% join): pattern {?i1, slovenia, hasCapital, ?x}.  Returns its pid.
hcet_tp5qn3(ParentPid) ->
    NodeId = "1",
    QId = "5",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i1", eI("slovenia"), eI("hasCapital"), "?x"},
                none, none, ParentPid,
                #{"?i1" => 1, "?x" => 4}, outer},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Redundant twin of hcet_tp5qn3/1, registered under query id "5a"
%% so both copies can coexist.  Same pattern, outer side.
hcet_tp5qn3a(ParentPid) ->
    NodeId = "1",
    QId = "5a",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i1", eI("slovenia"), eI("hasCapital"), "?x"},
                none, none, ParentPid,
                #{"?i1" => 1, "?x" => 4}, outer},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Spawns tp query node "2" of query "5" (inner side of the first
%% join): pattern {?i2, ?y, livesIn, ?x}.  Returns its pid.
hcet_tp5qn4(ParentPid) ->
    NodeId = "2",
    QId = "5",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i2", "?y", eI("livesIn"), "?x"},
                none, none, ParentPid,
                #{"?i2" => 1, "?y" => 2, "?x" => 4}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Redundant twin of hcet_tp5qn4/1, registered under query id "5a".
%% Same pattern, inner side.
hcet_tp5qn4a(ParentPid) ->
    NodeId = "2",
    QId = "5a",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i2", "?y", eI("livesIn"), "?x"},
                none, none, ParentPid,
                #{"?i2" => 1, "?y" => 2, "?x" => 4}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Spawns tp query node "4" of query "5" (inner side of the second
%% join): pattern {?i4, ?y, worksAt, ijs}.  Returns its pid.
hcet_tp5qn5(ParentPid) ->
    NodeId = "4",
    QId = "5",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i4", "?y", eI("worksAt"), eI("ijs")},
                none, none, ParentPid,
                #{"?i4" => 1, "?y" => 2}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Redundant twin of hcet_tp5qn5/1, registered under query id "5a".
%% Same pattern, inner side.
hcet_tp5qn5a(ParentPid) ->
    NodeId = "4",
    QId = "5a",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i4", "?y", eI("worksAt"), eI("ijs")},
                none, none, ParentPid,
                #{"?i4" => 1, "?y" => 2}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Same 3-pattern query as hcet_q03 (query id "6"), but every
%% triple pattern is served by THREE redundant tp query nodes, so each
%% solution appears nine times; the 27 result maps are streamed in
%% blocks of five (R1..R9 twice minus two blocks, then RE closes with
%% end_of_stream).
hcet_q06() ->
%%
%% query: using three query nodes for each tp
%%
%% slovenia hasCapital ?x
%% ?y livesIn ?x
%% ?y worksAt ijs
%%
info_msg(hcet_q06, [get(self)], start, 50),
Tab = db_interface:dot_get_tn(),
%% creating processes
QueryNodeId3 = "3",
QueryId = "6",
SessionId = "1",
Id3 = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId3),
JQN3 = join_query_node:spawn_process(Id3, node()),
QueryNodeId5 = "5",
Id5 = list_to_atom(SessionId++"-"++QueryId++"-"++QueryNodeId5),
JQN5 = join_query_node:spawn_process(Id5, node()),
TPQN1 = hcet_tp6qn3(JQN3),
TPQN1a = hcet_tp6qn3a(JQN3),
TPQN1b = hcet_tp6qn3b(JQN3),
TPQN2 = hcet_tp6qn4(JQN3),
TPQN2a = hcet_tp6qn4a(JQN3),
TPQN2b = hcet_tp6qn4b(JQN3),
TPQN4 = hcet_tp6qn5(JQN5),
TPQN4a = hcet_tp6qn5a(JQN5),
TPQN4b = hcet_tp6qn5b(JQN5),
%% first join
GraphPattern3 = maps:from_list(
[{"1", {"?i1", eI("slovenia"), eI("hasCapital"), "?x"}},
{"2", {"?i2", "?y", eI("livesIn"), "?x"}}]),
SelectPred = none,
ProjectList = none,
ParentPid3 = JQN5,
OuterPids3 = [TPQN1,TPQN1a,TPQN1b],
InnerPids3 = [TPQN2,TPQN2a,TPQN2b],
VarsPositions3 = #{"?i1" => [{"1", 1}],
"?i2" => [{"2", 1}],
"?i4" => [{"4", 1}],
"?x" => [{"1", 4}, {"2", 4}],
"?y" => [{"2", 2}]},
JoinVars3 = ["?x"],
%% second join
GraphPattern5 = maps:from_list(
[{"1", {"?i1", eI("slovenia"), eI("hasCapital"), "?x"}},
{"2", {"?i2", "?y", eI("livesIn"), "?x"}},
{"4", {"?i4", "?y", eI("worksAt"), eI("ijs")}}]),
ParentPid5 = self(),
OuterPids5 = [JQN3],
InnerPids5 = [TPQN4,TPQN4a,TPQN4b],
VarsPositions5 = #{"?i1" => [{"1", 1}],
"?i2" => [{"2", 1}],
"?i4" => [{"4", 1}],
"?x" => [{"1", 4}, {"2", 4}],
"?y" => [{"2", 2}, {"4", 2}]},
JoinVars5 = ["?y"],
%% data to be returned
T1 = eT({Tab, "id22", "slovenia", "hasCapital", "ljubljana"}),
T2 = eT({Tab, "id38", "luka", "livesIn", "ljubljana"}),
T3 = eT({Tab, "id47", "luka", "worksAt", "ijs"}),
T4 = eT({Tab, "id40", "nika", "livesIn", "ljubljana"}),
T5 = eT({Tab, "id49", "nika", "worksAt", "ijs"}),
T6 = eT({Tab, "id41", "marko", "livesIn", "ljubljana"}),
T7 = eT({Tab, "id50", "marko", "worksAt", "ijs"}),
%% messages for JQN3 and JQN5
DFO = data_outer,
EOS = end_of_stream,
S3 = {start, QueryNodeId3, QueryId, SessionId, JQN3, GraphPattern3,
SelectPred, ProjectList, ParentPid3, OuterPids3, InnerPids3,
VarsPositions3, JoinVars3},
S5 = {start, QueryNodeId5, QueryId, SessionId, JQN5, GraphPattern5,
SelectPred, ProjectList, ParentPid5, OuterPids5, InnerPids5,
VarsPositions5, JoinVars5},
E5 = {eval, []},
%% tuples to be returned
%% R1Map/R2Map/R3Map are the luka, nika and marko solutions.
R1Map = maps:put("4", T3, maps:put("2", T2, maps:put("1", T1, maps:new()))),
R2Map = maps:put("4", T5, maps:put("2", T4, maps:put("1", T1, maps:new()))),
R3Map = maps:put("4", T7, maps:put("2", T6, maps:put("1", T1, maps:new()))),
R1 = {DFO, JQN5, [R3Map,R3Map,R3Map,R2Map,R2Map]},
R2 = {DFO, JQN5, [R2Map,R1Map,R1Map,R1Map,R3Map]},
R3 = {DFO, JQN5, [R3Map,R3Map,R2Map,R2Map,R2Map]},
R4 = {DFO, JQN5, [R1Map,R1Map,R1Map,R3Map,R3Map]},
R5 = {DFO, JQN5, [R3Map,R2Map,R2Map,R2Map,R1Map]},
R6 = {DFO, JQN5, [R1Map,R1Map,R3Map,R3Map,R3Map]},
R7 = {DFO, JQN5, [R2Map,R2Map,R2Map,R1Map,R1Map]},
R8 = {DFO, JQN5, [R1Map,R3Map,R3Map,R3Map,R2Map]},
R9 = {DFO, JQN5, [R2Map,R2Map,R1Map,R1Map,R1Map]},
RE = {DFO, JQN5, [R1Map,EOS]},
% info_msg(hcet_q06, [get(self), R1, RE], before_tests, 50),
{inorder,
[
?_assertMatch(ok, mnesia:start()),
?_assertMatch(ok, timer:sleep(1000)),
?_assertMatch(ok, gen_server:call(JQN3, S3)),
?_assertMatch(ok, gen_server:call(JQN5, S5)),
%% check state of qn-s
?_assertMatch(35, length(hcet_get_PD(TPQN1))),
?_assertMatch(35, length(hcet_get_PD(TPQN2))),
?_assertMatch(42, length(hcet_get_PD(JQN3))),
?_assertMatch(35, length(hcet_get_PD(TPQN4))),
?_assertMatch(42, length(hcet_get_PD(JQN5))),
%% start evaluation
?_assertMatch(ok, gen_server:call(JQN5, E5)),
%% send empty messages to JQN5
?_assertMatch({'$gen_cast', R1}, hcet_send_empty(JQN5, R1)),
?_assertMatch({'$gen_cast', R2}, hcet_send_empty(JQN5, R2)),
?_assertMatch({'$gen_cast', R3}, hcet_send_empty(JQN5, R3)),
?_assertMatch({'$gen_cast', R4}, hcet_send_empty(JQN5, R4)),
?_assertMatch({'$gen_cast', R5}, hcet_send_empty(JQN5, R5)),
?_assertMatch({'$gen_cast', R6}, hcet_send_empty(JQN5, R6)),
?_assertMatch({'$gen_cast', R7}, hcet_send_empty(JQN5, R7)),
?_assertMatch({'$gen_cast', R8}, hcet_send_empty(JQN5, R8)),
?_assertMatch({'$gen_cast', R9}, hcet_send_empty(JQN5, R9)),
?_assertMatch({'$gen_cast', R1}, hcet_send_empty(JQN5, R1)),
?_assertMatch({'$gen_cast', R2}, hcet_send_empty(JQN5, R2)),
?_assertMatch({'$gen_cast', R3}, hcet_send_empty(JQN5, R3)),
?_assertMatch({'$gen_cast', R4}, hcet_send_empty(JQN5, R4)),
?_assertMatch({'$gen_cast', R5}, hcet_send_empty(JQN5, R5)),
?_assertMatch({'$gen_cast', R6}, hcet_send_empty(JQN5, R6)),
?_assertMatch({'$gen_cast', R7}, hcet_send_empty(JQN5, R7)),
?_assertMatch({'$gen_cast', RE}, hcet_send_empty(JQN5, RE))
]}.
%% @doc Spawns tp query node "1" of query "6" (outer side of the first
%% join): pattern {?i1, slovenia, hasCapital, ?x}.  Returns its pid.
hcet_tp6qn3(ParentPid) ->
    NodeId = "1",
    QId = "6",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i1", eI("slovenia"), eI("hasCapital"), "?x"},
                none, none, ParentPid,
                #{"?i1" => 1, "?x" => 4}, outer},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc First redundant twin of hcet_tp6qn3/1, registered under query
%% id "6a".  Same pattern, outer side.
hcet_tp6qn3a(ParentPid) ->
    NodeId = "1",
    QId = "6a",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i1", eI("slovenia"), eI("hasCapital"), "?x"},
                none, none, ParentPid,
                #{"?i1" => 1, "?x" => 4}, outer},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Second redundant twin of hcet_tp6qn3/1, registered under query
%% id "6b".  Same pattern, outer side.
hcet_tp6qn3b(ParentPid) ->
    NodeId = "1",
    QId = "6b",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i1", eI("slovenia"), eI("hasCapital"), "?x"},
                none, none, ParentPid,
                #{"?i1" => 1, "?x" => 4}, outer},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Spawns tp query node "2" of query "6" (inner side of the first
%% join): pattern {?i2, ?y, livesIn, ?x}.  Returns its pid.
hcet_tp6qn4(ParentPid) ->
    NodeId = "2",
    QId = "6",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i2", "?y", eI("livesIn"), "?x"},
                none, none, ParentPid,
                #{"?i2" => 1, "?y" => 2, "?x" => 4}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc First redundant twin of hcet_tp6qn4/1, registered under query
%% id "6a".  Same pattern, inner side.
hcet_tp6qn4a(ParentPid) ->
    NodeId = "2",
    QId = "6a",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i2", "?y", eI("livesIn"), "?x"},
                none, none, ParentPid,
                #{"?i2" => 1, "?y" => 2, "?x" => 4}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Second redundant twin of hcet_tp6qn4/1, registered under query
%% id "6b".  Same pattern, inner side.
hcet_tp6qn4b(ParentPid) ->
    NodeId = "2",
    QId = "6b",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i2", "?y", eI("livesIn"), "?x"},
                none, none, ParentPid,
                #{"?i2" => 1, "?y" => 2, "?x" => 4}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Spawns tp query node "4" of query "6" (inner side of the second
%% join): pattern {?i4, ?y, worksAt, ijs}.  Returns its pid.
hcet_tp6qn5(ParentPid) ->
    NodeId = "4",
    QId = "6",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i4", "?y", eI("worksAt"), eI("ijs")},
                none, none, ParentPid,
                #{"?i4" => 1, "?y" => 2}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc First redundant twin of hcet_tp6qn5/1, registered under query
%% id "6a".  Same pattern, inner side.
hcet_tp6qn5a(ParentPid) ->
    NodeId = "4",
    QId = "6a",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i4", "?y", eI("worksAt"), eI("ijs")},
                none, none, ParentPid,
                #{"?i4" => 1, "?y" => 2}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% @doc Second redundant twin of hcet_tp6qn5/1, registered under query
%% id "6b".  Same pattern, inner side.
hcet_tp6qn5b(ParentPid) ->
    NodeId = "4",
    QId = "6b",
    SessId = "1",
    ProcId = list_to_atom(SessId ++ "-" ++ QId ++ "-" ++ NodeId),
    QNode = tp_query_node:spawn_process(ProcId, node()),
    StartMsg = {start, NodeId, QId, SessId, QNode,
                {"?i4", "?y", eI("worksAt"), eI("ijs")},
                none, none, ParentPid,
                #{"?i4" => 1, "?y" => 2}, inner},
    gen_server:call(QNode, StartMsg),
    QNode.
%% ====> END OF LINE <==== | src/join_query_node.erl | 0.532911 | 0.546557 | join_query_node.erl | starcoder |
%% Copyright 2015 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(fn_error).
-export([to_string/2, normalize/1]).
%% @doc Formats one error tuple -- or a list of them -- for `Module'.
%% Each error renders as "Module:Line:Type: Title at line N: Details\n"
%% (the line number appears twice by design of the original format).
to_string(Module, Errors) when is_list(Errors) ->
    [to_string(Module, Error) || Error <- Errors];
to_string(Module, {Type, Line, Details}) ->
    io_lib:format("~p:~p:~p: ~s at line ~p: ~s~n",
                  [Module, Line, Type, type_to_string(Type), Line,
                   details_to_string(Details)]).
%% @doc Maps an error-type atom to a human readable title.
%% Known types yield a binary; unknown atoms fall back to their textual
%% name as a char list (both print identically through `~s' in
%% to_string/2, which is the only way the result is consumed here).
type_to_string(Type) ->
    Titles = #{invalid_fn_ref => <<"Invalid Function Reference">>,
               invalid_bin_type_specifier_field => <<"Invalid Type Specifier Field">>,
               invalid_bin_type_specifier_value => <<"Invalid Type Specifier Value">>,
               unknown_compiler_info => <<"Unknown Compiler Info Name">>,
               case_mismatch => <<"Case Mismatch">>,
               bad_record_field_init => <<"Bad Record Field Initialization">>,
               bad_record_field_decl => <<"Bad Record Field Declaration">>,
               invalid_export => <<"Invalid Export">>,
               invalid_expression => <<"Invalid Expression">>,
               invalid_top_level_expression => <<"Invalid Top Level Expression">>,
               invalid_type_declaration => <<"Invalid Type Declaration">>,
               invalid_type_value => <<"Invalid Type Value">>,
               invalid_type_argument => <<"Invalid Type Argument">>,
               invalid_catch => <<"Invalid Catch">>,
               duplicated_function_spec => <<"Duplicated Function Spec">>},
    case maps:find(Type, Titles) of
        {ok, Title} -> Title;
        error -> atom_to_list(Type)
    end.
%% Pretty-prints a value that may carry an AST: `{ast, Ast}' terms are
%% rendered through the fn_pp printer, anything else via `~p'.
format_maybe_ast({ast, Ast}) -> fn_pp:print(Ast);
format_maybe_ast(Other) -> io_lib:format("~p", [Other]).
%% @doc Renders error details: `{expected, E, got, G}' tuples become an
%% "Expected ... got ..." line (a list E is assumed printable and shown
%% via ~s, anything else via ~p); other terms are shown as-is.
details_to_string({expected, Expected, got, Got}) ->
    Fmt = case is_list(Expected) of
              true  -> "Expected ~s got ~s";
              false -> "Expected ~p got ~s"
          end,
    io_lib:format(Fmt, [Expected, format_maybe_ast(Got)]);
details_to_string(Other) ->
    format_maybe_ast(Other).
%% @doc Renders a compiler `{error, _}' term as a human readable iolist.
%% Recognises lexer, parser and efene error payloads; anything else is
%% pretty-printed with `~p'.
normalize({error, Detail}) ->
    normalize_detail(Detail).

%% Render the payload of an `{error, _}' tuple; clause order mirrors the
%% original so ambiguous 3-tuples resolve identically.
normalize_detail({Line, fn_parser, Reason}) ->
    io_lib:format("~p: parse error: '~s'", [Line, Reason]);
normalize_detail({Line, fn_lexer, {illegal, Reason}}) ->
    io_lib:format("~p: illegal char ~p", [Line, Reason]);
normalize_detail({Line, fn_lexer, {eof, _}}) ->
    io_lib:format("~p: end of file", [Line]);
normalize_detail({efene, _Module, Reason}) ->
    io_lib:format("~s", [Reason]);
normalize_detail(Other) ->
    io_lib:format("~p", [Other]).
% Copyright (c) 2014-2018 <NAME> aka dark_k3y
% Initial implementation was a little slow, so:
% - several optimizations approaches is heavilly based on JSONE implementation
% by Copyright (c) 2013-2016, <NAME> <<EMAIL>> (MIT LICENSE)
% see https://github.com/sile/jsone/blob/master/src/jsone_decode.erl
% for more details
% Usage of original JSONE was not possible due to the fact, that
% erlamsa should be able to parse badly crafted JSON docs
%
% LICENSE
%
% Permission is hereby granted, free of charge, to any person obtaining a copy
% of this software and associated documentation files (the "Software"), to deal
% in the Software without restriction, including without limitation the rights
% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
% copies of the Software, and to permit persons to whom the Software is
% furnished to do so, subject to the following conditions:
%
% The above copyright notice and this permission notice shall be included in
% all copies or substantial portions of the Software.
%
% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
% SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
% DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
% OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
% THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-module(erlamsa_json).
-author("dark_k3y").
-compile([export_all]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile([export_all]).
-endif.
-include("erlamsa.hrl").
%% API
-export([tokenize/1, tokens_to_erlang/1]).
-define(NOT_SEPARATOR(C),
C =/= $ , C =/= $\n, C =/= $\r, C =/= $\t, C =/= $,, C =/= $], C =/= $}, C =/= $:).
%% TODO: add specs
%%%
%%% JSON tokenizer
%%%
% Grammar derived from https://tools.ietf.org/html/rfc7159
% document: ws value ws
% ws : \x20 | \t | \r | \n
% value: string | number | object | array | true | false | null
% elements: value | value ws , ws elements
% array: ws [ ws ] | ws [ ws elements ws ]
% object: ws { ws } | ws { ws members ws }
% members: pair | pair ws , ws members
% pair: string : value
%% document: ws value ws -- a single value surrounded by optional whitespace
%% @doc Tokenizes a JSON document given as a binary.
%% Parsing starts in the `value' context (a document is a single value
%% surrounded by optional whitespace); ws/3 accumulates tokens in
%% reverse order, so the list is flipped once at the end.
tokenize(Bin) ->
    ReversedTokens = ws(Bin, [value], []),
    lists:reverse(ReversedTokens).
%% Skips insignificant whitespace, then dispatches on the head of the
%% parser context stack.  The stack records what the tokenizer expects
%% next: a value, an array/object interior, or pending element/member
%% accumulation.
ws(<<$\t, Rest/binary>>, Context, Acc) -> ws(Rest, Context, Acc);
ws(<<$\n, Rest/binary>>, Context, Acc) -> ws(Rest, Context, Acc);
ws(<<$\r, Rest/binary>>, Context, Acc) -> ws(Rest, Context, Acc);
ws(<<$ , Rest/binary>>, Context, Acc) -> ws(Rest, Context, Acc);
%% End of input: return the (still reversed) token accumulator.
ws(<<>>, _, Acc) -> Acc;
ws(Bin, Context = [Term|RestContext], Acc) ->
%io:format("111111 ~p~n", [Bin]),
case Term of
%% array/object push their own end-marker for the closing bracket.
array -> array(Bin, [array_end|RestContext], Acc);
{elements, List} -> elements(Bin, RestContext, List, Acc);
object -> object(Bin, [object_end|RestContext], Acc);
{members, Pairs} -> members(Bin, RestContext, Pairs, Acc);
pair -> pair(Bin, RestContext, Acc);
%% pair_delim deliberately passes the full Context (keeps itself on
%% the stack) -- NOTE(review): confirm this asymmetry is intended.
pair_delim -> pair(Bin, Context, Acc);
value -> value(Bin, RestContext, Acc)
end.
%% Parse a single JSON value. Compound values (`[' and `{') push a new
%% context frame and loop back through ws/3; primitive values are
%% tokenized directly and pushed into the enclosing context.
value(<<$[, Rest/binary>>, Context, Acc) ->
    ws(Rest, [array|Context], Acc);
value(<<${, Rest/binary>>, Context, Acc) ->
    ws(Rest, [object|Context], Acc);
value(<<"true", Rest/binary>>, Context, Acc) ->
    push(Rest, Context, {constant, true}, Acc);
value(<<"false", Rest/binary>>, Context, Acc) ->
    push(Rest, Context, {constant, false}, Acc);
value(<<"null", Rest/binary>>, Context, Acc) ->
    push(Rest, Context, {constant, null}, Acc);
value(<<$", Rest/binary>>, Context, Acc) ->
    string(Rest, Context, [], Acc);
%% Anything else is treated as a number-like token; bad input is
%% accepted on purpose (fuzzer-friendly parsing).
value(Bin, Context, Acc) ->
    number(Bin, Context, Acc).
%% Arrays
%% An immediately closing `]' yields an empty array token; otherwise
%% parse the first element with an {elements, []} frame that collects
%% the parsed values.
array(<<$], Rest/binary>>, [array_end|Context], Acc) ->
    push(Rest, Context, {array, []}, Acc);
array(Bin, Context, Acc) ->
    ws(Bin, [value, {elements, []}|Context], Acc).
%% `]' closes the array; elements were collected in reverse order, so
%% reverse before emitting the token.
elements(<<$], Rest/binary>>, [array_end|Context], List, Acc) ->
    push(Rest, Context, {array, lists:reverse(List)}, Acc);
%% `,' separates elements: go parse the next value.
elements(<<$,, Rest/binary>>, Context, List, Acc) ->
    ws(Rest, [value, {elements, List}|Context], Acc).
%% Objects
%% An immediately closing `}' yields an empty object token; otherwise
%% parse the first key/value pair with a {members, []} frame that
%% collects the parsed pairs.
object(<<$}, Rest/binary>>, [object_end|Context], Acc) ->
    push(Rest, Context, {object, []}, Acc);
object(Bin, Context, Acc) ->
    ws(Bin, [pair, {members, []}|Context], Acc).
%% `}' closes the object; members were collected in reverse order, so
%% reverse before emitting the token.
members(<<$}, Rest/binary>>, [object_end|Context], Pairs, Acc) ->
    push(Rest, Context, {object, lists:reverse(Pairs)}, Acc);
%% `,' separates members: go parse the next pair.
members(<<$,, Rest/binary>>, Context, Pairs, Acc) ->
    ws(Rest, [pair, {members, Pairs}|Context], Acc).
%% Parse an object member (key `:' value). The first entry parses the
%% key as a value under a pair_delim frame; once push/4 has stored the
%% key and ws/3 re-dispatches to this function, the `:' is consumed and
%% the value side is parsed under a pair_end frame.
pair(<<$:, Rest/binary>>, [pair_delim | Context], Acc) ->
    ws(Rest, [value, pair_end | Context], Acc);
pair(Bin, Context, Acc) ->
    ws(Bin, [value, pair_delim | Context], Acc).
%% Primitive values parsing
%% Push a finished token `Value' into the enclosing context frame:
%% - empty context: the token is a top-level result, prepend to Acc;
%% - {elements, _} / {members, _}: collect into the current array/object;
%% - pair_delim: the value just parsed is an object key awaiting `:';
%% - pair_end + pair_start: the key and value complete a pair token,
%%   which is itself pushed into the next enclosing frame.
push(Bin, [], Value, Acc) ->
    ws(Bin, [], [Value|Acc]);
push(Bin, [{elements, List} | Context], Value, Acc) ->
    ws(Bin, [{elements, [Value|List]} | Context], Acc);
push(Bin, [{members, List} | Context], Value, Acc) ->
    ws(Bin, [{members, [Value|List]} | Context], Acc);
push(Bin, [pair_delim | Context], Key, Acc) ->
    ws(Bin, [pair_delim, {pair_start, Key} | Context], Acc);
push(Bin, [pair_end, {pair_start, Key} | Context], Value, Acc) ->
    push(Bin, Context, {pair, Key, Value}, Acc).
%% TODO: more effective way to handle big strings
%% this may be slow in some cases
%% Accumulate string characters (in reverse) until the closing quote.
%% NOTE(review): backslash escapes are not treated specially here, so a
%% string containing `\"' terminates at the escaped quote — presumably
%% acceptable for a fuzzer; confirm before reusing as a JSON parser.
string(<<$", Rest/binary>>, Context, RevStr, Acc) ->
    push(Rest, Context, {string, lists:reverse(RevStr)}, Acc);
string(<<C:8, Rest/binary>>, Context, RevStr, Acc) ->
    string(Rest, Context, [C|RevStr], Acc);
%% Unterminated string at end of input: emit a junkstring token; the
%% cons'd `$"' ends up at the END of the string after the reverse.
string(<<>>, Context, RevStr, Acc) ->
    push(<<>>, Context, {junkstring, lists:reverse([$"|RevStr])}, Acc).
%% First character of a number-like token. Everything up to the next
%% JSON separator (see the NOT_SEPARATOR macro) is consumed, whether or
%% not it is a valid numeric literal.
number(<<C:8, Rest/binary>>, Context, Acc) when ?NOT_SEPARATOR(C) ->
    number_rest(Rest, Context, [C], Acc).
%% Consume the remainder of a number-like token (accumulated reversed)
%% and emit it once a separator or end of input is reached.
number_rest(<<C:8, Rest/binary>>, Context, N, Acc) when ?NOT_SEPARATOR(C) ->
    number_rest(Rest, Context, [C|N], Acc);
number_rest(Bin, Context, N, Acc) ->
    push(Bin, Context, {number, lists:reverse(N)}, Acc).
%%%
%%% /JSON tokenizer
%%%
%% @doc Convert a token AST (as produced by tokenize/1) into Erlang
%% terms: objects become maps, arrays become lists, strings stay char
%% lists, numbers become integers or floats (or the atom
%% `invalid_number' when the literal parses as neither). Tokens with no
%% dedicated clause ({constant, _}, {junkstring, _}) pass through
%% unchanged.
tokens_to_erlang(Ast) when is_list(Ast) ->
    lists:map(fun tokens_to_erlang/1, Ast);
tokens_to_erlang({object, Lst}) ->
    %% Later duplicate keys overwrite earlier ones, matching the
    %% previous maps:put/3 fold semantics.
    maps:from_list(lists:map(fun tokens_to_erlang/1, Lst));
tokens_to_erlang({array, Lst}) ->
    lists:map(fun tokens_to_erlang/1, Lst);
tokens_to_erlang({pair, Key, Value}) ->
    {tokens_to_erlang(Key), tokens_to_erlang(Value)};
tokens_to_erlang({string, Value}) ->
    Value;
tokens_to_erlang({number, Value}) ->
    parse_number(Value);
tokens_to_erlang(Token) ->
    Token.

%% @private Integer first, then float, then give up. Erlang's
%% list_to_float/1 requires a decimal point, so "42" must be tried as
%% an integer before falling back.
parse_number(Value) ->
    try
        list_to_integer(Value)
    catch
        error:badarg -> parse_float(Value)
    end.

%% @private Second attempt of parse_number/1; malformed literals map to
%% the atom `invalid_number' (erlamsa must survive badly crafted JSON).
parse_float(Value) ->
    try
        list_to_float(Value)
    catch
        error:badarg -> invalid_number
    end.
%% Copyright (c) 2020 Facebook, Inc. and its affiliates.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(erlt_ast).
-export([
prewalk/2, prewalk/3,
postwalk/2, postwalk/3,
traverse/4,
map_anno/2
]).
-type ctx() :: form | expr | guard | pattern | type.
-type t() :: tuple() | [tuple()].
%% @doc Accumulator-less pre-order walk: `Fun' is applied to every node
%% before its children are visited.
-spec prewalk(t(), fun((t(), ctx()) -> t())) -> t().
prewalk(Ast, Fun) ->
    %% Thread a dummy accumulator through the stateful variant and
    %% discard it on the way out.
    Wrapper = fun(Node, Acc, Ctx) -> {Fun(Node, Ctx), Acc} end,
    {Ast1, _Acc} = prewalk(Ast, undefined, Wrapper),
    Ast1.
%% @doc Pre-order walk with an accumulator: `Fun' runs before children
%% are visited; nothing runs afterwards.
-spec prewalk(t(), any(), fun((t(), any(), ctx()) -> {t(), any()})) -> {t(), any()}.
prewalk(Ast, Acc0, Fun) ->
    %% A pre-order walk is a traversal whose post-function is identity.
    Identity = fun(Node, Acc, _Ctx) -> {Node, Acc} end,
    traverse(Ast, Acc0, Fun, Identity).
%% @doc Accumulator-less post-order walk: `Fun' is applied to every
%% node after its children have been visited.
-spec postwalk(t(), fun((t(), ctx()) -> t())) -> t().
postwalk(Ast, Fun) ->
    %% Thread a dummy accumulator through the stateful variant and
    %% discard it on the way out.
    Wrapper = fun(Node, Acc, Ctx) -> {Fun(Node, Ctx), Acc} end,
    {Ast1, _Acc} = postwalk(Ast, undefined, Wrapper),
    Ast1.
%% @doc Post-order walk with an accumulator: `Fun' runs after children
%% have been visited; nothing runs beforehand.
-spec postwalk(t(), any(), fun((t(), any(), ctx()) -> {t(), any()})) -> {t(), any()}.
postwalk(Ast, Acc0, Fun) ->
    %% A post-order walk is a traversal whose pre-function is identity.
    Identity = fun(Node, Acc, _Ctx) -> {Node, Acc} end,
    traverse(Ast, Acc0, Identity, Fun).
%% @doc Rewrite the annotation of every node in the AST with `Fun'.
-spec map_anno(t(), fun((erl_anno:anno()) -> erl_anno:anno())) -> t().
map_anno(Ast, Fun) ->
    %% Every AST node stores its annotation as the second element.
    Update = fun(Node, _Ctx) ->
        Anno = element(2, Node),
        setelement(2, Node, Fun(Anno))
    end,
    prewalk(Ast, Update).
-define(IS_ATOMIC(Kind),
Kind =:= integer orelse
Kind =:= float orelse
Kind =:= char orelse
Kind =:= atom orelse
Kind =:= atom_expr orelse
Kind =:= string orelse
Kind =:= var
).
-define(IS_TYPE(Kind),
Kind =:= type orelse
Kind =:= opaque orelse
Kind =:= enum orelse
Kind =:= struct orelse
Kind =:= exception orelse
Kind =:= message orelse
Kind =:= unchecked_opaque
).
-define(IS_SPEC(Kind),
Kind =:= spec orelse
Kind =:= callback
).
-define(IS_FUNCTION(Kind),
Kind =:= function orelse
Kind =:= unchecked_function
).
%% Entry point of the traversal: dispatch on the outermost shape of the
%% AST (a whole module is a list of forms; forms get the `form'
%% context) and recurse with Pre/Post applied around every node.
-spec traverse(t(), any(),
               fun((t(), any(), ctx()) -> {t(), any()}),
               fun((t(), any(), ctx()) -> {t(), any()})) -> {t(), any()}.
traverse(Ast, Acc, Pre, Post) ->
    case Ast of
        %% the module definition is a list
        List when is_list(List) ->
            Fun = fun(Node, Acc1) -> traverse(Node, Acc1, Pre, Post) end,
            lists:mapfoldl(Fun, Acc, List);
        {attribute, _, _, _} = Node ->
            do_traverse(Node, Acc, Pre, Post, form);
        {function, _, _, _, _} = Node ->
            do_traverse(Node, Acc, Pre, Post, form);
        %% don't traverse special parser forms
        {error, _} = Node ->
            {Node, Acc};
        {warning, _} = Node ->
            {Node, Acc};
        {eof, _} = Node ->
            {Node, Acc};
        %% a bare node (not a form list) is traversed as an expression
        Node when tuple_size(Node) >= 2 ->
            do_traverse(Node, Acc, Pre, Post, expr)
    end.
%% Walk a single AST node: apply Pre, recurse into each child with the
%% appropriate context (form | expr | guard | pattern | type), rebuild
%% the node from the transformed children, then apply Post. Clause
%% order matters: more specific shapes (e.g. {type, _, map, any}) must
%% precede the generic {type, _, Name, Args} clause.
do_traverse(Node0, Acc, Pre, Post, Ctx) ->
    {Node, Acc0} = Pre(Node0, Acc, Ctx),
    case Node of
        {attribute, Line, Type, {Name, Def0, Args0}} when ?IS_TYPE(Type) ->
            {Args1, Acc1} = do_traverse_list(Args0, Acc0, Pre, Post, type),
            {Def1, Acc2} = do_traverse(Def0, Acc1, Pre, Post, type),
            Post({attribute, Line, Type, {Name, Def1, Args1}}, Acc2, Ctx);
        {attribute, Line, Spec, {MFA, Types0}} when ?IS_SPEC(Spec) ->
            {Types1, Acc1} = do_traverse_list(Types0, Acc0, Pre, Post, type),
            Post({attribute, Line, Spec, {MFA, Types1}}, Acc1, Ctx);
        %% TODO: traverse other attributes that can have type definitions
        {attribute, _, _, _} ->
            Post(Node, Acc0, Ctx);
        {F, Line, Name, Arity, Clauses} when ?IS_FUNCTION(F) ->
            {Clauses1, Acc1} = do_traverse_list(Clauses, Acc0, Pre, Post, Ctx),
            Post({F, Line, Name, Arity, Clauses1}, Acc1, Ctx);
        %% Clause heads are patterns, guards get the guard context,
        %% bodies are expressions.
        {clause, Line, Head0, Guard0, Body0} ->
            {Head1, Acc1} = do_traverse_list(Head0, Acc0, Pre, Post, pattern),
            {Guard1, Acc2} = do_traverse_guards(Guard0, Acc1, Pre, Post),
            {Body1, Acc3} = do_traverse_list(Body0, Acc2, Pre, Post, expr),
            Post({clause, Line, Head1, Guard1, Body1}, Acc3, Ctx);
        %% Leaves: atomics and nil have no children to recurse into.
        {Atomic, _, _} when ?IS_ATOMIC(Atomic) ->
            Post(Node, Acc0, Ctx);
        {nil, _} ->
            Post(Node, Acc0, Ctx);
        %% The left side of a match is always a pattern, whatever the
        %% current context is.
        {match, Line, Left0, Right0} ->
            {Left1, Acc1} = do_traverse(Left0, Acc0, Pre, Post, pattern),
            {Right1, Acc2} = do_traverse(Right0, Acc1, Pre, Post, Ctx),
            Post({match, Line, Left1, Right1}, Acc2, Ctx);
        {cons, Line, Head0, Tail0} ->
            {Head1, Acc1} = do_traverse(Head0, Acc0, Pre, Post, Ctx),
            {Tail1, Acc2} = do_traverse(Tail0, Acc1, Pre, Post, Ctx),
            Post({cons, Line, Head1, Tail1}, Acc2, Ctx);
        {tuple, Line, Values0} ->
            {Values1, Acc1} = do_traverse_list(Values0, Acc0, Pre, Post, Ctx),
            Post({tuple, Line, Values1}, Acc1, Ctx);
        {enum, Line, Name0, Constr0, Values0} ->
            {Name1, Acc1} = do_traverse_atom_or_node(Name0, Acc0, Pre, Post, Ctx),
            {Constr1, Acc2} = do_traverse(Constr0, Acc1, Pre, Post, Ctx),
            {Values1, Acc3} = do_traverse_atom_or_list(Values0, Acc2, Pre, Post, Ctx),
            Post({enum, Line, Name1, Constr1, Values1}, Acc3, Ctx);
        {map, Line, Values0} ->
            {Values1, Acc1} = do_traverse_list(Values0, Acc0, Pre, Post, Ctx),
            Post({map, Line, Values1}, Acc1, Ctx);
        {map, Line, Expr0, Values0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Values1, Acc2} = do_traverse_list(Values0, Acc1, Pre, Post, Ctx),
            Post({map, Line, Expr1, Values1}, Acc2, Ctx);
        {map_field_exact, Line, Key0, Value0} ->
            %% map keys are never patterns, but (limited) expressions
            {Key1, Acc1} = do_traverse(Key0, Acc0, Pre, Post, pattern_to_expr(Ctx)),
            {Value1, Acc2} = do_traverse(Value0, Acc1, Pre, Post, Ctx),
            Post({map_field_exact, Line, Key1, Value1}, Acc2, Ctx);
        {map_field_assoc, Line, Key0, Value0} ->
            {Key1, Acc1} = do_traverse(Key0, Acc0, Pre, Post, Ctx),
            {Value1, Acc2} = do_traverse(Value0, Acc1, Pre, Post, Ctx),
            Post({map_field_assoc, Line, Key1, Value1}, Acc2, Ctx);
        {shape, Line, Fields0} ->
            {Fields1, Acc1} = do_traverse_list(Fields0, Acc0, Pre, Post, Ctx),
            Post({shape, Line, Fields1}, Acc1, Ctx);
        {shape_update, Line, Expr0, Fields0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Fields1, Acc2} = do_traverse_list(Fields0, Acc1, Pre, Post, Ctx),
            Post({shape_update, Line, Expr1, Fields1}, Acc2, Ctx);
        {shape_field, Line, Expr0, Field0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Field1, Acc2} = do_traverse(Field0, Acc1, Pre, Post, Ctx),
            Post({shape_field, Line, Expr1, Field1}, Acc2, Ctx);
        {struct, Line, Name0, Fields0} ->
            {Name1, Acc1} = do_traverse(Name0, Acc0, Pre, Post, Ctx),
            {Fields1, Acc2} = do_traverse_list(Fields0, Acc1, Pre, Post, Ctx),
            Post({struct, Line, Name1, Fields1}, Acc2, Ctx);
        {struct, Line, Expr0, Name0, Fields0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Name1, Acc2} = do_traverse(Name0, Acc1, Pre, Post, Ctx),
            {Fields1, Acc3} = do_traverse_list(Fields0, Acc2, Pre, Post, Ctx),
            Post({struct, Line, Expr1, Name1, Fields1}, Acc3, Ctx);
        {field, Line, Name0, Value0} ->
            {Name1, Acc1} = do_traverse_atom_or_node(Name0, Acc0, Pre, Post, Ctx),
            {Value1, Acc2} = do_traverse(Value0, Acc1, Pre, Post, Ctx),
            Post({field, Line, Name1, Value1}, Acc2, Ctx);
        {struct_field, Line, Expr0, Name0, Value0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Name1, Acc2} = do_traverse(Name0, Acc1, Pre, Post, Ctx),
            {Value1, Acc3} = do_traverse(Value0, Acc2, Pre, Post, Ctx),
            Post({struct_field, Line, Expr1, Name1, Value1}, Acc3, Ctx);
        {struct_index, Line, Name0, Field0} ->
            {Name1, Acc1} = do_traverse(Name0, Acc0, Pre, Post, Ctx),
            {Field1, Acc2} = do_traverse(Field0, Acc1, Pre, Post, Ctx),
            Post({struct_index, Line, Name1, Field1}, Acc2, Ctx);
        {op, Line, Op, Expr0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            Post({op, Line, Op, Expr1}, Acc1, Ctx);
        {op, Line, Op, Left0, Right0} ->
            {Left1, Acc1} = do_traverse(Left0, Acc0, Pre, Post, Ctx),
            {Right1, Acc2} = do_traverse(Right0, Acc1, Pre, Post, Ctx),
            Post({op, Line, Op, Left1, Right1}, Acc2, Ctx);
        {bin, Line, Values0} ->
            {Values1, Acc1} = do_traverse_list(Values0, Acc0, Pre, Post, Ctx),
            Post({bin, Line, Values1}, Acc1, Ctx);
        {bin_element, Line, Expr0, Size0, Type} ->
            %% don't recurse into Type, it's not AST, but special syntax
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            %% bin field sizes are never patterns, but (limited) expressions
            {Size1, Acc2} = do_traverse_atom_or_node(Size0, Acc1, Pre, Post, pattern_to_expr(Ctx)),
            Post({bin_element, Line, Expr1, Size1, Type}, Acc2, Ctx);
        {call, Line, Name0, Args0} ->
            {Name1, Acc1} = do_traverse(Name0, Acc0, Pre, Post, Ctx),
            {Args1, Acc2} = do_traverse_list(Args0, Acc1, Pre, Post, Ctx),
            Post({call, Line, Name1, Args1}, Acc2, Ctx);
        {remote, Line, Mod0, Name0} ->
            {Mod1, Acc1} = do_traverse(Mod0, Acc0, Pre, Post, Ctx),
            {Name1, Acc2} = do_traverse(Name0, Acc1, Pre, Post, Ctx),
            Post({remote, Line, Mod1, Name1}, Acc2, Ctx);
        {lc, Line, Expr0, Compr0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Compr1, Acc2} = do_traverse_list(Compr0, Acc1, Pre, Post, Ctx),
            Post({lc, Line, Expr1, Compr1}, Acc2, Ctx);
        {bc, Line, Expr0, Compr0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Compr1, Acc2} = do_traverse_list(Compr0, Acc1, Pre, Post, Ctx),
            Post({bc, Line, Expr1, Compr1}, Acc2, Ctx);
        %% Comprehension generators: the left side is a pattern.
        {Generate, Line, Pattern0, Expr0} when Generate =:= generate; Generate =:= b_generate ->
            {Pattern1, Acc1} = do_traverse(Pattern0, Acc0, Pre, Post, pattern),
            {Expr1, Acc2} = do_traverse(Expr0, Acc1, Pre, Post, Ctx),
            Post({Generate, Line, Pattern1, Expr1}, Acc2, Ctx);
        {block, Line, Exprs0} ->
            {Exprs1, Acc1} = do_traverse_list(Exprs0, Acc0, Pre, Post, Ctx),
            Post({block, Line, Exprs1}, Acc1, Ctx);
        {'if', Line, Clauses0} ->
            {Clauses1, Acc1} = do_traverse_list(Clauses0, Acc0, Pre, Post, Ctx),
            Post({'if', Line, Clauses1}, Acc1, Ctx);
        {'case', Line, Expr0, Clauses0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            {Clauses1, Acc2} = do_traverse_list(Clauses0, Acc1, Pre, Post, Ctx),
            Post({'case', Line, Expr1, Clauses1}, Acc2, Ctx);
        {'receive', Line, Clauses0} ->
            {Clauses1, Acc1} = do_traverse_list(Clauses0, Acc0, Pre, Post, Ctx),
            Post({'receive', Line, Clauses1}, Acc1, Ctx);
        {'receive', Line, Clauses0, Timeout0, After0} ->
            {Clauses1, Acc1} = do_traverse_list(Clauses0, Acc0, Pre, Post, Ctx),
            {Timeout1, Acc2} = do_traverse(Timeout0, Acc1, Pre, Post, Ctx),
            {After1, Acc3} = do_traverse_list(After0, Acc2, Pre, Post, Ctx),
            Post({'receive', Line, Clauses1, Timeout1, After1}, Acc3, Ctx);
        {'fun', Line, {clauses, Clauses0}} ->
            {Clauses1, Acc1} = do_traverse_list(Clauses0, Acc0, Pre, Post, Ctx),
            Post({'fun', Line, {clauses, Clauses1}}, Acc1, Ctx);
        %% `fun F/A' has no traversable children.
        {'fun', _Line, {function, _F, _A}} ->
            Post(Node, Acc0, Ctx);
        {'fun', Line, {function, Module0, Fun0, Arity0}} ->
            {Module1, Acc1} = do_traverse(Module0, Acc0, Pre, Post, Ctx),
            {Fun1, Acc2} = do_traverse(Fun0, Acc1, Pre, Post, Ctx),
            {Arity1, Acc3} = do_traverse(Arity0, Acc2, Pre, Post, Ctx),
            Post({'fun', Line, {function, Module1, Fun1, Arity1}}, Acc3, Ctx);
        {named_fun, Line, Name, Clauses0} ->
            {Clauses1, Acc1} = do_traverse_list(Clauses0, Acc0, Pre, Post, Ctx),
            Post({named_fun, Line, Name, Clauses1}, Acc1, Ctx);
        {'catch', Line, Expr0} ->
            {Expr1, Acc1} = do_traverse(Expr0, Acc0, Pre, Post, Ctx),
            Post({'catch', Line, Expr1}, Acc1, Ctx);
        {'try', Line, Exprs0, OfClauses0, CatchClauses0, After0} ->
            {Exprs1, Acc1} = do_traverse_list(Exprs0, Acc0, Pre, Post, Ctx),
            {OfClauses1, Acc2} = do_traverse_list(OfClauses0, Acc1, Pre, Post, Ctx),
            {CatchClauses1, Acc3} = do_traverse_list(CatchClauses0, Acc2, Pre, Post, Ctx),
            {After1, Acc4} = do_traverse_list(After0, Acc3, Pre, Post, Ctx),
            Post({'try', Line, Exprs1, OfClauses1, CatchClauses1, After1}, Acc4, Ctx);
        %% Special leaf type forms must match before the generic
        %% {type, Line, Name, Args} clause below.
        {type, _, map, any} ->
            Post(Node, Acc0, Ctx);
        {type, _, tuple, any} ->
            Post(Node, Acc0, Ctx);
        {type, Line, enum, Name0, Variants0} ->
            {Name1, Acc1} = do_traverse(Name0, Acc0, Pre, Post, Ctx),
            {Variants1, Acc2} = do_traverse_list(Variants0, Acc1, Pre, Post, Ctx),
            Post({type, Line, enum, Name1, Variants1}, Acc2, Ctx);
        {variant, Line, Name0, Fields0} ->
            {Name1, Acc1} = do_traverse(Name0, Acc0, Pre, Post, Ctx),
            {Fields1, Acc2} = do_traverse_atom_or_list(Fields0, Acc1, Pre, Post, Ctx),
            Post({variant, Line, Name1, Fields1}, Acc2, Ctx);
        {type, Line, struct, Name0, Fields0} ->
            {Name1, Acc1} = do_traverse(Name0, Acc0, Pre, Post, Ctx),
            {Fields1, Acc2} = do_traverse_list(Fields0, Acc1, Pre, Post, Ctx),
            Post({type, Line, struct, Name1, Fields1}, Acc2, Ctx);
        %% Field defaults are traversed in the guard context.
        {field_definition, Line, Name0, Default0, Type0} ->
            {Name1, Acc1} = do_traverse_atom_or_node(Name0, Acc0, Pre, Post, Ctx),
            {Default1, Acc2} = do_traverse_atom_or_node(Default0, Acc1, Pre, Post, guard),
            {Type1, Acc3} = do_traverse(Type0, Acc2, Pre, Post, Ctx),
            Post({field_definition, Line, Name1, Default1, Type1}, Acc3, Ctx);
        {type, Line, open_shape, Args0, Var} ->
            {Args1, Acc1} = do_traverse_list(Args0, Acc0, Pre, Post, Ctx),
            {Var1, Acc2} = do_traverse(Var, Acc1, Pre, Post, Ctx),
            Post({type, Line, open_shape, Args1, Var1}, Acc2, Ctx);
        %% The first argument is the normal fun followed by a list of constraints.
        {type, Line, 'bounded_fun', [Fun0, Guards0]} ->
            {Fun1, Acc1} = do_traverse(Fun0, Acc0, Pre, Post, Ctx),
            {Guards1, Acc2} = do_traverse_list(Guards0, Acc1, Pre, Post, Ctx),
            Post({type, Line, 'bounded_fun', [Fun1, Guards1]}, Acc2, Ctx);
        %% The first argument to the constraint is the type of constraint followed by a list.
        {type, Line, constraint, [Constraint, Args0]} ->
            {Args1, Acc1} = do_traverse_list(Args0, Acc0, Pre, Post, Ctx),
            Post({type, Line, constraint, [Constraint, Args1]}, Acc1, Ctx);
        {type, Line, Name, Args0} ->
            {Args1, Acc1} = do_traverse_list(Args0, Acc0, Pre, Post, Ctx),
            Post({type, Line, Name, Args1}, Acc1, Ctx);
        {type, _, any} ->
            Post(Node, Acc0, Ctx);
        {ann_type, Line, Args0} ->
            {Args1, Acc1} = do_traverse_list(Args0, Acc0, Pre, Post, Ctx),
            Post({ann_type, Line, Args1}, Acc1, Ctx);
        {remote_type, Line, [Mod0, Name0, Args0]} ->
            {Mod1, Acc1} = do_traverse(Mod0, Acc0, Pre, Post, Ctx),
            {Name1, Acc2} = do_traverse(Name0, Acc1, Pre, Post, Ctx),
            {Args1, Acc3} = do_traverse_list(Args0, Acc2, Pre, Post, Ctx),
            Post({remote_type, Line, [Mod1, Name1, Args1]}, Acc3, Ctx);
        {user_type, Line, Name, Args0} ->
            {Args1, Acc1} = do_traverse_list(Args0, Acc0, Pre, Post, Ctx),
            Post({user_type, Line, Name, Args1}, Acc1, Ctx)
    end.
%% Traverse every node in a list, threading the accumulator from left
%% to right; all nodes share the same context.
do_traverse_list(List, Acc0, Pre, Post, Ctx) ->
    lists:mapfoldl(
        fun(Node, Acc) -> do_traverse(Node, Acc, Pre, Post, Ctx) end,
        Acc0,
        List
    ).
%% Guards have an implicit two-level structure: a disjunction (`;') of
%% conjunctions (`,'). Synthetic guard_or/guard_and wrapper nodes (with
%% dummy location 0) are offered to Pre/Post so callbacks can observe
%% the structure, but the matches below require the callbacks to return
%% the same wrapper kind back.
do_traverse_guards(List0, Acc0, Pre, Post) ->
    %% no support for transforming guard_or/and to something else
    {{guard_or, _, List1}, Acc1} = Pre({guard_or, 0, List0}, Acc0, guard),
    Fun = fun(Nodes0, AccInner0) ->
        {{guard_and, _, Nodes1}, AccInner1} = Pre({guard_and, 0, Nodes0}, AccInner0, guard),
        {Nodes2, AccInner2} = do_traverse_list(Nodes1, AccInner1, Pre, Post, guard),
        {{guard_and, _, Nodes3}, AccInner3} = Post({guard_and, 0, Nodes2}, AccInner2, guard),
        {Nodes3, AccInner3}
    end,
    {List2, Acc2} = lists:mapfoldl(Fun, Acc1, List1),
    {{guard_or, _, List3}, Acc3} = Post({guard_or, 0, List2}, Acc2, guard),
    {List3, Acc3}.
%% Some AST positions hold either a bare atom or a proper node; only
%% recurse when it is a node (the guards are disjoint, so clause order
%% does not affect behavior).
do_traverse_atom_or_node(Node, Acc, Pre, Post, Ctx) when is_tuple(Node) ->
    do_traverse(Node, Acc, Pre, Post, Ctx);
do_traverse_atom_or_node(Atom, Acc, _Pre, _Post, _Ctx) when is_atom(Atom) ->
    {Atom, Acc}.
%% Like do_traverse_atom_or_node/5, but the non-atom case is a list of
%% nodes rather than a single node.
do_traverse_atom_or_list(List, Acc, Pre, Post, Ctx) when is_list(List) ->
    do_traverse_list(List, Acc, Pre, Post, Ctx);
do_traverse_atom_or_list(Atom, Acc, _Pre, _Post, _Ctx) when is_atom(Atom) ->
    {Atom, Acc}.
%% Map keys and binary element sizes are (limited) expressions even
%% when they occur inside a pattern, so demote the pattern context to
%% the expression context; every other context is left unchanged.
%% (Also strips dataset-join metadata residue that had been fused onto
%% the final line of this file.)
pattern_to_expr(pattern) -> expr;
pattern_to_expr(Other) -> Other.
%%-------------------------------------------------------------------
%%
%% Copyright (c) 2015, <NAME> <<EMAIL>>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%%-------------------------------------------------------------------
%% @doc
%% This module provides a process match making service. A process joins one of
%% two queues and is matched with a process in the other queue. The queues are
%% managed using `sbroker_queue' callback module per queue so that a different
%% strategy can be used for both queues. Processes that die while in a queue are
%% automatically removed to prevent matching with a process that is no longer
%% alive. A broker also uses an `sbroker_meter' callback module to monitor the
%% queue and processing delays of the broker.
%%
%% There are two functions to join a queue: `ask/1' and `ask_r/1'. Processes
%% that call `ask/1' are matched against processes that call `ask_r/1'. If no
%% match is immediately available a process is queued in the relevant queue
%% until a match becomes available. If queue management is used processes may be
%% dropped without a match.
%%
%% Processes calling `ask/1' try to match with/dequeue a process in the `ask_r'
%% queue. If no process exists they are queued in the `ask' queue and await a
%% process to call `ask_r/1'.
%%
%% Similarly processes calling `ask_r/1' try to match with/dequeue a process
%% in the `ask' queue. If no process exists they are queued in the `ask_r' queue
%% and await a process to call `ask/1'.
%%
%% A broker requires a callback module. The callback modules implements one
%% callback, `init/1', with single argument `Args'. `init/1' should return
%% `{ok, {AskQueueSpec, AskRQueueSpec, [MeterSpec]})' or `ignore'.
%% `AskQueueSpec' is the queue specification for the `ask' queue,
%% `AskRQueueSpec' is the queue specification for the `ask_r' queue and
%% `MeterSpec' is a meter specification. There can be any number of meters but a
%% meter module can only be included once. In the case of `ignore' the broker is
%% not started and `start_link' returns `ignore'. As the callback modules are
%% defined in the `init/1' callback a broker supports the `dynamic' modules
%% supervisor child specification.
%%
%% Both queue and meter specifications take the form: `{Module, Args}'. `Module'
%% is the callback module and `Args' are its arguments.
%%
%% For example:
%%
%% ```
%% -module(sbroker_example).
%%
%% -behaviour(sbroker).
%%
%% -export([start_link/0]).
%% -export([ask/0]).
%% -export([ask_r/1]).
%% -export([init/1]).
%%
%% start_link() ->
%% sbroker:start_link({local, ?MODULE}, ?MODULE, [], []).
%%
%% ask() ->
%% sbroker:ask(?MODULE).
%%
%% ask_r() ->
%% sbroker:ask_r(?MODULE).
%%
%% init([]) ->
%% AskQueueSpec = {sbroker_codel_queue, #{}},
%% AskRQueueSpec = {sbroker_timeout_queue, #{}},
%% MeterSpec = {sbroker_overload_meter, #{alarm => {overload, ?MODULE}}},
%% {ok, {AskQueueSpec, AskRQueueSpec, [MeterSpec]}}.
%% '''
-module(sbroker).
%% public api
-export([ask/1]).
-export([ask/2]).
-export([ask_r/1]).
-export([ask_r/2]).
-export([nb_ask/1]).
-export([nb_ask/2]).
-export([nb_ask_r/1]).
-export([nb_ask_r/2]).
-export([async_ask/1]).
-export([async_ask/2]).
-export([async_ask/3]).
-export([async_ask_r/1]).
-export([async_ask_r/2]).
-export([async_ask_r/3]).
-export([dynamic_ask/1]).
-export([dynamic_ask/2]).
-export([dynamic_ask_r/1]).
-export([dynamic_ask_r/2]).
-export([await/2]).
-export([cancel/2]).
-export([cancel/3]).
-export([dirty_cancel/2]).
-export([change_config/1]).
-export([change_config/2]).
-export([len/1]).
-export([len/2]).
-export([len_r/1]).
-export([len_r/2]).
-export([start_link/3]).
-export([start_link/4]).
%% test api
-export([timeout/1]).
%% gen api
-export([init_it/6]).
%% sys api
-export([system_continue/3]).
-export([system_code_change/4]).
-export([system_get_state/1]).
-export([system_replace_state/2]).
-export([system_terminate/4]).
-export([format_status/2]).
%% types
-type broker() :: pid() | atom() | {atom(), node()} | {global, any()}
| {via, module(), any()}.
-type name() :: {local, atom()} | {global, any()} | {via, module(), any()}.
-type debug_option() ::
trace | log | {log, pos_integer()} | statistics |
{log_to_file, file:filename()} | {install, {fun(), any()}}.
-type start_option() ::
{debug, debug_option()} | {timeout, timeout()} |
{spawn_opt, [proc_lib:spawn_option()]} |
{read_time_after, non_neg_integer() | infinity}.
-type start_return() :: {ok, pid()} | ignore | {error, any()}.
-type handler_spec() :: {module(), any()}.
-export_type([broker/0]).
-export_type([name/0]).
-export_type([handler_spec/0]).
-callback init(Args :: any()) ->
{ok, {AskQueueSpec :: handler_spec(), AskRQueueSpec :: handler_spec(),
[MeterSpec :: handler_spec()]}} | ignore.
-record(config, {mod :: module(),
args :: any(),
parent :: pid(),
dbg :: [sys:dbg_opt()],
name :: name() | pid(),
ask_mod :: module(),
bid_mod :: module()}).
-record(time, {now :: integer(),
send :: integer(),
empty :: integer(),
next = infinity :: integer() | infinity,
seq :: non_neg_integer(),
read_after :: non_neg_integer() | infinity,
meters :: [{module(), any()}]}).
-dialyzer(no_return).
%% public api
%% @equiv ask(Broker, self())
-spec ask(Broker) -> Go | Drop when
      Broker :: broker(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
ask(Broker) ->
    %% The calling process' pid is the default request value.
    ReqValue = self(),
    sbroker_gen:call(Broker, ask, ReqValue, infinity).
%% @doc Send a match request, with value `ReqValue', to try to match with a
%% process calling `ask_r/2' on the broker, `Broker'.
%%
%% Returns `{go, Ref, Value, RelativeTime, SojournTime}' on a successful
%% match or `{drop, SojournTime}'.
%%
%% `Ref' is the transaction reference, which is a `reference()'. `Value' is the
%% value of the matched request sent by the counterparty process. `RelativeTime'
%% is the approximate time difference (in the broker's time unit) between when
%% the request was sent and the matching request was sent. `SojournTime' is the
%% approximate time spent in both the broker's message queue and internal queue.
%%
%% `RelativeTime' represents the `SojournTime' without the overhead of the
%% broker. The value measures the level of queue congestion without being
%% effected by the load of the broker.
%%
%% If `RelativeTime' is positive, the request was enqueued in the internal
%% queue awaiting a match with another request sent approximately `RelativeTime'
%% after this request was sent. Therefore `SojournTime' minus `RelativeTime'
%% is the latency, or overhead, of the broker.
%%
%% If `RelativeTime' is negative, the request dequeued a request in the internal
%% queue that was sent approximately `RelativeTime' before this request was
%% sent. Therefore `SojournTime' is the latency, or overhead, of the broker.
%%
%% If `RelativeTime' is `0', the request was matched with a request sent at
%% approximately the same time. Therefore `SojournTime' is the latency, or
%% overhead, of the broker.
%%
%% The sojourn time for matched process can be approximated by `SojournTime'
%% minus `RelativeTime'.
%%
%% If the request is dropped when using `via' module `sprotector' returns
%% `{drop, 0}' and does not send the request.
-spec ask(Broker, ReqValue) -> Go | Drop when
      Broker :: broker(),
      ReqValue :: any(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
ask(Broker, ReqValue) ->
    sbroker_gen:call(Broker, ask, ReqValue, infinity).
%% @equiv ask_r(Broker, self())
-spec ask_r(Broker) -> Go | Drop when
      Broker :: broker(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
ask_r(Broker) ->
    %% The calling process' pid is the default request value; `bid' is
    %% the internal tag for the ask_r side of the broker.
    ReqValue = self(),
    sbroker_gen:call(Broker, bid, ReqValue, infinity).
%% @doc Tries to match with a process calling `ask/2' on the same broker.
%%
%% Internally the `ask_r' side of the broker uses the `bid' tag, hence
%% the `bid' argument to the call below.
%%
%% @see ask/2
-spec ask_r(Broker, ReqValue) -> Go | Drop when
      Broker :: broker(),
      ReqValue :: any(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
ask_r(Broker, ReqValue) ->
    sbroker_gen:call(Broker, bid, ReqValue, infinity).
%% @equiv nb_ask(Broker, self())
-spec nb_ask(Broker) -> Go | Drop when
      Broker :: broker(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
nb_ask(Broker) ->
    %% Non-blocking variant: the calling process' pid is the default
    %% request value.
    ReqValue = self(),
    sbroker_gen:call(Broker, nb_ask, ReqValue, infinity).
%% @doc Tries to match with a process calling `ask_r/2' on the same broker but
%% does not enqueue the request if no immediate match ("nb" is short for
%% non-blocking). Returns
%% `{go, Ref, Value, RelativeTime, SojournTime}' on a successful match or
%% `{drop, SojournTime}'.
%%
%% `Ref' is the transaction reference, which is a `reference()'. `Value' is the
%% value of the matched process. `RelativeTime' is the time spent waiting for a
%% match after discounting time spent waiting for the broker to handle requests.
%% `SojournTime' is the time spent in the broker's message queue.
%%
%% If the request is dropped when using `via' module `sprotector' returns
%% `{drop, 0}' and does not send the request.
%%
%% @see ask/2
-spec nb_ask(Broker, ReqValue) -> Go | Drop when
      Broker :: broker(),
      ReqValue :: any(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
nb_ask(Broker, ReqValue) ->
    sbroker_gen:call(Broker, nb_ask, ReqValue, infinity).
%% @equiv nb_ask_r(Broker, self())
-spec nb_ask_r(Broker) -> Go | Drop when
      Broker :: broker(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
nb_ask_r(Broker) ->
    %% Non-blocking variant for the ask_r (`bid') side; the calling
    %% process' pid is the default request value.
    ReqValue = self(),
    sbroker_gen:call(Broker, nb_bid, ReqValue, infinity).
%% @doc Tries to match with a process calling `ask/2' on the same broker but
%% does not enqueue the request if no immediate match.
%%
%% Internally the `ask_r' side of the broker uses the `bid' tag, hence
%% the `nb_bid' argument to the call below.
%%
%% @see nb_ask/2
-spec nb_ask_r(Broker, ReqValue) -> Go | Drop when
      Broker :: broker(),
      ReqValue :: any(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
nb_ask_r(Broker, ReqValue) ->
    sbroker_gen:call(Broker, nb_bid, ReqValue, infinity).
%% @equiv async_ask(Broker, self())
-spec async_ask(Broker) -> {await, Tag, Process} | {drop, 0} when
      Broker :: broker(),
      Tag :: reference(),
      Process :: pid() | {atom(), node()}.
async_ask(Broker) ->
    %% Asynchronous variant: the calling process' pid is the default
    %% request value.
    ReqValue = self(),
    sbroker_gen:async_call(Broker, ask, ReqValue).
%% @doc Monitors the broker and sends an asynchronous request to match with a
%% process calling `ask_r/2'. Returns `{await, Tag, Process}' or `{drop, 0}'.
%%
%% `Tag' is a monitor `reference()' that uniquely identifies the reply
%% containing the result of the request. `Process', is the `pid()' of the
%% monitored broker or `{atom(), node()}' if the broker is registered locally
%% in another node. To cancel the request call `cancel(Process, Tag)'.
%%
%% The reply is of the form `{Tag, {go, Ref, Value, RelativeTime, SojournTime}'
%% or `{Tag, {drop, SojournTime}}'.
%%
%% `Ref' is the transaction reference, which is a `reference()'. `Value' is the
%% value of the matched process. `RelativeTime' is the time spent waiting for a
%% match after discounting time spent waiting for the broker to handle requests.
%% `SojournTime' is the time spent in the broker's message queue.
%%
%% Multiple asynchronous requests can be made from a single process to a
%% broker and no guarantee is made of the order of replies. A process making
%% multiple requests can reuse the monitor reference for subsequent requests to
%% the same broker process (`Process') using `async_ask/3'.
%%
%% If the request is dropped when using `via' module `sprotector' returns
%% `{drop, 0}' and does not send the request.
%%
%% @see cancel/2
%% @see async_ask/3
-spec async_ask(Broker, ReqValue) -> {await, Tag, Process} | {drop, 0} when
      Broker :: broker(),
      ReqValue :: any(),
      Tag :: reference(),
      Process :: pid() | {atom(), node()}.
async_ask(Broker, ReqValue) ->
    sbroker_gen:async_call(Broker, ask, ReqValue).
%% @doc Sends an asynchronous request to match with a process calling `ask_r/2'.
%% Returns `{await, Tag, Pid}'.
%%
%% `To' is a tuple containing the process, `pid()', to send the reply to and
%% `Tag', `any()', that identifies the reply containing the result of the
%% request. `Process' is the `pid()' of the broker or `{atom(), node()}' if the
%% broker is registered locally on a different node. To cancel all requests
%% identified by `Tag' on broker `Process' call `cancel(Process, Tag)'.
%%
%% The reply is of the form `{Tag, {go, Ref, Value, RelativeTime, SojournTime}'
%% or `{Tag, {drop, SojournTime}}'.
%%
%% `Ref' is the transaction reference, which is a `reference()'. `Value' is the
%% value of the matched process. `RelativeTime' is the time spent waiting for a
%% match after discounting time spent waiting for the broker to handle requests.
%% `SojournTime' is the time spent in the broker's message queue.
%%
%% Multiple asynchronous requests can be made from a single process to a
%% broker and no guarantee is made of the order of replies. If the broker
%% exits or is on a disconnected node there is no guarantee of a reply and so
%% the caller should take appropriate steps to handle this scenario.
%%
%% If the request is dropped when using `via' module `sprotector', returns
%% `{drop, 0}' and does not send the request.
%%
%% @see cancel/2
-spec async_ask(Broker, ReqValue, To) -> {await, Tag, Process} | {drop, 0} when
Broker :: broker(),
ReqValue :: any(),
To :: {Pid, Tag},
Pid :: pid(),
Tag :: any(),
Process :: pid() | {atom(), node()}.
async_ask(Broker, ReqValue, To) ->
sbroker_gen:async_call(Broker, ask, ReqValue, To).
%% @equiv async_ask_r(Broker, self())
-spec async_ask_r(Broker) -> {await, Tag, Process} | {drop, 0} when
      Broker :: broker(),
      Tag :: reference(),
      Process :: pid() | {atom(), node()}.
%% An `ask_r' request travels as a `bid' inside the broker.
async_ask_r(Broker) ->
    sbroker_gen:async_call(Broker, bid, self()).

%% @doc Monitors the broker and sends an asynchronous request to match with a
%% process calling `ask/2'.
%%
%% @see async_ask/2
%% @see cancel/2
-spec async_ask_r(Broker, ReqValue) -> {await, Tag, Process} | {drop, 0} when
      Broker :: broker(),
      ReqValue :: any(),
      Tag :: reference(),
      Process :: pid() | {atom(), node()}.
async_ask_r(Broker, ReqValue) ->
    sbroker_gen:async_call(Broker, bid, ReqValue).

%% @doc Sends an asynchronous request to match with a process calling `ask/2'.
%%
%% @see async_ask/3
%% @see cancel/2
-spec async_ask_r(Broker, ReqValue, To) ->
    {await, Tag, Process} | {drop, 0} when
      Broker :: broker(),
      ReqValue :: any(),
      To :: {Pid, Tag},
      Pid :: pid(),
      Tag :: any(),
      Process :: pid() | {atom(), node()}.
async_ask_r(Broker, ReqValue, To) ->
    sbroker_gen:async_call(Broker, bid, ReqValue, To).
%% @equiv dynamic_ask(Broker, self())
-spec dynamic_ask(Broker) -> Go | Await | Drop when
      Broker :: broker(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Await :: {await, Tag, Pid},
      Tag :: reference(),
      Pid :: pid(),
      Drop :: {drop, SojournTime}.
dynamic_ask(Broker) ->
    sbroker_gen:dynamic_call(Broker, dynamic_ask, self(), infinity).

%% @doc Tries to match with a process calling `ask_r/2' on the same broker. If
%% no immediate match available the request is converted to an `async_ask/2'.
%%
%% Returns `{go, Ref, Value, RelativeTime, SojournTime}' on a successful match
%% or `{await, Tag, BrokerPid}'.
%%
%% `Ref' is the transaction reference, which is a `reference()'. `Value' is the
%% value of the matched process. `RelativeTime' is the time spent waiting for a
%% match after discounting time spent waiting for the broker to handle requests.
%% `SojournTime' is the time spent in the broker's message queue. `Tag' is a
%% monitor reference and `BrokerPid' the `pid()' of the broker, as returned by
%% `async_ask/2'.
%%
%% If the request is dropped when using `via' module `sprotector' returns
%% `{drop, 0}' and does not send the request.
%%
%% @see nb_ask/2
%% @see async_ask/2
-spec dynamic_ask(Broker, ReqValue) -> Go | Await | Drop when
      Broker :: broker(),
      ReqValue :: any(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Await :: {await, Tag, Pid},
      Tag :: reference(),
      Pid :: pid(),
      Drop :: {drop, SojournTime}.
dynamic_ask(Broker, ReqValue) ->
    sbroker_gen:dynamic_call(Broker, dynamic_ask, ReqValue, infinity).

%% @equiv dynamic_ask_r(Broker, self())
-spec dynamic_ask_r(Broker) -> Go | Await | Drop when
      Broker :: broker(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Await :: {await, Tag, Pid},
      Tag :: reference(),
      Pid :: pid(),
      Drop :: {drop, SojournTime}.
dynamic_ask_r(Broker) ->
    sbroker_gen:dynamic_call(Broker, dynamic_bid, self(), infinity).

%% @doc Tries to match with a process calling `ask/2' on the same broker. If
%% no immediate match available the request is converted to an `async_ask_r/2'.
%%
%% @see dynamic_ask/2
-spec dynamic_ask_r(Broker, ReqValue) -> Go | Await | Drop when
      Broker :: broker(),
      ReqValue :: any(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: 0 | neg_integer(),
      SojournTime :: non_neg_integer(),
      Await :: {await, Tag, Pid},
      Tag :: reference(),
      Pid :: pid(),
      Drop :: {drop, SojournTime}.
dynamic_ask_r(Broker, ReqValue) ->
    sbroker_gen:dynamic_call(Broker, dynamic_bid, ReqValue, infinity).
%% @doc Await the response to an asynchronous request identified by `Tag'.
%%
%% Exits if a response is not received after `Timeout' milliseconds.
%%
%% Exits if a `DOWN' message is received with the reference `Tag'.
%%
%% @see async_ask/2
%% @see async_ask_r/2
-spec await(Tag, Timeout) -> Go | Drop when
      Tag :: any(),
      Timeout :: timeout(),
      Go :: {go, Ref, Value, RelativeTime, SojournTime},
      Ref :: reference(),
      Value :: any(),
      RelativeTime :: integer(),
      SojournTime :: non_neg_integer(),
      Drop :: {drop, SojournTime}.
await(Tag, Timeout) ->
    %% Selective receive: only a reply tagged `Tag', or a 'DOWN' for the
    %% monitor reference `Tag', is consumed; other mailbox traffic is left
    %% untouched for the caller.
    receive
        {Tag, {go, _, _, _, _} = Go} ->
            Go;
        {Tag, {drop, _} = Dropped} ->
            Dropped;
        {'DOWN', Tag, _, _, Reason} when is_reference(Tag) ->
            %% The monitored broker died before replying.
            exit({Reason, {?MODULE, await, [Tag, Timeout]}})
    after
        Timeout ->
            exit({timeout, {?MODULE, await, [Tag, Timeout]}})
    end.
%% @equiv cancel(Broker, Tag, infinity)
-spec cancel(Broker, Tag) -> Count | false when
      Broker :: broker(),
      Tag :: any(),
      Count :: pos_integer().
cancel(Broker, Tag) ->
    cancel(Broker, Tag, infinity).

%% @doc Cancels an asynchronous request. Returns the number of cancelled
%% requests or `false' if no requests exist. In the latter case a caller may
%% wish to check its message queue for an existing reply.
%%
%% @see async_ask/1
%% @see async_ask_r/1
-spec cancel(Broker, Tag, Timeout) -> Count | false when
      Broker :: broker(),
      Tag :: any(),
      Timeout :: timeout(),
      Count :: pos_integer().
cancel(Broker, Tag, Timeout) ->
    sbroker_gen:simple_call(Broker, cancel, Tag, Timeout).

%% @doc Cancels an asynchronous request.
%%
%% Returns `ok' without waiting for the broker to cancel requests.
%%
%% @see cancel/3
-spec dirty_cancel(Broker, Tag) -> ok when
      Broker :: broker(),
      Tag :: any().
%% `dirty' in the from position tells the broker not to reply (see
%% cancelled/2 in this module).
dirty_cancel(Broker, Tag) ->
    sbroker_gen:send(Broker, {cancel, dirty, Tag}).

%% @equiv change_config(Broker, infinity)
-spec change_config(Broker) -> ok | {error, Reason} when
      Broker :: broker(),
      Reason :: any().
change_config(Broker) ->
    change_config(Broker, infinity).

%% @doc Change the configuration of the broker. Returns `ok' on success and
%% `{error, Reason}' on failure, where `Reason', is the reason for failure.
%%
%% Broker calls the `init/1' callback to get the new configuration. If `init/1'
%% returns `ignore' the config does not change.
-spec change_config(Broker, Timeout) -> ok | {error, Reason} when
      Broker :: broker(),
      Timeout :: timeout(),
      Reason :: any().
change_config(Broker, Timeout) ->
    sbroker_gen:simple_call(Broker, change_config, undefined, Timeout).
%% @equiv len(Broker, infinity)
-spec len(Broker) -> Length when
      Broker :: broker(),
      Length :: non_neg_integer().
len(Broker) ->
    len(Broker, infinity).

%% @doc Get the length of the `ask' queue in the broker, `Broker'.
-spec len(Broker, Timeout) -> Length when
      Broker :: broker(),
      Timeout :: timeout(),
      Length :: non_neg_integer().
len(Broker, Timeout) ->
    sbroker_gen:simple_call(Broker, len_ask, undefined, Timeout).

%% @equiv len_r(Broker, infinity)
-spec len_r(Broker) -> Length when
      Broker :: broker(),
      Length :: non_neg_integer().
len_r(Broker) ->
    len_r(Broker, infinity).

%% @doc Get the length of the `ask_r' queue in the broker, `Broker'.
-spec len_r(Broker, Timeout) -> Length when
      Broker :: broker(),
      Timeout :: timeout(),
      Length :: non_neg_integer().
%% The `ask_r' queue is the bid queue inside the broker, hence `len_bid'.
len_r(Broker, Timeout) ->
    sbroker_gen:simple_call(Broker, len_bid, undefined, Timeout).
%% @doc Starts a broker with callback module `Module' and argument `Args', and
%% broker options `Opts'.
%%
%% `Opts' is a `proplist' and supports `debug', `timeout' and `spawn_opt' used
%% by `gen_server' and `gen_fsm'. `read_time_after' sets the number of requests
%% when a cached time is stale and the time is read again. Its value is
%% `non_neg_integer()' or `infinity' and defaults to `16'.
%%
%% @see gen_server:start_link/3
-spec start_link(Module, Args, Opts) -> StartReturn when
      Module :: module(),
      Args :: any(),
      Opts :: [start_option()],
      StartReturn :: start_return().
start_link(Mod, Args, Opts) ->
    sbroker_gen:start_link(?MODULE, Mod, Args, Opts).

%% @doc Starts a broker with name `Name', callback module `Module' and argument
%% `Args', and broker options `Opts'.
%%
%% @see start_link/3
-spec start_link(Name, Module, Args, Opts) -> StartReturn when
      Name :: name(),
      Module :: module(),
      Args :: any(),
      Opts :: [start_option()],
      StartReturn :: start_return().
start_link(Name, Mod, Args, Opts) ->
    sbroker_gen:start_link(Name, ?MODULE, Mod, Args, Opts).

%% test api

%% @hidden
%% Force a timeout pass over the active queue (test helper, not public API).
-spec timeout(Broker) -> ok when
      Broker :: broker().
timeout(Broker) ->
    sbroker_gen:send(Broker, timeout).
%% gen api

%% Inside the broker an ask_r request is referred to as a bid to make the
%% difference between ask and ask_r clearer.

%% @private
%% Process entry point invoked via proc_lib when the broker starts: run the
%% user callback `Mod:init(Args)' and either enter the main loop or ack the
%% failure to `Starter' and exit.
init_it(Starter, Parent, Name, Mod, Args, Opts) ->
    DbgOpts = proplists:get_value(debug, Opts, []),
    Dbg = sys:debug_options(DbgOpts),
    ReadAfter = proplists:get_value(read_time_after, Opts),
    try Mod:init(Args) of
        {ok, {{AskMod, AskArgs}, {BidMod, BidArgs}, MeterArgs}}
          when is_list(MeterArgs) ->
            Config = #config{mod=Mod, args=Args, parent=Parent, dbg=Dbg,
                             name=Name, ask_mod=AskMod, bid_mod=BidMod},
            Now = erlang:monotonic_time(),
            %% `send'/`empty' start at Now; `seq' counts messages handled since
            %% the clock was last read.
            Time = #time{now=Now, send=Now, empty=Now, read_after=ReadAfter,
                         seq=0, meters=[]},
            init(Starter, Time, AskArgs, BidArgs, MeterArgs, Config);
        ignore ->
            %% Mirror gen_server: `ignore' acks `ignore' and exits normally.
            init_stop(Starter, Name, ignore, normal);
        Other ->
            Reason = {bad_return_value, Other},
            init_stop(Starter, Name, Reason)
    catch
        Class:Reason:Stack ->
            Reason2 = sbroker_handlers:exit_reason({Class, Reason, Stack}),
            init_stop(Starter, Name, Reason2)
    end.
%% sys API

%% @private
%% Resume the main loop after sys:handle_system_msg/6. `Misc' is either the
%% plain loop state or a pending `{change, ...}' configuration change captured
%% while the process was suspended.
system_continue(Parent, Dbg, [State, Time, Asks, Bids, Config]) ->
    NConfig = Config#config{parent=Parent, dbg=Dbg},
    timeout(State, Time, Asks, Bids, NConfig);
system_continue(Parent, Dbg,
                {change, Change, [State, Time, Asks, Bids, Config]}) ->
    NConfig = Config#config{parent=Parent, dbg=Dbg},
    change(State, Change, Time, Asks, Bids, NConfig).

%% @private
%% When the code change targets the broker's own callback module, re-run
%% `Mod:init/1' and stage the result as a `{change, ...}'; otherwise delegate
%% to the queue/meter handlers via code_change/4.
system_code_change([_, _, _, _, #config{mod=Mod} = Config] = Misc, Mod, _, _) ->
    case config_change(Config) of
        {ok, Change} ->
            {ok, {change, Change, Misc}};
        ignore ->
            {ok, Misc};
        {error, Reason} ->
            % sys will turn this into {error, Reason}
            Reason
    end;
system_code_change([_, _, _, _, _] = Misc, Mod, OldVsn, Extra) ->
    {ok, code_change(Misc, Mod, OldVsn, Extra)};
system_code_change({change, Change,
                    [_, _, _, _, #config{mod=Mod} = Config] = Misc}, Mod, _,
                   _) ->
    case config_change(Config) of
        {ok, NChange} ->
            %% A freshly-read config supersedes the pending change.
            {ok, {change, NChange, Misc}};
        ignore ->
            {ok, {change, Change, Misc}};
        {error, Reason} ->
            % sys will turn this into {error, Reason}
            Reason
    end;
system_code_change({change, Change, Misc}, Mod, OldVsn, Extra) ->
    {ok, {change, Change, code_change(Misc, Mod, OldVsn, Extra)}}.

%% @private
%% sys:get_state/1 support: expose queue and meter callback states as
%% `{Module, Id, State}' triples.
system_get_state([_, #time{meters=Meters}, Asks, Bids,
                  #config{ask_mod=AskMod, bid_mod=BidMod}]) ->
    Meters2 = [{MeterMod, meter, Meter} || {MeterMod, Meter} <- Meters],
    Callbacks = [{AskMod, ask, Asks}, {BidMod, ask_r, Bids} | Meters2],
    {ok, Callbacks};
system_get_state({change, _, Misc}) ->
    system_get_state(Misc).

%% @private
%% sys:replace_state/2 support: `Replace' is applied to each callback triple
%% and must keep module and id unchanged (asserted by the matches below).
system_replace_state(Replace,
                     [State, #time{meters=Meters} = Time, Asks, Bids,
                      #config{ask_mod=AskMod, bid_mod=BidMod} = Config]) ->
    {AskMod, ask, NAsks} = AskRes = Replace({AskMod, ask, Asks}),
    {BidMod, ask_r, NBids} = BidRes = Replace({BidMod, ask_r, Bids}),
    MetersRes = [{MeterMod, meter, _} = Replace({MeterMod, meter, Meter}) ||
                 {MeterMod, Meter} <- Meters],
    Result = [AskRes, BidRes, MetersRes],
    NMeters = [{MeterMod, NMeter} || {MeterMod, meter, NMeter} <- MetersRes],
    Misc = [State, Time#time{meters=NMeters}, NAsks, NBids, Config],
    {ok, Result, Misc};
system_replace_state(Replace, {change, Change, Misc}) ->
    {ok, States, NMisc} = system_replace_state(Replace, Misc),
    {ok, States, {change, Change, NMisc}}.

%% @private
%% sys terminate request: stop with the given reason; any pending config
%% change is discarded.
system_terminate(Reason, Parent, Dbg, [_, Time, Asks, Bids, Config]) ->
    NConfig = Config#config{parent=Parent, dbg=Dbg},
    terminate({stop, Reason}, Time, Asks, Bids, NConfig);
system_terminate(Reason, Parent, Dbg, {change, _, Misc}) ->
    system_terminate(Reason, Parent, Dbg, Misc).

%% @private
%% sys:get_status/1 support: summarise the loop state and delegate per-handler
%% formatting (format_status/4 is defined elsewhere in this module).
format_status(Opt,
              [PDict, SysState, Parent, _,
               [State, #time{now=Now, meters=Meters}, Asks, Bids,
                #config{name=Name, ask_mod=AskMod, bid_mod=BidMod}]]) ->
    Header = gen:format_status_header("Status for sbroker", Name),
    Meters2 = [{MeterMod, meter, Meter} || {MeterMod, Meter} <- Meters],
    Handlers = [{AskMod, ask, Asks}, {BidMod, ask_r, Bids} | Meters2],
    Handlers2 = [{Mod, Id, format_status(Mod, Opt, PDict, Handler)} ||
                 {Mod, Id, Handler} <- Handlers],
    [{header, Header},
     {data, [{"Status", SysState},
             {"Parent", Parent},
             {"Active queue", format_state(State)},
             {"Time", Now}]},
     {items, {"Installed handlers", Handlers2}}];
format_status(Opt, [PDict, SysState, Parent, Dbg, {change, _, Misc}]) ->
    format_status(Opt, [PDict, SysState, Parent, Dbg, Misc]).
%% Internal

%% Validate the meter specs returned by `Mod:init/1' before initialising any
%% handler; a duplicate or malformed meter aborts startup.
init(Starter, Time, AskArgs, BidArgs, MeterArgs,
     #config{ask_mod=AskMod, bid_mod=BidMod, name=Name} = Config) ->
    case check_meters(MeterArgs) of
        ok ->
            do_init(Starter, Time, AskArgs, BidArgs, MeterArgs, Config);
        {error, Reason} ->
            Return = {ok, {{AskMod, AskArgs}, {BidMod, BidArgs}, MeterArgs}},
            init_stop(Starter, Name, {Reason, Return})
    end.

%% Initialise both queues and the meters, then enter the main loop. The first
%% wake-up is the minimum of the bid queue's and the meters' next times.
do_init(Starter, #time{now=Now, send=Send} = Time, AskArgs, BidArgs, MeterArgs,
        #config{ask_mod=AskMod, bid_mod=BidMod, name=Name} = Config) ->
    Inits = [{sbroker_queue, AskMod, AskArgs},
             {sbroker_queue, BidMod, BidArgs}],
    ReportName = report_name(Config),
    case sbroker_handlers:init(Send, Now, Inits, MeterArgs, ReportName) of
        {ok, [{_, _, Asks, _}, {_, _, Bids, BidNext}], {Meters, MNext}} ->
            Next = min(BidNext, MNext),
            NTime = Time#time{meters=Meters},
            enter_loop(Starter, NTime, Asks, Bids, Next, Config);
        {stop, Reason} ->
            init_stop(Starter, Name, Reason)
    end.

%% Ack `{error, Reason}' to the starter and exit with the same reason.
init_stop(Starter, Name, Reason) ->
    init_stop(Starter, Name, {error, Reason}, Reason).

%% Unregister the name (so a supervisor restart can re-register), ack `Ack' to
%% the starter and exit.
init_stop(Starter, Name, Ack, Reason) ->
    unregister_name(Name),
    proc_lib:init_ack(Starter, Ack),
    exit(Reason).

%% Undo name registration for each supported name type; anonymous brokers (a
%% plain pid) need no cleanup.
unregister_name({local, Name}) ->
    unregister(Name);
unregister_name({global, Name}) ->
    global:unregister_name(Name);
unregister_name({via, Mod, Name}) ->
    Mod:unregister_name(Name);
unregister_name(Self) when is_pid(Self) ->
    ok.

%% Ack a successful start then wait for the first message. A new broker has
%% both queues empty and conventionally starts in the `bidding' state.
enter_loop(Starter, Time, Asks, Bids, Next, Config) ->
    proc_lib:init_ack(Starter, {ok, self()}),
    Timeout = idle_timeout(Time, Next),
    idle_recv(bidding, Timeout, Time, Asks, Bids, Config).
%% Start a new send-time estimation interval: record the current time and post
%% a `$mark' message to self so the in-queue delay of messages that follow can
%% be estimated against it (see the `$mark' clause of common/7).
mark(Time) ->
    Now = erlang:monotonic_time(),
    _ = self() ! {'$mark', Now},
    Time#time{now=Now, send=Now, seq=0}.

%% Re-read the clock (and update the meters) only every `read_after' messages;
%% otherwise just bump the per-interval message counter.
update_time(State, #time{seq=Seq, read_after=Seq} = Time, Asks, Bids, Config) ->
    Now = erlang:monotonic_time(),
    update_meter(Now, State, Time, Asks, Bids, Config);
update_time(_, #time{seq=Seq} = Time, _, _, _) ->
    Time#time{seq=Seq+1}.
%% Feed a fresh `Now' into the meters. With no meters installed just reset the
%% interval; otherwise derive the relative time from the active queue's oldest
%% send time.
update_meter(Now, _, #time{meters=[], send=Send} = Time, _, _, _) ->
    Time#time{now=Now, seq=0, empty=Send};
update_meter(Now, asking, #time{send=Send, empty=Empty} = Time, Asks, Bids,
             #config{ask_mod=AskMod} = Config) ->
    try AskMod:send_time(Asks) of
        SendTime
          when is_integer(SendTime), SendTime =< Send, SendTime >= Empty ->
            %% Asks are the waiting side: relative time is non-negative.
            RelativeTime = Send - SendTime,
            update_meter(Now, RelativeTime, SendTime, Time, Asks, Bids, Config);
        empty ->
            RelativeTime = Send - Empty,
            update_meter(Now, RelativeTime, Empty, Time, Asks, Bids, Config);
        Other ->
            asking_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            asking_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end;
update_meter(Now, bidding, #time{send=Send, empty=Empty} = Time, Asks, Bids,
             #config{bid_mod=BidMod} = Config) ->
    try BidMod:send_time(Bids) of
        SendTime
          when is_integer(SendTime), SendTime =< Send, SendTime >= Empty ->
            %% Bids are the waiting side: sign is flipped, non-positive.
            RelativeTime = SendTime - Send,
            update_meter(Now, RelativeTime, SendTime, Time, Asks, Bids, Config);
        empty ->
            RelativeTime = Empty - Send,
            update_meter(Now, RelativeTime, Empty, Time, Asks, Bids, Config);
        Other ->
            bidding_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            bidding_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end.

%% Fold queue/process delay estimates into every meter. `Seq' messages were
%% handled since the clock was last read at `Prev', so the average handling
%% delay per message is (Now - Prev) div Seq.
update_meter(Now, RelativeTime, Empty,
             #time{now=Prev, send=Send, seq=Seq, meters=Meters} = Time, Asks,
             Bids, Config) ->
    ProcessDelay = (Now - Prev) div Seq,
    %% Remove one ProcessDelay to estimate time last message was received.
    %% NB: This gives correct QueueDelay of 0 when single message was received.
    QueueDelay = (Now - ProcessDelay) - Send,
    case sbroker_handlers:meters_update(QueueDelay, ProcessDelay, RelativeTime,
                                        Now, Meters, report_name(Config)) of
        {ok, NMeters, Next} ->
            Time#time{now=Now, seq=0, meters=NMeters, empty=Empty, next=Next};
        {stop, ExitReason} ->
            meter_stop(ExitReason, Asks, Bids, Config)
    end.
%% A meter failed: stop both queue handlers and terminate the broker with the
%% meter's exit reason.
meter_stop(Reason, Asks, Bids,
           #config{ask_mod=AskMod, bid_mod=BidMod} = Config) ->
    Callbacks = [{sbroker_queue, AskMod, stop, Asks},
                 {sbroker_queue, BidMod, stop, Bids}],
    terminate(Reason, Callbacks, Config).
%% Block waiting for the next message. If the clock was just read (seq = 0)
%% the cached time is fresh enough; otherwise read it and update the meters
%% first so idle periods are metered too.
idle(State, #time{seq=0} = Time, Asks, Bids, Next, Config) ->
    Timeout = idle_timeout(Time, Next),
    idle_recv(State, Timeout, Time, Asks, Bids, Config);
idle(State, Time, Asks, Bids, Next, Config) ->
    Now = erlang:monotonic_time(),
    NTime = update_meter(Now, State, Time, Asks, Bids, Config),
    Timeout = idle_timeout(NTime, Next),
    idle_recv(State, Timeout, NTime, Asks, Bids, Config).
%% Compute the `receive' timeout in milliseconds until the next scheduled
%% queue or meter update. `Next1' (meters, cached in #time{}) and `Next2'
%% (active queue) are absolute native-unit times or `infinity'.
idle_timeout(#time{now=Now, next=Next1}, Next2) ->
    case min(Next1, Next2) of
        infinity ->
            infinity;
        Next ->
            Diff = Next - Now,
            %% `millisecond' replaces the deprecated `milli_seconds' alias;
            %% safe here as the module already requires OTP 21+ (it uses
            %% `Class:Reason:Stack' catch clauses).
            Timeout = erlang:convert_time_unit(Diff, native, millisecond),
            %% Round up to at least 1ms so a due/overdue `Next' never turns
            %% into a busy loop with timeout 0.
            max(Timeout, 1)
    end.
%% Wait for a message or until the next scheduled queue/meter timeout; either
%% way a new timing interval is started with mark/1.
idle_recv(State, Timeout, Time, Asks, Bids, Config) ->
    receive
        Msg ->
            NTime = mark(Time),
            handle(State, Msg, NTime, Asks, Bids, infinity, Config)
    after
        Timeout ->
            NTime = mark(Time),
            timeout(State, NTime, Asks, Bids, Config)
    end.

%% Dispatch a message to the handler for the active state.
handle(asking, Msg, Time, Asks, Bids, Next, Config) ->
    asking(Msg, Time, Asks, Bids, Next, Config);
handle(bidding, Msg, Time, Asks, Bids, Next, Config) ->
    bidding(Msg, Time, Asks, Bids, Next, Config).
%% Let the ask queue apply its timeout/drop policy at the current time.
asking_timeout(#time{now=Now} = Time, Asks, Bids,
               #config{ask_mod=AskMod} = Config) ->
    try AskMod:handle_timeout(Now, Asks) of
        {NAsks, Next} ->
            asking(Time, NAsks, Bids, Next, Config);
        Other ->
            asking_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            asking_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end.

%% Receive loop while asks are queued. No `after' clause: idle blocking with
%% a timeout happens via the `$mark' message handling in common/7.
asking(Time, Asks, Bids, Next, Config) ->
    receive
        Msg ->
            NTime = update_time(asking, Time, Asks, Bids, Config),
            asking(Msg, NTime, Asks, Bids, Next, Config)
    end.
%% Message dispatch while asks are queued: incoming asks are enqueued and
%% incoming bids are matched against the ask queue (mirror image of
%% bidding/6).
asking({ask, Ask, Value}, #time{now=Now, send=Send} = Time, Asks, Bids, _,
       #config{ask_mod=AskMod} = Config) ->
    try AskMod:handle_in(Send, Ask, Value, Now, Asks) of
        {NAsks, Next} ->
            asking(Time, NAsks, Bids, Next, Config);
        Other ->
            asking_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            asking_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end;
asking({bid, Bid, BidValue} = Msg, #time{now=Now, send=Send} = Time, Asks, Bids,
       _, #config{ask_mod=AskMod} = Config) ->
    case handle_out(AskMod, Now, Asks) of
        {AskSend, Ask, AskValue, Ref, NAsks, Next} ->
            ask_settle(Time, Ref, AskSend, Ask, AskValue, Bid, BidValue),
            asking(Time, NAsks, Bids, Next, Config);
        {empty, NAsks} ->
            %% Ask queue drained: flip to the bidding state and re-dispatch
            %% this bid there; record when the queue became empty.
            bidding(Msg, Time#time{empty=Send}, NAsks, Bids, infinity, Config);
        {bad_return_value, Other, NAsks} ->
            asking_return(Other, Time, NAsks, Bids, Config);
        {exception, Class, Reason, NAsks, Stacktrace} ->
            asking_exception(Class, Reason, Time, NAsks, Bids, Config, Stacktrace)
    end;
asking({nb_ask, Ask, _}, Time, Asks, Bids, _, Config) ->
    %% Asks are the waiting side here, so a non-blocking ask cannot match.
    drop(Ask, Time),
    asking_timeout(Time, Asks, Bids, Config);
asking({nb_bid, Bid, BidValue}, #time{now=Now, send=Send} = Time, Asks, Bids, _,
       #config{ask_mod=AskMod} = Config) ->
    case handle_out(AskMod, Now, Asks) of
        {AskSend, Ask, AskValue, Ref, NAsks, Next} ->
            ask_settle(Time, Ref, AskSend, Ask, AskValue, Bid, BidValue),
            asking(Time, NAsks, Bids, Next, Config);
        {empty, NAsks} ->
            %% Non-blocking requests never queue: drop on empty.
            drop(Bid, Time),
            bidding(Time#time{empty=Send}, NAsks, Bids, infinity, Config);
        {bad_return_value, Other, NAsks} ->
            asking_return(Other, Time, NAsks, Bids, Config);
        {exception, Class, Reason, NAsks, Stacktrace} ->
            asking_exception(Class, Reason, Time, NAsks, Bids, Config, Stacktrace)
    end;
asking({dynamic_ask, Ask, Value}, Time, Asks, Bids, Next, Config) ->
    %% No match possible: acknowledge as asynchronous, then enqueue as an ask.
    async(Ask),
    asking({ask, Ask, Value}, Time, Asks, Bids, Next, Config);
asking({dynamic_bid, Bid, BidValue}, #time{now=Now, send=Send} = Time, Asks,
       Bids, _, #config{ask_mod=AskMod} = Config) ->
    case handle_out(AskMod, Now, Asks) of
        {AskSend, Ask, AskValue, Ref, NAsks, Next} ->
            ask_settle(Time, Ref, AskSend, Ask, AskValue, Bid, BidValue),
            asking(Time, NAsks, Bids, Next, Config);
        {empty, NAsks} ->
            %% Convert to asynchronous and enqueue in the bidding state.
            async(Bid),
            Msg = {bid, Bid, BidValue},
            bidding(Msg, Time#time{empty=Send}, NAsks, Bids, infinity, Config);
        {bad_return_value, Other, NAsks} ->
            asking_return(Other, Time, NAsks, Bids, Config);
        {exception, Class, Reason, NAsks, Stacktrace} ->
            asking_exception(Class, Reason, Time, NAsks, Bids, Config, Stacktrace)
    end;
asking({cancel, From, Tag}, #time{now=Now} = Time, Asks, Bids, _,
       #config{ask_mod=AskMod} = Config) ->
    try AskMod:handle_cancel(Tag, Now, Asks) of
        {Reply, NAsks, Next} ->
            cancelled(From, Reply),
            asking(Time, NAsks, Bids, Next, Config);
        Other ->
            asking_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            asking_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end;
asking(Msg, Time, Asks, Bids, Next, Config) ->
    common(Msg, asking, Time, Asks, Bids, Next, Config).
%% Messages handled identically in both states.
%%
%% `$mark' implements send-time estimation: if another message is already
%% waiting, the loop is busy and the send time of the next message is
%% estimated as the midpoint between the mark's send and receive times;
%% otherwise the mailbox is empty and the broker can block idle.
common({'$mark', Mark}, State, #time{now=Now} = Time, Asks, Bids, Next,
       Config) ->
    receive
        Msg ->
            _ = self() ! {'$mark', Now},
            NTime = Time#time{send=(Mark + Now) div 2},
            handle(State, Msg, NTime, Asks, Bids, Next, Config)
    after
        0 ->
            idle(State, Time, Asks, Bids, Next, Config)
    end;
%% Exit signal from the parent (supervisor): shut down with its reason.
common({'EXIT', Parent, Reason}, _, Time, Asks, Bids, _,
       #config{parent=Parent} = Config) ->
    terminate({stop, Reason}, Time, Asks, Bids, Config);
common({system, From, Msg}, State, Time, Asks, Bids, _, Config) ->
    system(From, Msg, State, Time, Asks, Bids, Config);
common({change_config, From, _}, State, Time, Asks, Bids, _, Config) ->
    config_change(From, State, Time, Asks, Bids, Config);
common({len_ask, From, _}, State, Time, Asks, Bids, _,
       #config{ask_mod=AskMod} = Config) ->
    try AskMod:len(Asks) of
        Len ->
            gen:reply(From, Len),
            timeout(State, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            asking_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end;
common({len_bid, From, _}, State, Time, Asks, Bids, _,
       #config{bid_mod=BidMod} = Config) ->
    try BidMod:len(Bids) of
        Len ->
            gen:reply(From, Len),
            timeout(State, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            bidding_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end;
%% Release-handler style get_modules request: report every callback module.
common({_, From, get_modules}, State, #time{meters=Meters} = Time, Asks, Bids,
       _, #config{mod=Mod, ask_mod=AskMod, bid_mod=BidMod} = Config) ->
    MeterMods = [MeterMod || {MeterMod, _} <- Meters],
    gen:reply(From, lists:usort([Mod, AskMod, BidMod | MeterMods])),
    timeout(State, Time, Asks, Bids, Config);
common(timeout, State, Time, Asks, Bids, _, Config) ->
    timeout(State, Time, Asks, Bids, Config);
%% Anything else is an info message for the queue and meter callbacks.
common(Msg, State, Time, Asks, Bids, _, Config) ->
    info_asks(Msg, State, Time, Asks, Bids, Config).
%% Dequeue the next waiting request from queue module `Mod', normalising bad
%% return values and exceptions into tagged tuples for the caller.
handle_out(Mod, Now, Queue) ->
    try Mod:handle_out(Now, Queue) of
        {_, _, _, Ref, NQueue, _} = Result ->
            handle_out(Ref, Result, Mod, Now, NQueue);
        {empty, _} = Result ->
            Result;
        Other ->
            {bad_return_value, Other, Queue}
    catch
        Class:Reason:Stack ->
            {exception, Class, Reason, Queue, Stack}
    end.

%% Drop the monitor on the dequeued client. `demonitor(Ref, [flush, info])'
%% returns `false' when a 'DOWN' was already delivered (and is flushed),
%% meaning the client died: discard this entry and dequeue again.
handle_out(Ref, Result, Mod, Now, Queue) ->
    case demonitor(Ref, [flush, info]) of
        true ->
            Result;
        false ->
            handle_out(Mod, Now, Queue)
    end.

%% Settle when the dequeued side is an ask: the triggering message is the bid,
%% so its send time is the current interval's `send'.
ask_settle(#time{now=Now, send=Send}, Ref, AskSend, Ask, AskValue, Bid,
           BidValue) ->
    settle(Now, AskSend, Ref, Ask, AskValue, Send, Bid, BidValue).
%% Forward an unrecognised message to the ask queue's handle_info/3 ...
info_asks(Msg, State, #time{now=Now} = Time, Asks, Bids,
          #config{ask_mod=AskMod} = Config) ->
    try AskMod:handle_info(Msg, Now, Asks) of
        {NAsks, AskNext} ->
            info_bids(Msg, State, Time, NAsks, AskNext, Bids, Config);
        Other ->
            asking_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            asking_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end.

%% ... then to the bid queue ...
info_bids(Msg, State, #time{now=Now} = Time, Asks, AskNext, Bids,
          #config{bid_mod=BidMod} = Config) ->
    try BidMod:handle_info(Msg, Now, Bids) of
        {NBids, BidNext} ->
            info_meter(Msg, State, Time, Asks, AskNext, NBids, BidNext, Config);
        Other ->
            bidding_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            bidding_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end.

%% ... and finally to the meters, before resuming the loop.
info_meter(_, State, #time{meters=[]} = Time, Asks, AskNext, Bids, BidNext,
           Config) ->
    next(State, Time, Asks, AskNext, Bids, BidNext, Config);
info_meter(Msg, State, #time{now=Now, meters=Meters} = Time, Asks, AskNext,
           Bids, BidNext, Config) ->
    case sbroker_handlers:meters_info(Msg, Now, Meters, report_name(Config)) of
        {ok, NMeters, MeterNext} ->
            NTime = Time#time{meters=NMeters, next=MeterNext},
            next(State, NTime, Asks, AskNext, Bids, BidNext, Config);
        {stop, Reason} ->
            meter_stop(Reason, Asks, Bids, Config)
    end.
%% The ask queue callback returned an unexpected value: stop both queues,
%% attributing the bad_return_value reason to the ask queue.
asking_return(Return, Time, Asks, Bids,
              #config{ask_mod=AskMod, bid_mod=BidMod} = Config) ->
    Reason = {bad_return_value, Return},
    Callbacks = [{sbroker_queue, AskMod, Reason, Asks},
                 {sbroker_queue, BidMod, stop, Bids}],
    terminate(Reason, Time, Callbacks, Config).

%% The ask queue callback raised: stop both queues with the exception as the
%% ask queue's stop reason.
asking_exception(Class, Reason, Time, Asks, Bids,
                 #config{ask_mod=AskMod, bid_mod=BidMod} = Config, Stacktrace) ->
    Reason2 = {Class, Reason, Stacktrace},
    Callbacks = [{sbroker_queue, AskMod, Reason2, Asks},
                 {sbroker_queue, BidMod, stop, Bids}],
    terminate(Reason2, Time, Callbacks, Config).
%% Let the bid queue apply its timeout/drop policy at the current time.
bidding_timeout(#time{now=Now} = Time, Asks, Bids,
                #config{bid_mod=BidMod} = Config) ->
    try BidMod:handle_timeout(Now, Bids) of
        {NBids, Next} ->
            bidding(Time, Asks, NBids, Next, Config);
        Other ->
            bidding_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            bidding_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end.

%% Receive loop while bids are queued; counterpart of asking/5.
bidding(Time, Asks, Bids, Next, Config) ->
    receive
        Msg ->
            NTime = update_time(bidding, Time, Asks, Bids, Config),
            bidding(Msg, NTime, Asks, Bids, Next, Config)
    end.
%% Message dispatch while bids (ask_r requests) are queued: incoming bids are
%% enqueued and incoming asks are matched against the bid queue (mirror image
%% of asking/6).
bidding({bid, Bid, Value}, #time{now=Now, send=Send} = Time, Asks, Bids, _,
        #config{bid_mod=BidMod} = Config) ->
    try BidMod:handle_in(Send, Bid, Value, Now, Bids) of
        {NBids, Next} ->
            bidding(Time, Asks, NBids, Next, Config);
        Other ->
            bidding_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            bidding_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end;
bidding({ask, Ask, AskValue} = Msg, #time{now=Now, send=Send} = Time, Asks,
        Bids, _, #config{bid_mod=BidMod} = Config) ->
    case handle_out(BidMod, Now, Bids) of
        {BidSend, Bid, BidValue, Ref, NBids, Next} ->
            bid_settle(Time, Ref, Ask, AskValue, BidSend, Bid, BidValue),
            bidding(Time, Asks, NBids, Next, Config);
        {empty, NBids} ->
            %% Bid queue drained: flip to the asking state and re-dispatch
            %% this ask there; record when the queue became empty.
            asking(Msg, Time#time{empty=Send}, Asks, NBids, infinity, Config);
        {bad_return_value, Other, NBids} ->
            bidding_return(Other, Time, Asks, NBids, Config);
        {exception, Class, Reason, NBids, Stacktrace} ->
            bidding_exception(Class, Reason, Time, Asks, NBids, Config, Stacktrace)
    end;
bidding({nb_bid, Bid, _}, Time, Asks, Bids, _, Config) ->
    %% Bids are the waiting side here, so a non-blocking bid cannot match.
    drop(Bid, Time),
    bidding_timeout(Time, Asks, Bids, Config);
bidding({nb_ask, Ask, AskValue}, #time{now=Now, send=Send} = Time, Asks, Bids,
        _, #config{bid_mod=BidMod} = Config) ->
    case handle_out(BidMod, Now, Bids) of
        {BidSend, Bid, BidValue, Ref, NBids, Next} ->
            bid_settle(Time, Ref, Ask, AskValue, BidSend, Bid, BidValue),
            bidding(Time, Asks, NBids, Next, Config);
        {empty, NBids} ->
            %% Non-blocking requests never queue: drop on empty.
            drop(Ask, Time),
            asking(Time#time{empty=Send}, Asks, NBids, infinity, Config);
        {bad_return_value, Other, NBids} ->
            bidding_return(Other, Time, Asks, NBids, Config);
        {exception, Class, Reason, NBids, Stacktrace} ->
            bidding_exception(Class, Reason, Time, Asks, NBids, Config, Stacktrace)
    end;
bidding({dynamic_bid, Bid, Value}, Time, Asks, Bids, Next, Config) ->
    %% No match possible: acknowledge as asynchronous, then enqueue as a bid.
    async(Bid),
    bidding({bid, Bid, Value}, Time, Asks, Bids, Next, Config);
bidding({dynamic_ask, Ask, AskValue}, #time{now=Now, send=Send} = Time, Asks,
        Bids, _, #config{bid_mod=BidMod} = Config) ->
    case handle_out(BidMod, Now, Bids) of
        {BidSend, Bid, BidValue, Ref, NBids, Next} ->
            bid_settle(Time, Ref, Ask, AskValue, BidSend, Bid, BidValue),
            bidding(Time, Asks, NBids, Next, Config);
        {empty, NBids} ->
            %% Convert to asynchronous and enqueue in the asking state.
            async(Ask),
            Msg = {ask, Ask, AskValue},
            asking(Msg, Time#time{empty=Send}, Asks, NBids, infinity, Config);
        {bad_return_value, Other, NBids} ->
            bidding_return(Other, Time, Asks, NBids, Config);
        {exception, Class, Reason, NBids, Stacktrace} ->
            bidding_exception(Class, Reason, Time, Asks, NBids, Config, Stacktrace)
    end;
bidding({cancel, From, Tag}, #time{now=Now} = Time, Asks, Bids, _,
        #config{bid_mod=BidMod} = Config) ->
    try BidMod:handle_cancel(Tag, Now, Bids) of
        {Reply, NBids, Next} ->
            cancelled(From, Reply),
            bidding(Time, Asks, NBids, Next, Config);
        Other ->
            bidding_return(Other, Time, Asks, Bids, Config)
    catch
        Class:Reason:Stack ->
            bidding_exception(Class, Reason, Time, Asks, Bids, Config, Stack)
    end;
bidding(Msg, Time, Asks, Bids, Next, Config) ->
    common(Msg, bidding, Time, Asks, Bids, Next, Config).
%% Settle when the dequeued side is a bid: the triggering message is the ask,
%% so its send time is the current interval's `send'.
bid_settle(#time{now=Now, send=Send}, Ref, Ask, AskValue, BidSend, Bid,
           BidValue) ->
    settle(Now, Send, Ref, Ask, AskValue, BidSend, Bid, BidValue).

%% Mirror of asking_return/5 for the bid queue.
bidding_return(Return, Time, Asks, Bids,
               #config{ask_mod=AskMod, bid_mod=BidMod} = Config) ->
    Reason = {bad_return_value, Return},
    Callbacks = [{sbroker_queue, AskMod, stop, Asks},
                 {sbroker_queue, BidMod, Reason, Bids}],
    terminate(Reason, Time, Callbacks, Config).

%% Mirror of asking_exception/7 for the bid queue.
bidding_exception(Class, Reason, Time, Asks, Bids,
                  #config{ask_mod=AskMod, bid_mod=BidMod} = Config, Stacktrace) ->
    Reason2 = {Class, Reason, Stacktrace},
    Callbacks = [{sbroker_queue, AskMod, stop, Asks},
                 {sbroker_queue, BidMod, Reason2, Bids}],
    terminate(Reason2, Time, Callbacks, Config).
%% @private Reply {go, ...} to both sides of a successful match.
%% RelativeTime is the ask send time minus the bid send time (each side sees
%% it from its own perspective, hence the sign flip); the last element is
%% each side's own sojourn time (Now - its send time).
settle(Now, AskSend, Ref, Ask, AskValue, BidSend, Bid, BidValue) ->
    RelativeTime = AskSend - BidSend,
    %% Bid always messaged first.
    gen:reply(Bid, {go, Ref, AskValue, RelativeTime, Now - BidSend}),
    gen:reply(Ask, {go, Ref, BidValue, -RelativeTime, Now - AskSend}).
%% @private Notify a queued client that its request was dropped, tagging the
%% reply with the request's send time and the current time.
drop(From, #time{now=Now, send=Send}) ->
    sbroker_queue:drop(From, Send, Now).
%% @private Acknowledge an asynchronous request: send {await, Tag, Broker}
%% back to the caller so it can match the eventual go/drop reply.
async({_, Tag} = From) ->
    gen:reply(From, {await, Tag, self()}).
%% @private Reply to a cancel request; 'dirty' (fire-and-forget) cancels
%% expect no reply.
cancelled(dirty, _) ->
    ok;
cancelled(From, Reply) ->
    gen:reply(From, Reply).
%% @private Handle a config_change request: re-run the callback module's
%% init/1 and either apply the new queue/meter configuration, keep the
%% current one (ignore), or report the error to the caller. The broker
%% keeps running in all three cases.
config_change(From, State, Time, Asks, Bids, Config) ->
    case config_change(Config) of
        {ok, Change} ->
            gen:reply(From, ok),
            change(State, Change, Time, Asks, Bids, Config);
        ignore ->
            gen:reply(From, ok),
            timeout(State, Time, Asks, Bids, Config);
        {error, Reason} ->
            gen:reply(From, {error, Reason}),
            timeout(State, Time, Asks, Bids, Config)
    end.
%% @private Re-run Mod:init(Args) to obtain a fresh configuration.
%% Returns {ok, {AskMod, AskArgs, BidMod, BidArgs, MeterArgs}}, ignore, or
%% {error, Reason}. Any exception or unexpected return from init/1 is
%% converted to an error tuple rather than crashing the broker here.
config_change(#config{mod=Mod, args=Args}) ->
    try Mod:init(Args) of
        {ok, {{AskMod, AskArgs}, {BidMod, BidArgs}, MeterArgs}}
          when is_list(MeterArgs) ->
            config_meters(AskMod, AskArgs, BidMod, BidArgs, MeterArgs);
        ignore ->
            ignore;
        Other ->
            {error, {bad_return_value, Other}}
    catch
        Class:Reason:Stack ->
            {error, {Class, Reason, Stack}}
    end.
%% @private Validate the meter specs from init/1 and flatten the queue/meter
%% configuration into a single tuple for change/6.
config_meters(AskMod, AskArgs, BidMod, BidArgs, MeterArgs) ->
    case check_meters(MeterArgs) of
        ok ->
            {ok, {AskMod, AskArgs, BidMod, BidArgs, MeterArgs}};
        {error, Reason} ->
            {error, {Reason, MeterArgs}}
    end.
%% @private Validate a list of {MeterMod, Args} specs, rejecting duplicates.
check_meters(Meters) ->
    check_meters(Meters, #{}).
%% @private Walk the meter specs with a map used as a seen-set: each meter
%% module may appear at most once. Anything that is not a proper list of
%% 2-tuples is rejected as a bad return value.
check_meters([{Meter, _} | Rest], Seen) ->
    case Seen of
        #{Meter := _} ->
            {error, {duplicate_meter, Meter}};
        _ ->
            check_meters(Rest, Seen#{Meter => meter})
    end;
check_meters([], _) ->
    ok;
check_meters(_, _) ->
    {error, bad_return_value}.
%% @private sys code_change callback (misc state form): forward the upgrade
%% to both queue modules and the meters via sbroker_handlers, then rebuild
%% the misc state with the upgraded queue/meter states.
code_change([State, #time{now=Now, send=Send, meters=Meters} = Time, Asks, Bids,
             #config{ask_mod=AskMod, bid_mod=BidMod} = Config], Mod, OldVsn,
            Extra) ->
    Callbacks = [{sbroker_queue, AskMod, Asks, infinity},
                 {sbroker_queue, BidMod, Bids, infinity}],
    NCallbacks = sbroker_handlers:code_change(Send, Now, Callbacks, Meters, Mod,
                                              OldVsn, Extra),
    {[{sbroker_queue, AskMod, NAsks, _},
      {sbroker_queue, BidMod, NBids, _}], {NMeters, MNext}} = NCallbacks,
    [State, Time#time{meters=NMeters, next=MNext}, NAsks, NBids, Config].
%% @private Apply a validated new configuration: hand the old queue states
%% to sbroker_handlers:config_change/6, which migrates them to the (possibly
%% new) queue modules and replaces the meters. On success, resume the loop
%% in the current ask/bid state; on failure the broker exits.
change(State, {NAskMod, AskArgs, NBidMod, BidArgs, MeterArgs},
       #time{now=Now, send=Send, meters=Meters} = Time, Asks, Bids,
       #config{ask_mod=AskMod, bid_mod=BidMod} = Config) ->
    Inits = [{sbroker_queue, AskMod, Asks, NAskMod, AskArgs},
             {sbroker_queue, BidMod, Bids, NBidMod, BidArgs}],
    Name = report_name(Config),
    case sbroker_handlers:config_change(Send, Now, Inits, Meters, MeterArgs,
                                        Name) of
        {ok, [{_, _, NAsks, AskNext}, {_, _, NBids, BidNext}],
         {NMeters, MeterNext}} ->
            NTime = Time#time{meters=NMeters, next=MeterNext},
            NConfig = Config#config{ask_mod=NAskMod, bid_mod=NBidMod},
            next(State, NTime, NAsks, AskNext, NBids, BidNext, NConfig);
        {stop, Reason} ->
            exit(Reason)
    end.
%% @private Resume the main loop in the given state after a config change,
%% keeping only the queue timeout relevant to that state.
next(asking, Time, Asks, AskNext, Bids, _, Config) ->
    asking(Time, Asks, Bids, AskNext, Config);
next(bidding, Time, Asks, _, Bids, BidNext, Config) ->
    bidding(Time, Asks, Bids, BidNext, Config).
%% @private Run the timeout handling for the current state without changing
%% state (used after requests that do not affect the queues' contents).
timeout(asking, Time, Asks, Bids, Config) ->
    asking_timeout(Time, Asks, Bids, Config);
timeout(bidding, Time, Asks, Bids, Config) ->
    bidding_timeout(Time, Asks, Bids, Config).
%% @private Hand a system message over to sys, packing the loop arguments as
%% the misc state. The debug options travel separately to sys, so they are
%% cleared from the config copy stored in misc to avoid duplication.
system(From, Msg, State, Time, Asks, Bids,
       #config{parent=Parent, dbg=Dbg} = Config) ->
    NConfig = Config#config{dbg=[]},
    sys:handle_system_msg(Msg, From, Parent, ?MODULE, Dbg,
                          [State, Time, Asks, Bids, NConfig]).
%% @private Ask a callback module to pretty-print its state for sys:
%% use Mod:format_status/2 when exported, falling back to the raw state
%% both when the callback is missing and when it raises.
format_status(Mod, Opt, PDict, State) ->
    case erlang:function_exported(Mod, format_status, 2) of
        false ->
            State;
        true ->
            try
                Mod:format_status(Opt, [PDict, State])
            catch
                _:_ ->
                    State
            end
    end.
%% @private Map the internal state name to the public queue name used in
%% status output (ask side vs. ask_r side).
format_state(State) ->
    case State of
        asking -> ask;
        bidding -> ask_r
    end.
%% @private Terminate with explicit callbacks and no meters (used before the
%% meters exist or after they have been handled), then exit.
terminate(Reason, Callbacks, Config) ->
    Name = report_name(Config),
    {stop, NReason} = sbroker_handlers:terminate(Reason, Callbacks, [], Name),
    exit(NReason).
%% @private Terminate with explicit callbacks plus the current meters taken
%% from the time record, then exit with the (possibly rewritten) reason.
terminate(Reason, #time{meters=Meters}, Callbacks, Config) ->
    Name = report_name(Config),
    {stop, NReason} = sbroker_handlers:terminate(Reason, Callbacks, Meters,
                                                 Name),
    exit(NReason).
%% @private Common termination path: stop both queues normally and delegate
%% to terminate/4 for meter shutdown and the final exit.
terminate(Reason, Time, Asks, Bids,
          #config{ask_mod=AskMod, bid_mod=BidMod} = Config) ->
    Callbacks = [{sbroker_queue, AskMod, stop, Asks},
                 {sbroker_queue, BidMod, stop, Bids}],
    terminate(Reason, Time, Callbacks, Config).
%% @private Name used in error reports: {Mod, Pid} for anonymous brokers
%% (name field holds the pid), otherwise the registered name.
%% (Fix: stray dataset metadata fused onto the final line removed; it would
%% not compile.)
report_name(#config{name=Pid, mod=Mod}) when is_pid(Pid) ->
    {Mod, Pid};
report_name(#config{name=Name}) ->
    Name.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2013 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc NkSIP Erlang code parser and hot loader utilities
-module(nksip_code_util).
-author('<NAME> <<EMAIL>>').
-export([expression/1, getter/2, fun_expr/4, call_expr/4, callback_expr/3]).
-export([case_expr/5, compile/2, write/2]).
-export([get_funs/1]).
%% ===================================================================
%% Private
%% ===================================================================
%% @doc Parse a string containing a full Erlang form (a function or
%% attribute definition, terminated by a dot) into its abstract syntax
%% tree. Returns error on any scan or parse failure.
-spec expression(string()) ->
    {ok, erl_syntax:syntaxTree()} | error.

expression(Expr) ->
    case erl_scan:string(Expr) of
        {ok, Tokens, _EndLocation} ->
            parse_tokens(Tokens);
        {error, _ErrorInfo, _ErrorLocation} ->
            error
    end.

%% @private Second stage: turn a scanned token list into a form.
parse_tokens(Tokens) ->
    case erl_parse:parse_form(Tokens) of
        {ok, Form} -> {ok, Form};
        {error, _} -> error
    end.
%% @doc Build the AST of a zero-arity getter: `Fun() -> Value.'
%% The value is embedded as a literal term via erl_syntax:abstract/1.
-spec getter(atom(), term()) ->
    erl_syntax:syntaxTree().

getter(Fun, Value) ->
    Name = erl_syntax:atom(Fun),
    Body = [erl_syntax:abstract(Value)],
    Clause = erl_syntax:clause([], none, Body),
    erl_syntax:function(Name, [Clause]).
%% @doc Generates a function expression (fun(A1,B1,..) -> Value)
%% Vers represents the suffix to use in the variable names.
%% Note: Value must already be a list of body expressions (syntax trees).
-spec fun_expr(atom(), integer(), integer(), term()) ->
    erl_syntax:syntaxTree().

fun_expr(Fun, Arity, Vers, Value) ->
    erl_syntax:function(
        erl_syntax:atom(Fun),
        [erl_syntax:clause(var_list(Arity, Vers), none, Value)]).
%% @doc Generates a call expression (mod:fun(A1,B1,..))
%% Vers represents the suffix to use in the variable names.
%% The argument list is made of generated variables, so the caller must bind
%% them earlier in the generated function body (see fun_expr/4, case_expr/5).
-spec call_expr(atom(), atom(), integer(), integer()) ->
    erl_syntax:syntaxTree().

call_expr(Mod, Fun, Arity, Vers) ->
    erl_syntax:application(
        erl_syntax:atom(Mod),
        erl_syntax:atom(Fun),
        var_list(Arity, Vers)).
%% @doc Generates a delegating callback (fun(A0,B0...) -> mod:fun(A0,B0,..))
%% by combining fun_expr/4 and call_expr/4 with version suffix 0.
-spec callback_expr(atom(), atom(), integer()) ->
    erl_syntax:syntaxTree().

callback_expr(Mod, Fun, Arity) ->
    fun_expr(Fun, Arity, 0, [call_expr(Mod, Fun, Arity, 0)]).
%% @doc Generates a case expression
%% case mod:fun(A2,B2...) of
%% continue -> [A1,B1..] = [A2,B2..], (NextCode);
%% {continue, [A1,B1..]} -> (NextCode);
%% Other -> Other
%% end
%% Vers represents the suffix to use in the variable names: the call uses
%% the Vers-suffixed variables, and NextCode is expected to refer to the
%% (Vers-1)-suffixed ones, which the continue clauses bind.
-spec case_expr(atom(), atom(), integer(), integer(),
                [erl_syntax:syntaxTree()]) ->
    erl_syntax:syntaxTree().

case_expr(Mod, Fun, Arity, Vers, NextCode) ->
    erl_syntax:case_expr(
        call_expr(Mod, Fun, Arity, Vers),
        [
            %% 'continue' clause: rebind the previous-version variables to
            %% the current-version values, then run NextCode (no rebinding
            %% is needed for arity 0).
            erl_syntax:clause(
                [erl_syntax:atom(continue)],
                none,
                case Arity of
                    0 ->
                        NextCode;
                    _ ->
                        [
                            erl_syntax:match_expr(
                                erl_syntax:list(var_list(Arity, Vers-1)),
                                erl_syntax:list(var_list(Arity, Vers)))
                            | NextCode]
                end),
            %% {continue, NewArgs} clause: the callback supplies replacement
            %% arguments, matched directly into the previous-version vars.
            erl_syntax:clause(
                [erl_syntax:tuple([
                    erl_syntax:atom(continue),
                    erl_syntax:list(var_list(Arity, Vers-1))])],
                none,
                NextCode),
            %% Any other result short-circuits and is returned as-is.
            erl_syntax:clause(
                [erl_syntax:variable('Other')],
                none,
                [erl_syntax:variable('Other')])
        ]).
%% @doc Compiles a list of syntax trees into a module and hot-loads it.
%% A -module attribute and a compile(export_all) attribute are prepended,
%% so Tree only needs to contain the function definitions themselves. Any
%% previously loaded version of the module is purged before loading.
%% On success, the full generated form list is returned (useful for
%% write/2); on failure, {error, Reason}.
%% (Fix: the old -spec declared a plain `ok' success value, but the function
%% has always returned {ok, Tree1} — the contract now matches the code.)
-spec compile(atom(), [erl_syntax:syntaxTree()]) ->
    {ok, [erl_syntax:syntaxTree()]} | {error, term()}.

compile(Mod, Tree) ->
    Tree1 = [
        erl_syntax:attribute(
            erl_syntax:atom(module),
            [erl_syntax:atom(Mod)]),
        erl_syntax:attribute(
            erl_syntax:atom(compile),
            [erl_syntax:list([erl_syntax:atom(export_all)])])
        | Tree
    ],
    Forms1 = [erl_syntax:revert(X) || X <- Tree1],
    Options = [report_errors, report_warnings, return_errors],
    case compile:forms(Forms1, Options) of
        {ok, Mod, Bin} ->
            %% Purge any old version so load_binary cannot fail on it.
            code:purge(Mod),
            File = atom_to_list(Mod) ++ ".erl",
            case code:load_binary(Mod, File, Bin) of
                {module, Mod} ->
                    {ok, Tree1};
                Error ->
                    {error, Error}
            end;
        Error ->
            {error, Error}
    end.
%% @doc Writes a generated tree as a standard erlang file
%% The file is named <Mod>.erl and placed in the configured local data
%% directory; each top-level form is pretty-printed followed by a blank line.
-spec write(atom(), [erl_syntax:syntaxTree()]) ->
    ok | {error, term()}.

write(Mod, Tree) ->
    BasePath = nksip_config_cache:local_data_path(),
    Path = filename:join(BasePath, atom_to_list(Mod)++".erl"),
    Content = list_to_binary(
        [io_lib:format("~s\n\n", [erl_prettypr:format(S)]) || S <-Tree]),
    file:write_file(Path, Content).
%% @doc Gets the list of exported functions of a module, excluding the
%% auto-generated module_info and behaviour_info entries. Returns error if
%% the module cannot be loaded (or module_info/0 fails for any reason).
%% (Fix: replaced old-style `catch Expr', which conflates thrown values
%% with normal returns and loses the exception class, with try/catch.)
-spec get_funs(atom()) ->
    [{atom(), integer()}] | error.

get_funs(Mod) ->
    try Mod:module_info() of
        List when is_list(List) ->
            lists:foldl(
                fun({Fun, Arity}, Acc) ->
                    case Fun of
                        module_info -> Acc;
                        behaviour_info -> Acc;
                        _ -> [{Fun, Arity}|Acc]
                    end
                end,
                [],
                nksip_lib:get_value(exports, List));
        _ ->
            error
    catch
        _:_ ->
            error
    end.
%% ===================================================================
%% Internal
%% ===================================================================
%% @private Generates a var list (A1, B1, ...): Arity variables named with
%% consecutive capital letters starting at $A (65), each suffixed with Vers.
%% (Fix: stray dataset metadata fused onto the final line removed; it would
%% not compile.)
var_list(Arity, Vers) ->
    VersS = nksip_lib:to_list(Vers),
    [erl_syntax:variable([V|VersS]) || V <- lists:seq(65, 64+Arity)].
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_auto_subscribe_placeholder).
-export([generate/1]).
-export([to_topic_table/3]).
-spec(generate(list() | map()) -> list() | map()).
%% Pre-compile subscription topic templates: each binary topic is split once
%% into a placeholder list (see generate/2), so per-client rendering via
%% to_topic_table/3 is cheap.
generate(Topics) when is_list(Topics) ->
    [generate(Topic) || Topic <- Topics];
generate(#{qos := Qos, topic := Topic}) when is_binary(Topic) ->
    #{qos => Qos, placeholder => generate(Topic, [])}.
-spec(to_topic_table(list(), map(), map()) -> list()).
%% Render each compiled placeholder list against the client/connection info
%% and parse it into {Topic, SubOpts} pairs, attaching the configured QoS.
to_topic_table(PlaceHolders, ClientInfo, ConnInfo) ->
    [begin
         Topic0 = to_topic(PlaceHolder, ClientInfo, ConnInfo, []),
         {Topic, Opts} = emqx_topic:parse(Topic0),
         {Topic, Opts#{qos => Qos}}
     end || #{qos := Qos, placeholder := PlaceHolder} <- PlaceHolders].
%%--------------------------------------------------------------------
%% internal
%% Compile a topic template binary into a list of literal binary chunks and
%% placeholder atoms (clientid | username | host | port). Placeholder
%% clauses must stay before the generic character clause so "${...}" is not
%% consumed byte by byte.
generate(<<>>, Acc) ->
    lists:reverse(Acc);
generate(<<"${clientid}", Rest/binary>>, Acc) ->
    generate(Rest, [clientid | Acc]);
generate(<<"${username}", Rest/binary>>, Acc) ->
    generate(Rest, [username | Acc]);
generate(<<"${host}", Rest/binary>>, Acc) ->
    generate(Rest, [host | Acc]);
generate(<<"${port}", Rest/binary>>, Acc) ->
    generate(Rest, [port | Acc]);
generate(<<C:8, Rest/binary>>, Acc) ->
    generate(Rest, extend_literal(C, Acc)).

%% Append one character to the literal chunk at the head of the accumulator,
%% starting a new chunk after a placeholder (or at the very start).
extend_literal(C, [Chunk | Acc]) when is_binary(Chunk) ->
    [<<Chunk/binary, C:8>> | Acc];
extend_literal(C, Acc) ->
    [<<C:8>> | Acc].
%% Render a compiled placeholder list into a topic binary using the client
%% and connection info maps. An unset username renders back as the literal
%% "${username}" rather than crashing; host/port come from the connection's
%% peername. The accumulator is built in reverse and flattened at the end.
%% (Fix: stray dataset metadata fused onto the final line removed; it would
%% not compile.)
to_topic([], _, _, Res) ->
    list_to_binary(lists:reverse(Res));
to_topic([Binary | PTs], C, Co, Res) when is_binary(Binary) ->
    to_topic(PTs, C, Co, [Binary | Res]);
to_topic([clientid | PTs], C = #{clientid := ClientID}, Co, Res) ->
    to_topic(PTs, C, Co, [ClientID | Res]);
to_topic([username | PTs], C = #{username := undefined}, Co, Res) ->
    to_topic(PTs, C, Co, [<<"${username}">> | Res]);
to_topic([username | PTs], C = #{username := Username}, Co, Res) ->
    to_topic(PTs, C, Co, [Username | Res]);
to_topic([host | PTs], C, Co = #{peername := {Host, _}}, Res) ->
    %% inet:ntoa/1 yields a string; keep it as a binary chunk
    HostBinary = list_to_binary(inet:ntoa(Host)),
    to_topic(PTs, C, Co, [HostBinary | Res]);
to_topic([port | PTs], C, Co = #{peername := {_, Port}}, Res) ->
    PortBinary = integer_to_binary(Port),
    to_topic(PTs, C, Co, [PortBinary | Res]).
% This file is part of ecsv released under the MIT license.
% See the LICENSE file for more information.
-module(ecsv_parser).
-author("<NAME> <<EMAIL>>").
-include("ecsv.hrl").
%
% This module is the raw csv parser.
% It will expect receiving:
% - {char, Char} for each character in a csv file
% - {eof} when the file is over
%
% It will send to the ResultPid (given to the funtion start_parsing):
% - {newline, NewLine} for each parsed line
% - {done} when the parsing is done (usually because eof has been sent)
%
% This parser is based on the blog post written by <NAME> located
% here http://andrewtill.blogspot.com/2009/12/erlang-csv-parser.html.
%
% This parser supports well formed csv files which are
% - a set of lines ending with a \n
% - each line contains a set of fields separated with a comma (,)
% - each field value can be enclosed with double quote (") ONLY
% - each field value can be empty
%
% Please note:
% - This parser has no failsafe mechanism if the file is badly formed!
% But the line a,,,,,\n is perfectly fine.
% - This parser doesn't allow a return (\n) in a field value!
%
-export([start_parsing/1, start_parsing/2]).
-define(EMPTY_STRING, []).
%% @doc start parsing a csv stream and send the result to ResultPid,
%% using the default #ecsv_opts{} options (comma delimiter).
start_parsing(ResultPid) ->
    DefaultOptions = default_options(),
    start_parsing(ResultPid, DefaultOptions).
%% @doc start parsing with explicit #ecsv_opts{} options; blocks in the
%% 'ready' receive loop until {eof} is received.
start_parsing(ResultPid, Options) ->
    ready(ResultPid, Options).
% -----------------------------------------------------------------------------
% default parser options: an #ecsv_opts{} record with all defaults
default_options() ->
    #ecsv_opts{ }.
% the ready state is the initial one and also the most common state
% through the parsing; start with no parsed fields and an empty
% current value
ready(ResultPid, Options) ->
    ready(ResultPid, Options, [], []).
% ready/4 consumes one character at a time. ParsedCsv holds the completed
% fields of the current line (reversed); CurrentValue holds the characters
% of the field being built (reversed). Both are re-reversed on emit.
ready(ResultPid, Options, ParsedCsv, CurrentValue) ->
    Delimiter = Options#ecsv_opts.delimiter,
    receive
        {eof} ->
            % flush whatever was accumulated as a final line, even if empty
            NewLine = lists:reverse([lists:reverse(CurrentValue) | ParsedCsv]),
            send_line(ResultPid, NewLine),
            send_eof(ResultPid);
        {char, Char} when (Char == $") ->
            % pass an empty string to in_quotes as we do not want the
            % preceding characters to be included, only those in quotes
            in_quotes(ResultPid, Options, ParsedCsv, ?EMPTY_STRING, Char);
        {char, Char} when Char == Delimiter ->
            % field complete: store it and start a fresh one
            ready(
                ResultPid, Options,
                [lists:reverse(CurrentValue) | ParsedCsv], ?EMPTY_STRING);
        {char, Char} when Char == $\n ->
            % a new line has been parsed: time to send it back
            NewLine = lists:reverse([lists:reverse(CurrentValue) | ParsedCsv]),
            ResultPid ! {newline, NewLine},
            ready(ResultPid, Options, [], ?EMPTY_STRING);
        {char, Char} when Char == $\r ->
            % ignore carriage return characters (e.g. from CRLF files)
            ready(ResultPid, Options, ParsedCsv, CurrentValue);
        {char, Char} ->
            ready(ResultPid, Options, ParsedCsv, [Char | CurrentValue])
    end.
% the in_quotes state adds all chars it receives to the value string until
% it receives a char matching the initial quote in which case it moves to
% the skip_to_delimiter state. eof flushes the field and line parsed so far.
in_quotes(ResultPid, Options, ParsedCsv, CurrentValue, QuoteChar) ->
    receive
        {eof} ->
            NewLine = lists:reverse([lists:reverse(CurrentValue) | ParsedCsv]),
            send_line(ResultPid, NewLine),
            send_eof(ResultPid);
        {char, Char} when Char == QuoteChar ->
            % closing quote: the field is complete
            skip_to_delimiter(
                ResultPid, Options,
                [lists:reverse(CurrentValue) | ParsedCsv]);
        {char, Char} ->
            in_quotes(ResultPid, Options, ParsedCsv, [Char | CurrentValue], QuoteChar)
    end.
% the skip_to_delimiter state throws away characters until the next value
% delimiter is received, then returns to the ready state; eof flushes the
% line parsed so far and finishes.
skip_to_delimiter(ResultPid, Options, ParsedCsv) ->
    receive
        {eof} ->
            NewLine = lists:reverse(ParsedCsv),
            send_line(ResultPid, NewLine),
            send_eof(ResultPid);
        {char, Char} when Char == Options#ecsv_opts.delimiter ->
            ready(ResultPid, Options, ParsedCsv, ?EMPTY_STRING);
        {char, _} ->
            % drop any other character. (Fix: the previous clause matched
            % {_} — a 1-tuple — so non-delimiter {char, C} messages never
            % matched, piled up in the mailbox, and the parser deadlocked
            % after any quoted field followed by extra characters.)
            skip_to_delimiter(ResultPid, Options, ParsedCsv)
    end.
% ----------------------------------------------------------------------------
% deliver one parsed line (a list of field strings) to the consumer
send_line(ResultPid, NewLine) ->
    ResultPid ! {newline, NewLine}.
% signal the consumer that parsing is complete
% (Fix: stray dataset metadata fused onto the final line removed; it would
% not compile.)
send_eof(ResultPid) ->
    ResultPid ! {done}.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%%
%% @doc This process is responsible for managing filenames assigned to
%% prefixes. It's started out of `machi_flu_psup'.
%%
%% Supported operations include finding the "current" filename assigned to
%% a prefix. Incrementing the sequence number and returning a new file name
%% and listing all data files assigned to a given prefix.
%%
%% All prefixes should have the form of `{prefix, P}'. Single filename
%% return values have the form of `{file, F}'.
%%
%% <h2>Finding the current file associated with a sequence</h2>
%% First it looks up the sequence number from the prefix name. If
%% no sequence file is found, it uses 0 as the sequence number and searches
%% for a matching file with the prefix and 0 as the sequence number.
%% If no file is found, the it generates a new filename by incorporating
%% the given prefix, a randomly generated (v4) UUID and 0 as the
%% sequence number.
%%
%% If the sequence number is > 0, then the process scans the filesystem
%% looking for a filename which matches the prefix and given sequence number and
%% returns that.
-module(machi_flu_filename_mgr).
-behavior(gen_server).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
-endif.
-export([
child_spec/2,
start_link/2,
find_or_make_filename_from_prefix/4,
increment_prefix_sequence/3,
list_files_by_prefix/2
]).
%% gen_server callbacks
-export([
init/1,
handle_cast/2,
handle_call/3,
handle_info/2,
terminate/2,
code_change/3
]).
-define(TIMEOUT, 10 * 1000).
-include("machi.hrl"). %% included for #ns_info record
-include("machi_projection.hrl"). %% included for pv1_epoch type
-record(state, {fluname :: atom(),
tid :: ets:tid(),
datadir :: string(),
epoch :: pv1_epoch()
}).
%% public API
%% @doc Supervisor child spec for one filename manager; the child id and
%% registered name are derived from the FLU name (see make_filename_mgr_name/1).
child_spec(FluName, DataDir) ->
    Name = make_filename_mgr_name(FluName),
    {Name,
     {?MODULE, start_link, [FluName, DataDir]},
     permanent, 5000, worker, [?MODULE]}.
%% @doc Start and locally register the filename manager for the given FLU.
start_link(FluName, DataDir) when is_atom(FluName) andalso is_list(DataDir) ->
    N = make_filename_mgr_name(FluName),
    gen_server:start_link({local, N}, ?MODULE, [FluName, DataDir], []).
-spec find_or_make_filename_from_prefix( FluName :: atom(),
                                         EpochId :: pv1_epoch(),
                                         Prefix :: {prefix, string()},
                                         machi_dt:ns_info()) ->
        {file, Filename :: string()} | {error, Reason :: term() } | timeout.
% @doc Find the latest available or make a filename from a prefix. A prefix
% should be in the form of a tagged tuple `{prefix, P}'. Returns a tagged
% tuple in the form of `{file, F}' or an `{error, Reason}'.
% Anything other than a {prefix, P} / #ns_info{} pair raises badarg.
find_or_make_filename_from_prefix(FluName, EpochId,
                                  {prefix, Prefix},
                                  #ns_info{}=NSInfo)
  when is_atom(FluName) ->
    N = make_filename_mgr_name(FluName),
    gen_server:call(N, {find_filename, FluName, EpochId, NSInfo, Prefix}, ?TIMEOUT);
find_or_make_filename_from_prefix(_FluName, _EpochId, Other, Other2) ->
    lager:error("~p is not a valid prefix/locator ~p", [Other, Other2]),
    error(badarg).
-spec increment_prefix_sequence( FluName :: atom(), NSInfo :: machi_dt:ns_info(), Prefix :: {prefix, string()} ) ->
    ok | {error, Reason :: term() } | timeout.
% @doc Increment the sequence counter for a given prefix. Prefix should
% be in the form of `{prefix, P}'. Raises badarg on any other prefix shape.
increment_prefix_sequence(FluName, #ns_info{}=NSInfo, {prefix, Prefix}) when is_atom(FluName) ->
    gen_server:call(make_filename_mgr_name(FluName), {increment_sequence, NSInfo, Prefix}, ?TIMEOUT);
increment_prefix_sequence(_FluName, _NSInfo, Other) ->
    lager:error("~p is not a valid prefix.", [Other]),
    error(badarg).
-spec list_files_by_prefix( FluName :: atom(), Prefix :: {prefix, string()} ) ->
    [ file:name() ] | timeout | {error, Reason :: term() }.
% @doc Given a prefix in the form of `{prefix, P}' return
% all the data files associated with that prefix. Returns
% a list. Raises badarg on any other prefix shape.
list_files_by_prefix(FluName, {prefix, Prefix}) when is_atom(FluName) ->
    gen_server:call(make_filename_mgr_name(FluName), {list_files, Prefix}, ?TIMEOUT);
list_files_by_prefix(_FluName, Other) ->
    lager:error("~p is not a valid prefix.", [Other]),
    error(badarg).
%% gen_server API
%% @private gen_server init: create the named ETS cache that maps
%% {NS, NSLocator, Prefix} -> current filename, and start with a dummy epoch
%% so the first find_filename call forces a sequence bump.
init([FluName, DataDir]) ->
    Tid = ets:new(make_filename_mgr_name(FluName), [named_table, {read_concurrency, true}]),
    {ok, #state{fluname = FluName,
                epoch = ?DUMMY_PV1_EPOCH,
                datadir = DataDir,
                tid = Tid}}.
%% @private No casts are part of the protocol; log and ignore.
handle_cast(Req, State) ->
    lager:warning("Got unknown cast ~p", [Req]),
    {noreply, State}.
%% Important assumption: by the time we reach here the EpochId is kosher.
%% The FLU has already validated that the caller's epoch id and the FLU's
%% epoch id are the same. So we *assume* that remains the case here - that
%% is to say, we are not wedged.
handle_call({find_filename, FluName, EpochId, NSInfo, Prefix}, _From,
            S = #state{ datadir = DataDir, epoch = EpochId, tid = Tid }) ->
    %% Our state and the caller's epoch ids are the same. Business as usual.
    File = handle_find_file(FluName, Tid, NSInfo, Prefix, DataDir),
    {reply, {file, File}, S};
handle_call({find_filename, _FluName, EpochId, NSInfo, Prefix}, _From, S = #state{ datadir = DataDir, tid = Tid }) ->
    %% If the epoch id in our state and the caller's epoch id were the same, it would've
    %% matched the above clause. Since we're here, we know that they are different.
    %% If epoch ids between our state and the caller's are different, we must increment the
    %% sequence number, generate a filename and then cache it.
    File = increment_and_cache_filename(Tid, DataDir, NSInfo, Prefix),
    {reply, {file, File}, S#state{epoch = EpochId}};
handle_call({increment_sequence, #ns_info{name=NS, locator=NSLocator}, Prefix}, _From, S = #state{ datadir = DataDir, tid=Tid }) ->
    NSInfo = #ns_info{name=NS, locator=NSLocator},
    _File = increment_and_cache_filename(Tid, DataDir, NSInfo, Prefix),
    {reply, ok, S};
handle_call({list_files, Prefix}, From, S = #state{ datadir = DataDir }) ->
    %% Directory listing can be slow; do it off-process and reply late so
    %% the manager is not blocked.
    spawn(fun() ->
          L = list_files(DataDir, Prefix),
          gen_server:reply(From, L)
      end),
    {noreply, S};
handle_call(Req, From, State) ->
    lager:warning("Got unknown call ~p from ~p", [Req, From]),
    %% NOTE(review): 'hoge' looks like a placeholder reply; callers receive
    %% this opaque atom for unknown requests -- consider a tagged error.
    {reply, hoge, State}.
%% @private No raw messages are expected; log and ignore so the mailbox
%% does not accumulate.
handle_info(Info, State) ->
    lager:warning("Got unknown info ~p", [Info]),
    {noreply, State}.
%% @private Nothing to clean up (the ETS table dies with the process).
terminate(Reason, _State) ->
    lager:info("Shutting down because ~p", [Reason]),
    ok.
%% @private No state migrations needed across versions.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% private
%% Quoted from https://github.com/afiskon/erlang-uuid-v4/blob/master/src/uuid.erl
%% MIT License
%% Produce a random (version 4) UUID string as an iolist: 16 random bytes
%% with the version nibble forced to 4 and the variant bits to 10xx.
generate_uuid_v4_str() ->
    <<A:32, B:16, C:16, D:16, E:48>> = crypto:strong_rand_bytes(16),
    VersionPart = C band 16#0fff,
    VariantPart = (D band 16#3fff) bor 16#8000,
    io_lib:format("~8.16.0b-~4.16.0b-4~3.16.0b-~4.16.0b-~12.16.0b",
                  [A, B, VersionPart, VariantPart, E]).
%% @private Glob the data directory for files whose name embeds the prefix
%% between '^' separators (the machi data filename layout).
list_files(DataDir, Prefix) ->
    {F_bin, Path} = machi_util:make_data_filename(DataDir, "*^" ++ Prefix ++ "^*"),
    filelib:wildcard(binary_to_list(F_bin), filename:dirname(Path)).
%% @private Registered-name atom for a FLU's filename manager; FluName is an
%% internal, trusted atom, so creating a derived atom here is safe.
make_filename_mgr_name(FluName) when is_atom(FluName) ->
    list_to_atom(lists:concat([FluName, "_filename_mgr"])).
%% @private Return the cached current filename for {NS, NSLocator, Prefix},
%% generating and caching one (at the current max sequence number) on a miss.
handle_find_file(_FluName, Tid, #ns_info{name=NS, locator=NSLocator}, Prefix, DataDir) ->
    case ets:lookup(Tid, {NS, NSLocator, Prefix}) of
        [] ->
            N = machi_util:read_max_filenum(DataDir, NS, NSLocator, Prefix),
            F = generate_filename(DataDir, NS, NSLocator, Prefix, N),
            true = ets:insert(Tid, {{NS, NSLocator, Prefix}, F}),
            F;
        [{_Key, File}] ->
            File
    end.
%% @private Build a new data filename embedding the namespace, prefix, a
%% fresh v4 UUID, and the sequence number N; returned as a string.
generate_filename(DataDir, NS, NSLocator, Prefix, N) ->
    {F, _Q} = machi_util:make_data_filename(
                DataDir,
                NS, NSLocator, Prefix,
                generate_uuid_v4_str(),
                N),
    binary_to_list(F).
%% @private Bump the persisted sequence number for the prefix, generate the
%% filename at the new sequence, cache it in ETS and return it.
increment_and_cache_filename(Tid, DataDir, #ns_info{name=NS,locator=NSLocator}, Prefix) ->
    ok = machi_util:increment_max_filenum(DataDir, NS, NSLocator, Prefix),
    N = machi_util:read_max_filenum(DataDir, NS, NSLocator, Prefix),
    F = generate_filename(DataDir, NS, NSLocator, Prefix, N),
    true = ets:insert(Tid, {{NS, NSLocator, Prefix}, F}),
    F.
-ifdef(TEST).
-endif.
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
%% --------------------------------------------------
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%% --------------------------------------------------
%%
%% @author <NAME> <<EMAIL>>
%%
%% @doc Gproc Publish/Subscribe patterns
%% This module implements a few convenient functions for publish/subscribe.
%%
%% Publish/subscribe with Gproc relies entirely on gproc properties and counters.
%% This makes for a very concise implementation, as the monitoring of subscribers and
%% removal of subscriptions comes for free with Gproc.
%%
%% Using this module instead of rolling your own (which is easy enough) brings the
%% benefit of consistency, in tracing and debugging.
%% The implementation can also serve to illustrate how to use gproc properties and
%% counters to good effect.
%%
%% @type scope() = l | g.
%% @type event() = any().
%% @type msg() = any().
%% @type status() = 1 | 0.
%% @end
-module(gproc_ps).
-export([subscribe/2,
subscribe_cond/3,
change_cond/3,
unsubscribe/2,
publish/3,
publish_cond/3,
list_subs/2
]).
-export([create_single/2,
delete_single/2,
disable_single/2,
enable_single/2,
tell_singles/3,
notify_single_if_true/4,
list_singles/2]).
-define(ETag, gproc_ps_event).
%% These types are duplicated above in EDoc syntax, since EDoc annoyingly doesn't pick up
%% the type definitions, even if they are referred to in the -spec:s that EDoc does parse.
-type scope() :: l | g.
-type event() :: any().
-type msg() :: any().
-type status() :: 1 | 0.
-spec subscribe(scope(), event()) -> true.
%% @doc Subscribe to events of type `Event'
%%
%% Any messages published with `gproc_ps:publish(Scope, Event, Msg)' will be
%% delivered to the current process, along with all other subscribers.
%%
%% This function creates a property, `{p,Scope,{gproc_ps_event,Event}}', which
%% can be searched and displayed for debugging purposes.
%%
%% Note that, as with {@link gproc:reg/1}, this function will raise an
%% exception if you try to subscribe to the same event twice from the same
%% process.
%% @end
%% Implementation: one gproc property per subscriber; gproc removes it
%% automatically when the subscriber dies.
subscribe(Scope, Event) when Scope==l; Scope==g ->
    gproc:reg({p,Scope,{?ETag, Event}}).
-spec subscribe_cond(scope(), event(), undefined | ets:match_spec()) -> true.
%% @doc Subscribe conditionally to events of type `Event'
%%
%% This function is similar to {@link subscribe/2}, but adds a condition
%% in the form of a match specification.
%%
%% The condition is tested by the {@link publish_cond/3} function
%% and a message is delivered only if the condition is true. Specifically,
%% the test is:
%%
%% `ets:match_spec_run([Msg], ets:match_spec_compile(Cond)) == [true]'
%%
%% In other words, if the match_spec returns true for a message, that message
%% is sent to the subscriber. For any other result from the match_spec, the
%% message is not sent. `Cond == undefined' means that all messages will be
%% delivered (that is, `publish_cond/3' will treat 'normal' subscribers just
%% like {@link publish/3} does, except that `publish/3' strictly speaking
%% ignores the Value part of the property completely, whereas `publish_cond/3'
%% expects it to be either undefined or a valid match spec).
%%
%% This means that `Cond=undefined' and ``Cond=[{'_',[],[true]}]'' are
%% equivalent.
%%
%% Note that, as with {@link gproc:reg/1}, this function will raise an
%% exception if you try to subscribe to the same event twice from the same
%% process.
%% @end
subscribe_cond(Scope, Event, Spec) when Scope==l; Scope==g ->
    %% Validate the spec up front so a broken one fails here, at the
    %% subscriber, rather than silently at publish time.
    case Spec of
        undefined -> ok;
        [_|_] -> _ = ets:match_spec_compile(Spec); % validation
        _ -> error(badarg)
    end,
    gproc:reg({p,Scope,{?ETag, Event}}, Spec).
-spec change_cond(scope(), event(), undefined | ets:match_spec()) -> true.
%% @doc Change the condition specification of an existing subscription.
%%
%% This function atomically changes the condition spec of an existing
%% subscription (see {@link subscribe_cond/3}). An exception is raised if
%% the subscription doesn't already exist.
%%
%% Note that this function can also be used to change a conditional subscription
%% to an unconditional one (by setting `Spec = undefined'), or a 'normal'
%% subscription to a conditional one.
%% @end
change_cond(Scope, Event, Spec) when Scope==l; Scope==g ->
    %% Same up-front validation as subscribe_cond/3.
    case Spec of
        undefined -> ok;
        [_|_] -> _ = ets:match_spec_compile(Spec); % validation
        _ -> error(badarg)
    end,
    gproc:set_value({p,Scope,{?ETag, Event}}, Spec).
-spec unsubscribe(scope(), event()) -> true.
%% @doc Remove subscription created using `subscribe(Scope, Event)'
%%
%% This removes the property created through `subscribe/2'. Raises an
%% exception if the calling process holds no such subscription.
%% @end
unsubscribe(Scope, Event) when Scope==l; Scope==g ->
    gproc:unreg({p,Scope,{?ETag, Event}}).
-spec publish(scope(), event(), msg()) -> ok.
%% @doc Publish the message `Msg' to all subscribers of `Event'
%%
%% The message delivered to each subscriber will be of the form:
%%
%% `{gproc_ps_event, Event, Msg}'
%%
%% The function uses `gproc:send/2' to send a message to all processes which have a
%% property `{p,Scope,{gproc_ps_event,Event}}'. Subscription conditions (see
%% subscribe_cond/3) are ignored by this function.
%% @end
publish(Scope, Event, Msg) when Scope==l; Scope==g ->
    gproc:send({p, Scope, {?ETag, Event}}, {?ETag, Event, Msg}).
-spec publish_cond(scope(), event(), msg()) -> msg().
%% @doc Publishes the message `Msg' to conditional subscribers of `Event'
%%
%% The message will be delivered to each subscriber provided their respective
%% condition tests succeed. A subscriber whose stored spec is invalid (or
%% raises when compiled/run) is silently skipped rather than blocking
%% delivery to the others.
%%
%% @see subscribe_cond/3.
%%
publish_cond(Scope, Event, Msg) when Scope==l; Scope==g ->
    Message = {?ETag, Event, Msg},
    %% Select every {Pid, Spec} pair registered under this event property,
    %% then deliver per-subscriber according to the spec.
    lists:foreach(
      fun({Pid, undefined}) -> Pid ! Message;
         ({Pid, Spec}) ->
              try C = ets:match_spec_compile(Spec),
                  case ets:match_spec_run([Msg], C) of
                      [true] -> Pid ! Message;
                      _ -> ok
                  end
              catch
                  error:_ ->
                      ok
              end
      end, gproc:select({Scope,p}, [{ {{p,Scope,{?ETag,Event}}, '$1', '$2'},
                                      [], [{{'$1','$2'}}] }])).
-spec list_subs(scope(), event()) -> [pid()].
%% @doc List the pids of all processes subscribing to `Event'
%%
%% This function uses `gproc:select/2' to find all properties indicating a subscription.
%% Both plain and conditional subscribers are included.
%% @end
list_subs(Scope, Event) when Scope==l; Scope==g ->
gproc:select({Scope,p}, [{ {{p,Scope,{?ETag,Event}}, '$1', '_'}, [], ['$1'] }]).
-spec create_single(scope(), event()) -> true.
%% @doc Creates a single-shot subscription entry for Event
%%
%% Single-shot subscriptions behave similarly to the `{active,once}' property of sockets.
%% Once a message has been published, the subscription is disabled, and no more messages
%% will be delivered to the subscriber unless the subscription is re-enabled using
%% `enable_single/2'.
%%
%% The function creates a gproc counter entry, `{c,Scope,{gproc_ps_event,Event}}', which
%% will have either of the values `0' (disabled) or `1' (enabled). Initially, the value
%% is `1', meaning the subscription is enabled.
%%
%% Counters are used in this case, since they can be atomically updated by both the
%% subscriber (owner) and publisher. The publisher sets the counter value to `0' as soon
%% as it has delivered a message.
%% @end
create_single(Scope, Event) when Scope==l; Scope==g ->
%% Initial counter value 1 = enabled.
gproc:reg({c,Scope,{?ETag, Event}}, 1).
-spec delete_single(scope(), event()) -> true.
%% @doc Deletes the single-shot subscription for Event
%%
%% This function deletes the counter entry representing the single-shot subscription.
%% An exception will be raised if there is no such subscription.
%% @end
delete_single(Scope, Event) when Scope==l; Scope==g ->
gproc:unreg({c,Scope,{?ETag, Event}}).
-spec disable_single(scope(), event()) -> integer().
%% @doc Disables the single-shot subscription for Event
%%
%% This function changes the value of the corresponding gproc counter to `0' (disabled).
%%
%% The subscription remains (e.g. for debugging purposes), but with a 'disabled' status.
%% This function is insensitive to concurrency, using 'wrapping' ets counter update ops.
%% This guarantees that the counter will have either the value 1 or 0, depending on which
%% update happened last.
%%
%% The return value indicates the previous status.
%% @end
disable_single(Scope, Event) when Scope==l; Scope==g ->
%% {Incr, Threshold, SetValue} = {-1, 0, 0}: decrement, but clamp at 0.
gproc:update_counter({c,Scope,{?ETag,Event}}, {-1, 0, 0}).
-spec enable_single(scope(), event()) -> integer().
%% @doc Enables the single-shot subscription for Event
%%
%% This function changes the value of the corresponding gproc counter to `1' (enabled).
%%
%% After enabling, the subscriber will receive the next message published for `Event',
%% after which the subscription is automatically disabled.
%%
%% This function is insensitive to concurrency, using 'wrapping' ets counter update ops.
%% This guarantees that the counter will have either the value 1 or 0, depending on which
%% update happened last.
%%
%% The return value indicates the previous status.
%% @end
enable_single(Scope, Event) when Scope==l; Scope==g ->
%% {Incr, Threshold, SetValue} = {1, 1, 1}: increment, but cap at 1.
gproc:update_counter({c,Scope,{?ETag,Event}}, {1, 1, 1}).
-spec tell_singles(scope(), event(), msg()) -> [pid()].
%% @doc Publish `Msg' to all single-shot subscribers of `Event'
%%
%% The subscriber status of each active subscriber is changed to `0' (disabled) before
%% delivering the message. This reduces the risk that two different processes will be able
%% to both deliver a message before disabling the subscribers. This could happen if the
%% context switch happens just after the select operation (finding the active subscribers)
%% and before the process is able to update the counters. In this case, it is possible
%% that more than one can be delivered.
%%
%% The way to prevent this from happening is to ensure that only one process publishes
%% for `Event'.
%% @end
tell_singles(Scope, Event, Msg) when Scope==l; Scope==g ->
%% The select matches only enabled subscribers (counter value 1) and, in
%% the match-spec body, builds ready-made gproc:update_counters/2
%% instructions that will disable each of them ({-1,0,0}). wrap/1 escapes
%% the tuples inside Event so the match-spec engine treats them as
%% literals rather than patterns.
Subs = gproc:select(
{Scope,c},
[{ {{c,Scope,{?ETag,Event}}, '$1', 1}, [],
[{{ {{c,Scope, {{?ETag,wrap(Event)}} }}, '$1', {{-1,0,0}} }}] }]),
%% Disable first, then deliver; return the pids that were notified.
_ = gproc:update_counters(Scope, Subs),
[begin P ! {?ETag, Event, Msg}, P end || {_,P,_} <- Subs].
%% @private
%% Escape a term for use as a literal inside a match-spec body: each tuple
%% is wrapped in an extra 1-ary tuple (the `{{...}}' match-spec convention,
%% as used by tell_singles/3), recursing through lists and nested tuples.
wrap(Term) when is_tuple(Term) ->
    {list_to_tuple([wrap(Element) || Element <- tuple_to_list(Term)])};
wrap(Term) when is_list(Term) ->
    lists:map(fun wrap/1, Term);
wrap(Term) ->
    Term.
-spec list_singles(scope(), event()) -> [{pid(), status()}].
%% @doc Lists all single-shot subscribers of Event, together with their status
%%
%% Each entry is `{Pid, Status}' where `Status' is the current value of the
%% subscriber's counter entry: `1' (enabled) or `0' (disabled).
%% @end
%% Fix: add the same scope guard used by every other function in this API,
%% so an invalid scope fails here with function_clause instead of being
%% passed on to gproc:select/2.
list_singles(Scope, Event) when Scope==l; Scope==g ->
    gproc:select({Scope,c}, [{ {{c,Scope,{?ETag,Event}}, '$1', '$2'},
                               [], [{{'$1','$2'}}] }]).
-spec notify_single_if_true(scope(), event(), fun(() -> boolean()), msg()) -> ok.
%% @doc Create/enable a single subscription for event; notify at once if F() -> true
%%
%% This function is a convenience function, wrapping a single-shot pub/sub around a
%% user-provided boolean test. `Msg' should be what the publisher will send later, if the
%% immediate test returns `false'.
%% @end
%% Fix: add the same scope guard used by every other function in this API
%% so an invalid scope fails fast (the helper functions below are guarded
%% the same way). Also dropped trailing non-Erlang garbage from the source.
notify_single_if_true(Scope, Event, F, Msg) when Scope==l; Scope==g ->
    %% Ensure an enabled single-shot subscription exists: enabling raises if
    %% the counter entry is missing, in which case we create it
    %% (create_single/2 initialises the counter to 1 = enabled).
    try enable_single(Scope, Event)
    catch
        error:_ ->
            create_single(Scope, Event)
    end,
    case F() of
        true ->
            %% The condition already holds: consume the subscription
            %% ourselves and deliver the message directly to the caller.
            disable_single(Scope, Event),
            self() ! {?ETag, Event, Msg},
            ok;
        false ->
            ok
    end.
%%% @doc Utils module for sheldon.
%%%
%%% Copyright Erlang Solutions Ltd. 2017 <<EMAIL>>
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%% @end
%%% @copyright Erlang Solutions Ltd. <<EMAIL>>
%%%
-module(sheldon_utils).
-author("<NAME> <<EMAIL>>").
%% API
-export([normalize/1, is_number/1, match_in_patterns/2]).
-compile({no_auto_import, [is_number/1]}).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc normalizes the iodata() escaping some characters and converting
%% them to string().
-spec normalize(iodata()) -> string().
normalize(Word) ->
    binary_to_list(escape_chars(Word)).
%% @doc checks if iodata() is a number, i.e. consists of one or more
%% decimal digits.
%% Fix: the previous pattern `^[0-9]*$' also matched the empty string,
%% classifying `""' as a number; `+' requires at least one digit.
-spec is_number(iodata()) -> boolean().
is_number(Word) ->
    re:run(Word, "^[0-9]+$") =/= nomatch.
%% @doc checks if some string() matches in one of the patterns given as
%% a parameter
-spec match_in_patterns(string(), [string()]) -> boolean().
match_in_patterns(Word, Patterns) ->
    %% Fold match/2 over the patterns; once a pattern has matched, match/2
    %% short-circuits and no further regex work is done.
    lists:foldl(fun(Pattern, Acc) -> match({Word, Pattern}, Acc) end,
                false, Patterns).
%%%===================================================================
%%% Internal Functions
%%%===================================================================
%% Strip decorating characters from a word: leading punctuation first, then
%% trailing punctuation, then words that are only a special character.
%% Always returns a binary (escape_patterns/3 may return a list or binary
%% depending on what re:replace/4 produced).
-spec escape_chars(iodata()) -> binary().
escape_chars(Word) ->
Word1 = escape_patterns(Word, prefixes(), prefixes()),
Word2 = escape_patterns(Word1, sufixes(), sufixes()),
case escape_patterns(Word2, special_chars(), special_chars()) of
EscapedWord when is_list(EscapedWord) ->
list_to_binary(EscapedWord);
EscapedWord ->
EscapedWord
end.
-spec match({string(), string()}, boolean()) -> boolean().
%% Accumulator step for match_in_patterns/2: once a match has been found
%% the result stays true; otherwise test the word against the pattern.
match(_Pair, true) ->
    true;
match({Word, Pattern}, false) ->
    nomatch =/= re:run(Word, Pattern).
%% Repeatedly delete every pattern from the word. Whenever a replacement
%% changes the word, restart from the full pattern list so earlier patterns
%% get another chance on the shortened word.
%% NOTE(review): the "unchanged" test is a term comparison of the re:replace/4
%% result (iodata) against the input term; the first iteration on a plain
%% string/binary may compare unequal even when nothing was removed, costing
%% one extra pass — confirm this is only an inefficiency, not a loop.
-spec escape_patterns(iodata(), [iodata()], [iodata()]) -> iodata().
escape_patterns(Word, [], _) ->
Word;
escape_patterns(Word, [Pattern | Patterns], OriginalPatterns) ->
case re:replace(Word, Pattern, "", [global]) of
Word ->
escape_patterns(Word, Patterns, OriginalPatterns);
Word1 ->
escape_patterns(Word1, OriginalPatterns, OriginalPatterns)
end.
%% Regexes for punctuation that may precede a word: ( " ' [ { `
-spec prefixes() -> [sheldon_config:regex()].
prefixes() ->
["^[(]", "^\"", "^\'", "^[[]", "^{", "^`"].
%% Regexes for punctuation that may follow a word: . , : ; ? ) ! ] } 's " ' `
%% plus embedded newlines. ("sufixes" spelling kept: the name is referenced
%% by escape_chars/1.)
-spec sufixes() -> [sheldon_config:regex()].
sufixes() ->
["[.]$",
",$",
":$",
";$",
"[?]$",
"[)]$",
"!$",
"]$",
"}$",
"'s$",
"\"$",
"\'$",
"`$",
"\n"].
%% Regexes for words that are exactly one special character (& > <),
%% e.g. HTML entity leftovers; such words are removed entirely.
-spec special_chars() -> [sheldon_config:regex()].
special_chars() ->
["^&$", "^>$", "^<$"]. | src/sheldon_utils.erl | 0.547464 | 0.422028 | sheldon_utils.erl | starcoder |
%% @doc This module represents an arena process. The arena is where throwdown
%% games are executed. The arena process maintains the following pieces of
%% state:
%%
%% <ul>
%% <li>`mode': An atom describing what phase the game is in. This is help
%% isolate and identify API calls which may not be legal at certain times.</li>
%% <li>`name': A name for the arena. Can be an atom, string, or binary
%% string.</li>
%% <li>`rules': A function which decides which moves beat other moves. The
%% `default_rules/2' function implements the rules for Sam Kass' "Rock, paper,
%% scissors, lizard, spock" as described in the <a
%% href="http://www.samkass.com/theories/RPSSL.html">original web
%% page</a>.</li>
%% <li>`players': A map of player names to pids representing those
%% players.</li>
%% <li>`game_state': The current game state. See below for more details about
%% game state.</li>
%% </ul>
%%
%% Arenas exit after a winner has been decided or all players have been
%% eliminated.
%%
%% <h3>Game state</h3>
%% Game state is represented as a map with the following keys:
%% <ul>
%% <li>`choices': These are the valid, legal moves that are available. Can be
%% modified by setting the application environment variable `choices'. By
%% default this is rock, paper, scissors, lizard, spock. This list can be
%% used by player processes to select a valid legal move.</li>
%% <li>`current': A set representing the moves in the current round of the
%% game.</li>
%% <li>`results': A list representing the past moves and outcomes from past
%% rounds of this game.</li>
%% </ul>
%%
%% This game state is passed into the player process so that each player may
%% make a move selection.
-module(throwdown_arena).
-behaviour(gen_server).
-type arena_name() :: atom() | string() | binary().
-type arena_mode() :: 'stop' | 'waiting' | 'playing' | 'evaluation'.
-type move() :: 'rock' | 'paper' | 'scissors' | 'lizard' | 'spock'.
-record(state, {
name :: arena_name(),
mode :: arena_mode(),
rules :: function(),
players = #{} :: map(),
game_state = #{} :: map()
}).
-export([
start_link/1,
start_link/2,
child_spec/1,
register_player/3,
submit_choice/3,
done/1,
default_rules/2
]).
-export([
init/1,
handle_cast/2,
handle_call/3,
handle_info/2,
terminate/2,
code_change/3
]).
%% public API
-spec start_link( Name :: arena_name() ) -> {ok, Pid :: pid()}.
%% @doc Start an arena with the given name using the default rules.
%% This is the normal way arenas are created.
%% @end
start_link(Name) ->
start_link(Name, fun default_rules/2).
-spec start_link( Name :: arena_name(),
Rules :: function() ) -> {ok, Pid :: pid()}.
%% @doc Start an arena with the given name and a rules function.
%% The rules function should take two moves and determine one of the following
%% atoms: `win', `loss', `tie' depending on whether the first move beats the
%% second move.
%% @end
start_link(Name, Rules) when is_function(Rules) ->
gen_server:start_link(?MODULE, [Name, Rules], []).
%% @private Convenience function for the supervisor.
%% Restart is 'temporary' because arenas are expected to exit when a game
%% ends (winner found or all players eliminated) and must not be restarted.
child_spec(Name) ->
#{id => Name,
start => {throwdown_arena, start_link, []},
restart => temporary,
shutdown => 2000,
type => worker,
modules => [throwdown_arena]}.
-spec default_rules( MoveA :: move(),
                     MoveB :: move() ) -> 'win' | 'loss' | 'tie'.
%% @doc The default rule set: Sam Kass' "rock, paper, scissors, lizard,
%% spock". Returns `win', `loss' or `tie' depending on whether the first
%% move beats, loses to, or draws with the second move. Any pair involving
%% an unknown move fails with `function_clause'.
%%
%% Clauses are grouped per first move: the tie first, then the two winning
%% match-ups, then the two losing ones.
default_rules(rock, rock) -> tie;
default_rules(rock, scissors) -> win;
default_rules(rock, lizard) -> win;
default_rules(rock, paper) -> loss;
default_rules(rock, spock) -> loss;
default_rules(paper, paper) -> tie;
default_rules(paper, rock) -> win;
default_rules(paper, spock) -> win;
default_rules(paper, scissors) -> loss;
default_rules(paper, lizard) -> loss;
default_rules(scissors, scissors) -> tie;
default_rules(scissors, paper) -> win;
default_rules(scissors, lizard) -> win;
default_rules(scissors, rock) -> loss;
default_rules(scissors, spock) -> loss;
default_rules(lizard, lizard) -> tie;
default_rules(lizard, paper) -> win;
default_rules(lizard, spock) -> win;
default_rules(lizard, rock) -> loss;
default_rules(lizard, scissors) -> loss;
default_rules(spock, spock) -> tie;
default_rules(spock, rock) -> win;
default_rules(spock, scissors) -> win;
default_rules(spock, paper) -> loss;
default_rules(spock, lizard) -> loss.
-spec register_player( Arena :: pid(),
Name :: binary(),
PlayerPid :: pid() ) -> ok | {error, cannot_register}.
%% @doc This API call registers a player process for a game in this arena.
%% Registration is only possible while the arena is in `waiting' mode.
%% @end
register_player(Arena, Name, PlayerPid) ->
gen_server:call(Arena, {register, Name, PlayerPid}).
-spec submit_choice( Arena :: pid(),
Name :: arena_name(),
Pick :: move() ) -> ok | {error, cannot_select}.
%% @doc This API call submits a player move to the current round of the game.
%% Selecting is only possible once the arena is in `playing' mode.
%% @end
submit_choice(Arena, Name, Pick) ->
gen_server:call(Arena, {choice, {Name, Pick}}).
-spec done( Arena :: pid() ) -> ok.
%% @doc Signal the arena that all the players have been added and that playing
%% rounds should begin. Fails with `{error, not_enough_players}' if fewer
%% than two players are registered.
%% @end
done(Arena) ->
gen_server:call(Arena, done).
%% gen_server callback
%% @private Build the initial game state: the legal moves (configurable via
%% the 'choices' application env), an empty set of current-round picks and
%% an empty round history. The arena starts in 'waiting' mode.
init([Name, Rules]) ->
Choices = throwdown:get_env(choices, [rock, paper, scissors, lizard, spock]),
GState = #{ choices => Choices, current => ordsets:new(), results => [] },
{ok, #state{ mode = waiting, name = Name, rules = Rules, game_state = GState }}.
%% @private No casts are part of the protocol; ignore them all.
handle_cast(_Cast, State) ->
{noreply, State}.
%% @private 'done': start playing if at least two players registered;
%% the first round is kicked off before switching mode to 'playing'.
handle_call(done, _From, State = #state{ players = P, game_state = G } ) ->
{Reply, NewState} = case maps:size(P) of
X when X < 2 ->
{{error, not_enough_players}, State};
_ ->
play_round(P, G),
{ok, State#state{mode = playing}}
end,
{reply, Reply, NewState};
%% @private Moves are rejected until the game has started.
handle_call({choice, _C}, _From, State = #state{ mode = waiting }) ->
{reply, {error, cannot_select}, State};
%% @private Record a move for the current round. Once every registered
%% player has submitted (ordset size equals player count) the arena
%% schedules evaluation by messaging itself 'start_round'.
handle_call({choice, C}, _From, State = #state{ mode = playing, players = P, game_state = G }) ->
Current = maps:get(current, G),
NewCurrent = ordsets:add_element(C, Current),
NewG = maps:put(current, NewCurrent, G),
NewMode = case ordsets:size(NewCurrent) == maps:size(P) of
true ->
self() ! start_round,
evaluation;
false ->
playing
end,
{reply, ok, State#state{ mode = NewMode, game_state = NewG }};
%% @private Registration is closed once the game is in progress.
handle_call({register, _Name, _PlayerPid}, _From, State = #state{ mode = playing }) ->
{reply, {error, cannot_register}, State};
%% @private Add a player (name -> pid) while still waiting.
handle_call({register, Name, PlayerPid}, _From, State = #state{ mode = waiting,
players = P }) ->
NewP = maps:put(Name, PlayerPid, P),
{reply, ok, State#state{ players = NewP }};
%% @private Catch-all: unknown calls get a joke reply ('dieeeeee') rather
%% than crashing the arena.
handle_call(_Call, _From, State) ->
{reply, dieeeeee, State}.
%% @private Evaluate a finished round: score every submitted choice against
%% the others, archive the round, eliminate losers, and either stop the
%% arena (0 players left, or 1 winner) or start the next round.
%% NOTE(review): the history stores the raw choices (Current), not the
%% computed Results — confirm that is intentional.
handle_info(start_round, State = #state{ mode = evaluation, rules = R, players = P, game_state = G }) ->
Current = ordsets:to_list(maps:get(current, G)),
Results = evaluate_choices(R, Current, Current, []),
G1 = maps:put(current, ordsets:new(), G),
R0 = maps:get(results, G),
NewG = maps:put(results, [ Current | R0 ], G1),
NewP = remove_players(Results, P),
case maps:size(NewP) of
0 ->
{stop, no_players_remain, State#state{ mode = stop, players = NewP, game_state = NewG}};
1 ->
{stop, {winner, NewP}, State#state{ mode = stop, players = NewP, game_state = NewG}};
_ ->
play_round(NewP, NewG),
{noreply, State#state{ mode = playing, players = NewP, game_state = NewG }}
end;
%% @private Drain and ignore any other message.
handle_info(_Info, State) ->
{noreply, State}.
%% @private Nothing to clean up on termination.
terminate(_Reason, _State) ->
ok.
%% @private No state migration needed between code versions.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% private
%% @private Ask every registered player process to make a play with the
%% current game state. Returns the maps:map/2 result; callers use this in
%% statement position and ignore the return value.
play_round(Players, GameState) ->
    Notify = fun(_Name, Pid) -> throwdown_player:play(Pid, GameState) end,
    maps:map(Notify, Players).
%% @private After a round has completed, eliminate every player whose
%% outcome was a loss: tell the player process to leave, then drop it from
%% the players map. Wins and ties keep their entries untouched.
remove_players([], Players) ->
    Players;
remove_players([{loss, {Name, _Pick}} | Rest], Players) ->
    throwdown_player:leave(maps:get(Name, Players)),
    remove_players(Rest, maps:remove(Name, Players));
remove_players([_Outcome | Rest], Players) ->
    remove_players(Rest, Players).
%% @private Walk the list of submitted choices and tag each one with its
%% overall outcome against all the other choices: {win|loss|tie, Choice}.
%% (Trailing non-Erlang garbage from the source line has been dropped.)
evaluate_choices(_Rules, [], _All, Acc) ->
    Acc;
evaluate_choices(Rules, [Choice | Rest], All, Acc) ->
    Opponents = All -- [Choice],
    Tagged =
        case versus(Rules, Choice, Opponents, undefined) of
            win  -> {win, Choice};
            loss -> {loss, Choice};
            tie  -> {tie, Choice}
        end,
    evaluate_choices(Rules, Rest, All, [Tagged | Acc]).

%% @private Pit one player's move against every opponent's move in turn.
%% A single loss ends the evaluation immediately; otherwise the result of
%% the last comparison (win or tie) is returned.
versus(_Rules, _Player, [], Result) ->
    Result;
versus(Rules, {_NameA, PlayA} = Player, [{_NameB, PlayB} | Rest], _Prev) ->
    case Rules(PlayA, PlayB) of
        loss ->
            loss;
        Outcome ->
            versus(Rules, Player, Rest, Outcome)
    end.
%%==============================================================================
%% @copyright 2019-2020 Erlang Solutions Ltd.
%% Licensed under the Apache License, Version 2.0 (see LICENSE file)
%% @end
%%
%% @doc
%% In this scenario users are sending multiple messages to their neighbours in
%% intervals while some are reading messages from MAM.
%%
%% == User steps: ==
%%
%% 1. Connect to the XMPP host given by the `mim_host' variable.
%%
%% 2. Choose a role based on the user ID. Every `mam_reader_sessions_indicator'
%% a user will be assigned the role of `mam_reader'. Other users will become
%% `senders', who send normal messages.
%%
%% === Sender users ===
%%
%% 3. Set filter on incoming stanzas so that only messages are received.
%%
%% 4. Send presence `available' and wait for 5 seconds.
%%
%% 5. Select neighbouring users with lower and greater IDs defined by the
%% `number_of_prev_users' and `number_of_next_users' values.
%%
%% 6. Send messages to every neighbour multiple times (defined by
%% `number_of_send_message_repeats') in a round-robin fashion. After each
%% message wait for `message_interval'.
%%
%% 7. Having sent all messages wait for 10 seconds before sending presence
%% `unavailable' and disconnect.
%%
%% === MAM readers ===
%%
%% 3. Send presence `available'.
%%
%% 4. In a loop, read message archive divided into chunks based on a timestamp.
%% Set filter for iq and message stanzas, query messages from MAM from the last
%% timestamp and receive results. Log received messages and update corresponding
%% metrics. After the end of messages from the message archive set filtering
%% back to messages only and wait for `mam_read_archive_interval'.
%%
%% 5. Continue execution in the loop from point 4.
%%
%% == Metrics exposed by this scenario: ==
%%
%% === Counters: ===
%% - messages_sent - it is updated with every sent message by the
%% `amoc_xmpp_handlers:measure_sent_messages/0' handler.
%%
%% - mam_lookups - updated with every successful MAM lookup.
%%
%% - mam_failed_lookups - updated with every failed MAM lookup.
%%
%% === Times: ===
%% - message_ttd - it is updated with every received message by the
%% `amoc_xmpp_handlers:measure_ttd/3' handler.
%%
%% - mam_lookup_response_time - updated with every successful MAM lookup.
%%
%% @end
%%==============================================================================
-module(mongoose_mam).
-behaviour(amoc_scenario).
-include_lib("exml/include/exml.hrl").
-include_lib("kernel/include/logger.hrl").
-define(SLEEP_TIME_AFTER_SCENARIO, 10000). %% wait 10s after scenario before disconnecting
-define(V(X), fun amoc_config_validation:X/1).
-required_variable([
#{name => message_interval, default_value => 180, verification => ?V(nonnegative_integer),
description => "Wait time between sent messages (seconds, def: 180)"},
#{name => number_of_prev_users, default_value => 1, verification => ?V(nonnegative_integer),
description => "Number of users before current one to use (def: 1)"},
#{name => number_of_next_users, default_value => 1, verification => ?V(nonnegative_integer),
description => "Number of users after current one to use (def: 1)"},
#{name => number_of_send_message_repeats, default_value => 73, verification => ?V(positive_integer),
description => "Number of send message (to all neighours) repeats (def: 73)"},
#{name => mam_reader_sessions_indicator, default_value => 53, verification => ?V(positive_integer),
description => "How often a MAM reader is created, like every 53th session (def: 53)"},
#{name => mam_read_archive_interval, default_value => 60, verification => ?V(positive_integer),
description => "Wait time between reads from MAM for each reader (seconds, def: 60)"},
#{name => mim_host, default_value => <<"localhost">>, verification => ?V(binary),
description => "The virtual host served by the server (def: <<\"localhost\">>)"}
]).
%% Wait at most 5s for MAM responses (IQ or message)
-define(MAM_STANZAS_TIMEOUT, 5000).
-export([start/1]).
-export([init/0]).
-define(NS_MAM, <<"urn:xmpp:mam:2">>).
-define(MAM_LOOKUPS_CT, mam_lookups).
-define(MAM_FAILED_LOOKUPS_CT, mam_failed_lookups).
-define(MAM_LOOKUP_RESP_TIME, mam_lookup_response_time).
-type binjid() :: binary().
%% Declare every counter and timer this scenario reports, so the metrics
%% exist before any session updates them.
-spec init() -> ok.
init() ->
amoc_metrics:init(counters, messages_sent),
amoc_metrics:init(counters, ?MAM_LOOKUPS_CT),
amoc_metrics:init(counters, ?MAM_FAILED_LOOKUPS_CT),
amoc_metrics:init(times, message_ttd),
amoc_metrics:init(times, ?MAM_LOOKUP_RESP_TIME),
ok.
%% Entry point for one simulated user: connect (exiting on failure), pick a
%% role from the user id, run the role until it returns, then linger for
%% ?SLEEP_TIME_AFTER_SCENARIO before going unavailable and disconnecting.
-spec start(amoc_scenario:user_id()) -> any().
start(MyId) ->
ExtraSpec = [{server, amoc_config:get(mim_host)}, {socket_opts, socket_opts()}] ++
amoc_xmpp:pick_server([[{host, "127.0.0.1"}]]) ++
send_and_recv_escalus_handlers(),
{ok, Client, _} = amoc_xmpp:connect_or_exit(MyId, ExtraSpec),
MAMReaderIndicator = amoc_config:get(mam_reader_sessions_indicator),
SessionIndicator = session_indicator(MyId, MAMReaderIndicator),
do(SessionIndicator, MyId, Client),
%% mam_reader sessions loop forever, so only senders reach this point.
timer:sleep(?SLEEP_TIME_AFTER_SCENARIO),
escalus_session:send_presence_unavailable(Client),
escalus_connection:stop(Client),
ok.
%% Decide the role for a user: every Nth session (N being the configured
%% mam_reader_sessions_indicator) becomes a MAM reader, the rest are
%% plain message senders.
session_indicator(UserId, Every) when UserId rem Every =:= 0 ->
    mam_reader;
session_indicator(_UserId, _Every) ->
    sender.
%% Role body. Senders message their neighbours in a loop and eventually
%% return; MAM readers poll the archive forever and never return.
do(sender, MyId, Client) ->
%% We allow only message stanzas to be delivered to the client process,
%% there is escalus handler set for such messages so they'll be processed by the handler
escalus_connection:set_filter_predicate(Client, fun escalus_pred:is_message/1),
escalus_session:send_presence_available(Client),
escalus_connection:wait(Client, 5000),
Prev = amoc_config:get(number_of_prev_users),
Next = amoc_config:get(number_of_next_users),
%% Neighbours are the Prev users below and Next users above MyId,
%% clamped at user id 1 and excluding MyId itself.
NeighbourIds = lists:delete(MyId, lists:seq(max(1, MyId - Prev),
MyId + Next)),
MessageInterval = amoc_config:get(message_interval),
send_messages_many_times(Client, timer:seconds(MessageInterval), NeighbourIds);
do(mam_reader, _MyId, Client) ->
escalus_session:send_presence_available(Client),
read_archive_forever(Client, erlang:timestamp()).
%%%%%
%% Scenario helpers
%%%%%
-spec read_archive_forever(escalus:client(), erlang:timestamp()) -> no_return().
%% Poll the message archive in an endless loop. The next iteration's
%% "since" timestamp is captured *before* the current read, so messages
%% arriving while we read are picked up next time around.
read_archive_forever(Client, Timestamp) ->
CurrentTimestamp = erlang:timestamp(),
read_messages_from_archive_since_timestamp(Client, Timestamp, ?MAM_STANZAS_TIMEOUT),
Interval = amoc_config:get(mam_read_archive_interval),
escalus_connection:wait(Client, timer:seconds(Interval)),
read_archive_forever(Client, CurrentTimestamp).
-spec send_messages_many_times(escalus:client(), timeout(), [binjid()]) -> ok.
%% Run the full round of neighbour messages number_of_send_message_repeats
%% times.
send_messages_many_times(Client, MessageInterval, NeighbourIds) ->
    Repeats = amoc_config:get(number_of_send_message_repeats),
    lists:foreach(
      fun(_Round) ->
              send_messages_to_neighbors(Client, NeighbourIds, MessageInterval)
      end,
      lists:seq(1, Repeats)).

-spec send_messages_to_neighbors(escalus:client(), [amoc_scenario:user_id()], timeout()) -> list().
%% Send one message to each neighbour in turn, pausing SleepTime after each.
send_messages_to_neighbors(Client, TargetIds, SleepTime) ->
    lists:map(fun(TargetId) -> send_message(Client, TargetId, SleepTime) end,
              TargetIds).
-spec send_message(escalus:client(), amoc_scenario:user_id(), timeout()) -> ok.
%% Send a single chat message with a random base64 payload (80-120 random
%% bytes) to the given user id, then wait SleepTime before returning.
send_message(Client, ToId, SleepTime) ->
Body = base64:encode(<<"Message_random_", (crypto:strong_rand_bytes(80 + rand:uniform(40)))/binary>>),
Msg = escalus_stanza:chat_to_with_id_and_timestamp(amoc_xmpp_users:make_jid(ToId), Body),
escalus_connection:send(Client, Msg),
escalus_connection:wait(Client, SleepTime).
-spec send_and_recv_escalus_handlers() -> [{atom(), any()}].
%% Escalus connection options wiring the metric handlers: received messages
%% update the TTD timer, sent messages bump the messages_sent counter.
send_and_recv_escalus_handlers() ->
[{received_stanza_handlers,
amoc_xmpp_handlers:make_stanza_handlers(
[{fun escalus_pred:is_message/1, fun amoc_xmpp_handlers:measure_ttd/3}])},
{sent_stanza_handlers,
amoc_xmpp_handlers:make_stanza_handlers(
[{fun escalus_pred:is_message/1, fun amoc_xmpp_handlers:measure_sent_messages/0}])}
].
%%%%%
%% MAM helpers
%%%%%
-spec read_messages_from_archive_since_timestamp(
Client :: escalus:client(),
Timestamp :: erlang:timestamp(),
Timeout :: timer:time()
) -> any().
%% Perform one archive lookup and translate the outcome into metrics:
%% successful lookups record the response time and bump the success
%% counter; timeouts and crashes are logged and bump the failure counter.
%% The old-style 'catch' converts exits/errors into {'EXIT', _} terms.
read_messages_from_archive_since_timestamp(Client, Timestamp, Timeout) ->
case catch do_read_messages_from_archive_since_timestamp(Client,
Timestamp,
Timeout) of
{timeout, What} ->
?LOG_WARNING("Failed to read archive timeout=~p", [What]),
amoc_metrics:update_counter(?MAM_FAILED_LOOKUPS_CT, 1);
{'EXIT', What} ->
?LOG_WARNING("Failed to read archive error=~p", [What]),
amoc_metrics:update_counter(?MAM_FAILED_LOOKUPS_CT, 1);
ResponseTimeMicros when is_integer(ResponseTimeMicros) ->
amoc_metrics:update_time(?MAM_LOOKUP_RESP_TIME, ResponseTimeMicros),
amoc_metrics:update_counter(?MAM_LOOKUPS_CT, 1)
end.
-spec do_read_messages_from_archive_since_timestamp(
Client :: escalus:client(),
Timestamp :: erlang:timestamp(),
Timeout :: timer:time()
) -> ResponseTimeMicroseconds :: integer() |
no_return(). % escalus throws an exception after Timeout
%% Issue one MAM query starting at Timestamp and drain the results, timing
%% how long the drain takes. The stanza filter is widened to IQs + archived
%% messages for the duration of the lookup and restored to messages-only
%% afterwards.
do_read_messages_from_archive_since_timestamp(Client, Timestamp, Timeout) ->
filter_out_all_but_mam_archived_messages_and_iqs(Client),
IQSet = mam_archive_query_since_timestamp(<<"query1">>, Timestamp),
escalus_connection:send(Client, IQSet),
{Micros, _} = timer:tc(
fun() ->
receive_mam_messages_until_end(Client, Timeout)
end),
escalus_connection:set_filter_predicate(Client, fun escalus_pred:is_message/1),
Micros.
-spec receive_mam_messages_until_end(
Client :: escalus_connection:client(),
Timeout :: timer:time()) -> ok | no_return().
%% Receive stanzas until the MAM "fin" (complete) marker arrives; archived
%% messages are only logged. escalus throws if nothing arrives within
%% Timeout.
receive_mam_messages_until_end(Client, Timeout) ->
Stanza = escalus_connection:get_stanza(Client, mam_message_timeout, Timeout),
?LOG_DEBUG("Stanza = ~p", [Stanza]),
case is_mam_archived_message(Stanza) of
false ->
maybe_mam_fin_message(Stanza, Client, Timeout);
true ->
?LOG_DEBUG("Received MAM archived message=~p", [Stanza]),
receive_mam_messages_until_end(Client, Timeout)
end.
-spec maybe_mam_fin_message(
Stanza :: exml:element(),
Client :: escalus_connection:client(),
Timeout :: timer:time()) -> ok | no_return().
%% A non-archived stanza either terminates the drain (the MAM fin/complete
%% marker) or is ignored and the drain continues.
maybe_mam_fin_message(Stanza, Client, Timeout) ->
case is_mam_fin_complete_message(Stanza) of
true ->
?LOG_DEBUG("Received MAM result stanza=~p", [Stanza]),
ok;
false ->
?LOG_DEBUG("Received stanza=~p when waiting for MAM archived message ~n", [Stanza]),
receive_mam_messages_until_end(Client, Timeout)
end.
%% Render an erlang:timestamp() as an ISO 8601 UTC binary,
%% e.g. <<"2017-07-14T02:40:00Z">>. Sub-second precision is discarded.
timestamp_to_isotime({_, _, _} = Timestamp) ->
    {{Year, Month, Day}, {Hour, Minute, Second}} =
        calendar:now_to_datetime(Timestamp),
    iolist_to_binary(
      io_lib:format("~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0BZ",
                    [Year, Month, Day, Hour, Minute, Second])).
%%%%%%
%% Escalus helpers
%%%%%
-spec filter_out_all_but_mam_archived_messages_and_iqs(escalus:client()) -> ok.
%% During a MAM lookup we need both the archived-message carriers and the
%% IQ result/fin stanzas; everything else is filtered out.
filter_out_all_but_mam_archived_messages_and_iqs(Client) ->
escalus_connection:set_filter_predicate(
Client,
fun(Stanza) ->
is_mam_archived_message(Stanza) orelse
escalus_pred:is_iq(Stanza)
end).
%%%%%%
%% User helpers
%%%%%
-spec socket_opts() -> [gen_tcp:option()].
%% TCP options for the XMPP connection.
socket_opts() ->
[binary,
{reuseaddr, false},
{nodelay, true}].
%%%%
%% XMPP helpers
%%%%%
%% Build the IQ set that asks the archive for all messages since Timestamp.
%% NOTE(review): QueryId is only validated by the guard and never placed on
%% the stanza — confirm whether a queryid attribute was intended.
mam_archive_query_since_timestamp(QueryId, Timestamp) when is_binary(QueryId) ->
escalus_stanza:iq_set(?NS_MAM, [mam_lookup_after_date_xml(Timestamp)]).
%% Build the jabber:x:data form carried in a MAM query: a hidden FORM_TYPE
%% field (the MAM namespace) plus a 'start' field holding the ISO time.
mam_lookup_after_date_xml(Timestamp) ->
IsoTime = timestamp_to_isotime(Timestamp),
TimeValueEl = value_xml(IsoTime),
MamVsnValueEl = value_xml(?NS_MAM),
QueryFields =
[
field_xml(
[{<<"var">>, <<"FORM_TYPE">>},
{<<"type">>, <<"hidden">>}],
[MamVsnValueEl]),
field_xml(
[{<<"var">>, <<"start">>}],
[TimeValueEl])
],
#xmlel{name = <<"x">>,
attrs = [
{<<"xmlns">>, <<"jabber:x:data">>},
{<<"type">>, <<"submit">>}
],
children = QueryFields
}.
%% A <field> element with the given attributes and children.
field_xml(Attrs, Children) ->
#xmlel{name = <<"field">>,
attrs = Attrs,
children = Children}.
%% A <value> element wrapping Data as cdata.
value_xml(Data) ->
#xmlel{name = <<"value">>,
children = [
#xmlcdata{
content = Data
}
]}.
%% True if Stanza is a <message/> whose <result/> child is in the MAM
%% namespace, i.e. an archived message delivered by the MAM service.
-spec is_mam_archived_message(exml:element()) -> boolean().
is_mam_archived_message(#xmlel{name = <<"message">>} = Stanza) ->
    ResultNS = exml_query:path(Stanza, [{element, <<"result">>}, {attr, <<"xmlns">>}]),
    ?NS_MAM == ResultNS;
is_mam_archived_message(_Other) ->
    false.
%% True if Stanza contains a MAM <fin/> element flagged complete="true",
%% which terminates a MAM result stream.
%% Fix: the final line carried trailing dataset-extraction junk after
%% "end.", which is a syntax error; it has been removed.
-spec is_mam_fin_complete_message(exml:element()) -> boolean().
is_mam_fin_complete_message(#xmlel{} = Stanza) ->
    case exml_query:path(Stanza, [{element, <<"fin">>}]) of
        undefined ->
            false;
        FinEl ->
            exml_query:attr(FinEl, <<"xmlns">>) == ?NS_MAM andalso
                exml_query:attr(FinEl, <<"complete">>) == <<"true">>
    end.
%% @author go717franciswang [https://github.com/go717franciswang]
%% @doc Peer to peer exercise in chapter 15 of the Programming Erlang book
%% @reference <a href="http://oreilly.com/catalog/9780596518189/">Erlang Programming</a>
-module(peer).
-export([start/0, connect/1, send/1, stop/0]).
-export([handle_call/3, handle_cast/2, init/1, terminate/2]).
-export([wait_connect/1, get_request/1]).
-behavior(gen_server).
%% @doc Start the peer to peer server, and create the listener socket.
%% Start the locally registered `peer' gen_server; any start_link failure
%% is reported as {error, already_started}.
-spec start() -> ok | {error, already_started}.
start() ->
    StartResult = gen_server:start_link({local, peer}, ?MODULE, [], []),
    case StartResult of
        {ok, _Pid} -> ok;
        _Other -> {error, already_started}
    end.
%% @hidden
init(_) ->
    %% Listen on TCP port 1234 in passive mode: the reader process pulls
    %% data explicitly with gen_tcp:recv/2.
    {ok, ListenSocket} = gen_tcp:listen(1234, [binary, {active, false}]),
    %% Run the accept loop in its own process so init/1 returns promptly.
    spawn(?MODULE, wait_connect, [ListenSocket]),
    %% State shape: {ListenSocket, SendSocket | false, RcvSocket | false}.
    {ok, {ListenSocket, false, false}}.
%% @hidden
%% Accept loop: for each inbound connection, spawn a dedicated reader
%% process, hand it socket ownership and notify the server; stops when
%% the listen socket is closed.
wait_connect(ListenSocket) ->
    case gen_tcp:accept(ListenSocket) of
        {ok, Socket} ->
            Pid = spawn(?MODULE, get_request, [Socket]),
            %% NOTE(review): get_request/1 may call gen_tcp:recv/2 before
            %% controlling_process/2 completes -- confirm the ordering is
            %% safe for passive-mode sockets.
            gen_tcp:controlling_process(Socket, Pid),
            gen_server:cast(peer, {connected, Socket}),
            wait_connect(ListenSocket);
        {error, closed} -> ok
    end.
%% @hidden
%% Reader loop for one accepted connection: blocks on gen_tcp:recv/2 and
%% prints each received packet until the peer closes the connection.
%% Bug fix: the original called gen_server:cast(disconnected) --
%% gen_server:cast/1 does not exist, so the reader crashed with `undef'
%% instead of notifying the server. The cast now targets the registered
%% `peer' server, which handles the `disconnected' message.
get_request(Socket) ->
    case gen_tcp:recv(Socket, 0) of
        {ok, Binary} ->
            io:format("Got message: ~s~n", [binary_to_list(Binary)]),
            get_request(Socket);
        {error, closed} ->
            gen_server:cast(peer, disconnected),
            io:format("Connection to client is closed~n");
        Other ->
            io:format("Unknown msg: ~w~n", [Other])
    end.
%% @doc Stop the peer to peer server, and close all sockets.
%% Stop the registered `peer' server if it is running; terminate/2 closes
%% all sockets on the way down.
-spec stop() -> ok | {error, not_started}.
stop() ->
    case whereis(peer) of
        undefined ->
            {error, not_started};
        _Pid ->
            gen_server:cast(peer, stop),
            ok
    end.
%% @doc Connect to a remote peer by IP address.
%% Synchronous request; the actual gen_tcp:connect/3 runs in handle_call
%% on the server side and stores the socket in the server state.
-spec connect(IpAddress::string()) -> ok | {error, Reason::atom()}.
connect(Ip) -> gen_server:call(peer, {connect, Ip}).
%% @doc Send a message to the connected remote peer.
%% Synchronous request; handle_call writes String to the stored send
%% socket, or returns {error, not_connected} if connect/1 was never run.
-spec send(String::string()) -> ok | {error, not_connected}.
send(String) -> gen_server:call(peer, {send, String}).
%% @hidden
handle_cast(stop, LoopData) ->
    {stop, normal, LoopData};
%% A reader process noticed its connection closed: forget the receive
%% socket (state is {ListenSocket, SendSocket, RcvSocket}).
handle_cast(disconnected, {ListenSocket, SendSocket, _}) ->
    {noreply, {ListenSocket, SendSocket, false}};
%% The acceptor handed over a new inbound connection: remember it.
handle_cast({connected, Socket}, {ListenSocket, SendSocket, _}) ->
    {noreply, {ListenSocket, SendSocket, Socket}}.
%% @hidden
%% Close every socket slot held in the state on shutdown; unused slots
%% hold `false' and are ignored by closeSocket/1.
terminate(_Reason, {ListenSocket, SendSocket, RcvSocket}) ->
    lists:foreach(fun closeSocket/1, [ListenSocket, SendSocket, RcvSocket]).
%% Close one socket slot from the server state; `false' marks an unused
%% slot. (camelCase name kept for compatibility with existing callers.)
closeSocket(false) -> ok;
closeSocket(Socket) -> gen_tcp:close(Socket).
%% @hidden
%% Fix: the final line carried trailing dataset-extraction junk after
%% "end.", which is a syntax error; it has been removed.
handle_call({connect, Ip}, _From, {ListenSocket, _, RcvSocket} = LoopData) ->
    %% Open an outgoing connection; on success the new socket replaces
    %% any previous send socket in the state.
    case gen_tcp:connect(Ip, 1234, [binary, {active, false}]) of
        {ok, Socket} -> {reply, ok, {ListenSocket, Socket, RcvSocket}};
        {error, Reason} -> {reply, {error, Reason}, LoopData}
    end;
handle_call({send, String}, _From, {_, SendSocket, _} = LoopData) ->
    case SendSocket of
        false ->
            {reply, {error, not_connected}, LoopData};
        _ ->
            %% NOTE(review): the result of gen_tcp:send/2 is ignored, so
            %% a failed send still replies ok -- confirm this is intended.
            gen_tcp:send(SendSocket, String),
            {reply, ok, LoopData}
    end.
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% PRIORITY HANDLING AND PRIORITY CALCULATION
%%
%% Handling of ready nodes and priorities.
%% Priorities are mainly from the critical path. More priorities are added.
%% * One version is adding priorities just depending on the instr, so
%% for example loads get higher priority than stores, and ordered
%% after reg's and offset for better cache performance.
%% * The other version gives higher priority to a node that adds more new
%% nodes to the ready list. This one is maybe not so effectively
%% implemented, but was added too late for smarter solutions.
%% One version is commented away
-module(hipe_ultra_prio).
-export([init_ready/2,
init_instr_prio/2,
%% initial_ready_set/4,
next_ready/7,
add_ready_nodes/2,
insert_node/3
]).
-include("../sparc/hipe_sparc.hrl").
% At first, only nodes with no predecessors are selected.
% - if R is empty, there is an error (unless BB itself is empty)
%% Arguments : Size - size of ready-array
%% Preds - array with number of predecessors for each node
%% Returns : An array with list of ready-nodes for each cycle.
%% Create the per-cycle ready array (Size slots, each an empty list) and
%% seed cycle 0 with every node that has no predecessors.
init_ready(Size, Preds) ->
    NrNodes = hipe_vectors:size(Preds),
    EmptyReady = hipe_vectors:new(Size, []),
    FirstCycle = initial_ready_set(1, NrNodes, Preds, []),
    hipe_vectors:set(EmptyReady, 0, FirstCycle).
%% Instruction priorities are the critical-path lengths through the DAG.
init_instr_prio(N, DAG) ->
    critical_path(N, DAG).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : initial_ready_set
%% Argument : M - current node-index
%% N - where to stop
%% Preds - array with number of predecessors for each node
%% Ready - list with ready-nodes
%% Returns : Ready - list with ready-nodes
%% Description : Finds all nodes with no predecessors and adds them to ready.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Walk node indices M..N and collect every node whose predecessor count
%% is zero into Ready; a positive count means the node is not yet ready.
initial_ready_set(M, N, _Preds, Ready) when M > N ->
    Ready;
initial_ready_set(M, N, Preds, Ready) ->
    case hipe_vectors:get(Preds, M-1) of
        0 ->
            initial_ready_set(M+1, N, Preds, [M|Ready]);
        Count when is_integer(Count), Count > 0 ->
            initial_ready_set(M+1, N, Preds, Ready)
    end.
%% The following handles the nodes ready to schedule:
%% 1. select the ready queue of given cycle
%% 2. if queue empty, return none
%% 3. otherwise, remove entry with highest priority
%% and return {next,Highest_Prio,NewReady}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : next_ready
%% Argument : C - current cycle
%% Ready - array with ready nodes
%% Prio - array with cpath-priorities for all nodes
%% Nodes - indexed list [{N, Instr}]
%% Returns : none / {next,Highest_Prio,NewReady}
%% Description : 1. select the ready queue of given cycle
%% 2. if queue empty, return none
%% 3. otherwise, remove entry with highest priority
%% and return {next,Highest_Prio,NewReady} where Highest_Prio
%% = Id of instr and NewReady = updated ready-array.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Pop the highest-priority instruction from cycle C's ready queue.
%% Returns `none' when the queue is empty, otherwise {next, BestId,
%% UpdatedReady} where UpdatedReady no longer contains BestId.
next_ready(C, Ready, Prio, Nodes, DAG, Preds, Earl) ->
    case hipe_vectors:get(Ready, C-1) of
        [] ->
            none;
        Instrs ->
            {Best, Rest} =
                get_best_instr(Instrs, Prio, Nodes, DAG, Preds, Earl, C),
            {next, Best, hipe_vectors:set(Ready, C-1, Rest)}
    end.
% next_ready(C,Ready,Prio,Nodes) ->
% Curr = hipe_vectors:get(Ready,C-1),
% case Curr of
% [] ->
% none;
% Instrs ->
% {BestInstr,RestInstrs} = get_best_instr(Instrs, Prio, Nodes),
% {next,BestInstr,hipe_vectors:set(Ready,C-1,RestInstrs)}
% end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : get_best_instr
%% Argument : Instrs - list of node-id's
%% Prio - array with cpath-priorities for the nodes
%% Nodes - indexed list [{Id, Instr}]
%% Returns : {BestSoFar, Rest} - Id of best instr and the rest of id's
%% Description : Returns the id of the instr that is the best choice.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Select the instruction id with the highest priority from a non-empty
%% ready list. Returns {BestId, Others} where Others holds the remaining
%% ids (order unspecified, as in a selection scan).
get_best_instr([First|Others], Prio, Nodes, DAG, Preds, Earl, C) ->
    get_best_instr(Others, [], First, Prio, Nodes, DAG, Preds, Earl, C).

get_best_instr([], Skipped, Best, _Prio, _Nodes, _DAG, _Preds, _Earl, _C) ->
    {Best, Skipped};
get_best_instr([Candidate|Rest], Skipped, Best, Prio, Nodes,
               DAG, Preds, Earl, C) ->
    case better(Candidate, Best, Prio, Nodes, DAG, Preds, Earl, C) of
        true ->
            get_best_instr(Rest, [Best|Skipped],
                           Candidate, Prio, Nodes, DAG, Preds, Earl, C);
        false ->
            get_best_instr(Rest, [Candidate|Skipped], Best, Prio,
                           Nodes, DAG, Preds, Earl, C)
    end.
% get_best_instr([Instr|Instrs], Prio, Nodes) ->
% get_best_instr(Instrs, [], Instr, Prio, Nodes).
% get_best_instr([], Rest, BestSoFar, Prio, Nodes) -> {BestSoFar, Rest};
% get_best_instr([Instr|Instrs], PassedInstrs, BestSoFar, Prio, Nodes) ->
% case better(Instr, BestSoFar, Prio, Nodes) of
% true ->
% get_best_instr(Instrs, [BestSoFar|PassedInstrs],
% Instr, Prio, Nodes);
% false ->
% get_best_instr(Instrs, [Instr|PassedInstrs],BestSoFar, Prio, Nodes)
% end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : better
%% Argument : Instr1 - Id of instr 1
%% Instr2 - Id of instr 2
%% Prio - array with cpath-priorities for the nodes
%% Nodes - indexed list [{Id, Instr}]
%% Returns : true if Instr1 has higher priority than Instr2
%% Description : Checks if Instr1 is a better choice than Instr2 for scheduling
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% True if Instr1 should be scheduled before Instr2: compares their
%% priority lists (built by priority/7) lexicographically.
better(Instr1, Instr2, Prio, Nodes, DAG, Preds, Earl, C) ->
    better_hlp(priority(Instr1, Prio, Nodes, DAG, Preds, Earl, C),
               priority(Instr2, Prio, Nodes, DAG, Preds, Earl, C)).
%% Lexicographic "greater than" on priority lists; a longer list wins
%% when it is a strict extension of the other. Fix: the original used
%% non-short-circuit 'or'/'and', so the tail comparison was evaluated
%% even when the heads already decided the result; orelse/andalso keep
%% the same results while short-circuiting.
better_hlp([], []) -> false;
better_hlp([], [_|_]) -> false;
better_hlp([_|_], []) -> true;
better_hlp([X|Xs], [Y|Ys]) ->
    (X > Y) orelse ((X =:= Y) andalso better_hlp(Xs, Ys)).
%%
%% Returns the instr corresponding to id
%%
%% Return the instruction whose id is InstrId from the indexed list
%% [{Id, Instr}]; crashes (function_clause) if the id is absent.
get_instr(InstrId, [{InstrId, Instr} | _Rest]) ->
    Instr;
get_instr(InstrId, [_Other | Rest]) ->
    get_instr(InstrId, Rest).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : priority
%% Argument : InstrId - Id
%% Prio - array with cpath-priorities for the nodes
%% Nodes - indexed list [{Id, Instr}]
%% Returns : PrioList - list of priorities [MostSignificant, LessSign, ...]
%% Description : Returns a list of priorities where the first element is the
%% cpath-priority and the rest are added depending on what kind
%% of instr it is. Used to order loads/stores sequentially and
%% there is possibility to add whatever stuff...
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
priority(InstrId, Prio, Nodes, DAG, Preds, Earl, C) ->
    %% Simulate scheduling InstrId now to learn how many nodes would
    %% become ready; that count is the second priority component.
    {ReadyNodes,_,_,_} = hipe_schedule:delete_node(C,InstrId,DAG,Preds,Earl),
    Instr = get_instr(InstrId, Nodes),
    %% Most significant component: critical-path length of the node.
    Prio1 = hipe_vectors:get(Prio, InstrId-1),
    Prio2 = length(ReadyNodes),
    %% Remaining components rank instruction kinds: load_atom/move and
    %% immediate-offset loads (3) over stores (2) over register-offset
    %% accesses (1) over everything else (0). Register number and offset
    %% are negated so smaller values rank higher under better_hlp's ">",
    %% ordering memory accesses for cache locality.
    PrioRest =
        case Instr of
            #load_atom{} ->
                [3];
            #move{} ->
                [3];
            #load{} ->
                Src = hipe_sparc:load_src(Instr),
                Off = hipe_sparc:load_off(Instr),
                case hipe_sparc:is_reg(Off) of
                    false -> [3,
                              -(hipe_sparc:reg_nr(Src)),
                              -(hipe_sparc:imm_value(Off))];
                    true -> [1]
                end;
            #store{} ->
                Src = hipe_sparc:store_dest(Instr),
                Off = hipe_sparc:store_off(Instr),
                case hipe_sparc:is_reg(Off) of
                    false -> [2,
                              -(hipe_sparc:reg_nr(Src)),
                              -(hipe_sparc:imm_value(Off))];
                    true -> [1]
                end;
            _ -> [0]
        end,
    [Prio1,Prio2|PrioRest].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : add_ready_nodes
%% Argument : Nodes - list of [{Cycle,Id}]
%% Ready - array of ready nodes for all cycles
%% Returns : NewReady - updated ready-array
%% Description : Gets a list of instrs and adds them to the ready-array
%% to the corresponding cycle.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Insert each {Cycle, Id} pair into the ready array at its cycle.
add_ready_nodes(Nodes, Ready) ->
    lists:foldl(fun({C, I}, Acc) -> insert_node(C, I, Acc) end, Ready, Nodes).
%% Prepend instruction I to the ready list of cycle C (cycles are
%% 1-based, the vector index is 0-based).
insert_node(C, I, Ready) ->
    Old = hipe_vectors:get(Ready, C-1),
    hipe_vectors:set(Ready, C-1, [I|Old]).
%%
%% Computes the latency for the "most expensive" way through the graph
%% for all nodes. Returns an array of priorities for all nodes.
%%
%% Compute the critical-path priority of every node 1..N. The priority
%% array starts at -1 (unvisited) and is filled by cpath/3 on demand.
critical_path(N, DAG) ->
    critical_path(1, N, DAG, hipe_vectors:new(N, -1)).

critical_path(M, N, _DAG, Prio) when M > N ->
    Prio;
critical_path(M, N, DAG, Prio) ->
    critical_path(M+1, N, DAG, cpath(M, DAG, Prio)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Function : cpath
%% Argument : M - current node id
%% DAG - the dependence graph
%% Prio - array of priorities for all nodes
%% Returns : Prio - updated prio array
%% Description : If node has prio -1, it has not been visited
%% - otherwise, compute priority as max of priorities of
%% successors (+ latency)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Memoized critical-path computation for node M: -1 marks an unvisited
%% node whose priority must still be computed; anything else is cached.
cpath(M, DAG, Prio) ->
    case hipe_vectors:get(Prio, M-1) of
        -1 -> cpath_node(M, DAG, Prio);
        _Known -> Prio
    end.
%% Compute node N's priority as the maximum over its successors of
%% (successor priority + edge latency), then record it in the array.
cpath_node(N, DAG, Prio) ->
    SuccL = dag_succ(DAG, N),
    {Max, NewPrio} = cpath_succ(SuccL, DAG, Prio),
    hipe_vectors:set(NewPrio, N-1, Max).
%% Fold over the successor list [{Latency, SuccId}], recursively making
%% sure each successor's priority is computed, and track the maximum of
%% (successor priority + latency). Returns {Max, UpdatedPrio}.
cpath_succ(SuccL, DAG, Prio) ->
    cpath_succ(SuccL, DAG, Prio, 0).

cpath_succ([], _DAG, Prio, Max) ->
    {Max, Prio};
cpath_succ([{Lat, Succ} | Rest], DAG, Prio0, Max0) ->
    Prio1 = cpath(Succ, DAG, Prio0),
    SuccPrio = hipe_vectors:get(Prio1, Succ - 1),
    cpath_succ(Rest, DAG, Prio1, erlang:max(SuccPrio + Lat, Max0)).
%% Return the successor list ([{Latency, SuccId}]) of node N in the DAG.
%% Fix: the final line carried trailing dataset-extraction junk after the
%% closing period, which is a syntax error; it has been removed.
dag_succ(DAG, N) when is_integer(N) ->
    hipe_vectors:get(DAG, N-1).
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2021-2022 VMware, Inc. or its affiliates. All rights reserved.
%%
%% @doc
%% Khepri low-level API.
%%
%% This module exposes the "low-level" API to the Khepri database and state
%% machine. All functions in {@link khepri} are built on top of this module.
%%
%% The API is divided into two parts:
%% <ol>
%% <li>Functions to manipulate a simple set of tree nodes directly.</li>
%% <li>Functions to perform transactional queries and updates.</li>
%% </ol>
%%
%% == The store ID ==
%%
%% All functions require a store ID ({@link store_id/0}). The store ID
%% corresponds to the name of the Ra cluster Khepri was started with.
%%
%% See {@link khepri} for more details about Ra systems and clusters.
%%
%% == Direct manipulation on tree nodes ==
%%
%% The API provides the following three functions:
%% <ul>
%% <li>{@link get/2} and {@link get/3}: returns all tree node matching the given
%% path pattern.</li>
%% <li>{@link put/3} and {@link put/4}: updates a single specific tree node.</li>
%% <li>{@link delete/2}: removes all tree node matching the given path
%% pattern.</li>
%% </ul>
%%
%% All functions take a native path pattern. They do not accept Unix-like
%% paths.
%%
%% All functions return one of these tuples:
%% <ul>
%% <li>`{ok, NodePropsMap}' where `NodePropsMap' is a {@link
%% node_props_map/0}:
%% <ul>
%% <li>The map returned by {@link get/2}, {@link get/3} and {@link delete/2}
%% contains one entry per node matching the path pattern.</li>
%% <li>The map returned by {@link put/3} and {@link put/4} contains a single
%% entry if the modified node existed before the update, or no entry if it
%% didn't.</li>
%% </ul></li>
%% <li>`{error, Reason}' if an error occurred. In that case, no modifications to
%% the tree was performed.</li>
%% </ul>
%%
%% == Transactional queries and updates ==
%%
%% Transactions are handled by {@link transaction/2} and {@link
%% transaction/3}.
%%
%% Both functions take an anonymous function. See {@link khepri_tx} for more
%% details about those functions and in particular their restrictions.
%%
%% The return value is whatever the anonymous function returns if it succeeded
%% or the reason why it aborted, similar to what {@link mnesia:transaction/1}
%% returns.
-module(khepri_machine).
-behaviour(ra_machine).
-include_lib("kernel/include/logger.hrl").
-include_lib("stdlib/include/assert.hrl").
-include("include/khepri.hrl").
-include("src/internal.hrl").
-include("src/khepri_machine.hrl").
-export([put/3, put/4,
get/2, get/3,
delete/2,
transaction/2,
transaction/3]).
-export([get_keep_while_conds_state/1]).
-export([init/1,
apply/3]).
%% For internal user only.
-export([find_matching_nodes/3,
insert_or_update_node/4,
delete_matching_nodes/2]).
-ifdef(TEST).
-export([are_keep_while_conditions_met/2,
get_root/1,
get_keep_while_conds/1,
get_keep_while_conds_revidx/1]).
-endif.
-compile({no_auto_import, [apply/3]}).
-type data() :: any().
%% Data stored in a node's payload.
-type payload_version() :: pos_integer().
%% Number of changes made to the payload of a node.
%%
%% The payload version starts at 1 when a node is created. It is increased by 1
%% each time the payload is added, modified or removed.
-type child_list_version() :: pos_integer().
%% Number of changes made to the list of child nodes of a node (child nodes
%% added or removed).
%%
%% The child list version starts at 1 when a node is created. It is increased
%% by 1 each time a child is added or removed. Changes made to existing nodes
%% are not reflected in this version.
-type child_list_length() :: non_neg_integer().
%% Number of direct child nodes under a tree node.
-type node_props() ::
#{data => data(),
payload_version => payload_version(),
child_list_version => child_list_version(),
child_list_length => child_list_length(),
child_nodes => #{khepri_path:node_id() => node_props()}}.
%% Structure used to return properties, payload and child nodes for a specific
%% node.
%%
%% <ul>
%% <li>Payload version, child list version, and child list count are always
%% included in the structure. The reason the type spec does not make them
%% mandatory is for {@link khepri_utils:flat_struct_to_tree/1} which may
%% construct fake node props without them.</li>
%% <li>Data is only included if there is data in the node's payload. Absence of
%% data is represented as no `data' entry in this structure.</li>
%% <li>Child nodes are only included if requested.</li>
%% </ul>
-type node_props_map() :: #{khepri_path:path() => node_props()}.
%% Structure used to return a map of nodes and their associated properties,
%% payload and child nodes.
%%
%% This structure is used in the return value of all commands and queries.
-type result() :: khepri:ok(node_props_map()) |
khepri:error().
%% Return value of a command or query.
-type stat() :: #{payload_version := payload_version(),
child_list_version := child_list_version()}.
%% Stats attached to each node in the tree structure.
-type payload() :: none | #kpayload_data{}.
%% All types of payload stored in the nodes of the tree structure.
%%
%% Beside the absence of payload, the only type of payload supported is data.
-type tree_node() :: #node{}.
%% A node in the tree structure.
-type command() :: #put{} |
#delete{} |
#tx{}.
%% Commands specific to this Ra machine.
-type machine_init_args() :: #{commands => [command()],
atom() => any()}.
%% Structure passed to {@link init/1}.
-type machine_config() :: #config{}.
%% Configuration record, holding read-only or rarely changing fields.
-type keep_while_conds_map() :: #{khepri_path:path() =>
khepri_condition:keep_while()}.
%% Internal index of the per-node keep_while conditions.
-type keep_while_conds_revidx() :: #{khepri_path:path() =>
#{khepri_path:path() => ok}}.
%% Internal reverse index of the keep_while conditions. If node A depends on a
%% condition on node B, then this reverse index will have a "node B => node A"
%% entry.
-type operation_options() :: #{expect_specific_node => boolean(),
include_child_names => boolean()}.
%% Options used in {@link find_matching_nodes/3}.
-type state() :: #?MODULE{}.
%% State of this Ra state machine.
-type query_fun() :: fun((state()) -> any()).
%% Function representing a query and used {@link process_query/2}.
-type walk_down_the_tree_extra() :: #{include_root_props =>
boolean(),
keep_while_conds =>
keep_while_conds_map(),
keep_while_conds_revidx =>
keep_while_conds_revidx()}.
-type walk_down_the_tree_fun() ::
fun((khepri_path:path(),
khepri:ok(tree_node()) | error(any(), map()),
Acc :: any()) ->
ok(tree_node() | keep | remove, any()) |
khepri:error()).
%% Function called to handle a node found (or an error) and used in {@link
%% walk_down_the_tree/6}.
-type ok(Type1, Type2) :: {ok, Type1, Type2}.
-type ok(Type1, Type2, Type3) :: {ok, Type1, Type2, Type3}.
-type error(Type1, Type2) :: {error, Type1, Type2}.
-export_type([data/0,
stat/0,
payload/0,
tree_node/0,
payload_version/0,
child_list_version/0,
child_list_length/0,
node_props/0,
node_props_map/0,
result/0,
operation_options/0]).
-export_type([state/0,
machine_config/0,
keep_while_conds_map/0,
keep_while_conds_revidx/0]).
%% -------------------------------------------------------------------
%% Machine protocol.
%% -------------------------------------------------------------------
%% TODO: Verify arguments carefully to avoid the construction of an invalid
%% command.
-spec put(StoreId, PathPattern, Payload) -> Result when
StoreId :: khepri:store_id(),
PathPattern :: khepri_path:pattern(),
Payload :: payload(),
Result :: result().
%% @doc Creates or modifies a specific tree node in the tree structure.
%%
%% Calling this function is the same as calling
%% `put(StoreId, PathPattern, Payload, #{})'.
%%
%% @see put/4.
%% Delegates to put/4 with no extra options.
put(StoreId, PathPattern, Payload) ->
    put(StoreId, PathPattern, Payload, #{}).
-spec put(StoreId, PathPattern, Payload, Extra) -> Result when
StoreId :: khepri:store_id(),
PathPattern :: khepri_path:pattern(),
Payload :: payload(),
Extra :: #{keep_while => keep_while_conds_map()},
Result :: result().
%% @doc Creates or modifies a specific tree node in the tree structure.
%%
%% The path or path pattern must target a specific tree node.
%%
%% When using a simple path, if the target node does not exists, it is created
%% using the given payload. If the target node exists, it is updated with the
%% given payload and its payload version is increased by one. Missing parent
%% nodes are created on the way.
%%
%% When using a path pattern, the behavior is the same. However if a condition
%% in the path pattern is not met, an error is returned and the tree structure
%% is not modified.
%%
%% If the target node is modified, the returned structure in the "ok" tuple
%% will have a single key corresponding to the path of the target node. That
%% key will point to a map containing the properties and payload (if any) of
%% the node before the modification.
%%
%% If the target node is created, the returned structure in the "ok" tuple
%% will have a single key corresponding to the path of the target node. That
%% key will point to an empty map, indicating there was no existing node (i.e.
%% there was no properties or payload to return).
%%
%% The payload must be one of the following form:
%% <ul>
%% <li>`none', meaning there will be no payload attached to the node</li>
%% <li>`#kpayload_data{data = Term}' to store any type of term in the
%% node</li>
%% </ul>
%%
%% Example:
%% ```
%% %% Insert a node at `/foo/bar', overwriting the previous value.
%% Result = khepri_machine:put(
%% ra_cluster_name, [foo, bar], #kpayload_data{data = new_value}),
%%
%% %% Here is the content of `Result'.
%% {ok, #{[foo, bar] => #{data => old_value,
%% payload_version => 1,
%% child_list_version => 1,
%% child_list_length => 0}}} = Result.
%% '''
%%
%% @param StoreId the name of the Ra cluster.
%% @param PathPattern the path (or path pattern) to the node to create or
%% modify.
%% @param Payload the payload to put in the specified node.
%% @param Extra extra options such as `keep_while' conditions.
%%
%% @returns an "ok" tuple with a map with one entry, or an "error" tuple.
put(StoreId, PathPattern, Payload, Extra) when ?IS_KHEPRI_PAYLOAD(Payload) ->
    %% Validate the path client-side so an invalid pattern fails fast,
    %% without a round-trip through the Ra log.
    khepri_path:ensure_is_valid(PathPattern),
    Command = #put{path = PathPattern,
                   payload = Payload,
                   extra = Extra},
    process_command(StoreId, Command);
%% Reject payloads that are neither `none' nor #kpayload_data{}.
put(_StoreId, PathPattern, Payload, _Extra) ->
    throw({invalid_payload, PathPattern, Payload}).
-spec get(StoreId, PathPattern) -> Result when
StoreId :: khepri:store_id(),
PathPattern :: khepri_path:pattern(),
Result :: result().
%% @doc Returns all tree nodes matching the path pattern.
%%
%% Calling this function is the same as calling
%% `get(StoreId, PathPattern, #{})'.
%%
%% @see get/3.
%% Delegates to get/3 with default (empty) options.
get(StoreId, PathPattern) ->
    get(StoreId, PathPattern, #{}).
-spec get(StoreId, PathPattern, Options) -> Result when
StoreId :: khepri:store_id(),
PathPattern :: khepri_path:pattern(),
Options :: operation_options(),
Result :: result().
%% @doc Returns all tree nodes matching the path pattern.
%%
%% The returned structure in the "ok" tuple will have a key corresponding to
%% the path per node which matched the pattern. Each key will point to a map
%% containing the properties and payload of that matching node.
%%
%% Example:
%% ```
%% %% Query the node at `/foo/bar'.
%% Result = khepri_machine:get(ra_cluster_name, [foo, bar]),
%%
%% %% Here is the content of `Result'.
%% {ok, #{[foo, bar] => #{data => new_value,
%% payload_version => 2,
%% child_list_version => 1,
%% child_list_length => 0}}} = Result.
%% '''
%%
%% @param StoreId the name of the Ra cluster.
%% @param PathPattern the path (or path pattern) to match against the nodes to
%% retrieve.
%% @param Options options to tune the tree traversal or the returned structure
%% content.
%%
%% @returns an "ok" tuple with a map with zero, one or more entries, or an
%% "error" tuple.
get(StoreId, PathPattern, Options) ->
    khepri_path:ensure_is_valid(PathPattern),
    %% Read-only operation: run as a query against the machine state
    %% instead of appending a command to the Ra log.
    Query = fun(#?MODULE{root = Root}) ->
                    find_matching_nodes(Root, PathPattern, Options)
            end,
    process_query(StoreId, Query).
-spec delete(StoreId, PathPattern) -> Result when
StoreId :: khepri:store_id(),
PathPattern :: khepri_path:pattern(),
Result :: result().
%% @doc Deletes all tree nodes matching the path pattern.
%%
%% The returned structure in the "ok" tuple will have a key corresponding to
%% the path per node which was deleted. Each key will point to a map containing
%% the properties and payload of that deleted node.
%%
%% Example:
%% ```
%% %% Delete the node at `/foo/bar'.
%% Result = khepri_machine:delete(ra_cluster_name, [foo, bar]),
%%
%% %% Here is the content of `Result'.
%% {ok, #{[foo, bar] => #{data => new_value,
%% payload_version => 2,
%% child_list_version => 1,
%% child_list_length => 0}}} = Result.
%% '''
%%
%% @param StoreId the name of the Ra cluster.
%% @param PathPattern the path (or path pattern) to match against the nodes to
%% delete.
%%
%% @returns an "ok" tuple with a map with zero, one or more entries, or an
%% "error" tuple.
delete(StoreId, PathPattern) ->
    khepri_path:ensure_is_valid(PathPattern),
    %% Deletion mutates the tree, so it goes through the Ra log as a
    %% command (unlike get/3, which runs as a query).
    Command = #delete{path = PathPattern},
    process_command(StoreId, Command).
-spec transaction(StoreId, Fun) -> Ret when
StoreId :: khepri:store_id(),
Fun :: khepri_tx:tx_fun(),
Ret :: Atomic | Aborted,
Atomic :: {atomic, khepri_tx:tx_fun_result()},
Aborted :: khepri_tx:tx_abort().
%% @doc Runs a transaction and returns the result.
%%
%% Calling this function is the same as calling
%% `transaction(StoreId, Fun, auto)'.
%%
%% @see transaction/3.
%% Defaults to `auto': transaction/3 decides ro vs. rw from the fun.
transaction(StoreId, Fun) ->
    transaction(StoreId, Fun, auto).
-spec transaction(StoreId, Fun, ReadWrite) -> Ret when
StoreId :: khepri:store_id(),
Fun :: khepri_tx:tx_fun(),
ReadWrite :: ro | rw | auto,
Ret :: Atomic | Aborted,
Atomic :: {atomic, khepri_tx:tx_fun_result()},
Aborted :: khepri_tx:tx_abort().
%% @doc Runs a transaction and returns the result.
%%
%% `Fun' is an arbitrary anonymous function which takes no arguments.
%%
%% The `ReadWrite' flag determines what the anonymous function is allowed to
%% do and in which context it runs:
%%
%% <ul>
%% <li>If `ReadWrite' is `ro', `Fun' can do whatever it wants, except modify
%% the content of the store. In other words, uses of {@link khepri_tx:put/2}
%% or {@link khepri_tx:delete/1} are forbidden and will abort the function.
%% `Fun' is executed from a process on the leader Ra member.</li>
%% <li>If `ReadWrite' is `rw', `Fun' can use the {@link khepri_tx} transaction
%% API as well as any calls to other modules as long as those functions or what
%% they do is permitted. See {@link khepri_tx} for more details. If `Fun' does
%% or calls something forbidden, the transaction will be aborted. `Fun' is
%% executed in the context of the state machine process on each Ra
%% members.</li>
%% <li>If `ReadWrite' is `auto', `Fun' is analyzed to determine if it calls
%% {@link khepri_tx:put/2} or {@link khepri_tx:delete/1}, or uses any denied
%% operations for a read/write transaction. If it does, this is the same as
%% setting `ReadWrite' to true. Otherwise, this is the equivalent of setting
%% `ReadWrite' to false.</li>
%% </ul>
%%
%% The result of `Fun' can be any term. That result is returned in an
%% `{atomic, Result}' tuple.
%%
%% @param StoreId the name of the Ra cluster.
%% @param Fun an arbitrary anonymous function.
%%
%% @returns `{atomic, Result}' with the return value of `Fun', or `{aborted,
%% Reason}' if the anonymous function was aborted.
%% Dispatch a transaction according to the ReadWrite mode.
%% Fix: the `auto' clause passed `auto = ReadWrite' -- a redundant
%% re-match of an already-bound variable -- as a call argument; it now
%% simply passes ReadWrite (identical value, clearer intent).
transaction(StoreId, Fun, auto = ReadWrite) when is_function(Fun, 0) ->
    %% If the fun can be extracted into a standalone fun (i.e. it may
    %% write), run it as a read/write transaction through the Ra log;
    %% otherwise run it as a read-only query.
    case khepri_tx:to_standalone_fun(Fun, ReadWrite) of
        #standalone_fun{} = StandaloneFun ->
            readwrite_transaction(StoreId, StandaloneFun);
        _ ->
            readonly_transaction(StoreId, Fun)
    end;
transaction(StoreId, Fun, rw = ReadWrite) when is_function(Fun, 0) ->
    StandaloneFun = khepri_tx:to_standalone_fun(Fun, ReadWrite),
    readwrite_transaction(StoreId, StandaloneFun);
transaction(StoreId, Fun, ro) when is_function(Fun, 0) ->
    readonly_transaction(StoreId, Fun);
%% Transaction funs must take no arguments.
transaction(_StoreId, Fun, _ReadWrite) when is_function(Fun) ->
    {arity, Arity} = erlang:fun_info(Fun, arity),
    throw({invalid_tx_fun, {requires_args, Arity}});
transaction(_StoreId, Term, _ReadWrite) ->
    throw({invalid_tx_fun, Term}).
%% Run Fun as a leader query; the machine state is never modified.
readonly_transaction(StoreId, Fun) when is_function(Fun, 0) ->
    Query = fun(State) ->
                    %% Third argument `false': khepri_tx runs the fun
                    %% with writes disallowed.
                    {_State, Ret} = khepri_tx:run(State, Fun, false),
                    Ret
            end,
    case process_query(StoreId, Query) of
        %% An aborted transaction is reported as-is, not re-raised.
        {exception, _, {aborted, _} = Aborted, _} ->
            Aborted;
        %% Any other exception is re-raised in the caller's context.
        {exception, Class, Reason, Stacktrace} ->
            erlang:raise(Class, Reason, Stacktrace);
        Ret ->
            {atomic, Ret}
    end.
%% Run an extracted standalone fun as a #tx{} command: it is replicated
%% through the Ra log and executed inside apply/3 on every member.
readwrite_transaction(StoreId, StandaloneFun) ->
    Command = #tx{'fun' = StandaloneFun},
    case process_command(StoreId, Command) of
        %% An aborted transaction is reported as-is, not re-raised.
        {exception, _, {aborted, _} = Aborted, _} ->
            Aborted;
        %% Any other exception is re-raised in the caller's context.
        {exception, Class, Reason, Stacktrace} ->
            erlang:raise(Class, Reason, Stacktrace);
        Ret ->
            {atomic, Ret}
    end.
-spec get_keep_while_conds_state(StoreId) -> Ret when
StoreId :: khepri:store_id(),
Ret :: {ok, keep_while_conds_map()} | khepri:error().
%% @doc Returns the `keep_while' conditions internal state.
%%
%% The returned state consists of all the `keep_while' condition set so far.
%% However, it doesn't include the reverse index.
%%
%% @param StoreId the name of the Ra cluster.
%%
%% @returns the `keep_while' conditions internal state.
%%
%% @private
get_keep_while_conds_state(StoreId) ->
    %% Read-only access to the machine state; only the forward map is
    %% exposed, the reverse index stays internal.
    Query = fun(#?MODULE{keep_while_conds = KeepWhileConds}) ->
                    {ok, KeepWhileConds}
            end,
    process_query(StoreId, Query).
-spec process_command(StoreId, Command) -> Ret when
StoreId :: khepri:store_id(),
Command :: command(),
Ret :: any().
%% @doc Processes a command which is appended to the Ra log and processed by
%% this state machine code.
%%
%% `Command' may modify the state of the machine.
%%
%% The command associated code is executed in the context of the state machine
%% process on each Ra members.
%%
%% @param StoreId the name of the Ra cluster.
%%
%% @returns the result of the command or an "error" tuple.
%%
%% @private
process_command(StoreId, Command) ->
    %% StoreId is the same as Ra's cluster name.
    case ra_leaderboard:lookup_leader(StoreId) of
        undefined ->
            {error, ra_leader_unknown};
        LeaderId ->
            case ra:process_command(LeaderId, Command) of
                {ok, Ret, _LeaderId} -> Ret;
                %% Normalize Ra's bare timeout into an error tuple.
                {timeout, _} = Timeout -> {error, Timeout};
                {error, _} = Error -> Error
            end
    end.
-spec process_query(StoreId, QueryFun) -> Ret when
StoreId :: khepri:store_id(),
QueryFun :: query_fun(),
Ret :: any().
%% @doc Processes a query which is executed by the Ra leader.
%%
%% The `QueryFun' function takes the machine state as an argument and can
%% return anything. However, the machine state is never modified. The query
%% does not go through the Ra log and is not replicated.
%%
%% The `QueryFun' function is executed from a process on the leader Ra member.
%%
%% @param StoreId the name of the Ra cluster.
%%
%% @returns the result of the query or an "error" tuple.
%%
%% @private
process_query(StoreId, QueryFun) ->
    %% StoreId is the same as Ra's cluster name.
    case ra_leaderboard:lookup_leader(StoreId) of
        undefined ->
            {error, ra_leader_unknown};
        LeaderId ->
            %% TODO: Leader vs. consistent?
            case ra:leader_query(LeaderId, QueryFun) of
                %% The Raft index of the query is discarded; only the
                %% query fun's result is returned.
                {ok, {_RaIndex, Ret}, _} -> Ret;
                %% Normalize Ra's bare timeout into an error tuple.
                {timeout, _} = Timeout -> {error, Timeout};
                {error, _} = Error -> Error
            end
    end.
%% -------------------------------------------------------------------
%% ra_machine callbacks.
%% -------------------------------------------------------------------
-spec init(machine_init_args()) -> state().
%% @private
init(Params) ->
    %% The snapshot interval is the only configurable field so far.
    Config = case Params of
                 #{snapshot_interval := SnapshotInterval} ->
                     #config{snapshot_interval = SnapshotInterval};
                 _ ->
                     #config{}
             end,
    State = #?MODULE{config = Config},
    %% Create initial "schema" if provided.
    Commands = maps:get(commands, Params, []),
    State3 = lists:foldl(
               fun (Command, State1) ->
                       %% Replay each bootstrap command through apply/3
                       %% with a zeroed Raft meta; keep only the new
                       %% state, discarding results and side effects.
                       Meta = #{index => 0,
                                term => 0,
                                system_time => 0},
                       case apply(Meta, Command, State1) of
                           {S, _} -> S;
                           {S, _, _} -> S
                       end
               end, State, Commands),
    %% Bootstrap commands must not count toward the snapshot interval.
    reset_applied_command_count(State3).
-spec apply(Meta, Command, State) ->
    {State, Ret} | {State, Ret, SideEffects} when
      Meta :: ra_machine:command_meta_data(),
      Command :: command(),
      State :: state(),
      Ret :: any(),
      SideEffects :: ra_machine:effects().
%% @doc Applies a replicated command to the machine state (ra_machine
%% callback).
%%
%% Each command handler delegates to the corresponding tree operation, then
%% counts the applied command so a snapshot can be requested periodically.
%%
%% @private
%% TODO: Handle unknown/invalid commands.
apply(
  Meta,
  #put{path = PathPattern, payload = Payload, extra = Extra},
  State) ->
    Ret = insert_or_update_node(State, PathPattern, Payload, Extra),
    bump_applied_command_count(Ret, Meta);
apply(
  Meta,
  #delete{path = PathPattern},
  State) ->
    Ret = delete_matching_nodes(State, PathPattern),
    bump_applied_command_count(Ret, Meta);
apply(
  Meta,
  #tx{'fun' = StandaloneFun},
  State) ->
    %% A transaction arrives either as an extracted standalone function
    %% (executed through khepri_fun:exec/2) or as a plain Erlang fun.
    Fun = case is_function(StandaloneFun) of
              false -> fun() -> khepri_fun:exec(StandaloneFun, []) end;
              true -> StandaloneFun
          end,
    Ret = khepri_tx:run(State, Fun, true),
    bump_applied_command_count(Ret, Meta).
-spec bump_applied_command_count({State, Ret}, Meta) ->
    {State, Ret} | {State, Ret, SideEffects} when
      State :: state(),
      Ret :: any(),
      Meta :: ra_machine:command_meta_data(),
      SideEffects :: ra_machine:effects().
%% @doc Counts one more applied command and possibly requests a snapshot.
%%
%% The counter lives in the state's `metrics' map. Once it reaches the
%% configured `snapshot_interval', it is reset and a `release_cursor' side
%% effect is emitted so Ra can snapshot the state and truncate its log.
%%
%% @private
bump_applied_command_count(
  {#?MODULE{config = #config{snapshot_interval = SnapshotInterval},
            metrics = Metrics} = State,
   Result},
  #{index := RaftIndex}) ->
    AppliedCmdCount0 = maps:get(applied_command_count, Metrics, 0),
    AppliedCmdCount = AppliedCmdCount0 + 1,
    case AppliedCmdCount < SnapshotInterval of
        true ->
            Metrics1 = Metrics#{applied_command_count => AppliedCmdCount},
            State1 = State#?MODULE{metrics = Metrics1},
            {State1, Result};
        false ->
            ?LOG_DEBUG(
               "Move release cursor after ~b commands applied "
               "(>= ~b commands)",
               [AppliedCmdCount, SnapshotInterval],
               #{domain => [khepri, ra_machine]}),
            State1 = reset_applied_command_count(State),
            %% Tell Ra that everything up to `RaftIndex' may be replaced by
            %% a snapshot of `State1'.
            ReleaseCursor = {release_cursor, RaftIndex, State1},
            SideEffects = [ReleaseCursor],
            {State1, Result, SideEffects}
    end.
%% @doc Clears the applied-command counter used to pace snapshots.
%%
%% @private
reset_applied_command_count(#?MODULE{metrics = Metrics} = State) ->
    State#?MODULE{metrics = maps:remove(applied_command_count, Metrics)}.
%% -------------------------------------------------------------------
%% Internal functions.
%% -------------------------------------------------------------------
-spec create_node_record(Payload) -> Node when
      Payload :: payload(),
      Node :: tree_node().
%% @doc Returns a brand new tree node holding `Payload' with initial stats.
%%
%% @private
create_node_record(NodePayload) ->
    #node{stat = ?INIT_NODE_STAT, payload = NodePayload}.
-spec set_node_payload(tree_node(), payload()) ->
    tree_node().
%% @doc Stores `Payload' in the node, bumping the payload version on change.
%%
%% Setting the exact same payload leaves the node (and its stats) untouched.
%%
%% @private
set_node_payload(#node{payload = Payload} = Node, Payload) ->
    %% Identical payload: no-op, no version bump.
    Node;
set_node_payload(#node{stat = Stat} = Node, Payload) ->
    #{payload_version := PVersion} = Stat,
    Node#node{stat = Stat#{payload_version => PVersion + 1},
              payload = Payload}.
-spec remove_node_payload(tree_node()) -> tree_node().
%% @doc Strips the payload from the node, bumping the payload version if
%% there was one.
%%
%% @private
remove_node_payload(#node{payload = none} = Node) ->
    %% Nothing to remove; keep stats untouched.
    Node;
remove_node_payload(#node{stat = Stat} = Node) ->
    #{payload_version := PVersion} = Stat,
    Node#node{stat = Stat#{payload_version => PVersion + 1},
              payload = none}.
-spec add_node_child(tree_node(), khepri_path:component(), tree_node()) ->
    tree_node().
%% @doc Attaches `Child' under `ChildName', bumping the child-list version.
add_node_child(#node{stat = Stat, child_nodes = Children} = Node,
               ChildName, Child) ->
    #{child_list_version := CLVersion} = Stat,
    Node#node{stat = Stat#{child_list_version => CLVersion + 1},
              child_nodes = Children#{ChildName => Child}}.
-spec update_node_child(tree_node(), khepri_path:component(), tree_node()) ->
    tree_node().
%% @doc Replaces the child `ChildName' with `Child'.
%%
%% The child list itself is considered unchanged, so no version is bumped.
update_node_child(#node{child_nodes = Children} = Node, ChildName, Child) ->
    Node#node{child_nodes = Children#{ChildName => Child}}.
-spec remove_node_child(tree_node(), khepri_path:component()) ->
    tree_node().
%% @doc Detaches the child `ChildName', bumping the child-list version.
%%
%% The child is expected to exist; this is asserted.
remove_node_child(#node{stat = Stat, child_nodes = Children} = Node,
                  ChildName) ->
    ?assert(maps:is_key(ChildName, Children)),
    #{child_list_version := CLVersion} = Stat,
    Node#node{stat = Stat#{child_list_version => CLVersion + 1},
              child_nodes = maps:remove(ChildName, Children)}.
-spec remove_node_child_nodes(tree_node()) -> tree_node().
%% @doc Drops every child of the node, bumping the child-list version if
%% there was at least one.
remove_node_child_nodes(#node{child_nodes = Children} = Node)
  when Children =:= #{} ->
    %% Already childless: keep stats untouched.
    Node;
remove_node_child_nodes(#node{stat = Stat} = Node) ->
    #{child_list_version := CLVersion} = Stat,
    Node#node{stat = Stat#{child_list_version => CLVersion + 1},
              child_nodes = #{}}.
-spec gather_node_props(tree_node(), operation_options()) ->
    node_props().
%% @doc Builds the externally visible properties map of a node.
%%
%% Versions and child count are always present. Child names are added when
%% `include_child_names' is true in `Options'; the data is added only when
%% the node carries a payload.
gather_node_props(#node{stat = #{payload_version := PVersion,
                                 child_list_version := CLVersion},
                        payload = Payload,
                        child_nodes = Children},
                  Options) ->
    Props0 = #{payload_version => PVersion,
               child_list_version => CLVersion,
               child_list_length => maps:size(Children)},
    Props1 = case Options of
                 #{include_child_names := true} ->
                     Props0#{child_names => maps:keys(Children)};
                 _ ->
                     Props0
             end,
    case Payload of
        #kpayload_data{data = Data} -> Props1#{data => Data};
        _                           -> Props1
    end.
-spec to_absolute_keep_while(BasePath, KeepWhile) -> KeepWhile when
      BasePath :: khepri_path:path(),
      KeepWhile :: khepri_condition:keep_while().
%% @doc Rewrites every path in `KeepWhile' to be absolute, resolving
%% relative paths against `BasePath'.
%%
%% @private
to_absolute_keep_while(BasePath, KeepWhile) ->
    maps:fold(
      fun(RelPath, Cond, Acc) ->
              Acc#{khepri_path:abspath(RelPath, BasePath) => Cond}
      end, #{}, KeepWhile).
-spec are_keep_while_conditions_met(
        tree_node(), khepri_condition:keep_while()) ->
    true | {false, any()}.
%% @doc Verifies that all conditions in `KeepWhile' hold against the tree
%% rooted at `Root'.
%%
%% Each watched path must match at least one node and the condition must be
%% met on every matched node. The first failure is returned as
%% `{false, Reason}' and short-circuits the rest of the fold.
%%
%% @private
are_keep_while_conditions_met(_, KeepWhile)
  when KeepWhile =:= #{} ->
    true;
are_keep_while_conditions_met(Root, KeepWhile) ->
    maps:fold(
      fun
          (Path, Condition, true) ->
              case find_matching_nodes(Root, Path, #{}) of
                  {ok, Result} when Result =/= #{} ->
                      are_keep_while_conditions_met1(Result, Condition);
                  {ok, _} ->
                      %% The watched path matches nothing at all.
                      {false, {pattern_matches_no_nodes, Path}};
                  {error, Reason} ->
                      {false, Reason}
              end;
          (_, _, False) ->
              %% A previous condition already failed; propagate its reason.
              False
      end, true, KeepWhile).
%% @doc Checks `Condition' against every node of `Result'.
%%
%% Returns `true' only when the condition holds for all matched nodes; once
%% a `{false, Reason}' appears, it is carried through untouched.
%%
%% @private
are_keep_while_conditions_met1(Result, Condition) ->
    maps:fold(
      fun(Path, NodeProps, Acc) ->
              case Acc of
                  true -> khepri_condition:is_met(Condition, Path, NodeProps);
                  _    -> Acc
              end
      end, true, Result).
%% @doc Evaluates the keep_while condition a node sets on itself, if any.
%%
%% When the keep_while conditions map (from `Extra') contains an entry
%% where `Path' is both the watcher and the watched path, the condition is
%% checked against `Node'. Every other situation counts as met.
%%
%% @private
is_keep_while_condition_met_on_self(
  Path, Node, #{keep_while_conds := KeepWhileConds}) ->
    case KeepWhileConds of
        #{Path := #{Path := Condition}} ->
            khepri_condition:is_met(Condition, Path, Node);
        _ ->
            true
    end;
is_keep_while_condition_met_on_self(_, _, _) ->
    true.
-spec update_keep_while_conds_revidx(
        keep_while_conds_map(), keep_while_conds_revidx(),
        khepri_path:path(), khepri_condition:keep_while()) ->
    keep_while_conds_revidx().
%% @doc Updates the reverse index mapping watched paths to their watchers.
%%
%% `Watcher' is the path owning the `KeepWhile' conditions; each key of
%% `KeepWhile' is a watched path. The watcher's old entries are removed
%% first, then the new watched paths are recorded.
%%
%% @private
update_keep_while_conds_revidx(
  KeepWhileConds, KeepWhileCondsRevIdx, Watcher, KeepWhile) ->
    %% First, clean up reversed index where a watched path isn't watched
    %% anymore in the new keep_while.
    OldWatcheds = maps:get(Watcher, KeepWhileConds, #{}),
    KeepWhileCondsRevIdx1 =
        maps:fold(
          fun(Watched, _, KWRevIdx) ->
                  Watchers = maps:get(Watched, KWRevIdx),
                  Watchers1 = maps:remove(Watcher, Watchers),
                  %% Drop the map entry entirely once its last watcher is
                  %% gone.
                  case maps:size(Watchers1) of
                      0 -> maps:remove(Watched, KWRevIdx);
                      _ -> KWRevIdx#{Watched => Watchers1}
                  end
          end, KeepWhileCondsRevIdx, OldWatcheds),
    %% Then, record the watched paths.
    maps:fold(
      fun(Watched, _, KWRevIdx) ->
              Watchers = maps:get(Watched, KWRevIdx, #{}),
              Watchers1 = Watchers#{Watcher => ok},
              KWRevIdx#{Watched => Watchers1}
      end, KeepWhileCondsRevIdx1, KeepWhile).
-spec find_matching_nodes(
        tree_node(),
        khepri_path:pattern(),
        operation_options()) ->
    result().
%% @doc Returns the properties of all nodes matching `PathPattern'.
%%
%% This is a read-only walk: the callback only accumulates node properties
%% and never modifies the tree, which is asserted after the walk.
%%
%% @private
find_matching_nodes(Root, PathPattern, Options) ->
    Fun = fun(Path, Node, Result) ->
                  find_matching_nodes_cb(Path, Node, Options, Result)
          end,
    WorkOnWhat = case Options of
                     #{expect_specific_node := true} -> specific_node;
                     _ -> many_nodes
                 end,
    %% If the pattern covers the root itself, its properties must be part
    %% of the result as well.
    IncludeRootProps = khepri_path:pattern_includes_root_node(PathPattern),
    Extra = #{include_root_props => IncludeRootProps},
    case walk_down_the_tree(Root, PathPattern, WorkOnWhat, Extra, Fun, #{}) of
        {ok, NewRoot, _, Result} ->
            %% A query must leave the tree unchanged.
            ?assertEqual(Root, NewRoot),
            {ok, Result};
        Error ->
            Error
    end.
%% @doc Tree-walk callback used by `find_matching_nodes/3'.
%%
%% Collects the properties of every matched node. A missing node is fatal
%% only when a specific node was expected; any other interruption leaves
%% the accumulated result untouched.
%%
%% @private
find_matching_nodes_cb(Path, #node{} = TreeNode, Options, Acc) ->
    Props = gather_node_props(TreeNode, Options),
    {ok, keep, Acc#{Path => Props}};
find_matching_nodes_cb(
  _Path,
  {interrupted, node_not_found = Reason, Info},
  #{expect_specific_node := true},
  _Acc) ->
    {error, {Reason, Info}};
find_matching_nodes_cb(_Path, {interrupted, _, _}, _Options, Acc) ->
    {ok, keep, Acc}.
-spec insert_or_update_node(
        state(), khepri_path:pattern(), payload(),
        #{keep_while => khepri_condition:keep_while()}) ->
    {state(), result()}.
%% @doc Sets the payload of the node at `PathPattern', creating it and any
%% missing parent nodes if needed.
%%
%% The first clause handles an update carrying a `keep_while' condition:
%% the conditions on *other* nodes are verified before the update is
%% accepted, and on success the keep_while bookkeeping (conditions map and
%% reverse index) is updated under the resolved path of the target node.
%% The second clause is a plain update without keep_while handling.
%%
%% @private
insert_or_update_node(
  #?MODULE{root = Root,
           keep_while_conds = KeepWhileConds,
           keep_while_conds_revidx = KeepWhileCondsRevIdx} = State,
  PathPattern, Payload,
  #{keep_while := KeepWhile}) ->
    Fun = fun(Path, Node, {_, _, Result}) ->
                  Ret = insert_or_update_node_cb(
                          Path, Node, Payload, Result),
                  case Ret of
                      {ok, Node1, Result1} when Result1 =/= #{} ->
                          %% The node was touched: check the keep_while
                          %% conditions it sets on nodes other than itself.
                          AbsKeepWhile = to_absolute_keep_while(
                                           Path, KeepWhile),
                          KeepWhileOnOthers = maps:remove(Path, AbsKeepWhile),
                          KWMet = are_keep_while_conditions_met(
                                    Root, KeepWhileOnOthers),
                          case KWMet of
                              true ->
                                  {ok, Node1, {updated, Path, Result1}};
                              {false, Reason} ->
                                  %% The keep_while condition is not met. We
                                  %% can't insert the node and return an
                                  %% error.
                                  NodeName = case Path of
                                                 [] -> ?ROOT_NODE;
                                                 _ -> lists:last(Path)
                                             end,
                                  Info = #{node_name => NodeName,
                                           node_path => Path,
                                           keep_while_reason => Reason},
                                  {error,
                                   {keep_while_conditions_not_met, Info}}
                          end;
                      {ok, Node1, Result1} ->
                          {ok, Node1, {updated, Path, Result1}};
                      Error ->
                          Error
                  end
          end,
    %% TODO: Should we support setting many nodes with the same value?
    Ret1 = walk_down_the_tree(
             Root, PathPattern, specific_node,
             #{keep_while_conds => KeepWhileConds,
               keep_while_conds_revidx => KeepWhileCondsRevIdx},
             Fun, {undefined, [], #{}}),
    case Ret1 of
        {ok, Root1, #{keep_while_conds := KeepWhileConds1,
                      keep_while_conds_revidx := KeepWhileCondsRevIdx1},
         {updated, ResolvedPath, Ret2}} ->
            %% Record the new keep_while conditions under the resolved
            %% (concrete) path of the target node.
            AbsKeepWhile = to_absolute_keep_while(ResolvedPath, KeepWhile),
            KeepWhileCondsRevIdx2 = update_keep_while_conds_revidx(
                                      KeepWhileConds1, KeepWhileCondsRevIdx1,
                                      ResolvedPath, AbsKeepWhile),
            KeepWhileConds2 = KeepWhileConds1#{ResolvedPath => AbsKeepWhile},
            State1 = State#?MODULE{root = Root1,
                                   keep_while_conds = KeepWhileConds2,
                                   keep_while_conds_revidx =
                                       KeepWhileCondsRevIdx2},
            {State1, {ok, Ret2}};
        {ok, Root1, #{keep_while_conds := KeepWhileConds1,
                      keep_while_conds_revidx := KeepWhileCondsRevIdx1},
         {removed, _, Ret2}} ->
            %% NOTE(review): the `removed' shape is not produced by the
            %% callback above — presumably it comes from elsewhere in the
            %% walk; confirm. No keep_while conditions are recorded here.
            State1 = State#?MODULE{root = Root1,
                                   keep_while_conds = KeepWhileConds1,
                                   keep_while_conds_revidx =
                                       KeepWhileCondsRevIdx1},
            {State1, {ok, Ret2}};
        Error ->
            {State, Error}
    end;
insert_or_update_node(
  #?MODULE{root = Root,
           keep_while_conds = KeepWhileConds,
           keep_while_conds_revidx = KeepWhileCondsRevIdx} = State,
  PathPattern, Payload,
  _Extra) ->
    %% No keep_while condition: a plain update.
    Fun = fun(Path, Node, Result) ->
                  insert_or_update_node_cb(
                    Path, Node, Payload, Result)
          end,
    Ret1 = walk_down_the_tree(
             Root, PathPattern, specific_node,
             #{keep_while_conds => KeepWhileConds,
               keep_while_conds_revidx => KeepWhileCondsRevIdx},
             Fun, #{}),
    case Ret1 of
        {ok, Root1, #{keep_while_conds := KeepWhileConds1,
                      keep_while_conds_revidx := KeepWhileCondsRevIdx1},
         Ret2} ->
            State1 = State#?MODULE{root = Root1,
                                   keep_while_conds = KeepWhileConds1,
                                   keep_while_conds_revidx =
                                       KeepWhileCondsRevIdx1},
            {State1, {ok, Ret2}};
        Error ->
            {State, Error}
    end.
%% @doc Tree-walk callback which sets the payload of the target node.
%%
%% Missing intermediary nodes are created with no payload; only the target
%% node receives `Payload'. The properties recorded in the result are those
%% of the node *before* the update (an empty map for a created node).
%%
%% @private
insert_or_update_node_cb(
  Path, #node{} = Node, Payload, Result) ->
    case maps:is_key(Path, Result) of
        false ->
            Node1 = set_node_payload(Node, Payload),
            %% Report the properties of the node before modification.
            NodeProps = gather_node_props(Node, #{}),
            {ok, Node1, Result#{Path => NodeProps}};
        true ->
            %% This path was already handled during this walk; leave the
            %% node untouched.
            {ok, Node, Result}
    end;
insert_or_update_node_cb(
  Path, {interrupted, node_not_found = Reason, Info}, Payload, Result) ->
    %% We store the payload when we reached the target node only, not in the
    %% parent nodes we have to create in between.
    IsTarget = maps:get(node_is_target, Info),
    case can_continue_update_after_node_not_found(Info) of
        true when IsTarget ->
            Node = create_node_record(Payload),
            NodeProps = #{},
            {ok, Node, Result#{Path => NodeProps}};
        true ->
            %% Intermediary node: created empty.
            Node = create_node_record(none),
            {ok, Node, Result};
        false ->
            {error, {Reason, Info}}
    end;
insert_or_update_node_cb(_, {interrupted, Reason, Info}, _, _) ->
    %% Any other interruption (e.g. a mismatching condition) aborts the
    %% update.
    {error, {Reason, Info}}.
%% @doc Indicates if the tree walk may create a missing node and go on.
%%
%% The decision is based on the condition attached to the missing node or,
%% when there is none, on its plain name. Clause order matters: `condition'
%% takes precedence over `node_name' when both keys are present.
%%
%% @private
can_continue_update_after_node_not_found(#{condition := Condition}) ->
    can_continue_update_after_node_not_found1(Condition);
can_continue_update_after_node_not_found(#{node_name := NodeName}) ->
    can_continue_update_after_node_not_found1(NodeName).
%% @doc Decides, from a path component or condition, whether a missing node
%% may be created on the fly.
%%
%% Plain path components and `#if_node_exists{exists = false}' allow
%% creation; `#if_all{}'/`#if_any{}' recurse into their sub-conditions.
%% Anything else forbids it.
%%
%% @private
can_continue_update_after_node_not_found1(CondOrName) ->
    case CondOrName of
        ChildName when ?IS_PATH_COMPONENT(ChildName) ->
            true;
        #if_node_exists{exists = false} ->
            true;
        #if_all{conditions = Conds} ->
            lists:all(
              fun can_continue_update_after_node_not_found1/1, Conds);
        #if_any{conditions = Conds} ->
            lists:any(
              fun can_continue_update_after_node_not_found1/1, Conds);
        _ ->
            false
    end.
-spec delete_matching_nodes(state(), khepri_path:pattern()) ->
    {state(), result()}.
%% @doc Removes all tree nodes matching `PathPattern' from the state.
%%
%% On success, the returned result maps each deleted path to the
%% properties the node had before deletion.
%%
%% @private
delete_matching_nodes(
  #?MODULE{root = Root,
           keep_while_conds = KeepWhileConds,
           keep_while_conds_revidx = KeepWhileCondsRevIdx} = State,
  PathPattern) ->
    Ret1 = do_delete_matching_nodes(
             PathPattern, Root,
             #{keep_while_conds => KeepWhileConds,
               keep_while_conds_revidx => KeepWhileCondsRevIdx}),
    case Ret1 of
        {ok, Root1, #{keep_while_conds := KeepWhileConds1,
                      keep_while_conds_revidx := KeepWhileCondsRevIdx1},
         Ret2} ->
            %% Store the new tree along with the keep_while bookkeeping as
            %% returned by the walk.
            State1 = State#?MODULE{root = Root1,
                                   keep_while_conds = KeepWhileConds1,
                                   keep_while_conds_revidx =
                                       KeepWhileCondsRevIdx1},
            {State1, {ok, Ret2}};
        Error ->
            {State, Error}
    end.
%% @doc Walks the whole tree, removing every node matching `PathPattern'.
%%
%% @private
do_delete_matching_nodes(PathPattern, Root, Extra) ->
    walk_down_the_tree(
      Root, PathPattern, many_nodes, Extra,
      fun delete_matching_nodes_cb/3, #{}).
%% @doc Tree-walk callback which removes the visited node.
%%
%% The root node itself cannot be removed from the tree: it is emptied of
%% its payload and children instead. The recorded properties are always
%% those of the node before deletion.
%%
%% @private
delete_matching_nodes_cb([] = Path, #node{} = Node, Result) ->
    %% Special case: the root node is kept but cleared.
    Node1 = remove_node_payload(Node),
    Node2 = remove_node_child_nodes(Node1),
    NodeProps = gather_node_props(Node, #{}),
    {ok, Node2, Result#{Path => NodeProps}};
delete_matching_nodes_cb(Path, #node{} = Node, Result) ->
    NodeProps = gather_node_props(Node, #{}),
    {ok, remove, Result#{Path => NodeProps}};
delete_matching_nodes_cb(_, {interrupted, _, _}, Result) ->
    %% Missing or mismatching nodes are simply skipped.
    {ok, keep, Result}.
%% -------
-spec walk_down_the_tree(
        tree_node(), khepri_path:pattern(), specific_node | many_nodes,
        walk_down_the_tree_extra(),
        walk_down_the_tree_fun(), any()) ->
    ok(tree_node(), walk_down_the_tree_extra(), any()) |
    khepri:error().
%% @doc Compiles `PathPattern' and starts the recursive descent from
%% `Root'.
%%
%% `Fun' is called for each node matched by the pattern (or with an
%% `{interrupted, ...}' tuple when the walk cannot proceed); `FunAcc' is
%% threaded through all calls.
%%
%% @private
walk_down_the_tree(Root, PathPattern, WorkOnWhat, Extra, Fun, FunAcc) ->
    CompiledPathPattern = khepri_path:compile(PathPattern),
    walk_down_the_tree1(
      Root, CompiledPathPattern, WorkOnWhat,
      [], %% Used to remember the path of the node currently on.
      [], %% Used to update parents up in the tree in a tail-recursive
          %% function.
      Extra, Fun, FunAcc).
-spec walk_down_the_tree1(
        tree_node(), khepri_path:pattern(), specific_node | many_nodes,
        khepri_path:pattern(), [tree_node() | {tree_node(), child_created}],
        walk_down_the_tree_extra(),
        walk_down_the_tree_fun(), any()) ->
    ok(tree_node(), walk_down_the_tree_extra(), any()) |
    khepri:error().
%% @doc Recursive core of the tree walk.
%%
%% Consumes `PathPattern' one component at a time, tracking the reversed
%% path walked so far and the stack of traversed parents
%% (`ReversedParentTree') later used by walk_back_up_the_tree/5 to rebuild
%% the modified tree.
%%
%% @private
walk_down_the_tree1(
  CurrentNode,
  [?ROOT_NODE | PathPattern],
  WorkOnWhat, ReversedPath, ReversedParentTree, Extra, Fun, FunAcc) ->
    %% An explicit root component is only valid at the very beginning of
    %% the walk.
    ?assertEqual([], ReversedPath),
    ?assertEqual([], ReversedParentTree),
    walk_down_the_tree1(
      CurrentNode, PathPattern, WorkOnWhat,
      ReversedPath,
      ReversedParentTree,
      Extra, Fun, FunAcc);
walk_down_the_tree1(
  CurrentNode,
  [?THIS_NODE | PathPattern],
  WorkOnWhat, ReversedPath, ReversedParentTree, Extra, Fun, FunAcc) ->
    %% "This node" component: stay on the current node.
    walk_down_the_tree1(
      CurrentNode, PathPattern, WorkOnWhat,
      ReversedPath, ReversedParentTree, Extra, Fun, FunAcc);
walk_down_the_tree1(
  _CurrentNode,
  [?PARENT_NODE | PathPattern],
  WorkOnWhat,
  [_CurrentName | ReversedPath], [ParentNode0 | ReversedParentTree],
  Extra, Fun, FunAcc) ->
    %% "Parent node" component: pop one level. The parent may carry the
    %% `child_created' marker which must be stripped first.
    ParentNode = case ParentNode0 of
                     {PN, child_created} -> PN;
                     _ -> ParentNode0
                 end,
    walk_down_the_tree1(
      ParentNode, PathPattern, WorkOnWhat,
      ReversedPath, ReversedParentTree, Extra, Fun, FunAcc);
walk_down_the_tree1(
  CurrentNode,
  [?PARENT_NODE | PathPattern],
  WorkOnWhat,
  [] = ReversedPath, [] = ReversedParentTree,
  Extra, Fun, FunAcc) ->
    %% The path tries to go above the root node, like "cd /..". In this case,
    %% we stay on the root node.
    walk_down_the_tree1(
      CurrentNode, PathPattern, WorkOnWhat,
      ReversedPath, ReversedParentTree, Extra, Fun, FunAcc);
walk_down_the_tree1(
  #node{child_nodes = Children} = CurrentNode,
  [ChildName | PathPattern],
  WorkOnWhat, ReversedPath, ReversedParentTree, Extra, Fun, FunAcc)
  when ?IS_NODE_ID(ChildName) ->
    %% Plain node name: descend if the child exists, otherwise let the
    %% callback decide through interrupted_walk_down/9.
    case Children of
        #{ChildName := Child} ->
            walk_down_the_tree1(
              Child, PathPattern, WorkOnWhat,
              [ChildName | ReversedPath],
              [CurrentNode | ReversedParentTree],
              Extra, Fun, FunAcc);
        _ ->
            interrupted_walk_down(
              node_not_found,
              #{node_name => ChildName,
                node_path => lists:reverse([ChildName | ReversedPath])},
              PathPattern, WorkOnWhat,
              [ChildName | ReversedPath],
              [CurrentNode | ReversedParentTree],
              Extra, Fun, FunAcc)
    end;
walk_down_the_tree1(
  #node{child_nodes = Children} = CurrentNode,
  [Condition | PathPattern], specific_node = WorkOnWhat,
  ReversedPath, ReversedParentTree, Extra, Fun, FunAcc)
  when ?IS_CONDITION(Condition) ->
    %% We distinguish the case where the condition must be verified against
    %% the current node (i.e. the node name is ?ROOT_NODE or ?THIS_NODE in
    %% the condition) instead of its child nodes.
    SpecificNode = khepri_path:component_targets_specific_node(Condition),
    case SpecificNode of
        {true, NodeName}
          when NodeName =:= ?ROOT_NODE orelse NodeName =:= ?THIS_NODE ->
            CurrentName = special_component_to_node_name(
                            NodeName, ReversedPath),
            CondMet = khepri_condition:is_met(
                        Condition, CurrentName, CurrentNode),
            case CondMet of
                true ->
                    walk_down_the_tree1(
                      CurrentNode, PathPattern, WorkOnWhat,
                      ReversedPath, ReversedParentTree,
                      Extra, Fun, FunAcc);
                {false, Cond} ->
                    interrupted_walk_down(
                      mismatching_node,
                      #{node_name => CurrentName,
                        node_path => lists:reverse(ReversedPath),
                        node_props => gather_node_props(CurrentNode, #{}),
                        condition => Cond},
                      PathPattern, WorkOnWhat, ReversedPath,
                      ReversedParentTree, Extra, Fun, FunAcc)
            end;
        {true, ChildName} when ChildName =/= ?PARENT_NODE ->
            %% The condition targets one specific named child.
            case Children of
                #{ChildName := Child} ->
                    CondMet = khepri_condition:is_met(
                                Condition, ChildName, Child),
                    case CondMet of
                        true ->
                            walk_down_the_tree1(
                              Child, PathPattern, WorkOnWhat,
                              [ChildName | ReversedPath],
                              [CurrentNode | ReversedParentTree],
                              Extra, Fun, FunAcc);
                        {false, Cond} ->
                            interrupted_walk_down(
                              mismatching_node,
                              #{node_name => ChildName,
                                node_path => lists:reverse(
                                               [ChildName | ReversedPath]),
                                node_props => gather_node_props(Child, #{}),
                                condition => Cond},
                              PathPattern, WorkOnWhat,
                              [ChildName | ReversedPath],
                              [CurrentNode | ReversedParentTree],
                              Extra, Fun, FunAcc)
                    end;
                _ ->
                    interrupted_walk_down(
                      node_not_found,
                      #{node_name => ChildName,
                        node_path => lists:reverse([ChildName | ReversedPath]),
                        condition => Condition},
                      PathPattern, WorkOnWhat,
                      [ChildName | ReversedPath],
                      [CurrentNode | ReversedParentTree],
                      Extra, Fun, FunAcc)
            end;
        {true, ?PARENT_NODE} ->
            %% TODO: Support calling Fun() with parent node based on
            %% conditions on child nodes.
            %% NOTE(review): the many_nodes clause below returns
            %% `targets_parent_node' for the same situation — confirm
            %% whether the two error atoms should be unified.
            {error, targets_dot_dot};
        false ->
            %% TODO: Should we provide more details about the error, like the
            %% list of matching nodes?
            {error, matches_many_nodes}
    end;
walk_down_the_tree1(
  #node{child_nodes = Children} = CurrentNode,
  [Condition | PathPattern] = WholePathPattern, many_nodes = WorkOnWhat,
  ReversedPath, ReversedParentTree, Extra, Fun, FunAcc)
  when ?IS_CONDITION(Condition) ->
    %% Like with WorkOnWhat =:= specific_node function clause above, we
    %% distinguish the case where the condition must be verified against the
    %% current node (i.e. the node name is ?ROOT_NODE or ?THIS_NODE in the
    %% condition) instead of its child nodes.
    SpecificNode = khepri_path:component_targets_specific_node(Condition),
    case SpecificNode of
        {true, NodeName}
          when NodeName =:= ?ROOT_NODE orelse NodeName =:= ?THIS_NODE ->
            CurrentName = special_component_to_node_name(
                            NodeName, ReversedPath),
            CondMet = khepri_condition:is_met(
                        Condition, CurrentName, CurrentNode),
            case CondMet of
                true ->
                    walk_down_the_tree1(
                      CurrentNode, PathPattern, WorkOnWhat,
                      ReversedPath, ReversedParentTree,
                      Extra, Fun, FunAcc);
                {false, _} ->
                    %% A mismatch is not an error when targeting many nodes:
                    %% stop here and return the tree unmodified.
                    StartingNode = starting_node_in_rev_parent_tree(
                                     ReversedParentTree, CurrentNode),
                    {ok, StartingNode, Extra, FunAcc}
            end;
        {true, ?PARENT_NODE} ->
            %% TODO: Support calling Fun() with parent node based on
            %% conditions on child nodes.
            {error, targets_parent_node};
        _ ->
            %% There is a special case if the current node is the root node
            %% and the pattern is of the form of e.g. [#if_name_matches{regex
            %% = any}]. In this situation, we consider the condition should be
            %% compared to that root node as well. This allows to get its
            %% props and payload atomically in a single query.
            IsRoot = ReversedPath =:= [],
            IncludeRootProps = maps:get(include_root_props, Extra, false),
            Ret0 = case IsRoot andalso IncludeRootProps of
                       true ->
                           walk_down_the_tree1(
                             CurrentNode, [], WorkOnWhat,
                             [], [],
                             Extra, Fun, FunAcc);
                       _ ->
                           {ok, CurrentNode, Extra, FunAcc}
                   end,
            %% The result of the first part (the special case for the root
            %% node if relevant) is used as a starting point for handling all
            %% child nodes.
            Ret1 = maps:fold(
                     fun
                         (ChildName, Child, {ok, CurNode, Extra1, FunAcc1}) ->
                             handle_branch(
                               CurNode, ChildName, Child,
                               WholePathPattern, WorkOnWhat,
                               ReversedPath,
                               Extra1, Fun, FunAcc1);
                         (_, _, Error) ->
                             %% A previous branch failed: propagate the error
                             %% past the remaining children.
                             Error
                     end, Ret0, Children),
            case Ret1 of
                {ok, CurrentNode, Extra2, FunAcc2} ->
                    %% The current node didn't change, no need to update the
                    %% tree and evaluate keep_while conditions.
                    ?assertEqual(Extra, Extra2),
                    StartingNode = starting_node_in_rev_parent_tree(
                                     ReversedParentTree, CurrentNode),
                    {ok, StartingNode, Extra, FunAcc2};
                {ok, CurrentNode1, Extra2, FunAcc2} ->
                    %% Because of the loop, payload & child list versions may
                    %% have been increased multiple times. We want them to
                    %% increase once for the whole (atomic) operation.
                    CurrentNode2 = squash_version_bumps(
                                     CurrentNode, CurrentNode1),
                    walk_back_up_the_tree(
                      CurrentNode2, ReversedPath, ReversedParentTree,
                      Extra2, FunAcc2);
                Error ->
                    Error
            end
    end;
walk_down_the_tree1(
  #node{} = CurrentNode, [], _,
  ReversedPath, ReversedParentTree, Extra, Fun, FunAcc) ->
    %% The whole pattern has been consumed: `CurrentNode' is a target.
    CurrentPath = lists:reverse(ReversedPath),
    case Fun(CurrentPath, CurrentNode, FunAcc) of
        {ok, keep, FunAcc1} ->
            StartingNode = starting_node_in_rev_parent_tree(
                             ReversedParentTree, CurrentNode),
            {ok, StartingNode, Extra, FunAcc1};
        {ok, remove, FunAcc1} ->
            walk_back_up_the_tree(
              remove, ReversedPath, ReversedParentTree, Extra, FunAcc1);
        {ok, #node{} = CurrentNode1, FunAcc1} ->
            walk_back_up_the_tree(
              CurrentNode1, ReversedPath, ReversedParentTree, Extra, FunAcc1);
        Error ->
            Error
    end.
-spec special_component_to_node_name(
        ?ROOT_NODE | ?THIS_NODE,
        khepri_path:pattern()) ->
    khepri_path:component().
%% @doc Resolves a special path component to the name of the current node.
%%
%% `?THIS_NODE' maps to the last traversed component, or to `?ROOT_NODE'
%% when the walk is still at the root of the tree.
special_component_to_node_name(?ROOT_NODE = NodeName, []) -> NodeName;
special_component_to_node_name(?THIS_NODE, [NodeName | _]) -> NodeName;
special_component_to_node_name(?THIS_NODE, []) -> ?ROOT_NODE.
-spec starting_node_in_rev_parent_tree([tree_node()]) ->
    tree_node().
%% @doc Returns the node the walk started from, i.e. the oldest recorded
%% parent.
%%
%% `ReversedParentTree' is ordered from the deepest parent back to the
%% starting node, so the starting node is the last element. `lists:last/1'
%% reads it directly instead of reversing the whole list first.
%%
%% @private
starting_node_in_rev_parent_tree(ReversedParentTree) ->
    lists:last(ReversedParentTree).

-spec starting_node_in_rev_parent_tree([tree_node()], tree_node()) ->
    tree_node().
%% @doc Like `starting_node_in_rev_parent_tree/1' but falls back on
%% `CurrentNode' when no parent was recorded (the walk never left the
%% starting node).
%%
%% @private
starting_node_in_rev_parent_tree([], CurrentNode) ->
    CurrentNode;
starting_node_in_rev_parent_tree(ReversedParentTree, _) ->
    starting_node_in_rev_parent_tree(ReversedParentTree).
-spec handle_branch(
        tree_node(), khepri_path:component(), tree_node(),
        khepri_path:pattern(), specific_node | many_nodes,
        [tree_node() | {tree_node(), child_created}],
        walk_down_the_tree_extra(),
        walk_down_the_tree_fun(), any()) ->
    ok(tree_node(), walk_down_the_tree_extra(), any()) |
    khepri:error().
%% @doc Handles one child of a node while walking a "many nodes" pattern.
%%
%% The walk descends into `Child' when `Condition' matches it. If the
%% condition may also apply to grandchildren, the walk additionally
%% recurses into the child with the whole pattern kept intact, so deeper
%% descendants get a chance to match too.
%%
%% @private
handle_branch(
  CurrentNode, ChildName, Child,
  [Condition | PathPattern] = WholePathPattern,
  WorkOnWhat, ReversedPath, Extra, Fun, FunAcc) ->
    %% FIXME: A condition such as #if_path_matches{regex = any} at the end of
    %% a path matches non-leaf nodes as well: we should call Fun() for them!
    CondMet = khepri_condition:is_met(
                Condition, ChildName, Child),
    Ret = case CondMet of
              true ->
                  walk_down_the_tree1(
                    Child, PathPattern, WorkOnWhat,
                    [ChildName | ReversedPath],
                    [CurrentNode],
                    Extra, Fun, FunAcc);
              {false, _} ->
                  %% Not an error with a "many nodes" walk: this branch is
                  %% simply skipped.
                  {ok, CurrentNode, Extra, FunAcc}
          end,
    case Ret of
        {ok, CurrentNode1, Extra1, FunAcc1} ->
            case khepri_condition:applies_to_grandchildren(Condition) of
                false ->
                    Ret;
                true ->
                    %% Recurse with the unconsumed pattern so the condition
                    %% is also tried against deeper descendants.
                    walk_down_the_tree1(
                      Child, WholePathPattern, WorkOnWhat,
                      [ChildName | ReversedPath],
                      [CurrentNode1],
                      Extra1, Fun, FunAcc1)
            end;
        Error ->
            Error
    end.
-spec interrupted_walk_down(
        mismatching_node | node_not_found,
        map(),
        khepri_path:pattern(), specific_node | many_nodes,
        khepri_path:path(), [tree_node() | {tree_node(), child_created}],
        walk_down_the_tree_extra(),
        walk_down_the_tree_fun(), any()) ->
    ok(tree_node(), walk_down_the_tree_extra(), any()) |
    khepri:error().
%% @doc Lets the walk callback decide what to do when the descent is
%% blocked by a missing or mismatching node.
%%
%% The callback receives an `{interrupted, Reason, Info}' tuple instead of
%% a node. It may abort with an error, leave the tree alone (`keep' or
%% `remove'), or return a new node, in which case the node is grafted in
%% and the walk resumes.
%%
%% @private
interrupted_walk_down(
  Reason, Info, PathPattern, WorkOnWhat, ReversedPath, ReversedParentTree,
  Extra, Fun, FunAcc) ->
    NodePath = lists:reverse(ReversedPath),
    %% The node is the walk's final target when no real path component
    %% remains once the pattern is resolved.
    IsTarget = khepri_path:realpath(PathPattern) =:= [],
    Info1 = Info#{node_is_target => IsTarget},
    ErrorTuple = {interrupted, Reason, Info1},
    case Fun(NodePath, ErrorTuple, FunAcc) of
        {ok, ToDo, FunAcc1}
          when ToDo =:= keep orelse ToDo =:= remove ->
            ?assertNotEqual([], ReversedParentTree),
            StartingNode = starting_node_in_rev_parent_tree(
                             ReversedParentTree),
            {ok, StartingNode, Extra, FunAcc1};
        {ok, #node{} = NewNode, FunAcc1} ->
            ReversedParentTree1 =
                case Reason of
                    node_not_found ->
                        %% We record the fact the child is a new node. This
                        %% is used to reset the child's stats if it got new
                        %% payload or child nodes at the same time.
                        [{hd(ReversedParentTree), child_created}
                         | tl(ReversedParentTree)];
                    _ ->
                        ReversedParentTree
                end,
            case PathPattern of
                [] ->
                    %% We reached the target node. We could call
                    %% walk_down_the_tree1() again, but it would call Fun() a
                    %% second time.
                    walk_back_up_the_tree(
                      NewNode, ReversedPath, ReversedParentTree1,
                      Extra, FunAcc1);
                _ ->
                    walk_down_the_tree1(
                      NewNode, PathPattern, WorkOnWhat,
                      ReversedPath, ReversedParentTree1,
                      Extra, Fun, FunAcc1)
            end;
        Error ->
            Error
    end.
-spec reset_versions(tree_node()) -> tree_node().
%% @doc Resets both stat versions of a node back to their initial values.
%%
%% Applied to newly created nodes so that bumps made while building them do
%% not leak into their initial stats.
%%
%% @private
reset_versions(#node{stat = Stat} = Node) ->
    Node#node{stat = Stat#{payload_version => ?INIT_DATA_VERSION,
                           child_list_version => ?INIT_CHILD_LIST_VERSION}}.
-spec squash_version_bumps(tree_node(), tree_node()) -> tree_node().
%% @doc Collapses repeated version bumps into at most one increment each.
%%
%% A "many nodes" walk can bump the payload and child-list versions of the
%% same node several times. The whole operation being atomic, each version
%% of `CurrentNode' must grow by at most 1 compared to the original node.
%%
%% @private
squash_version_bumps(
  #node{stat = #{payload_version := DVersion,
                 child_list_version := CVersion}},
  #node{stat = #{payload_version := DVersion,
                 child_list_version := CVersion}} = CurrentNode) ->
    %% No version changed at all: nothing to squash.
    CurrentNode;
squash_version_bumps(
  #node{stat = #{payload_version := OldDVersion,
                 child_list_version := OldCVersion}},
  #node{stat = #{payload_version := NewDVersion,
                 child_list_version := NewCVersion} = Stat} = CurrentNode) ->
    DVersion = case NewDVersion > OldDVersion of
                   true -> OldDVersion + 1;
                   false -> OldDVersion
               end,
    CVersion = case NewCVersion > OldCVersion of
                   true -> OldCVersion + 1;
                   false -> OldCVersion
               end,
    Stat1 = Stat#{payload_version => DVersion,
                  child_list_version => CVersion},
    CurrentNode#node{stat = Stat1}.
-spec walk_back_up_the_tree(
        tree_node() | remove, khepri_path:path(),
        [tree_node() | {tree_node(), child_created}],
        walk_down_the_tree_extra(),
        any()) ->
    ok(tree_node(), walk_down_the_tree_extra(), any()).
%% @private
%% @doc Entry point for walking back up: starts the six-argument variant
%% with an empty keep_while aftermath map.
walk_back_up_the_tree(Child, ReversedPath, ReversedParentTree, Extra, FunAcc) ->
    NoAftermathYet = #{},
    walk_back_up_the_tree(
      Child, ReversedPath, ReversedParentTree, Extra, NoAftermathYet, FunAcc).
-spec walk_back_up_the_tree(
        tree_node() | remove, khepri_path:path(),
        [tree_node() | {tree_node(), child_created}],
        walk_down_the_tree_extra(),
        #{khepri_path:path() => tree_node() | remove},
        any()) ->
    ok(tree_node(), walk_down_the_tree_extra(), any()).
%% @private
%% @doc Propagates a child update (new node, modified node or `remove') back
%% up towards the root, recording in the aftermath map which paths were
%% removed or modified so keep_while conditions can be re-evaluated once the
%% root is reached.

%% Clause 1: the child was removed from its parent.
walk_back_up_the_tree(
  remove,
  [ChildName | ReversedPath] = WholeReversedPath,
  [ParentNode | ReversedParentTree], Extra, KeepWhileAftermath, FunAcc) ->
    %% Evaluate keep_while of nodes which depended on ChildName (it is
    %% removed) at the end of walk_back_up_the_tree().
    Path = lists:reverse(WholeReversedPath),
    KeepWhileAftermath1 = KeepWhileAftermath#{Path => remove},
    %% Evaluate keep_while of parent node on itself right now (its child_count
    %% has changed).
    ParentNode1 = remove_node_child(ParentNode, ChildName),
    handle_keep_while_for_parent_update(
      ParentNode1, ReversedPath, ReversedParentTree,
      Extra, KeepWhileAftermath1, FunAcc);
%% Clause 2: the child is brand new (parent tagged `child_created' while
%% walking down), so nothing can depend on it yet.
walk_back_up_the_tree(
  Child,
  [ChildName | ReversedPath],
  [{ParentNode, child_created} | ReversedParentTree],
  Extra, KeepWhileAftermath, FunAcc) ->
    %% No keep_while to evaluate, the child is new and no nodes depend on it
    %% at this stage.
    %% FIXME: Perhaps there is a condition in a if_any{}?
    Child1 = reset_versions(Child),
    %% Evaluate keep_while of parent node on itself right now (its child_count
    %% has changed).
    ParentNode1 = add_node_child(ParentNode, ChildName, Child1),
    handle_keep_while_for_parent_update(
      ParentNode1, ReversedPath, ReversedParentTree,
      Extra, KeepWhileAftermath, FunAcc);
%% Clause 3: the child existed and was modified in place.
walk_back_up_the_tree(
  Child,
  [ChildName | ReversedPath] = WholeReversedPath,
  [ParentNode | ReversedParentTree],
  Extra, KeepWhileAftermath, FunAcc) ->
    %% Evaluate keep_while of nodes which depend on ChildName (it is
    %% modified) at the end of walk_back_up_the_tree().
    Path = lists:reverse(WholeReversedPath),
    NodeProps = gather_node_props(Child, #{}),
    KeepWhileAftermath1 = KeepWhileAftermath#{Path => NodeProps},
    %% No need to evaluate keep_while of ParentNode, its child_count is
    %% unchanged.
    ParentNode1 = update_node_child(ParentNode, ChildName, Child),
    walk_back_up_the_tree(
      ParentNode1, ReversedPath, ReversedParentTree,
      Extra, KeepWhileAftermath1, FunAcc);
%% Clause 4: back at the actual tree root; time to apply the aftermath.
walk_back_up_the_tree(
  StartingNode,
  [], %% <-- We reached the root (i.e. not in a branch, see handle_branch())
  [], Extra, KeepWhileAftermath, FunAcc) ->
    Extra1 = merge_keep_while_aftermath(Extra, KeepWhileAftermath),
    handle_keep_while_aftermath(StartingNode, Extra1, FunAcc);
%% Clause 5: back at the top of a branch (path not empty); keep_while
%% aftermath is merged but its evaluation is deferred.
walk_back_up_the_tree(
  StartingNode,
  _ReversedPath,
  [], Extra, KeepWhileAftermath, FunAcc) ->
    Extra1 = merge_keep_while_aftermath(Extra, KeepWhileAftermath),
    {ok, StartingNode, Extra1, FunAcc}.
%% @private
%% @doc After a parent node gained or lost a child, re-evaluates the
%% parent's own keep_while condition: either continue walking up with the
%% updated parent, or remove the parent when its condition no longer holds.
handle_keep_while_for_parent_update(
  ParentNode, ReversedPath, ReversedParentTree,
  Extra, KeepWhileAftermath, FunAcc) ->
    ParentPath = lists:reverse(ReversedPath),
    case is_keep_while_condition_met_on_self(ParentPath, ParentNode, Extra) of
        true ->
            %% The parent survives; carry on with the update.
            walk_back_up_the_tree(
              ParentNode, ReversedPath, ReversedParentTree,
              Extra, KeepWhileAftermath, FunAcc);
        {false, _Reason} ->
            %% This parent node must be removed because it doesn't meet its
            %% own keep_while condition. keep_while conditions for nodes
            %% depending on this one will be evaluated with the recursion.
            walk_back_up_the_tree(
              remove, ReversedPath, ReversedParentTree,
              Extra, KeepWhileAftermath, FunAcc)
    end.
%% @private
%% @doc Folds the keep_while aftermath collected during this walk into the
%% one already stored under `keep_while_aftermath' in `Extra'. For a given
%% path, a `remove' entry always wins over node properties.
merge_keep_while_aftermath(Extra, KeepWhileAftermath) ->
    Merged = maps:fold(
               fun(Path, remove, Acc) ->
                       %% A removal overrides any previously recorded props.
                       Acc#{Path => remove};
                  (Path, NodeProps, Acc) ->
                       case Acc of
                           #{Path := remove} -> Acc;
                           _                 -> Acc#{Path => NodeProps}
                       end
               end,
               maps:get(keep_while_aftermath, Extra, #{}),
               KeepWhileAftermath),
    Extra#{keep_while_aftermath => Merged}.
%% @private
%% @doc Once back at the root, applies the accumulated keep_while
%% aftermath: drops conditions owned by removed nodes from the conditions
%% map and its reverse index, then removes nodes whose keep_while
%% conditions are no longer met.

%% Fast path: nothing was recorded during the walk.
handle_keep_while_aftermath(
  Root,
  #{keep_while_aftermath := KeepWhileAftermath} = Extra,
  FunAcc)
  when KeepWhileAftermath =:= #{} ->
    {ok, Root, Extra, FunAcc};
handle_keep_while_aftermath(
  Root,
  #{keep_while_conds := KeepWhileConds,
    keep_while_conds_revidx := KeepWhileCondsRevIdx,
    keep_while_aftermath := KeepWhileAftermath} = Extra,
  FunAcc) ->
    %% Which nodes' keep_while conditions are now unmet?
    ToRemove = eval_keep_while_conditions(
                 KeepWhileAftermath, KeepWhileConds, KeepWhileCondsRevIdx,
                 Root),
    %% Conditions registered by nodes that were themselves removed are
    %% dropped; the reverse index is updated with an empty condition map
    %% (which presumably unregisters the path — helper not visible here).
    {KeepWhileConds1,
     KeepWhileCondsRevIdx1} = maps:fold(
                                fun
                                    (RemovedPath, remove, {KW, KWRevIdx}) ->
                                        KW1 = maps:remove(RemovedPath, KW),
                                        KWRevIdx1 = update_keep_while_conds_revidx(
                                                      KW, KWRevIdx,
                                                      RemovedPath, #{}),
                                        {KW1, KWRevIdx1};
                                    (_, _, Acc) ->
                                        Acc
                                end, {KeepWhileConds, KeepWhileCondsRevIdx},
                                KeepWhileAftermath),
    %% The aftermath is consumed here; store the updated condition maps.
    Extra1 = maps:remove(keep_while_aftermath, Extra),
    Extra2 = Extra1#{keep_while_conds => KeepWhileConds1,
                     keep_while_conds_revidx => KeepWhileCondsRevIdx1},
    ToRemove1 = filter_and_sort_paths_to_remove(ToRemove, KeepWhileAftermath),
    remove_expired_nodes(ToRemove1, Root, Extra2, FunAcc).
%% @private
%% @doc Computes, from the aftermath of the walk, the map of paths
%% (`Path => remove') whose nodes must now be removed because their
%% keep_while conditions are no longer met.
eval_keep_while_conditions(
  KeepWhileAftermath, KeepWhileConds, KeepWhileCondsRevIdx, Root) ->
    %% KeepWhileAftermath lists all nodes which were modified or removed. We
    %% want to transform that into a list of nodes to remove.
    %%
    %% Those marked as `remove' in KeepWhileAftermath are already gone. We
    %% need to find the nodes which depended on them, i.e. their keep_while
    %% condition is not met anymore. Note that removed nodes' child nodes are
    %% gone as well and must be handled (they are not specified in
    %% KeepWhileAftermath).
    %%
    %% Those modified in KeepWhileAftermath must be evaluated again to decide
    %% if they should be removed.
    maps:fold(
      fun
          (RemovedPath, remove, ToRemove) ->
              %% A removal affects every watched path underneath it, hence
              %% the prefix test against the whole reverse index.
              maps:fold(
                fun(Path, Watchers, ToRemove1) ->
                        case lists:prefix(RemovedPath, Path) of
                            true ->
                                eval_keep_while_conditions_after_removal(
                                  Watchers, KeepWhileConds, Root, ToRemove1);
                            false ->
                                ToRemove1
                        end
                end, ToRemove, KeepWhileCondsRevIdx);
          (UpdatedPath, NodeProps, ToRemove) ->
              %% For an update, only watchers registered on that exact path
              %% need re-evaluation.
              case KeepWhileCondsRevIdx of
                  #{UpdatedPath := Watchers} ->
                      eval_keep_while_conditions_after_update(
                        UpdatedPath, NodeProps,
                        Watchers, KeepWhileConds, Root, ToRemove);
                  _ ->
                      ToRemove
              end
      end, #{}, KeepWhileAftermath).
%% @private
%% @doc Re-evaluates, for every watcher of `UpdatedPath', the condition it
%% holds on that path against the new node properties. A watcher is marked
%% for removal only when the whole set of its keep_while conditions fails
%% against the updated tree.
eval_keep_while_conditions_after_update(
  UpdatedPath, NodeProps, Watchers, KeepWhileConds, Root, ToRemove) ->
    maps:fold(
      fun(Watcher, ok, Acc) ->
              KeepWhile = maps:get(Watcher, KeepWhileConds),
              CondOnUpdated = maps:get(UpdatedPath, KeepWhile),
              case khepri_condition:is_met(
                     CondOnUpdated, UpdatedPath, NodeProps) of
                  true ->
                      %% The condition on the updated node still holds.
                      Acc;
                  {false, _} ->
                      %% Double-check the watcher's full condition set
                      %% against the updated tree before condemning it.
                      case are_keep_while_conditions_met(Root, KeepWhile) of
                          true -> Acc;
                          {false, _} -> Acc#{Watcher => remove}
                      end
              end
      end, ToRemove, Watchers).
%% @private
%% @doc After a node removal, marks for removal every watcher whose full
%% keep_while condition set no longer holds against the updated tree.
eval_keep_while_conditions_after_removal(
  Watchers, KeepWhileConds, Root, ToRemove) ->
    maps:fold(
      fun(Watcher, ok, Acc) ->
              KeepWhile = maps:get(Watcher, KeepWhileConds),
              case are_keep_while_conditions_met(Root, KeepWhile) of
                  true -> Acc;
                  {false, _} -> Acc#{Watcher => remove}
              end
      end, ToRemove, Watchers).
%% @private
%% @doc Orders the paths to remove (shallowest first, then
%% lexicographically) and drops those already gone — either because the
%% aftermath says they were removed, or because an ancestor is in the
%% removal set, which takes its descendants with it.
filter_and_sort_paths_to_remove(ToRemove, KeepWhileAftermath) ->
    ByDepthThenName =
        fun(A, B) -> {length(A), A} =< {length(B), B} end,
    SortedPaths = lists:sort(ByDepthThenName, maps:keys(ToRemove)),
    Kept = lists:foldl(
             fun(Path, Acc) ->
                     AlreadyGone = case KeepWhileAftermath of
                                       #{Path := remove} -> true;
                                       _ -> false
                                   end,
                     case AlreadyGone orelse
                          is_parent_being_removed(Path, Acc) of
                         true -> Acc;
                         false -> Acc#{Path => remove}
                     end
             end, #{}, SortedPaths),
    maps:keys(Kept).
%% @private
%% @doc Returns true when any proper ancestor of `Path' is a key of `Map'.
is_parent_being_removed([], _Map) ->
    false;
is_parent_being_removed(Path, Map) ->
    is_parent_being_removed1(lists:reverse(Path), Map).

%% Walk up the ancestry by repeatedly dropping the deepest path component.
is_parent_being_removed1([_Leaf | RevParent], Map) ->
    maps:is_key(lists:reverse(RevParent), Map)
        orelse is_parent_being_removed1(RevParent, Map);
is_parent_being_removed1([], _Map) ->
    false.
%% @private
%% @doc Deletes, one after another, the nodes whose keep_while conditions
%% expired. The single-clause case is deliberate: a failure from
%% do_delete_matching_nodes/3 crashes the walk.
remove_expired_nodes([], Root, Extra, FunAcc) ->
    {ok, Root, Extra, FunAcc};
remove_expired_nodes([Path | Others], Root, Extra, FunAcc) ->
    case do_delete_matching_nodes(Path, Root, Extra) of
        {ok, NewRoot, NewExtra, _} ->
            remove_expired_nodes(Others, NewRoot, NewExtra, FunAcc)
    end.
-ifdef(TEST).
%% Test-only accessors exposing the machine state record's internals.

%% @doc Returns the tree root held in the machine state (test helper).
get_root(#?MODULE{root = Root}) ->
    Root.

%% @doc Returns the registered keep_while conditions (test helper).
get_keep_while_conds(
  #?MODULE{keep_while_conds = KeepWhileConds}) ->
    KeepWhileConds.

%% @doc Returns the keep_while conditions' reverse index (test helper).
get_keep_while_conds_revidx(
  #?MODULE{keep_while_conds_revidx = KeepWhileCondsRevIdx}) ->
    KeepWhileCondsRevIdx.
-endif.
%%--------------------------------------------------------------------
%% Copyright (c) 2020 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(estatsd_protocol).
-include("estatsd.hrl").
-compile(inline).
-compile({inline_size, 150}).
-export([encode/5]).
%%--------------------------------------------------------------------
%% APIs
%%--------------------------------------------------------------------
-spec encode(Type, name(), value(), sample_rate(), tags()) -> iolist()
    when Type :: counter | gauge | gauge_delta | timing | histogram | set.
%% @doc Encodes one StatsD metric line as an iolist:
%% `<name>:<value>|<type>[|@<rate>][|#<k:v>,...]'.
encode(Type, Name, Value, SampleRate, Tags) ->
    NamePart = encode_name(Name),
    ValuePart = encode_value(Type, Value),
    TypePart = encode_type(Type),
    RatePart = encode_sample_rate(SampleRate),
    TagsPart = encode_tags(Tags),
    [NamePart, <<":">>, ValuePart, <<"|">>, TypePart, RatePart, TagsPart].
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
%% Metric names may be atoms, strings or binaries; normalize them with
%% to_string/1 (which rejects any other type).
encode_name(Name) ->
    to_string(Name).
%% gauge_delta increments carry an explicit leading "+" so the server
%% treats the value as a delta rather than an absolute gauge value.
encode_value(gauge_delta, Value) when Value >= 0 ->
    [<<"+">>, encode_value(Value)];
%% Absolute gauges, timings and sets cannot be negative.
encode_value(Type, Value)
  when (Type =:= gauge orelse Type =:= timing orelse Type =:= set),
       Value < 0 ->
    error({bad_value, Value});
encode_value(_Type, Value) ->
    encode_value(Value).

%% Renders the numeric value; floats are emitted with two decimals.
encode_value(Value) when is_integer(Value) ->
    integer_to_list(Value);
encode_value(Value) when is_float(Value) ->
    float_to_list(Value, [{decimals, 2}]).
%% Maps a metric type to its StatsD wire token. Note both gauge and
%% gauge_delta serialize to "g"; unknown types are rejected.
encode_type(Type) ->
    case Type of
        counter     -> <<"c">>;
        gauge       -> <<"g">>;
        gauge_delta -> <<"g">>;
        timing      -> <<"ms">>;
        histogram   -> <<"h">>;
        set         -> <<"s">>;
        _           -> error({bad_type, Type})
    end.
%% A sample rate of exactly 1 (sample everything) is omitted from the wire
%% format; rates above 1 make no sense and are rejected.
%% NB: `== 1' deliberately accepts both the integer 1 and the float 1.0.
encode_sample_rate(SampleRate) ->
    if
        SampleRate > 1 ->
            error({bad_sample_rate, SampleRate});
        SampleRate == 1 ->
            [];
        true ->
            [<<"|@">>, float_to_list(SampleRate, [compact, {decimals, 6}])]
    end.
%% Renders tags as `|#key:value,key:value'; an empty tag list adds nothing.
%% The list is reversed first so the accumulator-based loop preserves the
%% caller's tag order.
encode_tags(Tags) ->
    render_tags(lists:reverse(Tags), []).

render_tags([], []) ->
    [];
render_tags([], Rendered) ->
    [<<"|#">> | Rendered];
render_tags([{Key, Value} | Rest], []) ->
    %% Last tag (first consumed): no trailing comma.
    render_tags(Rest, [to_string(Key), <<":">>, to_string(Value)]);
render_tags([{Key, Value} | Rest], Rendered) ->
    render_tags(Rest, [to_string(Key), <<":">>, to_string(Value), <<",">> | Rendered]).
%% Normalizes a name or tag part to chardata. Atoms become UTF-8 binaries;
%% lists and binaries pass through unchanged; anything else is rejected.
to_string(Bin) when is_binary(Bin) ->
    Bin;
to_string(List) when is_list(List) ->
    List;
to_string(Atom) when is_atom(Atom) ->
    atom_to_binary(Atom, utf8);
to_string(Other) ->
    error({bad_data_type, Other}).
%% @doc Shannon Entropy Algorithm
%%
%% Erlang implementation of the Shannon Entropy Algorithm:
%% https://en.wiktionary.org/wiki/Shannon_entropy
%%
%% @end
-module(shannon_entropy).
-export([calculate/1]).
%% -----------------------------------------------------------------------
%% API Functions
%% -----------------------------------------------------------------------
-spec calculate(Input :: integer() | string() | binary()) -> Float :: float().
%% @doc Computes the Shannon entropy of the given input. Integers and
%% strings are first normalized to a binary, then entropy is computed over
%% the distribution of its bytes.
calculate(Input) when is_integer(Input) ->
    calculate(integer_to_binary(Input));
calculate(Input) when is_list(Input) ->
    calculate(iolist_to_binary(Input));
calculate(Input) ->
    %% Byte counts are kept in first-appearance order so the floating-point
    %% summation order (and thus the exact result) is deterministic.
    Counts = count_bytes(Input, []),
    Total = byte_size(Input),
    -1 * lists:sum([entropy_term(Count, Total) || {_Byte, Count} <- Counts]).

%% -----------------------------------------------------------------------
%% Private Functions
%% -----------------------------------------------------------------------

%% Tallies occurrences of each byte into an ordered {Byte, Count} list.
count_bytes(<<>>, Counts) ->
    Counts;
count_bytes(<<Byte:8, Rest/binary>>, Counts) ->
    Seen = case lists:keyfind(Byte, 1, Counts) of
               false -> 1;
               {Byte, N} -> N + 1
           end,
    count_bytes(Rest, lists:keystore(Byte, 1, Counts, {Byte, Seen})).

%% One p*log2(p) term of the entropy sum, p = Count/Total.
entropy_term(Count, Total) ->
    P = float(Count) / Total,
    P * math:log2(P).
%% -----------------------------------------------------------------------
%% Unit Tests
%% -----------------------------------------------------------------------
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% EUnit's ?_assertEqual(Expect, Expr) takes the EXPECTED value first; the
%% original had the arguments swapped, which made failure reports label the
%% computed value as "expected" and vice versa. The tested values are
%% unchanged.
calculate_test_() ->
    [
     ?_assertEqual(
        2.7942086837942446,
        calculate("122333444455555666666777777788888888")
     ),
     ?_assertEqual(
        2.7942086837942446,
        calculate(563881467447538846567288767728553786)
     ),
     ?_assertEqual(
        4.056198332810094,
        calculate("https://www.reddit.com/r/dailyprogrammer")
     ),
     ?_assertEqual(
        3.8667292966721747,
        calculate("int main(int argc, char *argv[])")
     )
    ].
-endif.
% @doc `rebar3 hex build'
%
%% Builds a new local version of your package.
%%
%% By default this provider will build both a package tarball and docs tarball.
%%
%% The package and docs .tar files are created in the current directory, but is not pushed to the repository. An app
%% named foo at version 1.2.3 will be built as foo-1.2.3.tar. Likewise the docs .tar would be built as
%% foo-1.2.4-docs.tar.
%%
%% ```shell
%% $ rebar3 hex build
%% '''
%%
%% You may also build only a package or docs tarball utilizing the same available command line options.
%%
%% ``` shell
%% $ rebar3 hex build package
%% '''
%%
%% ```
%% $ rebar3 hex build docs
%% '''
%%
%% <h2>Configuration</h2>
%% Packages are configured via `src/<myapp>.app.src' attributes.
%%
%% == Required configuration ==
%%
%% <ul>
%% <li> `application' - application name. This is required per Erlang/OTP thus it should always be present anyway.</li>
%% <li> `vsn' - must be a valid <a href="http://semver.org/">semantic version</a> identifier.</li>
%% <li>`licenses' - A list of licenses the project is licensed under. This attribute is required. A valid
%% [spdx](https://spdx.org/licenses/) is expected.</li>
%% </ul>
%%
%% == Optional configuration ==
%% In addition, the following meta attributes are supported and highly recommended :
%%
%% <ul>
%% <li> `description' - a brief description about your application.
%% <li>`pkg_name' - The name of the package in case you want to publish the package with a different name than the
%% application name.</li>
%% <li>`links' - A map where the key is a link name and the value is the link URL. Optional but highly recommended.
%% <li> `files' - A list of files and directories to include in the package. Defaults to standard project directories,
%% so you usually don't need to set this property.</li>
%% <li> `include_paths' - A list of paths containing files you wish to include in a release. </li>
%% <li> `exclude_paths' - A list of paths containing files you wish to exclude in a release. </li>
%% <li> `exclude_patterns' - A list of regular expressions used to exclude files that may have been accumulated via
%% `files' and `include_paths' and standard project paths.
%% <li> `build_tools' - List of build tools that can build the package. It's very rare that you need to set this. </li>
%% </ul>
%%
%% Below is an example :
%%
%% ```erlang
%% {application, myapp,
%% [{description, "An Erlang/OTP application"},
%% {vsn, "0.1.0"},
%% {modules, []},
%% {registered, []},
%%   {applications, [kernel,
%%                   stdlib
%%                  ]},
%% {licenses, ["Apache-2.0"]},
%% {links, [{"GitHub", "https://github.com/my_name/myapp"}]}]}.
%% ```
%%
%% <h2> Command line options </h2>
%%
%% <ul>
%% <li> `-r', `--repo' - Specify the repository to use in the task. This option is required when
%% you have multiple repositories configured, including organizations. The argument must
%% be a fully qualified repository name (e.g, `hexpm', `hexpm:my_org', `my_own_hexpm').
%% Defaults to `hexpm'.
%% </li>
%% <li> `-u', `--unpack' - Builds the tarball and unpacks contents into a directory. Useful for making sure the tarball
%% contains all needed files before publishing. See --output below for setting the output path.
%% </li>
%% <li> `-o', `--output' - Sets output path. When used with --unpack it means the directory
%% (Default: <app>-<version>). Otherwise, it specifies tarball path (Default: <app>-<version>.tar).
%% Artifacts will be written to `_build/<profile>/lib/<your_app>/' by default.
%% </li>
%% </ul>
-module(rebar3_hex_build).
-export([create_package/3, create_docs/3, create_docs/4]).
-include("rebar3_hex.hrl").
-define(DEFAULT_FILES, [
"src",
"c_src",
"include",
"rebar.config.script",
"priv",
"rebar.config",
"rebar.lock",
"CHANGELOG*",
"changelog*",
"README*",
"readme*",
"LICENSE*",
"license*",
"NOTICE"
]).
-define(DEPS, [{default, compile}, {default, lock}]).
-define(PROVIDER, build).
-define(DEFAULT_DOC_DIR, "doc").
-export([
init/1,
do/1,
format_error/1
]).
%% ===================================================================
%% Public API
%% ===================================================================
%% @private
%% @doc Registers the `hex build' provider with rebar3. The option help
%% strings previously read "HALP!" (placeholders); they now describe the
%% switches documented in the module header.
-spec init(rebar_state:t()) -> {ok, rebar_state:t()}.
init(State) ->
    Provider = providers:create([
        {name, ?PROVIDER},
        {module, ?MODULE},
        {namespace, hex},
        {bare, true},
        {deps, ?DEPS},
        {example, "rebar3 hex build"},
        {short_desc, "Builds a new local version of your package and docs."},
        {desc, "Builds a new local version of your package and docs."},
        {opts, [
            rebar3_hex:repo_opt(),
            {app, $a, "app", {string, undefined},
             "Specify the app to build within an umbrella project."},
            {output_dir, $o, "output", {string, undefined},
             "Sets output path. When used with --unpack it means the "
             "directory (Default: <app>-<version>). Otherwise, it "
             "specifies a tarball path (Default: <app>-<version>.tar)."},
            {unpack, $u, "unpack", {boolean, false},
             "Builds the tarball and unpacks contents into a directory. "
             "Useful for making sure the tarball contains all needed "
             "files before publishing."}
        ]}
    ]),
    State1 = rebar_state:add_provider(State, Provider),
    {ok, State1}.
%% @private
%% @doc Provider entry point: resolves the task context (repo, apps and
%% parsed arguments) then dispatches to handle_task/1; configuration
%% errors are raised so rebar3 reports them via format_error/1.
-spec do(rebar_state:t()) -> {ok, rebar_state:t()}.
do(State) ->
    case rebar3_hex:task_state(State) of
        {ok, Task} ->
            handle_task(Task);
        {error, Reason} ->
            ?RAISE(Reason)
    end.
%% @private
%% @doc Renders errors raised by this provider into user-facing text
%% (invoked by rebar3 when a task raises).
-spec format_error(any()) -> iolist().
format_error({build_package, Error}) when is_list(Error) ->
    io_lib:format("Error building package : ~ts", [Error]);
format_error({build_docs, {error, no_doc_config}}) ->
    no_doc_config_messsage();
format_error({build_docs, {error, {doc_provider_not_found, PrvName}}}) ->
    doc_provider_not_found(PrvName);
format_error({build_docs, {error, missing_doc_index}}) ->
    doc_missing_index_message();
format_error({build_docs, Error}) when is_list(Error) ->
    io_lib:format("Error building docs : ~ts", [Error]);
format_error(app_switch_required) ->
    "--app switch is required when building packages or docs in a umbrella with multiple apps";
%% Anything else is delegated to the shared rebar3_hex error formatter.
format_error(Reason) ->
    rebar3_hex_error:format_error(Reason).
%% Warning/error texts shared by handle_task/1 and format_error/1.
%% (NB: the "messsage" typo in the function name is kept on purpose — the
%% name is referenced from several places in this module.)
no_doc_config_messsage() ->
    "No doc provider has been specified in your hex config.\n"
    "Be sure to add a doc provider to the hex config you rebar configuration file.\n\n"
    "Example : {hex, [{doc, ex_doc}]\n".

%% Shown when the doc provider ran but produced no index.html.
doc_missing_index_message() ->
    "An index.html file was not found in docs after running docs provider.\n"
    "Be sure the docs provider is configured correctly and double check it by running it on its own\n".

%% Shown when the configured doc provider is not a known rebar3 provider.
doc_provider_not_found(Provider) ->
    io_lib:format("The doc provider ~ts specified in your hex config could not be found", [Provider]).
%% Building requires knowing which app to build; in an umbrella with
%% several apps the user must pick one (--app is parsed upstream into the
%% `apps' list — TODO confirm against rebar3_hex:task_state/1).
handle_task(#{apps := [_,_|_]}) ->
    ?RAISE(app_switch_required);
%% `rebar3 hex build docs': build only the docs tarball.
handle_task(#{state := State, repo := Repo, apps := [App], args := #{task := docs} = Args}) ->
    case create_docs(State, Repo, App) of
        {ok, Docs} ->
            AbsDir = write_or_unpack(App, Docs, Args),
            rebar3_hex_io:say("Your docs can be inspected at ~ts", [AbsDir]),
            {ok, State};
        Error ->
            ?RAISE({build_docs, Error})
    end;
%% `rebar3 hex build package': build only the package tarball.
handle_task(#{state := State, repo := Repo, apps := [App], args := #{task := package} = Args}) ->
    case create_package(State, Repo, App) of
        {ok, Pkg} ->
            AbsDir = write_or_unpack(App, Pkg, Args),
            rebar3_hex_io:say("Your package contents can be inspected at ~ts", [AbsDir]),
            {ok, State};
        Error ->
            ?RAISE({build_package, Error})
    end;
%% Plain `rebar3 hex build': build the package, then the docs. Doc
%% configuration problems only warn here, since the package part already
%% succeeded; hard doc build failures still raise.
handle_task(#{state := State, repo := Repo, apps := [App], args := Args}) ->
    case create_package(State, Repo, App) of
        {ok, Pkg} ->
            AbsOutput = write_or_unpack(App, Pkg, Args),
            rebar3_hex_io:say("Your package tarball is available at ~ts", [AbsOutput]),
            case create_docs(State, Repo, App) of
                {ok, Docs} ->
                    AbsFile = write_or_unpack(App, Docs, Args),
                    rebar3_hex_io:say("Your docs tarball is available at ~ts", [AbsFile]),
                    {ok, State};
                {error, no_doc_config} ->
                    rebar_api:warn(no_doc_config_messsage(), []),
                    {ok, State};
                {error, {doc_provider_not_found, PrvName}} ->
                    rebar_api:warn(doc_provider_not_found(PrvName), []),
                    {ok, State};
                {error, missing_doc_index} ->
                    rebar_api:warn(doc_missing_index_message(), []),
                    {ok, State};
                Error ->
                    ?RAISE({build_docs, Error})
            end;
        Error ->
            ?RAISE({build_package, Error})
    end.
%% Returns the artifact name for a build: `<name>-<vsn>[-docs][.tar]'.
%% With --unpack the artifact is a directory, so the ".tar" suffix is
%% dropped.
output_path(docs, Name, Version, Args) ->
    formatted("~ts-~ts-docs", Name, Version, Args);
output_path(package, Name, Version, Args) ->
    formatted("~ts-~ts", Name, Version, Args).

%% Appends ".tar" unless the artifact is going to be unpacked.
formatted(Format, Name, Version, #{unpack := true}) ->
    io_lib:format(Format, [Name, Version]);
formatted(Format, Name, Version, _Args) ->
    io_lib:format(Format ++ ".tar", [Name, Version]).
%% Writes the tarball to disk or — when --unpack was given — unpacks its
%% contents into a directory, returning the absolute path of the result.
%% NOTE(review): the results of file:make_dir/1, hex_tarball:unpack*/2 and
%% file:write_file/2 are discarded, so I/O failures here are silent —
%% confirm this is intentional.
write_or_unpack(App, #{type := Type, tarball := Tarball, name := Name, version := Version}, Args) ->
    OutputDir = output_dir(App, Args),
    Out = output_path(Type, Name, Version, Args),
    AbsOut = filename:join(OutputDir, Out),
    case Args of
        #{unpack := true} ->
            file:make_dir(AbsOut),
            case Type of
                docs ->
                    hex_tarball:unpack_docs(Tarball, AbsOut);
                package ->
                    hex_tarball:unpack(Tarball, AbsOut)
            end;
        _ ->
            file:write_file(AbsOut, Tarball)
    end,
    AbsOut.
%% We are exploiting a feature of ensure_dir that creates all directories
%% up to the last element in the filename, then ignores that last element.
%% This way we ensure that the dir is created and we do not have any
%% worries about path names.

%% Resolves (and creates) the directory build artifacts are written to:
%% the user-supplied --output path, or `<app out_dir>/hex' by default.
output_dir(App, #{output_dir := undefined}) ->
    default_output_dir(App);
output_dir(_App, #{output_dir := Output}) ->
    %% Fix: return the absolute output directory itself. The previous
    %% version returned the "<output>/tmp" probe path, which only exists
    %% to make filelib:ensure_dir/1 create <output>, so artifacts ended
    %% up under an unexpected "tmp" subdirectory.
    AbsDir = filename:absname(Output),
    filelib:ensure_dir(filename:join(AbsDir, "tmp")),
    AbsDir;
output_dir(App, _) ->
    default_output_dir(App).

%% Default artifact directory, created on demand.
default_output_dir(App) ->
    Dir = filename:join([rebar_app_info:out_dir(App), "hex"]),
    filelib:ensure_dir(filename:join(Dir, "tmp")),
    Dir.
%% @doc Assembles the package tarball for `App': metadata comes from the
%% .app.src attributes, requirements from the lock file (tightened by any
%% explicit rebar.config constraint, see update_versions/2) and the file
%% list from include_files/3.
create_package(State, #{name := RepoName} = _Repo, App) ->
    Name = rebar_app_info:name(App),
    Version = rebar3_hex_app:vcs_vsn(State, App),
    %% Rewrites the .app.src so it carries the resolved version.
    {application, _, AppDetails} = rebar3_hex_file:update_app_src(App, Version),
    LockDeps = rebar_state:get(State, {locks, default}, []),
    case rebar3_hex_app:get_deps(LockDeps) of
        {ok, TopLevel} ->
            AppDir = rebar_app_info:dir(App),
            Config = rebar_config:consult(AppDir),
            ConfigDeps = proplists:get_value(deps, Config, []),
            Deps1 = update_versions(ConfigDeps, TopLevel),
            Description = proplists:get_value(description, AppDetails, ""),
            PackageFiles = include_files(Name, AppDir, AppDetails),
            Licenses = proplists:get_value(licenses, AppDetails, []),
            Links = proplists:get_value(links, AppDetails, []),
            BuildTools = proplists:get_value(build_tools, AppDetails, [<<"rebar3">>]),
            %% We check the app file for the 'pkg' key which allows us to select
            %% a package name other then the app name, if it is not set we default
            %% back to the app name.
            PkgName = binarify(proplists:get_value(pkg_name, AppDetails, Name)),
            Optional = [
                {<<"app">>, Name},
                {<<"parameters">>, []},
                {<<"description">>, binarify(Description)},
                {<<"files">>, [binarify(File) || {File, _} <- PackageFiles]},
                {<<"licenses">>, binarify(Licenses)},
                {<<"links">>, to_map(binarify(Links))},
                {<<"build_tools">>, binarify(BuildTools)}
            ],
            %% Empty optional values are dropped from the metadata entirely.
            OptionalFiltered = [{Key, Value} || {Key, Value} <- Optional, Value =/= []],
            Metadata = maps:from_list([
                {<<"name">>, PkgName},
                {<<"version">>, binarify(Version)},
                {<<"requirements">>, maps:from_list(Deps1)}
                | OptionalFiltered
            ]),
            case create_package_tarball(Metadata, PackageFiles) of
                {error, _} = Err ->
                    Err;
                Tarball ->
                    Package = #{
                        type => package,
                        name => PkgName,
                        repo_name => RepoName,
                        deps => Deps1,
                        version => Version,
                        metadata => Metadata,
                        files => PackageFiles,
                        tarball => Tarball,
                        has_checkouts => has_checkouts(State)
                    },
                    {ok, Package}
            end;
        Error ->
            Error
    end.
%% Replaces each locked dependency's requirement with the explicit
%% requirement from rebar.config when one exists; otherwise the locked
%% version is loosened by prefixing "~>".
update_versions(ConfigDeps, LockDeps) ->
    [{Name, updated_requirement(Name, Meta, ConfigDeps)}
     || {Name, Meta} <- LockDeps].

updated_requirement(Name, Meta, ConfigDeps) ->
    Requirement =
        case lists:keyfind(binary_to_atom(Name, utf8), 1, ConfigDeps) of
            {_, Vsn} when is_binary(Vsn) ->
                Vsn;
            {_, Vsn} when is_list(Vsn) ->
                binarify(Vsn);
            _ ->
                %% using version from lock. prepend ~> to make it looser
                {_, LockedVsn} = lists:keyfind(<<"requirement">>, 1, Meta),
                <<"~>", LockedVsn/binary>>
        end,
    maps:from_list(
      lists:keyreplace(<<"requirement">>, 1, Meta,
                       {<<"requirement">>, Requirement})).
%% Builds the {RelativePath, AbsolutePath} list of files going into the
%% package, and injects a freshly rendered .app.src (reflecting the
%% resolved version) instead of the one on disk.
include_files(Name, AppDir, AppDetails) ->
    AppSrc = {application, to_atom(Name), AppDetails},
    FilePaths = proplists:get_value(files, AppDetails, ?DEFAULT_FILES),
    %% In versions prior to v7 the name of the for including paths and excluding paths was include_files and
    %% exclude_files. We don't document this anymore, but we do support it to avoid breaking changes. However,
    %% users should be instructed to use *_paths. Likewise for exclude_regexps which is now documented as
    %% exclude_patterns.
    IncludePaths = proplists:get_value(include_paths, AppDetails, proplists:get_value(include_files, AppDetails, [])),
    ExcludePaths = proplists:get_value(exclude_paths, AppDetails, proplists:get_value(exclude_files, AppDetails, [])),
    ExcludeRes = proplists:get_value(exclude_patterns, AppDetails, proplists:get_value(exclude_regexps, AppDetails, [])),
    %% ukeysort on the absolute path deduplicates overlapping globs.
    AllFiles = lists:ukeysort(2, rebar3_hex_file:expand_paths(FilePaths, AppDir)),
    IncludeFiles = lists:ukeysort(2, rebar3_hex_file:expand_paths(IncludePaths, AppDir)),
    ExcludeFiles = lists:ukeysort(2, rebar3_hex_file:expand_paths(ExcludePaths, AppDir)),
    %% We filter first and then include, that way glob excludes can be
    %% overwritten be explict includes
    FilterExcluded = lists:filter(
        fun({_, Path}) ->
            not exclude_file(Path, ExcludeFiles, ExcludeRes)
        end,
        AllFiles
    ),
    WithIncludes = lists:ukeymerge(2, FilterExcluded, IncludeFiles),
    %% The .app.src content is regenerated from AppDetails, not read from
    %% disk, so the packaged file carries the resolved version.
    AppFileSrc = filename:join("src", rebar_utils:to_list(Name) ++ ".app.src"),
    AppSrcBinary = binarify(lists:flatten(io_lib:format("~tp.\n", [AppSrc]))),
    lists:keystore(AppFileSrc, 1, WithIncludes, {AppFileSrc, AppSrcBinary}).
%% True when Path is explicitly excluded or matches an exclude pattern.
exclude_file(Path, ExcludeFiles, ExcludeRe) ->
    lists:keymember(Path, 2, ExcludeFiles) orelse
        known_exclude_file(Path, ExcludeRe).

%% True when Path matches one of the built-in "never package these"
%% patterns or a user-supplied exclude regexp.
known_exclude_file(Path, ExcludeRe) ->
    KnownExcludes = [
        %% emacs temp files
        "~$",
        %% c object files
        "\\.o$",
        %% compiled nif libraries
        "\\.so$",
        %% vim swap files
        "\\.swp$"
    ],
    %% lists:any/2 expresses "first match wins" directly and short-circuits
    %% the regexp scans, unlike the previous manual lists:foldl/3.
    lists:any(
        fun(RE) -> re:run(Path, RE) =/= nomatch end,
        KnownExcludes ++ ExcludeRe).
%% Returns a boolean: whether the project has a _checkouts directory.
%% (The previous comment here claimed a list is returned, which was wrong.)
has_checkouts(State) ->
    filelib:is_dir(rebar_dir:checkouts_dir(State)).
%% @doc Builds the docs tarball using the doc provider configured in the
%% hex settings (see doc_opts/2).
create_docs(State, Repo, App) ->
    create_docs(State, Repo, App, #{doc_dir => undefined}).

-dialyzer({nowarn_function, create_docs/4}).
%% @doc Like create_docs/3, but `Args' may force the directory the docs
%% are read from (`doc_dir') instead of running a doc provider.
create_docs(State, Repo, App, Args) ->
    case maybe_gen_docs(State, Repo, App, Args) of
        {ok, DocDir} ->
            %% Refuse to package docs that lack an index.html entry point.
            case docs_detected(DocDir) of
                true ->
                    AppDir = rebar_app_info:dir(App),
                    AppDetails = rebar_app_info:app_details(App),
                    Files = rebar3_hex_file:expand_paths([DocDir], AppDir),
                    Name = rebar_utils:to_list(rebar_app_info:name(App)),
                    PkgName = rebar_utils:to_list(proplists:get_value(pkg_name, AppDetails, Name)),
                    OriginalVsn = rebar_app_info:original_vsn(App),
                    Vsn = rebar_utils:vcs_vsn(App, OriginalVsn, State),
                    %% Strip the doc directory prefix so paths inside the
                    %% tarball are relative to the docs root.
                    FileList = [
                        {filename:join(filename:split(ShortName) -- [DocDir]), FullName}
                     || {ShortName, FullName} <- Files
                    ],
                    case create_docs_tarball(FileList) of
                        {ok, Tarball} ->
                            {ok, #{
                                type => docs, tarball => Tarball, name => binarify(PkgName), version => binarify(Vsn)
                            }};
                        {error, Reason} ->
                            {error, hex_tarball:format_error(Reason)};
                        Err ->
                            Err
                    end;
                false ->
                    {error, missing_doc_index}
            end;
        {error, _} = Err ->
            Err;
        %% Anything else from maybe_gen_docs/4 is wrapped as an error.
        Err ->
            {error, Err}
    end.
%% @private
%% Determine the directory containing generated docs. An explicit
%% doc_dir in Args wins and is taken as-is (relative to the app dir);
%% otherwise the configured doc provider (e.g. edoc) is run and its
%% output directory resolved.
maybe_gen_docs(_State, _Repo, App, #{doc_dir := DocDir}) when is_list(DocDir) ->
AppDir = rebar_app_info:dir(App),
{ok, filename:absname(filename:join(AppDir, DocDir))};
maybe_gen_docs(State, Repo, App, _Args) ->
case doc_opts(State, Repo) of
{ok, PrvName} ->
case providers:get_provider(PrvName, rebar_state:providers(State)) of
not_found ->
{error, {doc_provider_not_found, PrvName}};
Prv ->
case providers:do(Prv, State) of
{ok, _State1} ->
{ok, resolve_dir(App, PrvName)};
_ ->
{error, {doc_provider_failed, PrvName}}
end
end;
_ ->
{error, no_doc_config}
end.
%% @private
%% Resolve the output directory of the given doc provider from the
%% app options, falling back to ?DEFAULT_DOC_DIR when unconfigured.
resolve_dir(App, PrvName) ->
AppDir = rebar_app_info:dir(App),
AppOpts = rebar_app_info:opts(App),
DocOpts =
case PrvName of
edoc ->
%% edoc keeps its settings under the edoc_opts key
rebar_opts:get(AppOpts, edoc_opts, []);
_ ->
%% other providers are assumed to use their own name as the
%% options key — TODO confirm for ex_doc-style providers
rebar_opts:get(AppOpts, PrvName, [])
end,
DocDir = proplists:get_value(dir, DocOpts, ?DEFAULT_DOC_DIR),
filename:absname(filename:join(AppDir, DocDir)).
%% @private
%% A generated docs directory is only usable when it contains an
%% index.html entry point.
docs_detected(DocDir) ->
    %% filename:join/2 normalises path separators instead of blindly
    %% appending "/", which also tolerates a trailing slash in DocDir.
    filelib:is_file(filename:join(DocDir, "index.html")).
%% @private
%% Find the doc provider name. Repo-level doc configuration takes
%% precedence over the project's {hex, [{doc, ...}]} rebar config.
%% Returns {ok, ProviderName} or undefined.
doc_opts(State, Repo) ->
case Repo of
#{doc := #{provider := PrvName}} when is_atom(PrvName) ->
{ok, PrvName};
_ ->
Opts = rebar_state:opts(State),
case proplists:get_value(doc, rebar_opts:get(Opts, hex, []), undefined) of
undefined -> undefined;
%% the config value may be a bare provider atom or a map
PrvName when is_atom(PrvName) -> {ok, PrvName};
#{provider := PrvName} -> {ok, PrvName};
_ -> undefined
end
end.
%% Recursively convert a term into the shape hex metadata expects:
%% atoms become UTF-8 binaries, printable strings become binaries,
%% and containers are converted element by element. Booleans and all
%% remaining terms (numbers, binaries, ...) pass through unchanged.
%% NOTE: booleans must be checked before the atom clause, since
%% true/false are atoms.
binarify(Bool) when is_boolean(Bool) ->
    Bool;
binarify(Atom) when is_atom(Atom) ->
    atom_to_binary(Atom, utf8);
binarify([]) ->
    [];
binarify(Map) when is_map(Map) ->
    maps:from_list([{binarify(K), binarify(V)} || {K, V} <- maps:to_list(Map)]);
binarify(List) when is_list(List) ->
    case io_lib:printable_unicode_list(List) of
        true -> rebar_utils:to_binary(List);
        false -> lists:map(fun binarify/1, List)
    end;
binarify({Key, Value}) ->
    {binarify(Key), binarify(Value)};
binarify(Other) ->
    Other.
-dialyzer({nowarn_function, create_package_tarball/2}).
%% @private
%% Wrap hex_tarball:create/2.
%% NOTE(review): on success this returns the bare Tarball, while
%% create_docs_tarball/1 returns {ok, Tarball} — callers must account
%% for the asymmetry; consider unifying the two.
create_package_tarball(Metadata, Files) ->
case hex_tarball:create(Metadata, Files) of
{ok, #{tarball := Tarball, inner_checksum := _Checksum}} ->
Tarball;
{error, Reason} ->
{error, hex_tarball:format_error(Reason)};
Error ->
Error
end.
-dialyzer({nowarn_function, create_docs_tarball/1}).
%% @private
%% Wrap hex_tarball:create_docs/1, passing {ok, Tarball} through and
%% forwarding any other result (e.g. {error, _}) unchanged.
create_docs_tarball(Files) ->
case hex_tarball:create_docs(Files) of
{ok, Tarball} ->
{ok, Tarball};
Error ->
Error
end.
%% Coerce a value to an already-existing atom. Strings are resolved
%% with list_to_existing_atom/1, so untrusted input cannot grow the
%% atom table; other term types are first rendered as a string.
-spec to_atom(atom() | string() | binary() | integer() | float()) ->
    atom().
to_atom(Atom) when is_atom(Atom) ->
    Atom;
to_atom(Str) when is_list(Str) ->
    list_to_existing_atom(Str);
to_atom(Other) ->
    to_atom(rebar_utils:to_list(Other)).
%% Accept configuration either as a map (returned as-is) or as a
%% proplist (converted with maps:from_list/1).
to_map(Map) when is_map(Map) ->
    Map;
to_map(KVs) when is_list(KVs) ->
    maps:from_list(KVs).
%%%
%%% Copyright (c) 2018, 2020 <NAME>
%%% All rights reserved.
%%% Distributed under the terms of the MIT License. See the LICENSE file.
%%%
%%% @doc
%%% SIP branch related functions
%%%
-module(ersip_branch).
-export([make/1,
make_rfc3261/1,
make_random/1,
make_key/1,
assemble/1,
assemble_bin/1,
is_rfc3261/1
]).
%%===================================================================
%% Types
%%===================================================================
-type branch() :: {branch, binary()}.
-type branch_key() :: {branch_key, binary()}.
-export_type([branch/0,
branch_key/0
]).
%%===================================================================
%% API
%%===================================================================
%% @doc Create branch from binary.
%% Raises error if parameter cannot be accepted as branch.
%% Examples:
%% ```
%% Branch = ersip_branch:make(<<"z9hG4bKmCy89eBuJlR1GMv">>)
%% ersip_branch:make(<<"1,2">>). % => raises error
%% '''
-spec make(binary()) -> branch().
make(Bin) when is_binary(Bin) ->
%% The branch value must be a valid SIP token.
case ersip_parser_aux:check_token(Bin) of
true -> {branch, Bin};
false -> error({invalid_branch, Bin})
end.
%% @doc Create RFC3261-compatible (with magic-cookie) branch from binary.
%% Raises error if parameter cannot be accepted as branch.
%% Examples:
%% ```
%% Branch = ersip_branch:make_rfc3261(<<"Cy89eBuJlR1GMv">>),
%% Branch = ersip_branch:make_rfc3261(<<"z9hG4bKCy89eBuJlR1GMv">>). % The same as above
%% ersip_branch:make_rfc3261(<<"1,2">>). % => raises error
%% '''
-spec make_rfc3261(binary()) -> branch().
make_rfc3261(Bin) ->
%% Prepend the magic cookie only when it is not already present.
case is_rfc3261({branch, Bin}) of
true -> make(Bin);
false -> make(<<"z9hG4bK", Bin/binary>>)
end.
%% @doc Create random RFC3261-compatible branch.
%% @param NumBytes defines number bits of entropy that used in random.
-spec make_random(NumBytes :: pos_integer()) -> branch().
make_random(NumBytes) ->
make_rfc3261(ersip_id:alphanum(crypto:strong_rand_bytes(NumBytes))).
%% @doc Create comparable key for branch parameter.
%% After make_key(Branch) can be used in ets/maps as key to find
%% transaction. Branch comparison is case-insensitive, hence the
%% lowercasing; applying make_key to a key is a no-op.
-spec make_key(branch()) -> branch_key().
make_key({branch, Bin}) ->
{branch_key, ersip_bin:to_lower(Bin)};
make_key({branch_key, _} = Key) ->
Key.
%% @doc Serialize branch value to binary.
-spec assemble(branch()) -> binary().
assemble({branch, Bin}) ->
Bin.
%% @doc Serialize branch value to binary.
-spec assemble_bin(branch()) -> binary().
assemble_bin({branch, Bin}) ->
Bin.
%% @doc Check that branch was generated by RFC3261-compatible
%% implementation, i.e. that it starts with the z9hG4bK magic cookie
%% (lowercased for keys, since keys are lowercased by make_key/1).
-spec is_rfc3261(branch() | branch_key()) -> boolean().
is_rfc3261({branch, Bin}) ->
    case Bin of
        <<"z9hG4bK", _/binary>> -> true;
        _ -> false
    end;
is_rfc3261({branch_key, Bin}) ->
    case Bin of
        <<"z9hg4bk", _/binary>> -> true;
        _ -> false
    end.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(porkrind_logic).
-include("porkrind_internal.hrl").
-export([
is/1,
is_not/1,
anything/0,
equal_to/1,
exactly_equal_to/1,
gt/1,
gteq/1,
greater_than/1,
greater_than_or_equal_to/1,
lt/1,
lteq/1,
less_than/1,
less_than_or_equal_to/1,
close_to/1,
close_to/2,
all_of/1,
any_of/1
]).
%% @doc Matcher that simply delegates to the wrapped matcher
%% (readability sugar, e.g. is(equal_to(1))).
is(Matcher0) ->
Matcher = porkrind_util:maybe_wrap(Matcher0),
#'porkrind.matcher'{
name = is,
args = [Matcher0],
match = fun(Value) -> porkrind:match(Value, Matcher) end
}.
%% @doc Matcher that succeeds exactly when the wrapped matcher fails.
is_not(Matcher0) ->
Matcher = porkrind_util:maybe_wrap(Matcher0),
#'porkrind.matcher'{
name = is_not,
args = [Matcher0],
match = fun(Value) ->
%% the 'of'-less try: a normal return means the inner matcher
%% accepted the value, so this matcher must fail
try porkrind:match(Value, Matcher) of
_ -> ?PR_FAIL({is_not_fail, Value})
catch _:_ ->
ok
end
end,
reason = fun({is_not_fail, Value}) ->
[io_lib:format("~w is ", [Value]), porkrind:describe(Matcher)]
end
}.
%% @doc Matcher that accepts any value.
anything() ->
#'porkrind.matcher'{
name = anything,
args = [],
match = fun(_Value) -> ok end
}.
%% @doc Matcher for `==' equality (numeric coercion: 1 == 1.0).
equal_to(Term) ->
#'porkrind.matcher'{
name = equal_to,
args = [Term],
match = fun(Value) ->
if Value == Term -> ok; true ->
?PR_FAIL({not_equal, Value})
end
end,
reason = fun({not_equal, Value}) ->
io_lib:format("~w is not equal to ~w", [Value, Term])
end
}.
%% @doc Matcher for `=:=' exact equality (no numeric coercion).
exactly_equal_to(Term) ->
#'porkrind.matcher'{
name = exactly_equal_to,
args = [Term],
match = fun(Value) ->
if Value =:= Term -> ok; true ->
?PR_FAIL({not_exactly_equal, Value})
end
end,
reason = fun({not_exactly_equal, Value}) ->
io_lib:format("~w is not exactly equal to ~w", [Value, Term])
end
}.
%% @doc Shorthand for greater_than/1.
gt(Term) ->
greater_than(Term).
%% @doc Shorthand for greater_than_or_equal_to/1.
gteq(Term) ->
greater_than_or_equal_to(Term).
%% @doc Matcher requiring the value to be strictly greater than Term.
greater_than(Term) ->
#'porkrind.matcher'{
name = greater_than,
args = [Term],
match = fun(Value) ->
if Value > Term -> ok; true ->
?PR_FAIL({gt_fail, Value})
end
end,
reason = fun({gt_fail, Value}) ->
io_lib:format("~w is less than or equal to ~w", [Value, Term])
end
}.
%% @doc Matcher requiring the value to be at least Term.
greater_than_or_equal_to(Term) ->
#'porkrind.matcher'{
name = greater_than_or_equal_to,
args = [Term],
match = fun(Value) ->
if Value >= Term -> ok; true ->
?PR_FAIL({gteq_fail, Value})
end
end,
reason = fun({gteq_fail, Value}) ->
io_lib:format("~w is less than ~w", [Value, Term])
end
}.
%% @doc Shorthand for less_than/1.
lt(Term) ->
less_than(Term).
%% @doc Shorthand for less_than_or_equal_to/1.
lteq(Term) ->
less_than_or_equal_to(Term).
%% @doc Matcher requiring the value to be strictly less than Term.
less_than(Term) ->
#'porkrind.matcher'{
name = less_than,
args = [Term],
match = fun(Value) ->
if Value < Term -> ok; true ->
?PR_FAIL({lt_fail, Value})
end
end,
reason = fun({lt_fail, Value}) ->
io_lib:format("~w is greater than or equal to ~w", [Value, Term])
end
}.
%% @doc Matcher requiring the value to be at most Term.
less_than_or_equal_to(Term) ->
#'porkrind.matcher'{
name = less_than_or_equal_to,
args = [Term],
match = fun(Value) ->
if Value =< Term -> ok; true ->
?PR_FAIL({lteq_fail, Value})
end
end,
reason = fun({lteq_fail, Value}) ->
io_lib:format("~w is greater than ~w", [Value, Term])
end
}.
%% @doc Matcher accepting numbers within a default delta of Number.
close_to(Number) ->
    close_to(Number, 0.000001).

%% @doc Matcher accepting numbers whose distance from Number is at
%% most Delta. Fails with "more than Delta different" otherwise, and
%% also requires the value to be a number (via porkrind_types).
close_to(Number, Delta) when is_number(Number), is_number(Delta), Delta >= 0 ->
    M = #'porkrind.matcher'{
        name = close_to,
        args = [Number, Delta],
        match = fun(Value) ->
            %% BUGFIX: compare the distance between the values
            %% themselves, not between their absolute values. The old
            %% abs(abs(Value) - abs(Number)) form wrongly accepted
            %% values of opposite sign (e.g. -5 was "close to" 5).
            %% Also use =< so the failure reason ("more than ~w
            %% different") is accurate at the boundary.
            case erlang:abs(Value - Number) =< Delta of
                true -> ok;
                false -> ?PR_FAIL({not_close, Value})
            end
        end,
        reason = fun({not_close, Value}) ->
            Args = [Value, Delta, Number],
            io_lib:format("~w is more than ~w different than ~w", Args)
        end
    },
    all_of([porkrind_types:is_number(), M]).
%% @doc Matcher that requires every given matcher to accept the value.
%% Requires a non-empty matcher list.
all_of(Matchers0) when is_list(Matchers0), length(Matchers0) > 0 ->
% Simplify when we have nested all_of matchers
% which is a fairly common pattern. Make sure that
% we keep the same left-to-right ordering though.
Matchers1 = lists:flatmap(fun(M) ->
case M of
#'porkrind.matcher'{name = all_of, args = [SubMatchers]} ->
SubMatchers;
Else ->
[Else]
end
end, Matchers0),
Matchers = lists:map(fun porkrind_util:maybe_wrap/1, Matchers1),
#'porkrind.matcher'{
name = all_of,
%% note: args holds the flattened (but unwrapped) matcher list
args = [Matchers1],
match = fun(Value) ->
%% porkrind:match throws on the first failing matcher
lists:foreach(fun(M) -> porkrind:match(Value, M) end, Matchers)
end
}.
%% @doc Matcher that requires at least one of the given matchers to
%% accept the value. Requires a non-empty matcher list.
any_of(Matchers0) when is_list(Matchers0), length(Matchers0) > 0 ->
Matchers = lists:map(fun porkrind_util:maybe_wrap/1, Matchers0),
#'porkrind.matcher'{
name = any_of,
args = [Matchers0],
match = fun(Value) ->
case porkrind_util:find_first_match(Value, Matchers) of
{match, _} ->
ok;
nomatch ->
?PR_FAIL({any_of_fail, Value})
end
end,
reason = fun({any_of_fail, Value}) ->
Prefix = io_lib:format("~w does not match any of", [Value]),
Descrs = lists:map(fun porkrind:describe/1, Matchers),
[Prefix, porkrind_util:str_join(Descrs, " or ", "()")]
end
}. | src/porkrind_logic.erl | 0.661267 | 0.529932 | porkrind_logic.erl | starcoder |
%%%-------------------------------------------------------------------
%%% @doc
%%% A set of optics specific to lists.
%%% @end
%%%-------------------------------------------------------------------
-module(optic_lists).
%% API
-export([all/0,
all/1,
head/0,
head/1,
tail/0,
tail/1,
nth/1,
nth/2]).
%%%===================================================================
%%% API
%%%===================================================================
%% @see all/1
-spec all() -> optic:optic().
all() ->
all(#{}).
%% @doc
%% Focus on all elements of a list.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_lists:all()], [1,2,3]).
%% {ok,[1,2,3]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec all(Options) -> optic:optic() when
Options :: optic:variations().
all(Options) ->
%% Read-only traversal over every element; non-lists are an error.
Fold =
fun (Fun, Acc, List) when is_list(List) ->
{ok, lists:foldl(Fun, Acc, List)};
(_, _, _) ->
{error, undefined}
end,
%% Read-write traversal returning the updated list plus accumulator.
MapFold =
fun (Fun, Acc, List) when is_list(List) ->
{ok, lists:mapfoldl(Fun, Acc, List)};
(_, _, _) ->
{error, undefined}
end,
%% When the focused container must be created, start from an empty list.
New =
fun (_Data, _Template) ->
[]
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see head/1
-spec head() -> optic:optic().
head() ->
head(#{}).
%% @doc
%% Focus on the head of a list. The list must have at least one
%% element to have a head.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_lists:head()], [1,2,3]).
%% {ok,[1]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec head(Options) -> optic:optic() when
Options :: optic:variations().
head(Options) ->
%% Visit just the first element; empty lists / non-lists are errors.
Fold =
fun (Fun, Acc, [Head | _]) ->
{ok, Fun(Head, Acc)};
(_, _, _) ->
{error, undefined}
end,
%% Rewrite the first element, keeping the tail untouched.
MapFold =
fun (Fun, Acc, [Head | Tail]) ->
{NewHead, NewAcc} = Fun(Head, Acc),
{ok, {[NewHead | Tail], NewAcc}};
(_, _, _) ->
{error, undefined}
end,
%% A created list holds just the template, which becomes the head.
New =
fun (_Data, Template) ->
[Template]
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see tail/1
-spec tail() -> optic:optic().
tail() ->
tail(#{}).
%% @doc
%% Focus on the tail of a list. A list must have at least one element
%% to have a tail.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_lists:tail()], [1,2,3]).
%% {ok,[2,3]}
%% '''
%% @end
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec tail(Options) -> optic:optic() when
Options :: optic:variations().
tail(Options) ->
%% Visit every element after the head; empty/non-lists are errors.
Fold =
fun (Fun, Acc, [_ | Tail]) ->
{ok, lists:foldl(Fun, Acc, Tail)};
(_, _, _) ->
{error, undefined}
end,
%% Rewrite all tail elements, keeping the head untouched.
MapFold =
fun (Fun, Acc, [Head | Tail]) ->
{NewTail, NewAcc} = lists:mapfoldl(Fun, Acc, Tail),
{ok, {[Head | NewTail], NewAcc}};
(_, _, _) ->
{error, undefined}
end,
%% A created list holds just the template (head), with an empty tail.
New =
fun (_Data, Template) ->
[Template]
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New).
%% @see nth/2
-spec nth(N) -> optic:optic() when
N :: pos_integer().
nth(N) ->
nth(N, #{}).
%% @doc
%% Focus on the nth element of a list. As with `lists:nth/2', indexing
%% begins at 1.
%%
%% Example:
%%
%% ```
%% > optic:get([optic_lists:nth(1)], [1,2,3]).
%% {ok,[1]}
%% '''
%% @end
%% @param N The index of the list element to focus on.
%% @param Options Common optic options.
%% @returns An opaque optic record.
-spec nth(N, Options) -> optic:optic() when
N :: pos_integer(),
Options :: optic:variations().
nth(N, Options) ->
%% Note: the length/1 guard makes each access O(length(List)).
Fold =
fun (Fun, Acc, List) when N =< length(List) ->
Nth = lists:nth(N, List),
{ok, Fun(Nth, Acc)};
(_, _, _) ->
{error, undefined}
end,
%% Split around the nth element, rewrite it, and reassemble.
MapFold =
fun (Fun, Acc, List) when N =< length(List) ->
{Before, [Head | Tail]} = lists:split(N - 1, List),
{NewHead, NewAcc} = Fun(Head, Acc),
{ok, {Before ++ [NewHead] ++ Tail, NewAcc}};
(_, _, _) ->
{error, undefined}
end,
%% Pad a too-short list (or replace a non-list) with copies of the
%% template so that index N exists.
New =
fun (Data, Template) when is_list(Data) ->
Data ++ lists:duplicate(N - length(Data), Template);
(_Data, Template) ->
lists:duplicate(N, Template)
end,
Optic = optic:new(MapFold, Fold),
optic:variations(Optic, Options, New). | src/optic_lists.erl | 0.591723 | 0.509886 | optic_lists.erl | starcoder |
%%==============================================================================
%% Copyright 2013-2021 <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
%%%-------------------------------------------------------------------
%%% @doc
%%% A property list style handling of {Key, Value} tuples.
%%% @end
%%%
%% @author <NAME> <<EMAIL>>
%% @copyright (C) 2013-2021, <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(plist).
-copyright('<NAME> <<EMAIL>>').
%% Library functions.
-export([new/0, new/2,
add/3, add/4,
delete/2, delete/3,
find/2, find/3, find/4, find_all/2,
keys/1, values/1,
member/2, replace/3, replace/4,
compact/1
]).
%% Types
-type key() :: _.
-type value() :: _.
-type default() :: _.
-type plist() :: [{key(), value()}].
-type flag() :: check | nocheck.
-type placement() :: first | last.
%% Exported Types
-export_type([plist/0]).
%% ===================================================================
%% Library functions.
%% ===================================================================
%%--------------------------------------------------------------------
%% Function: new() -> PList.
%% @doc
%% Creates an empty plist.
%% @end
%%--------------------------------------------------------------------
-spec new() -> plist().
%%--------------------------------------------------------------------
new() -> new([], []).

%%--------------------------------------------------------------------
%% Function: new(Keys, Values) -> PList.
%% @doc
%% Creates a plist by pairing Keys with Values positionally; the
%% resulting pairs appear in reverse of the input order. Raises
%% badarg (carrying the original arguments) when the lists differ in
%% length.
%% @end
%%--------------------------------------------------------------------
-spec new([key()], [value()]) -> plist().
%%--------------------------------------------------------------------
new(Keys, Values) when length(Keys) =:= length(Values) ->
    lists:reverse(lists:zip(Keys, Values));
new(Keys, Values) ->
    erlang:error(badarg, [Keys, Values]).
%%--------------------------------------------------------------------
%% Function: add(Key, Values, PList) -> PList.
%% @doc
%% Prepends the property to Plist without checking for duplicates.
%% @end
%%--------------------------------------------------------------------
-spec add(key(), value(), plist()) -> plist().
%%--------------------------------------------------------------------
add(Key, Value, PList) -> add(Key, Value, PList, nocheck).

%%--------------------------------------------------------------------
%% Function: add(Key, Values, PList, Flag) -> PList.
%% @doc
%% Prepends the property to Plist; with the check flag a badarg error
%% is raised when the key is already present.
%% @end
%%--------------------------------------------------------------------
-spec add(key(), value(), plist(), flag()) -> plist().
%%--------------------------------------------------------------------
add(Key, Value, PList, nocheck) ->
    [{Key, Value} | PList];
add(Key, Value, PList, check) ->
    case lists:keymember(Key, 1, PList) of
        false -> [{Key, Value} | PList];
        true -> erlang:error(badarg, [Key, Value, PList, check])
    end.
%%--------------------------------------------------------------------
%% Function: delete(Key, PList) -> PList.
%% @doc
%% Removes the first property with the key, without checking that it
%% exists.
%% @end
%%--------------------------------------------------------------------
-spec delete(key(), plist()) -> plist().
%%--------------------------------------------------------------------
delete(Key, PList) -> delete(Key, PList, nocheck).

%%--------------------------------------------------------------------
%% Function: delete(Key, PList, Flag) -> PList.
%% @doc
%% Removes the first property with the key; with the check flag a
%% badarg error is raised when the key is absent.
%% @end
%%--------------------------------------------------------------------
-spec delete(key(), plist(), flag()) -> plist().
%%--------------------------------------------------------------------
delete(Key, PList, nocheck) -> lists:keydelete(Key, 1, PList);
delete(_, [], check) -> erlang:error(badarg);
delete(Key, [{Key, _} | T], check) -> T;
%% BUGFIX: keep the non-matching head. The previous clause recursed
%% with "delete(Key, T, check)" alone, silently discarding every
%% element before the match (cf. the correct pattern in replace/4).
delete(Key, [H | T], check) -> [H | delete(Key, T, check)].
%%--------------------------------------------------------------------
%% Function: find(Key, PList) -> Value.
%% @doc
%% Finds the value of the property or undefined if not found.
%% @end
%%--------------------------------------------------------------------
-spec find(key(), plist()) -> value() | undefined.
%%--------------------------------------------------------------------
find(Key, PList) -> find(Key, PList, undefined).

%%--------------------------------------------------------------------
%% Function: find(Key, PList, Default) -> Value.
%% @doc
%% Finds the value of the property or Default if not found.
%% @end
%%--------------------------------------------------------------------
-spec find(key(), plist(), default()) -> value() | default().
%%--------------------------------------------------------------------
find(Key, PList, Default) -> find(Key, PList, Default, first).

%%--------------------------------------------------------------------
%% Function: find(Key, PList, Default, Placement) -> Value.
%% @doc
%% Finds the value of the property or Default if not found. When the
%% key occurs more than once, Placement selects whether the first or
%% the last occurrence wins.
%% @end
%%--------------------------------------------------------------------
-spec find(key(), plist(), default(), placement()) -> value() | default().
%%--------------------------------------------------------------------
find(_, [], Fallback, _) ->
    Fallback;
find(Key, PList, Default, first) ->
    case lists:keyfind(Key, 1, PList) of
        {_, Value} -> Value;
        false -> Default
    end;
find(Key, [{Key, Value} | Rest], _, last) ->
    %% remember the latest match and keep scanning
    find(Key, Rest, Value, last);
find(Key, [_ | Rest], Latest, last) ->
    find(Key, Rest, Latest, last).
%%--------------------------------------------------------------------
%% Function: find_all(Key, PList) -> Values.
%% @doc
%% Finds all values associated with the key, in list order.
%% @end
%%--------------------------------------------------------------------
-spec find_all(key(), plist()) -> [value()].
%%--------------------------------------------------------------------
find_all(Key, PList) ->
    %% The generator pattern also skips any non-pair elements,
    %% matching the permissiveness of the recursive original.
    [Value || {K, Value} <- PList, K =:= Key].
%%--------------------------------------------------------------------
%% Function: keys(PList) -> Keys.
%% @doc
%% Returns the sorted, de-duplicated list of all keys.
%% @end
%%--------------------------------------------------------------------
-spec keys(plist()) -> [key()].
%%--------------------------------------------------------------------
keys(PList) ->
    AllKeys = [K || {K, _} <- PList],
    lists:usort(AllKeys).
%%--------------------------------------------------------------------
%% Function: values(PList) -> Values.
%% @doc
%% Returns all the values, in list order.
%% @end
%%--------------------------------------------------------------------
-spec values(plist()) -> [value()].
%%--------------------------------------------------------------------
values(Props) ->
    %% non-pair elements are skipped, as in the original comprehension
    Collect = fun({_, V}, Acc) -> [V | Acc];
                 (_, Acc) -> Acc
              end,
    lists:reverse(lists:foldl(Collect, [], Props)).
%%--------------------------------------------------------------------
%% Function: member(PList) -> Boolean.
%% @doc
%% Returns whether the key occurs in the PList.
%% @end
%%--------------------------------------------------------------------
-spec member(key(), plist()) -> boolean().
%%--------------------------------------------------------------------
member(Key, Props) ->
    case lists:keyfind(Key, 1, Props) of
        false -> false;
        _Pair -> true
    end.
%%--------------------------------------------------------------------
%% Function: replace(Key, Value, PList) -> PList.
%% @doc
%% Replaces the first occurrence in the PList, appending the property
%% if the key is absent.
%% @end
%%--------------------------------------------------------------------
-spec replace(key(), value(), plist()) -> plist().
%%--------------------------------------------------------------------
replace(Key, Value, PList) -> replace(Key, Value, PList, nocheck).

%%--------------------------------------------------------------------
%% Function: replace(Key, Value, PList, Flag) -> PList.
%% @doc
%% Replaces the first occurrence in the PList; with the check flag a
%% badarg error is raised when the key is absent, otherwise the
%% property is appended.
%% @end
%%--------------------------------------------------------------------
-spec replace(key(), value(), plist(), flag()) -> plist().
%%--------------------------------------------------------------------
replace(Key, Value, [{Key, _} | Rest], _Flag) ->
    [{Key, Value} | Rest];
replace(Key, Value, [Other | Rest], Flag) ->
    [Other | replace(Key, Value, Rest, Flag)];
replace(Key, Value, [], nocheck) ->
    [{Key, Value}];
replace(_, _, [], check) ->
    erlang:error(badarg).
%%--------------------------------------------------------------------
%% Function: compact(PList) -> PList.
%% @doc
%% Ensures one property per key, the first occurrence taking
%% precedence. lists:ukeysort/2 sorts stably by key and keeps only
%% the first of each run of equal keys, so the result is key-sorted.
%% @end
%%--------------------------------------------------------------------
-spec compact(plist()) -> plist().
%%--------------------------------------------------------------------
compact(Props) -> lists:ukeysort(1, Props).
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% Test database consistency with random transactions
-module(mria_proper_suite).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("snabbkaffe/include/ct_boilerplate.hrl").
%%================================================================================
%% Types
%%================================================================================
-type key() :: non_neg_integer().
-type value() :: non_neg_integer().
-record(s,
{ bag = [] :: [{key, value()}]
, set = #{} :: #{key() => value()}
}).
%%================================================================================
%% Testcases
%%================================================================================
%% Run the random-transaction consistency property under CT, with
%% proper tuned to 100 runs and a generous overall timeout.
t_import_transactions(Config0) when is_list(Config0) ->
Config = [{proper, #{max_size => 300,
numtests => 100,
timeout => 100000
}} | Config0],
?run_prop(Config, prop()).
%% The property itself: start a core+replicant cluster, run a random
%% command sequence against it, wait for full replication, then check
%% that every node's tables agree with the symbolic model state. The
%% cluster is torn down regardless of the outcome.
prop() ->
Cluster = mria_ct:cluster([core, replicant], mria_mnesia_test_util:common_env()),
snabbkaffe:fix_ct_logging(),
?forall_trace(
Cmds, commands(?MODULE),
#{timetrap => 10000},
try
Nodes = mria_ct:start_cluster(mria, Cluster),
ok = mria_mnesia_test_util:wait_tables(Nodes),
{History, State, Result} = run_commands(?MODULE, Cmds),
mria_mnesia_test_util:wait_full_replication(Cluster),
[check_state(Cmds, State, Node) || Node <- Nodes],
{History, State, Result}
after
mria_ct:teardown_cluster(Cluster)
end,
fun({_History, _State, Result}, _Trace) ->
%% run_commands returns ok as Result only when every
%% postcondition held
?assertMatch(ok, Result),
true
end).
%%================================================================================
%% Proper generators
%%================================================================================
%% Keys are drawn from a small range so collisions (and hence
%% overwrites/deletes of live keys) are likely.
table_key() ->
range(1, 100).
value() ->
non_neg_integer().
%% test_tab is modelled as a set, test_bag as a bag.
table() ->
union([test_tab, test_bag]).
write_op(Table) ->
{write, Table, table_key(), value()}.
%% Generate a single table operation, biased towards writes; deletes
%% only target keys/objects the symbolic model currently holds.
trans_op(#s{bag = Bag, set = Set}) ->
?LET(Table, table(),
case Table of
test_tab ->
case maps:keys(Set) of
[] ->
write_op(Table);
Keys ->
frequency([ {60, write_op(Table)}
, {20, {delete, Table, oneof(Keys)}}
])
end;
test_bag ->
case Bag of
[] ->
write_op(Table);
Objs ->
Keys = proplists:get_keys(Objs),
frequency([ {60, write_op(Table)}
, {10, {delete, Table, oneof(Keys)}}
, {30, {delete_object, Table, oneof(Objs)}}
])
end
end).
%% A step is either a transaction of up to ~10 ops, a single dirty
%% operation, or (rarely) clearing an entire table.
transaction(State) ->
frequency([ {50, {transaction, resize(10, list(trans_op(State)))}}
, {50, {dirty, trans_op(State)}}
, {5, {clear_table, table()}}
]).
%% NOTE(review): core_node/0 and replicant_node/0 are defined
%% elsewhere in this module — presumably each picks a node of the
%% corresponding role; confirm against the full file.
participant() ->
oneof([core_node(), replicant_node()]).
%%================================================================================
%% Proper FSM definition
%%================================================================================

%% Initial model value at system start. Should be deterministic.
initial_state() ->
    #s{}.

%% Almost always execute a transaction on some node; restarting mria is
%% currently disabled (weight 0).
command(State) ->
    frequency([ {90, {call, ?MODULE, execute, [participant(), transaction(State)]}}
              , {0, {call, ?MODULE, restart_mria, [participant()]}} %% TODO
              ]).
%% Picks whether a command should be valid under the current state.
%% Every generated command is considered valid here.
precondition(_State, {call, _Mod, _Fun, _Args}) ->
    true.

%% Per-command postconditions are not used; the model is checked once at the
%% end of each run (see check_state/3).
postcondition(_State, {call, _Mod, _Fun, _Args}, _Res) ->
    true.
%% Advance the symbolic model to mirror the effect of an executed command.
next_state(State, _Res, {call, ?MODULE, execute, [_, Args]}) ->
    case Args of
        {clear_table, test_tab} ->
            State#s{set = #{}};
        {clear_table, test_bag} ->
            State#s{bag = []};
        {transaction, Ops} ->
            lists:foldl(fun symbolic_exec_op/2, State, Ops);
        {dirty, Op} ->
            symbolic_exec_op(Op, State)
    end;
%% Any other command (e.g. restart_mria) leaves the model untouched.
next_state(State, _Res, _Call) ->
    State.
%% Compare the model's bag and set contents against the actual table
%% contents on one node.
check_state(Cmds, #s{bag = Bag, set = Set}, Node) ->
    compare_lists(bag, Node, Cmds, lists:sort(Bag), get_records(Node, test_bag)),
    compare_lists(set, Node, Cmds, lists:sort(maps:to_list(Set)), get_records(Node, test_tab)).
%% Assert that the model contents (Expected) and the on-node table contents
%% (Got) are equal, attaching enough context to diagnose a failure.
%% FIX: the two difference lists were labelled the wrong way around in the
%% original -- `missing` is what the model expects but the table lacks,
%% `unexpected` is what the table holds but the model does not.  The
%% assertion itself is unchanged.
compare_lists(Type, Node, Cmds, Expected, Got) ->
    Missing    = Expected -- Got,
    Unexpected = Got -- Expected,
    Comment = [ {node, Node}
              , {cmds, Cmds}
              , {unexpected, Unexpected}
              , {missing, Missing}
              , {table_type, Type}
              ],
    ?assert(length(Missing) + length(Unexpected) =:= 0, Comment).
%%================================================================================
%% Internal functions
%%================================================================================
%% Apply a single table operation to the symbolic model (pure, no side
%% effects).  The set table is modelled as a map, the bag table as a
%% proplist that may hold several values per key but no duplicate records.
symbolic_exec_op({write, test_tab, Key, Val}, State = #s{set = Set0}) ->
    State#s{set = Set0#{Key => Val}};
symbolic_exec_op({write, test_bag, Key, Val}, State = #s{bag = Bag0}) ->
    Record = {Key, Val},
    %% A bag never holds the same record twice; drop any old copy first.
    State#s{bag = [Record | lists:delete(Record, Bag0)]};
symbolic_exec_op({delete, test_tab, Key}, State = #s{set = Set0}) ->
    State#s{set = maps:remove(Key, Set0)};
symbolic_exec_op({delete, test_bag, Key}, State = #s{bag = Bag0}) ->
    %% Delete removes every record stored under Key.
    State#s{bag = proplists:delete(Key, Bag0)};
symbolic_exec_op({delete_object, test_bag, Record}, State = #s{bag = Bag0}) ->
    %% delete_object removes only the exact record.
    State#s{bag = lists:delete(Record, Bag0)}.
%% Apply one operation inside an mnesia transaction context.
execute_op({write, Tab, Key, Val}) ->
    ok = mnesia:write({Tab, Key, Val});
execute_op({delete, Tab, Key}) ->
    ok = mnesia:delete({Tab, Key});
execute_op({delete_object, Tab, {K, V}}) ->
    ok = mnesia:delete_object({Tab, K, V}).

%% Apply one operation through mria's dirty API (no transaction).
execute_op_dirty({write, Tab, Key, Val}) ->
    ok = mria:dirty_write({Tab, Key, Val});
execute_op_dirty({delete, Tab, Key}) ->
    ok = mria:dirty_delete({Tab, Key});
execute_op_dirty({delete_object, Tab, {K, V}}) ->
    ok = mria:dirty_delete_object({Tab, K, V}).
%% Run one generated unit of work on Node via rpc; the matches assert the
%% expected return value so failures surface as badmatch in the property run.
execute(Node, {clear_table, Tab}) ->
    {atomic, ok} = rpc:call(Node, mria, clear_table, [Tab]);
execute(Node, {transaction, Ops}) ->
    Fun = fun() ->
                  lists:foreach(fun execute_op/1, Ops)
          end,
    {atomic, ok} = rpc:call(Node, mria, transaction, [test_shard, Fun]);
execute(Node, {dirty, Op}) ->
    ok = rpc:call(Node, ?MODULE, execute_op_dirty, [Op]).
%% Stop and restart the mria application on Node (currently unused; see the
%% zero-weighted command above).
restart_mria(Node) ->
    rpc:call(Node, application, stop, [mria]),
    {ok, _} = rpc:call(Node, application, ensure_all_started, [mria]).

%% Node ids of the core (n1) and replicant (n2) cluster members.
core_node() ->
    mria_ct:node_id(n1).

replicant_node() ->
    mria_ct:node_id(n2).
%% Dump the contents of Table on Node as a sorted [{Key, Value}] list.
%% (Stray dataset metadata appended after the final expression was removed.)
get_records(Node, Table) ->
    Records = rpc:call(Node, ets, tab2list, [Table]),
    lists:sort([{K, V} || {_, K, V} <- Records]).
%%%-------------------------------------------------------------------
%%% @author <NAME>, <NAME>
%%% @doc The general purpose of this module is to specify the different
%%% ways to process the data collected by the sensors.
%%%
%%%
%%% @end
%%%-------------------------------------------------------------------
-module(achlys_compute).
-author("<NAME>, <NAME>").
-type coordinate() :: {number(), number(), number()}.
%% API
-export([compute/4,
compute/3]).
%% @doc Retrieve the data associated with Value from the raw sensor Data and
%% run the corresponding general computation on the retrieved values.
-spec compute(atom(), atom(), list(), atom()) -> number().
compute(Computation, Value, Data, Node) ->
    general_computation(Computation, retrieve(Data, Value), Node).
%%====================================================================
%% Generic compute functions
%%====================================================================

%% @doc Dispatch a generic aggregation over [{Node, Value}] pairs, where each
%% Value is a number or an {X, Y, Z} coordinate.  All aggregations skip
%% unusable entries and return not_avalaible when no valid element exists.
%% (The misspelled atom not_avalaible is part of the existing protocol and
%% is deliberately kept as-is.)
-spec general_computation(atom(), list({atom(), number()|coordinate()|not_avalaible}), atom()) -> number() | not_avalaible.
%% Arithmetic mean; coordinates are averaged component-wise.
general_computation(average, Data, _) -> average(Data, 0, 0);
%% Standard deviation (historically named "derivation"); component-wise for
%% coordinates.  The mean is computed first, hence the two-pass structure.
general_computation(standard_derivation, Data, _) ->
    Average = average(Data, 0, 0),
    case Average of
        X when is_number(X)-> standard_derivation(Data, X, 0, 0);
        {X, Y, Z} -> standard_derivation(Data, {X, Y, Z}, 0, 0);
        _ -> not_avalaible
    end;
%% Min / max of numbers, or of coordinates compared by X + Y + Z.
general_computation(min, Data, _) -> min(Data, not_avalaible);
general_computation(max, Data, _) -> max(Data, not_avalaible);
%% Coordinate min / max compared by the X component only.
general_computation(minX, Data, _) -> minX(Data, not_avalaible);
general_computation(maxX, Data, _) -> maxX(Data, not_avalaible);
%% Coordinate min / max compared by the Y component only.
general_computation(minY, Data, _) -> minY(Data, not_avalaible);
general_computation(maxY, Data, _) -> maxY(Data, not_avalaible);
%% Coordinate min / max compared by the Z component only.
general_computation(minZ, Data, _) -> minZ(Data, not_avalaible);
general_computation(maxZ, Data, _) -> maxZ(Data, not_avalaible);
%% Coordinate min by the smallest / max by the largest of the three
%% components.
general_computation(minXYZ, Data, _) -> minXYZ(Data, not_avalaible);
general_computation(maxXYZ, Data, _) -> maxXYZ(Data, not_avalaible);
%% Unknown aggregation name.
general_computation(_, _, _) -> not_avalaible.
%%====================================================================
%% Specific compute functions
%%====================================================================

%% @doc Named, application-specific computations over the collected data.
%% Each *_warning computation lights a GRiSP LED (via button_warning/6):
%%   green - every node is inside the allowed range,
%%   red   - this node itself is out of range,
%%   blue  - only other nodes are out of range,
%% and returns the list of out-of-range nodes.
-spec compute(atom(), list({atom(), number()|coordinate()|not_avalaible}), atom()) -> number() | not_avalaible.
%% Temperature must stay within [-18 C, 40 C]; LED 1 shows the status.
compute(temperature_warning, Data, Node) ->
    button_warning(retrieve(Data, temperature), Node, 1, -18, 40, []);
%% Pressure must stay within [950 hPa, 1060 hPa]; LED 2 shows the status.
compute(pressure_warning, Data, Node) ->
    button_warning(retrieve(Data, pressure), Node, 2, 950, 1060, []);
%% Unknown computation name.
compute(_, _, _) -> not_avalaible.
%%====================================================================
%% Util functions
%%====================================================================

%% @doc Take the raw data (a list of {Node, DataMap} pairs) and the wanted
%% value name(s), and return one tuple per node of the form
%% {Node, Value1, Value2, ...}.  Values may be given as a single atom, a
%% list of atoms, or a tuple of atoms.
-spec retrieve(list({atom(), map()}), atom()|list()|tuple()) -> tuple().
retrieve(RawData, Values) when is_list(Values) ->
    [extract_data_from_node(X, Values) || X <- RawData];
retrieve(RawData, Values) when is_tuple(Values) ->
    [extract_data_from_node(X, erlang:tuple_to_list(Values)) || X <- RawData];
retrieve(RawData, Value) when is_atom(Value) ->
    [extract_data_from_node(X, [Value]) || X <- RawData].
%% @doc Extract the values named in Collect from one node's data map,
%% accumulating them into a tuple starting with the node name.
extract_data_from_node({Node, NodeData}, Collect) ->
    extract_data_from_node(NodeData, Collect, {Node}).

%% Accumulator variant: appends each looked-up value to the result tuple,
%% using not_avalaible when a key is absent from the node's data.
extract_data_from_node(NodeData, [H | T], Acc) ->
    extract_data_from_node(NodeData, T, erlang:append_element(Acc, mapz:deep_get([H], NodeData, not_avalaible)));
extract_data_from_node(_, [], Acc) -> Acc.
%%====================================================================
%% Internal functions
%%====================================================================
%% Tail-recursive mean over [{Node, Value}] pairs.  Values may be numbers or
%% {X, Y, Z} coordinates (averaged component-wise); entries of any other
%% shape are skipped.  Returns not_avalaible when no valid element was seen.
average([{_Node, {X, Y, Z}} | Rest], {AccX, AccY, AccZ}, Count) ->
    average(Rest, {X + AccX, Y + AccY, Z + AccZ}, Count + 1);
average([{_Node, {X, Y, Z}} | Rest], _Acc, _Count) ->
    %% First coordinate seen: (re)start the accumulator.
    average(Rest, {X, Y, Z}, 1);
average([{_Node, V} | Rest], Acc, Count) when is_number(V) ->
    average(Rest, Acc + V, Count + 1);
average([_Skipped | Rest], Acc, Count) ->
    average(Rest, Acc, Count);
average([], {AccX, AccY, AccZ}, Count) when Count =/= 0 ->
    {AccX / Count, AccY / Count, AccZ / Count};
average([], Acc, Count) when Count =/= 0, is_number(Acc) ->
    Acc / Count;
average(_, _, _) ->
    not_avalaible.
%% Tail-recursive population standard deviation ("derivation" is a historic
%% misnomer kept for API compatibility) over [{Node, Value}] pairs, given
%% the precomputed mean.  Uses sqrt(E[V^2] - mean^2); coordinates are
%% handled component-wise and other malformed entries are skipped.  Returns
%% not_avalaible when no valid element was seen.
standard_derivation([{_, {X1, Y1, Z1}} | T], Average, Sum, Len) ->
    case Sum of
        {X2, Y2, Z2} -> standard_derivation(T, Average, {(X1 * X1) + X2, (Y1 * Y1) + Y2, (Z1 * Z1) + Z2}, Len + 1);
        _ -> standard_derivation(T, Average, {X1 * X1, Y1 * Y1, Z1 * Z1}, 1)
    end;
standard_derivation([{_, X} | T], Average, Sum, Len) when is_number(X) ->
    standard_derivation(T, Average, Sum + (X * X), Len + 1);
standard_derivation([_ | T], Average, Sum, Len) ->
    standard_derivation(T, Average, Sum, Len);
standard_derivation([], {AverageX, AverageY, AverageZ}, {X, Y, Z}, Len) when Len =/= 0 ->
    {math:sqrt((X / Len) - (AverageX * AverageX)),
     math:sqrt((Y / Len) - (AverageY * AverageY)),
     math:sqrt((Z / Len) - (AverageZ * AverageZ))};
standard_derivation([], Average, X, Len) when Len =/= 0, is_number(X) ->
    math:sqrt((X / Len) - (Average * Average));
%% Fallback: no valid data.  (FIX: the original bound A/B/C/D here without
%% using them, producing unused-variable compiler warnings.)
standard_derivation(_, _, _, _) ->
    not_avalaible.
%% Smallest {Node, Value} pair.  Coordinates compare by X + Y + Z, plain
%% numbers by value; when the accumulator has a different shape the current
%% element is taken unconditionally.  Malformed entries are skipped.
min([{_Node, {X, Y, Z}} | Rest], {BestNode, {BX, BY, BZ}}) when X + Y + Z > BX + BY + BZ ->
    min(Rest, {BestNode, {BX, BY, BZ}});
min([{Node, {X, Y, Z}} | Rest], _Best) ->
    min(Rest, {Node, {X, Y, Z}});
min([{_Node, V} | Rest], {BestNode, BestV}) when is_number(V), is_number(BestV), V > BestV ->
    min(Rest, {BestNode, BestV});
min([{Node, V} | Rest], _Best) when is_number(V) ->
    min(Rest, {Node, V});
min([_ | Rest], Best) ->
    min(Rest, Best);
min([], Best) ->
    Best.
%% Largest {Node, Value} pair.  Mirror image of min/2 above: coordinates
%% compare by X + Y + Z, plain numbers by value; on a shape mismatch the
%% current element wins.  Malformed entries are skipped.
max([{_Node, {X, Y, Z}} | Rest], {BestNode, {BX, BY, BZ}}) when X + Y + Z < BX + BY + BZ ->
    max(Rest, {BestNode, {BX, BY, BZ}});
max([{Node, {X, Y, Z}} | Rest], _Best) ->
    max(Rest, {Node, {X, Y, Z}});
max([{_Node, V} | Rest], {BestNode, BestV}) when is_number(V), is_number(BestV), V < BestV ->
    max(Rest, {BestNode, BestV});
max([{Node, V} | Rest], _Best) when is_number(V) ->
    max(Rest, {Node, V});
max([_ | Rest], Best) ->
    max(Rest, Best);
max([], Best) ->
    Best.
%% The eight coordinate extremum selectors below share one traversal.
%% extremum/3 walks [{Node, {X, Y, Z}}] pairs keeping the best pair so far;
%% KeepBest(Current, Best) returns true when the accumulated best must be
%% kept.  A plain numeric value makes the whole computation not_avalaible
%% (these selectors are defined for coordinates only, exactly as before),
%% and any other malformed entry is skipped.  This replaces eight nearly
%% identical hand-written recursions which also carried unused-variable
%% compiler warnings; behaviour is unchanged.
extremum([{Node, {_, _, _} = Coord} | Rest], Best, KeepBest) ->
    case Best of
        {_, {_, _, _} = BestCoord} ->
            case KeepBest(Coord, BestCoord) of
                true  -> extremum(Rest, Best, KeepBest);
                false -> extremum(Rest, {Node, Coord}, KeepBest)
            end;
        _ ->
            %% Accumulator is not a coordinate pair yet: take the current one.
            extremum(Rest, {Node, Coord}, KeepBest)
    end;
extremum([{_, V} | _], _, _) when is_number(V) ->
    not_avalaible;
extremum([_ | Rest], Best, KeepBest) ->
    extremum(Rest, Best, KeepBest);
extremum([], Best, _) ->
    Best.

%% Minimum / maximum by the X component.
minX(Data, Min) -> extremum(Data, Min, fun({X1, _, _}, {X2, _, _}) -> X1 > X2 end).
maxX(Data, Max) -> extremum(Data, Max, fun({X1, _, _}, {X2, _, _}) -> X1 < X2 end).
%% Minimum / maximum by the Y component.
minY(Data, Min) -> extremum(Data, Min, fun({_, Y1, _}, {_, Y2, _}) -> Y1 > Y2 end).
maxY(Data, Max) -> extremum(Data, Max, fun({_, Y1, _}, {_, Y2, _}) -> Y1 < Y2 end).
%% Minimum / maximum by the Z component.
minZ(Data, Min) -> extremum(Data, Min, fun({_, _, Z1}, {_, _, Z2}) -> Z1 > Z2 end).
maxZ(Data, Max) -> extremum(Data, Max, fun({_, _, Z1}, {_, _, Z2}) -> Z1 < Z2 end).
%% Minimum by the smallest / maximum by the largest of the three components.
minXYZ(Data, Min) ->
    extremum(Data, Min, fun({X1, Y1, Z1}, {X2, Y2, Z2}) ->
                                lists:min([X1, Y1, Z1]) > lists:min([X2, Y2, Z2])
                        end).
maxXYZ(Data, Max) ->
    extremum(Data, Max, fun({X1, Y1, Z1}, {X2, Y2, Z2}) ->
                                lists:max([X1, Y1, Z1]) < lists:max([X2, Y2, Z2])
                        end).
%% Walk [{Node, Value}] pairs collecting every node whose value lies outside
%% [Min, Max], then set the LED numbered Led on the GRiSP board:
%%   green - no node is out of range,
%%   red   - ThisNode itself is out of range,
%%   blue  - only other nodes are out of range.
%% Returns the list of out-of-range nodes.
%% (Stray dataset metadata appended after the final clause was removed.)
-spec button_warning(list({atom(), number()}), atom(), pos_integer(), number(), number(), list(atom())) -> list(atom()).
button_warning([{Node, Value} | T], ThisNode, Led, Min, Max, Acc) when Value < Min; Value > Max ->
    button_warning(T, ThisNode, Led, Min, Max, [Node | Acc]);
button_warning([_ | T], ThisNode, Led, Min, Max, Acc) ->
    button_warning(T, ThisNode, Led, Min, Max, Acc);
button_warning([], _, Led, _, _, []) ->
    grisp_led:color(Led, green),
    [];
button_warning([], ThisNode, Led, _, _, Acc) ->
    case lists:member(ThisNode, Acc) of
        true -> grisp_led:color(Led, red);
        _ -> grisp_led:color(Led, blue)
    end,
    Acc.
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_datetime).
-include_lib("typerefl/include/types.hrl").
%% API
-export([ to_epoch_millisecond/1
, to_epoch_second/1
]).
-export([ epoch_to_rfc3339/1
, epoch_to_rfc3339/2
]).
-reflect_type([ epoch_millisecond/0
, epoch_second/0
]).
-type epoch_second() :: non_neg_integer().
-type epoch_millisecond() :: non_neg_integer().
-typerefl_from_string({epoch_second/0, ?MODULE, to_epoch_second}).
-typerefl_from_string({epoch_millisecond/0, ?MODULE, to_epoch_millisecond}).
%% Parse a string as an epoch in seconds: either a bare non-negative integer
%% or an RFC3339 timestamp.  Hooked into typerefl for epoch_second() above.
to_epoch_second(DateTime) ->
    to_epoch(DateTime, second).

%% Same as to_epoch_second/1 but with millisecond resolution.
to_epoch_millisecond(DateTime) ->
    to_epoch(DateTime, millisecond).
%% Parse DateTime as either a bare non-negative integer epoch or an RFC3339
%% timestamp converted to the requested Unit.  Returns {ok, Epoch},
%% {error, bad_epoch} for a negative integer, or
%% {error, bad_rfc3339_timestamp} when nothing parses.
to_epoch(DateTime, Unit) ->
    try
        do_to_epoch(DateTime, Unit)
    catch
        error:_ -> {error, bad_rfc3339_timestamp}
    end.

%% Unprotected helper: any error it raises (bad timestamp, bad argument) is
%% mapped to {error, bad_rfc3339_timestamp} by the caller above.
do_to_epoch(DateTime, Unit) ->
    case string:to_integer(DateTime) of
        {Epoch, []} when Epoch >= 0 ->
            {ok, Epoch};
        {_Negative, []} ->
            {error, bad_epoch};
        _NotAPlainInteger ->
            {ok, calendar:rfc3339_to_system_time(DateTime, [{unit, Unit}])}
    end.
%% Format an epoch (milliseconds by default) as an RFC3339 binary.
%% NOTE(review): calendar:system_time_to_rfc3339/2 formats in the local time
%% zone unless an offset option is given, so the output is TZ-dependent.
epoch_to_rfc3339(TimeStamp) ->
    epoch_to_rfc3339(TimeStamp, millisecond).

epoch_to_rfc3339(TimeStamp, Unit) when is_integer(TimeStamp) ->
    list_to_binary(calendar:system_time_to_rfc3339(TimeStamp, [{unit, Unit}])).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile(nowarn_export_all).
-compile(export_all).
%% Minimal hocon schema used by the tests below: a single struct "bar" with
%% one field of each epoch type declared in this module.
roots() -> [bar].

fields(bar) ->
    [
     {second, ?MODULE:epoch_second()},
     {millisecond, ?MODULE:epoch_millisecond()}
    ].
-define(FORMAT(_Sec_, _Ms_), lists:flatten(
io_lib:format("bar={second=~w,millisecond=~w}", [_Sec_, _Ms_]))).
%% Valid inputs: plain non-negative integers and RFC3339 strings must both
%% check against the schema and normalise to the expected epoch values.
epoch_ok_test() ->
    Args = [
            {0, 0, 0, 0},
            {1, 1, 1, 1},
            {"2022-01-01T08:00:00+08:00", "2022-01-01T08:00:00+08:00", 1640995200, 1640995200000}
           ],
    lists:foreach(fun({Sec, Ms, EpochSec, EpochMs}) ->
                          check_ok(?FORMAT(Sec, Ms), EpochSec, EpochMs)
                  end, Args),
    ok.
%% Assert that Input checks against the schema and yields the given second /
%% millisecond values (Sec and Ms act as pinned variables in the match).
check_ok(Input, Sec, Ms) ->
    {ok, Data} = hocon:binary(Input, #{}),
    ?assertMatch(#{bar := #{second := Sec, millisecond := Ms}},
                 hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])),
    ok.
%% Invalid inputs: negative epochs, unit-suffixed strings and impossible
%% dates must all be rejected by the schema check.
epoch_failed_test() ->
    Args = [
            {-1, -1},
            {"1s", "1s"},
            {"2022-13-13T08:00:00+08:00", "2022-13-13T08:00:00+08:00"}],
    lists:foreach(fun({Sec, Ms}) ->
                          check_failed(?FORMAT(Sec, Ms))
                  end, Args),
    ok.
%% Assert that checking Input against the schema throws.
%% (Stray dataset metadata appended after -endif was removed.)
check_failed(Input) ->
    {ok, Data} = hocon:binary(Input, #{}),
    ?assertException(throw, _,
                     hocon_tconf:check_plain(?MODULE, Data, #{atom_key => true}, [bar])),
    ok.

-endif.
-module(matasano_guess).
-export([hex_single_byte_xor/1,
hex_detect_single_byte_xor/1
]).
%% Globals
%% English letter-frequency table (percent) for a..z plus the space
%% character (code 32, weighted 13.00), as a proplist keyed by character
%% code.  Codes 97..122 are the lowercase letters.
letter_scores() ->
    ValueList = [ 8.16, 1.49, 2.78, 4.25, 12.70, 2.23, 2.02,
                  6.09, 6.97, 0.15, 0.77, 4.03, 2.41, 6.75,
                  7.51, 1.93, 0.10, 5.99, 6.33, 9.06, 2.76,
                  0.98, 2.36, 0.15, 1.97, 0.07 ],
    [{32, 13.00}|lists:zip(lists:seq(97,122), ValueList)].
%% API
%% @doc Attempt to decode a hex string whose plaintext was XOR'd with a
%% single byte; returns {Byte, PlainText} for the best-scoring candidate.
hex_single_byte_xor(HexText) ->
    {Byte, Text, _Score} = hex_single_byte_xor_triple(HexText),
    {Byte, Text}.
%% @doc Search a list of hex strings for the one most likely to be a
%% single-byte-XOR ciphertext; returns {LineNumber, Byte, PlainText} where
%% LineNumber is 1-based.  Crashes on an empty list (hd/tl).
hex_detect_single_byte_xor(StringList) ->
    %% Pair every candidate after the first with its line number.
    NSL = lists:zip(lists:seq(2, length(StringList)), tl(StringList)),
    HighestVal = fun({Q, X}, Acc={_, {_, _, AS}}) ->
                         {B, T, S} = hex_single_byte_xor_triple(X),
                         if S > AS -> {Q, {B, T, S}};
                            true -> Acc
                         end
                 end,
    %% Seed the fold with line 1 so every line competes on equal footing.
    Init = {1, hex_single_byte_xor_triple(hd(StringList))},
    {Line, {Byte, Text, _Score}} = lists:foldl(HighestVal, Init, NSL),
    {Line, Byte, Text}.
%% Helper Functions
%% @doc Decode a hex string and try all 256 single-byte keys, returning the
%% {Byte, PlainText, Score} triple with the best letter-frequency score.
hex_single_byte_xor_triple(HexText) ->
    CipherText = matasano_bytes:hex_to_binary(HexText),
    HighestFun = fun(Byte, Acc={_, _, AS}) ->
                         {T, S} = score_text(CipherText, Byte),
                         if S > AS -> {Byte, T, S};
                            true -> Acc
                         end
                 end,
    %% Byte 0 seeds the fold; the remaining 255 keys compete against it.
    {IT, IS} = score_text(CipherText, 0),
    lists:foldl(HighestFun, {0, IT, IS}, lists:seq(1, 255)).
%% @doc XOR the ciphertext with Byte and score the result; returns
%% {PlainText, Score}.
score_text(CipherText, Byte) ->
    XORFun = fun(X) -> X bxor Byte end,
    ConvertedText = matasano_bytes:binary_map(XORFun, CipherText),
    {ConvertedText, score_text(ConvertedText)}.

%% @doc Sum the per-character letter-frequency scores of Text.  The text is
%% lower-cased first so the score table only needs lowercase entries.
score_text(Text) ->
    AddScore = fun(X, Acc) -> Acc + get_score(X) end,
    matasano_bytes:binary_foldl(AddScore, 0,
                                matasano_bytes:binary_map(fun to_lower/1,
                                                          Text)).
%% @doc Frequency score for a single (lower-cased) character; characters
%% outside the scored set contribute 0.0.
%% NOTE(review): is_defined/1 followed by get_value/2 scans the proplist
%% twice; proplists:get_value(X, Scores, 0.0) would do it in one pass.
get_score(X) ->
    Scores = letter_scores(),
    case proplists:is_defined(X, Scores) of
        true -> proplists:get_value(X, Scores);
        false -> 0.0
    end.
%% Convert an ASCII uppercase letter to lower case; any other byte is
%% returned unchanged.  (Stray dataset metadata appended after the final
%% clause was removed; the magic numbers 65/90 are now character literals.)
to_lower(X) when X >= $A, X =< $Z -> X + 32;
to_lower(X) -> X.
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-------------------------------------------------------------------------
-module(ot_tracer).
-export([start_span/3,
with_span/2,
with_span/3,
current_span_ctx/1,
end_span/1]).
%% tracer access functions
-export([span_module/1]).
-include("opentelemetry.hrl").
-callback start_span(opentelemetry:tracer(),
opentelemetry:span_name(),
ot_span:start_opts()) -> opentelemetry:span_ctx().
-callback with_span(opentelemetry:tracer(), opentelemetry:span_ctx()) -> ok.
-callback with_span(opentelemetry:tracer(), opentelemetry:span_ctx(), fun()) -> ok.
-callback end_span(opentelemetry:tracer()) -> ok.
-callback current_span_ctx(opentelemetry:tracer()) -> opentelemetry:span_ctx().
-callback span_module(opentelemetry:tracer()) -> module().
%% @doc Start a span named Name on the given tracer, delegating to the
%% tracer's implementation module (the first element of the tracer tuple).
-spec start_span(opentelemetry:tracer(), opentelemetry:span_name(),
                 ot_span:start_opts()) -> opentelemetry:span_ctx().
start_span(Tracer={Module, _}, Name, Opts) ->
    Module:start_span(Tracer, Name, Opts).
%% @doc Install Span as the current span on this tracer.
with_span(Tracer={Module, _}, Span) when is_atom(Module) ->
    Module:with_span(Tracer, Span).

%% @doc Run Fun with SpanCtx installed as the current span.
%% FIX: the original dispatched to Module:with_value/3, which is not part of
%% this behaviour; the declared callback (see -callback above) is
%% with_span/3.
-spec with_span(opentelemetry:tracer(), opentelemetry:span_ctx(), fun()) -> ok.
with_span(Tracer={Module, _}, SpanCtx, Fun) when is_atom(Module) ->
    Module:with_span(Tracer, SpanCtx, Fun).
%% @doc Finish the currently active span on this tracer.
-spec end_span(opentelemetry:tracer()) -> ok.
end_span(Tracer={Module, _}) ->
    Module:end_span(Tracer).

%% @doc Return the currently active span context of this tracer.
current_span_ctx(Tracer={Module, _}) ->
    Module:current_span_ctx(Tracer).
%% tracer access functions
%% @doc Return the module implementing span operations for this tracer.
%% (Stray dataset metadata appended after the final clause was removed.)
span_module(Tracer={Module, _}) ->
    Module:span_module(Tracer).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc
%% The sidejob_resource_sup manages the entire supervision hierarchy for
%% a sidejob resource. Thus, there is one resource supervisor for each
%% registered sidejob resource.
%%
%% The resource supervisor is the owner of a resource's limit and stats
%% ETS tables, therefore ensuring the ETS tables survive crashes elsewhere
%% in the resource hierarchy.
%%
%% The resource supervisor has two children: a {@link sidejob_worker_sup}
%% that supervises the actual worker processes for a given resource, and
%% a {@link sidejob_resource_stats} server that aggregates statistics
%% reported by the worker processes.
-module(sidejob_resource_sup).
-behaviour(supervisor).
%% API
-export([start_link/2, stats_ets/1]).
%% Supervisor callbacks
-export([init/1]).
%%%===================================================================
%%% API functions
%%%===================================================================
%% Start the resource supervisor registered under Name; Mod is the worker
%% callback module handed down to the worker supervisor.
start_link(Name, Mod) ->
    supervisor:start_link({local, Name}, ?MODULE, [Name, Mod]).
%% Derive the name of the resource's stats ETS table: <Name>_stats_ets.
%% The latin-1 binary round-trip deliberately restricts names to latin-1.
stats_ets(Name) ->
    Base = atom_to_binary(Name, latin1),
    binary_to_atom(<<Base/binary, "_stats_ets">>, latin1).
%%%===================================================================
%%% Supervisor callbacks
%%%===================================================================
%% Build the resource's supervision tree.  All ETS tables are created here,
%% in the supervisor process, so they survive crashes of the workers and of
%% the stats server.  (Stray dataset metadata appended after the final
%% expression was removed.)
init([Name, Mod]) ->
    Width = Name:width(),
    StatsETS = stats_ets(Name),
    StatsName = Name:stats(),
    WorkerNames = sidejob_worker:workers(Name, Width),
    %% One public counter table per worker, pre-seeded with usage/full = 0.
    _WorkerETS = [begin
                      WorkerTab = ets:new(WorkerName, [named_table,
                                                       public]),
                      ets:insert(WorkerTab, [{usage, 0},
                                             {full, 0}]),
                      WorkerTab
                  end || WorkerName <- WorkerNames],
    %% Shared table for aggregated statistics, tuned for concurrent access.
    StatsTab = ets:new(StatsETS, [named_table,
                                  public,
                                  {read_concurrency,true},
                                  {write_concurrency,true}]),
    sidejob_resource_stats:init_stats(StatsTab),
    WorkerSup = {sidejob_worker_sup,
                 {sidejob_worker_sup, start_link,
                  [Name, Width, StatsName, Mod]},
                 permanent, infinity, supervisor, [sidejob_worker_sup]},
    StatsServer = {StatsName,
                   {sidejob_resource_stats, start_link, [StatsName, StatsTab]},
                   permanent, 5000, worker, [sidejob_resource_stats]},
    {ok, {{one_for_one, 10, 10}, [WorkerSup, StatsServer]}}.
%% -------------------------------------------------------------------
%%
%% riak_ql_inverse_distrib_fns: implementation of inverse distribution functions
%% for the query runner
%%
%% Copyright (c) 2017 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(riak_ql_inverse_distrib_fns).
-export(['PERCENTILE_DISC'/3,
'PERCENTILE_CONT'/3,
'MEDIAN'/3,
'MODE'/3]).
-export([fn_arity/1,
fn_type_signature/2,
fn_param_check/2,
supported_functions/0]).
-type invdist_function() :: 'PERCENTILE_CONT'
| 'PERCENTILE_DISC'
| 'MEDIAN'
| 'MODE'.
-export_type([invdist_function/0]).
-include("riak_ql_ddl.hrl").
%% @doc The inverse-distribution functions this module implements.
supported_functions() ->
    ['PERCENTILE_DISC', 'PERCENTILE_CONT', 'MEDIAN', 'MODE'].
%% @doc Resolve a function's return type from its argument types.
%% PERCENTILE_DISC, MEDIAN and MODE pick an existing value from the column
%% and therefore keep the column type; PERCENTILE_CONT interpolates and so
%% always returns double.
-spec fn_type_signature(invdist_function(), [riak_ql_ddl:external_field_type()]) ->
                               riak_ql_ddl:external_field_type() |
                               {error, term()}.
fn_type_signature('PERCENTILE_DISC', [ColumnType, double])
  when ColumnType == sint64;
       ColumnType == double;
       ColumnType == timestamp -> ColumnType;
fn_type_signature('PERCENTILE_CONT', [ColumnType, double])
  when ColumnType == sint64;
       ColumnType == double;
       ColumnType == timestamp -> double;
fn_type_signature('MEDIAN', [ColumnType])
  when ColumnType == sint64;
       ColumnType == double;
       ColumnType == timestamp -> ColumnType;
fn_type_signature('MODE', [ColumnType])
  when ColumnType == sint64;
       ColumnType == double;
       ColumnType == timestamp -> ColumnType;
fn_type_signature(Fn, Args) ->
    {error, {argument_type_mismatch, Fn, Args}}.
%% @doc Number of arguments each function takes (column plus, for the
%% percentile functions, the percentile fraction).
%% NOTE(review): the catch-all returns {error, invalid_function}, which the
%% spec above does not reflect.
-spec fn_arity(invdist_function()) -> non_neg_integer().
fn_arity('PERCENTILE_CONT') -> 2;
fn_arity('PERCENTILE_DISC') -> 2;
fn_arity('MEDIAN') -> 1;
fn_arity('MODE') -> 1;
fn_arity(_) -> {error, invalid_function}.
%% @doc Validate the literal parameters of a call; the percentile fraction
%% must lie in [0.0, 1.0].  Returns ok or {error, ArgPosition} where the
%% position counts the column as argument 1.
-spec fn_param_check(invdist_function(), [riak_ql_ddl:external_field_type()]) ->
                            ok | {error, WhichParamInvalid::pos_integer()}.
fn_param_check(PcntlFn, [Pc])
  when (PcntlFn == 'PERCENTILE_CONT' orelse PcntlFn == 'PERCENTILE_DISC') andalso
       (Pc >= 0.0 andalso Pc =< 1.0) ->
    ok;
fn_param_check(PcntlFn, [_Pc])
  when (PcntlFn == 'PERCENTILE_CONT' orelse PcntlFn == 'PERCENTILE_DISC') ->
    {error, 2};
fn_param_check('MEDIAN', []) ->
    ok;
fn_param_check('MODE', []) ->
    ok.
%% functions defined
%%
%% Note that ValuesAtF expects row position to be 0-based.
%% Discrete percentile: return the value at (0-based) row position
%% trunc(Pc * (RowsTotal - 1)).  ValuesAtF fetches rows by position.
'PERCENTILE_DISC'([Pc], RowsTotal, ValuesAtF) ->
    Position = trunc(Pc * (RowsTotal - 1)),
    [[Value]] = ValuesAtF([{Position, 1}]),
    Value.
%% Continuous percentile: linearly interpolate between the two rows
%% bracketing the (possibly fractional) row number Pc * (RowsTotal - 1);
%% an integral row number returns that row's value unchanged.
'PERCENTILE_CONT'([Pc], RowsTotal, ValuesAtF) ->
    RN = Pc * (RowsTotal - 1),
    Lo = trunc(RN),
    Hi = ceil(RN),
    if
        Lo =:= Hi ->
            [[Exact]] = ValuesAtF([{Lo, 1}]),
            Exact;
        true ->
            [[LoVal], [HiVal]] = ValuesAtF([{Lo, 1}, {Hi, 1}]),
            (Hi - RN) * LoVal + (RN - Lo) * HiVal
    end.
%% MEDIAN is simply the 50th discrete percentile.
'MEDIAN'([], RowsTotal, ValuesAtF) ->
    'PERCENTILE_DISC'([0.5], RowsTotal, ValuesAtF).
%% MODE: the most frequent value.  Rows are fetched in sorted order, so equal
%% values form contiguous runs ("bins"); we scan for the longest run,
%% seeding the scan with the value at position 0.
'MODE'([], RowsTotal, ValuesAtF) ->
    [[Min]] = ValuesAtF([{0, 1}]),
    largest_bin(Min, ValuesAtF, RowsTotal).
%% This will be inefficient for ldb backends (that is, when a qbuf is
%% dumped to leveldb): in this function, we call ValuesAtF to retrieve
%% one row at a time. This means, each time it is called,
%% `riak_kv_qry_buffers_ldb:fetch_rows` needs to seek from start and
%% trundle all the way to the Nth position, and all over again to
%% fetch N+1th row. The obvious todo item it to either teach
%% fetch_rows to cache iterators or, alternatively, fetch rows in
%% chunks ourselves.
%% Scan sorted rows keeping {LargestValue, LargestCount, CurrentValue,
%% CurrentCount}; the value at position 0 has already been consumed.
%% On ties the earlier (smaller) value wins, because a later run only takes
%% over when its count is strictly greater.  The input must be sorted
%% ascending: a value smaller than the current one matches no clause.
largest_bin(Min, ValuesAtF, RowsTotal) ->
    largest_bin_({Min, 1, Min, 1}, ValuesAtF, 1, RowsTotal).

%% FIX: the original terminal clause ignored the final run's count, so e.g.
%% sorted values [1, 2, 2] reported mode 1 instead of 2.  The final run is
%% now compared against the largest seen so far (still strictly greater, so
%% tie-breaking is unchanged).
largest_bin_({LargestV, LargestC, CurrentV, CurrentC}, _ValuesAtF, Pos, RowsTotal) when Pos >= RowsTotal ->
    if
        CurrentC > LargestC -> CurrentV;
        true -> LargestV
    end;
largest_bin_({LargestV, LargestC, CurrentV, CurrentC}, ValuesAtF, Pos, RowsTotal) ->
    case ValuesAtF([{Pos, 1}]) of
        [[V]] when V == CurrentV ->
            %% Still inside the current bin.
            largest_bin_({LargestV, LargestC,
                          CurrentV, CurrentC + 1}, ValuesAtF, Pos + 1, RowsTotal);
        [[V]] when V > CurrentV,
                   CurrentC > LargestC ->
            %% The current bin became the largest; start a new current bin.
            largest_bin_({CurrentV, CurrentC,
                          V, 1}, ValuesAtF, Pos + 1, RowsTotal);
        [[V]] when V > CurrentV,
                   CurrentC =< LargestC ->
            %% Keep the previous largest; start a new current bin.
            largest_bin_({LargestV, LargestC,
                          V, 1}, ValuesAtF, Pos + 1, RowsTotal)
    end.
%% Integer ceiling shim (predates the erlang:ceil/1 BIF).  Returns
%% trunc(X) for whole numbers and trunc(X) + 1 otherwise, which is a
%% true ceiling for the non-negative row numbers used in this module.
ceil(X) ->
    Trunced = trunc(X),
    case X == Trunced of
        true  -> Trunced;
        false -> Trunced + 1
    end.
%%% @doc Module enacl implements bindings to the NaCl/libsodium crypto-library
%%% <p>This module implements NIF bindings to the library known as NaCl (pronounced "salt").
%%% The NaCl library provides a sane cryptographic interface to the world in an attempt to
%%% make it harder to abuse and misuse cryptographic primitives.</p>
%%% <p>This module implements an Erlang-idiomatic API to the underlying library. If in doubt
%%% about a primitive, always consult the underlying documentation.</p>
%%% <p>There are two libraries in existence: NaCl and libsodium, the latter being a more
%%% portable variant of the NaCl library. The C-level API is interchangeable so we can run
%%% on any of these underlying libraries as seen from the Erlang world. We simply have to
%%% restrict ourselves to the portion of the code base which is overlapping.</p>
%%% <p><b>Warning:</b> It is necessary to apply the primitives here correctly. Wrong
%%% application may result in severely reduced strength of the cryptography. Take some
%%% time to make sure this is the case before using.</p>
%%% <p><b>Note:</b> All functions will fail with a `badarg' error if given incorrect
%%% parameters. Also, if something is wrong internally, they will raise an error of
%%% the form `enacl_internal_error'. There is usually no way to continue gracefully
%%% from either of these. A third error is `enacl_finalized', raised when you try
%%% re-using an already finalized state for multi-part messages.</p>
%%% @end.
-module(enacl).
%% Public key crypto
-export([
%% EQC
box_keypair/0,
box/4,
box_open/4,
box_beforenm/2,
box_afternm/3,
box_open_afternm/3,
box_NONCEBYTES/0,
box_PUBLICKEYBYTES/0,
box_SECRETKEYBYTES/0,
box_BEFORENMBYTES/0,
sign_PUBLICBYTES/0,
sign_SECRETBYTES/0,
sign_SEEDBYTES/0,
sign_keypair/0,
sign_seed_keypair/1,
sign/2,
sign_open/2,
sign_detached/2,
sign_verify_detached/3,
sign_init/0,
sign_update/2,
sign_final_create/2,
sign_final_verify/3,
box_seal/2,
box_seal_open/3
]).
%% Secret key crypto
-export([
%% EQC
secretbox_KEYBYTES/0,
secretbox_NONCEBYTES/0,
secretbox/3,
secretbox_open/3,
%% No Tests!
stream_chacha20_KEYBYTES/0,
stream_chacha20_NONCEBYTES/0,
stream_chacha20/3,
stream_chacha20_xor/3,
%% EQC
aead_chacha20poly1305_ietf_encrypt/4,
aead_chacha20poly1305_ietf_decrypt/4,
aead_chacha20poly1305_ietf_KEYBYTES/0,
aead_chacha20poly1305_ietf_NPUBBYTES/0,
aead_chacha20poly1305_ietf_ABYTES/0,
aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX/0,
aead_xchacha20poly1305_ietf_encrypt/4,
aead_xchacha20poly1305_ietf_decrypt/4,
aead_xchacha20poly1305_ietf_KEYBYTES/0,
aead_xchacha20poly1305_ietf_NPUBBYTES/0,
aead_xchacha20poly1305_ietf_ABYTES/0,
aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX/0,
%% EQC
stream_KEYBYTES/0,
stream_NONCEBYTES/0,
stream/3,
%% No Tests!
stream_xor/3,
%% EQC
auth_KEYBYTES/0,
auth_BYTES/0,
auth/2,
auth_verify/3,
%% EQC
onetime_auth_KEYBYTES/0,
onetime_auth_BYTES/0,
onetime_auth/2,
onetime_auth_verify/3
]).
%% Hash functions
-export([
%% No Tests!
generichash/3,
generichash/2,
generichash_init/2,
generichash_update/2,
generichash_final/1,
%% EQC!
shorthash_key_size/0,
shorthash_size/0,
shorthash/2,
pwhash_SALTBYTES/0,
pwhash/2,
pwhash/4,
pwhash/5,
pwhash_str/1,
pwhash_str/3,
pwhash_str_verify/2
]).
%% Key derivation
-export([
kdf_KEYBYTES/0,
kdf_CONTEXTBYTES/0,
kdf_derive_from_key/3
]).
%% Low-level subtle functions which are hard to get correct
-export([
%% EQC
hash/1,
verify_16/2,
verify_32/2,
%% No Tests!
unsafe_memzero/1
]).
%% Randomness
-export([
%% EQC
randombytes/1,
randombytes_uint32/0,
randombytes_uniform/1
]).
%%% Specific primitives
%% Curve 25519 operations.
-export([
%% No Tests!
curve25519_scalarmult/1, curve25519_scalarmult/2,
curve25519_scalarmult_base/1
]).
%% Ed 25519 operations.
-export([
%% No Tests!
crypto_sign_ed25519_keypair/0,
crypto_sign_ed25519_sk_to_pk/1,
crypto_sign_ed25519_public_to_curve25519/1,
crypto_sign_ed25519_secret_to_curve25519/1,
crypto_sign_ed25519_public_size/0,
crypto_sign_ed25519_secret_size/0
]).
%% Key exchange functions
-export([
%% EQC
kx_keypair/0,
kx_client_session_keys/3,
kx_server_session_keys/3,
kx_PUBLICKEYBYTES/0,
kx_SECRETKEYBYTES/0,
kx_SESSIONKEYBYTES/0
]).
%% Secretstream operations.
-export([
%% Unit tests
secretstream_xchacha20poly1305_ABYTES/0,
secretstream_xchacha20poly1305_HEADERBYTES/0,
secretstream_xchacha20poly1305_KEYBYTES/0,
secretstream_xchacha20poly1305_MESSAGEBYTES_MAX/0,
secretstream_xchacha20poly1305_TAG_MESSAGE/0,
secretstream_xchacha20poly1305_TAG_PUSH/0,
secretstream_xchacha20poly1305_TAG_REKEY/0,
secretstream_xchacha20poly1305_TAG_FINAL/0,
secretstream_xchacha20poly1305_keygen/0,
secretstream_xchacha20poly1305_init_push/1,
secretstream_xchacha20poly1305_push/4,
secretstream_xchacha20poly1305_init_pull/2,
secretstream_xchacha20poly1305_pull/3,
secretstream_xchacha20poly1305_rekey/1
]).
%% Internal verification of the system
-export([verify/0]).
%% Definitions of system budgets
%% To get a grip for these, call `enacl_timing:all/0' on your system. The numbers here are
%% described in the README.md file.
-define(HASH_SIZE, 4 * 1024).
-define(HASH_REDUCTIONS, 17 * 2).
-define(BOX_BEFORENM_REDUCTIONS, 60).
-define(BOX_AFTERNM_SIZE, 8 * 1024).
-define(BOX_AFTERNM_REDUCTIONS, 17 * 2).
-define(SECRETBOX_SIZE, 8 * 1024).
-define(SECRETBOX_REDUCTIONS, 17 * 2).
-define(SECRETBOX_OPEN_REDUCTIONS, 17 * 2).
-define(STREAM_SIZE, 16 * 1024).
-define(STREAM_REDUCTIONS, 17 * 2).
-define(auth_BYTES, 4 * 1024).
-define(AUTH_REDUCTIONS, 17 * 2).
-define(ONETIME_auth_BYTES, 16 * 1024).
-define(ONETIME_AUTH_REDUCTIONS, 16 * 2).
-define(ED25519_PUBLIC_TO_CURVE_REDS, 20 * 2).
-define(ED25519_SECRET_TO_CURVE_REDS, 20 * 2).
%% Constants used throughout the code base
-define(CRYPTO_BOX_ZEROBYTES, 32).
-define(P_ZEROBYTES, <<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>). %% 32 bytes of 0
-define(CRYPTO_BOX_BOXZEROBYTES, 16).
-define(P_BOXZEROBYTES, <<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>). %% 16 bytes
-define(CRYPTO_SECRETBOX_ZEROBYTES, 32).
-define(S_ZEROBYTES, <<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>). %% 32 bytes
-define(CRYPTO_SECRETBOX_BOXZEROBYTES, 16).
-define(S_BOXZEROBYTES, <<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>). %% 16 bytes
-define(CRYPTO_STREAM_CHACHA20_KEYBYTES, 32).
-define(CRYPTO_STREAM_CHACHA20_NONCEBYTES, 8).
-define(CRYPTO_STREAM_KEYBYTES, 32).
-define(CRYPTO_STREAM_NONCEBYTES, 24).
-define(CRYPTO_KX_PUBLICKEYBYTES, 32).
-define(CRYPTO_KX_SECRETKEYBYTES, 32).
-define(CRYPTO_KX_SESSIONKEYBYTES, 32).
-define(CRYPTO_GENERICHASH_BYTES_MIN, 16).
-define(CRYPTO_GENERICHASH_BYTES_MAX, 64).
-define(CRYPTO_GENERICHASH_BYTES, 32).
-define(CRYPTO_GENERICHASH_KEYBYTES_MIN, 16).
-define(CRYPTO_GENERICHASH_KEYBYTES_MAX, 64).
-define(CRYPTO_GENERICHASH_KEYBYTES, 32).
-define(CRYPTO_SECRETSTREAM_TAG_MESSAGE, 0).
-define(CRYPTO_SECRETSTREAM_TAG_PUSH, 1).
-define(CRYPTO_SECRETSTREAM_TAG_REKEY, 2).
-define(CRYPTO_SECRETSTREAM_TAG_FINAL, 3).
%% Size limits
-define(MAX_32BIT_INT, 1 bsl 32).
%% @doc Verify makes sure the constants defined in libsodium matches ours
%% Crashes with badmatch if a zero-padding constant disagrees;
%% otherwise returns ok, or
%% {error, {verifier, Name, {Expected, '/=', Actual}}} for the first
%% mismatching size/tag checked via run_verifiers/1.
verify() ->
    true = equals(binary:copy(<<0>>, enacl_nif:crypto_box_ZEROBYTES()), ?P_ZEROBYTES),
    true = equals(binary:copy(<<0>>, enacl_nif:crypto_box_BOXZEROBYTES()), ?P_BOXZEROBYTES),
    true = equals(binary:copy(<<0>>, enacl_nif:crypto_secretbox_ZEROBYTES()), ?S_ZEROBYTES),
    true = equals(binary:copy(<<0>>, enacl_nif:crypto_secretbox_BOXZEROBYTES()),
                  ?S_BOXZEROBYTES),
    %% Each entry pairs an enacl_nif constant function with the value
    %% this module assumes for it (see the -define block above).
    Verifiers =
        [
         {crypto_stream_chacha20_KEYBYTES, ?CRYPTO_STREAM_CHACHA20_KEYBYTES},
         {crypto_stream_chacha20_NONCEBYTES, ?CRYPTO_STREAM_CHACHA20_NONCEBYTES},
         {crypto_stream_KEYBYTES, ?CRYPTO_STREAM_KEYBYTES},
         {crypto_stream_NONCEBYTES, ?CRYPTO_STREAM_NONCEBYTES},
         {crypto_box_ZEROBYTES, ?CRYPTO_BOX_ZEROBYTES},
         {crypto_box_BOXZEROBYTES, ?CRYPTO_BOX_BOXZEROBYTES},
         {crypto_secretbox_ZEROBYTES, ?CRYPTO_SECRETBOX_ZEROBYTES},
         {crypto_secretbox_BOXZEROBYTES, ?CRYPTO_SECRETBOX_BOXZEROBYTES},
         {crypto_kx_SESSIONKEYBYTES, ?CRYPTO_KX_SESSIONKEYBYTES},
         {crypto_kx_PUBLICKEYBYTES, ?CRYPTO_KX_PUBLICKEYBYTES},
         {crypto_kx_SECRETKEYBYTES, ?CRYPTO_KX_SECRETKEYBYTES},
         {crypto_generichash_BYTES, ?CRYPTO_GENERICHASH_BYTES},
         {crypto_generichash_BYTES_MIN, ?CRYPTO_GENERICHASH_BYTES_MIN},
         {crypto_generichash_BYTES_MAX, ?CRYPTO_GENERICHASH_BYTES_MAX},
         {crypto_generichash_KEYBYTES, ?CRYPTO_GENERICHASH_KEYBYTES},
         {crypto_generichash_KEYBYTES_MIN, ?CRYPTO_GENERICHASH_KEYBYTES_MIN},
         {crypto_generichash_KEYBYTES_MAX, ?CRYPTO_GENERICHASH_KEYBYTES_MAX},
         {crypto_secretstream_xchacha20poly1305_TAG_MESSAGE, ?CRYPTO_SECRETSTREAM_TAG_MESSAGE},
         {crypto_secretstream_xchacha20poly1305_TAG_PUSH, ?CRYPTO_SECRETSTREAM_TAG_PUSH},
         {crypto_secretstream_xchacha20poly1305_TAG_REKEY, ?CRYPTO_SECRETSTREAM_TAG_REKEY},
         {crypto_secretstream_xchacha20poly1305_TAG_FINAL, ?CRYPTO_SECRETSTREAM_TAG_FINAL}
        ],
    run_verifiers(Verifiers).
%% Check each {NifName, ExpectedValue} pair by calling the NIF
%% constant function dynamically; stop at the first mismatch.
run_verifiers([]) ->
    ok;
run_verifiers([{Name, Expected} | Rest]) ->
    case enacl_nif:Name() of
        Expected -> run_verifiers(Rest);
        Actual   -> {error, {verifier, Name, {Expected, '/=', Actual}}}
    end.
%% Exact (=:=) comparison that reports the mismatching pair rather
%% than a bare false, so a badmatch on `true = equals(...)' shows
%% both values in the crash report.
equals(A, B) when A =:= B -> true;
equals(A, B) -> {A, '/=', B}.
%% Low level helper functions
%% -----------------

%% @doc hash/1 hashes data into a cryptographically secure checksum.
%%
%% <p>Given an iodata(), `Data' of any size, run a cryptographically secure hash algorithm to
%% produce a checksum of the data. This can be used to verify the integrity of a data block
%% since the checksum have the properties of cryptographic hashes in general.</p>
%% <p>The currently selected primitive (Nov. 2014) is SHA-512</p>
%% @end
-spec hash(Data) -> Checksum
    when
      Data :: iodata(),
      Checksum :: binary().
hash(Bin) ->
    case iolist_size(Bin) of
        K when K =< ?HASH_SIZE ->
            %% Small input: use the "_b" NIF variant and charge the
            %% caller reductions via bump/4 (defined later in this module).
            bump(enacl_nif:crypto_hash_b(Bin), ?HASH_REDUCTIONS, ?HASH_SIZE, K);
        _ ->
            %% Large input: plain NIF variant (no reduction bumping here).
            enacl_nif:crypto_hash(Bin)
    end.
%% @doc verify_16/2 implements constant time 16-byte binary() verification
%%
%% <p>A subtle problem in cryptographic software are timing attacks where an attacker exploits
%% early exit in string verification if the strings happen to mismatch. This allows the
%% attacker to time how long verification took and thus learn the structure of the desired
%% string to use. The verify_16/2 call will check two 16 byte strings for equality while
%% guaranteeing the equality operation is constant time.</p>
%% <p>If the strings are not exactly 16 bytes, the comparison function will fail with badarg.</p>
%% <p>The functions take binary() values and not iolist() values since the latter would convert in non-constant time</p>
%% <p>Verification returns a boolean. `true' if the strings match, `false' otherwise.</p>
%% @end
-spec verify_16(binary(), binary()) -> boolean().
verify_16(X, Y) when is_binary(X), is_binary(Y) ->
    enacl_nif:crypto_verify_16(X, Y);
verify_16(_, _) ->
    %% Non-binary input is a programmer error; fail fast.
    error(badarg).
%% @doc verify_32/2 implements constant time 32-byte binary() verification
%%
%% This function works as {@link verify_16/2} but does so on 32 byte strings. Same caveats apply.
%% @end
-spec verify_32(binary(), binary()) -> boolean().
verify_32(X, Y) when is_binary(X), is_binary(Y) ->
    enacl_nif:crypto_verify_32(X, Y);
verify_32(_, _) ->
    error(badarg).
%% @doc unsafe_memzero/1 implements guaranteed zero'ing of binary data.
%%
%% <p><bold>WARNING:</bold> Take great care. Here be dragons.</p>
%% <p>This is very unsafe. If any copies of the binary have been made they are unaffected.
%% This is intended for use with cryptographic keys where they are only shared within
%% a running process without copies. This allows removing, eg, symmetric session keys. </p>
%% @end
-spec unsafe_memzero(binary()) -> atom().
unsafe_memzero(X) when is_binary(X) ->
    enacl_nif:sodium_memzero(X);
unsafe_memzero(_) ->
    error(badarg).
%% @doc generichash/3 creates a hash of the message using a key.
%%
%% This function generates a hash of the message using a key. The hash size is
%% either 16, 32 or 64 bytes
%% @end
%% NOTE(review): the type below permits 10..64 while the doc text says
%% 16/32/64 -- the NIF validates the real bounds; confirm before
%% tightening either.
-type generichash_bytes() :: 10..64.
-spec generichash(generichash_bytes(), iodata(), binary()) -> binary().
generichash(HashSize, Message, Key) ->
    enacl_nif:crypto_generichash(HashSize, Message, Key).

%% @doc generichash/2 creates a hash of the message.
%%
%% This function generates a hash of the message. The hash size is
%% either 16, 32 or 64 bytes
%% @end
-spec generichash(generichash_bytes(), iodata()) -> binary().
generichash(HashSize, Message) ->
    %% Unkeyed variant: delegate with an empty key.
    enacl_nif:crypto_generichash(HashSize, Message, <<>>).

%% @doc generichash_init/2 initializes a multi-part hash.
%% @end
-spec generichash_init(generichash_bytes(), binary()) -> reference().
generichash_init(HashSize, Key) ->
    enacl_nif:crypto_generichash_init(HashSize, Key).

%% @doc generichash_update/2 updates a multi-part hash with new data.
%% @end
-spec generichash_update(reference(), iodata()) -> reference().
generichash_update(State, Message) ->
    enacl_nif:crypto_generichash_update(State, Message).

%% @doc generichash_final/1 finalizes a multi-part hash.
%% @end
-spec generichash_final(reference()) -> binary().
generichash_final(State) ->
    enacl_nif:crypto_generichash_final(State).
%% Cost limits accepted by the pwhash family: a named libsodium
%% preset or an explicit positive integer.
-type pwhash_limit() :: interactive | moderate | sensitive | pos_integer().

%% @doc pwhash_SALTBYTES/0 returns the number of bytes required for salt.
%% @end
-spec pwhash_SALTBYTES() -> pos_integer().
pwhash_SALTBYTES() ->
    enacl_nif:crypto_pwhash_SALTBYTES().

%% Password hashing algorithm selector; `default' lets the NIF choose.
-type pwhash_alg() :: default | argon2i13 | argon2id13 | pos_integer().

%% @doc pwhash/2 hash a password
%%
%% This function generates a fixed size salted hash of a user defined password.
%% Defaults to interactive/interactive limits.
%% @end
-spec pwhash(iodata(), binary()) -> binary().
pwhash(Password, Salt) ->
    pwhash(Password, Salt, interactive, interactive).

%% @doc pwhash/4 hash a password
%%
%% This function generates a fixed size salted hash of a user defined password given Ops and Mem
%% limits.
%% @end
-spec pwhash(Password, Salt, Ops, Mem) -> binary()
    when
      Password :: iodata(),
      Salt :: binary(),
      Ops :: pwhash_limit(),
      Mem :: pwhash_limit().
pwhash(Password, Salt, Ops, Mem) ->
    %% Algorithm defaults to `default' (see pwhash/5).
    enacl_nif:crypto_pwhash(Password, Salt, Ops, Mem, default).

%% @doc pwhash/5 hash a password
%%
%% This function generates a fixed size salted hash of a user defined password given Ops and Mem
%% limits.
%% @end
-spec pwhash(Password, Salt, Ops, Mem, Alg) -> binary()
    when
      Password :: iodata(),
      Salt :: binary(),
      Ops :: pwhash_limit(),
      Mem :: pwhash_limit(),
      Alg :: pwhash_alg().
pwhash(Password, Salt, Ops, Mem, Alg) ->
    enacl_nif:crypto_pwhash(Password, Salt, Ops, Mem, Alg).

%% @doc pwhash_str/1 generates a ASCII encoded hash of a password
%%
%% This function generates a fixed size, salted, ASCII encoded hash of a user defined password.
%% Defaults to interactive/interactive limits.
%% @end
-spec pwhash_str(iodata()) -> iodata().
pwhash_str(Password) ->
    pwhash_str(Password, interactive, interactive).

%% @doc pwhash_str/3 generates a ASCII encoded hash of a password
%%
%% This function generates a fixed size, salted, ASCII encoded hash of a user defined password
%% given Ops and Mem limits.
%% @end
-spec pwhash_str(Password, Ops, Mem) -> iodata()
    when
      Password :: iodata(),
      Ops :: pwhash_limit(),
      Mem :: pwhash_limit().
pwhash_str(Password, Ops, Mem) ->
    %% The NIF returns a fixed-size, NUL-padded C string; strip the padding.
    strip_null_terminate(enacl_nif:crypto_pwhash_str(Password, Ops, Mem)).
%% Strip everything from the first NUL byte onward: the pwhash_str
%% NIF returns a C string NUL-padded to a fixed length.  Tolerates
%% input containing no NUL at all (returned unchanged) instead of
%% crashing with badmatch as the previous single-pattern match did.
strip_null_terminate(Binary) ->
    case binary:split(Binary, <<0>>) of
        [Prefix, _Padding] -> Prefix;
        [Whole]            -> Whole
    end.
%% Append a terminating NUL byte: the pwhash_str_verify NIF expects
%% a C-style string.
null_terminate(ASCII) ->
    Flat = iolist_to_binary(ASCII),
    <<Flat/binary, 0>>.
%% @doc pwhash_str_verify/2 compares a password with a hash
%%
%% This function verifies that the hash is generated from the password. The
%% function returns true if the verification succeeds, false otherwise
%% @end
-spec pwhash_str_verify(binary(), iodata()) -> boolean().
pwhash_str_verify(HashPassword, Password) ->
    %% Re-append the NUL byte stripped by pwhash_str/3: the NIF
    %% expects a NUL-terminated C string.
    enacl_nif:crypto_pwhash_str_verify(null_terminate(HashPassword), Password).
%% Key Derivation

%% @doc kdf_KEYBYTES/0 returns the number of bytes required for master key.
%% @end
-spec kdf_KEYBYTES() -> pos_integer().
kdf_KEYBYTES() ->
    enacl_nif:crypto_kdf_KEYBYTES().

%% @doc kdf_CONTEXTBYTES/0 returns the number of bytes required for context.
%% @end
-spec kdf_CONTEXTBYTES() -> pos_integer().
kdf_CONTEXTBYTES() ->
    enacl_nif:crypto_kdf_CONTEXTBYTES().

%% @doc kdf_derive_from_key/3 derive a key from a single high entropy key
%% @end
-spec kdf_derive_from_key(MasterKey, Context, Id) -> binary()
    when
      MasterKey :: iodata(),
      Context :: binary(),
      Id :: pos_integer().
kdf_derive_from_key(MasterKey, Context, Id) ->
    enacl_nif:crypto_kdf_derive_from_key(MasterKey, Context, Id).
%% Public Key Crypto
%% ---------------------

%% @doc box_keypair/0 creates a new Public/Secret keypair.
%%
%% Generates and returns a new key pair for the Box encryption scheme. The return value is a
%% map in order to avoid using the public key as a secret key and vice versa.
%% @end
-spec box_keypair() -> #{ atom() => binary() }.
box_keypair() ->
    {PK, SK} = enacl_nif:crypto_box_keypair(),
    #{ public => PK, secret => SK}.

%% @doc box/4 encrypts+authenticates a message to another party.
%%
%% Encrypt a `Msg' to the party identified by public key `PK' using your own secret key `SK' to
%% authenticate yourself. Requires a `Nonce' in addition. Returns the ciphered message.
%% @end
-spec box(Msg, Nonce, PK, SK) -> CipherText
    when
      Msg :: iodata(),
      Nonce :: binary(),
      PK :: binary(),
      SK :: binary(),
      CipherText :: binary().
box(Msg, Nonce, PK, SK) ->
    %% The classic NaCl C API requires the plaintext to be prefixed
    %% with crypto_box_ZEROBYTES zero bytes (?P_ZEROBYTES).
    enacl_nif:crypto_box([?P_ZEROBYTES, Msg], Nonce, PK, SK).

%% @doc box_open/4 decrypts+verifies a message from another party.
%%
%% Decrypt a `CipherText' into a `Msg' given the other partys public key `PK' and your secret
%% key `SK'. Also requires the same nonce as was used by the other party. Returns the plaintext
%% message.
%% @end
-spec box_open(CipherText, Nonce, PK, SK) -> {ok, Msg} | {error, failed_verification}
    when
      CipherText :: iodata(),
      Nonce :: binary(),
      PK :: binary(),
      SK :: binary(),
      Msg :: binary().
box_open(CipherText, Nonce, PK, SK) ->
    %% Ciphertexts carry a crypto_box_BOXZEROBYTES zero-byte prefix.
    enacl_nif:crypto_box_open([?P_BOXZEROBYTES, CipherText], Nonce, PK, SK).
%% @doc box_beforenm/2 precomputes a box shared key for a PK/SK keypair
%% @end
-spec box_beforenm(PK, SK) -> binary()
    when
      PK :: binary(),
      SK :: binary().
box_beforenm(PK, SK) ->
    R = enacl_nif:crypto_box_beforenm(PK, SK),
    %% Charge the caller for the fixed-cost work done inside the NIF.
    erlang:bump_reductions(?BOX_BEFORENM_REDUCTIONS),
    R.

%% @doc box_afternm/3 works like `box/4' but uses a precomputed key
%%
%% Calling `box_afternm(M, Nonce, K)' for a precomputed key `K = box_beforenm(PK, SK)' works exactly as
%% if you had called `box(M, Nonce, PK, SK)'. Except that it avoids computations in the elliptic curve Curve25519,
%% and thus is a much faster operation.
%% @end
-spec box_afternm(Msg, Nonce, K) -> CipherText
    when
      Msg :: iodata(),
      Nonce :: binary(),
      K :: binary(),
      CipherText :: binary().
box_afternm(Msg, Nonce, Key) ->
    case iolist_size(Msg) of
        K when K =< ?BOX_AFTERNM_SIZE ->
            %% Small message: "_b" NIF variant plus reduction bumping.
            bump(enacl_nif:crypto_box_afternm_b([?P_ZEROBYTES, Msg], Nonce, Key),
                 ?BOX_AFTERNM_REDUCTIONS, ?BOX_AFTERNM_SIZE, K);
        _ ->
            enacl_nif:crypto_box_afternm([?P_ZEROBYTES, Msg], Nonce, Key)
    end.

%% @doc box_open_afternm/3 works like `box_open/4' but uses a precomputed key
%%
%% Calling `box_open_afternm(M, Nonce, K)' for a precomputed key `K = box_beforenm(PK, SK)' works exactly as
%% if you had called `box_open(M, Nonce, PK, SK)'. Except the operation is much faster as it avoids costly
%% computations in the elliptic curve Curve25519.
%% @end
-spec box_open_afternm(CT, Nonce, K) -> {ok, Msg} | {error, failed_verification}
    when
      CT :: binary(),
      Nonce :: binary(),
      K :: binary(),
      Msg :: binary().
box_open_afternm(CipherText, Nonce, Key) ->
    case iolist_size(CipherText) of
        K when K =< ?BOX_AFTERNM_SIZE ->
            R = enacl_nif:crypto_box_open_afternm_b([?P_BOXZEROBYTES, CipherText], Nonce, Key),
            bump(R, ?BOX_AFTERNM_REDUCTIONS, ?BOX_AFTERNM_SIZE, K);
        _ ->
            enacl_nif:crypto_box_open_afternm([?P_BOXZEROBYTES, CipherText], Nonce, Key)
    end.
%% @doc box_NONCEBYTES/0 return the byte-size of the nonce
%%
%% Used to obtain the size of the nonce.
%% @end
-spec box_NONCEBYTES() -> pos_integer().
box_NONCEBYTES() ->
    enacl_nif:crypto_box_NONCEBYTES().

%% @private
-spec box_PUBLICKEYBYTES() -> pos_integer().
box_PUBLICKEYBYTES() ->
    enacl_nif:crypto_box_PUBLICKEYBYTES().

%% @private
box_BEFORENMBYTES() ->
    enacl_nif:crypto_box_BEFORENMBYTES().

%% Signatures

%% @private
sign_PUBLICBYTES() ->
    enacl_nif:crypto_sign_PUBLICKEYBYTES().

%% @private
sign_SECRETBYTES() ->
    enacl_nif:crypto_sign_SECRETKEYBYTES().

%% @private
sign_SEEDBYTES() ->
    enacl_nif:crypto_sign_SEEDBYTES().
%% @doc sign_keypair/0 returns a signature keypair for signing
%%
%% The returned value is a map in order to make it harder to misuse keys.
%% @end
-spec sign_keypair() -> #{ atom() => binary() }.
sign_keypair() ->
    {PK, SK} = enacl_nif:crypto_sign_keypair(),
    #{ public => PK, secret => SK}.

%% @doc sign_seed_keypair/1 returns a signature keypair based on seed for signing
%%
%% The returned value is a map in order to make it harder to misuse keys.
%% @end
-spec sign_seed_keypair(S) -> #{ atom() => binary() }
    when
      S :: binary().
sign_seed_keypair(S) ->
    {PK, SK} = enacl_nif:crypto_sign_seed_keypair(S),
    #{ public => PK, secret => SK}.
%% @doc sign/2 signs a message with a digital signature identified by a secret key.
%%
%% Given a message `M' and a secret key `SK' the function will sign the message and return a signed message `SM'.
%% @end
-spec sign(M, SK) -> SM
    when
      M :: iodata(),
      SK :: binary(),
      SM :: binary().
sign(M, SK) ->
    enacl_nif:crypto_sign(M, SK).

%% @doc sign_open/2 opens a digital signature
%%
%% Given a signed message `SM' and a public key `PK', verify that the message has the
%% right signature. Returns either `{ok, M}' or `{error, failed_verification}' depending
%% on the correctness of the signature.
%% @end
-spec sign_open(SM, PK) -> {ok, M} | {error, failed_verification}
    when
      SM :: iodata(),
      PK :: binary(),
      M :: binary().
sign_open(SM, PK) ->
    enacl_nif:crypto_sign_open(SM, PK).

%% @doc sign_detached/2 computes a digital signature given a message and a secret key.
%%
%% Given a message `M' and a secret key `SK' the function will compute the digital signature `DS'.
%% @end
-spec sign_detached(M, SK) -> DS
    when
      M :: iodata(),
      SK :: binary(),
      DS :: binary().
sign_detached(M, SK) ->
    enacl_nif:crypto_sign_detached(M, SK).

%% @doc sign_verify_detached/3 verifies the given signature against the given
%% message for the given public key.
%%
%% Given a signature `SIG', a message `M', and a public key `PK', the function computes
%% true iff the `SIG' is valid for `M' and `PK'; false otherwise.
%% @end
-spec sign_verify_detached(SIG, M, PK) -> boolean()
    when
      SIG :: binary(),
      M :: iodata(),
      PK :: binary().
sign_verify_detached(SIG, M, PK) ->
    enacl_nif:crypto_sign_verify_detached(SIG, M, PK).
%% Opaque NIF resource holding the state of a multi-part signature.
-type sign_state() :: reference().

%% @doc sign_init/0 initialize a multi-part signature state.
%%
%% This state must be passed to all future calls to `sign_update/2',
%% `sign_final_create/2' and `sign_final_verify/3'.
%% @end
-spec sign_init() -> sign_state().
sign_init() ->
    enacl_nif:crypto_sign_init().

%% @doc sign_update/2 update the signature state `S' with a new chunk of data `M'.
%% @end
-spec sign_update(S, M) -> sign_state() | {error, sign_update_error}
    when S :: sign_state(),
         M :: iodata().
sign_update(SignState, M) ->
    enacl_nif:crypto_sign_update(SignState, M).

%% @doc sign_final_create/2 computes the signature for the previously supplied
%% message(s) using the secret key `SK'.
%% @end
-spec sign_final_create(S, SK) -> {ok, binary()} | {error, atom()}
    when S :: sign_state(),
         SK :: iodata().
sign_final_create(SignState, SK) ->
    enacl_nif:crypto_sign_final_create(SignState, SK).

%% @doc sign_final_verify/3 verify a chunked signature
%%
%% Verifies that `SIG' is a valid signature for the message whose content has
%% been previously supplied using `sign_update/2' using the public key `PK.'
%% @end
-spec sign_final_verify(S, SIG, PK) -> boolean()
    when S :: sign_state(),
         SIG :: binary(),
         PK :: iodata().
sign_final_verify(SignState, SIG, PK) ->
    enacl_nif:crypto_sign_final_verify(SignState, SIG, PK).
%% @private
-spec box_SECRETKEYBYTES() -> pos_integer().
box_SECRETKEYBYTES() ->
    enacl_nif:crypto_box_SECRETKEYBYTES().

%% @doc box_seal/2 encrypts an anonymous message to another party.
%%
%% Encrypt a `Msg' to a party using his public key, `PK'. This generates an ephemeral
%% keypair and then uses `box'. The ephemeral public key will be sent to the other party. Returns the
%% enciphered message `SealedCipherText' which includes ephemeral public key at head.
%% @end
-spec box_seal(Msg, PK) -> SealedCipherText
    when
      Msg :: iodata(),
      PK :: binary(),
      SealedCipherText :: binary().
box_seal(Msg, PK) ->
    enacl_nif:crypto_box_seal(Msg, PK).

%% @doc box_seal_open/3 decrypts+check message integrity from an unknown sender.
%%
%% Decrypt a `SealedCipherText' which contains an ephemeral public key from another party
%% into a `Msg' using that key and your public and secret keys, `PK' and `SK'. Returns the
%% plaintext message.
%% @end
-spec box_seal_open(SealedCipherText, PK, SK) -> {ok, Msg} | {error, failed_verification}
    when
      SealedCipherText :: iodata(),
      PK :: binary(),
      SK :: binary(),
      Msg :: binary().
box_seal_open(SealedCipherText, PK, SK) ->
    enacl_nif:crypto_box_seal_open(SealedCipherText, PK, SK).
%% @doc secretbox/3 encrypts a message with a key
%%
%% Given a `Msg', a `Nonce' and a `Key' encrypt the message with the Key while taking the
%% nonce into consideration. The function returns the Box obtained from the encryption.
%% @end
-spec secretbox(Msg, Nonce, Key) -> Box
    when
      Msg :: iodata(),
      Nonce :: binary(),
      Key :: binary(),
      Box :: binary().
secretbox(Msg, Nonce, Key) ->
    case iolist_size(Msg) of
        K when K =< ?SECRETBOX_SIZE ->
            %% Small message: "_b" NIF variant plus reduction bumping.
            bump(enacl_nif:crypto_secretbox_b([?S_ZEROBYTES, Msg], Nonce, Key),
                 ?SECRETBOX_REDUCTIONS,
                 ?SECRETBOX_SIZE,
                 K);
        _ ->
            enacl_nif:crypto_secretbox([?S_ZEROBYTES, Msg], Nonce, Key)
    end.

%% @doc secretbox_open/3 opens a sealed box.
%%
%% Given a boxed `CipherText' and given we know the used `Nonce' and `Key' we can open the box
%% to obtain the `Msg' within. Returns either `{ok, Msg}' or `{error, failed_verification}'.
%% @end
-spec secretbox_open(CipherText, Nonce, Key) -> {ok, Msg} | {error, failed_verification}
    when
      CipherText :: iodata(),
      Nonce :: binary(),
      Key :: binary(),
      Msg :: binary().
secretbox_open(CipherText, Nonce, Key) ->
    case iolist_size(CipherText) of
        K when K =< ?SECRETBOX_SIZE ->
            R = enacl_nif:crypto_secretbox_open_b([?S_BOXZEROBYTES, CipherText],
                                                  Nonce, Key),
            bump(R, ?SECRETBOX_OPEN_REDUCTIONS, ?SECRETBOX_SIZE, K);
        _ ->
            enacl_nif:crypto_secretbox_open([?S_BOXZEROBYTES, CipherText], Nonce, Key)
    end.
%% @doc secretbox_NONCEBYTES/0 returns the size of the secretbox nonce
%%
%% When encrypting with a secretbox, the nonce must have this size
%% @end
secretbox_NONCEBYTES() ->
    enacl_nif:crypto_secretbox_NONCEBYTES().

%% @doc secretbox_KEYBYTES/0 returns the size of the secretbox key
%%
%% When encrypting with a secretbox, the key must have this size
%% @end
secretbox_KEYBYTES() ->
    enacl_nif:crypto_secretbox_KEYBYTES().
%% @doc stream_chacha20_NONCEBYTES/0 returns the byte size of the nonce for streams
%% Returns the module-local constant; verify/0 checks at startup that
%% it agrees with the value compiled into the NIF.
%% @end
-spec stream_chacha20_NONCEBYTES() -> ?CRYPTO_STREAM_CHACHA20_NONCEBYTES.
stream_chacha20_NONCEBYTES() ->
    ?CRYPTO_STREAM_CHACHA20_NONCEBYTES.

%% @doc stream_chacha20_KEYBYTES/0 returns the byte size of the key for streams
%% @end
-spec stream_chacha20_KEYBYTES() -> ?CRYPTO_STREAM_CHACHA20_KEYBYTES.
stream_chacha20_KEYBYTES() ->
    ?CRYPTO_STREAM_CHACHA20_KEYBYTES.

%% @doc stream_chacha20/3 produces a cryptographic stream suitable for secret-key encryption
%%
%% <p>Given a positive `Len' a `Nonce' and a `Key', the stream_chacha20/3 function will return an unpredictable cryptographic stream of bytes
%% based on this output. In other words, the produced stream is indistinguishable from a random stream. Using this stream one
%% can XOR it with a message in order to produce a encrypted message.</p>
%% <p><b>Note:</b> You need to use different Nonce values for different messages. Otherwise the same stream is produced and thus
%% the messages will have predictability which in turn makes the encryption scheme fail.</p>
%% @end
-spec stream_chacha20(Len, Nonce, Key) -> CryptoStream
    when
      Len :: non_neg_integer(),
      Nonce :: binary(),
      Key :: binary(),
      CryptoStream :: binary().
stream_chacha20(Len, Nonce, Key) when is_integer(Len), Len >= 0, Len =< ?STREAM_SIZE ->
    %% Short stream: "_b" NIF variant plus reduction bumping.
    bump(enacl_nif:crypto_stream_chacha20_b(Len, Nonce, Key),
         ?STREAM_REDUCTIONS,
         ?STREAM_SIZE,
         Len);
stream_chacha20(Len, Nonce, Key) when is_integer(Len), Len >= 0 ->
    enacl_nif:crypto_stream_chacha20(Len, Nonce, Key);
stream_chacha20(_, _, _) -> error(badarg).

%% @doc stream_chacha20_xor/3 encrypts a plaintext message into ciphertext
%%
%% The stream_chacha20_xor/3 function works by using the {@link stream_chacha20/3} api to XOR a message with the cryptographic stream. The same
%% caveat applies: the nonce must be new for each sent message or the system fails to work.
%% @end
-spec stream_chacha20_xor(Msg, Nonce, Key) -> CipherText
    when
      Msg :: iodata(),
      Nonce :: binary(),
      Key :: binary(),
      CipherText :: binary().
stream_chacha20_xor(Msg, Nonce, Key) ->
    case iolist_size(Msg) of
        K when K =< ?STREAM_SIZE ->
            bump(enacl_nif:crypto_stream_chacha20_xor_b(Msg, Nonce, Key),
                 ?STREAM_REDUCTIONS,
                 ?STREAM_SIZE,
                 K);
        _ ->
            enacl_nif:crypto_stream_chacha20_xor(Msg, Nonce, Key)
    end.
%% @doc stream_NONCEBYTES/0 returns the byte size of the nonce for streams
%% Returns the module-local constant; verify/0 checks at startup that
%% it agrees with the value compiled into the NIF.
%% @end
-spec stream_NONCEBYTES() -> ?CRYPTO_STREAM_NONCEBYTES.
stream_NONCEBYTES() ->
    ?CRYPTO_STREAM_NONCEBYTES.

%% @doc stream_KEYBYTES/0 returns the byte size of the key for streams
%% @end
-spec stream_KEYBYTES() -> ?CRYPTO_STREAM_KEYBYTES.
stream_KEYBYTES() ->
    ?CRYPTO_STREAM_KEYBYTES.

%% @doc stream/3 produces a cryptographic stream suitable for secret-key encryption
%%
%% <p>Given a positive `Len' a `Nonce' and a `Key', the stream/3 function will return an unpredictable cryptographic stream of bytes
%% based on this output. In other words, the produced stream is indistinguishable from a random stream. Using this stream one
%% can XOR it with a message in order to produce a encrypted message.</p>
%% <p><b>Note:</b> You need to use different Nonce values for different messages. Otherwise the same stream is produced and thus
%% the messages will have predictability which in turn makes the encryption scheme fail.</p>
%% @end
-spec stream(Len, Nonce, Key) -> CryptoStream
    when
      Len :: non_neg_integer(),
      Nonce :: binary(),
      Key :: binary(),
      CryptoStream :: binary().
stream(Len, Nonce, Key) when is_integer(Len), Len >= 0, Len =< ?STREAM_SIZE ->
    %% Short stream: "_b" NIF variant plus reduction bumping.
    bump(enacl_nif:crypto_stream_b(Len, Nonce, Key),
         ?STREAM_REDUCTIONS,
         ?STREAM_SIZE,
         Len);
stream(Len, Nonce, Key) when is_integer(Len), Len >= 0 ->
    enacl_nif:crypto_stream(Len, Nonce, Key);
stream(_, _, _) -> error(badarg).

%% @doc stream_xor/3 encrypts a plaintext message into ciphertext
%%
%% The stream_xor/3 function works by using the {@link stream/3} api to XOR a message with the cryptographic stream. The same
%% caveat applies: the nonce must be new for each sent message or the system fails to work.
%% @end
-spec stream_xor(Msg, Nonce, Key) -> CipherText
    when
      Msg :: iodata(),
      Nonce :: binary(),
      Key :: binary(),
      CipherText :: binary().
stream_xor(Msg, Nonce, Key) ->
    case iolist_size(Msg) of
        K when K =< ?STREAM_SIZE ->
            bump(enacl_nif:crypto_stream_xor_b(Msg, Nonce, Key),
                 ?STREAM_REDUCTIONS,
                 ?STREAM_SIZE,
                 K);
        _ ->
            enacl_nif:crypto_stream_xor(Msg, Nonce, Key)
    end.
%% @doc auth_KEYBYTES/0 returns the byte-size of the authentication key
%% @end
-spec auth_KEYBYTES() -> pos_integer().
auth_KEYBYTES() ->
enacl_nif:crypto_auth_KEYBYTES().
%% @doc auth_BYTES/0 returns the byte-size of the authenticator
%% @end
-spec auth_BYTES() -> pos_integer().
auth_BYTES() ->
enacl_nif:crypto_auth_BYTES().
%% @doc auth/2 produces an authenticator (MAC) for a message
%%
%% Given a `Msg' and a `Key', produce a MAC/Authenticator for that message. The key
%% can be reused for several such Msg/Authenticator pairs. An eavesdropper will not
%% learn anything extra about the message structure.
%% @end
-spec auth(Msg, Key) -> Authenticator
    when
      Msg :: iodata(),
      Key :: binary(),
      Authenticator :: binary().
auth(Msg, Key) ->
    Sz = iolist_size(Msg),
    if
        Sz =< ?auth_BYTES ->
            %% Small messages run on-scheduler with a reduction charge.
            bump(enacl_nif:crypto_auth_b(Msg, Key), ?AUTH_REDUCTIONS, ?auth_BYTES, Sz);
        true ->
            enacl_nif:crypto_auth(Msg, Key)
    end.
%% @doc auth_verify/3 verifies an authenticator for a message
%%
%% Given an `Authenticator', a `Msg' and a `Key'; verify that the MAC for the pair
%% `{Msg, Key}' is really `Authenticator'. Returns the value `true' if the
%% verification passes. Upon failure, the function returns `false'.
%% @end
-spec auth_verify(Authenticator, Msg, Key) -> boolean()
    when
      Authenticator :: binary(),
      Msg :: iodata(),
      Key :: binary().
auth_verify(A, M, K) ->
    %% BUGFIX: the size variable was previously named `K', which is already
    %% bound to the key argument. The case pattern then compared the integer
    %% message size against the key binary, never matched, and unconditionally
    %% took the slow path — the on-scheduler fast path was dead code.
    case iolist_size(M) of
        Sz when Sz =< ?auth_BYTES ->
            bump(enacl_nif:crypto_auth_verify_b(A, M, K),
                 ?AUTH_REDUCTIONS,
                 ?auth_BYTES,
                 Sz);
        _ ->
            enacl_nif:crypto_auth_verify(A, M, K)
    end.
%% @doc shorthash_key_size/0 returns the byte-size of the shorthash key
%% used by {@link shorthash/2}.
%% @end
-spec shorthash_key_size() -> pos_integer().
shorthash_key_size() ->
    enacl_nif:crypto_shorthash_KEYBYTES().

%% @doc shorthash_size/0 returns the byte-size of the short authenticator
%% produced by {@link shorthash/2}.
%% @end
-spec shorthash_size() -> pos_integer().
shorthash_size() ->
    enacl_nif:crypto_shorthash_BYTES().

%% @doc shorthash/2 produces a short authenticator (MAC) for a message suitable for hashtables and refs
%%
%% Given a `Msg' and a `Key' produce a MAC/Authenticator for that message. The key can be reused for several such Msg/Authenticator pairs.
%% An eavesdropper will not learn anything extra about the message structure.
%%
%% The intended use is to generate a random key and use it as a hash table or bloom filter function.
%% This denies an enemy the ability to predict where a collision would occur in the data structure,
%% since they don't know the key.
%% @end
-spec shorthash(Msg, Key) -> Authenticator
    when
      Msg :: iodata(),
      Key :: binary(),
      Authenticator :: binary().
shorthash(Msg, Key) ->
    enacl_nif:crypto_shorthash(Msg, Key).
%% @doc onetime_auth/2 produces a ONE-TIME authenticator for a message
%%
%% Works like {@link auth/2} except the key must not be reused for subsequent
%% messages: the pair `{Msg, Key}' is unique and only to be used once. The
%% advantage is noticeably faster execution.
%% @end
-spec onetime_auth(Msg, Key) -> Authenticator
    when
      Msg :: iodata(),
      Key :: binary(),
      Authenticator :: binary().
onetime_auth(Msg, Key) ->
    Sz = iolist_size(Msg),
    if
        Sz =< ?ONETIME_auth_BYTES ->
            %% Small messages run on-scheduler with a reduction charge.
            bump(enacl_nif:crypto_onetimeauth_b(Msg, Key),
                 ?ONETIME_AUTH_REDUCTIONS,
                 ?ONETIME_auth_BYTES,
                 Sz);
        true ->
            enacl_nif:crypto_onetimeauth(Msg, Key)
    end.
%% @doc onetime_auth_verify/3 verifies an ONE-TIME authenticator for a message
%%
%% Given an `Authenticator', a `Msg' and a `Key'; verify that the MAC for the pair
%% `{Msg, Key}' is really `Authenticator'. Returns the value `true' if the
%% verification passes. Upon failure, the function returns `false'. Note the caveat
%% from {@link onetime_auth/2} applies: you are not allowed to ever use the same key
%% again for another message.
%% @end
-spec onetime_auth_verify(Authenticator, Msg, Key) -> boolean()
    when
      Authenticator :: binary(),
      Msg :: iodata(),
      Key :: binary().
onetime_auth_verify(A, M, K) ->
    %% BUGFIX: the size variable was previously named `K', shadow-matching the
    %% already-bound key argument. The clause compared the integer message size
    %% against the key binary, never matched, and always fell through to the
    %% slow path, making the on-scheduler fast path unreachable.
    case iolist_size(M) of
        Sz when Sz =< ?ONETIME_auth_BYTES ->
            bump(enacl_nif:crypto_onetimeauth_verify_b(A, M, K),
                 ?ONETIME_AUTH_REDUCTIONS,
                 ?ONETIME_auth_BYTES,
                 Sz);
        _ ->
            enacl_nif:crypto_onetimeauth_verify(A, M, K)
    end.
%% @doc onetime_auth_BYTES/0 returns the number of bytes of the one-time
%% authenticator produced by {@link onetime_auth/2}.
%% @end
-spec onetime_auth_BYTES() -> pos_integer().
onetime_auth_BYTES() ->
    enacl_nif:crypto_onetimeauth_BYTES().

%% @doc onetime_auth_KEYBYTES/0 returns the byte-size of the one-time
%% authentication key used by {@link onetime_auth/2}.
%% @end
-spec onetime_auth_KEYBYTES() -> pos_integer().
onetime_auth_KEYBYTES() ->
    enacl_nif:crypto_onetimeauth_KEYBYTES().
%% Curve 25519 Crypto
%% ------------------

%% @doc curve25519_scalarmult/2 does a scalar multiplication between the Secret and the BasePoint.
%% @end
-spec curve25519_scalarmult(Secret :: binary(), BasePoint :: binary()) -> binary().
curve25519_scalarmult(Secret, BasePoint) ->
    enacl_nif:crypto_curve25519_scalarmult(Secret, BasePoint).

%% @doc curve25519_scalarmult/1 avoids messing up arguments.
%% Takes as input a map `#{ secret := Secret, base_point := BasePoint }' in order to avoid
%% messing up the calling order.
%% @end
%% Added a -spec for consistency with the sibling arities in this section.
-spec curve25519_scalarmult(#{ secret := binary(), base_point := binary() }) -> binary().
curve25519_scalarmult(#{ secret := Secret, base_point := BasePoint }) ->
    curve25519_scalarmult(Secret, BasePoint).

%% @doc curve25519_scalarmult_base/1 computes the corresponding public key for a
%% given secret key.
%% @end
-spec curve25519_scalarmult_base(Secret :: binary()) -> binary().
curve25519_scalarmult_base(Secret) ->
    enacl_nif:crypto_curve25519_scalarmult_base(Secret).
%% Ed 25519 Crypto
%% ---------------

%% @doc crypto_sign_ed25519_keypair/0 creates a new Ed 25519 Public/Secret keypair.
%%
%% Generates and returns a new key pair for the Ed 25519 signature scheme. The return value is a
%% map in order to avoid using the public key as a secret key and vice versa.
%% @end
-spec crypto_sign_ed25519_keypair() -> #{ atom() => binary() }.
crypto_sign_ed25519_keypair() ->
    {PK, SK} = enacl_nif:crypto_sign_ed25519_keypair(),
    #{ public => PK, secret => SK }.

%% @doc crypto_sign_ed25519_sk_to_pk/1 derives an ed25519 public key from a secret key
%% The ed25519 signature secret key contains enough information to derive its corresponding
%% public key. This function extracts the public key from the secret if needed.
%% @end
-spec crypto_sign_ed25519_sk_to_pk(Secret :: binary()) -> binary().
crypto_sign_ed25519_sk_to_pk(Secret) ->
    enacl_nif:crypto_sign_ed25519_sk_to_pk(Secret).

%% @doc crypto_sign_ed25519_public_to_curve25519/1 converts a given Ed 25519 public
%% key to a Curve 25519 public key.
%% @end
-spec crypto_sign_ed25519_public_to_curve25519(PublicKey :: binary()) -> binary().
crypto_sign_ed25519_public_to_curve25519(PublicKey) ->
    R = enacl_nif:crypto_sign_ed25519_public_to_curve25519(PublicKey),
    %% Charge a fixed reduction cost for the on-scheduler conversion.
    erlang:bump_reductions(?ED25519_PUBLIC_TO_CURVE_REDS),
    R.

%% @doc crypto_sign_ed25519_secret_to_curve25519/1 converts a given Ed 25519 secret
%% key to a Curve 25519 secret key.
%% @end
-spec crypto_sign_ed25519_secret_to_curve25519(SecretKey :: binary()) -> binary().
crypto_sign_ed25519_secret_to_curve25519(SecretKey) ->
    R = enacl_nif:crypto_sign_ed25519_secret_to_curve25519(SecretKey),
    %% Charge a fixed reduction cost for the on-scheduler conversion.
    erlang:bump_reductions(?ED25519_SECRET_TO_CURVE_REDS),
    R.

%% @doc crypto_sign_ed25519_public_size/0 returns the byte-size of an Ed 25519 public key.
%% @end
-spec crypto_sign_ed25519_public_size() -> pos_integer().
crypto_sign_ed25519_public_size() ->
    enacl_nif:crypto_sign_ed25519_PUBLICKEYBYTES().

%% @doc crypto_sign_ed25519_secret_size/0 returns the byte-size of an Ed 25519 secret key.
%% @end
-spec crypto_sign_ed25519_secret_size() -> pos_integer().
crypto_sign_ed25519_secret_size() ->
    enacl_nif:crypto_sign_ed25519_SECRETKEYBYTES().
%% Key exchange functions
%% ----------------------

%% @doc kx_keypair/0 creates a new Public/Secret keypair.
%%
%% Generates and returns a new key pair for the key exchange. The return value is a
%% map in order to avoid using the public key as a secret key and vice versa.
%% @end
-spec kx_keypair() -> #{ atom() => binary() }.
kx_keypair() ->
    {PK, SK} = enacl_nif:crypto_kx_keypair(),
    #{ public => PK, secret => SK}.

%% @doc kx_client_session_keys/3 computes and returns shared keys for client session.
%%
%% <p>Compute two shared keys using the server's public key `ServerPk' and the client's secret key `ClientSk'.</p>
%% <p>Returns map with two keys `client_rx' and `client_tx'.
%% `client_rx' will be used by the client to receive data from the server,
%% `client_tx' will by used by the client to send data to the server.</p>
%% @end
-spec kx_client_session_keys(ClientPk, ClientSk, ServerPk) -> #{ atom() => binary() }
    when
      ClientPk :: binary(),
      ClientSk :: binary(),
      ServerPk :: binary().
kx_client_session_keys(ClientPk, ClientSk, ServerPk) ->
    {Rx, Tx} = enacl_nif:crypto_kx_client_session_keys(ClientPk, ClientSk, ServerPk),
    #{ client_rx => Rx, client_tx => Tx}.

%% @doc kx_server_session_keys/3 computes and returns shared keys for server session.
%% <p>Compute two shared keys using the client's public key `ClientPk' and the server's secret key `ServerSk'.</p>
%% <p>Returns map with two keys `server_rx' and `server_tx'.
%% `server_rx' will be used by the server to receive data from the client,
%% `server_tx' will be used by the server to send data to the client.</p>
%% @end
-spec kx_server_session_keys(ServerPk, ServerSk, ClientPk) -> #{ atom() => binary() }
    when
      ServerPk :: binary(),
      ServerSk :: binary(),
      ClientPk :: binary().
kx_server_session_keys(ServerPk, ServerSk, ClientPk) ->
    {Rx, Tx} = enacl_nif:crypto_kx_server_session_keys(ServerPk, ServerSk, ClientPk),
    #{ server_rx => Rx, server_tx => Tx}.

%% @doc kx_SESSIONKEYBYTES/0 returns the number of bytes of the session key generated during key exchange.
%% @end
-spec kx_SESSIONKEYBYTES() -> pos_integer().
kx_SESSIONKEYBYTES() ->
    enacl_nif:crypto_kx_SESSIONKEYBYTES().

%% @doc kx_PUBLICKEYBYTES/0 returns the number of bytes of the public key used in key exchange.
%% @end
-spec kx_PUBLICKEYBYTES() -> pos_integer().
kx_PUBLICKEYBYTES() ->
    enacl_nif:crypto_kx_PUBLICKEYBYTES().

%% @doc kx_SECRETKEYBYTES/0 returns the number of bytes of the secret key used in key exchange.
%% @end
-spec kx_SECRETKEYBYTES() -> pos_integer().
kx_SECRETKEYBYTES() ->
    enacl_nif:crypto_kx_SECRETKEYBYTES().
%% AEAD ChaCha20 Poly1305
%% ----------------------

%% @doc aead_chacha20poly1305_ietf_encrypt/4 encrypts `Msg' with additional data
%% `AD' using `Key' and `Nonce'. Returns the encrypted message followed by
%% `aead_chacha20poly1305_ietf_ABYTES/0' bytes of MAC.
%% @end
-spec aead_chacha20poly1305_ietf_encrypt(Msg, AD, Nonce, Key) -> binary()
    when Key :: binary(),
         Nonce :: binary(),
         AD :: binary(),
         Msg :: binary().
aead_chacha20poly1305_ietf_encrypt(Msg, AD, Nonce, Key) ->
    enacl_nif:crypto_aead_chacha20poly1305_ietf_encrypt(Msg, AD, Nonce, Key).

%% @doc aead_chacha20poly1305_ietf_decrypt/4 decrypts ciphertext `CT' with additional
%% data `AD' using `Key' and `Nonce'. Note: `CT' should contain
%% `aead_chacha20poly1305_ietf_ABYTES/0' bytes that is the MAC. Returns the decrypted
%% message.
%% @end
-spec aead_chacha20poly1305_ietf_decrypt(CT, AD, Nonce, Key) -> binary() | {error, term()}
    when Key :: binary(),
         Nonce :: binary(),
         AD :: binary(),
         CT :: binary().
aead_chacha20poly1305_ietf_decrypt(CT, AD, Nonce, Key) ->
    enacl_nif:crypto_aead_chacha20poly1305_ietf_decrypt(CT, AD, Nonce, Key).

%% @doc aead_chacha20poly1305_ietf_KEYBYTES/0 returns the number of bytes
%% of the key used in AEAD ChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_chacha20poly1305_ietf_KEYBYTES() -> pos_integer().
aead_chacha20poly1305_ietf_KEYBYTES() ->
    enacl_nif:crypto_aead_chacha20poly1305_ietf_KEYBYTES().

%% @doc aead_chacha20poly1305_ietf_NPUBBYTES/0 returns the number of bytes
%% of the Nonce in AEAD ChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_chacha20poly1305_ietf_NPUBBYTES() -> pos_integer().
aead_chacha20poly1305_ietf_NPUBBYTES() ->
    enacl_nif:crypto_aead_chacha20poly1305_ietf_NPUBBYTES().

%% @doc aead_chacha20poly1305_ietf_ABYTES/0 returns the number of bytes
%% of the MAC in AEAD ChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_chacha20poly1305_ietf_ABYTES() -> pos_integer().
aead_chacha20poly1305_ietf_ABYTES() ->
    enacl_nif:crypto_aead_chacha20poly1305_ietf_ABYTES().

%% @doc aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX/0 returns the max number of bytes
%% allowed in a message in AEAD ChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX() -> pos_integer().
aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX() ->
    enacl_nif:crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX().
%% AEAD XChaCha20 Poly1305
%% ----------------------

%% @doc aead_xchacha20poly1305_ietf_encrypt/4 encrypts `Msg' with additional data
%% `AD' using `Key' and `Nonce'. Returns the encrypted message followed by
%% `aead_xchacha20poly1305_ietf_ABYTES/0' bytes of MAC.
%% @end
-spec aead_xchacha20poly1305_ietf_encrypt(Msg, AD, Nonce, Key) -> binary()
    when Key :: binary(),
         Nonce :: binary(),
         AD :: binary(),
         Msg :: binary().
aead_xchacha20poly1305_ietf_encrypt(Msg, AD, Nonce, Key) ->
    enacl_nif:crypto_aead_xchacha20poly1305_ietf_encrypt(Msg, AD, Nonce, Key).

%% @doc aead_xchacha20poly1305_ietf_decrypt/4 decrypts ciphertext `CT' with additional
%% data `AD' using `Key' and `Nonce'. Note: `CT' should contain
%% `aead_xchacha20poly1305_ietf_ABYTES/0' bytes that is the MAC. Returns the decrypted
%% message.
%% @end
-spec aead_xchacha20poly1305_ietf_decrypt(CT, AD, Nonce, Key) -> binary() | {error, term()}
    when Key :: binary(),
         Nonce :: binary(),
         AD :: binary(),
         CT :: binary().
aead_xchacha20poly1305_ietf_decrypt(CT, AD, Nonce, Key) ->
    enacl_nif:crypto_aead_xchacha20poly1305_ietf_decrypt(CT, AD, Nonce, Key).

%% @doc aead_xchacha20poly1305_ietf_KEYBYTES/0 returns the number of bytes
%% of the key used in AEAD XChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_xchacha20poly1305_ietf_KEYBYTES() -> pos_integer().
aead_xchacha20poly1305_ietf_KEYBYTES() ->
    enacl_nif:crypto_aead_xchacha20poly1305_ietf_KEYBYTES().

%% @doc aead_xchacha20poly1305_ietf_NPUBBYTES/0 returns the number of bytes
%% of the Nonce in AEAD XChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_xchacha20poly1305_ietf_NPUBBYTES() -> pos_integer().
aead_xchacha20poly1305_ietf_NPUBBYTES() ->
    enacl_nif:crypto_aead_xchacha20poly1305_ietf_NPUBBYTES().

%% @doc aead_xchacha20poly1305_ietf_ABYTES/0 returns the number of bytes
%% of the MAC in AEAD XChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_xchacha20poly1305_ietf_ABYTES() -> pos_integer().
aead_xchacha20poly1305_ietf_ABYTES() ->
    enacl_nif:crypto_aead_xchacha20poly1305_ietf_ABYTES().

%% @doc aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX/0 returns the max number of bytes
%% allowed in a message in AEAD XChaCha20 Poly1305 encryption/decryption.
%% @end
-spec aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX() -> pos_integer().
aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX() ->
    enacl_nif:crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX().
%% Secretstream
%% ----------------------

%% @doc secretstream_xchacha20poly1305_ABYTES/0 returns the number of bytes
%% of the MAC used on secretstream encryption/decryption
%% @end
-spec secretstream_xchacha20poly1305_ABYTES() -> pos_integer().
secretstream_xchacha20poly1305_ABYTES() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_ABYTES().

%% @doc secretstream_xchacha20poly1305_HEADERBYTES/0 returns the number
%% of bytes for header used in secretstream encryption/decryption.
%% @end
-spec secretstream_xchacha20poly1305_HEADERBYTES() -> pos_integer().
secretstream_xchacha20poly1305_HEADERBYTES() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_HEADERBYTES().

%% @doc secretstream_xchacha20poly1305_KEYBYTES/0 returns the number
%% of bytes of the key used in secretstream encryption/decryption.
%% @end
-spec secretstream_xchacha20poly1305_KEYBYTES() -> pos_integer().
secretstream_xchacha20poly1305_KEYBYTES() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_KEYBYTES().

%% @doc secretstream_xchacha20poly1305_MESSAGEBYTES_MAX/0 returns the max
%% number of bytes allowed in a message in secretstream encryption/decryption.
%% @end
-spec secretstream_xchacha20poly1305_MESSAGEBYTES_MAX() -> pos_integer().
secretstream_xchacha20poly1305_MESSAGEBYTES_MAX() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX().

%% @doc secretstream_xchacha20poly1305_TAG_MESSAGE/0 returns integer value
%% of tag `message'. The most common tag, that doesn't add any information
%% about the nature of the message.
%% @end
-spec secretstream_xchacha20poly1305_TAG_MESSAGE() -> pos_integer().
secretstream_xchacha20poly1305_TAG_MESSAGE() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_MESSAGE().

%% @doc secretstream_xchacha20poly1305_TAG_PUSH/0 returns integer value
%% of tag `push'.
%%
%% This tag indicates that the message marks the end
%% of a set of messages, but not the end of the stream.
%%
%% For example, a huge JSON string sent as multiple chunks can use
%% this tag to indicate to the application that the string is complete
%% and that it can be decoded. But the stream itself is not closed,
%% and more data may follow.
%% @end
-spec secretstream_xchacha20poly1305_TAG_PUSH() -> pos_integer().
secretstream_xchacha20poly1305_TAG_PUSH() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_PUSH().

%% @doc secretstream_xchacha20poly1305_TAG_REKEY/0 returns integer value
%% of tag `rekey'. Indicates that next messages will derive new keys.
%% @end
-spec secretstream_xchacha20poly1305_TAG_REKEY() -> pos_integer().
secretstream_xchacha20poly1305_TAG_REKEY() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_REKEY().

%% @doc secretstream_xchacha20poly1305_TAG_FINAL/0 returns integer value
%% of tag `final'. Indicates that the message is the last message in
%% the secretstream.
%% @end
-spec secretstream_xchacha20poly1305_TAG_FINAL() -> pos_integer().
secretstream_xchacha20poly1305_TAG_FINAL() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_FINAL().

%% @doc secretstream_xchacha20poly1305_keygen/0 returns new random key
%% for secretstream encryption.
%% @end
-spec secretstream_xchacha20poly1305_keygen() -> binary().
secretstream_xchacha20poly1305_keygen() ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_keygen().
%% @doc secretstream_xchacha20poly1305_init_push/1
%% initializes a secretstream encryption context using given `Key'.
%% Returns `Header' and a reference to the encryption context.
%% @end
-spec secretstream_xchacha20poly1305_init_push(Key) -> {binary(), reference()}
    when Key :: binary().
secretstream_xchacha20poly1305_init_push(Key) ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_init_push(Key).

%% A tag is either one of the four symbolic names, or a raw integer tag
%% value passed through unchanged (see secretstream_xchacha20poly1305_tag_value/1).
-type secretstream_xchacha20poly1305_tag() :: message | rekey | final | push | pos_integer().

%% @doc secretstream_xchacha20poly1305_push/4 returns encrypted chunk binary.
%% Updates a secretstream context referenced by `Ref' with `Message' data,
%% given `Tag' and additional data `AD'.
%% @end
-spec secretstream_xchacha20poly1305_push(Ref, Message, AD, Tag) -> binary()
    when
      Ref :: reference(),
      Message :: binary(),
      AD :: binary(),
      Tag :: secretstream_xchacha20poly1305_tag().
secretstream_xchacha20poly1305_push(Ref, Message, AD, Tag) ->
    %% Translate the symbolic tag name into its libsodium integer value first.
    TagValue = secretstream_xchacha20poly1305_tag_value(Tag),
    enacl_nif:crypto_secretstream_xchacha20poly1305_push(Ref, Message, AD, TagValue).
%% @doc secretstream_xchacha20poly1305_init_pull/2
%% initializes a secretstream decryption context using `Header' and `Key'.
%% Returns a reference to the decryption context.
%% @end
-spec secretstream_xchacha20poly1305_init_pull(Header, Key) -> reference()
    when
      Header :: binary(),
      Key :: binary().
secretstream_xchacha20poly1305_init_pull(Header, Key) ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_init_pull(Header, Key).

%% @doc secretstream_xchacha20poly1305_pull/3 decrypts `CipherText'
%% with additional data `AD' in referenced decryption context `Ref'.
%% Returns the decrypted message together with its symbolic tag.
%% @end
-spec secretstream_xchacha20poly1305_pull(Ref, CipherText, AD) ->
          {binary(), secretstream_xchacha20poly1305_tag()} | {error, failed_verification}
    when
      Ref :: reference(),
      CipherText :: binary(),
      AD :: binary().
secretstream_xchacha20poly1305_pull(Ref, CipherText, AD) ->
    {Message, TagValue} = enacl_nif:crypto_secretstream_xchacha20poly1305_pull(Ref, CipherText, AD),
    %% Map the raw integer tag back to its symbolic name for callers.
    {Message, secretstream_xchacha20poly1305_tag(TagValue)}.

%% @doc secretstream_xchacha20poly1305_rekey/1 updates encryption/decryption context state.
%% This doesn't add any information about key update to the stream.
%% If this function is used to create an encrypted stream,
%% the decryption process must call this function at the exact same stream location.
%% @end
-spec secretstream_xchacha20poly1305_rekey(Ref) -> ok
    when Ref :: reference().
secretstream_xchacha20poly1305_rekey(Ref) ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_rekey(Ref).
%% @doc secretstream_xchacha20poly1305_tag_value/1 returns integer value of tag.
%% Symbolic names are translated via the NIF constants; an already-numeric
%% tag is passed through unchanged by the final clause.
%% @end
-spec secretstream_xchacha20poly1305_tag_value(TagName) -> pos_integer()
    when TagName :: secretstream_xchacha20poly1305_tag().
secretstream_xchacha20poly1305_tag_value(message) ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_MESSAGE();
secretstream_xchacha20poly1305_tag_value(rekey) ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_REKEY();
secretstream_xchacha20poly1305_tag_value(push) ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_PUSH();
secretstream_xchacha20poly1305_tag_value(final) ->
    enacl_nif:crypto_secretstream_xchacha20poly1305_TAG_FINAL();
secretstream_xchacha20poly1305_tag_value(Other) ->
    Other.

%% @doc secretstream_xchacha20poly1305_tag/1 returns tag name.
%% Unknown integer values are passed through unchanged by the final clause.
%% @end
-spec secretstream_xchacha20poly1305_tag(TagValue) -> secretstream_xchacha20poly1305_tag()
    when TagValue :: pos_integer().
secretstream_xchacha20poly1305_tag(?CRYPTO_SECRETSTREAM_TAG_MESSAGE) ->
    message;
secretstream_xchacha20poly1305_tag(?CRYPTO_SECRETSTREAM_TAG_PUSH) ->
    push;
secretstream_xchacha20poly1305_tag(?CRYPTO_SECRETSTREAM_TAG_REKEY) ->
    rekey;
secretstream_xchacha20poly1305_tag(?CRYPTO_SECRETSTREAM_TAG_FINAL) ->
    final;
secretstream_xchacha20poly1305_tag(Other) ->
    Other.
%% Obtaining random bytes
%% ----------------------

%% @doc randombytes/1 produces a stream of random bytes of the given size
%%
%% The security properties of the random stream are that of the libsodium library. Specifically,
%% we use:
%%
%% * RtlGenRandom() on Windows systems
%% * arc4random() on OpenBSD and Bitrig
%% * /dev/urandom on other Unix environments
%%
%% It is up to you to pick a system with an appropriately strong (P)RNG for your purpose. We refer
%% you to the underlying system implementations for random data.
%% @end
-spec randombytes(non_neg_integer()) -> binary().
randombytes(N) ->
    enacl_nif:randombytes(N).

%% @doc randombytes_uint32/0 produces an integer in the 32bit range
%% @end
-spec randombytes_uint32() -> integer().
randombytes_uint32() ->
    enacl_nif:randombytes_uint32().

%% @doc randombytes_uniform/1 produces a random integer in the space [0..N)
%% That is with the upper bound excluded. Fails (function_clause) for integers
%% at or above the 32bit bound.
%% @end
%% Added a -spec for consistency with the other exported functions.
-spec randombytes_uniform(non_neg_integer()) -> non_neg_integer().
randombytes_uniform(N) when N < ?MAX_32BIT_INT ->
    enacl_nif:randombytes_uniform(N).
%% Helpers

%% @doc bump/4 bumps a reduction budget linearly before returning the result
%% It is used for the on-scheduler variants of functions in order to make sure there
%% is a realistic approach to handling the reduction counts of the system.
%% The charge is `Budget * Sz / Max', i.e. proportional to the work actually done,
%% and always at least 1 reduction.
%% @end
%% BUGFIX: removed stray non-Erlang tokens that had been fused onto the final
%% line and broke compilation.
bump(Res, Budget, Max, Sz) ->
    Reds = (Budget * Sz) div Max,
    erlang:bump_reductions(max(1, Reds)),
    Res.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2011 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Helpers for validating inputs.
-module(riak_pipe_v).
-export([validate_module/2,
validate_function/3,
type_of/1]).
%% @doc Validate that `Module' is an atom naming a loaded or loadable module.
%%
%% If a module is already loaded under that name, or {@link
%% code:load_file/1} can load one, `ok' is returned. Otherwise an
%% `{error, Reason}' tuple is returned, with `Label' woven into the
%% error message.
-spec validate_module(string(), term()) -> ok | {error, iolist()}.
validate_module(Label, Module) when is_atom(Module) ->
    case code:ensure_loaded(Module) of
        {module, Module} ->
            ok;
        {error, Error} ->
            Msg = io_lib:format("~s must be a valid module name"
                                " (failed to load ~p: ~p)",
                                [Label, Module, Error]),
            {error, Msg}
    end;
validate_module(Label, Module) ->
    %% Non-atoms can never name a module; report the offending type.
    {error, io_lib:format("~s must be an atom, not a ~p",
                          [Label, type_of(Module)])}.
%% @doc Validate that `Fun' is a function of arity `Arity'.
%%
%% For `local' funs (anonymous funs, and `fun Name/Arity'), checking
%% the arity is sufficient — the compiler already validated the
%% reference. For `external' funs (`fun Module:Function/Arity') and
%% `{Module, Function}' tuples, the module must also be loadable and
%% must export the named function (see {@link validate_module/2}).
%%
%% Returns `ok' on success, or `{error, Reason}' on failure, with
%% `Label' woven into the error message.
-spec validate_function(string(), integer(), fun() | {atom(), atom()}) ->
         ok | {error, iolist()}.
validate_function(Label, Arity, {Module, Function})
  when is_atom(Module), is_atom(Function) ->
    validate_exported_function(Label, Arity, Module, Function);
validate_function(Label, Arity, Fun) when is_function(Fun, Arity) ->
    %% Arity already matches; only the fun's origin remains to check.
    case erlang:fun_info(Fun, type) of
        {type, local} ->
            %% reference was validated by compiler
            ok;
        {type, external} ->
            {module, Module} = erlang:fun_info(Fun, module),
            {name, Function} = erlang:fun_info(Fun, name),
            validate_exported_function(Label, Arity, Module, Function)
    end;
validate_function(Label, Arity, Fun) when is_function(Fun) ->
    %% A fun, but of the wrong arity.
    {arity, N} = erlang:fun_info(Fun, arity),
    {error, io_lib:format("~s must be of arity ~b, not ~b",
                          [Label, Arity, N])};
validate_function(Label, Arity, Fun) ->
    %% Neither a fun nor a {Mod, Fun} pair.
    {error, io_lib:format(
              "~s must be a function or {Mod, Fun} (arity ~b), not a ~p",
              [Label, Arity, type_of(Fun)])}.
%% @doc Validate an exported function. See {@link validate_function/3}.
%% Checks that `Module' is loadable and exports `Function/Arity'.
-spec validate_exported_function(string(), integer(), atom(), atom()) ->
         ok | {error, iolist()}.
validate_exported_function(Label, Arity, Module, Function) ->
    case validate_module("", Module) of
        {error, Error} ->
            {error, io_lib:format("invalid module named in ~s function:~n~s",
                                  [Label, Error])};
        ok ->
            %% Module is loaded at this point, so its export list is available.
            case lists:member({Function, Arity}, Module:module_info(exports)) of
                true ->
                    ok;
                false ->
                    {error, io_lib:format(
                              "~s specifies ~p:~p/~b, which is not exported",
                              [Label, Module, Function, Arity])}
            end
    end.
%% @doc Determine the type of a term. For example:
%% ```
%% number = riak_pipe_v:type_of(1).
%% atom = riak_pipe_v:type_of(a).
%% pid = riak_pipe_v:type_of(self()).
%% function = riak_pipe_v:type_of(fun() -> ok end).
%% '''
%% BUGFIX: removed stray non-Erlang tokens that had been fused onto the final
%% line and broke compilation.
-spec type_of(term()) -> pid | reference | list | tuple | atom
                       | number | binary | function.
type_of(Term) ->
    %% erl_types (from the dialyzer application) encodes terms as
    %% {c, TypeName, Elements, Qualifier} tuples.
    case erl_types:t_from_term(Term) of
        {c,identifier,[Type|_],_} ->
            Type; % pid,reference
        {c,Type,_,_} ->
            Type % list,tuple,atom,number,binary,function
    end.
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
%%
-module(delegate).
%% delegate is an alternative way of doing remote calls. Compared to
%% the rpc module, it reduces inter-node communication. For example,
%% if a message is routed to 1,000 queues on node A and needs to be
%% propagated to nodes B and C, it would be nice to avoid doing 2,000
%% remote casts to queue processes.
%%
%% An important issue here is preserving order - we need to make sure
%% that messages from a certain channel to a certain queue take a
%% consistent route, to prevent them being reordered. In fact all
%% AMQP-ish things (such as queue declaration results and basic.get)
%% must take the same route as well, to ensure that clients see causal
%% ordering correctly. Therefore we have a rather generic mechanism
%% here rather than just a message-reflector. That's also why we pick
%% the delegate process to use based on a hash of the source pid.
%%
%% When a function is invoked using delegate:invoke/2,
%% or delegate:invoke_no_result/2 on a group of pids, the pids are first split
%% into local and remote ones. Remote processes are then grouped by
%% node. The function is then invoked locally and on every node (using
%% gen_server2:multi/4) as many times as there are processes on that
%% node, sequentially.
%%
%% Errors returned when executing functions on remote nodes are re-raised
%% in the caller.
%%
%% RabbitMQ starts a pool of delegate processes on boot. The size of
%% the pool is configurable, the aim is to make sure we don't have too
%% few delegates and thus limit performance on many-CPU machines.
-behaviour(gen_server2).
-export([start_link/1, start_link/2, invoke_no_result/2,
invoke/2, invoke/3, monitor/2, monitor/3, demonitor/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-record(state, {node, monitors, name}).
%%----------------------------------------------------------------------------
-export_type([monitor_ref/0]).
-type monitor_ref() :: reference() | {atom(), pid()}.
-type fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}.
-spec start_link
(non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}.
-spec invoke
( pid(), fun_or_mfa(A)) -> A;
([pid()], fun_or_mfa(A)) -> {[{pid(), A}], [{pid(), term()}]}.
-spec invoke_no_result(pid() | [pid()], fun_or_mfa(any())) -> 'ok'.
-spec monitor('process', pid()) -> monitor_ref().
-spec demonitor(monitor_ref()) -> 'true'.
%%----------------------------------------------------------------------------
-define(HIBERNATE_AFTER_MIN, 1000).
-define(DESIRED_HIBERNATE, 10000).
-define(DEFAULT_NAME, "delegate_").
%%----------------------------------------------------------------------------
%% @doc Start delegate number `Num' using the default name prefix.
start_link(Num) ->
    start_link(?DEFAULT_NAME, Num).

%% @doc Start delegate number `Num' under the given name prefix; the
%% process registers locally under the derived name.
start_link(Name, Num) ->
    Name1 = delegate_name(Name, Num),
    gen_server2:start_link({local, Name1}, ?MODULE, [Name1], []).
%% @doc Invoke `FunOrMFA' on `Pid' (or each of a list of pids) via the
%% default delegate pool. See invoke/3.
invoke(Pid, FunOrMFA) ->
    invoke(Pid, ?DEFAULT_NAME, FunOrMFA).

%% Local pid: no delegation needed, apply directly in the caller.
invoke(Pid, _Name, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
    apply1(FunOrMFA, Pid);
%% Single remote pid: reuse the list implementation, then unwrap the
%% single result, re-raising any remote exception in the caller.
invoke(Pid, Name, FunOrMFA) when is_pid(Pid) ->
    case invoke([Pid], Name, FunOrMFA) of
        {[{Pid, Result}], []} ->
            Result;
        {[], [{Pid, {Class, Reason, StackTrace}}]} ->
            erlang:raise(Class, Reason, StackTrace)
    end;

invoke([], _Name, _FunOrMFA) -> %% optimisation
    {[], []};
invoke([Pid], _Name, FunOrMFA) when node(Pid) =:= node() -> %% optimisation
    case safe_invoke(Pid, FunOrMFA) of
        {ok, _, Result} -> {[{Pid, Result}], []};
        {error, _, Error} -> {[], [{Pid, Error}]}
    end;
%% General case: split pids into local and remote (grouped per node),
%% fan the call out once per remote node, and apply locally ourselves.
invoke(Pids, Name, FunOrMFA) when is_list(Pids) ->
    {LocalPids, Grouped} = group_pids_by_node(Pids),
    %% The use of multi_call is only safe because the timeout is
    %% infinity, and thus there is no process spawned in order to do
    %% the sending. Thus calls can't overtake preceding calls/casts.
    {Replies, BadNodes} =
        case maps:keys(Grouped) of
            [] -> {[], []};
            RemoteNodes -> gen_server2:multi_call(
                             RemoteNodes, delegate(self(), Name, RemoteNodes),
                             {invoke, FunOrMFA, Grouped}, infinity)
        end,
    %% Pids on unreachable nodes are reported as nodedown exits.
    BadPids = [{Pid, {exit, {nodedown, BadNode}, []}} ||
                  BadNode <- BadNodes,
                  Pid <- maps:get(BadNode, Grouped)],
    ResultsNoNode = lists:append([safe_invoke(LocalPids, FunOrMFA) |
                                  [Results || {_Node, Results} <- Replies]]),
    %% Partition per-pid outcomes into {Successes, Failures}.
    lists:foldl(
      fun ({ok, Pid, Result}, {Good, Bad}) -> {[{Pid, Result} | Good], Bad};
          ({error, Pid, Error}, {Good, Bad}) -> {Good, [{Pid, Error} | Bad]}
      end, {[], BadPids}, ResultsNoNode).
%% @doc Monitor `Pid' via the default delegate pool. See monitor/3.
monitor(process, Pid) ->
    ?MODULE:monitor(process, Pid, ?DEFAULT_NAME).

%% Local pid: a plain erlang monitor suffices, and the ref identifies it.
monitor(process, Pid, _Prefix) when node(Pid) =:= node() ->
    erlang:monitor(process, Pid);
%% Remote pid: ask the delegate responsible for Pid's node to monitor it
%% on our behalf; the {DelegateName, Pid} pair acts as the monitor ref.
monitor(process, Pid, Prefix) ->
    Name = delegate(Pid, Prefix, [node(Pid)]),
    gen_server2:cast(Name, {monitor, self(), Pid}),
    {Name, Pid}.

%% @doc Release a monitor obtained from monitor/2,3 — either a plain
%% erlang monitor ref or a {DelegateName, Pid} delegate monitor.
demonitor(Ref) when is_reference(Ref) ->
    erlang:demonitor(Ref);
demonitor({Name, Pid}) ->
    gen_server2:cast(Name, {demonitor, self(), Pid}).
%% Fire-and-forget variant of invoke/2: results and errors are discarded.
invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
    %% Optimization, avoids calling invoke_no_result/3.
    %%
    %% This may seem like a cosmetic change at first but it actually massively reduces the memory usage in mirrored
    %% queues when ack/nack are sent to the node that hosts a mirror.
    %% This way binary references are not kept around unnecessarily.
    %%
    %% See https://github.com/rabbitmq/rabbitmq-common/issues/208#issuecomment-311308583 for a before/after
    %% comparison.
    _ = safe_invoke(Pid, FunOrMFA), %% we don't care about any error
    ok;
invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) ->
    %% Optimization, avoids calling invoke_no_result/3
    RemoteNode = node(Pid),
    gen_server2:abcast([RemoteNode], delegate(self(), ?DEFAULT_NAME, [RemoteNode]),
                       {invoke, FunOrMFA,
                        maps:from_list([{RemoteNode, [Pid]}])}),
    ok;
invoke_no_result([], _FunOrMFA) -> %% optimisation
    ok;
invoke_no_result([Pid], FunOrMFA) when node(Pid) =:= node() -> %% optimisation
    _ = safe_invoke(Pid, FunOrMFA), %% must not die
    ok;
invoke_no_result([Pid], FunOrMFA) ->
    RemoteNode = node(Pid),
    gen_server2:abcast([RemoteNode], delegate(self(), ?DEFAULT_NAME, [RemoteNode]),
                       {invoke, FunOrMFA,
                        maps:from_list([{RemoteNode, [Pid]}])}),
    ok;
invoke_no_result(Pids, FunOrMFA) when is_list(Pids) ->
    %% General case: group by node, broadcast to the remote delegates and
    %% run the local portion in this process.
    {LocalPids, Grouped} = group_pids_by_node(Pids),
    case maps:keys(Grouped) of
        [] -> ok;
        RemoteNodes -> gen_server2:abcast(
                         RemoteNodes, delegate(self(), ?DEFAULT_NAME, RemoteNodes),
                         {invoke, FunOrMFA, Grouped})
    end,
    _ = safe_invoke(LocalPids, FunOrMFA), %% must not die
    ok.
%%----------------------------------------------------------------------------
%% Split Pids into {LocalPids, RemoteGrouped}: pids on this node go into
%% the first list, every other pid is grouped under its node in a map of
%% Node => [Pid]. Both the local list and each per-node list end up in
%% reverse order relative to the input (pids are prepended as seen).
group_pids_by_node(Pids) ->
    group_pids_by_node(Pids, node(), [], maps:new()).

group_pids_by_node([], _ThisNode, Local, Remote) ->
    {Local, Remote};
group_pids_by_node([Pid | Rest], ThisNode, Local, Remote) ->
    case node(Pid) of
        ThisNode ->
            group_pids_by_node(Rest, ThisNode, [Pid | Local], Remote);
        OtherNode ->
            Remote1 = maps:update_with(OtherNode,
                                       fun (Ps) -> [Pid | Ps] end,
                                       [Pid], Remote),
            group_pids_by_node(Rest, ThisNode, Local, Remote1)
    end.
%% Build the registered name of a delegate process from its string prefix
%% and numeric hash, e.g. "delegate_" and 3 become 'delegate_3'.
delegate_name(Name, Hash) ->
    list_to_atom(lists:concat([Name, Hash])).
%% Pick the delegate process name for this caller and cache it in the
%% process dictionary: Pid is hashed over the number of delegates reported
%% by delegate_sup, so repeated calls from the same process always yield
%% the same delegate (the cached name is reused unconditionally).
delegate(Pid, Prefix, RemoteNodes) ->
    case get(delegate) of
        undefined -> Name = delegate_name(Prefix,
                                          erlang:phash2(Pid,
                                                        delegate_sup:count(RemoteNodes, Prefix))),
                     put(delegate, Name),
                     Name;
        Name -> Name
    end.
%% Apply FunOrMFA to one pid or to each pid in a list without letting any
%% exception escape: each outcome is tagged {ok, Pid, Result} or
%% {error, Pid, {Class, Reason, Stacktrace}}.
safe_invoke(Pids, FunOrMFA) when is_list(Pids) ->
    lists:map(fun (Pid) -> safe_invoke(Pid, FunOrMFA) end, Pids);
safe_invoke(Pid, FunOrMFA) when is_pid(Pid) ->
    try apply1(FunOrMFA, Pid) of
        Result ->
            {ok, Pid, Result}
    catch
        Class:Reason:Stacktrace ->
            {error, Pid, {Class, Reason, Stacktrace}}
    end.

%% Run either an {M, F, A} triple (Arg is prepended to A) or a unary fun.
apply1({Mod, Fun, Args}, Arg) -> erlang:apply(Mod, Fun, [Arg | Args]);
apply1(Fun, Arg) -> Fun(Arg).
%%----------------------------------------------------------------------------
%% gen_server2 callbacks ----------------------------------------------------

%% Remember our node and registered name; start with no monitors and ask
%% gen_server2 to hibernate with an adaptive backoff when idle.
init([Name]) ->
    {ok, #state{node = node(), monitors = dict:new(), name = Name}, hibernate,
     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.

%% Synchronous invoke: run FunOrMFA against the pids grouped under our
%% node and reply with the list of tagged results.
handle_call({invoke, FunOrMFA, Grouped}, _From, State = #state{node = Node}) ->
    {reply, safe_invoke(maps:get(Node, Grouped), FunOrMFA), State,
     hibernate}.

%% Register MonitoringPid as a watcher of Pid; only one underlying
%% erlang:monitor/2 is created per watched pid, shared by all watchers.
handle_cast({monitor, MonitoringPid, Pid},
            State = #state{monitors = Monitors}) ->
    Monitors1 = case dict:find(Pid, Monitors) of
                    {ok, {Ref, Pids}} ->
                        Pids1 = gb_sets:add_element(MonitoringPid, Pids),
                        dict:store(Pid, {Ref, Pids1}, Monitors);
                    error ->
                        Ref = erlang:monitor(process, Pid),
                        Pids = gb_sets:singleton(MonitoringPid),
                        dict:store(Pid, {Ref, Pids}, Monitors)
                end,
    {noreply, State#state{monitors = Monitors1}, hibernate};
%% Drop MonitoringPid from Pid's watcher set; the real monitor is released
%% only when the last watcher is gone. Unknown pids are ignored.
handle_cast({demonitor, MonitoringPid, Pid},
            State = #state{monitors = Monitors}) ->
    Monitors1 = case dict:find(Pid, Monitors) of
                    {ok, {Ref, Pids}} ->
                        Pids1 = gb_sets:del_element(MonitoringPid, Pids),
                        case gb_sets:is_empty(Pids1) of
                            true -> erlang:demonitor(Ref),
                                    dict:erase(Pid, Monitors);
                            false -> dict:store(Pid, {Ref, Pids1}, Monitors)
                        end;
                    error ->
                        Monitors
                end,
    {noreply, State#state{monitors = Monitors1}, hibernate};
%% Asynchronous invoke (from abcast): run and discard the results.
handle_cast({invoke, FunOrMFA, Grouped}, State = #state{node = Node}) ->
    _ = safe_invoke(maps:get(Node, Grouped), FunOrMFA),
    {noreply, State, hibernate}.

%% A watched process died: forward a synthesised 'DOWN' message, whose
%% "reference" is the {Name, Pid} handle returned by monitor/3, to every
%% watcher, then forget the entry.
handle_info({'DOWN', Ref, process, Pid, Info},
            State = #state{monitors = Monitors, name = Name}) ->
    {noreply,
     case dict:find(Pid, Monitors) of
         {ok, {Ref, Pids}} ->
             Msg = {'DOWN', {Name, Pid}, process, Pid, Info},
             gb_sets:fold(fun (MonitoringPid, _) -> MonitoringPid ! Msg end,
                          none, Pids),
             State#state{monitors = dict:erase(Pid, Monitors)};
         error ->
             State
     end, hibernate};
%% Drain anything unexpected so the mailbox cannot grow.
handle_info(_Info, State) ->
    {noreply, State, hibernate}.

terminate(_Reason, _State) ->
    ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}. | erlang_server/_build/default/lib/rabbit_common/src/delegate.erl | 0.555435 | 0.420183 | delegate.erl | starcoder |
%%
%% Copyright (c) 2018 <NAME>
%% All rights reserved.
%% Distributed under the terms of the MIT License. See the LICENSE file.
%%
%% SIP Response common routines
%%
-module(ersip_response).
-export([target/1]).
%%%===================================================================
%%% Types
%%%===================================================================
-type target() :: {ersip_host:host(),
inet:port_number(),
ersip_transport:transport(),
Options :: map()
}.
%%%===================================================================
%%% API
%%%===================================================================
%% 18.2.2 Sending Responses
%% Decide where a SIP response should be sent, given the topmost Via of the
%% request (RFC 3261 section 18.2.2, with the RFC 3581 rport extension
%% handled in target_rfc3261_or_3581/2). Returns {reuse, Target} when the
%% transport is reliable (reuse or reopen the request's connection) and
%% {direct, Target} otherwise; Target is {Host, Port, Transport, Options}.
-spec target(ersip_hdr_via:via()) -> Result when
      Result :: {direct, target()}
              | {reuse, target()}.
target(Via) ->
    ViaParams = ersip_hdr_via:params(Via),
    {sent_protocol, _, _, Transport} = ersip_hdr_via:sent_protocol(Via),
    case ersip_transport:is_reliable(Transport) of
        true ->
            %% If the "sent-protocol" is a reliable transport protocol
            %% such as TCP or SCTP, or TLS over those, the response
            %% MUST be sent using the existing connection to the
            %% source of the original request that created the
            %% transaction, if that connection is still open. This
            %% requires the server transport to maintain an
            %% association between server transactions and transport
            %% connections. If that connection is no longer open, the
            %% server SHOULD open a connection to the IP address in
            %% the "received" parameter, if present, using the port in
            %% the "sent-by" value, or the default port for that
            %% transport, if no port is specified. If that connection
            %% attempt fails, the server SHOULD use the procedures in
            %% [4] for servers in order to determine the IP address
            %% and port to open the connection and send the response
            %% to..
            {reuse, make_target_from_received(Via)};
        false ->
            case ViaParams of
                #{maddr := Host} ->
                    %% Otherwise, if the Via header field value
                    %% contains a "maddr" parameter, the response MUST
                    %% be forwarded to the address listed there, using
                    %% the port indicated in "sent-by", or port 5060
                    %% if none is present. If the address is a
                    %% multicast address, the response SHOULD be sent
                    %% using the TTL indicated in the "ttl" parameter,
                    %% or with a TTL of 1 if that parameter is not
                    %% present.
                    Port = select_port(Via),
                    Options =
                        case ViaParams of
                            #{ttl := TTL} ->
                                #{ttl => TTL};
                            _ ->
                                #{ttl => 1}
                        end,
                    {direct, {Host, Port, Transport, Options}};
                _ ->
                    target_rfc3261_or_3581(ViaParams, Via)
            end
    end.
%%%===================================================================
%%% Helpers
%%%===================================================================
%% Build a target from the Via: the host is the "received" parameter when
%% present (the source address we observed), otherwise the sent-by host;
%% the port always comes from sent-by via select_port/1.
-spec make_target_from_received(ersip_hdr_via:via()) -> target().
make_target_from_received(Via) ->
    {sent_protocol, _, _, Transport} = ersip_hdr_via:sent_protocol(Via),
    ViaParams = ersip_hdr_via:params(Via),
    Host =
        case ViaParams of
            #{received := H} ->
                H;
            _ ->
                {sent_by, H, _} = ersip_hdr_via:sent_by(Via),
                H
        end,
    {Host, select_port(Via), Transport, #{}}.

%% Port component of the Via's sent-by value.
-spec select_port(ersip_hdr_via:via()) -> inet:port_number().
select_port(Via) ->
    {sent_by, _, SentByPort} = ersip_hdr_via:sent_by(Via),
    SentByPort.
target_rfc3261_or_3581(ViaParams, Via) ->
case ViaParams of
#{received := Host, rport := Port} when Port /= true ->
%% RFC 3581:
%% When a server attempts to send a response, it examines the topmost
%% Via header field value of that response. If the "sent-protocol"
%% component indicates an unreliable unicast transport protocol, such as
%% UDP, and there is no "maddr" parameter, but there is both a
%% "received" parameter and an "rport" parameter, the response MUST be
%% sent to the IP address listed in the "received" parameter, and the
%% port in the "rport" parameter. The response MUST be sent from the
%% same address and port that the corresponding request was received on.
%% This effectively adds a new processing step between bullets two and
%% three in Section 18.2.2 of SIP [1].
{sent_protocol, _, _, Transport} = ersip_hdr_via:sent_protocol(Via),
{direct, {Host, Port, Transport, #{}}};
_ ->
%% Otherwise (for unreliable unicast transports), if the
%% top Via has a "received" parameter, the response MUST
%% be sent to the address in the "received" parameter,
%% using the port indicated in the "sent-by" value, or
%% using port 5060 if none is specified explicitly. If
%% this fails, for example, elicits an ICMP "port
%% unreachable" response, the procedures of Section 5 of
%% [4] SHOULD be used to determine where to send the
%% response.
{direct, make_target_from_received(Via)}
end. | src/ersip_response.erl | 0.597138 | 0.439868 | ersip_response.erl | starcoder |
%% @author <NAME> <<EMAIL>>
%% @copyright 2011 <NAME>
%% @doc Simple topological sort of tuples {item, [depends], [provides]}
%% Copyright 2011 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(z_toposort).
-export([
sort/1
]).
-type name() :: term().
-type topoitem() :: {Name::name(), Depends::list(), Provides::list()}.
%% @doc Return the topological sort of a list.
-spec sort( [ topoitem() ] ) -> {error, {cyclic, [name()]}} | {ok, [name()]}.
%% Topologically sort the items: build a digraph with one vertex per item,
%% then let add_node/4 wire the dependency edges and extract either
%% {ok, NamesInDependencyOrder} or {error, {cyclic, Cycles}}.
sort([]) ->
    {ok, []};
sort(L) ->
    G = digraph:new(),
    %% Vs maps each item name to its digraph vertex.
    Vs = [ {N, digraph:add_vertex(G)} || {N, _, _} <- L ],
    add_node(G, L, L, Vs).
%% Add dependency edges for the remaining items, then extract the result.
%% G is the digraph, Nodes the complete item list (used to resolve each
%% 'provides'), the third argument the items still to process, and Vs the
%% name -> vertex proplist. The digraph is always deleted before returning.
add_node(G, _Nodes, [], Vs) ->
    case digraph_utils:is_acyclic(G) of
        true ->
            SortedVs = digraph_utils:topsort(G),
            digraph:delete(G),
            {ok, vertices_to_nodes(SortedVs, Vs)};
        false ->
            Cycles = digraph_utils:cyclic_strong_components(G),
            digraph:delete(G),
            {error, {cyclic, [ vertices_to_nodes(Components, Vs) || Components <- Cycles ]}}
    end;
%% Items without dependencies need no edges.
add_node(G, Nodes, [{_N, [], _Provides}|L], Vs) ->
    add_node(G, Nodes, L, Vs);
add_node(G, Nodes, [{Node, Depends, _Provides}|L], Vs) ->
    {Node, NVx} = proplists:lookup(Node, Vs),
    %% Every item providing one of our dependencies gets an edge towards
    %% us, so providers sort before consumers.
    DepNodes = lists:flatten([ find_node(Nodes, [], Depend) || Depend <- Depends ]),
    [
        begin
            {N, Vx} = proplists:lookup(N, Vs),
            digraph:add_edge(G, Vx, NVx)
        end
        || N <- DepNodes
    ],
    add_node(G, Nodes, L, Vs).
%% Collect the names of all items whose 'provides' list contains D,
%% accumulated on top of Fs with the most recently seen name first. A
%% previously disabled strict variant threw {missing_provide, D} when
%% nothing provided D; missing providers are simply ignored here.
find_node(Nodes, Fs, D) ->
    lists:foldl(
        fun ({N, _Depends, Provides}, Acc) ->
            case lists:member(D, Provides) of
                true -> [N | Acc];
                false -> Acc
            end
        end, Fs, Nodes).
vertices_to_nodes(Vertices, Nodes) ->
[
begin
{value, {N,_}} = lists:keysearch(V, 2, Nodes),
N
end
|| V <- Vertices
]. | apps/zotonic_core/src/support/z_toposort.erl | 0.73077 | 0.424949 | z_toposort.erl | starcoder |
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenCensus Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-----------------------------------------------------------------------
-module(oc_sampler_period_or_count_SUITE).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
%% Test-case order: period-only, then count-only, then combined scenarios.
all() ->
    [
        period_sample1,
        period_sample2,
        count_sample1,
        count_sample2,
        count_and_period_sample1,
        count_and_period_sample2,
        count_and_period_sample3,
        count_and_period_sample4
    ].
%% Suite-wide setup: load (but do not start) opencensus and record the
%% shared run parameters in Config.
init_per_suite(Config) ->
    application:load(opencensus),
    %% limit is a count of traces to run
    %% delay is a time delay between traces, in milliseconds
    [{limit, 10000}, {delay, 5} | Config].
%% Configure the period-or-count sampler for each test case and boot the
%% opencensus application. The eight clauses differ only in the
%% {Period, Count} pair, so the shared setup lives in start_sampler/3; the
%% returned Config carries the chosen period/count for desired_result/2.
init_per_testcase(period_sample1, Config) ->
    start_sampler(1, 0, Config);
init_per_testcase(period_sample2, Config) ->
    start_sampler(10, 0, Config);
init_per_testcase(count_sample1, Config) ->
    start_sampler(0, 1, Config);
init_per_testcase(count_sample2, Config) ->
    start_sampler(0, 10, Config);
init_per_testcase(count_and_period_sample1, Config) ->
    start_sampler(5, 1, Config);
init_per_testcase(count_and_period_sample2, Config) ->
    start_sampler(5, 50, Config);
init_per_testcase(count_and_period_sample3, Config) ->
    start_sampler(2, 100, Config);
init_per_testcase(count_and_period_sample4, Config) ->
    start_sampler(2, 100, Config).

%% Shared per-testcase setup: record Period/Count in Config, point the
%% opencensus sampler env at oc_sampler_period_or_count with the same pair,
%% and make sure the application tree is running.
start_sampler(Period, Count, Config) ->
    Config1 = [{period, Period}, {count, Count} | Config],
    application:set_env(opencensus, sampler,
                        {oc_sampler_period_or_count, [{period, Period},
                                                      {count, Count}]}),
    {ok, _} = application:ensure_all_started(opencensus),
    Config1.
%% Stop opencensus after every test case so the next one can reconfigure
%% the sampler from scratch.
end_per_testcase(_, _Config) ->
    ok = application:stop(opencensus),
    ok.

end_per_suite(_Config) ->
    ok.
%% Each test case runs a fixed number of traces (run_tracing/1) and checks
%% the sampled count against the analytic expectation (desired_result/2);
%% period-driven expectations allow a tolerance of 5 because timing varies.
period_sample1(Config) ->
    %% expecting 1 trace after every 2 second
    %% count = 0 doesn't affect on result
    {Time, Result} = run_tracing(Config),
    DesiredResult = desired_result(Config, Time),
    ?assert(abs(DesiredResult - Result) =< 5).
period_sample2(Config) ->
    %% expecting 1 trace after every 10 seconds
    %% count = 0 doesn't affect on result
    {Time, Result} = run_tracing(Config),
    DesiredResult = desired_result(Config, Time),
    ?assert(abs(DesiredResult - Result) =< 5).
count_sample1(Config) ->
    %% all traces will be stored
    %% period = 0 doesn't affect on result
    {Time, Result} = run_tracing(Config),
    DesiredResult = desired_result(Config, Time),
    ?assertEqual(DesiredResult, Result).
count_sample2(Config) ->
    %% expecting every 10th trace
    %% period = 0 doesn't affect on result
    {Time, Result} = run_tracing(Config),
    DesiredResult = desired_result(Config, Time),
    ?assertEqual(DesiredResult, Result).
count_and_period_sample1(Config) ->
    %% all traces will be stored
    %% because count = 1 that means every first
    {Time, Result} = run_tracing(Config),
    DesiredResult = desired_result(Config, Time),
    ?assertEqual(DesiredResult, Result).
count_and_period_sample2(Config) ->
    %% expecting every fifth trace
    %% or 1 trace each 5 seconds
    {Time, Result} = run_tracing(Config),
    DesiredResult = desired_result(Config, Time),
    ?assert(abs(DesiredResult - Result) =< 5).
count_and_period_sample3(Config) ->
    %% expecting every 100th trace
    %% or 1 trace each 2 seconds
    {Time, Result} = run_tracing(Config),
    DesiredResult = desired_result(Config, Time),
    ?assert(abs(DesiredResult - Result) =< 5).
%% Also inspects the sampler's internal ETS counter between runs to check
%% that both the counter trigger and the period trigger reset it.
count_and_period_sample4(Config) ->
    %% 1000/100 (0th, 99th, 199th, etc)
    ?assertMatch({_, 100}, run_tracing(Config)),
    timer:sleep(2500),
    ?assertMatch([{sampler, _, 100}], ets:lookup(sampler_period_or_count, sampler)),
    %% this enabled because of counter
    SpanContext = oc_trace:start_span(<<"span">>, undefined),
    ?assertEqual(true, oc_trace:is_enabled(SpanContext)),
    ?assertMatch([{sampler, _, 1}], ets:lookup(sampler_period_or_count, sampler)),
    timer:sleep(2500),
    %% this enabled because of period
    SpanContext1 = oc_trace:start_span(<<"span">>, undefined),
    ?assertEqual(true, oc_trace:is_enabled(SpanContext1)),
    ?assertMatch([{sampler, _, 2}], ets:lookup(sampler_period_or_count, sampler)),
    ?assertMatch({_, 100}, run_tracing(Config)).
%% Analytic expectation of the sampled-trace count for a finished run.
%% Duration is the run time in seconds; Limit/Period/Count come from
%% Config. Assumes Period and Count are never both zero (the period-only
%% branch would divide by zero) — guaranteed by init_per_testcase here.
desired_result(Config, Duration) ->
    Limit = ?config(limit, Config),
    _Delay = ?config(delay, Config),
    Period = ?config(period, Config),
    Count = ?config(count, Config),
    Res = if
              %% period-only configuration: one trace per Period seconds
              Count == 0 ->
                  Duration / Period;
              %% count-only configuration: every Count-th trace
              Period == 0 ->
                  Limit / Count;
              %% if period is too small, counter will not reach the trigger value
              %% and sampler will act according to period duration settings
              (Duration / Period) > (Limit / Count) ->
                  Duration / Period;
              true ->
                  Limit / Count
              %% (Limit / Count) + ((Limit - (Limit / Count)) * (Delay / 1000) / Period)
          end,
    round(Res).
%%
%%
%% Start Limit spans, sleeping Delay ms between starts, and keep only the
%% ones the sampler enabled. Returns {ElapsedSeconds, EnabledCount}.
run_tracing(Config) ->
    Start = erlang:monotonic_time(microsecond),
    Limit = ?config(limit, Config),
    Delay = ?config(delay, Config),
    %% run traces counted by Limit, with pause specified by Delay,
    %% and filter only enabled ones
    L = lists:filter(fun(_) ->
                         SpanContext = oc_trace:start_span(<<"span">>, undefined),
                         timer:sleep(Delay),
                         oc_trace:is_enabled(SpanContext)
                     end, lists:seq(1, Limit)),
    End = erlang:monotonic_time(microsecond),
{(End - Start) / 1000000, length(L)}. | test/oc_sampler_period_or_count_SUITE.erl | 0.709824 | 0.422773 | oc_sampler_period_or_count_SUITE.erl | starcoder |
%%==============================================================================
%% Copyright 2010 Erlang Solutions Ltd.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
-module(escalus).
% Public API
-export([suite/0,
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2,
create_users/1,
create_users/2,
delete_users/1,
delete_users/2,
get_users/1,
override/3,
make_everyone_friends/1,
fresh_story/3,
fresh_story_with_config/3,
story/3,
assert/2,
assert/3,
assert_many/2,
send/2,
send_and_wait/2,
wait_for_stanza/1,
wait_for_stanza/2,
wait_for_stanzas/2,
wait_for_stanzas/3,
peek_stanzas/1]).
-export_type([client/0,
config/0]).
-include("escalus.hrl").
%%--------------------------------------------------------------------
%% Public Types
%%--------------------------------------------------------------------
-type client() :: #client{}.
-type config() :: escalus_config:config().
%%--------------------------------------------------------------------
%% Public API
%%--------------------------------------------------------------------
%% Common Test integration ---------------------------------------------------

%% The suite requires the escalus_users configuration entry.
suite() ->
    [{require, escalus_users}].

%% Start the escalus application plus its user and fresh-user services.
init_per_suite(Config) ->
    ensure_started(escalus),
    escalus_users:start(Config),
    escalus_fresh:start(Config),
    Config.

end_per_suite(Config) ->
    escalus_users:stop(Config),
    escalus_fresh:stop(Config),
    ok.

%% Per-testcase: start the cleaner, then the event service tagged with the
%% test-case name; end_per_testcase tears them down in reverse order.
init_per_testcase(CaseName, Config) ->
    Config1 = escalus_cleaner:start(Config),
    escalus_event:start([{tc_name, CaseName}|Config1]).

end_per_testcase(_CaseName, Config) ->
    Config1 = escalus_event:stop(Config),
    escalus_cleaner:stop(Config1).
%%--------------------------------------------------------------------
%% Public API - forward functions from other modules
%%--------------------------------------------------------------------
%% User API
%% Thin facade: every function below forwards to the escalus_* module that
%% implements it, so test suites depend only on the 'escalus' module.
create_users(Config) ->
    escalus_users:create_users(Config).
create_users(Config, Users) ->
    escalus_users:create_users(Config, Users).
delete_users(Config) ->
    escalus_users:delete_users(Config).
delete_users(Config, Users) ->
    escalus_users:delete_users(Config, Users).
get_users(Names) ->
    escalus_users:get_users(Names).
%% Story API
make_everyone_friends(Config) ->
    escalus_story:make_everyone_friends(Config).
fresh_story(Config, ResourceCounts, Story) ->
    escalus_fresh:story(Config, ResourceCounts, Story).
fresh_story_with_config(Config, ResourceCounts, Story) ->
    escalus_fresh:story_with_config(Config, ResourceCounts, Story).
story(Config, ResourceCounts, Story) ->
    escalus_story:story(Config, ResourceCounts, Story).
%% Assertions
assert(PredSpec, Arg) ->
    escalus_new_assert:assert(PredSpec, Arg).
assert(PredSpec, Params, Arg) ->
    escalus_new_assert:assert(PredSpec, Params, Arg).
assert_many(Predicates, Stanzas) ->
    escalus_new_assert:assert_many(Predicates, Stanzas).
%% Client API
send(Client, Packet) ->
    escalus_client:send(Client, Packet).
send_and_wait(Client, Packet) ->
    escalus_client:send_and_wait(Client, Packet).
wait_for_stanza(Client) ->
    escalus_client:wait_for_stanza(Client).
wait_for_stanza(Client, Timeout) ->
    escalus_client:wait_for_stanza(Client, Timeout).
wait_for_stanzas(Client, Count) ->
    escalus_client:wait_for_stanzas(Client, Count).
wait_for_stanzas(Client, Count, Timeout) ->
    escalus_client:wait_for_stanzas(Client, Count, Timeout).
peek_stanzas(Client) ->
    escalus_client:peek_stanzas(Client).
%% Other functions
override(Config, OverrideName, NewValue) ->
    escalus_overridables:override(Config, OverrideName, NewValue).
ensure_started(App) ->
case application:start(App) of
{error, {not_started, NotStartedApp}} ->
ensure_started(NotStartedApp),
ensure_started(App);
ok ->
ok;
{error, {already_started, _}} ->
ok
end. | src/escalus.erl | 0.600071 | 0.412944 | escalus.erl | starcoder |
%%% @author <NAME> <<EMAIL>> [http://steve.vinoski.net/]
%%% @doc MIME-Type Parser based on Joe Gregorio's mimeparse.py Python module. This module
%%% provides basic functions for handling mime-types. It can handle matching mime-types
%%% against a list of media-ranges. Comments are mostly excerpted from the original.
%%% @reference See <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1">
%%% RFC 2616, section 14.1</a> for a complete explanation of mime-type handling.
%%% @reference <a href="http://code.google.com/p/mimeparse/">mimeparse</a>
-module(mimeparse).
-author('<EMAIL>').
-export([parse_mime_type/1, parse_media_range/1, quality/2, best_match/2]).
-export([test/0]).
%%% @type mime_type() = string().
%%% @type media_range() = mime_type().
%%% @type type() = atom().
%%% @type subtype() = atom().
%%% @type value() = string().
%%% @type param() = {atom(), value()}.
%%% @type mime_tuple() = {type(), subtype(), [param()]}.
%%% @type parsed_ranges() = [mime_tuple()].
%% @spec parse_mime_type(Mime_type::mime_type()) -> mime_tuple()
%% @doc Parses a mime-type into its component parts.
%% Returns a tuple of the {type, subtype, params} where 'params' is a proplist
%% of all the parameters for the mime-type. For example, the mime-type
%% "application/xhtml;q=0.5" would get parsed into:
%%
%% {application, xhtml, [{q, "0.5"}]}
%%
%% Split a mime-type string into {Type, Subtype, Params}, e.g.
%% "application/xhtml;q=0.5" -> {application, xhtml, [{q, "0.5"}]}.
%% NOTE(review): list_to_atom/1 is applied to header-derived text; on
%% untrusted input this can grow the atom table — confirm callers are safe.
parse_mime_type(MimeType) ->
    [FullType | RawParams] = string:tokens(string:strip(MimeType), ";"),
    Params = [param(RawParam) || RawParam <- RawParams],
    {Type, Subtype} = split_type(FullType),
    {Type, Subtype, Params}.

%% One ";key=value" (or bare ";key") segment becomes an {atom(), string()}
%% pair; a segment without a value gets "".
param(Raw) ->
    case [string:strip(Part) || Part <- string:tokens(Raw, "=")] of
        [Key] -> {list_to_atom(Key), ""};
        [Key, Value] -> {list_to_atom(Key), Value}
    end.

%% Java's URLConnection sends an Accept header containing a single "*";
%% treat it as the legal full wildcard "*/*".
split_type("*") ->
    {'*', '*'};
split_type(FullType) ->
    [Type, Subtype] = [list_to_atom(string:strip(Part))
                       || Part <- string:tokens(FullType, "/")],
    {Type, Subtype}.
%% @spec parse_media_range(Range::media_range()) -> mime_tuple()
%% @doc Parses a media-range into its component parts.
%% Media-ranges are mime-types with wildcards and a 'q' quality parameter. This
%% function performs the same as parse_mime_type/1 except that it also
%% guarantees that there is a value for 'q' in the params dictionary, filling it
%% in with a proper default if necessary.
%% @see parse_mime_type/1
%%
%% Like parse_mime_type/1, but guarantees a sane 'q' quality parameter:
%% a missing, malformed or out-of-range q becomes "1", and a bare-dot
%% value such as ".5" is normalised to "0.5".
parse_media_range(Range) ->
    {Type, Subtype, Params} = parse_mime_type(Range),
    Quality = case lists:keyfind(q, 1, Params) of
                  false -> {q, "1"};
                  {q, Raw} -> {q, checked_q(Raw)}
              end,
    {Type, Subtype, lists:keystore(q, 1, Params, Quality)}.

%% Validate one raw q string, falling back to "1" when it does not parse
%% as a number or lies outside [0.0, 1.0].
checked_q(Raw) ->
    Padded = case Raw of
                 [$. | _] -> [$0 | Raw];
                 _ -> Raw
             end,
    case number_to_float(Padded) of
        {error, _} -> "1";
        Float when Float > 1.0; Float < 0.0 -> "1";
        _Float -> Padded
    end.
%% @spec quality(Mime_type::mime_type(), Range::media_range()) -> float()
%% @doc Determines the quality ('q') of a mime-type when compared against a media-range.
%% Returns the quality 'q' of a mime_type when compared against the media-ranges
%% in ranges. For example, given this media-range:
%%
%% "text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5"
%%
%% this function returns 0.7.
%%
%% Quality of Mime_type against the comma-separated media-range Header.
quality(Mime_type, Range) ->
    {_, Q} = quality_parsed(Mime_type, [parse_media_range(R) || R <- string:tokens(Range, ",")]),
    Q.
%% @spec best_match(Supported_mime_types::[mime_type()], Header::media_range()) -> mime_type()
%% @doc Choose the mime-type with the highest quality ('q') from a list of candidates.
%% Takes a list of supported mime-types and finds the best match for all the
%% media-ranges listed in Header. The value of Header must be a string that
%% conforms to the format of the HTTP Accept: header. The value of
%% Supported_mime_types is a list of mime-types.
%%
%%   best_match(["application/xbel+xml", "text/xml"], "text/*;q=0.5,*/*; q=0.1").
%%
%% returns "text/xml".
%%
best_match(Supported_mime_types, Header) ->
    Parsed_header = [parse_media_range(R) || R <- string:tokens(Header, ",")],
    %% NOTE(review): a candidate replaces the best-so-far only when BOTH its
    %% fitness and its quality are >= the current best; the reference Python
    %% mimeparse compares {Fitness, Q} lexicographically instead — confirm
    %% this difference is intentional before changing it.
    Best_match = lists:foldl(fun(Mime_type, {{Best_fit, Best_q}, _}=Best_so_far) ->
                                 Score = {Fitness, Q} = quality_parsed(Mime_type, Parsed_header),
                                 if
                                     Fitness >= Best_fit andalso Q >= Best_q ->
                                         {Score, Mime_type};
                                     true ->
                                         Best_so_far
                                 end
                             end, {{-1, 0.0}, ""}, Supported_mime_types),
    %% A best quality of 0.0 means nothing acceptable was found.
    case Best_match of
        {{_, 0.0}, _} ->
            "";
        {_Score, Mime_type} ->
            Mime_type
    end.
%% Internal functions.
%% Score Mime_type against a list of already-parsed media-ranges. Returns
%% {Fitness, Q}: fitness weights an exact type match 100, an exact subtype
%% match 10, plus one per matching non-q parameter; Q is the q value of the
%% best-fitting range. The comparison is strict (>), so the first range
%% reaching a given fitness wins ties.
quality_parsed(Mime_type, Parsed_ranges) ->
    {Target_type, Target_subtype, Target_params} = parse_media_range(Mime_type),
    lists:foldl(
        fun({Type, Subtype, Params}, {Best_fitness, _}=Best_so_far) ->
            case possible_best_fit(Target_type, Type, Target_subtype, Subtype) of
                true ->
                    Type_fitness = type_fitness(Target_type, Type, 100),
                    Subtype_fitness = type_fitness(Target_subtype, Subtype, 10),
                    Param_matches = count_param_matches(Target_params, Params),
                    Fitness = Type_fitness + Subtype_fitness + Param_matches,
                    if
                        Fitness > Best_fitness ->
                            {Fitness, number_to_float(proplists:get_value(q, Params))};
                        true ->
                            Best_so_far
                    end;
                _ ->
                    Best_so_far
            end
        end, {-1, 0.0}, Parsed_ranges).
%% Count how many target parameters (other than q) occur in Params with an
%% identical value. Lookup keeps the keysearch semantics of the original:
%% only the first tuple with a matching key is considered.
count_param_matches(TargetParams, Params) ->
    Matching = [Key || {Key, Value} <- TargetParams,
                       Key =/= q,
                       lists:keysearch(Key, 1, Params) =:= {value, {Key, Value}}],
    length(Matching).
%% A range can possibly match the target when both the type and the subtype
%% are compatible (equal, or either side is the '*' wildcard).
possible_best_fit(TargetType, Type, TargetSubtype, Subtype) ->
    compatible(TargetType, Type) andalso compatible(TargetSubtype, Subtype).

%% Wildcards on either side match anything; otherwise require equality.
compatible('*', _) -> true;
compatible(_, '*') -> true;
compatible(Same, Same) -> true;
compatible(_, _) -> false.
%% Fitness contribution of one component: Value on an exact match between
%% the target's type and the range's type, otherwise 0.
type_fitness(Type, Type, Value) ->
    Value;
type_fitness(_TargetType, _Type, _Value) ->
    0.
%% Parse String as a float, falling back to integer parsing (converted to
%% float). On failure returns the {error, Reason} produced by
%% string:to_float/1 (not the integer parser's), as the callers expect.
number_to_float(String) ->
    case string:to_float(String) of
        {error, _} = FloatError ->
            case string:to_integer(String) of
                {error, _} -> FloatError;
                {Int, _Rest} -> float(Int)
            end;
        {Float, _Rest} ->
            Float
    end.
%% Tests.
%% Self-tests (invoked via test/0); each returns ok or crashes on a
%% failed pattern match.
test_parse_media_range() ->
    {application, xml, [{q, "1"}]} = mimeparse:parse_media_range("application/xml;q=1"),
    {application, xml, [{q, "1"}]} = mimeparse:parse_media_range("application/xml"),
    {application, xml, [{q, "1"}]} = mimeparse:parse_media_range("application/xml;q="),
    {application, xml, [{q, "1"}]} = mimeparse:parse_media_range("application/xml ; q="),
    {application, xml, [{q, "1"}, {b, "other"}]} =
        mimeparse:parse_media_range("application/xml ; q=1;b=other"),
    %% out-of-range q is clamped back to the default "1"
    {application, xml, [{q, "1"}, {b, "other"}]} =
        mimeparse:parse_media_range("application/xml ; q=2;b=other"),
    % Java URLConnection class sends an Accept header with a single *
    {'*', '*', [{q, "0.2"}]} = mimeparse:parse_media_range(" *; q=.2"),
    ok.
%% The quality values from the worked example in RFC 2616 section 14.1.
test_rfc_2616_example() ->
    Accept = "text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5",
    1.0 = mimeparse:quality("text/html;level=1", Accept),
    0.7 = mimeparse:quality("text/html", Accept),
    0.3 = mimeparse:quality("text/plain", Accept),
    0.5 = mimeparse:quality("image/jpeg", Accept),
    0.4 = mimeparse:quality("text/html;level=2", Accept),
    0.7 = mimeparse:quality("text/html;level=3", Accept),
    ok.
test_best_match() ->
    Mime_types_supported1 = ["application/xbel+xml", "application/xml"],
    % direct match
    "application/xbel+xml" = mimeparse:best_match(Mime_types_supported1, "application/xbel+xml"),
    % direct match with a q parameter
    "application/xbel+xml" = mimeparse:best_match(Mime_types_supported1, "application/xbel+xml; q=1"),
    % direct match of our second choice with a q parameter
    "application/xml" = mimeparse:best_match(Mime_types_supported1, "application/xml; q=1"),
    % match using a subtype wildcard
    "application/xml" = mimeparse:best_match(Mime_types_supported1, "application/*; q=1"),
    % match using a type wildcard
    "application/xml" = mimeparse:best_match(Mime_types_supported1, "*/*"),
    Mime_types_supported2 = ["application/xbel+xml", "text/xml"],
    % match using a type versus a lower weighted subtype
    "text/xml" = mimeparse:best_match(Mime_types_supported2, "text/*;q=0.5,*/*; q=0.1"),
    % fail to match anything
    "" = mimeparse:best_match(Mime_types_supported2, "text/html,application/atom+xml; q=0.9"),
    % common AJAX scenario
    Mime_types_supported3 = ["application/json", "text/html"],
    "application/json" = mimeparse:best_match(Mime_types_supported3, "application/json, text/javascript, */*"),
    % verify fitness ordering
    "application/json" = mimeparse:best_match(Mime_types_supported3, "application/json, text/html;q=0.9"),
    ok.
test_support_wildcards() ->
    Mime_types_supported = ["image/*", "application/xml"],
    % match using a type wildcard
    "image/*" = mimeparse:best_match(Mime_types_supported, "image/png"),
    % match using a wildcard for both requested and supported
    "image/*" = mimeparse:best_match(Mime_types_supported, "image/*"),
    ok.
%% Run the full mimeparse test sequence in order; each sub-test crashes on
%% its first failing match, so reaching the end means all expectations held.
%% FIX: removed stray non-Erlang text that had been fused onto the final line
%% (a dataset/extraction artifact after the terminating period).
test() ->
    test_parse_media_range(),
    test_rfc_2616_example(),
    test_best_match(),
    test_support_wildcards().
%%----------------------------------------------------------------
%% Copyright (c) 2020 Faceplate
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%----------------------------------------------------------------
-module(ecomet_bits_SUITE).
-include_lib("ecomet_schema.hrl").
-include_lib("ecomet.hrl").
-include_lib("ecomet_test.hrl").
%% API
-export([
all/0,
groups/0,
init_per_testcase/2,
end_per_testcase/2,
init_per_group/2,
end_per_group/2,
init_per_suite/1,
end_per_suite/1
]).
-export([
set_bit_test/1,
reset_bit_test/1,
get_bit_test/1,
shrink_test/1,
test_and_oper/1,
test_or_oper/1,
test_andnot_oper/1,
test_xor_oper/1,
lsb_test/1,
msb_test/1,
foldl_test/1,
foldr_test/1,
insert_test/1,
bubles_add_test/1,
bubbles_remove_test/1,
bubbles_to_bits_test/1
]).
%%----Vectors-------
-export([
fill_test/1,
notx_test/1,
tovector_test/1,
splitr_test/1,splitl_test/1,
tailr_test/1,taill_test/1,
dropr_test/1,dropl_test/1,
count_test/1
]).
%% Common Test entry point: four standalone cases plus three case groups
%% (operations, bubbles, vectors) defined in groups/0.
all()->
  [
    set_bit_test,
    reset_bit_test,
    get_bit_test,
    shrink_test,
    {group,oper_test},
    lsb_test,
    msb_test,
    foldl_test,
    foldr_test,
    {group,bubbles_test},
    {group,vectors_test}
  ].
%% Case groups referenced from all/0. Each group uses the default (empty)
%% run strategy, i.e. cases execute sequentially.
groups()->
  [
    {oper_test,
      [],  % Run strategy
      [    % Cases:
        test_and_oper,     % AND operation
        test_or_oper,      % OR operation
        test_andnot_oper,  % ANDNOT operation
        test_xor_oper      % XOR operation
      ]
    },
    {bubbles_test,
      [],  % Run strategy
      [    % Cases:
        insert_test,          % Bubbles union
        bubles_add_test,      % Add bit (case name keeps its historical spelling)
        bubbles_remove_test,  % Remove bit
        bubbles_to_bits_test  % Bubbles to bits
      ]
    },
    {vectors_test,
      [],  % Run strategy
      [
        fill_test,
        notx_test,
        tovector_test,
        splitr_test,splitl_test,
        tailr_test,taill_test,
        dropr_test,dropl_test,
        count_test
      ]
    }
  ].
%% Init system storages
%% No suite-wide setup: ecomet_bits is exercised as a pure library here.
init_per_suite(Config)->
  Config.
%% Nothing to tear down (see init_per_suite/1).
end_per_suite(_Config)->
  ok.
%% Groups need no per-group fixtures; pass Config through unchanged.
init_per_group(_,Config)->
  Config.
%% No per-group cleanup required.
end_per_group(_,_Config)->
  ok.
%% Cases are independent and stateless; no per-case setup.
init_per_testcase(_,Config)->
  Config.
%% No per-case cleanup required.
end_per_testcase(_,_Config)->
  ok.
%--------------------------------------------------------------
% Set bit
%--------------------------------------------------------------
%% ecomet_bits:set_bit/2 on the {Base, Bits} representation: creating from
%% 'none' and growing to the left (higher bit) and right (lower base).
set_bit_test(_Config) ->
  % Create bits
  {3,2#1}=ecomet_bits:set_bit(3,none),
  % Add bit to the left
  {3,2#101}=ecomet_bits:set_bit(5,{3,2#1}),
  % Add bit to the right
  {1,2#10101}=ecomet_bits:set_bit(1,{3,2#101}).
%--------------------------------------------------------------
% Reset bit
%--------------------------------------------------------------
%% ecomet_bits:reset_bit/2: clearing set/unset bits, boundary bits, and
%% collapsing back to 'none' when the last bit is cleared.
reset_bit_test(_Config) ->
  % Reset bit in empty bits
  none=ecomet_bits:reset_bit(3,none),
  % Reset bit, that not set
  {1,2#10101}=ecomet_bits:reset_bit(4,{1,2#10101}),
  % Reset bit
  {1,2#10001}=ecomet_bits:reset_bit(3,{1,2#10101}),
  % Reset msb
  {1,2#101}=ecomet_bits:reset_bit(5,{1,2#10101}),
  % Reset lsb
  {1,2#10100}=ecomet_bits:reset_bit(1,{1,2#10101}),
  % Reset last bit
  none=ecomet_bits:reset_bit(3,{3,2#1}).
%--------------------------------------------------------------
% Get bit
%--------------------------------------------------------------
%% ecomet_bits:get_bit/2 returns false for 'none' and unset bits,
%% true for set bits.
get_bit_test(_Config) ->
  % Get bit in empty bits
  false=ecomet_bits:get_bit(3,none),
  % Get bit, that not set
  false=ecomet_bits:get_bit(4,{1,2#100101}),
  % Get bit, that set
  true=ecomet_bits:get_bit(3,{1,2#10101}).
%--------------------------------------------------------------
% Shrink bits
%--------------------------------------------------------------
%% ecomet_bits:shrink/1 normalizes {Base, Bits}: 'none'/all-zero collapse to
%% 'none', and trailing zero bits are stripped by raising the base.
shrink_test(_Config) ->
  % Shrink empty bits
  none=ecomet_bits:shrink(none),
  % Shrink null
  none=ecomet_bits:shrink({5,0}),
  % No shrink possible
  {3,2#101}=ecomet_bits:shrink({3,2#101}),
  % Two trailing zero bits are stripped: base moves from 3 up to 5
  {5,2#101}=ecomet_bits:shrink({3,2#10100}).
%--------------------------------------------------------------
% bits operations
%--------------------------------------------------------------
%% ecomet_bits:oper('AND', _, _): 'none'/null/'start' handling, aligned and
%% misaligned bases, and the no-overlap (no common base) case.
test_and_oper(_Config)->
  % Left is none
  none=ecomet_bits:oper('AND',none,{3,2#1010111}),
  % Left is null
  none=ecomet_bits:oper('AND',{2,2#0},{3,2#1010111}),
  % Left is start
  {3,2#1010111}=ecomet_bits:oper('AND',start,{3,2#1010111}),
  % Right is none
  none=ecomet_bits:oper('AND',{3,2#1010111},none),
  % Null result
  none=ecomet_bits:oper('AND',{3,2#10101},{3,2#10}),
  % Equal base
  {3,2#1000110}=ecomet_bits:oper('AND',{3,2#1010111},{3,2#1000110}),
  % Right base
  {3,2#100}=ecomet_bits:oper('AND',{5,2#101},{3,2#1101}),
  % Left base
  {3,2#100}=ecomet_bits:oper('AND',{3,2#1101},{5,2#101}),
  % No common base
  none=ecomet_bits:oper('AND',{3,2#1101},{50,2#101}).
%% ecomet_bits:oper('OR', _, _): 'none' identity on either side and base
%% alignment in both directions.
test_or_oper(_Config)->
  % Left is none
  {3,2#1010111}=ecomet_bits:oper('OR',none,{3,2#1010111}),
  % Right is none
  {3,2#1010111}=ecomet_bits:oper('OR',{3,2#1010111},none),
  % Equal base
  {3,2#1010111}=ecomet_bits:oper('OR',{3,2#1010001},{3,2#110}),
  % Right base
  {3,2#11101}=ecomet_bits:oper('OR',{5,2#101},{3,2#1101}),
  % Left base
  {3,2#1010001101}=ecomet_bits:oper('OR',{3,2#1101},{10,2#101}).
%% ecomet_bits:oper('ANDNOT', _, _): subtract right-hand bits from the left,
%% including full subtraction to 'none' and disjoint (nothing removed) cases.
test_andnot_oper(_Config)->
  % Left is none
  none=ecomet_bits:oper('ANDNOT',none,{3,2#1010111}),
  % Right is none
  {3,2#1010111}=ecomet_bits:oper('ANDNOT',{3,2#1010111},none),
  % Equal base
  {3,2#1010001}=ecomet_bits:oper('ANDNOT',{3,2#1010111},{3,2#110}),
  % Null result
  none=ecomet_bits:oper('ANDNOT',{3,2#1010111},{1,2#11111111111}),
  % Right base
  {1,2#101010000}=ecomet_bits:oper('ANDNOT',{3,2#1010111},{1,2#1100}),
  % Left base
  {1,2#1010011}=ecomet_bits:oper('ANDNOT',{1,2#1010111},{3,2#11}),
  % Nothing to cut off
  {1,2#1010111}=ecomet_bits:oper('ANDNOT',{1,2#1010111},{9,2#11}).
%% ecomet_bits:oper('XOR', _, _): symmetric difference. Note that XOR of a
%% value with itself yields {Base, 0} here, not 'none' (shrink is separate).
test_xor_oper(_Config)->
  % Left is none
  {3,2#1010111}=ecomet_bits:oper('XOR',none,{3,2#1010111}),
  % Right is none
  {3,2#1010111}=ecomet_bits:oper('XOR',{3,2#1010111},none),
  % Equal base
  {3,2#1010001}=ecomet_bits:oper('XOR',{3,2#1010111},{3,2#110}),
  % Null result (converted to none after shrink)
  {3,2#0}=ecomet_bits:oper('XOR',{3,2#1010111},{3,2#1010111}),
  % Right base
  {1,2#101010001}=ecomet_bits:oper('XOR',{3,2#1010111},{1,2#1101}),
  % Left base
  {1,2#1000011}=ecomet_bits:oper('XOR',{1,2#1010111},{3,2#101}).
% Least significant bit search
%% ecomet_bits:lsb/1,2 — least significant set bit, optionally starting the
%% search above a given position; -1 signals "no (further) bit".
%% The large shifted values exercise the search over wide integers.
lsb_test(_Config) ->
  % +2
  5=ecomet_bits:lsb({3,2#100100}),
  % +5
  8=ecomet_bits:lsb({3,2#100100},5),
  % first
  3=ecomet_bits:lsb({3,2#100101}),
  % alone
  5=ecomet_bits:lsb({5,2#1}),
  % No next
  -1=ecomet_bits:lsb({5,2#1001},8),
  % None value
  -1=ecomet_bits:lsb(none),
  % Null value
  -1=ecomet_bits:lsb({3,2#0}),
  % Big value
  1025=ecomet_bits:lsb({3,2#10101 bsl 1022}),
  % Big value, big offset
  6022=ecomet_bits:lsb({5000,2#10101 bsl 1022}),
  % Very big value
  16025=ecomet_bits:lsb({3,2#10101 bsl 16022}),
  % Very big value, very big offset
  24022=ecomet_bits:lsb({8000,2#10101 bsl 16022}),
  % Very big value, very big offset, next
  24024=ecomet_bits:lsb({8000,2#10101 bsl 16022},24022).
% Most significant bit search
%% ecomet_bits:msb/1,2 — most significant set bit, optionally searching below
%% a given position; -1 signals "no (further) bit".
msb_test(_Config) ->
  % First
  8=ecomet_bits:msb({3,2#100100}),
  % Next
  5=ecomet_bits:msb({3,2#100100},8),
  % alone
  5=ecomet_bits:msb({5,2#1}),
  % No next
  -1=ecomet_bits:msb({5,2#1000},8),
  % None value
  -1=ecomet_bits:msb(none,8),
  % Null value
  -1=ecomet_bits:msb({4,2#0},8),
  % Big value
  -1=ecomet_bits:msb({10,2#0},8),
  % Very big value
  34012=ecomet_bits:msb({10,2#101 bsl 34000}),
  % Very big value, very big offset
  44002=ecomet_bits:msb({10000,2#101 bsl 34000}),
  % Very big value, very big offset, next
  44000=ecomet_bits:msb({10000,2#101 bsl 34000},44002).
% Iterator from lsb to msb
%% ecomet_bits:foldl/4 iterates set-bit positions from lsb to msb (so the
%% cons-accumulator yields them in descending order), with an optional
%% {From, To} index window over the set bits.
foldl_test(_Config) ->
  {0,[]}=ecomet_bits:foldl(fun(Bit,Res)->[Bit|Res] end,[],none,{none,none}),
  {3,[5,4,2]}=ecomet_bits:foldl(fun(Bit,Res)->[Bit|Res] end,[],{1,2#11010},{none,none}),
  {3,[2]}=ecomet_bits:foldl(fun(Bit,Res)->[Bit|Res] end,[],{1,2#11010},{0,1}),
  {3,[4,3]}=ecomet_bits:foldl(fun(Bit,Res)->[Bit|Res] end,[],{0,2#11010},{1,3}),
  {3,[9,8]}=ecomet_bits:foldl(fun(Bit,Res)->[Bit|Res] end,[],{5,2#11010},{1,3}),
  {9,[22,21,20]}=ecomet_bits:foldl(fun(Bit,Res)->[Bit|Res] end,[],{15,2#1011000011110011},{3,6}).
% Iterator from msb to lsb
%% ecomet_bits:foldr/4 — as foldl_test above but iterating from msb to lsb
%% (cons-accumulator therefore yields positions in ascending order).
foldr_test(_Config) ->
  {0,[]}=ecomet_bits:foldr(fun(Bit,Res)->[Bit|Res] end,[],none,{none,none}),
  {3,[2,4,5]}=ecomet_bits:foldr(fun(Bit,Res)->[Bit|Res] end,[],{1,2#11010},{none,none}),
  {3,[5]}=ecomet_bits:foldr(fun(Bit,Res)->[Bit|Res] end,[],{1,2#11010},{0,1}),
  {3,[1,3]}=ecomet_bits:foldr(fun(Bit,Res)->[Bit|Res] end,[],{0,2#11010},{1,3}),
  {3,[6,8]}=ecomet_bits:foldr(fun(Bit,Res)->[Bit|Res] end,[],{5,2#11010},{1,3}),
  {9,[20,21,22]}=ecomet_bits:foldr(fun(Bit,Res)->[Bit|Res] end,[],{15,2#1011000011110011},{3,6}).
%%-------------------------------------------------------------
%% Bubbles
%%-------------------------------------------------------------
%Bubles union
%% ecomet_bits:insert/2 merges two {From, To, Bits} bubbles, expanding the
%% range in either direction or adopting a covering range.
insert_test(_Config) ->
  %Exact inside
  {2,7,2#111011}=ecomet_bits:insert({2,7,2#101001},{3,6,2#1001}),
  %Right expand
  {1,7,2#1011011}=ecomet_bits:insert({2,7,2#101001},{1,4,2#1001}),
  %Left expand
  {2,12,2#10010101001}=ecomet_bits:insert({2,7,2#101001},{9,12,2#1001}),
  %Covered
  {0,9,2#1010100101}=ecomet_bits:insert({2,7,2#101001},{0,9,2#1000000001}).
%--------------------------------------------------------------
% Add bit
%--------------------------------------------------------------
%% ecomet_bits:bubbles_add/2 — adding bits grows an existing bubble, starts a
%% new distant bubble, and couples adjacent bubbles when a bridging bit lands
%% between them. (Case name keeps its historical "bubles" spelling because it
%% is exported and referenced from groups/0.)
bubles_add_test(_Config) ->
  % First bubble
  [{3,3,2#1}]=ecomet_bits:bubbles_add([],3),
  % Add bit to bubble
  [{3,5,2#101}]=ecomet_bits:bubbles_add([{3,3,2#1}],5),
  % New bubble
  [{3,5,2#101},{1000,1000,2#1}]=ecomet_bits:bubbles_add([{3,5,2#101}],1000),
  % Add bit to second
  [{3,5,2#101},{1000,1005,2#100001}]=ecomet_bits:bubbles_add([{3,5,2#101},{1000,1000,2#1}],1005),
  % Couple bubbles
  CoupleRes=ecomet_bits:bubbles_add([{3,5,2#101},{1000,1005,2#100001}],512),
  [{3,1005,_}]=CoupleRes,
  % Decouple bubbles
  [{3,5,2#101},{1000,1005,2#100001}]=ecomet_bits:bubbles_remove(CoupleRes,512).
%--------------------------------------------------------------
% Remove bit
%--------------------------------------------------------------
%% ecomet_bits:bubbles_remove/2 — removing a bit shrinks or splits a bubble's
%% range and drops bubbles that become empty.
bubbles_remove_test(_Config) ->
  % Remove bit from second bubble
  [{3,5,2#101}]=ecomet_bits:bubbles_remove([{3,5,2#101},{1000,1000,2#1}],1000),
  % Remove bit, that not set
  [{3,5,2#101},{1000,1000,2#1}]=ecomet_bits:bubbles_remove([{3,5,2#101},{1000,1000,2#1}],1005),
  % Remove bit from first bubble
  [{4,5,2#11},{1000,1000,2#1}]=ecomet_bits:bubbles_remove([{3,5,2#111},{1000,1000,2#1}],3),
  % Remove bit from first bubble
  [{3,5,2#101},{1000,1000,2#1}]=ecomet_bits:bubbles_remove([{3,5,2#111},{1000,1000,2#1}],4),
  % Remove bit from first bubble
  [{5,5,2#1},{1000,1000,2#1}]=ecomet_bits:bubbles_remove([{3,5,2#101},{1000,1000,2#1}],3),
  % Remove first bubble
  [{1000,1000,2#1}]=ecomet_bits:bubbles_remove([{5,5,2#1},{1000,1000,2#1}],5),
  % Remove last bubble
  []=ecomet_bits:bubbles_remove([{1000,1000,2#1}],1000).
%--------------------------------------------------------------
% Bubbles to bits
%--------------------------------------------------------------
%% ecomet_bits:bubbles_to_bits/1 flattens a bubble list into a single
%% {Base, Bits} pair; the expected value is built here by shifting the second
%% bubble's bits to its offset relative to the first bubble's base.
bubbles_to_bits_test(_Config)->
  X1=2#101,
  X2=2#100001,
  V=X1 bor (X2 bsl (1000-3)),
  {3,V}=ecomet_bits:bubbles_to_bits([{3,5,X1},{1000,1005,X2}]).
%--------------------------------------------------------------
% Build bitsring of defined length with 'true' bits
%--------------------------------------------------------------
%% ecomet_bits:fill_bits/1 builds an integer with N low 'true' bits.
fill_test(_Config) ->
  2#0=ecomet_bits:fill_bits(0),
  2#11=ecomet_bits:fill_bits(2),
  2#1111=ecomet_bits:fill_bits(4),
  2#1111111111=ecomet_bits:fill_bits(10).
%--------------------------------------------------------------
% Invert bit-string
%--------------------------------------------------------------
%% ecomet_bits:notx/1,2 inverts a bit-string; the /2 variant inverts within
%% an explicit width (so leading zeros become ones).
notx_test(_Config) ->
  2#0=ecomet_bits:notx(2#1),
  2#00110101=ecomet_bits:notx(2#11001010),
  2#11=ecomet_bits:notx(2#0,2),
  2#1110=ecomet_bits:notx(2#1,4).
%--------------------------------------------------------------
% Prepare bit-string for vector operations
%--------------------------------------------------------------
%% ecomet_bits:to_vector/1 turns {Base, Bits} into the 3-tuple vector form
%% {Bits, InvertedBits, {High, Low}} used by the split/tail/drop operations.
tovector_test(_Config)->
  {2#1,2#0,{0,0}}=ecomet_bits:to_vector({0,2#1}),
  {2#11001010,2#00110101,{9,2}}=ecomet_bits:to_vector({2,2#11001010}),
  {2#1011000011110011,2#0100111100001100,{30,15}}=ecomet_bits:to_vector({15,2#1011000011110011}).
%--------------------------------------------------------------
% Splitting continuous bits from right of the bit-string
%--------------------------------------------------------------
%% ecomet_bits:splitr/2 peels runs of equal bits (1 or 0, as requested) off
%% the right end of a vector, returning {RunLength, RemainingVector};
%% 'none' marks full consumption. The sequence below walks 2#11001010 down
%% bit-run by bit-run.
splitr_test(_Config) ->
  {1,none}=ecomet_bits:splitr(1,{2#1,2#0,{0,0}}),
  V00=ecomet_bits:to_vector({0,2#11001010}),
  {2,V01}=ecomet_bits:splitr(1,V00),
  {0,V00}=ecomet_bits:splitr(0,V00),
  % 2#001010
  {0,V01}=ecomet_bits:splitr(1,V01),
  {2,V02}=ecomet_bits:splitr(0,V01),
  % 2#1010
  {0,V02}=ecomet_bits:splitr(0,V02),
  {1,V03}=ecomet_bits:splitr(1,V02),
  % 2#010
  {0,V03}=ecomet_bits:splitr(1,V03),
  {1,V04}=ecomet_bits:splitr(0,V03),
  % 2#10
  {0,V04}=ecomet_bits:splitr(0,V04),
  {1,V05}=ecomet_bits:splitr(1,V04),
  % 2#0
  {0,V05}=ecomet_bits:splitr(1,V05),
  {1,none}=ecomet_bits:splitr(0,V05),
  % Tail test
  {4,none}=ecomet_bits:splitr(1,ecomet_bits:to_vector({0,2#1111})).
%--------------------------------------------------------------
% Splitting continuous bits from left of the bit-string
%--------------------------------------------------------------
%% ecomet_bits:splitl/2 — mirror of splitr_test above, peeling bit-runs off
%% the left end of the vector.
splitl_test(_Config) ->
  {1,none}=ecomet_bits:splitl(1,{2#1,2#0,{0,0}}),
  V00=ecomet_bits:to_vector({0,2#11001010}),
  {0,V00}=ecomet_bits:splitl(1,V00),
  {1,V01}=ecomet_bits:splitl(0,V00),
  % 2#1100101
  {0,V01}=ecomet_bits:splitl(0,V01),
  {1,V02}=ecomet_bits:splitl(1,V01),
  % 2#110010
  {0,V02}=ecomet_bits:splitl(1,V02),
  {1,V03}=ecomet_bits:splitl(0,V02),
  % 2#11001
  {0,V03}=ecomet_bits:splitl(0,V03),
  {1,V04}=ecomet_bits:splitl(1,V03),
  % 2#1100
  {0,V04}=ecomet_bits:splitl(1,V04),
  {2,V05}=ecomet_bits:splitl(0,V04),
  % 2#11
  {0,V05}=ecomet_bits:splitl(0,V05),
  {2,none}=ecomet_bits:splitl(1,V05),
  % Tail test
  {4,none}=ecomet_bits:splitl(1,ecomet_bits:to_vector({0,2#1111})).
%--------------------------------------------------------------
% Drop bits from right of the vector
%--------------------------------------------------------------
%% ecomet_bits:tailr/2 drops N bits from the right of a vector; dropping the
%% whole width (or more) yields 'none'.
tailr_test(_Config)->
  {2#11001010,2#00110101,{7,0}}=ecomet_bits:tailr(0,{2#11001010,2#00110101,{7,0}}),
  none=ecomet_bits:tailr(8,{2#11001010,2#00110101,{7,0}}),
  none=ecomet_bits:tailr(10,{2#11001010,2#00110101,{7,0}}),
  {2#001010,2#110101,{5,0}}=ecomet_bits:tailr(2,{2#11001010,2#00110101,{7,0}}),
  {2#0,2#1,{0,0}}=ecomet_bits:tailr(5,{2#001010,2#110101,{5,0}}).
%--------------------------------------------------------------
% Drop bits from left of the vector
%--------------------------------------------------------------
%% ecomet_bits:taill/2 drops N bits from the left of a vector; dropping the
%% whole width (or more) yields 'none'.
taill_test(_Config)->
  {2#11001010,2#00110101,{7,0}}=ecomet_bits:taill(0,{2#11001010,2#00110101,{7,0}}),
  none=ecomet_bits:taill(8,{2#11001010,2#00110101,{7,0}}),
  none=ecomet_bits:taill(10,{2#11001010,2#00110101,{7,0}}),
  {2#110010,2#001101,{7,2}}=ecomet_bits:taill(2,{2#11001010,2#00110101,{7,0}}),
  {2#1,2#0,{7,7}}=ecomet_bits:taill(5,{2#110010,2#001101,{7,2}}).
%--------------------------------------------------------------
% Drop defined count of the 'true' bits from right of the vector
%--------------------------------------------------------------
%% ecomet_bits:dropr/2 drops N 'true' bits from the right of a vector,
%% returning {Shortfall, RemainingVector}; Shortfall > 0 means the vector
%% held fewer set bits than requested.
dropr_test(_Config)->
  {0,{2#11001010,2#00110101,{7,0}}}=ecomet_bits:dropr(0,{2#11001010,2#00110101,{7,0}}),
  {0,{2#1010,2#0101,{3,0}}}=ecomet_bits:dropr(2,{2#11001010,2#00110101,{7,0}}),
  {0,{2#10,2#01,{1,0}}}=ecomet_bits:dropr(3,{2#11001010,2#00110101,{7,0}}),
  {0,none}=ecomet_bits:dropr(4,{2#11001010,2#00110101,{7,0}}),
  {1,none}=ecomet_bits:dropr(5,{2#11001010,2#00110101,{7,0}}),
  {0,{2#110000,2#001111,{5,0}}}=ecomet_bits:dropr(2,{2#11110000,2#00001111,{7,0}}),
  {0,none}=ecomet_bits:dropr(4,{2#11110000,2#00001111,{7,0}}),
  {6,none}=ecomet_bits:dropr(10,{2#11110000,2#00001111,{7,0}}).
%--------------------------------------------------------------
% Drop defined count of the 'true' bits from right of the vector
%--------------------------------------------------------------
%% ecomet_bits:dropl/2 — as dropr_test above, but dropping 'true' bits from
%% the left of the vector.
dropl_test(_Config)->
  {0,{2#11001010,2#00110101,{7,0}}}=ecomet_bits:dropl(0,{2#11001010,2#00110101,{7,0}}),
  {0,{2#11,2#00,{7,6}}}=ecomet_bits:dropl(2,{2#11001010,2#00110101,{7,0}}),
  {0,{2#1,2#0,{7,7}}}=ecomet_bits:dropl(3,{2#11001010,2#00110101,{7,0}}),
  {0,none}=ecomet_bits:dropl(4,{2#11001010,2#00110101,{7,0}}),
  {1,none}=ecomet_bits:dropl(5,{2#11001010,2#00110101,{7,0}}),
  {0,{2#11,2#00,{7,6}}}=ecomet_bits:dropl(2,{2#11110000,2#00001111,{7,0}}),
  {0,none}=ecomet_bits:dropl(4,{2#11110000,2#00001111,{7,0}}),
  {6,none}=ecomet_bits:dropl(10,{2#11110000,2#00001111,{7,0}}).
%--------------------------------------------------------------
% Get count of the 'true' bits in the bit-string
%--------------------------------------------------------------
%% ecomet_bits:count/2 counts 'true' bits in a vector, added to an initial
%% accumulator; 'none' contributes nothing.
%% FIX: removed stray non-Erlang text that had been fused onto the final line
%% (a dataset/extraction artifact after the terminating period).
count_test(_Config)->
  0=ecomet_bits:count(none,0),
  1=ecomet_bits:count(ecomet_bits:to_vector({5,2#10000000}),0),
  6=ecomet_bits:count(ecomet_bits:to_vector({0,2#10000000}),5),
  8=ecomet_bits:count(ecomet_bits:to_vector({0,2#1101111000001100}),0),
  4=ecomet_bits:count(ecomet_bits:to_vector({0,2#11001010}),0).
%%% ==========================================================================
%%% @author <NAME>
%%% @copyright 2018 <NAME>
%%% @version .01
%%% @doc
%%% License:
%%% File: ep_bezier.erl
%%% Description: Display bezier curves
%%% @end
%%% ==========================================================================
-module (ep_bezier).
-export ([create/4]).
-export([bezier/3]).
-export([from/1, control1/1, control2/1, to/1]).
-export([width/1, color/1, format/1]).
-export ([update_from/2, update_control1/2, update_control2/2, update_to/2]).
-export ([update_width/2, update_color/2, update_format/2]).
-export([features/1]).
-include("../../include/ep.hrl").
-define(DEFAULT_WIDTH, 1).
-define(DEFAULT_DASH, solid).
-define(DEFAULT_COLOR, black).
-define(DEFAULT_FORMAT, letter).
%% ***********************************************************
%% Create bezier map
%% ***********************************************************
%% @doc Create bezier map
-spec create(Pt1 :: tuple(),
             Pt2 :: tuple(),
             Pt3 :: tuple(),
             Pt4 :: tuple()) -> map().

%% Build a bezier map from the four curve points with default width, color
%% and page format.
%% FIX: the map now carries a format key — format/1 and update_format/2 read
%% and write it, and ?DEFAULT_FORMAT was previously defined but never used.
%% NOTE(review): the middle points are stored under pt2/pt3 (as bezier/3
%% reads them), while the control1/control2 accessors read keys that are
%% never set here — confirm which naming is intended.
create(Pt1, Pt2, Pt3, Pt4) ->
   #{ from   => Pt1
    , pt2    => Pt2
    , pt3    => Pt3
    , to     => Pt4
    , width  => ?DEFAULT_WIDTH
    , color  => ?DEFAULT_COLOR
    , format => ?DEFAULT_FORMAT
    }.
%% ***********************************************************
%% Bezier to pdf
%% ***********************************************************
%% Render the bezier described by BezierMap into PDF, imposing its four
%% points onto the first page position of Job for the job's paper stock.
%% Returns the PDF handle.
bezier(PDF, Job, BezierMap) ->
   PaperStock = maps:get(paper_stock, Job),
   PagePositions = ep_job:page_positions(Job, 1),
   % only the first page position is used for imposition
   [Position | _] = PagePositions,
   From = maps:get(from, BezierMap),
   Pt2 = maps:get(pt2, BezierMap),
   Pt3 = maps:get(pt3, BezierMap),
   To = maps:get(to, BezierMap),
   Width = maps:get(width, BezierMap),
   Color = maps:get(color, BezierMap),
   FromA = ep_lib:impose_xy(From, Position, PaperStock),
   Pt2A = ep_lib:impose_xy(Pt2, Position, PaperStock),
   Pt3A = ep_lib:impose_xy(Pt3, Position, PaperStock),
   ToA = ep_lib:impose_xy(To, Position, PaperStock),
   eg_pdf:save_state(PDF),
   % NOTE(review): move_to uses the raw From while bezier/5 gets the imposed
   % points (FromA..ToA) — looks inconsistent; confirm against the eg_pdf API
   % whether the initial move should use FromA instead.
   eg_pdf:move_to(PDF, From),
   eg_pdf:set_line_width(PDF, Width),
   eg_pdf:bezier(PDF, FromA, Pt2A, Pt3A, ToA),
   eg_pdf:set_stroke_color(PDF, Color),
   eg_pdf:path(PDF, stroke),
   eg_pdf:restore_state(PDF),
   PDF.
%% ***********************************************************
%% Get bezier attributes
%% ***********************************************************
%% @doc Return start-of-bezier coordinates (the 'from' key set by create/4)
-spec from(BezierMap :: map()) -> tuple().

from(BezierMap) ->
   maps:get(from, BezierMap).
%% @doc Return control point 1
%% NOTE(review): reads key 'control1', which create/4 never sets (it stores
%% the point under 'pt2') — this crashes with badkey on a freshly created
%% map unless update_control1/2 was called first; confirm intended keys.
-spec control1(BezierMap :: map()) -> tuple().

control1(BezierMap) ->
   maps:get(control1, BezierMap).
%% @doc Return control point 2
%% NOTE(review): reads key 'control2', which create/4 never sets (it stores
%% the point under 'pt3') — see the matching note on control1/1.
-spec control2(BezierMap :: map()) -> tuple().

control2(BezierMap) ->
   maps:get(control2, BezierMap).
%% @doc Return end-of-bezier coordinates (the 'to' key set by create/4)
-spec to(BezierMap :: map()) -> tuple().

to(BezierMap) ->
   maps:get(to, BezierMap).
%% @doc Return width of bezier (stroke width; defaults to ?DEFAULT_WIDTH)
-spec width(BezierMap :: map()) -> integer().

width(BezierMap) ->
   maps:get(width, BezierMap).
%% @doc Return color of bezier
%% Colors: white, silver, gray, black, maroon, red, fuschia,
%% purple, lime, green, olive, yellow, navy, blue, teal, aqua
%% FIX: spec said integer(), but the stored colors are atoms (see the
%% ?DEFAULT_COLOR default and the color list above).
-spec color(BezierMap :: map()) -> atom().

color(BezierMap) ->
   maps:get(color, BezierMap).
%% @doc Return page format (e.g. letter — see ?DEFAULT_FORMAT)
%% FIX: spec said integer(), but formats are atoms (?DEFAULT_FORMAT is
%% 'letter' and update_format/2 takes an atom).
-spec format(BezierMap :: map()) -> atom().

format(BezierMap) ->
   maps:get(format, BezierMap).
%% @doc Return style of bezier as a {Width, Color} pair
%% FIX: spec said integer(), but the function returns a 2-tuple of the
%% stroke width (integer) and color (atom).
-spec features(BezierMap :: map()) -> {integer(), atom()}.

features(BezierMap) ->
   Width = width(BezierMap),
   Color = color(BezierMap),
   {Width, Color}.
%% ***********************************************************
%% Update bezier attributes
%% ***********************************************************
%% doc Update beginning-of-bezier coordinates; adds the key if absent
-spec update_from(From :: tuple(),
                  BezierMap :: map()) -> map().

update_from(From, BezierMap) ->
   BezierMap#{from => From}.
%% doc Update control point 1; adds the key if absent
-spec update_control1(Control1 :: tuple(),
                      BezierMap :: map()) -> map().

update_control1(Control1, BezierMap) ->
   BezierMap#{control1 => Control1}.
%% doc Update control point 2; adds the key if absent
%% FIX: the value was stored under the key 'control1', silently clobbering
%% control point 1 and leaving control2/1 unable to see the update.
-spec update_control2(Control2 :: tuple(),
                      BezierMap :: map()) -> map().

update_control2(Control2, BezierMap) ->
   maps:put(control2, Control2, BezierMap).
%% doc Update end-of-bezier coordinates; adds the key if absent
-spec update_to(To :: tuple(),
                BezierMap :: map()) -> map().

update_to(To, BezierMap) ->
   BezierMap#{to => To}.
%% @doc Update stroke width of the bezier; adds the key if absent
-spec update_width(Width :: integer(),
                   BezierMap :: map()) -> map().

update_width(Width, BezierMap) ->
   BezierMap#{width => Width}.
%% @doc Update color of line: e.g. white, silver, gray,
%% black, maroon, red, fuschia, purple, lime, green,
%% olive, yellow, navy, blue, teal, aqua
%% Adds the key if absent.
-spec update_color(Color :: atom(),
                   BezierMap :: map()) -> map().

update_color(Color, BezierMap) ->
   BezierMap#{color => Color}.
%% @doc Update page format
%% SEE: rp(ep_format:formats().
%% Adds the key if absent.
-spec update_format(Format :: atom(),
                    BezierMap :: map()) -> map().

update_format(Format, BezierMap) ->
   BezierMap#{format => Format}.
%% @doc
%% A VM implementation for the {@link intcode. intcode} instruction set.
-module(intcode_server).
-include("intcode.hrl").
-export([
% GenServer specifics
init/1, handle_call/3, handle_cast/2,
% IntcodeIO implementations
push/2, poll/1, poll_or_notify/2, as_list/1,
% Start and control
start_link/3, new/4, new/3, start/1, stop/1, run_sync/3,
% Getters
state/1, output/1, memory/1, 'finished?'/1
]).
-import(intcode, [
instruction/1,
increment_pc/3,
advance/3,
read_instruction/1
]).
-behaviour(gen_server).
-behaviour(intcode_io).
-record(?MODULE, {
reference :: pid()
}).
-type ref() :: #?MODULE{reference :: pid()}.
%% Describes the identifier used to reference an instance of the server as
%% defined by this module.
%% @doc Starts a VM node with the provided memory and input and output
%% providers (that implement intcode_io).
%% Convenience wrapper: delegates to start_link/5 with no name and no
%% gen_server options.
-spec start_link(
    Memory :: list(value()),
    Input :: input(),
    Output :: output()) -> {ok, pid()} | {error, {already_started, pid()}} | {error, Reason :: term()}.
start_link(Memory, Input, Output) -> start_link(Memory, Input, Output, nil, []).
%% @doc Starts a VM node with the provided memory, input, output, name, and
%% gen_server options. The name is used for debugging purposes.
%% The head requires a non-empty memory list; the first cell doubles as the
%% initial instruction cached in the pc record. Memory is stored as an array
%% with default 0, so reads past the program yield zero.
-spec start_link(
    Memory :: list(value()),
    Input :: input(),
    Output :: output(),
    Name :: string() | nil,
    Opts :: gen:options()) -> {ok, pid()} | {error, {already_started, pid()}} | {error, Reason :: term()}.
start_link([I | _] = Memory, Input, Output, Name, Opts) ->
  MachineState = #machine_state{
    pc = #pc{pc = 0, instruction = I},
    mem = array:from_list(Memory, 0),
    output = Output,
    relbase = 0
  },
  VmState = #vm_state{
    name = Name,
    % placeholder callback; init/1 replaces it with one that restarts the VM
    input_callback = fun(_, _) -> ok end
  },
  gen_server:start_link(?MODULE, {MachineState, VmState}, Opts).
%% @doc Ensures that `Provider' is an {@type intcode_io:intcode_io()};
%% a plain list of values is wrapped into an intcode_io_queue.
-spec ensure_intcode_io(list(value()) | intcode_io:intcode_io()) -> intcode_io:intcode_io().
ensure_intcode_io(Provider) when is_list(Provider) ->
  intcode_io_queue:new(Provider);
ensure_intcode_io(Provider) when is_tuple(Provider) andalso is_atom(element(1, Provider)) ->
  Provider.
%% @doc Creates a new instance of an intcode_io-compatible structure that
%% represents an IntCode VM thread.
%% Input/Output may be given as plain value lists; they are wrapped via
%% ensure_intcode_io/1. The VM is started (cast 'start') before returning.
-spec new(
    Memory :: list(value()),
    Input :: input() | list(value()),
    Output :: output() | list(),
    Name :: string() | nil) -> ref().
new(Memory, Input, Output, Name) ->
  {ok, Ref} = start_link(Memory, ensure_intcode_io(Input), ensure_intcode_io(Output), Name, []),
  Return = #?MODULE{reference = Ref},
  start(Return),
  Return.
%% @doc Creates a new instance of an intcode_io-compatible structure that
%% represents an IntCode VM thread.
%% This method defaults Name to nil.
-spec new(
    Memory :: list(value()),
    Input :: input() | list(value()),
    Output :: output() | list(value())) -> ref().
new(Memory, Input, Output) -> new(Memory, Input, Output, nil).
%% @doc Return the current machine state (pc, current memory).
%% Synchronous gen_server call (default 5s timeout applies).
-spec state(Reference :: ref()) -> machine_state().
state(#?MODULE{reference = Ref}) -> gen_server:call(Ref, machine_state).
%% @doc Pop one element from the output buffer.
%% intcode_io callback; returns the value together with the (unchanged)
%% reference so it composes like the other intcode_io providers.
-spec poll(Reference) -> {value(), Reference} when Reference :: ref().
poll(#?MODULE{reference = Ref} = Reference) -> {gen_server:call(Ref, poll), Reference}.
%% @doc Pop one element from the output buffer.
%% If the buffer is empty, Callback is registered server-side.
%% NOTE(review): the spec advertises nil for the empty case, but the server's
%% handle_call replies the atom 'sleep' when the buffer is empty, so this
%% returns {sleep, Reference} — confirm which contract is intended.
-spec poll_or_notify(Reference, Callback :: fun(() -> any())) -> nil | {value(), Reference} when Reference :: ref().
poll_or_notify(#?MODULE{reference = Ref} = Reference, Callback) ->
  {gen_server:call(Ref, {poll_or_notify, Callback}), Reference}.
%% @deprecated
%% @equiv as_list(Reference)
%% Kept only for callers of the old name; delegates directly.
-spec output(ReferenceOrState :: ref() | machine_state()) -> list(value()).
output(Reference) -> as_list(Reference).
%% @doc Get the output buffer as a list.
%% Accepts either a bare machine_state or a server reference (in which case
%% the state is fetched synchronously first).
-spec as_list(ReferenceOrState :: ref() | machine_state()) -> list(value()).
as_list(#machine_state{output = Output}) -> intcode_io:as_list(Output);
as_list(#?MODULE{} = Reference) -> as_list(state(Reference)).
%% @doc Get the current memory as a flat list of cells.
%% Accepts either a bare machine_state or a server reference.
-spec memory(ReferenceOrState :: ref() | machine_state()) -> list(value()).
memory(#machine_state{mem = Memory}) -> array:to_list(Memory);
memory(#?MODULE{} = Reference) -> memory(state(Reference)).
%% @doc Wait for the VM to finish (blocks in gen_server:call until the VM
%% halts, or returns immediately if it already has).
%% FIX: the spec claimed {ok, normal}, but the server replies the bare atom
%% 'normal' (both in the already-halted handle_call clause and via
%% gen_server:reply to parked listeners), so that is what callers receive.
-spec 'finished?'(Reference :: ref()) -> normal.
'finished?'(#?MODULE{reference = Ref}) -> gen_server:call(Ref, wait_finish).
%% @doc Start (or restart) the VM.
%% Asynchronous cast; the server runs the interpreter loop until it halts
%% or blocks waiting for input.
-spec start(Reference :: ref()) -> ok.
start(#?MODULE{reference = Ref}) -> gen_server:cast(Ref, start).
%% @doc Shut down the VM (asynchronous; the server stops with reason normal).
-spec stop(Reference :: ref()) -> ok.
stop(#?MODULE{reference = Ref}) -> gen_server:cast(Ref, stop).
%% @doc Push a new value onto this VM's input buffer (intcode_io callback).
%% Fire-and-forget cast; returns the reference unchanged for chaining.
-spec push(Reference, Value :: value()) -> Reference when Reference :: ref().
push(#?MODULE{reference = Ref} = Reference, Value) ->
  gen_server:cast(Ref, {input, Value}),
  Reference.
%% @doc Execute the program synchronously: spin up a VM, block until it
%% halts, capture the final machine state, then shut the server down.
-spec run_sync(
    Program :: list(value()),
    Input :: list(value()) | input(),
    Output :: list(value()) | output()) -> machine_state().
run_sync(Program, Input, Output) ->
  Server = new(Program, Input, Output),
  'finished?'(Server),
  State = state(Server),
  stop(Server),
  State.
%% @doc Initializes this VM.
%% Replaces the placeholder input_callback with one that casts 'start' back
%% to this server, so a sleeping VM resumes when input arrives.
-spec init(InitialState :: State) -> {ok, State} when State :: {machine_state(), vm_state()}.
init({MachineState, VmState}) ->
  R = self(),
  {ok, {MachineState, VmState#vm_state{input_callback = fun(_, _) -> start(#?MODULE{reference = R}) end}}}.
%% @doc Implement the handle_call function as specified by {@link gen_server}.
%%
%% The @spec'd type is a general type, as `edoc' has issues with rendering very
%% complex typespecs.
%% This is the actual typespec (which is the same as the type specified by the
%% `-spec' annotation on this method.
%% ```
%% (Action, From :: {pid(), Tag :: term()}, State) -> {reply, machine_state(), State}
%% when Action :: machine_state, State :: {machine_state(), vm_state()};
%% (Action, From :: {pid(), Tag :: term()}, State) -> {reply, value(), State}
%% when Action :: poll, State :: {machine_state(), vm_state()};
%% (Action, From :: {pid(), Tag :: term()}, State) -> {reply, sleep | value(), State}
%% when Action :: {poll_or_notify, fun(() -> any())}, State :: {machine_state(), vm_state()};
%% (Action, From :: {pid(), Tag :: term()}, State) -> {reply, list(value()), State}
%% when Action :: as_list, State :: {machine_state(), vm_state()};
%% (Action, From :: {pid(), Tag :: term()}, State) -> {reply, normal, State} | {noreply, State}
%% when Action :: wait_finish, State :: {machine_state(), vm_state()}.
%% '''
%% The actual `-spec' typespec can be inspected for this same type.
%%
%% @spec handle_call(Command :: Command, From :: {pid(), Tag :: term()}, State) -> {reply, Reply, State} | {noreply, State}
%% Command = machine_state | poll | {poll_or_notify, fun(() -> any())} | as_list | wait_finish
%% State = {machine_state(), vm_state()}
%% Reply = machine_state() | value() | nil | list(value()) | normal
-spec handle_call
    (Action, From :: {pid(), Tag :: term()}, State) -> {reply, machine_state(), State}
        when Action :: machine_state, State :: {machine_state(), vm_state()};
    (Action, From :: {pid(), Tag :: term()}, State) -> {reply, value(), State}
        when Action :: poll, State :: {machine_state(), vm_state()};
    (Action, From :: {pid(), Tag :: term()}, State) -> {reply, sleep | value(), State}
        when Action :: {poll_or_notify, fun(() -> any())}, State :: {machine_state(), vm_state()};
    (Action, From :: {pid(), Tag :: term()}, State) -> {reply, list(value()), State}
        when Action :: as_list, State :: {machine_state(), vm_state()};
    (Action, From :: {pid(), Tag :: term()}, State) -> {reply, normal, State} | {noreply, State}
        when Action :: wait_finish, State :: {machine_state(), vm_state()}.
%% Return the current machine state unchanged.
handle_call(machine_state, _, {MachineState, _} = State) ->
  {reply, MachineState, State};
%% Pop one value from the output buffer.
%% FIX: the new gen_server state must be the {MachineState, VmState} pair.
%% The original wrapped only the machine state in a 1-tuple and passed the
%% vm_state record in the gen_server Timeout position, corrupting the state.
handle_call(poll, _, {MachineState, VmState}) ->
  {V, Q} = intcode_io:poll(MachineState#machine_state.output),
  {reply, V, {MachineState#machine_state{output = Q}, VmState}};
%% Pop one value, or register Callback and reply 'sleep' when the buffer is
%% empty. (Spec fixed from 'nil' to 'sleep' to match the actual reply;
%% unused Q in the wait branch renamed to _Q to silence the warning.)
handle_call({poll_or_notify, Callback}, _, {MachineState, VmState}) ->
  case intcode_io:poll_or_notify(MachineState#machine_state.output, Callback) of
    {wait, _Q} -> {reply, sleep, {MachineState, VmState}};
    {R, Q} -> {reply, R, {MachineState#machine_state{output = Q}, VmState}}
  end;
%% Snapshot the output buffer as a list.
handle_call(as_list, _, {MachineState, _} = State) ->
  {reply, intcode_io:as_list(MachineState#machine_state.output), State};
%% VM already halted (opcode 99 cached in the pc): reply immediately.
handle_call(wait_finish, _, {#machine_state{pc = #pc{instruction = 99}}, _} = State) ->
  {reply, normal, State};
%% Otherwise park the caller; handle_cast(start, _) replies on halt.
handle_call(wait_finish, From, {MachineState, VmState}) ->
  {noreply, {MachineState, VmState#vm_state{shutdown_listeners = [From | VmState#vm_state.shutdown_listeners]}}}.
%% @doc Implement the handle_cast function as specified by {@link gen_server}.
%%
%% The @spec'd type is a general type, as `edoc' has issues with rendering very
%% complex typespecs.
%% This is the actual typespec (which is the same as the type specified by the
%% `-spec' annotation on this method.
%% ```
%% (Action, State) -> {noreply, State}
%% when Action :: start | {input, value()} | {set_input, input()}, State :: {machine_state(), vm_state()};
%% (Action, State) -> {stop, normal, State}
%% when Action :: stop, State :: {machine_state(), vm_state()}.
%% '''
%% The actual `-spec' typespec can be inspected for this same type.
%%
%% @spec handle_cast(Action :: Action, State) -> {noreply, State} | {stop, normal, State}
%% Action = start | {input, value()} | {set_input, input()} | stop
%% State = {machine_state(), vm_state()}
-spec handle_cast
    (Action, State) -> {noreply, State}
        when Action :: start | {input, value()} | {set_input, input()}, State :: {machine_state(), vm_state()};
    (Action, State) -> {stop, normal, State}
        when Action :: stop, State :: {machine_state(), vm_state()}.
%% 'start': run the interpreter loop; on halt, wake every parked
%% wait_finish caller with 'normal'; on sleep, just keep the new state.
handle_cast(start, {MachineState, VmState}) ->
  case loop(MachineState, VmState) of
    {halt, NewMachineState, NewVmState} ->
      [gen_server:reply(Listener, normal) || Listener <- NewVmState#vm_state.shutdown_listeners],
      {noreply, {NewMachineState, NewVmState#vm_state{shutdown_listeners = []}}};
    {sleep, NewMachineState, NewVmState} ->
      {noreply, {NewMachineState, NewVmState}}
  end;
%% 'input': append a value to the input provider (does not resume the VM by
%% itself; the input provider's callback is responsible for that).
handle_cast({input, Value}, {MachineState, VmState}) ->
  NewVmState = VmState#vm_state{input = intcode_io:push(VmState#vm_state.input, Value)},
  {noreply, {MachineState, NewVmState}};
%% 'set_input': swap the whole input provider.
handle_cast({set_input, Input}, {MachineState, VmState}) ->
  {noreply, {MachineState, VmState#vm_state{input = Input}}};
%% 'stop': terminate the server normally.
handle_cast(stop, State) ->
  {stop, normal, State}.
%% @doc Runs the interpreter until input is required or the program is
%% finished. In those cases, the new machine state will be `sleep' or `halt'
%% respectively.
-spec loop(machine_state(), vm_state()) -> {sleep | halt, machine_state(), vm_state()}.
%% Fix: the head previously destructured pc and mem into Pc/Mem without
%% using them, producing unused-variable compiler warnings.
loop(MachineState, VmState) ->
    %% Decode the instruction at the current program counter and execute it;
    %% Arity is used to advance the program counter afterwards.
    {Arity, Function, Vs} = read_instruction(MachineState),
    case Function(Vs, MachineState, VmState) of
        {continue, NewMachineState, NewVmState} ->
            loop(advance(MachineState, NewMachineState, Arity), NewVmState);
        {sleep, NewMachineState, NewVmState} ->
            {sleep, advance(MachineState, NewMachineState, Arity), NewVmState};
        {halt, NewMachineState, NewVmState} ->
            {halt, advance(MachineState, NewMachineState, Arity), NewVmState}
    end.
-module(complete_a_task).
-author("<NAME>").
-export([
%% prod
completion_date/2,
add_days/2,
total_weeks_to_complete/1,
total_days_to_complete/1,
work_days_to_complete/1,
days_off/1,
number_of_weeks/2,
number_of_days/2,
today/0,
%% testing
test/0
]).
%% Config %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Total number of pages in the task (matches "current page number" in
%% completion_date/2).
total() -> 510.
%% Pages worked through per working day.
amount_per_day() -> 5.
%% Slack days added on top of the working-day estimate.
buffer_days() -> 17.
%% Rest days taken per week of work.
days_off_per_week() -> 1.
%%% Functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Returns the expected completion date
%% @param Date - erlang:date() - current date
%% @param Current - int - current page number
completion_date(Date, Current) ->
%% Remaining calendar days = work days + buffer + weekly rest days.
Days = total_days_to_complete(Current),
add_days(Date, Days).
%% Returns the calendar date that lies Days days after Date.
add_days(Date, Days) ->
    Gregorian = calendar:date_to_gregorian_days(Date),
    calendar:gregorian_days_to_date(Gregorian + Days).
%% Whole weeks until completion (truncating division).
total_weeks_to_complete(Current) ->
total_days_to_complete(Current) div 7.
%% Working days plus the configured buffer plus weekly rest days.
total_days_to_complete(Current) ->
work_days_to_complete(Current) + buffer_days() + days_off(Current).
%% One rest allowance (days_off_per_week()) per full week of work days.
days_off(Current) ->
(work_days_to_complete(Current) div 7) * days_off_per_week().
%% Whole days needed to process the remaining pages at amount_per_day().
work_days_to_complete(Current) ->
(total() - Current) div amount_per_day().
%% Returns the integer number of weeks to achieve the goal
%% Whole weeks between the two dates (truncating division).
number_of_weeks(Date1, Date2) ->
number_of_days(Date1, Date2) div 7.
%% Returns the integer number days diff between 2 dates
%% Returns the integer number of days from Date1 to Date2.
number_of_days(Date1, Date2) ->
    Midnight = {0, 0, 0},
    {Days, _Time} = calendar:time_difference({Date1, Midnight}, {Date2, Midnight}),
    Days.
%% Returns today's date
%% @return (tuple) {Year, Month, Day}
today() ->
%% Current UTC date; the time-of-day component is dropped.
{Today, _} = calendar:universal_time(),
Today.
%% Tests %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Runs every test; a failed expectation crashes with badmatch, so reaching
%% {ok} means all assertions held.
test() ->
test_completion_date(),
test_add_days(),
test_total_weeks_to_complete(),
test_total_days_to_complete(),
test_days_off(),
test_work_days_to_complete(),
test_number_of_weeks(),
test_number_of_days(),
{ok}.
%% Expected values below assume the config constants at the top of the
%% module (total() = 510, amount_per_day() = 5, etc.).
test_completion_date() ->
Date = {2019, 6, 15},
Current = 63,
{2019, 10, 11} = completion_date(Date, Current).
test_add_days() ->
Date = {2019, 6, 15},
Days = 4,
{2019, 6, 19} = add_days(Date, Days).
test_total_weeks_to_complete() ->
16 = total_weeks_to_complete(63).
test_total_days_to_complete() ->
118 = total_days_to_complete(63).
test_days_off() ->
14 = days_off(10).
test_work_days_to_complete() ->
89 = work_days_to_complete(63).
test_number_of_weeks() ->
Date1 = {2019, 6, 15},
Date2 = {2019, 6, 23},
1 = number_of_weeks(Date1, Date2).
test_number_of_days() ->
Date1 = {2019, 6, 15},
Date2 = {2019, 6, 19},
4 = number_of_days(Date1, Date2).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_set_view_util).
-export([detuple_kvs/2, expand_dups/2, expand_dups/3, partitions_map/2]).
-export([build_bitmask/1, decode_bitmask/1]).
-export([make_btree_purge_fun/1]).
-export([make_key_options/1]).
-include_lib("couch_set_view/include/couch_set_view.hrl").
%% Converts couple-form KVs [{{Key, DocId}, {PartId, Value}}] into the
%% [[Key, DocId], Value] list form, dropping the partition id. The result
%% is reverse(Acc) followed by the converted input, in input order.
detuple_kvs([], Acc) ->
    lists:reverse(Acc);
detuple_kvs([{{Key, DocId}, {_PartId, Value}} | Rest], Acc) ->
    detuple_kvs(Rest, [[[Key, DocId], Value] | Acc]).
%% Flattens 'dups' groups into individual {Key, {PartId, Value}} pairs.
%% Note: because each group is prepended en bloc and the accumulator is
%% reversed once at the end, values of a dups group come out in reverse
%% order — this mirrors the original implementation exactly.
expand_dups([], Acc) ->
    lists:reverse(Acc);
expand_dups([{Key, {dups, Vals}} | Rest], Acc) ->
    Tagged = [{Key, V} || V <- Vals],
    expand_dups(Rest, Tagged ++ Acc);
expand_dups([{_Key, {_PartId, _Val}} = KV | Rest], Acc) ->
    expand_dups(Rest, [KV | Acc]).
%% Like expand_dups/2 but drops entries whose partition bit is not set in
%% Abitmask (bitmask of partitions to keep).
expand_dups([], _Abitmask, Acc) ->
lists:reverse(Acc);
%% NOTE(review): only the first entry's PartId is tested for the whole dups
%% group — this assumes a group is homogeneous per partition; confirm.
expand_dups([{Key, {dups, [{PartId, _} | _] = Vals}} | Rest], Abitmask, Acc) ->
case (1 bsl PartId) band Abitmask of
0 ->
expand_dups(Rest, Abitmask, Acc);
_ ->
Expanded = lists:map(fun({_PartId, _Val} = V) -> {Key, V} end, Vals),
expand_dups(Rest, Abitmask, Expanded ++ Acc)
end;
expand_dups([{_Key, {PartId, _Val}} = Kv | Rest], Abitmask, Acc) ->
case (1 bsl PartId) band Abitmask of
0 ->
expand_dups(Rest, Abitmask, Acc);
_ ->
expand_dups(Rest, Abitmask, [Kv | Acc])
end.
%% Ors the partition id of every KV into BitMap. For a dups group only the
%% head entry's partition id is consulted (as in the original clause).
partitions_map(KVs, BitMap) ->
    lists:foldl(
        fun({_Key, {dups, [{PartitionId, _Val} | _]}}, Acc) ->
                Acc bor (1 bsl PartitionId);
           ({_Key, {PartitionId, _Val}}, Acc) ->
                Acc bor (1 bsl PartitionId)
        end,
        BitMap, KVs).
%% Folds a list of non-negative partition ids into a bitmask with one bit
%% set per id. Crashes on anything that is not a non-negative integer.
build_bitmask(ActiveList) ->
    build_bitmask(ActiveList, 0).

build_bitmask(PartIds, Seed) ->
    lists:foldl(
        fun(PartId, Acc) when is_integer(PartId), PartId >= 0 ->
                Acc bor (1 bsl PartId)
        end,
        Seed, PartIds).
%% Inverse of build_bitmask/1: lists the positions of all set bits in
%% ascending order.
decode_bitmask(Bitmask) ->
    decode_bitmask(Bitmask, 0).

decode_bitmask(0, _PartId) ->
    [];
decode_bitmask(Bitmask, PartId) when Bitmask band 1 =:= 1 ->
    [PartId | decode_bitmask(Bitmask bsr 1, PartId + 1)];
decode_bitmask(Bitmask, PartId) ->
    decode_bitmask(Bitmask bsr 1, PartId + 1).
%% Builds the purge callback used when cleaning up partitions; only valid
%% when the group's cleanup bitmask is non-empty (enforced by the guard).
make_btree_purge_fun(Group) when ?set_cbitmask(Group) =/= 0 ->
fun(Type, Value, {go, Acc}) ->
%% A 'stop' message in the caller's mailbox aborts the purge between
%% items; otherwise fall through immediately (after 0).
receive
stop ->
{stop, {stop, Acc}}
after 0 ->
btree_purge_fun(Type, Value, {go, Acc}, ?set_cbitmask(Group))
end
end.
%% Decides, per btree item, whether it belongs to a partition flagged for
%% cleanup in Cbitmask. Acc counts purged values.
btree_purge_fun(value, {_K, {PartId, _}}, {go, Acc}, Cbitmask) ->
Mask = 1 bsl PartId,
case (Cbitmask band Mask) of
Mask ->
{purge, {go, Acc + 1}};
0 ->
{keep, {go, Acc}}
end;
btree_purge_fun(branch, Red, {go, Acc}, Cbitmask) ->
%% The reduction's last element is the subtree's partition bitmap; the
%% first element appears to be its value count (used when purging whole
%% subtrees) — confirm against the btree reduce function.
Bitmap = element(tuple_size(Red), Red),
case Bitmap band Cbitmask of
0 ->
{keep, {go, Acc}};
Bitmap ->
%% Every partition in the subtree is being cleaned: purge it whole.
{purge, {go, Acc + element(1, Red)}};
_ ->
%% Mixed subtree: descend and decide per value.
{partial_purge, {go, Acc}}
end.
%% Thin delegation to couch_httpd_view:make_key_options/1.
make_key_options(QueryArgs) ->
couch_httpd_view:make_key_options(QueryArgs).
%%%------------------------------------------------------------------------
%% Copyright 2018, OpenCensus Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc OpenCensus Stats package
%% @end
%%%-----------------------------------------------------------------------
-module(oc_stat).
-export([record/2,
record/3,
export/0]).
-export([start_link/0,
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
code_change/3,
terminate/2]).
-record(state, {}).
-include("opencensus.hrl").
-define(RECORD(Tags, MeasureName, Value),
begin
Module = oc_stat_measure:measure_module(MeasureName),
Module:record(Tags, Value),
ok
end).
%% @doc
%% Records one or multiple measurements with the same tags at once.
%% If there are any tags in the context, measurements will be tagged with them.
%%
%% Can be optimized with `oc_stat_measure' parse transform.
%%
%% Raises `{unknown_measure, MeasureName}' if measure doesn't exist.
%% @end
-spec record(ctx:t() | oc_tags:tags(), oc_stat_measure:name(), number()) -> ok.
%% When given a tag map directly, record against it as-is.
record(Tags, MeasureName, Value) when is_map(Tags) ->
?RECORD(Tags, MeasureName, Value);
%% Otherwise treat the first argument as a ctx and extract its tags first.
record(Ctx, MeasureName, Value)->
Tags = oc_tags:from_ctx(Ctx),
?RECORD(Tags, MeasureName, Value).
%% @doc
%% Records multiple measurements at once.
%%
%% Can be optimized with `oc_stat_measure' parse transform.
%%
%% Raises `{unknown_measure, MeasureName}' if measure doesn't exist.
%% @end
-spec record(ctx:t() | oc_tags:tags(), [{oc_stat_measure:name(), number()}]) -> ok.
%% Records each {MeasureName, Value} pair against the same tag map.
record(Tags, Measures) when is_map(Tags) ->
[?RECORD(Tags, MeasureName, Value) || {MeasureName, Value} <- Measures],
ok;
%% Ctx variant: extract tags once, then reuse the map clause above.
record(Ctx, Measures) ->
Tags = oc_tags:from_ctx(Ctx),
record(Tags, Measures).
%% @doc Exports view_data of all subscribed views
-spec export() -> oc_stat_view:view_data().
%% One view_data entry per currently subscribed view.
export() ->
[oc_stat_view:export(View) || View <- oc_stat_view:all_subscribed_()].
%% gen_server implementation
%% @private
%% Registers the server under the module name so the record/export API can
%% reach it without carrying a pid.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%% @private
%% Initializes the backend storage for views and measures before serving
%% requests; traps exits so terminate/2 runs on shutdown.
init(_Args) ->
process_flag(trap_exit, true),
ok = oc_stat_view:'__init_backend__'(),
ok = oc_stat_measure:'__init_backend__'(),
{ok, #state{}}.
%% @private
%% Registration and (un)subscription requests are serialized through this
%% gen_server so backend tables are only mutated from a single process.
handle_call({measure_register, Measure}, _From, State) ->
    {reply, oc_stat_measure:register_(Measure), State};
handle_call({view_register_subscribe, View}, _From, State) ->
    {reply, oc_stat_view:register_subscribe_(View), State};
handle_call({view_register, View}, _From, State) ->
    {reply, oc_stat_view:register_(View), State};
handle_call({view_deregister, Name}, _From, State) ->
    oc_stat_view:deregister_(Name),
    {reply, ok, State};
handle_call({view_subscribe, Name}, _From, State) ->
    {reply, oc_stat_view:subscribe_(Name), State};
handle_call({view_unsubscribe, Name}, _From, State) ->
    {reply, oc_stat_view:unsubscribe_(Name), State};
handle_call(_, _From, State) ->
    %% Fix: the previous catch-all returned {noreply, State} without ever
    %% calling gen_server:reply/2, leaving unknown callers blocked until
    %% their call timeout expired. Reply with an error instead.
    {reply, {error, unknown_call}, State}.
%% @private
%% Unrecognized casts are ignored.
handle_cast(_, State) ->
{noreply, State}.
%% @private
%% Unrecognized raw messages are ignored.
handle_info(_, State) ->
{noreply, State}.
%% @private
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% @private
%% Tears down the measure backend on shutdown.
terminate(_, _) ->
oc_stat_measure:terminate_(),
ok.
%%
%% @doc A collection of frequently used functions.
%% Inspired by Clojure and Haskell.
%%
-module(core).
-export([cross/2]).
-export([frequencies/1, group_by/2, inc/1, min_by/2, minfree/1, msc/1, msc2/1]).
-export([foldl1/2, zipfold/4, zipfold/5]).
-import(lists, [filter/2, map/2, max/1]).
%%
%% @doc Applies pair of functions to a pair of arguments.
%%
%% cross({F, G}, {X, Y}) =:= {F(X), G(Y)} — each function of the pair is
%% applied to the corresponding element of the argument pair.
-spec cross(Funs :: {F, G}, Args :: {X, Y}) -> {A, B} when
      F :: fun((X) -> A),
      G :: fun((Y) -> B),
      X :: term(),
      Y :: term(),
      A :: term(),
      B :: term().
cross({F, G}, {X, Y}) ->
    Left = F(X),
    Right = G(Y),
    {Left, Right}.
%% Left fold without an initial accumulator: the first element seeds the
%% fold. Crashes (function_clause) on an empty list.
foldl1(_F, [Only]) ->
    Only;
foldl1(F, [First | Rest]) ->
    foldl1(F, First, Rest).

%% Three-argument worker; the is_function/2 guard keeps misuse loud when
%% the list is exhausted.
foldl1(F, Acc, []) when is_function(F, 2) ->
    Acc;
foldl1(F, Acc, [Head | Tail]) ->
    foldl1(F, F(Acc, Head), Tail).
%%
%% @doc Returns a map from distinct items in List to the number of times they appear.
%%
%% See [https://clojuredocs.org/clojure.core/frequencies]
%%
-spec frequencies([A]) -> #{A => pos_integer()}.
%% Counts occurrences by folding each item into a map, initializing absent
%% keys at 1 and incrementing present ones.
frequencies(List) ->
    lists:foldl(
        fun(Item, Counts) ->
            maps:update_with(Item, fun(N) -> N + 1 end, 1, Counts)
        end,
        #{}, List).
%%
%% @doc Returns an element `E' of the list with minimum value of `F(E)'.
%%
min_by(_, [X]) -> X;
min_by(F, [X | Xs]) -> min_by(F, Xs, {X, F(X)}).
%% Worker carries {BestElement, F(BestElement)} so F runs once per element.
min_by(_, [], {Y, _}) -> Y;
min_by(F, [X | Xs], {_, Fy} = Min) ->
Fx = F(X),
%% NOTE: the case matches min/2's result against Fx first, so when
%% Fx =:= Fy the later element wins; when Fx == Fy but the terms differ
%% (e.g. 1 vs 1.0) the earlier element is kept. Preserve this exact
%% ordering when refactoring.
case min(Fx, Fy) of
Fx -> min_by(F, Xs, {X, Fx});
Fy -> min_by(F, Xs, Min)
end.
%%
%% @doc Returns a map of the elements of List keyed by the result of
%% Fun on each element. The value at each key will be a list of the
%% corresponding elements, in the order they appeared in List.
%%
%% See [https://clojuredocs.org/clojure.core/group-by]
%%
%% Buckets the elements of List under Fun(Element). Each bucket preserves
%% the elements' original order (built reversed, flipped at the end).
group_by(Fun, List) ->
    Grouped = lists:foldl(
        fun(Elem, Acc) ->
            Key = Fun(Elem),
            maps:update_with(Key, fun(Bucket) -> [Elem | Bucket] end, [Elem], Acc)
        end,
        #{}, List),
    maps:map(fun(_Key, Bucket) -> lists:reverse(Bucket) end, Grouped).
%%
%% @doc Returns a number one greater than X.
%%
%% Increments its numeric argument by one.
-spec inc(X :: number()) -> number().
inc(Number) -> Number + 1.
%%
%% @doc Zips the elements of the given lists
%% left folding them with the accumulator.
%%
%% Folds F over two lists in lock-step; crashes (function_clause) when the
%% lists differ in length.
-spec zipfold(F, Acc, [A], [B]) -> Acc when
      F :: fun((Acc, A, B) -> Acc),
      Acc :: term(),
      A :: term(),
      B :: term().
zipfold(Fun, Acc, [], []) when is_function(Fun) ->
    Acc;
zipfold(Fun, Acc, [HeadA | TailA], [HeadB | TailB]) ->
    NewAcc = Fun(Acc, HeadA, HeadB),
    zipfold(Fun, NewAcc, TailA, TailB).
%%
%% @doc Zips the elements of the given lists
%% left folding them with the accumulator.
%%
-spec zipfold(F, Acc, [A], [B], [C]) -> Acc when
F :: fun((Acc, A, B, C) -> Acc),
Acc :: term(),
A :: term(),
B :: term(),
C :: term().
%% Three-list variant; crashes (function_clause) when lengths differ.
zipfold(F, Acc, [], [], []) when is_function(F) -> Acc;
zipfold(F, Acc, [A | As], [B | Bs], [C | Cs]) ->
zipfold(F, F(Acc, A, B, C), As, Bs, Cs).
%%
%% @doc The smallest free number.
%%
%% Computes the smallest natural number not in a given finite list
%% of <em>unique</em> natural numbers.
%%
%% This algorithm takes linear time and space (tail-optimized).
%% In comparison, the straightforward algorithm is quadratic:
%% ```
%% hd(lists:seq(0, N) -- List)'''
%%
%% See [B1] Chapter 1, pp. 1–6.
%%
-spec minfree([non_neg_integer()]) -> non_neg_integer().
minfree(List) -> minfrom(0, {length(List), List}).
%% Invariant: every element of List is >= A and the elements are unique
%% (precondition from the @doc above); N =:= length(List), so the answer
%% lies in [A, A + N].
minfrom(A, {0, []}) -> A;
minfrom(A, {N, List}) ->
B = A + 1 + N div 2,
%% Split around midpoint B: Us below B, Vs at or above B.
{Us, Vs} = lists:partition(fun(X) -> X < B end, List), % Θ(N)
M = length(Us),
%% If the lower interval [A, B) is completely full (M =:= B - A), the
%% smallest free number is in the upper half; otherwise it is below B.
case B - A of
M -> minfrom(B, {N - M, Vs});
_ -> minfrom(A, {M, Us})
end.
%%
%% @doc Maximum surpasser count.
%%
%% `x[j]' is a surpasser of `x[i]' if `i < j' and `x[i] < x[j]'.
%% The <em>surpasser count</em> of an element is the number of
%% its surpassers.
%%
%% The complexity of the divide and conquer version of the MSC algorithm is `O(n log n)'.
%%
%% See [B1] Chapter 2, pp. 7–11.
%%
%% @see msc2/1
%%
-spec msc(list()) -> non_neg_integer().
msc(List) -> max([C || {_, C} <- table(List)]).
%% table/1 pairs each element with its surpasser count, built by a
%% merge-sort style divide and conquer; the emitted table is ordered
%% ascending by element (see the X < Y merge in join/3).
-spec table([A]) -> [{A, Count :: non_neg_integer()}].
table([X]) -> [{X, 0}];
table(List) ->
M = length(List),
N = M div 2,
{Ys, Zs} = lists:split(N, List),
join(M - N, table(Ys), table(Zs)).
%% Merges two ordered count tables. N is how many right-table elements
%% remain; each of them came after — and is larger than — a left element
%% X emitted here, hence C + N.
-spec join(non_neg_integer(), [A], [A]) -> [A].
join(0, Txs, []) -> Txs;
join(_N, [], Tys) -> Tys;
join(N, [{X, C} | Txst] = Txs, [{Y, D} | Tyst] = Tys) ->
case X < Y of
true -> [{X, C + N} | join(N, Txst, Tys)];
false -> [{Y, D} | join(N - 1, Txs, Tyst)]
end.
%%
%% @doc MSC algorithm running in quadratic time.
%%
-spec msc2(list()) -> non_neg_integer().
%% For every non-empty suffix, count how many later elements exceed its
%% head, then take the maximum of those counts.
msc2(List) ->
    Counts = [scount(Head, Rest) || [Head | Rest] <- tails(List)],
    lists:max(Counts).

%% Number of elements of Ys that surpass (are strictly greater than) X.
scount(X, Ys) ->
    length([Y || Y <- Ys, X < Y]).

%% All non-empty suffixes of a list, longest first.
tails([]) -> [];
tails([_ | Rest] = Whole) -> [Whole | tails(Rest)].
%% =============================================================================
%% Unit tests
%% =============================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% EUnit generators/tests for the exported helpers above.
cross_test_() ->
F = fun(X) -> 2 * X end,
G = fun(Y) -> 3 * Y end,
[
?_assertEqual({2, 6}, cross({F, G}, {1, 2}))
].
zipfold_test_() -> [
?_assertEqual(2 + 6 + 12,
zipfold(fun(Acc, A, B) -> Acc + A * B end, 0, [1, 2, 3], [2, 3, 4])),
?_assertEqual(6 + 24 + 60,
zipfold(fun(Acc, A, B, C) -> Acc + A * B * C end, 0, [1, 2, 3], [2, 3, 4], [3, 4, 5]))].
frequencies_test() ->
?assertEqual(#{1 => 2, 2 => 2, 3 => 3, 4 => 1},
frequencies([1, 2, 3, 2, 3, 4, 1, 3])).
group_by_test() ->
?assertEqual(#{1 => ["a"], 2 => ["as", "aa"], 3 => ["asd"], 4 => ["asdf", "qwer"]},
group_by(fun erlang:length/1, ["a", "as", "asd", "aa", "asdf", "qwer"])).
min_by_test_() ->
F = fun(X) -> X * X end,
[
?_assertEqual(1, min_by(F, [-3, 1, 5]))
].
minfree_test_() ->
List = [4, 0, 5, 7, 3, 10, 2, 1],
[
?_assertEqual(0, minfree(lists:seq(1, 10))),
?_assertEqual(0, minfree(lists:reverse(lists:seq(1, 10)))),
?_assertEqual(9, minfree(lists:seq(0, 8))),
?_assertEqual(9, minfree(lists:reverse(lists:seq(0, 8)))),
?_assertEqual(6, minfree(List)),
?_assertEqual(hd(lists:seq(0, 8) -- List), minfree(List))
].
%% Test-only convenience: surpasser count of a list's head over its tail.
scount([X | Xs]) -> scount(X, Xs).
msc_test_() ->
Word = "GENERATING",
[
?_assertEqual(5, scount(hd(Word), tl(Word))),
?_assertEqual([5, 6, 2, 5, 1, 4, 0, 1, 0, 0], map(fun scount/1, tails(Word))),
?_assertEqual(6, msc2(Word)),
?_assertEqual(6, msc(Word)),
?_assertEqual([6, 6, 5, 5, 4, 4, 1, 1, 0, 0], map(fun msc/1, tails(Word)))
].
tails_test_() -> [
?_assertEqual([[1, 2, 3], [2, 3], [3]], tails([1, 2, 3]))
].
-endif.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_stream).
-define(FILE_POINTER_BYTES, 8).
-define(FILE_POINTER_BITS, 8*(?FILE_POINTER_BYTES)).
-define(STREAM_OFFSET_BYTES, 4).
-define(STREAM_OFFSET_BITS, 8*(?STREAM_OFFSET_BYTES)).
-define(HUGE_CHUNK, 1000000000). % Huge chunk size when reading all in one go
-define(DEFAULT_STREAM_CHUNK, 16#00100000). % 1 meg chunks when streaming data
-export([foldl/4, foldl/5, foldl_decode/6, old_foldl/5]).
-export([old_read_term/2]).
-include("couch_db.hrl").
%%% Interface functions %%%
% 09 UPGRADE CODE
%% Pre-0.9 stream format: folds Fun over data addressed by an old-style
%% {Pos, Offset} pointer; a null pointer with length 0 yields Acc unchanged.
old_foldl(_Fd, null, 0, _Fun, Acc) ->
Acc;
old_foldl(Fd, OldPointer, Len, Fun, Acc) when is_tuple(OldPointer)->
{ok, Acc2, _} = old_stream_data(Fd, OldPointer, Len, ?DEFAULT_STREAM_CHUNK, Fun, Acc),
Acc2.
%% Folds Fun over the binary chunks of a stream stored at the given file
%% positions.
foldl(_Fd, [], _Fun, Acc) ->
Acc;
foldl(Fd, [Pos|Rest], Fun, Acc) ->
{ok, Bin} = couch_file:pread_iolist(Fd, Pos),
foldl(Fd, Rest, Fun, Fun(Bin, Acc)).
%% As foldl/4 but verifies the stream's MD5; an empty binary means "no
%% checksum recorded", so verification is skipped.
foldl(Fd, PosList, <<>>, Fun, Acc) ->
foldl(Fd, PosList, Fun, Acc);
foldl(Fd, PosList, Md5, Fun, Acc) ->
foldl(Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc).
%% As foldl/5 but decodes each chunk with the given encoding (gzip or
%% identity) before passing it to Fun. Note the MD5 is computed over the
%% encoded bytes (see foldl_decode/7).
foldl_decode(Fd, PosList, Md5, Enc, Fun, Acc) ->
{DecDataFun, DecEndFun} = case Enc of
gzip ->
ungzip_init();
identity ->
identity_enc_dec_funs()
end,
Result = foldl_decode(
DecDataFun, Fd, PosList, Md5, couch_util:md5_init(), Fun, Acc
),
DecEndFun(),
Result.
%% Internal fold threading an MD5 context; the final digest must equal the
%% expected Md5 or the match crashes (badmatch) — that is the integrity
%% check.
foldl(_Fd, [], Md5, Md5Acc, _Fun, Acc) ->
Md5 = couch_util:md5_final(Md5Acc),
Acc;
foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
{ok, Bin} = couch_file:pread_iolist(Fd, Pos),
Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, Bin)),
Fun(Bin, Acc);
foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
{ok, Bin} = couch_file:pread_iolist(Fd, Pos),
foldl(Fd, Rest, Md5, couch_util:md5_update(Md5Acc, Bin), Fun, Fun(Bin, Acc)).
%% Decoding variant: the MD5 is updated with the encoded bytes, while Fun
%% receives the decoded data.
foldl_decode(_DecFun, _Fd, [], Md5, Md5Acc, _Fun, Acc) ->
Md5 = couch_util:md5_final(Md5Acc),
Acc;
foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
{ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
Md5 = couch_util:md5_final(couch_util:md5_update(Md5Acc, EncBin)),
Bin = DecFun(EncBin),
Fun(Bin, Acc);
foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
{ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
Bin = DecFun(EncBin),
Md5Acc2 = couch_util:md5_update(Md5Acc, EncBin),
foldl_decode(DecFun, Fd, Rest, Md5, Md5Acc2, Fun, Fun(Bin, Acc)).
%% Sets up streaming gzip decompression. Returns {InflateFun, EndFun}:
%% InflateFun decodes one chunk (returning an iolist), EndFun releases the
%% zlib port. Window bits 16 + 15 select gzip framing with the maximum
%% 32K window.
ungzip_init() ->
    Z = zlib:open(),
    zlib:inflateInit(Z, 16 + 15),
    InflateFun = fun(Chunk) -> zlib:inflate(Z, Chunk) end,
    EndFun = fun() ->
        ok = zlib:inflateEnd(Z),
        ok = zlib:close(Z)
    end,
    {InflateFun, EndFun}.
%% No-op counterpart of ungzip_init/0 for identity-encoded streams:
%% the data fun passes chunks through, the end fun has nothing to free.
identity_enc_dec_funs() ->
    Passthrough = fun(Data) -> Data end,
    Finish = fun() -> [] end,
    {Passthrough, Finish}.
% 09 UPGRADE CODE
%% Reads a length-prefixed term from an old-format stream and decodes it.
old_read_term(Fd, Sp) ->
{ok, <<TermLen:(?STREAM_OFFSET_BITS)>>, Sp2}
= old_read(Fd, Sp, ?STREAM_OFFSET_BYTES),
{ok, Bin, _Sp3} = old_read(Fd, Sp2, TermLen),
{ok, binary_to_term(Bin)}.
%% Reads Num bytes starting at stream pointer Sp by walking the old linked
%% chunk layout; returns the bytes plus the advanced pointer.
old_read(Fd, Sp, Num) ->
{ok, RevBin, Sp2} = old_stream_data(Fd, Sp, Num, ?HUGE_CHUNK, fun(Bin, Acc) -> [Bin | Acc] end, []),
Bin = list_to_binary(lists:reverse(RevBin)),
{ok, Bin, Sp2}.
% 09 UPGRADE CODE
%% Walks the pre-0.9 on-disk stream: chunks are linked via a
%% {NextPos, NextOffset} header that is followed when the current chunk's
%% Offset reaches 0. Folds Fun over at most MaxChunk bytes at a time.
old_stream_data(_Fd, Sp, 0, _MaxChunk, _Fun, Acc) ->
{ok, Acc, Sp};
%% Offset 0: the current position holds a pointer to the next chunk.
old_stream_data(Fd, {Pos, 0}, Num, MaxChunk, Fun, Acc) ->
{ok, <<NextPos:(?FILE_POINTER_BITS), NextOffset:(?STREAM_OFFSET_BITS)>>}
= couch_file:old_pread(Fd, Pos, ?FILE_POINTER_BYTES + ?STREAM_OFFSET_BYTES),
Sp = {NextPos, NextOffset},
% Check NextPos is past current Pos (this is always true in a stream)
% Guards against potential infinite loops caused by corruption.
case NextPos > Pos of
true -> ok;
false -> throw({error, stream_corruption})
end,
old_stream_data(Fd, Sp, Num, MaxChunk, Fun, Acc);
%% Regular chunk: read up to min(MaxChunk, Num, Offset) bytes and recurse.
old_stream_data(Fd, {Pos, Offset}, Num, MaxChunk, Fun, Acc) ->
ReadAmount = lists:min([MaxChunk, Num, Offset]),
{ok, Bin} = couch_file:old_pread(Fd, Pos, ReadAmount),
Sp = {Pos + ReadAmount, Offset - ReadAmount},
old_stream_data(Fd, Sp, Num - ReadAmount, MaxChunk, Fun, Fun(Bin, Acc)).
% Tests moved to tests/etap/050-stream.t | src/couch_stream.erl | 0.549641 | 0.404096 | couch_stream.erl | starcoder |
%% Copyright 2018 Erlio GmbH Basel Switzerland (http://erl.io)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(vmq_queue_sup_sup).
-behaviour(supervisor).
%% API
-export([start_link/3,
start_queue/1,
start_queue/2,
get_queue_pid/1,
fold_queues/2,
summary/0,
nr_of_queues/0]).
%% Supervisor callbacks
-export([init/1]).
-define(SERVER, ?MODULE).
%%====================================================================
%% API functions
%%====================================================================
%% Starts the top-level supervisor that fans subscriber queues out over
%% num_child_sups() child vmq_queue_sup supervisors.
start_link(Shutdown, MaxR, MaxT) ->
supervisor:start_link({local, ?SERVER}, ?MODULE, [Shutdown, MaxR, MaxT]).
%%====================================================================
%% Supervisor callbacks
%%====================================================================
%% Child :: {Id,StartFunc,Restart,Shutdown,Type,Modules}
init([Shutdown, MaxR, MaxT]) ->
NumSups = num_child_sups(),
SupFlags =
{one_for_one, 1, 5},
%% One vmq_queue_sup child per slot, each identified by a generated
%% registered name and a queue-table id derived from its index.
ChildSpec =
fun(RegName, QueueTabId) ->
{{RegName, QueueTabId},
{vmq_queue_sup, start_link, [Shutdown, RegName, QueueTabId, MaxR, MaxT]},
permanent, 5000, supervisor, [vmq_queue_sup]}
end,
ChildSpecs =
[ChildSpec(
gen_sup_name(N),
gen_queue_tab_id(N))
|| N <- lists:seq(1,NumSups)],
{ok, {SupFlags, ChildSpecs}}.
%%====================================================================
%% Internal functions
%%====================================================================
%% Starts the queue for SubscriberId with Clean defaulting to true
%% (Clean is passed through to vmq_queue_sup:start_queue/3 — presumably
%% the clean-session flag; confirm there).
start_queue(SubscriberId) ->
start_queue(SubscriberId, true).
start_queue(SubscriberId, Clean) ->
%% Always map the same subscriber to the same supervisor
%% as we may have concurrent attempts at setting up the
%% queue. vmq_queue_sup:start_queue/3 prevents duplicates
%% as long as it's under the same supervisor.
SupName =
subscriberid_to_supname(SubscriberId),
vmq_queue_sup:start_queue(SupName, SubscriberId, Clean).
%% Number of sibling vmq_queue_sup children (configurable, default 50).
num_child_sups() ->
application:get_env(vmq_server, queue_sup_sup_children, 50).
%% Deterministically hashes a subscriber onto a child supervisor name so
%% concurrent setups for the same subscriber land on the same sup.
subscriberid_to_supname(SubscriberId) ->
gen_sup_name(erlang:phash2(SubscriberId, num_child_sups()) + 1).
%% Same hashing, but yields the matching queue-table id.
subscriberid_to_tabid(SubscriberId) ->
gen_queue_tab_id(erlang:phash2(SubscriberId, num_child_sups()) + 1).
%% Queue-table id for child N, e.g. vmq_queue_sup_1_tab.
gen_queue_tab_id(N) ->
    list_to_atom("vmq_queue_sup_" ++ integer_to_list(N) ++ "_tab").

%% Registered supervisor name for child N, e.g. vmq_queue_sup_1.
gen_sup_name(N) ->
    list_to_atom("vmq_queue_sup_" ++ integer_to_list(N)).
%% Looks up the queue pid for a subscriber in its hashed queue table.
get_queue_pid(SubscriberId) ->
QueueTabId = subscriberid_to_tabid(SubscriberId),
vmq_queue_sup:get_queue_pid(QueueTabId, SubscriberId).
%% Folds FoldFun over every queue of every child supervisor.
fold_queues(FoldFun, Acc) ->
lists:foldl(
fun(QueueTabId, AccAcc) ->
vmq_queue_sup:fold_queues(QueueTabId, FoldFun, AccAcc)
end,
Acc,
child_tab_ids()).
%% Counts queues per state plus the total number of stored messages:
%% {Online, WaitForOffline, Drain, Offline, StoredMsgs}. The fifth element
%% of vmq_queue:status/1 marks plugin-owned queues, which are skipped.
summary() ->
fold_queues(
fun(_, QPid, {AccOnline, AccWait, AccDrain, AccOffline, AccStoredMsgs} = Acc) ->
try vmq_queue:status(QPid) of
{_, _, _, _, true} ->
%% this is a queue belonging to a plugin... ignore it
Acc;
{online, _, TotalStoredMsgs, _, _} ->
{AccOnline + 1, AccWait, AccDrain, AccOffline, AccStoredMsgs + TotalStoredMsgs};
{wait_for_offline, _, TotalStoredMsgs, _, _} ->
{AccOnline, AccWait + 1, AccDrain, AccOffline, AccStoredMsgs + TotalStoredMsgs};
{drain, _, TotalStoredMsgs, _, _} ->
{AccOnline, AccWait, AccDrain + 1, AccOffline, AccStoredMsgs + TotalStoredMsgs};
{offline, _, TotalStoredMsgs, _, _} ->
{AccOnline, AccWait, AccDrain, AccOffline + 1, AccStoredMsgs + TotalStoredMsgs}
catch
_:_ ->
%% queue stopped in the meantime, that's ok.
Acc
end
end, {0, 0, 0, 0, 0}).
%% All queue table ids, one per child supervisor.
child_tab_ids() ->
[ gen_queue_tab_id(N) || N <- lists:seq(1, num_child_sups()) ].
%% Total number of queues across all child supervisors.
nr_of_queues() ->
lists:sum(
[vmq_queue_sup:nr_of_queues(QueueTabId) || QueueTabId <-child_tab_ids()]
).
%% taken from http://crunchyd.com/scutil/
%% All code here is MIT Licensed
%% http://scutil.com/license.html
-module(folsom_statistics_scutil).
-export([
kendall_correlation/2
]).
-compile([export_all]).
-compile([native]).
% seems to match the value returned by the 'cor' (method="kendal") R function
% http://en.wikipedia.org/wiki/Kendall_tau_rank_correlation_coefficient
kendall_correlation(List1, List2) when is_list(List1), is_list(List2) ->
%% Rank both lists (tied values share averaged ranks, in original order),
%% pair the ranks and sort the pairs by the first list's ranks.
{RA,_} = lists:unzip(tied_ordered_ranking(List1)),
{RB,_} = lists:unzip(tied_ordered_ranking(List2)),
Ordering = lists:keysort(1, lists:zip(RA,RB)),
{_,OrdB} = lists:unzip(Ordering),
N = length(List1),
%% P sums, for each rank in OrdB, how many later ranks are smaller
%% (see kendall_right_of_item/2); the result is tau = 1 - 4P/(N(N-1)).
P = lists:sum(kendall_right_of(OrdB, [])),
-(( (4*P) / (N * (N - 1))) - 1).
%%%===================================================================
%%% Internal functions
%%%==================================================================
%% Pairs ranks 1..N with the list sorted in descending order, so the
%% largest value gets rank 1.
simple_ranking(List) when is_list(List) ->
    Descending = lists:reverse(lists:sort(List)),
    Ranks = lists:seq(1, length(List)),
    lists:zip(Ranks, Descending).
%% Ranking where tied values share the average of their positions.
tied_ranking(List) ->
tied_rank_worker(simple_ranking(List), [], no_prev_value).
%% Reorders the tied ranking back into the original list order, so the
%% Nth result carries the rank of the Nth input element.
tied_ordered_ranking(List) when is_list(List) ->
tied_ordered_ranking(List, tied_ranking(List), []).
tied_ordered_ranking([], [], Work) ->
lists:reverse(Work);
tied_ordered_ranking([Front|Rem], Ranks, Work) ->
%% Find and consume one ranking entry for the next original element;
%% removing it from Ranks handles duplicate values correctly.
{value,Item} = lists:keysearch(Front,2,Ranks),
{IRank,Front} = Item,
tied_ordered_ranking(Rem, Ranks--[Item], [{IRank,Front}]++Work).
%% For each element, counts how many elements to its right are smaller;
%% returns the counts in input order.
kendall_right_of([], Work) ->
    lists:reverse(Work);
kendall_right_of([Head | Tail], Work) ->
    kendall_right_of(Tail, [kendall_right_of_item(Head, Tail) | Work]).

%% Number of elements of Rest strictly below Pivot.
kendall_right_of_item(Pivot, Rest) ->
    length(lists:filter(fun(X) -> X < Pivot end, Rest)).
%% Emits one {AveragedRank, Value} entry per tied position collected in
%% FoundAt: all positions of a run of equal values share their average.
tied_add_prev(Work, {FoundAt, NewValue}) ->
lists:duplicate( length(FoundAt), {lists:sum(FoundAt)/length(FoundAt), NewValue} ) ++ Work.
%% Scans the simple ranking, batching runs of equal values; PrevValue
%% carries {PositionsSoFar, Value} for the current run.
tied_rank_worker([], Work, PrevValue) ->
lists:reverse(tied_add_prev(Work, PrevValue));
tied_rank_worker([Item|Remainder], Work, PrevValue) ->
case PrevValue of
no_prev_value ->
{BaseRank,BaseVal} = Item,
tied_rank_worker(Remainder, Work, {[BaseRank],BaseVal});
{FoundAt,OldVal} ->
case Item of
{Id,OldVal} ->
%% Same value as the current run: extend it.
tied_rank_worker(Remainder, Work, {[Id]++FoundAt,OldVal});
{Id,NewVal} ->
%% Value changed: flush the finished run, start a new one.
tied_rank_worker(Remainder, tied_add_prev(Work, PrevValue), {[Id],NewVal})
end
end.
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(hocon_schema_doc).
-export([gen/1]).
-include("hoconsc.hrl").
-include("hocon_private.hrl").
gen(Schema) ->
%% find_structs yields the root namespace, the root fields and every
%% struct reachable from them.
{RootNs, RootFields, Structs} = hocon_schema:find_structs(Schema),
%% Root keys get a level-1 heading, all other structs level 2.
[fmt_structs(1, RootNs, [{RootNs, "Root Keys", #{fields => RootFields}}]),
fmt_structs(2, RootNs, Structs)].
%% Renders each struct followed by a blank line.
fmt_structs(_HeadWeight, _RootNs, []) -> [];
fmt_structs(HeadWeight, RootNs, [{Ns, Name, Fields} | Rest]) ->
[fmt_struct(HeadWeight, RootNs, Ns, Name, Fields), "\n" |
fmt_structs(HeadWeight, RootNs, Rest)].
fmt_struct(HeadWeight, RootNs, Ns0, Name, #{fields := Fields} = Meta) ->
%% Hide the namespace when it equals the root namespace: the bare name
%% is unique enough at the root.
Ns = case RootNs =:= Ns0 of
true -> undefined;
false -> Ns0
end,
FieldMd = fmt_fields(HeadWeight + 1, Ns, Fields),
FullNameDisplay = ref(Ns, Name),
%% Heading, then the field bullet list, then the struct description if
%% the schema supplied one.
[ hocon_md:h(HeadWeight, FullNameDisplay), FieldMd,
case Meta of
#{desc := StructDoc} -> ["\n", StructDoc];
_ -> []
end
].
%% One markdown bullet per field: "- name: type" plus optional Description
%% and Default sub-items.
fmt_fields(_Weight, _Ns, []) -> [];
fmt_fields(Weight, Ns, [{Name, FieldSchema} | Fields]) ->
Type = fmt_type(Ns, hocon_schema:field_schema(FieldSchema, type)),
Default = fmt_default(hocon_schema:field_schema(FieldSchema, default)),
Desc = hocon_schema:field_schema(FieldSchema, desc),
NewMd =
[ ["- ", bin(Name), ": ", Type, "\n"]
, case Desc =/= undefined of
true -> [" - Description: ", Desc, "\n"];
false -> []
end
, case Default =/= undefined of
true -> [" - Default:", Default, "\n"];
false -> []
end
],
[bin(NewMd) | fmt_fields(Weight, Ns, Fields)].
%% Pretty-prints a default value: single-line values inline as code,
%% multi-line values inside a fenced block.
fmt_default(undefined) -> undefined;
fmt_default(Value) ->
case hocon_pp:do(Value, #{newline => "", embedded => true}) of
[OneLine] -> [" `", OneLine, "`"];
Lines -> ["\n```\n", [[L, "\n"] || L <- Lines], "```"]
end.
fmt_type(Ns, T) -> hocon_md:code(do_type(Ns, T)).
%% Renders each hocon type constructor (see hoconsc macros) as markdown.
do_type(_Ns, A) when is_atom(A) -> bin(A); % singleton
do_type(Ns, Ref) when is_list(Ref) -> do_type(Ns, ?REF(Ref));
%% Struct references become local markdown links.
do_type(Ns, ?REF(Ref)) -> hocon_md:local_link(ref(Ns, Ref), ref(Ns, Ref));
do_type(_Ns, ?R_REF(Module, Ref)) -> do_type(hocon_schema:namespace(Module), ?REF(Ref));
do_type(Ns, ?ARRAY(T)) -> io_lib:format("[~s]", [do_type(Ns, T)]);
do_type(Ns, ?UNION(Ts)) -> lists:join(" | ", [do_type(Ns, T) || T <- Ts]);
do_type(_Ns, ?ENUM(Symbols)) -> lists:join(" | ", [bin(S) || S <- Symbols]);
do_type(Ns, ?LAZY(T)) -> do_type(Ns, T);
do_type(Ns, ?MAP(Name, T)) -> ["{$", bin(Name), " -> ", do_type(Ns, T), "}"];
do_type(_Ns, {'$type_refl', #{name := Type}}) -> lists:flatten(Type).
%% Builds the displayed/linked reference name, qualified by namespace.
ref(undefined, Name) -> Name;
ref(Ns, Name) ->
%% when namespace is the same as reference name
%% we do not prepend the reference link with namespace
%% because the root name is already unique enough
case bin(Ns) =:= bin(Name) of
true -> bin(Ns);
false -> [bin(Ns), ":", bin(Name)]
end.
%% @private Coerce a string/chardata, atom, or binary to a UTF-8 binary.
%% (Fix: stripped extraction-metadata residue that was fused onto the
%% final clause, which made the file unparsable.)
bin(S) when is_list(S) -> unicode:characters_to_binary(S, utf8);
bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
bin(B) when is_binary(B) -> B.
%% Copyright 2014 Erlio GmbH Basel Switzerland (http://erl.io)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(vmq_reg_sup).
-behaviour(supervisor).
%% API
-export([start_link/0,
start_reg_view/1,
stop_reg_view/1,
reconfigure_registry/1]).
%% Supervisor callbacks
-export([init/1]).
-define(CHILD(Id, Mod, Type, Args), {Id, {Mod, start_link, Args},
permanent, 5000, Type, [Mod]}).
%%%===================================================================
%%% API functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Starts the supervisor, then installs the configured reg views (the
%% default reg view plus any extras from the `reg_views' config, with
%% duplicates removed). A failure to start any reg view crashes here.
%%
%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
start_link() ->
    {ok, Pid} = supervisor:start_link({local, ?MODULE}, ?MODULE, []),
    Default = vmq_config:get_env(default_reg_view, vmq_reg_trie),
    Configured = vmq_config:get_env(reg_views, []),
    lists:foreach(
      fun(RegView) -> {ok, _} = start_reg_view(RegView) end,
      lists:usort([Default | Configured])),
    {ok, Pid}.
%% @doc Reconcile the running reg view children with the `reg_views'
%% entry of the given config: start any newly required views and stop
%% views that are no longer listed. A no-op when the config does not
%% mention reg_views.
reconfigure_registry(Config) ->
    case lists:keyfind(reg_views, 1, Config) of
        false ->
            ok;
        {reg_views, ConfiguredViews} ->
            Default = vmq_config:get_env(default_reg_view, vmq_reg_trie),
            Required = lists:usort([Default | ConfiguredViews]),
            Installed = [Id || {{reg_view, Id}, _, _, _}
                               <- supervisor:which_children(?MODULE)],
            install_reg_views(Required -- Installed),
            uninstall_reg_views(Installed -- Required)
    end.
%% @private Start each given reg view module as a supervised child.
%% Failures are logged and skipped so the remaining views still install.
install_reg_views([RV|RegViews]) ->
    case start_reg_view(RV) of
        {ok, _} ->
            lager:info("installed reg view ~p", [RV]),
            install_reg_views(RegViews);
        {error, Reason} ->
            %% Fix: the format string had one ~p for two arguments, which
            %% makes the log line fail to format.
            lager:error("can't install reg view ~p due to ~p", [RV, Reason]),
            install_reg_views(RegViews)
    end;
install_reg_views([]) -> ok.
%% @private Stop each given reg view child. Failures are logged and
%% skipped so the remaining views are still uninstalled.
uninstall_reg_views([RV|RegViews]) ->
    case stop_reg_view(RV) of
        {error, Reason} ->
            %% Fix: the format string had one ~p for two arguments, which
            %% makes the log line fail to format.
            lager:error("can't uninstall reg view ~p due to ~p", [RV, Reason]),
            uninstall_reg_views(RegViews);
        _ ->
            lager:info("uninstalled reg view ~p", [RV]),
            uninstall_reg_views(RegViews)
    end;
uninstall_reg_views([]) -> ok.
%% @doc Dynamically add the reg view module as a supervised worker child,
%% keyed by {reg_view, ViewModule}.
start_reg_view(ViewModule) ->
    supervisor:start_child(?MODULE, reg_view_child_spec(ViewModule)).
%% @doc Terminate and delete the reg view child for ViewModule. The child
%% spec is only deleted when termination succeeded; otherwise the
%% termination error is returned unchanged.
stop_reg_view(ViewModule) ->
    ChildId = {reg_view, ViewModule},
    case supervisor:terminate_child(?MODULE, ChildId) of
        ok ->
            supervisor:delete_child(?MODULE, ChildId);
        {error, _} = Error ->
            Error
    end.
%%%===================================================================
%%% Supervisor callbacks
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Whenever a supervisor is started using supervisor:start_link/[2,3],
%% this function is called by the new process to find out about
%% restart strategy, maximum restart frequency and child
%% specifications.
%%
%% @spec init(Args) -> {ok, {SupFlags, [ChildSpec]}} |
%% ignore |
%% {error, Reason}
%% @end
%%--------------------------------------------------------------------
init([]) ->
    %% one_for_one: a crashing child is restarted alone; at most 5
    %% restarts within 10 seconds before the supervisor itself fails.
    %% Reg view children are added dynamically via start_reg_view/1.
    {ok, {{one_for_one, 5, 10},[
        ?CHILD(vmq_reg, vmq_reg, worker, []),
        ?CHILD(vmq_retain_srv, vmq_retain_srv, worker, []),
        ?CHILD(vmq_reg_leader, vmq_reg_leader, worker, [])]
    }
    }.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @private Child spec for a reg view worker, keyed by {reg_view, Module}.
%% (Fix: stripped extraction-metadata residue fused onto the last line.)
reg_view_child_spec(ViewModule) ->
    ?CHILD({reg_view, ViewModule}, ViewModule, worker, []).
%% -------------------------------------------------------------------
%%
%% sms: Streaming merge sort
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%%
%% @doc Streaming merge sort
-module(sms).
-define(DICTMODULE, orddict).
-export([new/1,
add_results/3,
done/1,
sms/1]).
-export_type([sms/0]).
-opaque sms() :: ?DICTMODULE:?DICTMODULE().
%% @doc create a new sms buffer for the given covering set of `Vnodes',
%% mapping every vnode id to an active, initially-empty result list.
-spec new([non_neg_integer()]) -> sms().
new(Vnodes) ->
    ?DICTMODULE:from_list([{Id, {active, []}} || Id <- Vnodes]).
%% @doc Append `Results' to the buffered results for `VnodeID' in `Data'.
%% The atom `done' marks the vnode as finished (keeping its buffered
%% results); appending to an already-done vnode crashes on purpose.
-spec add_results(non_neg_integer(), list() | atom(), sms()) -> sms().
add_results(VnodeID, done, Data) ->
    update(VnodeID, fun({_Status, Results}) -> {done, Results} end, Data);
add_results(VnodeID, NewResults, Data) ->
    Append = fun({active, Existing}) -> {active, Existing ++ NewResults} end,
    update(VnodeID, Append, Data).
%% @private
%% Apply UpdateFun to the {Status, Results} entry stored for VnodeID.
%% Assumes VnodeID is one of the vnodes the buffer was created with in
%% new/1 (the dict module's update crashes on an unknown key).
update(VnodeID, UpdateFun, Data) ->
    ?DICTMODULE:update(VnodeID, UpdateFun, Data).
%% @doc get all remaining buffered data, for all vnodes, merge-sorted.
-spec done(sms()) -> [term()].
done(Data) ->
    lists:merge(values(Data)).
%% @doc perform the streaming merge sort over the given `Data:sms()'.
%% Returns {MergedReadyToSendResults, RemainingBuffer}. No results can be
%% released while any (non-drained) vnode buffer is empty — a later result
%% from that vnode could still sort before what we have — so in that case
%% everything stays buffered.
-spec sms(sms()) -> {[term()] | [], sms()}.
sms(Data) ->
    case values(Data) of
        [] ->
            {[], Data};
        Vals ->
            case any_empty(Vals) of
                true -> {[], Data};
                false -> unsafe_sms(Data)
            end
    end.
%% @private Perform the merge. Safe to call only when every vnode buffer
%% is non-empty. Every element =< the smallest "last element" across all
%% buffers can be released: no vnode can still produce anything smaller.
unsafe_sms(Data) ->
    Pivot = lists:min([lists:last(Vs) || Vs <- values(Data)]),
    Split = ?DICTMODULE:map(
              fun(_Key, {Status, Vs}) ->
                      {Status, lists:splitwith(fun(E) -> E =< Pivot end, Vs)}
              end, Data),
    Ready = ?DICTMODULE:map(fun(_Key, {Status, {Lo, _Hi}}) -> {Status, Lo} end, Split),
    Remaining = ?DICTMODULE:map(fun(_Key, {Status, {_Lo, Hi}}) -> {Status, Hi} end, Split),
    {lists:merge(values(Ready)), Remaining}.
%% @private
%% Extract the buffered result lists for all vnodes, skipping vnodes that
%% are done AND already drained ({done, []}) so the merge never stalls
%% waiting for results from a finished vnode.
values(Data) ->
    %% Don't make the SMS wait forever for vnodes that are done
    [V || {_Key, {_Status, V}=T} <- ?DICTMODULE:to_list(Data), T /= {done, []}].
%% @private true iff the given list is empty.
%% (Fix: stripped extraction-metadata residue fused onto the last line.)
empty([]) -> true;
empty(_) -> false.

%% @private true iff any of the given lists is empty.
any_empty(Lists) ->
    lists:any(fun empty/1, Lists).
%%-------------------------------------------------------------------
%%
%% Copyright (c) 2016, <NAME> <<EMAIL>>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%%-------------------------------------------------------------------
%% @doc Implements a valve which increases its size based on CoDel (Controlling
%% Queue Delay).
%%
%% The valve opens at decreasing intervals while updates remain below a
%% target, growing between a minimum and maximum capacity.
%%
%% `sregulator_codel_valve' can be used as the `sregulator_valve' in a
%% `sregulator'. It will provide a valve that increases in size in decreasing
%% intervals while updates remain below a target (based on CoDel) between the
%% minimum and maximum capacity. Its argument, `spec()', is of the form:
%% ```
%% #{target => Target :: integer(), % default: 100
%% interval => Interval :: pos_integer(), % default: 1000
%% min => Min :: non_neg_integer(), % default: 0
%% max => Max :: non_neg_integer() | infinity} % default: infinity
%% '''
%% `Target' is the target relative value in milliseconds. `Interval' is the
%% initial interval in milliseconds. The valve will open when updates remain
%% below the target (in `native' time units) for an interval. Each consecutive
%% interval is smaller than the last until an update above the target is
%% received. The valve is always open when below the minimum and always closed
%% once it reaches the maximum.
%%
%% This valve tries to enforce a minimum level of concurrency and will grow
%% while a relevant `sbroker_queue' is moving quickly - up to a maximum.
%% Therefore this valves expects the updates to be from a
%% `sregulator_update_meter'.
%%
%% The algorithm used in this valve is similar to `sbroker_codel_queue', except
%% designed to keep the relevant queue slow (but not too slow) instead of fast.
%% Therefore trying to ensure the counter party to the queue is always fast
%% without using too many resources. This works by increasing the concurrency or
%% number of requests when the queue is consistently fast and remaining static
%% when the queue is slow. Therefore forcing the queue to be slightly slow.
%%
%% This valve is designed to be used a `sbroker_codel_queue' with the same
%% `Interval' and `Target's that are between 10% and 20% of the `Interval'. The
%% target range is suggested due to the observation in the CoDel paper that the
%% queue goes from to fast to slow over this target range. Higher targets result
%% in heavily congested queues and wasted resources. A suggested initial
%% `Interval' is the 95% percentile of the time it takes to stop a task and
%% restart, as this is equivalent to the round trip when a packet is dropped and
%% resent to rejoin the queue in the paper.
%%
%% Decreasing the target of the valve makes it more resistant to bursts and
%% reducing the target of the queue will increase the rate of shrinking when
%% load decreases. This fulfils the general desire to increase resouce usage as
%% late as possible and decrease resource usage as early as possible. If the
%% queue's target is significantly lower than valve's this may lead to churn as
%% the queue and valve may act against each other. Also if the minimum is too
%% high the queue may drop the requests only for the valve to allow immediate
%% enqueues.
%%
%% More investigation needs to be done on suitable parameters.
%% @reference <NAME> and <NAME>, Controlling Queue Delay,
%% ACM Queue, 6th May 2012.
-module(sregulator_codel_valve).
-behaviour(sregulator_valve).
%% sregulator_valve_api
-export([init/3]).
-export([handle_ask/4]).
-export([handle_done/3]).
-export([handle_continue/3]).
-export([handle_update/3]).
-export([handle_info/3]).
-export([handle_timeout/2]).
-export([code_change/4]).
-export([config_change/3]).
-export([size/1]).
-export([open_time/1]).
-export([terminate/2]).
%% types
-type spec() ::
#{target => Target :: integer(),
interval => Interval :: pos_integer(),
min => Min :: non_neg_integer(),
max => Max :: non_neg_integer() | infinity}.
-export_type([spec/0]).
-record(state, {min :: non_neg_integer(),
max :: non_neg_integer() | infinity,
target :: integer(),
interval :: pos_integer(),
count=0 :: non_neg_integer(),
open_next :: integer(),
open_first=infinity :: integer() | infinity | opening | await,
small_time :: integer(),
map :: sregulator_valve:internal_map()}).
%% sregulator_valve api
%% @private
%% Initialise the valve from its spec. Both timestamps (open_next and
%% small_time) start at the current time; the CoDel state starts closed
%% (open_first defaults to infinity in the record).
-spec init(Map, Time, Spec) -> {open | closed, State, infinity} when
      Map :: sregulator_valve:internal_map(),
      Time :: integer(),
      Spec :: spec(),
      State :: #state{}.
init(Map, Time, Spec) ->
    {Min, Max} = sbroker_util:min_max(Spec),
    Target = sbroker_util:relative_target(Spec),
    Interval = sbroker_util:interval(Spec),
    handle(Time, #state{min=Min, max=Max, target=Target, interval=Interval,
                        open_next=Time, small_time=Time, map=Map}).
%% @private
%% Grant a lock to Pid under Ref and report the valve's new status.
%% This is only invoked while the valve is open, so every `if' branch
%% grants; which branch runs decides the reported "open since" time and
%% whether the CoDel opening bookkeeping advances.
-spec handle_ask(Pid, Ref, Time, State) ->
    {go, Open, open | closed, NState, infinity} when
      Pid :: pid(),
      Ref :: reference(),
      Time :: integer(),
      State :: #state{},
      Open :: integer(),
      NState :: #state{}.
handle_ask(Pid, Ref, Time,
           #state{min=Min, open_first=First, open_next=Next, small_time=Small,
                  count=C, map=Map} = State) ->
    NMap = maps:put(Ref, Pid, Map),
    NState = State#state{map=NMap},
    if
        %% Still below the minimum: unconditionally open since Small.
        map_size(NMap) < Min ->
            {go, Small, open, NState, infinity};
        %% open based on size of Min-1
        map_size(NMap) =:= Min ->
            go(Small, Time, NState);
        %% opening and fast for a consecutive interval: advance the
        %% CoDel open count and shorten the next interval.
        First == opening, Time >= Next ->
            go(Next, Time, open_control(C+1, Next, NState));
        %% fast for an initial interval: start a new opening sequence.
        Time >= First ->
            go(First, Time, open_control(Time, NState))
    end.
%% @private
%% Release the lock held under Ref (if any); delegates to done/5 with the
%% pre-removal map size so done/5 can detect an unknown Ref.
-spec handle_done(Ref, Time, State) ->
    {done | error, open | closed, NState, infinity} when
      Ref :: reference(),
      Time :: integer(),
      State :: #state{},
      NState :: #state{}.
handle_done(Ref, Time, #state{map=Map} = State) ->
    done(Ref, Map, map_size(Map), Time, State).
%% @private
%% Let the holder of Ref keep its lock when the valve would currently
%% grant one (go), otherwise release it (done). Returns error when Ref is
%% unknown. Clause order mirrors handle_ask/4: below/at the minimum the
%% continue is always granted; above the maximum it is always released;
%% otherwise it depends on the CoDel opening state.
-spec handle_continue(Ref, Time, State) ->
    {go, Open, open | closed, NState, infinity} |
    {done | error, open | closed, NState, infinity} when
      Ref :: reference(),
      Time :: integer(),
      State :: #state{},
      Open :: integer(),
      NState :: #state{}.
handle_continue(Ref, Time,
                #state{min=Min, max=Max, open_first=First, open_next=Next,
                       small_time=Small, count=C, map=Map} = State) ->
    Size = map_size(Map),
    if
        %% Below minimum: always continue, open since Small.
        Size < Min ->
            continue(Ref, Map, Size, Small, Time, State, State);
        %% Exactly at minimum: continue, open as of now.
        Size =:= Min ->
            continue(Ref, Map, Size, Time, Time, State, State);
        %% Over capacity: release the lock.
        Size > Max ->
            done(Ref, Map, Size, Time, State);
        %% Opening and the next open deadline has passed: continue and
        %% advance the CoDel open count / shrink the interval.
        First == opening, Time >= Next ->
            NState = open_control(C+1, Next, State),
            continue(Ref, Map, Size, Next, Time, State, NState);
        %% First fast interval elapsed: continue and start opening.
        is_integer(First), Time >= First ->
            NState = open_control(Time, State),
            continue(Ref, Map, Size, First, Time, State, NState);
        %% Closed: release the lock.
        true ->
            done(Ref, Map, Size, Time, State)
    end.
%% @private
%% Feed one relative-time sample into the CoDel control loop. The
%% open_first field tracks the control state:
%%   infinity  - samples at/above target (closed above the minimum);
%%   integer   - first fast sample seen; the valve opens at that time;
%%   await     - a grant was just made, waiting for the next fast sample;
%%   opening   - consecutively fast, opening at decreasing intervals.
%% Clause order matters: the guards on RelativeTime < Target must be
%% tried before the slow-sample clauses.
-spec handle_update(Value, Time, State) ->
    {open | closed, NState, infinity} when
      Value :: integer(),
      Time :: integer(),
      State :: #state{},
      NState :: #state{}.
%% First fast sample: schedule opening one full interval from now.
handle_update(RelativeTime, Time,
              #state{open_first=infinity, target=Target,
                     interval=Interval} = State) when RelativeTime < Target ->
    handle(Time, State#state{open_first=Time+Interval});
%% Still fast after a grant: resume the opening sequence.
handle_update(RelativeTime, Time,
              #state{target=Target, open_first=await} = State)
  when RelativeTime < Target ->
    handle(Time, State#state{open_first=opening});
%% Fast and already counting down/opening: no state change.
handle_update(RelativeTime, Time, #state{target=Target} = State)
  when RelativeTime < Target ->
    handle(Time, State);
%% Slow and already closed: stay closed.
handle_update(_, Time, #state{open_first=infinity} = State) ->
    handle(Time, State);
%% Slow sample: abort any pending/running opening sequence.
handle_update(_, Time, State) ->
    handle(Time, State#state{open_first=infinity}).
%% @private
%% React to a raw message. A 'DOWN' for a monitored lock holder removes
%% its entry; when that removal takes the valve from exactly the minimum
%% to below it, the "became small" timestamp is refreshed. All other
%% messages are ignored (status is still re-evaluated).
-spec handle_info(Msg, Time, State) -> {open | closed, NState, infinity} when
      Msg :: any(),
      Time :: integer(),
      State :: #state{},
      NState :: #state{}.
handle_info({'DOWN', Ref, _, _, _}, Time, #state{map=Map, min=Min} = State) ->
    SizeBefore = map_size(Map),
    NMap = maps:remove(Ref, Map),
    SizeAfter = map_size(NMap),
    NState =
        if
            SizeBefore =:= Min, SizeAfter < Min ->
                State#state{map=NMap, small_time=Time};
            true ->
                State#state{map=NMap}
        end,
    handle(Time, NState);
handle_info(_, Time, State) ->
    handle(Time, State).
%% @private
%% Periodic timeout: just re-evaluate the open/closed status at Time.
-spec handle_timeout(Time, State) -> {open | closed, NState, infinity} when
      Time :: integer(),
      State :: #state{},
      NState :: #state{}.
handle_timeout(Time, State) ->
    handle(Time, State).
%% @private
%% Hot code upgrade: state record layout is unchanged, so just
%% re-evaluate the status at Time.
-spec code_change(OldVsn, Time, State, Extra) -> {Status, NState, infinity} when
      OldVsn :: any(),
      Time :: integer(),
      State :: #state{},
      Extra :: any(),
      Status :: open | closed,
      NState :: #state{}.
code_change(_, Time, State, _) ->
    handle(Time, State).
%% @private
%% Apply a new spec in place, then clamp any scheduled open deadlines via
%% change/2 so a shortened interval takes effect immediately.
-spec config_change(Spec, Time, State) -> {open | closed, NState, infinity} when
      Spec :: spec(),
      Time :: integer(),
      State :: #state{},
      NState :: #state{}.
config_change(Spec, Time, State) ->
    {Min, Max} = sbroker_util:min_max(Spec),
    Target = sbroker_util:relative_target(Spec),
    Interval = sbroker_util:interval(Spec),
    change(Time, State#state{min=Min, max=Max, target=Target,
                             interval=Interval}).
%% @private
%% Number of locks currently held (size of the Ref -> Pid map).
-spec size(State) -> Size when
      State :: #state{},
      Size :: non_neg_integer().
size(#state{map=Map}) ->
    map_size(Map).
%% @private
%% Return the time since when the valve has been open, or the atom
%% `closed'. Clause order mirrors status/3 and is significant.
-spec open_time(State) -> Open | closed when
      State :: #state{},
      Open :: integer().
%% Below minimum: open since the valve last became small.
open_time(#state{map=Map, min=Min, small_time=Small})
  when map_size(Map) < Min ->
    Small;
%% At/over maximum capacity: always closed.
open_time(#state{map=Map, max=Max}) when map_size(Map) >= Max ->
    closed;
%% No fast samples seen / waiting for one: closed.
open_time(#state{open_first=infinity}) ->
    closed;
open_time(#state{open_first=await}) ->
    closed;
%% Opening sequence: open as of the next scheduled open time.
open_time(#state{open_first=opening, open_next=Next}) ->
    Next;
%% Counting down the initial fast interval: open as of its deadline.
open_time(#state{open_first=First}) ->
    First.
%% @private
%% Hand the lock map back to the sregulator on termination.
-spec terminate(Reason, State) -> Map when
      Reason :: any(),
      State :: #state{},
      Map :: sregulator_valve:internal_map().
terminate(_, #state{map=Map}) ->
    Map.
%% Internal

%% Build a {go, Open, ...} grant reply carrying the current status.
go(Open, Time, #state{map=Map} = State) ->
    {go, Open, status(map_size(Map), Time, State), State, infinity}.

%% Build a plain {Status, State, infinity} reply.
handle(Time, #state{map=Map} = State) ->
    {status(map_size(Map), Time, State), State, infinity}.
%% Compute the valve's open/closed status for a given lock count and
%% time. Clause order is significant: the capacity bounds dominate the
%% CoDel state, which in turn dominates the time comparisons.
status(Size, _, #state{min=Min}) when Size < Min ->
    open;
status(Size, _, #state{max=Max}) when Size >= Max ->
    closed;
%% No fast samples seen / waiting for the next one: closed.
status(_, _, #state{open_first=infinity}) ->
    closed;
status(_, _, #state{open_first=await}) ->
    closed;
%% Opening: closed until the next scheduled open time passes.
status(_, Time, #state{open_first=opening, open_next=Next}) when Time < Next ->
    closed;
status(_, _, #state{open_first=opening}) ->
    open;
%% Initial fast interval: closed until its deadline passes.
status(_, Time, #state{open_first=First}) when Time < First ->
    closed;
status(_, _, _) ->
    open.
%% If first fast update in fast interval was "soon" after switching from
%% opening to closed use the previous dropping interval length as it
%% should be appropriate. (The count-2 reset and the `8 * Interval'
%% re-entry window follow the CoDel reference algorithm.)
open_control(Time, #state{interval=Interval, count=C, open_next=Next} = State)
  when C > 2 andalso Time - Next < 8 * Interval ->
    open_control(C - 2, Time, State);
%% Otherwise start a fresh opening sequence of length one interval.
open_control(Time, #state{interval=Interval} = State) ->
    State#state{count=1, open_next=Time+Interval, open_first=await}.

%% Shrink the interval to increase open rate and reduce relative time:
%% the next open is Interval/sqrt(C) after Time (CoDel's control law).
open_control(C, Time, #state{interval=Interval} = State) ->
    Next = Time + trunc(Interval / math:sqrt(C)),
    State#state{count=C, open_first=await, open_next=Next}.
%% Grant a continue for Ref when it actually holds a lock, applying the
%% updated state (OKState); otherwise report an error against the
%% unmodified state (ErrorState).
continue(Ref, Map, Size, Open, Time, ErrorState, OKState) ->
    case maps:is_key(Ref, Map) of
        true ->
            {go, Open, status(Size, Time, OKState), OKState, infinity};
        false ->
            {error, status(Size, Time, ErrorState), ErrorState, infinity}
    end.
%% Remove the lock held under Ref. Before is the map size prior to
%% removal, so an unchanged size means Ref was unknown.
done(Ref, Map, Before, Time, #state{min=Min} = State) ->
    NMap = maps:remove(Ref, Map),
    NState = State#state{map=NMap},
    case map_size(NMap) of
        %% Size unchanged: Ref held no lock.
        Before ->
            {error, status(Before, Time, NState), NState, infinity};
        %% Dropping below the minimum: record when the valve became
        %% small so subsequent grants report it as their open time.
        %% NOTE(review): unlike the branch below, this branch does not
        %% demonitor(Ref) — confirm whether that is intentional.
        _ when Before =:= Min ->
            {done, open, NState#state{small_time=Time}, infinity};
        After ->
            demonitor(Ref, [flush]),
            {done, status(After, Time, NState), NState, infinity}
    end.
%% Clamp open_first/open_next after a config change so a shortened
%% interval takes effect immediately instead of waiting out the old
%% deadline, then re-evaluate the status.
%% (Fix: stripped extraction-metadata residue fused onto the last line.)
change(Time, #state{open_first=First, interval=Interval} = State)
  when is_integer(First), First > Time+Interval ->
    change(Time, State#state{open_first=Time+Interval});
change(Time, #state{open_next=Next, interval=Interval} = State)
  when is_integer(Next) andalso Next > Time+Interval ->
    change(Time, State#state{open_next=Time+Interval});
change(Time, State) ->
    handle(Time, State).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2012-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc
%% This module implements a persistent, on-disk hash tree that is used
%% predominately for active anti-entropy exchange in Riak. The tree consists
%% of two parts, a set of unbounded on-disk segments and a fixed size hash
%% tree (that may be on-disk or in-memory) constructed over these segments.
%%
%% A graphical description of this design can be found in: docs/hashtree.md
%%
%% Each segment logically represents an on-disk list of (key, hash) pairs.
%% Whereas the hash tree is represented as a set of levels and buckets, with a
%% fixed width (or fan-out) between levels that determines how many buckets of
%% a child level are grouped together and hashed to represent a bucket at the
%% parent level. Each leaf in the tree corresponds to a hash of one of the
%% on-disk segments. For example, a tree with a width of 4 and 16 segments
%% would look like the following:
%%
%% level buckets
%% 1: [0]
%% 2: [0 1 2 3]
%% 3: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
%%
%% With each bucket entry of the form ``{bucket-id, hash}'', eg. ``{0,
%% binary()}''. The hash for each of the entries at level 3 would come from
%% one of the 16 segments, while the hashes for entries at level 1 and 2 are
%% derived from the lower levels.
%%
%% Specifically, the bucket entries in level 2 would come from level 3:
%% 0: hash([ 0 1 2 3])
%% 1: hash([ 4 5 6 7])
%% 2: hash([ 8 9 10 11])
%% 3: hash([12 13 14 15])
%%
%% And the bucket entries in level 1 would come from level 2:
%% 1: hash([hash([ 0 1 2 3])
%% hash([ 4 5 6 7])
%% hash([ 8 9 10 11])
%% hash([12 13 14 15])])
%%
%% When a (key, hash) pair is added to the tree, the key is hashed to
%% determine which segment it belongs to and inserted/upserted into the
%% segment. Rather than update the hash tree on every insert, a dirty bit is
%% set to note that a given segment has changed. The hashes are then updated
%% in bulk before performing a tree exchange
%%
%% To update the hash tree, the code iterates over each dirty segment,
%% building a list of (key, hash) pairs. A hash is computed over this list,
%% and the leaf node in the hash tree corresponding to the given segment is
%% updated. After iterating over all dirty segments, and thus updating all
%% leaf nodes, the update then continues to update the tree bottom-up,
%% updating only paths that have changed. As designed, the update requires a
%% single sparse scan over the on-disk segments and a minimal traversal up the
%% hash tree.
%%
%% The heavy-lifting of this module is provided by LevelDB. What is logically
%% viewed as sorted on-disk segments is in reality a range of on-disk
%% (segment, key, hash) values written to LevelDB. Each insert of a (key,
%% hash) pair therefore corresponds to a single LevelDB write (no read
%% necessary). Likewise, the update operation is performed using LevelDB
%% iterators.
%%
%% When used for active anti-entropy in Riak, the hash tree is built once and
%% then updated in real-time as writes occur. A key design goal is to ensure
%% that adding (key, hash) pairs to the tree is non-blocking, even during a
%% tree update or a tree exchange. This is accomplished using LevelDB
%% snapshots. Inserts into the tree always write directly to the active
%% LevelDB instance, however updates and exchanges operate over a snapshot of
%% the tree.
%%
%% In order to improve performance, writes are buffered in memory and sent
%% to LevelDB using a single batch write. Writes are flushed whenever the
%% buffer becomes full, as well as before updating the hashtree.
%%
%% Tree exchange is provided by the ``compare/4'' function.
%% The behavior of this function is determined through a provided function
%% that implements logic to get buckets and segments for a given remote tree,
%% as well as a callback invoked as key differences are determined. This
%% generic interface allows for tree exchange to be implemented in a variety
%% of ways, including directly against to local hash tree instances, over
%% distributed Erlang, or over a custom protocol over a TCP socket. See
%% ``local_compare/2'' and ``do_remote/1'' for examples (-ifdef(TEST) only).
-module(hashtree).
-export([new/0,
new/2,
new/3,
insert/3,
insert/4,
estimate_keys/1,
delete/2,
update_tree/1,
update_snapshot/1,
update_perform/1,
rehash_tree/1,
flush_buffer/1,
close/1,
destroy/1,
read_meta/2,
write_meta/3,
compare/4,
top_hash/1,
get_bucket/3,
key_hashes/2,
levels/1,
segments/1,
width/1,
mem_levels/1,
path/1,
next_rebuild/1,
set_next_rebuild/2,
mark_open_empty/2,
mark_open_and_check/2,
mark_clean_close/2]).
-export([compare2/4]).
-export([multi_select_segment/3, safe_decode/1]).
-ifdef(namespaced_types).
-type hashtree_dict() :: dict:dict().
-type hashtree_array() :: array:array().
-else.
-type hashtree_dict() :: dict().
-type hashtree_array() :: array().
-endif.
-define(ALL_SEGMENTS, ['*', '*']).
-define(BIN_TO_INT(B), list_to_integer(binary_to_list(B))).
-ifdef(TEST).
-export([fake_close/1, local_compare/2, local_compare1/2]).
-export([run_local/0,
run_local/1,
run_concurrent_build/0,
run_concurrent_build/1,
run_concurrent_build/2,
run_multiple/2,
run_remote/0,
run_remote/1]).
-ifdef(EQC).
-export([prop_correct/0, prop_sha/0, prop_est/0]).
-include_lib("eqc/include/eqc.hrl").
-endif.
-include_lib("eunit/include/eunit.hrl").
-endif. %% TEST
-define(NUM_SEGMENTS, (1024*1024)).
-define(WIDTH, 1024).
-define(MEM_LEVELS, 0).
-define(NUM_KEYS_REQUIRED, 1000).
-type tree_id_bin() :: <<_:176>>.
-type segment_bin() :: <<_:256, _:_*8>>.
-type bucket_bin() :: <<_:320>>.
-type meta_bin() :: <<_:8, _:_*8>>.
-type proplist() :: proplists:proplist().
-type orddict() :: orddict:orddict().
-type index() :: non_neg_integer().
-type index_n() :: {index(), pos_integer()}.
-type keydiff() :: {missing | remote_missing | different, binary()}.
-type remote_fun() :: fun((get_bucket | key_hashes | start_exchange_level |
start_exchange_segments | init | final,
{integer(), integer()} | integer() | term()) -> any()).
-type acc_fun(Acc) :: fun(([keydiff()], Acc) -> Acc).
-type select_fun(T) :: fun((orddict()) -> T).
-type next_rebuild() :: full | incremental.
-record(state, {id :: tree_id_bin(),
index :: index(),
levels :: pos_integer(),
segments :: pos_integer(),
width :: pos_integer(),
mem_levels :: integer(),
tree :: hashtree_dict(),
ref :: term(),
path :: string(),
itr :: term(),
next_rebuild :: next_rebuild(),
write_buffer :: [{put, binary(), binary()} |
{delete, binary()}],
write_buffer_count :: integer(),
dirty_segments :: hashtree_array()
}).
-record(itr_state, {itr :: term(),
id :: tree_id_bin(),
current_segment :: '*' | integer(),
remaining_segments :: ['*' | integer()],
acc_fun :: fun(([{binary(),binary()}]) -> any()),
segment_acc :: [{binary(), binary()}],
final_acc :: [{integer(), any()}],
prefetch=false :: boolean()
}).
-opaque hashtree() :: #state{}.
-export_type([hashtree/0,
tree_id_bin/0,
keydiff/0,
remote_fun/0,
acc_fun/1]).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Create a hashtree with the default tree id {0,0} and a fresh
%% segment store.
-spec new() -> hashtree().
new() ->
    new({0,0}).
%% @doc Create a hashtree for the given {Index, TreeId}, backed by a
%% newly created LevelDB segment store with default options.
-spec new({index(), tree_id_bin() | non_neg_integer()}) -> hashtree().
new(TreeId) ->
    {DB, Path} = new_segment_store([]),
    new(TreeId, DB, Path, []).
%% @doc Create a hashtree, either with a fresh segment store (when given
%% an options proplist) or sharing the LevelDB instance of an existing
%% tree (when given a hashtree).
-spec new({index(), tree_id_bin() | non_neg_integer()}, proplist()) -> hashtree();
          ({index(), tree_id_bin() | non_neg_integer()}, hashtree()) -> hashtree().
new(TreeId, Options) when is_list(Options) ->
    {DB, Path} = new_segment_store(Options),
    new(TreeId, DB, Path, Options);
new(TreeId, LinkedStore = #state{}) ->
    new(TreeId, LinkedStore, []).
%% @doc Create a hashtree sharing the LevelDB instance (handle and path)
%% of an existing tree, with explicit options.
-spec new({index(), tree_id_bin() | non_neg_integer()},
          hashtree(), proplist()) -> hashtree().
new(TreeId, LinkedStore, Options) ->
    new(TreeId, LinkedStore#state.ref, LinkedStore#state.path, Options).
%% @doc Construct the hashtree state around an already-open LevelDB
%% handle. Segment count, tree width, and in-memory levels come from
%% Options (falling back to the module defaults); the number of tree
%% levels is derived as ceil-ish log_Width(NumSegments) + 1. A new tree
%% always starts with a pending full rebuild and empty write buffer.
-spec new({index(), tree_id_bin() | non_neg_integer()},
          term(), string(),
          proplist()) -> hashtree().
new({Index, TreeId}, DB, Path, Options) ->
    GetOpt = fun(Key, Default) -> proplists:get_value(Key, Options, Default) end,
    NumSegments = GetOpt(segments, ?NUM_SEGMENTS),
    Width = GetOpt(width, ?WIDTH),
    MemLevels = GetOpt(mem_levels, ?MEM_LEVELS),
    NumLevels = erlang:trunc(math:log(NumSegments) / math:log(Width)) + 1,
    #state{id=encode_id(TreeId),
           index=Index,
           levels=NumLevels,
           segments=NumSegments,
           width=Width,
           mem_levels=MemLevels,
           dirty_segments=bitarray_new(NumSegments),
           next_rebuild=full,
           write_buffer=[],
           write_buffer_count=0,
           tree=dict:new(),
           ref=DB,
           path=Path}.
%% @doc Close the tree's iterator (if any) and its LevelDB handle.
%% Both closes are best-effort: other trees may share the handle and
%% the iterator may be undefined, so failures are ignored.
-spec close(hashtree()) -> hashtree().
close(State) ->
    close_iterator(State#state.itr),
    %% old-style catch is intentional here: closing an already-closed or
    %% shared handle must not crash the caller.
    catch eleveldb:close(State#state.ref),
    State#state{itr=undefined}.

%% Best-effort close of an eleveldb iterator; Itr may be undefined or
%% already closed, in which case the badarg is swallowed deliberately.
close_iterator(Itr) ->
    try
        eleveldb:iterator_close(Itr)
    catch
        _:_ ->
            ok
    end.
%% @doc Destroy the on-disk LevelDB store, either by path or via a tree
%% handle. Returns `ok' for a path, or the (now storeless) tree.
-spec destroy(string() | hashtree()) -> ok | hashtree().
destroy(Path) when is_list(Path) ->
    ok = eleveldb:destroy(Path, []);
destroy(State) ->
    %% Assumption: close was already called on all hashtrees that
    %% use this LevelDB instance,
    ok = eleveldb:destroy(State#state.path, []),
    State.
%% @doc Insert (or overwrite) the hash for Key into the tree's write
%% buffer and mark its segment dirty.
-spec insert(binary(), binary(), hashtree()) -> hashtree().
insert(Key, ObjHash, State) ->
    insert(Key, ObjHash, State, []).

%% @doc As insert/3; Opts may contain `if_missing' to skip keys that
%% already have an entry (see should_insert/3).
-spec insert(binary(), binary(), hashtree(), proplist()) -> hashtree().
insert(Key, ObjHash, State, Opts) ->
    %% Key -> segment mapping is a simple hash mod NumSegments.
    Hash = erlang:phash2(Key),
    Segment = Hash rem State#state.segments,
    HKey = encode(State#state.id, Segment, Key),
    case should_insert(HKey, Opts, State) of
        true ->
            State2 = enqueue_action({put, HKey, ObjHash}, State),
            %% Dirty = gb_sets:add_element(Segment, State2#state.dirty_segments),
            Dirty = bitarray_set(Segment, State2#state.dirty_segments),
            State2#state{dirty_segments=Dirty};
        false ->
            State
    end.
%% Prepend a put/delete action to the write buffer and flush to LevelDB
%% if the buffer has grown past its threshold.
enqueue_action(Action, State = #state{write_buffer = Buffer,
                                      write_buffer_count = Count}) ->
    Pending = State#state{write_buffer = [Action | Buffer],
                          write_buffer_count = Count + 1},
    maybe_flush_buffer(Pending).
%% Flush the write buffer to LevelDB once more than 200 actions are
%% queued; otherwise leave the state untouched.
maybe_flush_buffer(State = #state{write_buffer_count = WCount}) when WCount > 200 ->
    flush_buffer(State);
maybe_flush_buffer(State) ->
    State.
%% @doc Write all buffered put/delete actions to LevelDB in one batch
%% and reset the buffer. No-op on an empty buffer.
-spec flush_buffer(hashtree()) -> hashtree().
flush_buffer(State=#state{write_buffer=[], write_buffer_count=0}) ->
    State;
flush_buffer(State=#state{write_buffer=WBuffer}) ->
    %% Write buffer is built backwards, reverse to build update list
    Updates = lists:reverse(WBuffer),
    ok = eleveldb:write(State#state.ref, Updates, []),
    State#state{write_buffer=[],
                write_buffer_count=0}.
%% @doc Queue removal of Key's hash from the tree and mark its segment
%% dirty so the next incremental update recomputes it.
-spec delete(binary(), hashtree()) -> hashtree().
delete(Key, State) ->
    %% Same key -> segment mapping as insert/4.
    Hash = erlang:phash2(Key),
    Segment = Hash rem State#state.segments,
    HKey = encode(State#state.id, Segment, Key),
    State2 = enqueue_action({delete, HKey}, State),
    %% Dirty = gb_sets:add_element(Segment, State2#state.dirty_segments),
    Dirty = bitarray_set(Segment, State2#state.dirty_segments),
    State2#state{dirty_segments=Dirty}.
%% Decide whether an insert should proceed. With the `if_missing' option
%% the key is only written when no entry exists in the segment store;
%% otherwise inserts are unconditional.
-spec should_insert(segment_bin(), proplist(), hashtree()) -> boolean().
should_insert(HKey, Opts, State) ->
    case proplists:get_value(if_missing, Opts, false) of
        true ->
            %% Only insert if object does not already exist
            %% TODO: Use bloom filter so we don't always call get here
            eleveldb:get(State#state.ref, HKey, []) =:= not_found;
        _ ->
            true
    end.
%% @doc Flush pending writes and take a snapshot. Returns {SnapState,
%% State3}: SnapState is the frozen view to run update_perform/1 on,
%% State3 continues as the live tree with a cleared dirty-segment set.
-spec update_snapshot(hashtree()) -> {hashtree(), hashtree()}.
update_snapshot(State=#state{segments=NumSegments}) ->
    State2 = flush_buffer(State),
    SnapState = snapshot(State2),
    State3 = SnapState#state{dirty_segments=bitarray_new(NumSegments)},
    {SnapState, State3}.
%% @doc Flush buffered writes, snapshot the store, and fold segment
%% hashes up through the tree levels.
-spec update_tree(hashtree()) -> hashtree().
update_tree(State) ->
    update_perform(snapshot(flush_buffer(State))).
%% @doc Recompute upper tree levels from the segment store. A `full'
%% rebuild rehashes every segment (clearing stored buckets first); an
%% `incremental' rebuild only rehashes the dirty segments. On success
%% the next rebuild is always scheduled as incremental.
-spec update_perform(hashtree()) -> hashtree().
update_perform(State=#state{dirty_segments=Dirty, segments=NumSegments}) ->
    NextRebuild = State#state.next_rebuild,
    Segments = case NextRebuild of
                   full ->
                       ?ALL_SEGMENTS;
                   incremental ->
                       %% gb_sets:to_list(Dirty),
                       bitarray_to_list(Dirty)
               end,
    State2 = maybe_clear_buckets(NextRebuild, State),
    State3 = update_tree(Segments, State2),
    %% State2#state{dirty_segments=gb_sets:new()}
    State3#state{dirty_segments=bitarray_new(NumSegments),
                 next_rebuild=incremental}.
%% Clear buckets if doing a full rebuild
maybe_clear_buckets(full, State) ->
    clear_buckets(State);
maybe_clear_buckets(incremental, State) ->
    State.

%% Fold over the 'live' data (outside of the snapshot), removing all
%% bucket entries for the tree. Bucket keys for one tree id sort
%% contiguously, so the fold starts at {Id, 0, 0} and breaks (via throw)
%% at the first key that is not a bucket of this tree.
clear_buckets(State=#state{id=Id, ref=Ref}) ->
    Fun = fun({K,_V},Acc) ->
                  try
                      case decode_bucket(K) of
                          {Id, _, _} ->
                              ok = eleveldb:delete(Ref, K, []),
                              Acc + 1;
                          _ ->
                              %% Bucket of another tree: past our range, stop.
                              throw({break, Acc})
                      end
                  catch
                      _:_ -> % not a decodable bucket
                          throw({break, Acc})
                  end
          end,
    Opts = [{first_key, encode_bucket(Id, 0, 0)}],
    Removed =
        try
            eleveldb:fold(Ref, Fun, 0, Opts)
        catch
            {break, AccFinal} ->
                AccFinal
        end,
    lager:debug("Tree ~p cleared ~p segments.\n", [Id, Removed]),
    %% Mark the tree as requiring a full rebuild (will be fixed
    %% reset at end of update_trees) AND dump the in-memory
    %% tree.
    State#state{next_rebuild = full,
                tree = dict:new()}.
-spec update_tree([integer()], hashtree()) -> hashtree().
update_tree([], State) ->
State;
update_tree(Segments, State=#state{next_rebuild=NextRebuild, width=Width,
levels=Levels}) ->
LastLevel = Levels,
Hashes = orddict:from_list(hashes(State, Segments)),
%% Paranoia to make sure all of the hash entries are updated as expected
lager:debug("segments ~p -> hashes ~p\n", [Segments, Hashes]),
case Segments == ?ALL_SEGMENTS orelse
length(Segments) == length(Hashes) of
true ->
Groups = group(Hashes, Width),
update_levels(LastLevel, Groups, State, NextRebuild);
false ->
%% At this point the hashes are no longer sufficient to update
%% the upper trees. Alternative is to crash here, but that would
%% lose updates and is the action taken on repair anyway.
%% Save the customer some pain by doing that now and log.
%% Enable lager debug tracing with lager:trace_file(hashtree, "/tmp/ht.trace"
%% to get the detailed segment information.
lager:warning("Incremental AAE hash was unable to find all required data, "
"forcing full rebuild of ~p", [State#state.path]),
update_perform(State#state{next_rebuild = full})
end.
%% @doc Flush buffered writes, snapshot the store, and rebuild every
%% upper level from all segments (full rehash).
-spec rehash_tree(hashtree()) -> hashtree().
rehash_tree(State) ->
    rehash_perform(snapshot(flush_buffer(State))).
%% @doc Rehash every segment and rebuild the upper levels from scratch.
%% A rehash is always treated as a full rebuild; an empty tree is
%% returned unchanged.
-spec rehash_perform(hashtree()) -> hashtree().
rehash_perform(State = #state{width = Width, levels = Levels}) ->
    case orddict:from_list(hashes(State, ?ALL_SEGMENTS)) of
        [] ->
            State;
        Hashes ->
            %% Always do a full rebuild on rehash
            update_levels(Levels, group(Hashes, Width), State, full)
    end.
%% @doc Mark/clear metadata for tree-id opened/closed.
%% Set next_rebuild to be incremental.
-spec mark_open_empty(index_n()|binary(), hashtree()) -> hashtree().
mark_open_empty(TreeId, State) when is_binary(TreeId) ->
    %% Fresh counters: one open, zero closes recorded.
    State1 = write_meta(TreeId, [{opened, 1}, {closed, 0}], State),
    State1#state{next_rebuild=incremental};
mark_open_empty(TreeId, State) ->
    %% Non-binary tree ids are normalized via term_to_binary.
    mark_open_empty(term_to_binary(TreeId), State).
%% @doc Check if shutdown/closing of tree-id was clean/dirty by comparing
%% `closed' to `opened' metadata count for the hashtree, and,
%% increment opened count for hashtree-id.
%%
%%
%% If it was a clean shutdown, set `next_rebuild' to be an incremental one.
%% Otherwise, if it was a dirty shutdown, set `next_rebuild', instead,
%% to be a full one.
-spec mark_open_and_check(index_n()|binary(), hashtree()) -> hashtree().
mark_open_and_check(TreeId, State) when is_binary(TreeId) ->
    MetaTerm = read_meta_term(TreeId, [], State),
    %% Defaults (0 vs -1) deliberately differ so that missing metadata
    %% compares as a dirty shutdown and forces a full rebuild.
    OpenedCnt = proplists:get_value(opened, MetaTerm, 0),
    ClosedCnt = proplists:get_value(closed, MetaTerm, -1),
    _ = write_meta(TreeId, lists:keystore(opened, 1, MetaTerm,
                                          {opened, OpenedCnt + 1}), State),
    %% In-memory levels are never persisted, so any mem_levels > 0 also
    %% requires a full rebuild.
    case ClosedCnt =/= OpenedCnt orelse State#state.mem_levels > 0 of
        true ->
            State#state{next_rebuild = full};
        false ->
            State#state{next_rebuild = incremental}
    end;
mark_open_and_check(TreeId, State) ->
    mark_open_and_check(term_to_binary(TreeId), State).
%% @doc Call on a clean-close to update the meta for a tree-id's `closed' count
%% to match the current `opened' count, which is checked on new/reopen.
-spec mark_clean_close(index_n()|binary(), hashtree()) -> hashtree().
mark_clean_close(TreeId, State) when is_binary(TreeId) ->
    MetaTerm = read_meta_term(TreeId, [], State),
    OpenedCnt = proplists:get_value(opened, MetaTerm, 0),
    %% closed == opened signals a clean shutdown to mark_open_and_check/2.
    _ = write_meta(TreeId, lists:keystore(closed, 1, MetaTerm,
                                          {closed, OpenedCnt}), State);
mark_clean_close(TreeId, State) ->
    mark_clean_close(term_to_binary(TreeId), State).
%% @doc Return the root hash of the tree: the single bucket at level 1,
%% or [] if the tree is empty.
-spec top_hash(hashtree()) -> [] | [{0, binary()}].
top_hash(State) ->
    get_bucket(1, 0, State).

%% @doc Recursively compare this tree against a remote tree (via the
%% remote fun), starting from the root at level 1, bucket 0.
compare(Tree, Remote, AccFun, Acc) ->
    compare(1, 0, Tree, Remote, AccFun, Acc).

%% Simple field accessors over the opaque tree handle.
-spec levels(hashtree()) -> pos_integer().
levels(#state{levels=L}) ->
    L.
-spec segments(hashtree()) -> pos_integer().
segments(#state{segments=S}) ->
    S.
-spec width(hashtree()) -> pos_integer().
width(#state{width=W}) ->
    W.
-spec mem_levels(hashtree()) -> integer().
mem_levels(#state{mem_levels=M}) ->
    M.
-spec path(hashtree()) -> string().
path(#state{path=P}) ->
    P.
-spec next_rebuild(hashtree()) -> next_rebuild().
next_rebuild(#state{next_rebuild=NextRebuild}) ->
    NextRebuild.
-spec set_next_rebuild(hashtree(), next_rebuild()) -> hashtree().
set_next_rebuild(Tree, NextRebuild) ->
    Tree#state{next_rebuild = NextRebuild}.
%% Note: meta is currently a one per file thing, even if there are multiple
%% trees per file. This is intentional. If we want per tree metadata
%% this will need to be added as a separate thing.
%%
%% @doc Store a metadata value under Key. Non-binary values are
%% serialized with term_to_binary/1 first.
-spec write_meta(binary(), binary()|term(), hashtree()) -> hashtree().
write_meta(Key, Value, State) when is_binary(Key) and is_binary(Value) ->
    HKey = encode_meta(Key),
    ok = eleveldb:put(State#state.ref, HKey, Value, []),
    State;
write_meta(Key, Value0, State) when is_binary(Key) ->
    Value = term_to_binary(Value0),
    write_meta(Key, Value, State).
%% @doc Fetch the raw metadata binary stored under Key, or `undefined'
%% when absent (or on any store error).
-spec read_meta(binary(), hashtree()) -> {ok, binary()} | undefined.
read_meta(Key, State) when is_binary(Key) ->
    case eleveldb:get(State#state.ref, encode_meta(Key), []) of
        {ok, _Value} = Found ->
            Found;
        _ ->
            undefined
    end.
%% @doc Fetch the metadata term stored under Key (deserialized with
%% binary_to_term/1), or Default when no value is stored.
-spec read_meta_term(binary(), term(), hashtree()) -> term().
read_meta_term(Key, Default, State) when is_binary(Key) ->
    case read_meta(Key, State) of
        {ok, Bin} ->
            binary_to_term(Bin);
        _ ->
            Default
    end.
%% @doc
%% Estimate number of keys stored in the AAE tree. This is determined
%% by sampling segments to calculate an estimated keys-per-segment
%% value, which is then multiplied by the number of segments. Segments
%% are sampled until either 1% of segments have been visited or 1000
%% keys have been observed.
%%
%% Note: this function must be called on a tree with a valid iterator,
%% such as the snapshotted tree returned from update_snapshot/1
%% or a recently updated tree returned from update_tree/1 (which
%% internally creates a snapshot). Using update_tree/1 is the best
%% choice since that ensures segments are updated giving a better
%% estimate.
-spec estimate_keys(hashtree()) -> {ok, integer()}.
estimate_keys(State) ->
    estimate_keys(State, 0, 0, ?NUM_KEYS_REQUIRED).

%% Termination reached before any segment was sampled (e.g. a
%% non-positive key quota). The extrapolation below would divide by
%% zero in this case; report an estimate of zero instead.
estimate_keys(_State, 0, Keys, MaxKeys) when Keys >= MaxKeys ->
    {ok, 0};
estimate_keys(#state{segments=Segments}, CurrentSegment, Keys, MaxKeys)
  when (CurrentSegment * 100) >= Segments;
       Keys >= MaxKeys ->
    %% Visited 1% of segments or observed enough keys: extrapolate
    %% keys-per-segment over the whole tree.
    {ok, (Keys * Segments) div CurrentSegment};
estimate_keys(State, CurrentSegment, Keys, MaxKeys) ->
    %% Sample the next segment and accumulate its key count.
    [{_, KeyHashes2}] = key_hashes(State, CurrentSegment),
    estimate_keys(State, CurrentSegment + 1, Keys + length(KeyHashes2), MaxKeys).
%% @doc Return the raw {Key, Hash} pairs stored for a single segment,
%% as [{Segment, Pairs}] (identity select over one segment).
-spec key_hashes(hashtree(), integer()) -> [{integer(), orddict()}].
key_hashes(State, Segment) ->
    multi_select_segment(State, [Segment], fun(X) -> X end).
%% @doc Read a tree bucket, dispatching to the in-memory store for the
%% top `mem_levels' levels and to disk for everything below.
-spec get_bucket(integer(), integer(), hashtree()) -> orddict().
get_bucket(Level, Bucket, State = #state{mem_levels = MemLevels})
  when Level =< MemLevels ->
    get_memory_bucket(Level, Bucket, State);
get_bucket(Level, Bucket, State) ->
    get_disk_bucket(Level, Bucket, State).
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% Hash-function shims: newer OTP releases expose crypto:hash/2 and the
%% hash_init/update/final streaming API; `old_hash' selects the legacy
%% crypto:md5/sha functions removed in later releases.
-ifndef(old_hash).
md5(Bin) ->
    crypto:hash(md5, Bin).
-ifdef(TEST).
%% One-shot SHA-1, used by tests only.
esha(Bin) ->
    crypto:hash(sha, Bin).
-endif.
%% Streaming SHA-1 context: init / update / final.
esha_init() ->
    crypto:hash_init(sha).
esha_update(Ctx, Bin) ->
    crypto:hash_update(Ctx, Bin).
esha_final(Ctx) ->
    crypto:hash_final(Ctx).
-else.
%% Legacy crypto API (pre crypto:hash/2).
md5(Bin) ->
    crypto:md5(Bin).
-ifdef(TEST).
esha(Bin) ->
    crypto:sha(Bin).
-endif.
esha_init() ->
    crypto:sha_init().
esha_update(Ctx, Bin) ->
    crypto:sha_update(Ctx, Bin).
esha_final(Ctx) ->
    crypto:sha_final(Ctx).
-endif.
%% @doc Store a tree bucket, in memory for the top `mem_levels' levels
%% and on disk otherwise.
-spec set_bucket(integer(), integer(), any(), hashtree()) -> hashtree().
set_bucket(Level, Bucket, Val, State = #state{mem_levels = MemLevels})
  when Level =< MemLevels ->
    set_memory_bucket(Level, Bucket, Val, State);
set_bucket(Level, Bucket, Val, State) ->
    set_disk_bucket(Level, Bucket, Val, State).
%% @doc Remove a tree bucket, from memory for the top `mem_levels'
%% levels and from disk otherwise.
-spec del_bucket(integer(), integer(), hashtree()) -> hashtree().
del_bucket(Level, Bucket, State = #state{mem_levels = MemLevels})
  when Level =< MemLevels ->
    del_memory_bucket(Level, Bucket, State);
del_bucket(Level, Bucket, State) ->
    del_disk_bucket(Level, Bucket, State).
%% @doc Open (creating if needed) a LevelDB instance to back a tree.
%% Returns {Handle, Path}. Without a `segment_path' option a unique
%% path under /tmp/anti/level is derived from a timestamp + ref hash.
-spec new_segment_store(proplist()) -> {term(), string()}.
new_segment_store(Opts) ->
    DataDir = case proplists:get_value(segment_path, Opts) of
                  undefined ->
                      Root = "/tmp/anti/level",
                      <<P:128/integer>> = md5(term_to_binary({os:timestamp(), make_ref()})),
                      filename:join(Root, integer_to_list(P));
                  SegmentPath ->
                      SegmentPath
              end,
    DefaultWriteBufferMin = 4 * 1024 * 1024,
    DefaultWriteBufferMax = 14 * 1024 * 1024,
    ConfigVars = get_env(anti_entropy_leveldb_opts,
                         [{write_buffer_size_min, DefaultWriteBufferMin},
                          {write_buffer_size_max, DefaultWriteBufferMax}]),
    Config = orddict:from_list(ConfigVars),
    %% Use a variable write buffer size to prevent against all buffers being
    %% flushed to disk at once when under a heavy uniform load.
    WriteBufferMin = proplists:get_value(write_buffer_size_min, Config, DefaultWriteBufferMin),
    WriteBufferMax = proplists:get_value(write_buffer_size_max, Config, DefaultWriteBufferMax),
    Offset = rand:uniform(1 + WriteBufferMax - WriteBufferMin),
    WriteBufferSize = WriteBufferMin + Offset,
    %% Replace the min/max knobs with the single randomized size LevelDB expects.
    Config2 = orddict:store(write_buffer_size, WriteBufferSize, Config),
    Config3 = orddict:erase(write_buffer_size_min, Config2),
    Config4 = orddict:erase(write_buffer_size_max, Config3),
    Config5 = orddict:store(is_internal_db, true, Config4),
    Config6 = orddict:store(use_bloomfilter, true, Config5),
    Options = orddict:store(create_if_missing, true, Config6),
    ok = filelib:ensure_dir(DataDir),
    {ok, Ref} = eleveldb:open(DataDir, Options),
    {Ref, DataDir}.
%% @doc Hash an arbitrary term for use in tree buckets: the SHA-1 of
%% its external term format. The empty list hashes to the atom `empty'
%% so empty buckets are distinguishable.
-spec hash(term()) -> empty | binary().
hash([]) ->
    empty;
hash(Term) ->
    sha(term_to_binary(Term)).
%% SHA-1 of a binary, computed incrementally in chunks (default 4096
%% bytes, tunable via anti_entropy_sha_chunk) to bound peak memory on
%% large inputs.
sha(Bin) ->
    Chunk = get_env(anti_entropy_sha_chunk, 4096),
    sha(Chunk, Bin).

sha(Chunk, Bin) ->
    Ctx1 = esha_init(),
    Ctx2 = sha(Chunk, Bin, Ctx1),
    SHA = esha_final(Ctx2),
    SHA.

%% Feed Bin into the streaming context Chunk bytes at a time; the final
%% (short) remainder is consumed by the second case branch.
sha(Chunk, Bin, Ctx) ->
    case Bin of
        <<Data:Chunk/binary, Rest/binary>> ->
            Ctx2 = esha_update(Ctx, Data),
            sha(Chunk, Rest, Ctx2);
        Data ->
            Ctx2 = esha_update(Ctx, Data),
            Ctx2
    end.
%% Look up a config value, letting a riak_kv setting override the
%% riak_core one; Default applies when neither app defines it.
get_env(Key, Default) ->
    CoreEnv = app_helper:get_env(riak_core, Key, Default),
    app_helper:get_env(riak_kv, Key, CoreEnv).
%% @doc Propagate grouped bucket hashes upward one level at a time,
%% from the leaf level down to level 0 (the root's parent), rebuilding
%% each level's buckets via rebuild_fold/4.
-spec update_levels(integer(),
                    [{integer(), [{integer(), binary()}]}],
                    hashtree(), next_rebuild()) -> hashtree().
update_levels(0, _, State, _) ->
    State;
update_levels(Level, Groups, State, Type) ->
    {_, _, NewState, NewBuckets} = rebuild_fold(Level, Groups, State, Type),
    lager:debug("level ~p hashes ~w\n", [Level, NewBuckets]),
    Groups2 = group(NewBuckets, State#state.width),
    update_levels(Level - 1, Groups2, NewState, Type).
%% @doc Fold rebuild_folder/2 over one level's bucket groups,
%% accumulating the updated tree state and the {Bucket, Hash} pairs to
%% propagate to the next level up.
-spec rebuild_fold(integer(),
                   [{integer(), [{integer(), binary()}]}], hashtree(),
                   next_rebuild()) -> {integer(), next_rebuild(),
                                       hashtree(), [{integer(), binary()}]}.
rebuild_fold(Level, Groups, State, Type) ->
    lists:foldl(fun rebuild_folder/2, {Level, Type, State, []}, Groups).
%% Rebuild a single bucket at Level. For a full rebuild the new hashes
%% replace the bucket wholesale; for an incremental rebuild they are
%% merged over the stored bucket (new value wins per segment). Emits
%% the bucket's rolled-up hash for propagation to the next level.
rebuild_folder({Bucket, NewHashes}, {Level, Type, StateAcc, BucketsAcc}) ->
    Hashes = case Type of
                 full ->
                     orddict:from_list(NewHashes);
                 incremental ->
                     Hashes1 = get_bucket(Level, Bucket,
                                          StateAcc),
                     Hashes2 = orddict:from_list(NewHashes),
                     orddict:merge(
                       fun(_, _, New) -> New end,
                       Hashes1,
                       Hashes2)
             end,
    %% All of the segments that make up this bucket, trim any
    %% newly emptied hashes (likely result of deletion)
    PopHashes = [{S, H} || {S, H} <- Hashes, H /= [], H /= empty],
    case PopHashes of
        [] ->
            %% No more hash entries, if a full rebuild then disk
            %% already clear. If not, remove the empty bucket.
            StateAcc2 = case Type of
                            full ->
                                StateAcc;
                            incremental ->
                                del_bucket(Level, Bucket, StateAcc)
                        end,
            %% Although not written to disk, propagate hash up to next level
            %% to mark which entries of the tree need updating.
            NewBucket = {Bucket, []},
            {Level, Type, StateAcc2, [NewBucket | BucketsAcc]};
        _ ->
            %% Otherwise, at least one hash entry present, update
            %% and propagate
            StateAcc2 = set_bucket(Level, Bucket, Hashes, StateAcc),
            NewBucket = {Bucket, hash(PopHashes)},
            {Level, Type, StateAcc2, [NewBucket | BucketsAcc]}
    end.
%% Takes a list of bucket-hash entries from level X and groups them together
%% into groups representing entries at parent level X-1 (Bucket = Id div Width).
%%
%% For example, given bucket-hash entries at level X:
%%   [{1,H1}, {2,H2}, {3,H3}, {4,H4}, {5,H5}, {6,H6}, {7,H7}, {8,H8}]
%%
%% The grouping at level X-1 with a width of 4 corresponds to:
%%   [{1,[{1,H1}, {2,H2}, {3,H3}, {4,H4}]},
%%    {2,[{5,H5}, {6,H6}, {7,H7}, {8,H8}]}]
%%
%% Note: the returned groups, and the members within each group, come
%% out in reverse traversal order; downstream callers normalize via
%% orddict:from_list/1, so the ordering is not significant.
-spec group([{integer(), binary()}], pos_integer())
           -> [{integer(), [{integer(), binary()}]}].
group([], _Width) ->
    [];
group([{HeadId, _} | _] = Entries, Width) ->
    Step = fun({Id, _} = Entry, {Bucket, Members, Done}) ->
                   case Id div Width of
                       Bucket ->
                           %% Same parent bucket: keep accumulating.
                           {Bucket, [Entry | Members], Done};
                       Next ->
                           %% Crossed into the next parent bucket.
                           {Next, [Entry], [{Bucket, Members} | Done]}
                   end
           end,
    Seed = {HeadId div Width, [], []},
    {LastBucket, LastMembers, Done} = lists:foldl(Step, Seed, Entries),
    [{LastBucket, LastMembers} | Done].
%% @doc Read a bucket from the in-memory level store; a missing bucket
%% reads as an empty orddict.
-spec get_memory_bucket(integer(), integer(), hashtree()) -> any().
get_memory_bucket(Level, Bucket, #state{tree=Tree}) ->
    case dict:find({Level, Bucket}, Tree) of
        {ok, Hashes} ->
            Hashes;
        error ->
            orddict:new()
    end.
%% @doc Store a bucket in the in-memory level store.
-spec set_memory_bucket(integer(), integer(), any(), hashtree()) -> hashtree().
set_memory_bucket(Level, Bucket, Val, State = #state{tree = Tree}) ->
    State#state{tree = dict:store({Level, Bucket}, Val, Tree)}.

%% @doc Remove a bucket from the in-memory level store.
-spec del_memory_bucket(integer(), integer(), hashtree()) -> hashtree().
del_memory_bucket(Level, Bucket, State = #state{tree = Tree}) ->
    State#state{tree = dict:erase({Level, Bucket}, Tree)}.
%% @doc Read a bucket from LevelDB; a missing (or unreadable) bucket
%% reads as an empty orddict.
-spec get_disk_bucket(integer(), integer(), hashtree()) -> any().
get_disk_bucket(Level, Bucket, #state{id=Id, ref=Ref}) ->
    case eleveldb:get(Ref, encode_bucket(Id, Level, Bucket), []) of
        {ok, Bin} ->
            binary_to_term(Bin);
        _ ->
            orddict:new()
    end.
%% @doc Store a bucket in LevelDB as a serialized term.
-spec set_disk_bucket(integer(), integer(), any(), hashtree()) -> hashtree().
set_disk_bucket(Level, Bucket, Val, State=#state{id=Id, ref=Ref}) ->
    HKey = encode_bucket(Id, Level, Bucket),
    Bin = term_to_binary(Val),
    ok = eleveldb:put(Ref, HKey, Bin, []),
    State.

%% @doc Remove a bucket from LevelDB.
del_disk_bucket(Level, Bucket, State = #state{id = Id, ref = Ref}) ->
    HKey = encode_bucket(Id, Level, Bucket),
    ok = eleveldb:delete(Ref, HKey, []),
    State.
-spec encode_id(binary() | non_neg_integer()) -> tree_id_bin().
encode_id(TreeId) when is_integer(TreeId) ->
if (TreeId >= 0) andalso
(TreeId < ((1 bsl 160)-1)) ->
<<TreeId:176/integer>>;
true ->
erlang:error(badarg)
end;
encode_id(TreeId) when is_binary(TreeId) and (byte_size(TreeId) == 22) ->
TreeId;
encode_id(_) ->
erlang:error(badarg).
%% @doc Build the LevelDB key for a segment entry:
%% $t ++ TreeId(22 bytes) ++ $s ++ Segment(64-bit) ++ Key.
%% The layout sorts entries by tree, then segment, then key.
-spec encode(tree_id_bin(), integer(), binary()) -> segment_bin().
encode(TreeId, Segment, Key) ->
    <<$t,TreeId:22/binary,$s,Segment:64/integer,Key/binary>>.
%% @doc Decode a segment key produced by encode/3, returning
%% {bad, -1, <<>>} for any binary that does not match the layout
%% (e.g. bucket or meta keys encountered during iteration).
-spec safe_decode(binary()) -> {tree_id_bin() | bad, integer(), binary()}.
safe_decode(<<$t,TreeId:22/binary,$s,Segment:64/integer,Key/binary>>) ->
    {TreeId, Segment, Key};
safe_decode(_Other) ->
    {bad, -1, <<>>}.
%% @doc Decode a segment key; crashes (badmatch) on anything that is
%% not a valid segment key — use safe_decode/1 when unsure.
-spec decode(segment_bin()) -> {tree_id_bin(), non_neg_integer(), binary()}.
decode(Bin) ->
    <<$t,TreeId:22/binary,$s,Segment:64/integer,Key/binary>> = Bin,
    {TreeId, Segment, Key}.

%% @doc Build the LevelDB key for a tree bucket:
%% $b ++ TreeId ++ $b ++ Level(64-bit) ++ Bucket(64-bit).
-spec encode_bucket(tree_id_bin(), integer(), integer()) -> bucket_bin().
encode_bucket(TreeId, Level, Bucket) ->
    <<$b,TreeId:22/binary,$b,Level:64/integer,Bucket:64/integer>>.

%% @doc Decode a bucket key; crashes (badmatch) on non-bucket keys —
%% clear_buckets/1 relies on that to detect the end of its range.
-spec decode_bucket(bucket_bin()) -> {tree_id_bin(), integer(), integer()}.
decode_bucket(Bin) ->
    <<$b,TreeId:22/binary,$b,Level:64/integer,Bucket:64/integer>> = Bin,
    {TreeId, Level, Bucket}.

%% @doc Build the LevelDB key for a metadata entry: $m ++ Key.
-spec encode_meta(binary()) -> meta_bin().
encode_meta(Key) ->
    <<$m,Key/binary>>.
%% @doc Compute the rolled-up hash of each requested segment's contents
%% ('*' selects every segment via a wildcard scan).
-spec hashes(hashtree(), list('*'|integer())) -> [{integer(), binary()}].
hashes(State, Segments) ->
    multi_select_segment(State, Segments, fun hash/1).
%% @doc Take a point-in-time view of the segment store.
-spec snapshot(hashtree()) -> hashtree().
snapshot(State) ->
    %% Abuse eleveldb iterators as snapshots
    %% (old-style catch: closing an absent/stale iterator is best-effort).
    catch eleveldb:iterator_close(State#state.itr),
    {ok, Itr} = eleveldb:iterator(State#state.ref, []),
    State#state{itr=Itr}.
%% @doc Scan the snapshot iterator over the requested segments (in
%% order; '*' means all), applying F to each segment's {Key, Hash}
%% pairs. Returns [{Segment, F(Pairs)}]. Segments listed but absent
%% from disk still produce an F([]) entry so callers see every
%% requested segment.
-spec multi_select_segment(hashtree(), list('*'|integer()), select_fun(T))
                          -> [{integer(), T}].
multi_select_segment(#state{id=Id, itr=Itr}, Segments, F) ->
    [First | Rest] = Segments,
    IS1 = #itr_state{itr=Itr,
                     id=Id,
                     current_segment=First,
                     remaining_segments=Rest,
                     acc_fun=F,
                     segment_acc=[],
                     final_acc=[]},
    %% Position the iterator at the start of the first requested segment
    %% ('*' starts from segment 0, i.e. the beginning of this tree's range).
    Seek = case First of
               '*' ->
                   encode(Id, 0, <<>>);
               _ ->
                   encode(Id, First, <<>>)
           end,
    IS2 = try
              iterate(iterator_move(Itr, Seek), IS1)
          after
              %% Always call prefetch stop to ensure the iterator
              %% is safe to use in the compare. Requires
              %% eleveldb > 2.0.16 or this may segv/hang.
              _ = iterator_move(Itr, prefetch_stop)
          end,
    #itr_state{remaining_segments = LeftOver,
               current_segment=LastSegment,
               segment_acc=LastAcc,
               final_acc=FA} = IS2,
    %% iterate completes without processing the last entries in the state. Compute
    %% the final visited segment, and add calls to the F([]) for all of the segments
    %% that do not exist at the end of the file (due to deleting the last entry in the
    %% segment).
    Result = [{LeftSeg, F([])} || LeftSeg <- lists:reverse(LeftOver),
                                  LeftSeg =/= '*'] ++
        [{LastSegment, F(LastAcc)} | FA],
    case Result of
        [{'*', _}] ->
            %% Handle wildcard select when all segments are empty
            [];
        _ ->
            Result
    end.
%% Move the eleveldb iterator, normalizing both a missing iterator
%% (undefined) and a badarg from a closed/stale iterator into
%% {error, invalid_iterator} so iterate/2 has a single failure shape.
iterator_move(undefined, _Seek) ->
    {error, invalid_iterator};
iterator_move(Itr, Seek) ->
    try
        eleveldb:iterator_move(Itr, Seek)
    catch
        _:badarg ->
            {error, invalid_iterator}
    end.
%% Core iterator loop for multi_select_segment/3: walks the segment
%% store, accumulating {Key, Hash} pairs per segment and folding
%% finished segments (via acc_fun) into final_acc. Uses eleveldb
%% prefetch to pipeline reads; every exit path must balance an
%% outstanding prefetch with prefetch_stop or the iterator cannot be
%% reused safely.
-spec iterate({'error','invalid_iterator'} | {'ok',binary(),binary()},
              #itr_state{}) -> #itr_state{}.

%% Ended up at an invalid_iterator likely due to encountering a missing dirty
%% segment - e.g. segment dirty, but removed last entries for it
iterate({error, invalid_iterator}, IS=#itr_state{current_segment='*'}) ->
    IS;
iterate({error, invalid_iterator}, IS=#itr_state{itr=Itr,
                                                 id=Id,
                                                 current_segment=CurSeg,
                                                 remaining_segments=Segments,
                                                 acc_fun=F,
                                                 segment_acc=Acc,
                                                 final_acc=FinalAcc}) ->
    case Segments of
        [] ->
            IS;
        ['*'] ->
            IS;
        [NextSeg | Remaining] ->
            %% Close out the current segment and reseek to the next
            %% requested one.
            Seek = encode(Id, NextSeg, <<>>),
            IS2 = IS#itr_state{current_segment=NextSeg,
                               remaining_segments=Remaining,
                               segment_acc=[],
                               final_acc=[{CurSeg, F(Acc)} | FinalAcc]},
            iterate(iterator_move(Itr, Seek), IS2)
    end;
iterate({ok, K, V}, IS=#itr_state{itr=Itr,
                                  id=Id,
                                  current_segment=CurSeg,
                                  remaining_segments=Segments,
                                  acc_fun=F,
                                  segment_acc=Acc,
                                  final_acc=FinalAcc}) ->
    {SegId, Seg, _} = safe_decode(K),
    %% In wildcard mode the current segment is whatever the key says it is.
    Segment = case CurSeg of
                  '*' ->
                      Seg;
                  _ ->
                      CurSeg
              end,
    case {SegId, Seg, Segments, IS#itr_state.prefetch} of
        {bad, -1, _, _} ->
            %% Non-segment encountered, end traversal
            IS;
        {Id, Segment, _, _} ->
            %% Still reading existing segment
            IS2 = IS#itr_state{current_segment=Segment,
                               segment_acc=[{K,V} | Acc],
                               prefetch=true},
            iterate(iterator_move(Itr, prefetch), IS2);
        {Id, _, [Seg|Remaining], _} ->
            %% Pointing at next segment we are interested in
            IS2 = IS#itr_state{current_segment=Seg,
                               remaining_segments=Remaining,
                               segment_acc=[{K,V}],
                               final_acc=[{Segment, F(Acc)} | FinalAcc],
                               prefetch=true},
            iterate(iterator_move(Itr, prefetch), IS2);
        {Id, _, ['*'], _} ->
            %% Pointing at next segment we are interested in
            IS2 = IS#itr_state{current_segment=Seg,
                               remaining_segments=['*'],
                               segment_acc=[{K,V}],
                               final_acc=[{Segment, F(Acc)} | FinalAcc],
                               prefetch=true},
            iterate(iterator_move(Itr, prefetch), IS2);
        {Id, _, [NextSeg | Remaining], true} ->
            %% Pointing at uninteresting segment, but need to halt the
            %% prefetch to ensure the iterator can be reused
            IS2 = IS#itr_state{current_segment=NextSeg,
                               segment_acc=[],
                               remaining_segments=Remaining,
                               final_acc=[{Segment, F(Acc)} | FinalAcc],
                               prefetch=true}, % will be after second move
            _ = iterator_move(Itr, prefetch_stop), % ignore the pre-fetch,
            Seek = encode(Id, NextSeg, <<>>),      % and risk wasting a reseek
            iterate(iterator_move(Itr, Seek), IS2);% to get to the next segment
        {Id, _, [NextSeg | Remaining], false} ->
            %% Pointing at uninteresting segment, seek to next interesting one
            Seek = encode(Id, NextSeg, <<>>),
            IS2 = IS#itr_state{current_segment=NextSeg,
                               remaining_segments=Remaining,
                               segment_acc=[],
                               final_acc=[{Segment, F(Acc)} | FinalAcc]},
            iterate(iterator_move(Itr, Seek), IS2);
        {_, _, _, true} ->
            %% Done with traversal, but need to stop the prefetch to
            %% ensure the iterator can be reused. The next operation
            %% with this iterator is a seek so no need to be concerned
            %% with the data returned here.
            _ = iterator_move(Itr, prefetch_stop),
            IS#itr_state{prefetch=false};
        {_, _, _, false} ->
            %% Done with traversal
            IS
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% level-by-level exchange (BFS instead of DFS)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Level-by-level (breadth-first) exchange against a remote tree.
%% Wraps the local tree in the same fun-protocol used for Remote and
%% starts from the root bucket at level 1; level Levels+1 compares raw
%% key hashes.
compare2(Tree, Remote, AccFun, Acc) ->
    Final = Tree#state.levels + 1,
    Local = fun(get_bucket, {L, B}) ->
                    get_bucket(L, B, Tree);
               (key_hashes, Segment) ->
                    [{_, KeyHashes2}] = key_hashes(Tree, Segment),
                    KeyHashes2
            end,
    Opts = [],
    exchange(1, [0], Final, Local, Remote, AccFun, Acc, Opts).
%% BFS exchange driver: descend one level at a time, narrowing the set
%% of differing buckets; when the segment level (Final) is reached,
%% compare actual key hashes. An empty difference set short-circuits.
exchange(_Level, [], _Final, _Local, _Remote, _AccFun, Acc, _Opts) ->
    Acc;
exchange(Final, Diff, Final, Local, Remote, AccFun, Acc, Opts) ->
    %% Reached the leaf level: exchange key hashes for differing segments.
    exchange_final(Final, Diff, Local, Remote, AccFun, Acc, Opts);
exchange(Level, Diff, Final, Local, Remote, AccFun, Acc, Opts) ->
    Narrowed = exchange_level(Level, Diff, Local, Remote, Opts),
    exchange(Level + 1, Narrowed, Final, Local, Remote, AccFun, Acc, Opts).
%% Compare one tree level bucket-by-bucket against the remote side and
%% return the child bucket ids that differ (to be examined at the next
%% level down).
exchange_level(Level, Buckets, Local, Remote, _Opts) ->
    Remote(start_exchange_level, {Level, Buckets}),
    lists:flatmap(fun(Bucket) ->
                          A = Local(get_bucket, {Level, Bucket}),
                          B = Remote(get_bucket, {Level, Bucket}),
                          Delta = riak_core_util:orddict_delta(lists:keysort(1, A),
                                                               lists:keysort(1, B)),
                          lager:debug("Exchange Level ~p Bucket ~p\nA=~p\nB=~p\nD=~p\n",
                                      [Level, Bucket, A, B, Delta]),
                          Diffs = Delta,
                          [BK || {BK, _} <- Diffs]
                  end, Buckets).
%% Leaf-level exchange: compare the raw key hashes of each differing
%% segment and fold the resulting keydiffs through AccFun.
exchange_final(_Level, Segments, Local, Remote, AccFun, Acc0, _Opts) ->
    Remote(start_exchange_segments, Segments),
    lists:foldl(fun(Segment, Acc) ->
                        A = Local(key_hashes, Segment),
                        B = Remote(key_hashes, Segment),
                        Delta = riak_core_util:orddict_delta(lists:keysort(1, A),
                                                             lists:keysort(1, B)),
                        lager:debug("Exchange Final\nA=~p\nB=~p\nD=~p\n",
                                    [A, B, Delta]),
                        Keys = [begin
                                    %% Segment is already bound: this match also
                                    %% asserts the key belongs to this segment.
                                    {_Id, Segment, Key} = decode(KBin),
                                    Type = key_diff_type(Diff),
                                    {Type, Key}
                                end || {KBin, Diff} <- Delta],
                        AccFun(Keys, Acc)
                end, Acc0, Segments).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Depth-first comparison against a remote tree. Below the last
%% level, recurse only into child buckets whose hashes differ; at level
%% Levels+1 the bucket id is a segment, compared key-by-key.
-spec compare(integer(), integer(), hashtree(), remote_fun(), acc_fun(X), X) -> X.
compare(Level, Bucket, Tree, Remote, AccFun, KeyAcc) when Level == Tree#state.levels+1 ->
    Keys = compare_segments(Bucket, Tree, Remote),
    AccFun(Keys, KeyAcc);
compare(Level, Bucket, Tree, Remote, AccFun, KeyAcc) ->
    HL1 = get_bucket(Level, Bucket, Tree),
    HL2 = Remote(get_bucket, {Level, Bucket}),
    %% Diff = entries present on only one side or with differing hashes.
    Union = lists:ukeysort(1, HL1 ++ HL2),
    Inter = ordsets:intersection(ordsets:from_list(HL1),
                                 ordsets:from_list(HL2)),
    Diff = ordsets:subtract(Union, Inter),
    lager:debug("Tree ~p level ~p bucket ~p\nL=~p\nR=~p\nD=~p\n",
                [Tree, Level, Bucket, HL1, HL2, Diff]),
    KeyAcc3 =
        lists:foldl(fun({Bucket2, _}, KeyAcc2) ->
                            compare(Level+1, Bucket2, Tree, Remote, AccFun, KeyAcc2)
                    end, KeyAcc, Diff),
    KeyAcc3.
%% @doc Compare the key hashes of one segment against the remote side,
%% classifying each differing key as missing / remote_missing /
%% different.
-spec compare_segments(integer(), hashtree(), remote_fun()) -> [keydiff()].
compare_segments(Segment, Tree=#state{id=Id}, Remote) ->
    [{_, KeyHashes1}] = key_hashes(Tree, Segment),
    KeyHashes2 = Remote(key_hashes, Segment),
    HL1 = orddict:from_list(KeyHashes1),
    HL2 = orddict:from_list(KeyHashes2),
    Delta = riak_core_util:orddict_delta(HL1, HL2),
    lager:debug("Tree ~p segment ~p diff ~p\n",
                [Tree, Segment, Delta]),
    Keys = [begin
                %% Id and Segment are bound: the match asserts each key
                %% belongs to this tree and segment.
                {Id, Segment, Key} = decode(KBin),
                Type = key_diff_type(Diff),
                {Type, Key}
            end || {KBin, Diff} <- Delta],
    Keys.
%% Classify an orddict_delta entry: '$none' on the local side means the
%% key is missing locally; on the remote side, missing remotely;
%% otherwise the hashes differ.
key_diff_type({'$none', _}) ->
    missing;
key_diff_type({_, '$none'}) ->
    remote_missing;
key_diff_type(_) ->
    different.
%%%===================================================================
%%% bitarray
%%%===================================================================
%% Bit array packed into an Erlang array of integers, ?W bits per word
%% (27 keeps each word a small immediate integer on the VM).
-define(W, 27).
-spec bitarray_new(integer()) -> hashtree_array().
bitarray_new(N) -> array:new((N-1) div ?W + 1, {default, 0}).
%% @doc Set bit I in the bit array (word I div ?W, bit I rem ?W).
-spec bitarray_set(integer(), hashtree_array()) -> hashtree_array().
bitarray_set(I, A) ->
    Idx = I div ?W,
    Word = array:get(Idx, A),
    array:set(Idx, Word bor (1 bsl (I rem ?W)), A).
%% @doc Return the indices of all set bits, in ascending order.
%% sparse_foldl skips all-zero words, so cost scales with set bits.
-spec bitarray_to_list(hashtree_array()) -> [integer()].
bitarray_to_list(A) ->
    lists:reverse(
      array:sparse_foldl(fun(I, V, Acc) ->
                                 expand(V, I * ?W, Acc)
                         end, [], A)).
%% Convert bit vector into list of integers, with optional offset.
%% expand(2#01, 0, []) -> [0]
%% expand(2#10, 0, []) -> [1]
%% expand(2#1101, 0, []) -> [3,2,0]
%% expand(2#1101, 1, []) -> [4,3,1]
%% expand(2#1101, 10, []) -> [13,12,10]
%% expand(2#1101, 100, []) -> [103,102,100]
expand(0, _Offset, Acc) ->
    Acc;
expand(V, Offset, Acc) when V band 1 =:= 1 ->
    %% Low bit set: record its position, then shift right.
    expand(V bsr 1, Offset + 1, [Offset | Acc]);
expand(V, Offset, Acc) ->
    expand(V bsr 1, Offset + 1, Acc).
%%%===================================================================
%%% Experiments
%%%===================================================================
-ifdef(TEST).
%% Timing wrappers for the experiment functions below; each returns
%% {Microseconds, Result} via timer:tc.
run_local() ->
    run_local(10000).
run_local(N) ->
    timer:tc(fun do_local/1, [N]).

run_concurrent_build() ->
    run_concurrent_build(10000).
run_concurrent_build(N) ->
    run_concurrent_build(N, N).
run_concurrent_build(N1, N2) ->
    timer:tc(fun do_concurrent_build/2, [N1, N2]).

%% Run Count concurrent build-and-compare tasks of N keys each.
run_multiple(Count, N) ->
    Tasks = [fun() ->
                     do_concurrent_build(N, N)
             end || _ <- lists:seq(1, Count)],
    timer:tc(fun peval/1, [Tasks]).

run_remote() ->
    run_remote(100000).
run_remote(N) ->
    timer:tc(fun do_remote/1, [N]).
%% Build two N-key trees with a few deliberate differences, compare
%% them locally, print the keydiff, then clean up both stores.
do_local(N) ->
    A0 = insert_many(N, new()),
    A1 = insert(<<"10">>, <<"42">>, A0),
    A2 = insert(<<"10">>, <<"42">>, A1),
    A3 = insert(<<"13">>, <<"52">>, A2),
    B0 = insert_many(N, new()),
    B1 = insert(<<"14">>, <<"52">>, B0),
    B2 = insert(<<"10">>, <<"32">>, B1),
    B3 = insert(<<"10">>, <<"422">>, B2),
    A4 = update_tree(A3),
    B4 = update_tree(B3),
    KeyDiff = local_compare(A4, B4),
    io:format("KeyDiff: ~p~n", [KeyDiff]),
    close(A4),
    close(B4),
    destroy(A4),
    destroy(B4),
    ok.
%% Like do_local/1 but the two trees are built in parallel processes
%% (via peval/1) before being compared.
do_concurrent_build(N1, N2) ->
    F1 = fun() ->
                 A0 = insert_many(N1, new()),
                 A1 = insert(<<"10">>, <<"42">>, A0),
                 A2 = insert(<<"10">>, <<"42">>, A1),
                 A3 = insert(<<"13">>, <<"52">>, A2),
                 A4 = update_tree(A3),
                 A4
         end,
    F2 = fun() ->
                 B0 = insert_many(N2, new()),
                 B1 = insert(<<"14">>, <<"52">>, B0),
                 B2 = insert(<<"10">>, <<"32">>, B1),
                 B3 = insert(<<"10">>, <<"422">>, B2),
                 B4 = update_tree(B3),
                 B4
         end,
    [A4, B4] = peval([F1, F2]),
    KeyDiff = local_compare(A4, B4),
    io:format("KeyDiff: ~p~n", [KeyDiff]),
    close(A4),
    close(B4),
    destroy(A4),
    destroy(B4),
    ok.
%% Exercise the remote-fun protocol: build one tree in a spawned
%% process (serving get_bucket/key_hashes requests via message_loop/3)
%% and compare the local tree against it.
do_remote(N) ->
    %% Spawn new process for remote tree
    Other =
        spawn(fun() ->
                      A0 = insert_many(N, new()),
                      A1 = insert(<<"10">>, <<"42">>, A0),
                      A2 = insert(<<"10">>, <<"42">>, A1),
                      A3 = insert(<<"13">>, <<"52">>, A2),
                      A4 = update_tree(A3),
                      message_loop(A4, 0, 0)
              end),
    %% Build local tree
    B0 = insert_many(N, new()),
    B1 = insert(<<"14">>, <<"52">>, B0),
    B2 = insert(<<"10">>, <<"32">>, B1),
    B3 = insert(<<"10">>, <<"422">>, B2),
    B4 = update_tree(B3),
    %% Compare with remote tree through message passing
    Remote = fun(get_bucket, {L, B}) ->
                     Other ! {get_bucket, self(), L, B},
                     receive {remote, X} -> X end;
                (start_exchange_level, {_Level, _Buckets}) ->
                     ok;
                (start_exchange_segments, _Segments) ->
                     ok;
                (key_hashes, Segment) ->
                     Other ! {key_hashes, self(), Segment},
                     receive {remote, X} -> X end
             end,
    KeyDiff = compare(B4, Remote),
    io:format("KeyDiff: ~p~n", [KeyDiff]),
    %% Signal spawned process to print stats and exit
    Other ! done,
    ok.
%% @doc Serve get_bucket / key_hashes requests for Tree until 'done'
%% arrives, counting the messages answered and the bytes shipped
%% (bytes measured as the external term size of each reply).
message_loop(Tree, Msgs, Bytes) ->
    receive
        {get_bucket, From, L, B} ->
            Reply = get_bucket(L, B, Tree),
            From ! {remote, Reply},
            Size = byte_size(term_to_binary(Reply)),
            message_loop(Tree, Msgs+1, Bytes+Size);
        {key_hashes, From, Segment} ->
            [{_, KeyHashes2}] = key_hashes(Tree, Segment),
            Reply = KeyHashes2,
            From ! {remote, Reply},
            Size = byte_size(term_to_binary(Reply)),
            message_loop(Tree, Msgs+1, Bytes+Size);
        done ->
            %% io:format("Exchanged messages: ~b~n", [Msgs]),
            %% io:format("Exchanged bytes: ~b~n", [Bytes]),
            ok
    end.
%% @doc Insert N synthetic key/value pairs into Tree0: the key is the
%% textual form of -X and the value the textual form of X*100, for
%% X in 1..N.
insert_many(N, Tree0) ->
    Step = fun(X, Tree) -> insert(bin(-X), bin(X * 100), Tree) end,
    lists:foldl(Step, Tree0, lists:seq(1, N)).
%% @doc Render an integer as a decimal binary, e.g. 42 -> <<"42">>.
%% Uses the integer_to_binary/1 BIF instead of the hand-rolled
%% list_to_binary(integer_to_list(X)) round-trip.
bin(X) ->
    integer_to_binary(X).
%% @doc Evaluate a list of zero-arity funs in parallel processes and
%% return their results in the same order as the input list.
peval(Funs) ->
    Self = self(),
    Indexed = lists:zip(lists:seq(1, length(Funs)), Funs),
    %% One worker per fun; each posts its index-tagged result back.
    [spawn(fun() -> Self ! {peval, Idx, Fun()} end) || {Idx, Fun} <- Indexed],
    %% Collect one result per fun (any order), then restore input order.
    Collected = [receive {peval, Idx, Res} -> {Idx, Res} end || _ <- Funs],
    [Res || {_, Res} <- lists:keysort(1, Collected)].
%%%===================================================================
%%% EUnit
%%%===================================================================
%% @doc Diff two local trees by wrapping T2 in a remote_fun() callback
%% and running the compare2/4 exchange against T1. Returns the
%% accumulated list of keydiff() entries.
-spec local_compare(hashtree(), hashtree()) -> [keydiff()].
local_compare(T1, T2) ->
    %% Serve T2's buckets/hashes through the same callback protocol a
    %% real remote tree would use.
    Remote = fun(get_bucket, {L, B}) ->
                     get_bucket(L, B, T2);
                (start_exchange_level, {_Level, _Buckets}) ->
                     ok;
                (start_exchange_segments, _Segments) ->
                     ok;
                (key_hashes, Segment) ->
                     [{_, KeyHashes2}] = key_hashes(T2, Segment),
                     KeyHashes2
             end,
    AccFun = fun(Keys, KeyAcc) ->
                     Keys ++ KeyAcc
             end,
    compare2(T1, Remote, AccFun, []).
%% @doc Same as local_compare/2 but driving the legacy compare/4
%% exchange instead of compare2/4.
-spec local_compare1(hashtree(), hashtree()) -> [keydiff()].
local_compare1(T1, T2) ->
    Remote = fun(get_bucket, {L, B}) ->
                     get_bucket(L, B, T2);
                (start_exchange_level, {_Level, _Buckets}) ->
                     ok;
                (start_exchange_segments, _Segments) ->
                     ok;
                (key_hashes, Segment) ->
                     [{_, KeyHashes2}] = key_hashes(T2, Segment),
                     KeyHashes2
             end,
    AccFun = fun(Keys, KeyAcc) ->
                     Keys ++ KeyAcc
             end,
    compare(T1, Remote, AccFun, []).
%% @doc Diff a local tree against a remote tree reachable through the
%% remote_fun() callback, accumulating differing keys into a list.
-spec compare(hashtree(), remote_fun()) -> [keydiff()].
compare(Tree, Remote) ->
    compare(Tree, Remote, fun(Keys, KeyAcc) ->
                                  Keys ++ KeyAcc
                          end).

%% @doc As compare/2 with a caller-supplied accumulator fun.
-spec compare(hashtree(), remote_fun(), acc_fun(X)) -> X.
compare(Tree, Remote, AccFun) ->
    compare(Tree, Remote, AccFun, []).
%% @doc Close the underlying eleveldb handle while leaving the state
%% untouched — simulates an unclean shutdown (no clean-close metadata
%% is written). Any close error is deliberately ignored.
%% Replaces the old-style `catch Expr' (which conflates throws/errors/
%% exits and loses the stacktrace) with an explicit try/catch.
-spec fake_close(hashtree()) -> hashtree().
fake_close(State) ->
    try
        eleveldb:close(State#state.ref)
    catch
        _:_ -> ok
    end,
    State.
%% Verify that `update_tree/1' generates a snapshot of the underlying
%% LevelDB store that is used by `compare', therefore isolating the
%% compare from newer/concurrent insertions into the tree.
snapshot_test() ->
    A0 = insert(<<"10">>, <<"42">>, new()),
    B0 = insert(<<"10">>, <<"52">>, new()),
    A1 = update_tree(A0),
    B1 = update_tree(B0),
    %% This insert happens after B1's snapshot, so it must not affect
    %% the diff below.
    B2 = insert(<<"10">>, <<"42">>, B1),
    KeyDiff = local_compare(A1, B1),
    close(A1),
    close(B2),
    destroy(A1),
    destroy(B2),
    ?assertEqual([{different, <<"10">>}], KeyDiff),
    ok.
%% Two trees with one unique random key each must report that key as
%% remote_missing / missing from each side's perspective.
delta_test() ->
    T1 = update_tree(insert(<<"1">>, esha(term_to_binary(make_ref())),
                            new())),
    T2 = update_tree(insert(<<"2">>, esha(term_to_binary(make_ref())),
                            new())),
    Diff = local_compare(T1, T2),
    ?assertEqual([{remote_missing, <<"1">>}, {missing, <<"2">>}], Diff),
    Diff2 = local_compare(T2, T1),
    ?assertEqual([{missing, <<"1">>}, {remote_missing, <<"2">>}], Diff2),
    ok.
%% Deleting a key and flushing the buffer — without a full update_tree
%% — must survive a close/reopen: the reopened tree reports the key
%% as missing relative to the untouched tree.
delete_without_update_test() ->
    A1 = new({0,0},[{segment_path, "t1"}]),
    A2 = insert(<<"k">>, <<1234:32>>, A1),
    A3 = update_tree(A2),
    B1 = new({0,0},[{segment_path, "t2"}]),
    B2 = insert(<<"k">>, <<1234:32>>, B1),
    B3 = update_tree(B2),
    Diff = local_compare(A3, B3),
    C1 = delete(<<"k">>, A3),
    C2 = rehash_tree(C1),
    C3 = flush_buffer(C2),
    close(C3),
    %% Reopen from the same segment path and re-compare.
    AA1 = new({0,0},[{segment_path, "t1"}]),
    AA2 = update_tree(AA1),
    Diff2 = local_compare(AA2, B3),
    close(B3),
    close(AA2),
    destroy(C3),
    destroy(B3),
    destroy(AA2),
    ?assertEqual([], Diff),
    ?assertEqual([{missing, <<"k">>}], Diff2).
%% Exercise the open/close bookkeeping stored in the tree metadata:
%% 'opened' increments on every open, 'closed' only on a clean close,
%% and an unclean close (fake_close) forces a full rebuild next open.
opened_closed_test() ->
    TreeId0 = {0,0},
    TreeId1 = term_to_binary({0,0}),
    A1 = new(TreeId0, [{segment_path, "t1000"}]),
    A2 = mark_open_and_check(TreeId0, A1),
    A3 = insert(<<"totes">>, <<1234:32>>, A2),
    A4 = update_tree(A3),
    B1 = new(TreeId0, [{segment_path, "t2000"}]),
    B2 = mark_open_empty(TreeId0, B1),
    B3 = insert(<<"totes">>, <<1234:32>>, B2),
    B4 = update_tree(B3),
    StatusA4 = {proplists:get_value(opened, read_meta_term(TreeId1, [], A4)),
                proplists:get_value(closed, read_meta_term(TreeId1, [], A4))},
    StatusB4 = {proplists:get_value(opened, read_meta_term(TreeId1, [], B4)),
                proplists:get_value(closed, read_meta_term(TreeId1, [], B4))},
    A5 = set_next_rebuild(A4, incremental),
    A6 = mark_clean_close(TreeId0, A5),
    StatusA6 = {proplists:get_value(opened, read_meta_term(TreeId1, [], A6)),
                proplists:get_value(closed, read_meta_term(TreeId1, [], A6))},
    close(A6),
    close(B4),
    %% Reopen after a clean close.
    AA1 = new(TreeId0, [{segment_path, "t1000"}]),
    AA2 = mark_open_and_check(TreeId0, AA1),
    AA3 = update_tree(AA2),
    StatusAA3 = {proplists:get_value(opened, read_meta_term(TreeId1, [], AA3)),
                 proplists:get_value(closed, read_meta_term(TreeId1, [], AA3))},
    %% Simulate an unclean shutdown: close without clean-close marker.
    fake_close(AA3),
    AAA1 = new(TreeId0,[{segment_path, "t1000"}]),
    AAA2 = mark_open_and_check(TreeId0, AAA1),
    StatusAAA2 = {proplists:get_value(opened, read_meta_term(TreeId1, [], AAA2)),
                  proplists:get_value(closed, read_meta_term(TreeId1, [], AAA2))},
    AAA3 = mark_clean_close(TreeId0, AAA2),
    close(AAA3),
    AAAA1 = new({0,0},[{segment_path, "t1000"}]),
    AAAA2 = mark_open_and_check(TreeId0, AAAA1),
    StatusAAAA2 = {proplists:get_value(opened, read_meta_term(TreeId1, [], AAAA2)),
                   proplists:get_value(closed, read_meta_term(TreeId1, [], AAAA2))},
    AAAA3 = mark_clean_close(TreeId0, AAAA2),
    StatusAAAA3 = {proplists:get_value(opened, read_meta_term(TreeId1, [], AAAA3)),
                   proplists:get_value(closed, read_meta_term(TreeId1, [], AAAA3))},
    close(AAAA3),
    destroy(B3),
    destroy(A6),
    destroy(AA3),
    destroy(AAA3),
    destroy(AAAA3),
    ?assertEqual({1,undefined}, StatusA4),
    ?assertEqual({1,0}, StatusB4),
    ?assertEqual(full, A2#state.next_rebuild),
    ?assertEqual(incremental, B2#state.next_rebuild),
    ?assertEqual(incremental, A5#state.next_rebuild),
    ?assertEqual({1,1}, StatusA6),
    ?assertEqual({2,1}, StatusAA3),
    ?assertEqual(incremental, AA2#state.next_rebuild),
    ?assertEqual({3,1}, StatusAAA2),
    ?assertEqual(full, AAA1#state.next_rebuild),
    ?assertEqual({4,3}, StatusAAAA2),
    ?assertEqual({4,4}, StatusAAAA3).
-endif.
%%%===================================================================
%%% EQC
%%%===================================================================
-ifdef(TEST).
-ifdef(EQC).
%% Property: chunked sha/2 must agree with one-shot esha/1 for any
%% binary and any chunking.
prop_sha() ->
    %% NOTE: Generating 1MB (1024 * 1024) size binaries is incredibly slow
    %% with EQC and was using over 2GB of memory
    ?FORALL({Size, NumChunks}, {choose(1, 1024), choose(1, 16)},
        ?FORALL(Bin, binary(Size),
            begin
                %% we need at least one chunk,
                %% and then we divide the binary size
                %% into the number of chunks (as a natural
                %% number)
                ChunkSize = max(1, (Size div NumChunks)),
                sha(ChunkSize, Bin) =:= esha(Bin)
            end)).
%% Generator: a shuffled set of at least 3 {key, 8-byte-hash} objects.
objects() ->
    ?SIZED(Size, objects(Size+3)).

objects(N) ->
    ?LET(Keys, shuffle(lists:seq(1,N)),
         [{bin(K), binary(8)} || K <- Keys]
        ).
%% Generator: split a count N into three non-overlapping partition
%% sizes {MissingN1, MissingN2, DifferentN} whose sum is =< N.
lengths(N) ->
    ?LET(MissingN1, choose(0,N),
         ?LET(MissingN2, choose(0,N-MissingN1),
              ?LET(DifferentN, choose(0,N-MissingN1-MissingN2),
                   {MissingN1, MissingN2, DifferentN}))).
%% @doc Flip the first byte of the binary (increment modulo 256) so
%% the result is guaranteed to differ from the input.
mutate(Bin) ->
    <<First, Rest/binary>> = Bin,
    Bumped = (First + 1) rem 256,
    <<Bumped, Rest/binary>>.
%% Property: for random object sets partitioned into remote-only /
%% local-only / mutated / identical, the tree diff must report exactly
%% the expected missing / remote_missing / different keys, and the
%% diff must be empty after the trees are reconciled.
prop_correct() ->
    ?FORALL(Objects, objects(),
        ?FORALL({MissingN1, MissingN2, DifferentN}, lengths(length(Objects)),
            begin
                {RemoteOnly, Objects2} = lists:split(MissingN1, Objects),
                {LocalOnly, Objects3} = lists:split(MissingN2, Objects2),
                {Different, Same} = lists:split(DifferentN, Objects3),
                Different2 = [{Key, mutate(Hash)} || {Key, Hash} <- Different],
                Insert = fun(Tree, Vals) ->
                                 lists:foldl(fun({Key, Hash}, Acc) ->
                                                     insert(Key, Hash, Acc)
                                             end, Tree, Vals)
                         end,
                A0 = new(),
                B0 = new(),
                %% Repeat the scenario over several tree ids sharing
                %% the same backing store.
                [begin
                     A1 = new({0,Id}, A0),
                     B1 = new({0,Id}, B0),
                     A2 = Insert(A1, Same),
                     A3 = Insert(A2, LocalOnly),
                     A4 = Insert(A3, Different),
                     B2 = Insert(B1, Same),
                     B3 = Insert(B2, RemoteOnly),
                     B4 = Insert(B3, Different2),
                     A5 = update_tree(A4),
                     B5 = update_tree(B4),
                     Expected =
                         [{missing, Key} || {Key, _} <- RemoteOnly] ++
                         [{remote_missing, Key} || {Key, _} <- LocalOnly] ++
                         [{different, Key} || {Key, _} <- Different],
                     KeyDiff = local_compare(A5, B5),
                     ?assertEqual(lists:usort(Expected),
                                  lists:usort(KeyDiff)),
                     %% Reconcile trees
                     A6 = Insert(A5, RemoteOnly),
                     B6 = Insert(B5, LocalOnly),
                     B7 = Insert(B6, Different),
                     A7 = update_tree(A6),
                     B8 = update_tree(B7),
                     ?assertEqual([], local_compare(A7, B8)),
                     true
                 end || Id <- lists:seq(0, 10)],
                close(A0),
                close(B0),
                destroy(A0),
                destroy(B0),
                true
            end)).
%% Property: estimate_keys/1 must come within 20% of the true count.
prop_est() ->
    %% It's hard to estimate under 10000 keys
    ?FORALL(N, choose(10000, 500000),
        begin
            {ok, EstKeys} = estimate_keys(update_tree(insert_many(N, new()))),
            Diff = abs(N - EstKeys),
            MaxDiff = N div 5,
            ?debugVal(Diff), ?debugVal(EstKeys), ?debugVal(MaxDiff),
            ?assertEqual(true, MaxDiff > Diff),
            true
        end).
-endif.
-endif.
%% Copyright (c) 2020 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : luerl_new.erl
%% Authors : <NAME>
%% Purpose : Basic LUA 5.3 interface.
-module(luerl_new).
-include("luerl.hrl").
-export([init/0,gc/1,
load/2,load/3,loadfile/2,loadfile/3,
load_module/3,
do/2,dofile/2,
call/3,call_chunk/3,call_function/3,call_method/4,
get_table_keys/2,set_table_keys/3,
get_stacktrace/1
]).
%% Encoding and decoding.
-export([encode/2,encode_list/2,decode/2,decode_list/2]).
%% Just call the other module!
%% @doc Create a fresh Luerl state (delegates to new_luerl).
init() -> new_luerl:init().

%% @doc Run a garbage-collection pass over the Lua heap.
gc(St) -> new_luerl:gc(St).

%% @doc Parse and compile a chunk of Lua source.
load(Str, St) ->
    new_luerl:load(Str, St).

load(Str, Opts, St) ->
    new_luerl:load(Str, Opts, St).
%% @doc Load and compile a Lua source file.
%% Fix: previously delegated to new_luerl:init(Name, St) instead of
%% new_luerl:loadfile(Name, St) — a copy/paste error; every other
%% function in this module forwards to its same-named counterpart,
%% and new_luerl:init/2 is not part of that API.
loadfile(Name, St) ->
    new_luerl:loadfile(Name, St).
loadfile(Name, Opts, St) ->
    new_luerl:loadfile(Name, Opts, St).

%% @doc Install an Erlang-implemented module at the given table path.
load_module(TabPath, Module, St) ->
    new_luerl:load_module(TabPath, Module, St).

%% @doc Compile and immediately execute a chunk of Lua source.
do(What, St) ->
    new_luerl:do(What, St).

%% @doc Compile and immediately execute a Lua source file.
dofile(Name, St) ->
    new_luerl:dofile(Name, St).
%% All of the following are thin delegations to new_luerl; see that
%% module for the actual semantics.
call(FuncRef, Args, St) ->
    new_luerl:call(FuncRef, Args, St).

call_chunk(FuncRef, Args, St) ->
    new_luerl:call_chunk(FuncRef, Args, St).

call_function(FuncRef, Args, St) ->
    new_luerl:call_function(FuncRef, Args, St).

call_method(Obj, Meth, Args, St) ->
    new_luerl:call_method(Obj, Meth, Args, St).

get_table_keys(Keys, St) ->
    new_luerl:get_table_keys(Keys, St).

set_table_keys(Keys, Val, St) ->
    new_luerl:set_table_keys(Keys, Val, St).

get_stacktrace(St) ->
    new_luerl:get_stacktrace(St).
%% @doc Encode an Erlang term into its Lua representation.
encode(Term, St) ->
    new_luerl:encode(Term, St).

encode_list(Terms, St) ->
    new_luerl:encode_list(Terms, St).

%% @doc Decode a Lua value back into an Erlang term.
decode(Term, St) ->
    new_luerl:decode(Term, St).
%% @doc Decode a list of Lua values back into Erlang terms.
%% Fix: stray extraction residue after the terminating period on the
%% last line made the module unparsable.
decode_list(Terms, St) ->
    new_luerl:decode_list(Terms, St).
%% -------------------------------------------------------------------
%%
%% Copyright <2013-2018> <
%% Technische Universität Kaiserslautern, Germany
%% Université Pierre et Marie Curie / Sorbonne-Université, France
%% Universidade NOVA de Lisboa, Portugal
%% Université catholique de Louvain (UCL), Belgique
%% INESC TEC, Portugal
%% >
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either expressed or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% List of the contributors to the development of Antidote: see AUTHORS file.
%% Description and complete License: see LICENSE file.
%% -------------------------------------------------------------------
-module(vector_orddict).
-include("antidote.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% @doc The vector orddict is an ordered dictionary used to store materialized snapshots whose order
%% is described by vectorclocks.
%% Note that the elements are stored in a sorted list going from big to small (left to right);
%% i.e. the most recent snapshot is stored at the head of the list.
%% The second element of the tuple stores the size of the list.
-type vector_orddict() :: {[{vectorclock(), term()}], non_neg_integer()}.
-type nonempty_vector_orddict() :: {[{vectorclock(), term()}, ...], non_neg_integer()}.
-export_type([vector_orddict/0, nonempty_vector_orddict/0]).
-export([
new/0,
get_smaller/2,
get_smaller_from_id/3,
insert/3,
insert_bigger/3,
sublist/3,
size/1,
to_list/1,
from_list/1,
first/1,
last/1,
filter/2,
is_concurrent_with_any/2
]).
%% @doc An empty vector orddict: no entries, size 0.
-spec new() -> {[], 0}.
new() ->
    {[], 0}.
%% @doc Get the first appropriate element from the dict according to a
%% monotonically increasing ordering.
%%
%% `get_smaller(Clock, Dict)' returns `{{DClock, _} = Entry, IsFirst}',
%% where Entry is the most recent entry such that `DClock <= Clock'
%% (or `undefined' if no entry qualifies).
%%
%% IsFirst indicates whether the selected entry was the newest entry
%% in the orddict.
-spec get_smaller(vectorclock(), vector_orddict()) ->
    {undefined | {vectorclock(), term()}, boolean()}.
get_smaller(Vector, {List, _Size}) ->
    get_smaller_internal(Vector, List, true).
%% Walk the newest-to-oldest entry list; the first entry whose clock
%% is =< Vector wins. IsFirst stays true only while at the head.
-spec get_smaller_internal(vectorclock(), [{vectorclock(), term()}], boolean()) ->
    {undefined | {vectorclock(), term()}, boolean()}.
get_smaller_internal(_Vector, [], IsFirst) ->
    {undefined, IsFirst};
get_smaller_internal(Vector, [{FirstClock, FirstVal} | Rest], IsFirst) ->
    case vectorclock:le(FirstClock, Vector) of
        true ->
            {{FirstClock, FirstVal}, IsFirst};
        false ->
            get_smaller_internal(Vector, Rest, false)
    end.
%% @doc Get the first (newest) entry whose clock component for Id is
%% smaller than or equal to Time, or undefined if none qualifies.
-spec get_smaller_from_id(term(), clock_time(), vector_orddict()) ->
    undefined | {vectorclock(), term()}.
get_smaller_from_id(_Id, _Time, {_List, Size}) when Size == 0 ->
    undefined;
get_smaller_from_id(Id, Time, {List, _Size}) ->
    get_smaller_from_id_internal(Id, Time, List).

%% Scan newest-to-oldest for the first entry with Clock[Id] =< Time.
-spec get_smaller_from_id_internal(term(), clock_time(), [{vectorclock, term()}, ...]) ->
    undefined | {vectorclock(), term()}.
get_smaller_from_id_internal(_Id, _Time, []) ->
    undefined;
get_smaller_from_id_internal(Id, Time, [{Clock, Val} | Rest]) ->
    ValTime = vectorclock:get(Id, Clock),
    case ValTime =< Time of
        true ->
            {Clock, Val};
        false ->
            get_smaller_from_id_internal(Id, Time, Rest)
    end.
%% @doc Insert a new entry into the sorted (newest-first) list at the
%% position determined by its vectorclock.
-spec insert(vectorclock(), term(), vector_orddict()) -> vector_orddict().
insert(Vector, Val, {List, Size}) ->
    insert_internal(Vector, Val, List, Size + 1, []).

%% The new entry is placed before the first existing entry whose clock
%% it strictly dominates (all components greater); otherwise it sinks
%% towards the tail. PrevList accumulates the skipped prefix reversed.
-spec insert_internal(vectorclock(), term(), [{vectorclock(), term()}], non_neg_integer(), [
    {vectorclock(), term()}
]) -> vector_orddict().
insert_internal(Vector, Val, [], Size, PrevList) ->
    {lists:reverse([{Vector, Val} | PrevList]), Size};
insert_internal(Vector, Val, [{FirstClock, FirstVal} | Rest], Size, PrevList) ->
    case vectorclock:all_dots_greater(Vector, FirstClock) of
        true ->
            {lists:reverse(PrevList, [{Vector, Val} | [{FirstClock, FirstVal} | Rest]]), Size};
        false ->
            insert_internal(Vector, Val, Rest, Size, [{FirstClock, FirstVal} | PrevList])
    end.
%% @doc Insert the entry only if it is not dominated by the current
%% newest entry; otherwise the dict is returned unchanged.
-spec insert_bigger(vectorclock(), term(), vector_orddict()) -> nonempty_vector_orddict().
insert_bigger(Vector, Val, {List, Size}) ->
    insert_bigger_internal(Vector, Val, List, Size).

%% NOTE(review): only the head (newest) entry is compared, and the
%% test is "not =<", so a clock merely concurrent with the head is
%% also inserted — confirm this is the intended semantics.
-spec insert_bigger_internal(vectorclock(), term(), [{vectorclock(), term()}], non_neg_integer()) ->
    nonempty_vector_orddict().
insert_bigger_internal(Vector, Val, [], 0) ->
    {[{Vector, Val}], 1};
insert_bigger_internal(Vector, Val, [{FirstClock, FirstVal} | Rest], Size) ->
    case not vectorclock:le(Vector, FirstClock) of
        true ->
            {[{Vector, Val} | [{FirstClock, FirstVal} | Rest]], Size + 1};
        false ->
            {[{FirstClock, FirstVal} | Rest], Size}
    end.
%% @doc Returns sublist from position Start with length Len.
-spec sublist(vector_orddict(), non_neg_integer(), non_neg_integer()) -> vector_orddict().
sublist({Entries, _Size}, Start, Len) ->
    %% Slice up to Len entries from 1-based position Start and record
    %% the slice's actual length as the new size.
    Slice = lists:sublist(Entries, Start, Len),
    {Slice, length(Slice)}.
%% @doc True if OtherClock is concurrent with the clock of at least
%% one entry in the dict.
-spec is_concurrent_with_any(vector_orddict(), vectorclock()) -> boolean().
is_concurrent_with_any({List, _Size}, OtherClock) ->
    lists:any(fun({Clock, _Val}) -> vectorclock:conc(Clock, OtherClock) end, List).
%% @doc Number of entries (tracked incrementally, not recomputed).
-spec size(vector_orddict()) -> non_neg_integer().
size({_List, Size}) ->
    Size.

%% @doc The raw newest-first entry list.
-spec to_list(vector_orddict()) -> [{vectorclock(), term()}].
to_list({List, _Size}) ->
    List.

%% @doc Wrap a list as a vector orddict.
%% NOTE(review): the list is assumed to already be sorted newest-first;
%% this is not validated.
%% TODO Check that list is ordered!
-spec from_list([{vectorclock(), term()}]) -> vector_orddict().
from_list(List) ->
    {List, length(List)}.
%% @doc The newest entry; crashes (function_clause) on an empty dict.
-spec first(vector_orddict()) -> {vectorclock(), term()}.
first({[First | _Rest], _Size}) ->
    First.

%% @doc The oldest entry; crashes on an empty dict.
-spec last(vector_orddict()) -> {vectorclock(), term()}.
last({List, _Size}) ->
    lists:last(List).
%% @doc Returns all entries for which the filter function evaluates to true.
-spec filter(fun((term()) -> boolean()), vector_orddict()) -> vector_orddict().
filter(Pred, {Entries, _Size}) ->
    %% Keep entries passing Pred; the stored size is recomputed.
    Kept = [Entry || Entry <- Entries, Pred(Entry)],
    {Kept, length(Kept)}.
-ifdef(TEST).
%% get_smaller_from_id/3 returns the newest entry whose per-DC clock
%% component is =< the requested time.
vector_oddict_get_smaller_from_id_test() ->
    %% Fill up the vector
    Vdict0 = vector_orddict:new(),
    CT1 = vectorclock:from_list([{dc1, 4}, {dc2, 4}]),
    Vdict1 = vector_orddict:insert(CT1, 1, Vdict0),
    CT2 = vectorclock:from_list([{dc1, 8}, {dc2, 8}]),
    Vdict2 = vector_orddict:insert(CT2, 2, Vdict1),
    CT3 = vectorclock:from_list([{dc1, 1}, {dc2, 10}]),
    Vdict3 = vector_orddict:insert(CT3, 3, Vdict2),
    %% Check you get the correct smaller snapshot
    ?assertEqual(undefined, vector_orddict:get_smaller_from_id(dc1, 0, Vdict0)),
    ?assertEqual(undefined, vector_orddict:get_smaller_from_id(dc1, 0, Vdict3)),
    ?assertEqual({CT3, 3}, vector_orddict:get_smaller_from_id(dc1, 1, Vdict3)),
    ?assertEqual({CT2, 2}, vector_orddict:get_smaller_from_id(dc2, 9, Vdict3)).

%% get_smaller/2 returns the newest entry dominated by the query clock
%% plus a flag telling whether it was the head entry.
vector_orddict_get_smaller_test() ->
    %% Fill up the vector
    Vdict0 = vector_orddict:new(),
    CT1 = vectorclock:from_list([{dc1, 4}, {dc2, 4}]),
    Vdict1 = vector_orddict:insert(CT1, 1, Vdict0),
    CT2 = vectorclock:from_list([{dc1, 8}, {dc2, 8}]),
    Vdict2 = vector_orddict:insert(CT2, 2, Vdict1),
    CT3 = vectorclock:from_list([{dc1, 1}, {dc2, 10}]),
    Vdict3 = vector_orddict:insert(CT3, 3, Vdict2),
    %% Check you get the correct smaller snapshot
    ?assertEqual(
        {undefined, false},
        vector_orddict:get_smaller(vectorclock:from_list([{dc1, 0}, {dc2, 0}]), Vdict3)
    ),
    ?assertEqual(
        {undefined, false},
        vector_orddict:get_smaller(vectorclock:from_list([{dc1, 1}, {dc2, 6}]), Vdict3)
    ),
    ?assertEqual(
        {{CT1, 1}, false},
        vector_orddict:get_smaller(vectorclock:from_list([{dc1, 5}, {dc2, 5}]), Vdict3)
    ),
    ?assertEqual(
        {{CT2, 2}, true},
        vector_orddict:get_smaller(vectorclock:from_list([{dc1, 9}, {dc2, 9}]), Vdict3)
    ),
    ?assertEqual(
        {{CT3, 3}, false},
        vector_orddict:get_smaller(vectorclock:from_list([{dc1, 3}, {dc2, 11}]), Vdict3)
    ).

%% insert_bigger/3 must leave the dict unchanged for dominated clocks.
vector_orddict_insert_bigger_test() ->
    Vdict0 = vector_orddict:new(),
    %% Insert to empty dict
    CT1 = vectorclock:from_list([{dc1, 4}, {dc2, 4}]),
    Vdict1 = vector_orddict:insert_bigger(CT1, 1, Vdict0),
    ?assertEqual(1, vector_orddict:size(Vdict1)),
    %% Should not insert because smaller
    CT2 = vectorclock:from_list([{dc1, 3}, {dc2, 3}]),
    Vdict2 = vector_orddict:insert_bigger(CT2, 2, Vdict1),
    ?assertEqual(1, vector_orddict:size(Vdict2)),
    %% Should insert because bigger
    CT3 = vectorclock:from_list([{dc1, 6}, {dc2, 10}]),
    Vdict3 = vector_orddict:insert_bigger(CT3, 3, Vdict2),
    ?assertEqual(2, vector_orddict:size(Vdict3)).

%% filter/2 must recompute the size of the filtered dict.
vector_orddict_filter_test() ->
    VDict = vector_orddict:from_list([
        {vectorclock:from_list([{dc1, 4}, {dc2, 4}]), snapshot_1},
        {vectorclock:from_list([{dc1, 0}, {dc2, 3}]), snapshot_2},
        {vectorclock:new(), snapshot_3}
    ]),
    ?assertEqual(3, vector_orddict:size(VDict)),
    Result = vector_orddict:filter(
        fun({Vector, _}) ->
            vectorclock:gt(Vector, vectorclock:new())
        end,
        VDict
    ),
    ?assertEqual(2, vector_orddict:size(Result)),
    Filtered = [
        {vectorclock:from_list([{dc1, 4}, {dc2, 4}]), snapshot_1},
        {vectorclock:from_list([{dc1, 0}, {dc2, 3}]), snapshot_2}
    ],
    ?assertEqual(Filtered, vector_orddict:to_list(Result)).

%% Concurrency detection against every stored clock.
%% NOTE(review): ?assertEqual arguments here are (Actual, Expected),
%% reversed from the usual (Expected, Actual) convention.
vector_orddict_conc_test() ->
    VDict = vector_orddict:from_list([
        {vectorclock:from_list([{dc1, 4}, {dc2, 4}]), snapshot_1},
        {vectorclock:from_list([{dc1, 0}, {dc2, 3}]), snapshot_2},
        {vectorclock:new(), snapshot_3}
    ]),
    CT1 = vectorclock:from_list([{dc1, 3}, {dc2, 3}]),
    CT2 = vectorclock:from_list([{dc1, 2}, {dc2, 1}]),
    ?assertEqual(is_concurrent_with_any(VDict, CT1), false),
    ?assertEqual(is_concurrent_with_any(VDict, CT2), true).
-endif.
%%%
%%% A matter of time.
-module(ff_time).
-export([now/0]).
-export([to_rfc3339/1]).
-export([from_rfc3339/1]).
-export([add_interval/2]).
-export_type([timestamp_ms/0]).
-type timestamp_ms() :: integer().
-type year() :: integer().
-type month() :: integer().
-type day() :: integer().
-type hour() :: integer().
-type minute() :: integer().
-type second() :: integer().
-type date() :: {year(), month(), day()}.
-type time() :: {hour(), minute(), second()}.
-type datetime_interval() :: {date(), time()}.
%% API
%% @doc Current wall-clock time in milliseconds since the Unix epoch.
-spec now() -> timestamp_ms().
now() ->
    erlang:system_time(millisecond).
%% @doc Format a millisecond timestamp as an RFC 3339 string.
-spec to_rfc3339(timestamp_ms()) -> binary().
to_rfc3339(Timestamp) ->
    genlib_rfc3339:format_relaxed(Timestamp, millisecond).

%% @doc Parse an RFC 3339 string into a millisecond timestamp.
-spec from_rfc3339(binary()) -> timestamp_ms().
from_rfc3339(BTimestamp) ->
    genlib_rfc3339:parse(BTimestamp, millisecond).
%% @doc Shift Timestamp forward by a calendar interval {Date, Time}:
%% the date part is applied calendar-aware via genlib_time:shift_date,
%% the time part as a duration; the sub-second remainder is preserved.
-spec add_interval(timestamp_ms(), datetime_interval()) -> timestamp_ms().
add_interval(Timestamp, {Date, Time}) ->
    %% Keep the millisecond remainder aside; the genlib helpers work
    %% in whole seconds.
    Ms = Timestamp rem 1000,
    TSSeconds = erlang:convert_time_unit(Timestamp, millisecond, second),
    {D, T} = genlib_time:unixtime_to_daytime(TSSeconds),
    NewDate = genlib_time:daytime_to_unixtime({genlib_time:shift_date(D, Date), T}),
    DateTime = genlib_time:add_duration(NewDate, Time),
    DateTime * 1000 + Ms.
%% TESTS
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-spec test() -> _.

%% Formatting and parsing must round-trip without loss.
-spec rfc3339_symmetry_test() -> _.
rfc3339_symmetry_test() ->
    TimestampStr = <<"2000-01-01T00:00:00Z">>,
    ?assertEqual(TimestampStr, to_rfc3339(from_rfc3339(TimestampStr))).

%% Fixed-duration time components map to exact millisecond offsets.
-spec add_second_interval_test() -> _.
add_second_interval_test() ->
    Timestamp = ff_time:now(),
    NewTimestamp = add_interval(Timestamp, {{0, 0, 0}, {0, 0, 1}}),
    ?assertEqual(Timestamp + 1000, NewTimestamp).

-spec add_minute_interval_test() -> _.
add_minute_interval_test() ->
    Timestamp = ff_time:now(),
    NewTimestamp = add_interval(Timestamp, {{0, 0, 0}, {0, 1, 0}}),
    ?assertEqual(Timestamp + 60 * 1000, NewTimestamp).

-spec add_hour_interval_test() -> _.
add_hour_interval_test() ->
    Timestamp = ff_time:now(),
    NewTimestamp = add_interval(Timestamp, {{0, 0, 0}, {1, 0, 0}}),
    ?assertEqual(Timestamp + 60 * 60 * 1000, NewTimestamp).

-spec add_day_interval_test() -> _.
add_day_interval_test() ->
    Timestamp = ff_time:now(),
    NewTimestamp = add_interval(Timestamp, {{0, 0, 1}, {0, 0, 0}}),
    ?assertEqual(Timestamp + 24 * 60 * 60 * 1000, NewTimestamp).
-endif.
% Example of frequency server used in chapter 03 as a starting point to
% separate specif from generic behaviour
%
% compile: erlc chapter-03/01/frequency.erl
% check: dialyzer chapter-03/01/frequency.erl
%
% after compile, use: erl
% > frequency:start().
% > true
% > frequency:alocate().
% > {ok, 10}
% > frequency:alocate().
% > {ok, 11}
% > frequency:alocate().
% > {ok, 12}
% > frequency:deallocate(10).
% > ok
% > frequency:stop().
% > ok
-module(frequency).
-export([start/0, stop/0, allocate/0, deallocate/1]).
-export([init/0]).
-export_type([frequency/0]).
-type frequency() :: integer().
-type allocated_frequency() :: {frequency(), pid()}.
-type loop_data() :: {[frequency()], [allocated_frequency()]}.
%% client API
%% @doc Start the server: spawn the loop process and register it under
%% the name 'frequency'. Crashes (badarg) if already registered.
-spec start() -> true.
start() ->
    register(frequency, spawn(frequency, init, [])).

%% @doc Synchronously stop the server.
-spec stop() -> ok.
stop() ->
    call(stop).

%% @doc Request a frequency; {error, no_frequency} when the pool is
%% exhausted.
-spec allocate() -> {ok, frequency()} | {error, no_frequency}.
allocate() ->
    call(allocate).

%% @doc Return a previously allocated frequency to the pool.
-spec deallocate(frequency()) -> ok.
deallocate(Frequency) ->
    call({deallocate, Frequency}).
%% callback API
%% @doc Entry point for the spawned server process: start the loop
%% with all frequencies free and none allocated.
-spec init() -> any().
init() ->
    Frequencies = {possible_frequencies(), []},
    loop(Frequencies).
%% generic
%% Send a request to the registered server and wait for its reply.
%% NOTE(review): no 'after' timeout — this blocks forever if the
%% server process is dead.
call(Message) ->
    frequency ! {request, self(), Message},
    receive
        {reply, Reply} -> Reply
    end.

%% Deliver a reply to a waiting client.
reply(Pid, Reply) ->
    Pid ! {reply, Reply}.
%% Server loop: hold {Free, Allocated} and serve allocate/deallocate
%% requests until a stop request arrives (which ends the recursion and
%% lets the process terminate normally).
loop(Frequencies) ->
    receive
        {request, Pid, allocate} ->
            {UpdatedFrequencies, Reply} = allocate(Frequencies, Pid),
            reply(Pid, Reply),
            loop(UpdatedFrequencies);
        {request, Pid, {deallocate, Frequency}} ->
            %% NOTE(review): ownership is not checked — any caller may
            %% deallocate any frequency. Confirm this is acceptable.
            UpdatedFrequencies = deallocate(Frequencies, Frequency),
            reply(Pid, ok),
            loop(UpdatedFrequencies);
        {request, Pid, stop} ->
            reply(Pid, ok)
    end.
%% private
-spec allocate(loop_data(), pid()) -> {loop_data(), {ok, frequency()} | {error, no_frequency}}.
%% Hand out the head of the free list to Pid; an empty pool is an
%% error reported to the caller.
allocate({[], _Allocated} = LoopData, _Pid) ->
    {LoopData, {error, no_frequency}};
allocate({[Next | RestFree], Allocated}, Pid) ->
    {{RestFree, [{Next, Pid} | Allocated]}, {ok, Next}}.
-spec deallocate(loop_data(), frequency()) -> loop_data().
%% Return Frequency to the free pool and drop its allocation record
%% (removal is a no-op if it was not allocated).
deallocate({Free, Allocated}, Frequency) ->
    StillAllocated = lists:keydelete(Frequency, 1, Allocated),
    {[Frequency | Free], StillAllocated}.
-spec possible_frequencies() -> [frequency()].
%% @doc The fixed pool of radio frequencies this server manages.
%% Fix: stray extraction residue after the closing period made the
%% module unparsable; the literal list is replaced with the equivalent
%% lists:seq/2 range.
possible_frequencies() ->
    lists:seq(10, 15).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2007-2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% -------------------------------------------------------------------
-module(riak_search_schema_parser).
-export([
from_eterm/2
]).
-include("riak_search.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-import(riak_search_utils, [to_list/1, to_atom/1, to_binary/1, to_boolean/1, to_integer/1]).
%% Given an Erlang term (see riak_search/priv/search.def for example)
%% parse the term into a riak_search_schema parameterized module.
%% Given an Erlang term (see riak_search/priv/search.def for example)
%% parse the term into a riak_search_schema parameterized module.
%% Throws {error, {malformed_schema, What, ...}} on missing/invalid
%% schema-level properties.
from_eterm(SchemaName, {schema, SchemaProps, FieldDefs}) when is_binary(SchemaName) ->
    %% Read schema-level properties...
    Version = to_list(proplists:get_value(version, SchemaProps)),
    NVal = to_integer(proplists:get_value(n_val, SchemaProps, 3)),
    DefaultField = to_binary(proplists:get_value(default_field, SchemaProps)),
    UniqueKey = to_binary(proplists:get_value(unique_key, SchemaProps, "id")),
    SchemaAnalyzer = proplists:get_value(analyzer_factory, SchemaProps),
    DefaultOp = to_atom(proplists:get_value(default_op, SchemaProps, "or")),
    %% Verify that version is defined...
    Version /= undefined orelse
        throw({error, {malformed_schema, version, {schema, SchemaProps}}}),
    %% Verify that the unique key is defined...
    UniqueKey /= undefined orelse
        throw({error, {malformed_schema, unique_key, {schema, SchemaProps}}}),
    %% Verify that default field is defined...
    DefaultField /= undefined orelse
        throw({error, {malformed_schema, default_field, {schema, SchemaProps}}}),
    %% Verify that DefaultOp is either "and" or "or"...
    lists:member(DefaultOp, ['and', 'or']) orelse
        throw({error, {malformed_schema, default_op, {schema, SchemaProps}}}),
    {ok, Fields} = parse_fields(FieldDefs, SchemaAnalyzer, []),
    {ok, riak_search_schema:new(SchemaName, Version, NVal, DefaultField, UniqueKey,
                                Fields, DefaultOp, SchemaAnalyzer)}.
%% Fold the field definition list into #riak_search_field records.
%% Concrete and dynamic (wildcard-named) fields are supported; dynamic
%% fields cannot be required and cannot have aliases.
%% Fix: the `{fields, ...}' clause previously repeated the variable
%% `Fields' in both the tuple and the accumulator position, so the
%% clause only matched when the wrapped list happened to equal the
%% accumulator (i.e. only `{fields, []}'); wrapped field lists were
%% effectively never unwrapped. The clause now uses distinct names.
parse_fields([], _SchemaAnalyzer, Fields) ->
    {ok, lists:reverse(Fields)};
parse_fields({fields, FieldDefs}, SchemaAnalyzer, Fields) ->
    parse_fields(FieldDefs, SchemaAnalyzer, Fields);
parse_fields([{FieldClass, FieldProps}|T], SchemaAnalyzer, Fields)
  when FieldClass == field orelse FieldClass == dynamic_field ->
    %% Read per-field properties, falling back to type-based defaults.
    IsDynamic = (FieldClass == dynamic_field),
    Name = to_binary(proplists:get_value(name, FieldProps)),
    Type = to_atom(proplists:get_value(type, FieldProps, string)),
    IsRequired = (not IsDynamic) andalso (proplists:get_value(required, FieldProps, false) == true),
    IsSkip = to_boolean(proplists:get_value(skip, FieldProps, false)),
    Aliases = [to_binary(X) || X <- proplists:get_all_values(alias, FieldProps)],
    DefaultPaddingSize = to_integer(get_default_padding_size(Type)),
    PaddingSize = to_integer(proplists:get_value(padding_size, FieldProps, DefaultPaddingSize)),
    DefaultPaddingChar = get_default_padding_char(Type),
    PaddingChar = proplists:get_value(padding_char, FieldProps, DefaultPaddingChar),
    DefaultFieldAnalyzer = get_default_field_analyzer(Type, SchemaAnalyzer),
    FieldAnalyzer = proplists:get_value(analyzer_factory, FieldProps, DefaultFieldAnalyzer),
    FieldAnalyzerArgs = proplists:get_value(analyzer_args, FieldProps, undefined),
    Inline = proplists:get_value(inline, FieldProps, false),
    %% Verify that name exists...
    Name /= undefined orelse
        throw({error, {malformed_schema, name, FieldProps}}),
    %% Verify type...
    valid_type(Type) orelse
        throw({error, {malformed_schema, type, FieldProps}}),
    %% Make sure no aliases on dynamic fields
    (IsDynamic == true andalso Aliases /= []) andalso
        throw({error, {malformed_schema, no_dynamic_field_aliases, FieldProps}}),
    %% Dynamic field names are wildcard patterns; compile to a regex.
    case FieldClass of
        field ->
            NewName = Name;
        dynamic_field ->
            NewName = calculate_name_pattern_regex(Name)
    end,
    %% Create the field...
    Field0 = #riak_search_field {
        name=NewName,
        aliases=[calculate_alias_pattern(A) || A <- lists:usort(Aliases)],
        type=Type,
        padding_size=PaddingSize,
        padding_char=normalize_padding_char(PaddingChar, Name),
        required=IsRequired,
        dynamic=IsDynamic,
        skip=IsSkip,
        analyzer_factory=FieldAnalyzer,
        analyzer_args=FieldAnalyzerArgs,
        inline=Inline
    },
    NewAnalyzerArgs = calculate_analyzer_args(Field0),
    Field = Field0#riak_search_field{analyzer_args = NewAnalyzerArgs},
    %% Add the field...
    parse_fields(T, SchemaAnalyzer, [Field|Fields]).
%% Default zero-padding width for an indexed value of the given type:
%% numeric/date fields pad to 10 digits, everything else is unpadded.
get_default_padding_size(integer) -> 10;
get_default_padding_size(date) -> 10;
get_default_padding_size(_Other) -> 0.
%% Default pad character: zeros for numeric/date types, spaces
%% otherwise.
get_default_padding_char(integer) -> $0;
get_default_padding_char(date) -> $0;
get_default_padding_char(_Other) -> $\s.
%% Type-based analyzer default: integers get the integer analyzer,
%% dates the no-op analyzer, everything else inherits the schema-wide
%% analyzer factory.
get_default_field_analyzer(integer, _SchemaAnalyzer) ->
    ?INTEGER_ANALYZER;
get_default_field_analyzer(date, _SchemaAnalyzer) ->
    ?NOOP_ANALYZER;
get_default_field_analyzer(_Type, SchemaAnalyzer) ->
    SchemaAnalyzer.
%% Return true if this is a type we know and love.
valid_type(string) -> true;
valid_type(integer) -> true;
valid_type(date) -> true;
valid_type(_Other) -> false.
%% Accept a padding char given either as a one-character string (e.g.
%% "0") or as a character literal (e.g. $0); anything else is a schema
%% error reported against Field.
normalize_padding_char([C], _Field) when is_integer(C) ->
    C;
normalize_padding_char(C, _Field) when is_integer(C) ->
    C;
normalize_padding_char(_Invalid, Field) ->
    throw({error, {bad_padding_char, Field}}).
%% Checks to see if an alias is exact or a wildcard (and if
%% it is a wildcard, precompile the pattern to match).
%% Returns {exact, Alias} or {re, Alias, CompiledPattern}; throws on
%% a wildcard that does not compile to a valid regex.
calculate_alias_pattern(Alias) ->
    IsWildcard = binary_contains($*, Alias),
    case IsWildcard of
        false ->
            {exact, Alias};
        true ->
            %% Compile once at schema-parse time so matching is cheap.
            case re:compile(calculate_name_pattern_regex(Alias)) of
                {ok, MP} ->
                    {re, Alias, MP};
                {error, ErrSpec} ->
                    throw({error, {bad_alias_wildcard, {Alias, ErrSpec}}})
            end
    end.
%% True if byte value Byte occurs anywhere in Bin.
binary_contains(Byte, Bin) ->
    lists:member(Byte, binary_to_list(Bin)).
%% A dynamic-field name must contain a "*" wildcard. Anchor the whole
%% name and translate each "*" into the regex ".*".
calculate_name_pattern_regex(Name) ->
    iolist_to_binary([$^, calculate_name_pattern_regex_1(Name), $$]).

calculate_name_pattern_regex_1(<<$*, Rest/binary>>) ->
    ".*" ++ calculate_name_pattern_regex_1(Rest);
calculate_name_pattern_regex_1(<<Char, Rest/binary>>) ->
    [Char | calculate_name_pattern_regex_1(Rest)];
calculate_name_pattern_regex_1(<<>>) ->
    [].
%% Calculate the arguments for the analyzer_factory.
%%
%% If the schema explicitly supplied analyzer_args, they are validated
%% (must be a list of latin-1 strings, see is_string/1) and returned
%% unchanged; invalid args are a schema error.
calculate_analyzer_args(Field=#riak_search_field{analyzer_args=Args}) when
Args =/= undefined ->
case is_list(Args) andalso lists:all(fun is_string/1, Args) of
true ->
Args;
false ->
throw({error, {analyzer_args_must_be_strings, Field#riak_search_field.name}})
end;
%% No explicit args: only the integer analyzer derives implicit args.
%% It receives the padding size (as a string), and is only valid when
%% the field pads with zeros; any other padding char is a schema error.
%% All other analyzer factories get no arguments (undefined).
calculate_analyzer_args(Field) ->
case Field#riak_search_field.analyzer_factory of
?INTEGER_ANALYZER ->
case Field#riak_search_field.padding_char of
$0 ->
[integer_to_list(Field#riak_search_field.padding_size)];
_ ->
throw({error, {integer_fields_only_pads_with_zeros,
Field#riak_search_field.name}})
end;
_ ->
undefined
end.
%% True for (possibly empty) proper lists of latin-1 character codes
%% (0..255). Binaries, atoms, nested lists and out-of-range integers
%% are all rejected.
is_string([Head | Tail]) ->
    case is_integer(Head) andalso Head >= 0 andalso Head =< 255 of
        true -> is_string(Tail);
        false -> false
    end;
is_string([]) ->
    true;
is_string(_NotAString) ->
    false.
-ifdef(TEST).
%% is_string/1 accepts only proper lists of latin-1 codes.
is_string_test() ->
?assertEqual(true, is_string("")),
?assertEqual(true, is_string("a")),
?assertEqual(true, is_string("a b c")),
?assertEqual(false, is_string(undefined)),
?assertEqual(false, is_string(["nested string"])),
?assertEqual(false, is_string([0, 256])), % char out of range
?assertEqual(false, is_string(<<"binaries are not strings">>)).
%% Explicit analyzer_args are returned unchanged when they are a list
%% of strings; every other shape is a schema error. Note "barestr"
%% throws because its elements are integers, not strings.
calculate_analyzer_args_test() ->
ZeroArgs=#riak_search_field{analyzer_args=[]},
?assertEqual([], calculate_analyzer_args(ZeroArgs)),
OneArgs=#riak_search_field{analyzer_args=["abc"]},
?assertEqual(["abc"], calculate_analyzer_args(OneArgs)),
TwoArgs=#riak_search_field{analyzer_args=["abc","123"]},
?assertEqual(["abc","123"], calculate_analyzer_args(TwoArgs)),
?assertThrow({error, {analyzer_args_must_be_strings, _}},
calculate_analyzer_args(#riak_search_field{analyzer_args=atom})),
?assertThrow({error, {analyzer_args_must_be_strings, _}},
calculate_analyzer_args(#riak_search_field{analyzer_args=[atomlist]})),
?assertThrow({error, {analyzer_args_must_be_strings, _}},
calculate_analyzer_args(#riak_search_field{analyzer_args="barestr"})),
?assertThrow({error, {analyzer_args_must_be_strings, _}},
calculate_analyzer_args(#riak_search_field{analyzer_args=123})),
?assertThrow({error, {analyzer_args_must_be_strings, _}},
calculate_analyzer_args(#riak_search_field{analyzer_args= <<"bin">>})).
%% Padding chars may be given as a char or single-char string only.
normalize_padding_char_test() ->
?assertEqual($0, normalize_padding_char($0, fld)),
?assertEqual($0, normalize_padding_char("0", fld)),
?assertThrow({error, {bad_padding_char, _}},
normalize_padding_char(a, fld)),
?assertThrow({error, {bad_padding_char, fld}},
normalize_padding_char(<<"0">>, fld)),
?assertThrow({error, {bad_padding_char, fld}},
normalize_padding_char("00", fld)).
%% A wildcard alias that expands to an invalid regex ("[*" leaves an
%% unterminated character class) must be rejected during parsing.
bad_alias_regexp_test() ->
SchemaProps = [{version, 1},{default_field, <<"field">>}],
FieldDefs = [{field, [{name, <<"badaliaswildre">>},
{alias, <<"[*">>}]}],
SchemaDef = {schema, SchemaProps, FieldDefs},
?assertThrow({error, {bad_alias_wildcard,
{<<"[*">>,
{"missing terminating ] for character class",5}}}},
from_eterm(<<"bad_alias_regexp_test">>, SchemaDef)).
%% Dynamic fields may not declare aliases at all.
alias_on_dynamic_field_invalid_test() ->
SchemaProps = [{version, 1},{default_field, <<"field">>}],
FieldDefs = [{dynamic_field, [{name, <<"field_*">>},
{alias, <<"analias">>}]}],
SchemaDef = {schema, SchemaProps, FieldDefs},
?assertThrow({error, {malformed_schema, no_dynamic_field_aliases, _FieldProps}},
from_eterm(<<"bad_alias_regexp_test">>, SchemaDef)).
-endif. % TEST | src/riak_search_schema_parser.erl | 0.560373 | 0.413418 | riak_search_schema_parser.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2010-2015. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%%
%% An example Diameter server that can respond to the base protocol
%% RAR sent by the client example.
%%
%% The simplest example to start a server listening on the loopback
%% address (which will serve the example usage given in client.erl) is
%% like this assuming diameter is already started (eg. diameter:start()):
%%
%% server:start().
%% server:listen(tcp).
%%
%% The first call starts a service, the second adds a transport listening
%% on the default port.
%%
-module(server).
-export([start/1, %% start a service
start/2, %%
listen/2, %% add a listening transport
stop/1]). %% stop a service
%% Convenience functions using the default service name.
-export([start/0,
listen/1,
stop/0]).
-define(DEF_SVC_NAME, ?MODULE).
%% The service configuration. In a server supporting multiple Diameter
%% applications each application may have its own, although they could all
%% be configured with a common callback module.
-define(SERVICE(Name), [{'Origin-Host', atom_to_list(Name) ++ ".example.com"},
{'Origin-Realm', "example.com"},
{'Vendor-Id', 193},
{'Product-Name', "Server"},
{'Auth-Application-Id', [0]},
{restrict_connections, false},
{string_decode, false},
{application, [{alias, common},
{dictionary, diameter_gen_base_rfc6733},
{module, server_cb}]}]).
%% start/1
%% Start a service. The single argument is either the service name
%% (an atom) or an option list for the default service name.
start(Name)
when is_atom(Name) ->
start(Name, []);
start(Opts)
when is_list(Opts) ->
start(?DEF_SVC_NAME, Opts).
%% start/0
%% Start the default service with default options.
start() ->
start(?DEF_SVC_NAME).
%% start/2
%% Start the named service. Caller options take precedence: defaults
%% from ?SERVICE/1 are appended only for keys not already present.
start(Name, Opts) ->
node:start(Name, Opts ++ [T || {K,_} = T <- ?SERVICE(Name),
false == lists:keymember(K, 1, Opts)]).
%% listen/2
%% Add a listening transport to the named service.
listen(Name, T) ->
node:listen(Name, T).
%% listen/1 — same, on the default service.
listen(T) ->
listen(?DEF_SVC_NAME, T).
%% stop/1
%% Stop the named service.
stop(Name) ->
node:stop(Name).
%% stop/0 — stop the default service.
stop() ->
stop(?DEF_SVC_NAME). | lib/diameter/examples/code/server.erl | 0.555194 | 0.423041 | server.erl | starcoder
%%==============================================================================
%% Copyright 2010 Erlang Solutions Ltd.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
-module(example_SUITE).
-compile(export_all).
-include_lib("escalus/include/escalus.hrl").
-include_lib("common_test/include/ct.hrl").
%%--------------------------------------------------------------------
%% Suite configuration
%%--------------------------------------------------------------------
%% Common Test entry points: one group, run in sequence.
all() ->
[{group, messages}].
groups() ->
[{messages, [sequence], [messages_story]}].
%% Delegate suite-level settings (e.g. required apps) to escalus.
suite() ->
escalus:suite().
%%--------------------------------------------------------------------
%% Init & teardown
%%--------------------------------------------------------------------
%% Suite fixture: escalus reads its XMPP connection settings here.
init_per_suite(Config) ->
escalus:init_per_suite(Config).
end_per_suite(Config) ->
escalus:end_per_suite(Config).
%% Group fixture: test users are created before and deleted after
%% every group, so each group starts from a clean account state.
init_per_group(_GroupName, Config) ->
escalus:create_users(Config).
end_per_group(_GroupName, Config) ->
escalus:delete_users(Config).
%% Per-testcase fixture: escalus resets client state around each case.
init_per_testcase(CaseName, Config) ->
escalus:init_per_testcase(CaseName, Config).
end_per_testcase(CaseName, Config) ->
escalus:end_per_testcase(CaseName, Config).
%%--------------------------------------------------------------------
%% Message tests
%%--------------------------------------------------------------------
%% One alice client and one bob client exchange a single chat message;
%% escalus:story/3 connects the users and runs the fun with them.
messages_story(Config) ->
escalus:story(Config, [{alice, 1}, {bob, 1}], fun(Alice, Bob) ->
%% Alice sends a message to Bob
escalus:send(Alice, escalus_stanza:chat_to(Bob, <<"OH, HAI!">>)),
%% Bob gets the message
escalus:assert(is_chat_message, [<<"OH, HAI!">>],
escalus:wait_for_stanza(Bob))
end). | test/example_SUITE.erl | 0.621541 | 0.42937 | example_SUITE.erl | starcoder
%% -------------------------------------------------------------------
%%
%% Copyright <2013-2018> <
%% Technische Universität Kaiserslautern, Germany
%% Université Pierre et Marie Curie / Sorbonne-Université, France
%% Universidade NOVA de Lisboa, Portugal
%% Université catholique de Louvain (UCL), Belgique
%% INESC TEC, Portugal
%% >
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either expressed or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% List of the contributors to the development of Antidote: see AUTHORS file.
%% Description and complete License: see LICENSE file.
%% -------------------------------------------------------------------
%% antidote_crdt_counter_pn: A convergent, replicated, operation
%% based PN-Counter
-module(antidote_crdt_counter_pn).
-behaviour(antidote_crdt).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([ new/0,
new/1,
value/1,
downstream/2,
update/2,
equal/2,
to_binary/1,
from_binary/1,
is_operation/1,
require_state_downstream/1
]).
-type state() :: integer().
-type op() :: {increment, integer()} |
{decrement, integer()}.
-type effect() :: integer().
%% @doc Create a new, empty 'antidote_crdt_counter_pn' (total 0).
new() ->
    0.

%% @doc Create 'antidote_crdt_counter_pn' with an initial value.
%% Anything that is not an integer falls back to the empty counter.
-spec new(integer()) -> state().
new(Initial) when is_integer(Initial) ->
    Initial;
new(_NotAnInteger) ->
    new().
%% @doc The single, total value of a `pncounter()'
-spec value(state()) -> integer().
%% The counter state is itself the integer total; just return it.
value(Count) when is_integer(Count) ->
    Count.
%% @doc Generate a downstream operation.
%% The first parameter is either `increment' or `decrement' or the two tuples
%% `{increment, pos_integer()}' or `{decrement, pos_integer()}'. The second parameter
%% is the pncounter (this parameter is not actually used).
-spec downstream(op(), state()) -> {ok, effect()}.
%% Translate an update operation into its signed integer delta. The
%% current counter state is irrelevant for a PN-counter effect.
downstream(increment, _State) ->
    {ok, 1};
downstream(decrement, _State) ->
    {ok, -1};
downstream({increment, Amount}, _State) when is_integer(Amount) ->
    {ok, Amount};
downstream({decrement, Amount}, _State) when is_integer(Amount) ->
    {ok, -Amount}.
%% @doc Update a `pncounter()'. The first argument is either the atom
%% `increment' or `decrement' or the two tuples `{increment, pos_integer()}' or
%% `{decrement, pos_integer()}'.
%% In the case of the former, the operation's amount
%% is `1'. Otherwise it is the value provided in the tuple's second element.
%% The 2nd argument is the `pncounter()' to update.
%%
%% returns the updated `pncounter()'
-spec update(effect(), state()) -> {ok, state()}.
%% Fold a signed downstream delta into the local counter total.
update(Delta, Count) ->
    {ok, Count + Delta}.
%% @doc Compare if two `pncounter()' are equal. Only returns `true()' if both
%% of their positive and negative entries are equal.
-spec equal(state(), state()) -> boolean().
%% Two PN-counter states are equal exactly when their totals match.
equal(CountA, CountB) ->
    CountA =:= CountB.
-spec to_binary(state()) -> binary().
%% Serialise the counter state using the external term format.
to_binary(Counter) ->
    term_to_binary(Counter).

%% Deserialise a counter previously produced by to_binary/1.
%% @TODO something smarter
from_binary(Serialised) ->
    Counter = binary_to_term(Serialised),
    {ok, Counter}.
%% @doc Verify that a term denotes an update operation supported by
%% this PN-counter: bare increment/decrement, or either tagged with an
%% integer amount.
-spec is_operation(term()) -> boolean().
is_operation(Op) ->
    case Op of
        increment -> true;
        decrement -> true;
        {increment, Amount} -> is_integer(Amount);
        {decrement, Amount} -> is_integer(Amount);
        _Other -> false
    end.
%% @doc Returns true if ?MODULE:downstream/2 needs the current CRDT
%% state to generate a downstream effect. A PN-counter delta never
%% depends on the current total, so this is always false.
require_state_downstream(_) ->
false.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
%% @priv
%% Helper: run the full prepare (downstream) + effect (update) cycle
%% for one operation against the given counter state.
prepare_and_effect(Op, PNCounter) ->
{ok, Downstream} = downstream(Op, PNCounter),
update(Downstream, PNCounter).
new_test() ->
?assertEqual(0, new()).
%% @doc test the correctness of `value()' function
value_test() ->
PNCnt = 4,
?assertEqual(4, value(PNCnt)).
%% @doc test the correctness of increment with explicit amounts.
update_increment_test() ->
PNCnt0 = new(),
{ok, PNCnt1} = prepare_and_effect({increment, 1}, PNCnt0),
{ok, PNCnt2} = prepare_and_effect({increment, 2}, PNCnt1),
{ok, PNCnt3} = prepare_and_effect({increment, 1}, PNCnt2),
?assertEqual(4, value(PNCnt3)).
%% @doc test the correctness of increment by some numbers.
update_increment_by_test() ->
PNCnt0 = new(),
{ok, PNCnt1} = prepare_and_effect({increment, 7}, PNCnt0),
?assertEqual(7, value(PNCnt1)).
%% @doc test the correctness of decrement.
update_decrement_test() ->
PNCnt0 = new(),
{ok, PNCnt1} = prepare_and_effect({increment, 1}, PNCnt0),
{ok, PNCnt2} = prepare_and_effect({increment, 2}, PNCnt1),
{ok, PNCnt3} = prepare_and_effect({increment, 1}, PNCnt2),
{ok, PNCnt4} = prepare_and_effect({decrement, 1}, PNCnt3),
?assertEqual(3, value(PNCnt4)).
%% Negative amounts invert the operation: increment by -7 subtracts,
%% decrement by -5 adds.
update_negative_params_test() ->
PNCnt0 = new(),
{ok, PNCnt1} = prepare_and_effect({increment, -7}, PNCnt0),
{ok, PNCnt2} = prepare_and_effect({decrement, -5}, PNCnt1),
?assertEqual(-2, value(PNCnt2)).
equal_test() ->
PNCnt1 = 4,
PNCnt2 = 2,
PNCnt3 = 2,
?assertNot(equal(PNCnt1, PNCnt2)),
?assert(equal(PNCnt2, PNCnt3)).
%% Serialisation round-trip preserves the counter value.
binary_test() ->
PNCnt1 = 4,
BinaryPNCnt1 = to_binary(PNCnt1),
{ok, PNCnt2} = from_binary(BinaryPNCnt1),
?assert(equal(PNCnt1, PNCnt2)).
-endif. | src/antidote_crdt_counter_pn.erl | 0.52902 | 0.418756 | antidote_crdt_counter_pn.erl | starcoder |
%% @author <NAME> <<EMAIL>>
%% @copyright 2017 <NAME>
%% @doc Matrix operation functions.
%%
%% Matrix is a 2-dimensional array, which elements stored sequentially into Erlang binary object of
%% some IEEE758 floating point data type: single float, double float, single complex or double complex.
%%
%% Note that matrix indexes starts from 0, but not from 1 as for lists.
-module(nmatrix).
-include("erlynum.hrl").
-define(WE, erlynum_p:wrap_error).
-export([zeros/1, zeros/2, ones/1, ones/2, full/2, full/3]).
-export([identity/1, identity/2, eye/1, eye/2, eye/3, eye/4]).
-export([to_list/1, to_list/2, from_list/1, from_list/2]).
-export([get/2, get/3, row/2, col/2, diag/1, diag/2]).
-export([transpose/1]).
-spec zeros(Shape :: erlynum:shape()) -> erlynum:nmatrix().
%% @equiv zeros(Shape, [])
zeros(Shape) -> zeros(Shape, []).
-spec zeros(
Shape :: erlynum:shape(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nmatrix().
%% @doc Returns a matrix of the given `Shape' filled by zeroes.
zeros(Shape, Options) -> ?WE(erlynum_nif:nmatrix_full(Shape, 0, Options)).
-spec ones(Shape :: erlynum:shape()) -> erlynum:nmatrix().
%% @equiv ones(Shape, [])
ones(Shape) -> ones(Shape, []).
-spec ones(
Shape :: erlynum:shape(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nmatrix().
%% @doc Returns a matrix of the given `Shape' filled by ones.
ones(Shape, Options) -> ?WE(erlynum_nif:nmatrix_full(Shape, 1, Options)).
-spec full(
    Shape :: erlynum:shape(),
    FillValue :: erlynum:nscalar()
  ) -> erlynum:nmatrix().
%% @equiv full(Shape, FillValue, [])
%% @doc Returns a matrix of the given `Shape' filled by `FillValue'
%% with default creation options. Delegates to full/3 so the
%% defaulting lives in one place, mirroring zeros/1 and ones/1.
full(Shape, FillValue) -> full(Shape, FillValue, []).
-spec full(
Shape :: erlynum:shape(),
FillValue :: erlynum:nscalar(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nmatrix().
%% @doc Returns a matrix of the given `Shape' filled by the specified scalar value `FillValue'.
full(Shape, FillValue, Options) -> ?WE(erlynum_nif:nmatrix_full(Shape, FillValue, Options)).
-spec identity(N :: non_neg_integer()) -> erlynum:nmatrix().
%% @equiv identity(N, [])
identity(N) -> identity(N, []).
-spec identity(
N :: non_neg_integer(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nmatrix().
%% @doc Returns a square matrix of size `N' with ones on the main diagonal.
identity(N, Options) -> eye({N, N}, 1.0, 0, Options).
-spec eye(Shape :: erlynum:shape()) -> erlynum:nmatrix().
%% @doc Returns a matrix of the given `Shape' filled by ones on
%% the main diagonal and zeroes in all other places.
eye(Shape) -> eye(Shape, 1.0).
-spec eye(
    Shape :: erlynum:shape(),
    DiagValue_Opts :: erlynum:nscalar()
                    | [ erlynum:create_option() ]
  ) -> erlynum:nmatrix().
%% @doc Returns a matrix of the given `Shape' with a constant value on
%% the main diagonal and zeroes everywhere else.
%%
%% The second argument is overloaded: a list is taken as create
%% options (diagonal value defaults to 1.0), anything else is taken as
%% the diagonal value itself.
eye(Shape, Options) when is_list(Options) ->
    eye(Shape, 1.0, 0, Options);
eye(Shape, DiagValue) ->
    eye(Shape, DiagValue, 0).
-spec eye(
Shape :: erlynum:shape(),
DiagonalValue :: erlynum:nscalar(),
DiagNum_Options :: integer()
| [ erlynum:create_option() ]
) -> erlynum:nmatrix().
%% @doc Returns a matrix of the given `Shape' filled by specified scalar `DiagonalValue' on
%% the specified of main diagonal and zeroes in all other places.
%%
%% If last argument is an integer value, it specifies diagonal number.
%% If last argument is a list, it specifies create options.
eye(Shape, DiagonalValue, DiagNum_Options) when is_integer(DiagNum_Options) ->
eye(Shape, DiagonalValue, DiagNum_Options, []);
eye(Shape, DiagonalValue, DiagNum_Options) when is_list(DiagNum_Options) ->
eye(Shape, DiagonalValue, 0, DiagNum_Options).
-spec eye(
Shape :: erlynum:shape(),
DiagonalValue :: erlynum:nscalar(),
DiagonalNumber :: integer(),
Options :: [ erlynum:create_option() ]
) -> erlynum:nmatrix().
%% @doc Returns a matrix of the given `Shape' filled by specified scalar `DiagonalValue' on
%% the specified diagonal `DiagonalNumber' and zeroes in all other places.
eye(Shape, DiagonalValue, DiagonalNumber, Options) ->
?WE(erlynum_nif:nmatrix_eye(Shape, DiagonalValue, DiagonalNumber, Options)).
-spec from_list([[erlynum:nscalar()]]) -> erlynum:nmatrix().
%% @equiv from_list(List, [])
from_list(List) -> from_list(List, []).
-spec from_list([[erlynum:nscalar()]], [erlynum:create_option()]) -> erlynum:nmatrix().
%% @doc Returns a matrix from the given two-dimensional `List' items.
from_list(List, Options) -> ?WE(erlynum_nif:nmatrix_from_list(List, Options)).
-spec to_list(NMatrix :: erlynum:nmatrix()) -> [ [ erlynum:nscalar() ] ].
%% @equiv to_list(NMatrix, noconvert)
to_list(NMatrix) -> to_list(NMatrix, noconvert).
-spec to_list(
NMatrix :: erlynum:nmatrix(),
Convert :: erlynum:convert_option()
) -> [ [ erlynum:nscalar() ] ].
%% @doc Returns an Erlang list from the given matrix.
to_list(NMatrix, Convert) -> ?WE(erlynum_nif:nmatrix_to_list(NMatrix, Convert)).
-spec get(
NMatrix :: erlynum:nmatrix(),
Index :: { non_neg_integer(), non_neg_integer() }
) -> erlynum:nscalar().
%% @equiv get(NMatrix, Index, noconvert)
get(NMatrix, Index) -> get(NMatrix, Index, noconvert).
-spec get(
NMatrix :: erlynum:nmatrix(),
Index :: { non_neg_integer(), non_neg_integer() },
Convert :: erlynum:convert_option()
) -> erlynum:nscalar().
%% @doc Returns a scalar element value from the matrix.
%% Note that `Index' starts from 0, but not 1.
get(NMatrix, Index, Convert) -> ?WE(erlynum_nif:nmatrix_get(NMatrix, Index, Convert)).
-spec row(
NMatrix :: erlynum:nmatrix(),
RowIndex :: non_neg_integer()
) -> erlynum:nvector().
%% @doc Returns a row from the matrix as vector view.
%% Note that `RowIndex' starts from 0, but not 1.
row(NMatrix, RowIndex) -> ?WE(erlynum_nif:nmatrix_row(NMatrix, RowIndex)).
-spec col(
NMatrix :: erlynum:nmatrix(),
ColIndex :: non_neg_integer()
) -> erlynum:nvector().
%% @doc Returns a column from the matrix as vector view.
%% Note that `ColIndex' starts from 0, but not 1.
col(NMatrix, ColIndex) -> ?WE(erlynum_nif:nmatrix_col(NMatrix, ColIndex)).
-spec diag(NMatrix :: erlynum:nmatrix()) -> erlynum:nvector().
%% @doc Returns the main diagonal from the matrix as vector view.
diag(NMatrix) -> diag(NMatrix, 0).
-spec diag(
NMatrix :: erlynum:nmatrix(),
DiagNumber :: integer()
) -> erlynum:nvector().
%% @doc Returns a specified diagonal from the matrix as vector view.
%% Main diagonal number is 0. `DiagNumber' may be both positive and negative.
diag(NMatrix, DiagNumber) -> ?WE(erlynum_nif:nmatrix_diag(NMatrix, DiagNumber)).
-spec transpose(NMatrix :: erlynum:nmatrix()) -> erlynum:nmatrix().
%% @doc Returns a transposed view of matrix. Swapping the row/column
%% view descriptors flips the logical axes without touching the
%% underlying data binary.
transpose(NMatrix) ->
{RowView, ColView} = NMatrix#nmatrix.view,
NMatrix#nmatrix{view = {ColView, RowView}}. | src/nmatrix.erl | 0.74158 | 0.683538 | nmatrix.erl | starcoder
%%% Copy of string functions from the OTP-20 string.erl module
%%% These functions are kept to keep comparisons stable and
%%% to prevent them from being unicode-aware (which could allow
%%% an attacker to slip some interesting stuff through in hostnames)
%%% even though they are being deprecated starting in OTP-21.
%%%
%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(ssl_verify_string).
-export([to_lower/1, chr/2, str/2, substr/2, substr/3, strip/3]).
-spec to_lower(StringOrChar) -> StringOrChar when
StringOrChar :: io_lib:latin1_string() | char().
%% Latin-1-only lowercasing, copied from OTP string.erl. Deliberately
%% NOT Unicode-aware (see module header): hostnames must not be
%% normalized by Unicode rules. Keep byte-identical to the OTP source.
to_lower(S) when is_list(S) ->
[to_lower_char(C) || C <- S];
to_lower(C) when is_integer(C) ->
to_lower_char(C).
%% A-Z and the two Latin-1 accented uppercase ranges map down by 32;
%% the gap between 16#D6 and 16#D8 skips 16#D7 (multiplication sign).
to_lower_char(C) when is_integer(C), $A =< C, C =< $Z ->
C + 32;
to_lower_char(C) when is_integer(C), 16#C0 =< C, C =< 16#D6 ->
C + 32;
to_lower_char(C) when is_integer(C), 16#D8 =< C, C =< 16#DE ->
C + 32;
to_lower_char(C) ->
C.
-spec chr(String, Character) -> Index when
String :: string(),
Character :: char(),
Index :: non_neg_integer().
%% 1-based index of the first occurrence of C in S, or 0 if absent.
%% Verbatim copy of OTP string:chr/2 — keep byte-identical.
chr(S, C) when is_integer(C) -> chr(S, C, 1).
chr([C|_Cs], C, I) -> I;
chr([_|Cs], C, I) -> chr(Cs, C, I+1);
chr([], _C, _I) -> 0.
-spec str(String, SubString) -> Index when
String :: string(),
SubString :: string(),
Index :: non_neg_integer().
%% 1-based index of the first occurrence of Sub in S, or 0 if absent.
%% Verbatim copy of OTP string:str/2 — keep byte-identical.
str(S, Sub) when is_list(Sub) -> str(S, Sub, 1).
%% First characters match: check if the rest of Sub prefixes the rest
%% of S; on failure continue the scan one position further.
str([C|S], [C|Sub], I) ->
case l_prefix(Sub, S) of
true -> I;
false -> str(S, [C|Sub], I+1)
end;
str([_|S], Sub, I) -> str(S, Sub, I+1);
str([], _Sub, _I) -> 0.
%% True when the first list is a prefix of the second.
l_prefix([C|Pre], [C|String]) -> l_prefix(Pre, String);
l_prefix([], String) when is_list(String) -> true;
l_prefix(Pre, String) when is_list(Pre), is_list(String) -> false.
-spec substr(String, Start) -> SubString when
String :: string(),
SubString :: string(),
Start :: pos_integer().
%% Substring from 1-based position Start to the end of String.
%% Verbatim copy of OTP string:substr/2 — keep byte-identical.
substr(String, 1) when is_list(String) ->
String;
substr(String, S) when is_integer(S), S > 1 ->
substr2(String, S).
-spec substr(String, Start, Length) -> SubString when
String :: string(),
SubString :: string(),
Start :: pos_integer(),
Length :: non_neg_integer().
%% Substring of at most Length characters from 1-based position Start.
substr(String, S, L) when is_integer(S), S >= 1, is_integer(L), L >= 0 ->
substr1(substr2(String, S), L).
%% Take up to L characters from the front (stops at end of list).
substr1([C|String], L) when L > 0 -> [C|substr1(String, L-1)];
substr1(String, _L) when is_list(String) -> [].
%% Drop characters until position S is reached.
substr2(String, 1) when is_list(String) -> String;
substr2([_|String], S) -> substr2(String, S-1).
-spec strip(String, Direction, Character) -> Stripped when
String :: string(),
Stripped :: string(),
Direction :: 'left' | 'right' | 'both',
Character :: char().
%% Remove leading and/or trailing occurrences of Char from String.
%% Verbatim copy of OTP string:strip/3 — keep byte-identical.
strip(String, right, Char) -> strip_right(String, Char);
strip(String, left, Char) -> strip_left(String, Char);
strip(String, both, Char) ->
strip_right(strip_left(String, Char), Char).
%% Drop leading copies of Sc.
strip_left([Sc|S], Sc) ->
strip_left(S, Sc);
strip_left([_|_]=S, Sc) when is_integer(Sc) -> S;
strip_left([], Sc) when is_integer(Sc) -> [].
%% Drop trailing copies of Sc: a run of Sc is kept only if some other
%% character follows it further down the list.
strip_right([Sc|S], Sc) ->
case strip_right(S, Sc) of
[] -> [];
T -> [Sc|T]
end;
strip_right([C|S], Sc) ->
[C|strip_right(S, Sc)];
strip_right([], Sc) when is_integer(Sc) ->
[]. | deps/ssl_verify_fun/src/ssl_verify_string.erl | 0.51879 | 0.596198 | ssl_verify_string.erl | starcoder
%%
%% Query Tree process
%%
%% @copyright 2014-2019 UP FAMNIT and Yahoo Japan Corporation
%% @version 0.3
%% @since May, 2014
%% @author <NAME> <<EMAIL>>
%% @author <NAME> <<EMAIL>>
%%
%% @doc Query tree module implements process that serves as front-end
%% of query tree represented as tree of inter-connected processes running
%% on array of servers. Message start initializes query tree. Message
%% eval starts evaluation of query tree.
%%
%% Query tree process determines the location of each query node in terms
%% of column and row in array of servers. Each query node is executed on
%% location determined by query tree process.
%% Firstly, the column of query node is computed by using distribution function
%% that translates triple-pattern to columns in array of servers.
%% Secondly, rows in given columns are selected dynamically based on
%% CPU load of servers in columns.
%%
%% Query of type qt_query() presented to query tree process as parameter of
%% message start is converted into tree data structure stored as process
%% dictionary entry query_tree. First element of
%% list representing qt_query() is triple-pattern of the left-most query
%% node. Last element of list is triple-pattern of the upper-most and
%% right-most query node. All other triple-patterns are placed as inner
%% query nodes in order between left-most and upper-right-most.
%%
%%
%% <table bgcolor="lemonchiffon">
%% <tr><th>Section Index</th></tr>
%% <tr><td>{@section property list}</td></tr>
%% <tr><td>{@section type qt_query_node()}</td></tr>
%% <tr><td>{@section handle_call (synchronous) message API}</td></tr>
%% <tr><td>{@section handle_cast (asynchronous) message API}</td></tr>
%% <tr><td>{@section existent shortcuts}</td></tr>
%% </table>
%%
%% == property list ==
%%
%% (LINK: {@section property list})
%%
%% The gen_server process uses the following properties held by {@link
%% qt_state()}.
%%
%% <table border="3">
%% <tr><th>Name</th><th>Type</th><th>Description</th></tr>
%%
%% <tr> <td>created</td> <td>boolean()</td> <td>true denotes that
%% process dictionary was created and used. false denotes that
%% completely new process.</td> </tr>
%%
%% <tr> <td>tree_id</td> <td>string()</td> <td>query tree identifier</td> </tr>
%%
%% <tr> <td>state</td> <td>atom()</td> <td>active | inactive | eos</td> </tr>
%%
%% <tr> <td>gp</td> <td>maps:map()</td> <td>graph pattern of query tree
%% represented as mapping from {@type query_node:qn_id()} to
%% {@type query_node:qn_triple_pattern()}</td> </tr>
%%
%% <tr> <td>node_pids</td> <td>maps:map()</td> <td>mapping from {@type query_node:qn_id()} to
%% [{@type node_state:ns_pid()}]</td> </tr>
%%
%% <tr> <td>self</td> <td>{@link
%% node_state:ns_pid()}</td> <td>location of query
%% tree process</td> </tr>
%%
%% <tr> <td>session_id</td> <td>integer()</td> <td>session identifier</td> </tr>
%%
%% <tr> <td>invoker</td> <td>{@link
%% node_state:ns_pid()}</td> <td>location of parent query
%% node process</td> </tr>
%%
%% <tr> <td>query</td> <td>{@link qt_query()}</td> <td>current
%% query in flat form</td> </tr>
%%
%% <tr> <td>query_tree</td> <td>{@link qt_query_node()}</td> <td>query
%% tree of input query of type {@section qt_query()}</td> </tr>
%%
%% <tr> <td>roots</td> <td>{@link node_state:ns_pid_list()}</td> <td>long pids of
%% roots of query tree</td> </tr>
%%
%% <tr> <td>queue_result</td> <td>queue()</td> <td>temporary storage for
%% results of query evaluation</td> </tr>
%%
%% <tr> <td>b3s_state_pid</td> <td>{@type node_state:ns_pid()}</td>
%% <td>process id of b3s_state.</td> </tr>
%%
%% <tr> <td>benchmark_task_pid</td> <td>{@type
%% node_state:ns_pid()}</td> <td>process id of executing benchmark
%% task.</td> </tr>
%%
%% <tr> <td>result_record_max</td> <td>integer()</td> <td>Max number
%% of records to be reported.</td> </tr>
%%
%% <tr> <td>pid_start</td> <td>maps:map()</td> <td>mapping from term()
%% to erlang:timestamp().</td> </tr>
%%
%% <tr> <td>pid_elapse</td> <td>maps:map()</td> <td>mapping from
%% term() to integer() (in microseconds).</td> </tr>
%%
%% <tr> <td>result_freq</td> <td>maps:map()</td> <td>mapping from
%% pid() to integer().</td> </tr>
%%
%% <tr> <td>start_date_time</td> <td>calendar:datetime()</td>
%% <td>started date and time of the process.</td> </tr>
%%
%% </table>
%%
%%
%% == type qt_query_node() ==
%%
%% (LINK: {@section type qt_query_node()})
%%
%% Query tree used for internal representation of query is based on maps.
%% Each node of query tree is a map storing all properties of query node to
%% be run as (tp|join)_query_node process. The properties of query nodes
%% follow structure and properties of (tp|join)_query_node processes.
%%
%% <table border="3">
%% <tr><th>Name</th><th>Type</th><th>Description</th></tr>
%%
%% <tr> <td>node_id</td> <td>string()</td> <td>query node identifier</td> </tr>
%%
%% <tr> <td>state</td> <td>atom()</td> <td>active | inactive | eos</td> </tr>
%%
%% <tr> <td>gp</td> <td>maps:map()</td> <td>graph pattern of query tree
%% represented as mapping from {@type query_node:qn_id()} to
%% {@type query_node:qn_triple_pattern()}</td> </tr>
%%
%% <tr> <td>select_pred</td> <td>{@link query_node:qn_select_predicate()}</td>
%% <td>select expression represented as abstract syntax tree including
%% operations on strings, booleans and integers</td> </tr>
%%
%% <tr> <td>project_list</td> <td>{@link query_node:qn_project_list()}</td>
%% <td>list of variables to be included in graphs computed in
%% a given query node. Only the triples that include values of specified
%% variables are included in result. </td> </tr>
%%
%% <tr> <td>location</td> <td>[{@link node_state:ns_pid()}]</td>
%% <td>list of locations of query node processes</td> </tr>
%%
%% <tr> <td>invoker</td> <td>{@link
%% node_state:ns_pid()}</td> <td>process id of parent query
%% node</td> </tr>
%%
%% <tr> <td>query</td> <td>{@link qt_query()}</td> <td>query represented
%% by given qt_query_node()</td> </tr>
%%
%% <tr> <td>outer</td> <td>qt_query_node()</td> <td>outer sub-tree of
%% given query tree</td> </tr>
%%
%% <tr> <td>inner</td> <td>qt_query_node()</td> <td>inner sub-tree of
%% given query tree</td> </tr>
%%
%% <tr> <td>outer_pids</td> <td>[{@link
%% node_state:ns_pid()}]</td> <td>process ids of outer
%% children query nodes</td> </tr>
%%
%% <tr> <td>inner_pids</td> <td>[{@link
%% node_state:ns_pid()}]</td> <td>process ids of inner
%% children query nodes</td> </tr>
%%
%% <tr> <td>join_vars</td> <td>[{@link query_node:qn_var()}]</td> <td>list of
%% variables used for joining</td> </tr>
%%
%% <tr> <td>var_pos</td> <td>maps:map()</td> <td>mapping from {@link
%% query_node:qn_var()} to {@link join_query_node:jqn_var_position()};
%% general form, to be used only in gp</td> </tr>
%%
%% <tr> <td>vars</td> <td>maps:map()</td> <td>mapping from {@link
%% query_node:qn_var()} to integer()</td>; to be used only in tp</tr>
%%
%% <tr> <td>var_values</td> <td>maps:map()</td> <td>mapping from
%% {@link query_node:qn_var()} to string() (not used)</td> </tr>
%%
%% <tr> <td>inner_outer</td> <td>inner | outer</td> <td>Position to
%% its parent query node.</td> </tr>
%%
%% </table>
%%
%%
%% == handle_call (synchronous) message API ==
%%
%% (LINK: {@section handle_call (synchronous) message API})
%%
%% === {start, Query, Self, Invoker, TreeId, SessionId} ===
%%
%% Initialization of join query node process. All parameters are
%% saved to process dictionary.
%% (LINK: {@section @{start, Query, Self, Invoker, TreeId, SessionId @}})
%%
%% Query is {@link qt_query()}, Self is {@link
%% node_state:ns_pid()}. Invoker is {@link
%% node_state:ns_pid()}, TreeId is integer(), SessionId is integer().
%% This request is implemented by {@link hc_start/5}.
%%
%% === {eval} ===
%%
%% Initiate evaluation of query tree by sending root query node message eval.
%% After sending eval, N empty messages are sent to the root of query tree, and
%% results are expected in the form of data_outer messages from root of
%% query tree.
%% (LINK: {@section @{eval@}})
%%
%% This request is implemented by {@link hc_eval/0}.
%%
%% === {get, Name} ===
%%
%% Return the value of specified property name. If Name=all then return complete
%% process dictionary. Variable Name is an atom().
%% (LINK: {@section @{get, Name@}}).
%%
%% This request is implemented by {@link hc_get_property/1}.
%%
%% == handle_cast (asynchronous) message API ==
%%
%% (LINK: {@section handle_cast (asynchronous) message API})
%%
%% === {data_outer, Pid, Graph} ===
%%
%% Root of query tree is sending results to its initiator query_tree process.
%% Each graph received from root of query tree as data_outer message
%% is presented to caller process.
%% (LINK: {@section @{data_outer, Pid, Graph@}})
%%
%% Pid is node_state:ns_pid() and Graph is query_node:qn_graph().
%% This request is implemented by {@link hc_data_outer/2}.
%%
%% == existing shortcuts ==
%%
%% (LINK: {@section existing shortcuts})
%%
%% Existing shortcuts in the implementation of query_tree.
%%
%% <table border="3">
%% <tr><th>Num</th><th>Function</th><th>Description</th></tr>
%%
%% <tr> <td>1.</td> <td>hc_start()</td> <td>We suppose that prop_clm returns
%% single column for a given predicate. However, data structures as well as
%% modules tp_query_node and join_query_node are implemented to support
%% accessing multiple tp_query_node access methods.</td> </tr>
%%
%% <tr> <td>2.</td> <td>hc_start()</td> <td>Load balancing among the row nodes
%% for a given column is currently implemented by selecting random row. Next
%% version will dynamically gather statistics about running query nodes and
%% relations among sessions and locations of running query nodes.
%%
%% </table>
%%
%%
%% @type qt_state() = maps:map(). Map
%% structure that manages properties for operating the gen_server
%% process.
%%
%% @type qt_query() = qt_bin_query() | qt_tp_query().
%% Query provided to query_tree module is built from algebra operations
%% tp (triple pattern), join, leftjoin, union or differ.
%% Query is either binary query or triple pattern query.
%%
%% @type qt_bin_query() = {query_node:qn_opsyn(), qt_query(), qt_query(), query_node:qn_select_predicate(), query_node:qn_project_list()}.
%% Binary query is built from algebra operations join, leftjoin, union
%% or differ. Query operation is described by tuple including operation symbol,
%% two input streams of graphs which are results of qt_query()
%% select predicate and project list.
%%
%% @type qt_tp_query() = {query_node:qn_opsyn(), query_node:qn_triple_pattern(), query_node:qn_select_predicate(), query_node:qn_project_list()}.
%% Triple pattern query describes access method defined for given
%% triple pattern, select predicate and project list.
%%
%% @type qt_query_node() = maps:map(). Query node is represented as map
%% storing properties that resemble those of {tp|join}_query_node
%% processes. All properties must be prepared before creating the
%% tree of {tp|join}_query_node processes.
%% qt_query_node() map is presented in detail in
%% {@section Type qt_query_node()}.
%%
%%
-module(query_tree).
-behavior(gen_server).
-export(
[
eI/1, eT/1, eTP/1, dT/1, dTMap/1, dTML/1, dTMLL/1,
child_spec/1, init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3, spawn_process/2
]).
-include_lib("stdlib/include/qlc.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("record.hrl").
%% ======================================================================
%%
%% gen_server behavior
%%
%%
%%
%% init/1
%%
%% @doc Initialize a query_tree process. Reads the b3s_state node list
%% from the application environment, fetches the mq_debug flag from the
%% first b3s_state process, prepares the result queue, and returns the
%% initial state map.
%%
%% @spec init([]) -> {ok, qt_state()}
%%
init([]) ->
    process_flag(trap_exit, true),
    {ok, StateNodes} = application:get_env(b3s, b3s_state_nodes),
    B3sState = {b3s_state, lists:nth(1, StateNodes)},
    MqDebug = gen_server:call(B3sState, {get, mq_debug}),
    InitialState = #{
        wait            => true,
        start_date_time => calendar:local_time(),
        mq_debug        => MqDebug,
        pid             => self()
    },
    %% prepare the queue that buffers result blocks for hc_read/0
    query_node:queue_init(result, plain, data_block),
    info_msg(init, [], InitialState, -1),
    {ok, InitialState}.
%%
%% handle_call/3
%%
%% @doc Handle synchronous query requests. Every clause (except the
%% default) first restores the process dictionary from the state map
%% via hc_restore_pd/2 and returns hc_save_pd() as the new state, so
%% the process dictionary is the working copy of the server state.
%%
%% @spec handle_call(term(), {pid(), term()}, qt_state()) -> {reply,
%% term(), qt_state()}
%%
%% {start, ...}: compile Query into a query tree, spawn its query node
%% processes and send them start messages (see hc_start/5).
handle_call({start, Query, Self, Invoker, TreeId, SessionId}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [Self, {message,start}, {query,Query}, {self,Self},
{invoker,Invoker}, {tree_id,TreeId}, {session_id,SessionId}, get(state)], message_received, 10),
hc_start(Query, Self, Invoker, TreeId, SessionId),
{reply, ok, hc_save_pd()};
%% {eval}: start evaluation by messaging the root query nodes (hc_eval/0).
handle_call({eval}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,eval}, get(state)], message_received, 10),
hc_eval(),
{reply, ok, hc_save_pd()};
%% {get, all}: return the complete process dictionary as a property list.
handle_call({get, all}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,get}, {get,all}, {value,get()}, get(state)], message_received, 10),
{reply, get(), hc_save_pd()};
%% {get, Name}: return one property (undefined if absent).
handle_call({get, Name}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,get}, {get,Name}, {value,get(Name)}, get(state)], message_received, 10),
{reply, get(Name), hc_save_pd()};
%% {put, Name, Value}: store one property; replies with the stored value.
handle_call({put, Name, Value}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
put(Name, Value),
info_msg(handle_call, [get(self), {message,put}, {put,Name}, {value,Value}, get(state)], message_received, 10),
{reply, get(Name), hc_save_pd()};
%% {read}: pop one result block from the result queue (see hc_read/0).
handle_call({read}, _, State) ->
b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
hc_restore_pd(get(created), State),
info_msg(handle_call, [get(self), {message,read}, {invoker,get(invoker)}, get(state)], message_received, 30),
Block = hc_read(),
{reply, Block, hc_save_pd()};
%% default: report the unknown request and leave State untouched.
handle_call(Request, From, State) ->
R = {unknown_request, Request},
error_msg(handle_call, [Request, From, State], R),
{reply, R, State}.
%%
%% handle_cast/2
%%
%% @doc Handle asynchronous query requests. The data_outer clause
%% restores the process dictionary from the state map and snapshots it
%% back on return (hc_restore_pd/2 / hc_save_pd/0).
%%
%% @spec handle_cast(term(), qt_state()) -> {noreply, qt_state()}
%%
%% {data_outer, From, Block}: the root of the query tree delivers a
%% result block (or end_of_stream); see hc_data_outer/2.
handle_cast({data_outer, From, Block}, State) ->
    b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
    hc_restore_pd(get(created), State),
    info_msg(handle_cast, [get(self), {message,data_outer}, {from,From}, {block,Block}, get(state)], message_received, 30),
    hc_data_outer(From, Block),
    {noreply, hc_save_pd()};
%% just for suppressing errors on benchmark task executions
handle_cast({empty, From}, State) ->
    b3s_state:hc_monitor_mq(erlang:get(mq_debug)),
    info_msg(handle_cast, [get(self), {message,empty}, {from,From}, get(state)], message_received, 30),
    {noreply, State};
%% default: unknown request. Keep the passed-in State unchanged —
%% hc_restore_pd/2 was never called in this clause, so returning
%% hc_save_pd() here could replace State with a stale or empty
%% dictionary snapshot. This also matches the default handle_call/3
%% clause, which returns State as-is.
handle_cast(Request, State) ->
    R = {unknown_request, Request},
    error_msg(handle_cast, [get(self), Request, State], R),
    {noreply, State}.
%%
%% @doc Restore process dictionary entries from the state map. Only
%% performed when the first argument (the 'created' flag read from the
%% dictionary) is undefined, i.e. the dictionary has not yet been
%% populated; otherwise the dictionary is already the live copy and is
%% left untouched.
%%
%% @spec hc_restore_pd([{atom(), term()}] | undefined, qt_state()) -> ok
%%
hc_restore_pd(undefined, State) ->
    hc_restore_pd_1(maps:to_list(State));
hc_restore_pd(_, _) ->
    ok.

%% copy every key/value pair into the process dictionary
hc_restore_pd_1(Pairs) ->
    lists:foreach(fun({Key, Value}) -> put(Key, Value) end, Pairs),
    ok.
%%
%% @doc Snapshot the complete process dictionary into a state map.
%% Used as the returned state of the gen_server callbacks so that the
%% state map always mirrors the dictionary contents.
%%
%% @spec hc_save_pd() -> qt_state()
%%
hc_save_pd() ->
    maps:from_list(erlang:get()).
%%
%% handle_info/2
%%
%% @doc Handle exceptional query requests. No out-of-band messages are
%% acted upon by this server; anything arriving here is ignored.
%%
%% @spec handle_info(term(), qt_state()) -> {noreply, qt_state()}
%%
handle_info(_Message, State) ->
    {noreply, State}.
%%
%% terminate/2
%%
%% @doc Process termination. Logs the terminating pid together with
%% the reason and final state; no other cleanup is performed here.
%%
%% @spec terminate(term(), qt_state()) -> none()
%%
terminate(Reason, State) ->
    PidNote = lists:append("pid: ", pid_to_list(self())),
    info_msg(terminate, [Reason, State], PidNote, -1),
    ok.
%%
%% code_change/3
%%
%% @doc Process code change action. No state migration is required;
%% the current state is carried over unchanged.
%%
%% @spec code_change(term(), qt_state(), term()) -> {ok, qt_state()}
%%
code_change(_OldVersion, State, _Extra) ->
    {ok, State}.
%% ======================================================================
%%
%% utility
%%
%%
%%
%% @doc Report an error issue to the error_logger. Thin wrapper that
%% tags the report with this module's name before delegating to
%% node_state:error_msg/4.
%%
%% @spec error_msg(atom(), term(), term()) -> ok
%%
error_msg(FunName, Argument, Result) ->
node_state:error_msg(?MODULE, FunName, Argument, Result).
%%
%% @doc Report an information issue to the error_logger if current
%% debug level is greater than ThresholdDL. Thin wrapper that tags the
%% report with this module's name before delegating to
%% node_state:info_msg/5.
%%
%% @spec info_msg(atom(), term(), term(), integer()) -> ok
%%
info_msg(FunName, Argument, Result, ThresholdDL) ->
node_state:info_msg(?MODULE, FunName, Argument, Result, ThresholdDL).
%% ======================================================================
%%
%% api
%%
%%
%% @doc Return child spec for this process. It can be used in
%% supervisor:init/0 callback implementation. The spec registers the
%% gen_server locally under Id, restarts it permanently, and allows a
%% 1000 ms shutdown grace period.
%%
%% @spec child_spec( Id::atom() ) -> supervisor:child_spec()
%%
child_spec(Id) ->
    GSOpt = [{local, Id}, query_tree, [], []],
    StartFunc = {gen_server, start_link, GSOpt},
    Restart = permanent,
    %% fixed misspelled variable name (was 'Shutdwon')
    Shutdown = 1000,
    Type = worker,
    Modules = [query_tree],
    {Id, StartFunc, Restart, Shutdown, Type, Modules}.
%%
%% @doc Spawns process which is an instance of given module with given
%% identifier at given node, under the b3s supervisor running on that
%% node. Returns the {Id, Node} pair used to address the new process.
%%
%% @spec spawn_process( Id::atom(), Node::node() ) -> node_state:ns_pid()
%%
spawn_process(Id, Node) ->
    %% the result of start_child is intentionally ignored (best effort)
    _ = supervisor:start_child({b3s, Node}, query_tree:child_spec(Id)),
    {Id, Node}.
%% ======================================================================
%%
%% handle call/cast implementation
%%
%%
%%
%% hc_start/5
%%
%% @doc Initialize query tree process. Convert query represented as a list of
%% triple-patterns, filters, option statements and set operations into query tree
%% data structure internal to query_tree process. Determine node locations for
%% triple-patterns, and other operations. Compute all properties needed to construct
%% query tree by means of distributed processes. Finally, create processes for
%% query nodes and send start messages to all processes comprising query.
%%
%% @spec hc_start( Query::qt_query(), Self::node_state:ns_pid(),
%% Invoker::node_state:ns_pid(), TreeId::integer(), SessionId::integer() ) -> ok
%%
hc_start( Query, Self, Invoker, TreeId, SessionId ) ->
%% store parameters in the process dictionary
put(created, true),
put(state, active),
put(session_id, SessionId),
put(tree_id, TreeId),
put(self, Self),
put(invoker, Invoker),
put(query, Query),
put(node_pids, maps:new()),
%% queue_result is initialized in init/1; reset it here so that a
%% restart of the same process begins with an empty queue (1/22 knitta)
put(queue_result, queue:new()),
put(pause, false),
%% drop leftovers of any previous evaluation
erase(sid_table_name),
erase(sid_max_id),
erase(di_cursor__),
erase(di_ets__),
info_msg(hc_start, [get(self), {query,Query}, {invoker,Invoker},
{tree_id,TreeId}, {session_id,SessionId}], entered, 10),
%% fetch benchmark-related settings from b3s_state / triple_distributor
%% and cache them in the process dictionary
BSP = b3s_state_pid,
BMT = benchmark_task,
BTP = benchmark_task_pid,
RRM = result_record_max,
TDP = triple_distributor_pid,
DA = distribution_algorithm,
put(BSP, gen_server:call(node_state, {get, BSP})),
{_, FSN} = get(BSP),
put(BTP, {gen_server:call(get(BSP), {get, BMT}), FSN}),
put(RRM, gen_server:call(get(BSP), {get, RRM})),
put(TDP, gen_server:call(get(BSP), {get, TDP})),
put(DA, gen_server:call(get(TDP), {get_property, DA})),
%% retrieve pred_clm and clm_row_conf in PD (kiyoshi); these drive
%% node selection in hcs_select_nodes/2
PredClm = gen_server:call(node_state, {get, pred_clm}),
ClmRowConf = gen_server:call(node_state, {get, clm_row_conf}),
put(pred_clm, PredClm),
put(clm_row_conf, ClmRowConf),
%%info_msg(hc_start, [get(self), PredClm, ClmRowConf], debug_b3s_configuration_read, 60),
%% convert query to query tree
QT = hcs_query_to_tree(Query),
%% add ids to query nodes; a single triple pattern is tagged as the
%% outer side explicitly
%% NOTE(review): QT1 is exported from the case branches — fragile
%% Erlang idiom; consider binding the case result instead
case maps:get(type, QT) of
tp ->
QT1A = hcs_ids_to_nodes(tp, QT, 1),
QT1 = maps:put(side, outer, QT1A);
QueryTreeType ->
QT1 = hcs_ids_to_nodes(QueryTreeType, QT, 1)
end,
%% compute all necessary components of query nodes (gp, var_pos, join_vars)
QT2 = hcs_comp_qn_entries(maps:get(type, QT1), QT1),
info_msg(hc_start, [get(self), {query,Query}, {query_tree,QT2}], query_compiled, 10),
%% create processes for given query nodes
QT3 = hcs_create_processes( maps:get(type, QT2), QT2 ),
info_msg(hc_start, [get(self), {query_tree,QT3}], processes_created, 10),
%% set invoker of root to this query_tree process
QT4 = maps:put(invoker, get(self), QT3),
put(query_tree, QT4),
%% remember the root process(es) of the query tree for hc_eval/0
Loc = maps:get(location, QT4),
put(roots, Loc),
%% set gp of query in PD
put(gp, maps:get(gp, QT4)),
%% start individual query nodes of query
hcs_start_query( maps:get(type, QT4), QT4 ),
info_msg(hc_start, [get(self), {query,get(query)}, {node_pids,get(node_pids)}], processes_started, 10),
ok.
%%
%% hcs_query_to_tree/1
%%
%% @doc Transform a query expression into a query tree of maps. A
%% triple pattern query yields a leaf node holding the encoded triple
%% pattern; a binary (join/hjoin/mjoin) query yields an inner node with
%% recursively transformed outer and inner sub-trees. In both cases the
%% selection predicate is encoded via encode_select_expr/1.
%%
%% @spec hcs_query_to_tree(qt_query()) -> qt_query_node()
%%
hcs_query_to_tree({tp, TP, SL, PL}) ->
    EncodedTP = string_id:encode_triple_pattern(TP),
    EncodedSel = encode_select_expr(SL),
    info_msg(hcs_query_to_tree, [get(self), {query,{tp,TP,SL,PL}}, {encoded_tp,EncodedTP}, {encoded_select_pred,EncodedSel}, get(state)], tp_encoded, 50),
    #{type => tp,
      query => EncodedTP,
      select_pred => EncodedSel,
      project_list => PL};
hcs_query_to_tree({OP, QO, QI, SL, PL})
  when OP =:= join; OP =:= hjoin; OP =:= mjoin ->
    %% transform both sub-queries first, then encode the predicate
    OuterTree = hcs_query_to_tree(QO),
    InnerTree = hcs_query_to_tree(QI),
    EncodedSel = encode_select_expr(SL),
    info_msg(hcs_query_to_tree, [get(self), {query,{OP,QO,QI,SL,PL}}, {encoded_select_pred,EncodedSel}, get(state)], join_encoded, 50),
    #{type => OP,
      query => {OP, QO, QI, SL, PL},
      select_pred => EncodedSel,
      project_list => PL,
      outer => OuterTree,
      inner => InnerTree}.
%%
%% encode_select_expr/1
%%
%% @doc Encodes all URIs and strings (identifiers) in the selection
%% predicate of a query node to integer keys. The expression tree is
%% walked recursively in left depth-first order; comparison and boolean
%% operators are preserved and only their operands are encoded.
%%
%% @spec encode_select_expr(SP::query_node:qn_select_predicate()) -> query_node:qn_select_predicate()
%%
%% the empty predicate passes through unchanged
encode_select_expr(none) ->
    info_msg(encode_select_pred, [get(self), {expr,none}, {type,atom}, get(state)], encode_atom_expr, 50),
    none;
%% any other atom is not a legal expression
encode_select_expr(S) when is_atom(S) ->
    error_msg(encode_select_expr, [get(self), {expr,S}, {type,atom}, {all,get()}, get(state)], illegal_select_expression),
    fail;
%% numeric literals are tagged with their type
encode_select_expr(S) when is_integer(S) ->
    info_msg(encode_select_pred, [get(self), {expr,S}, {type,integer}, get(state)], encode_integer_expr, 50),
    {S, integer};
encode_select_expr(S) when is_float(S) ->
    info_msg(encode_select_pred, [get(self), {expr,S}, {type,float}, get(state)], encode_float_expr, 50),
    {S, real};
%% strings: variables ("?x") pass through, other strings are mapped to
%% integer keys via string_id
encode_select_expr(S) when is_list(S) ->
    info_msg(encode_select_pred, [get(self), {expr,S}, {type,string}, get(state)], encode_string_expr, 50),
    case string:chr(S, $?) =:= 1 of
        true  -> S;
        false -> {string_id:get_id(S), code}
    end;
%% comparison and boolean operators: encode both operands recursively
encode_select_expr({S1, Op, S2})
  when Op =:= equal; Op =:= less; Op =:= lesseq; Op =:= greater;
       Op =:= greatereq; Op =:= land; Op =:= lor ->
    {encode_select_expr(S1), Op, encode_select_expr(S2)};
encode_select_expr({lnot, S1}) ->
    {lnot, encode_select_expr(S1)};
%% anything else is illegal
encode_select_expr(Expr) ->
    error_msg(encode_select_expr, [get(self), {expr,Expr}, {all,get()}, get(state)], illegal_select_expression),
    fail.
%%
%% hcs_ids_to_nodes/3
%%
%% @doc Assign a node_id (a string) to every node of query tree QT,
%% numbering in depth-first post-order: first the outer sub-tree, then
%% the inner sub-tree, finally the root. Sub-tree roots are also tagged
%% with their side (outer | inner) relative to the parent.
%%
%% @spec hcs_ids_to_nodes(tp | gp, QT::qt_query_node(), N::integer())
%% -> qt_query_node()
%%
hcs_ids_to_nodes(tp, QT, N) ->
    %% leaf: just record its number
    maps:put(node_id, integer_to_list(N), QT);
hcs_ids_to_nodes(OP, QT, N)
  when OP =:= join; OP =:= hjoin; OP =:= mjoin ->
    OuterIn = maps:get(outer, QT),
    InnerIn = maps:get(inner, QT),
    %% number the outer sub-tree starting at N
    OuterOut = maps:put(side, outer,
                        hcs_ids_to_nodes(maps:get(type, OuterIn), OuterIn, N)),
    NextId = list_to_integer(maps:get(node_id, OuterOut)) + 1,
    %% number the inner sub-tree with the next free id
    InnerOut = maps:put(side, inner,
                        hcs_ids_to_nodes(maps:get(type, InnerIn), InnerIn, NextId)),
    RootId = list_to_integer(maps:get(node_id, InnerOut)) + 1,
    %% the root takes the id following its inner sub-tree
    QT#{outer   => OuterOut,
        inner   => InnerOut,
        side    => outer,
        node_id => integer_to_list(RootId)}.
%%
%% hcs_comp_qn_entries/2
%%
%% @doc Given input query tree:
%% 1) construct graph patterns for all query nodes and store it in gp
%% (single triple pattern is also stored in gp to simplify merging gp-s);
%% 2) extract variables and their positions in triple patterns to construct
%% var_pos; and
%% 3) extract common variables of joins and prepare join_vars.
%%
%% @spec hcs_comp_qn_entries(Type::atom(), QT::qt_query_node()) -> qt_query_node()
%%
hcs_comp_qn_entries(tp, QT) ->
%% convert triple pattern to list
TP = maps:get(query, QT),
TL = tuple_to_list(TP),
%% convert elements to pairs {element, index}, numbering via the
%% transient 'count' PD key
%% NOTE(review): 'count' remains in the process dictionary afterwards
%% and the dictionary snapshot size is asserted in hcst_t02, so do not
%% remove it casually
put(count, 1),
F = fun (E) ->
N = get(count),
put(count, N+1),
{ E, N }
end,
TL1 = lists:map(F, TL),
info_msg(hcs_comp_qn_entries, [get(self), {tp,TP}, {tp_list_pair,TL1}, get(state)], debug_comp_qn_entries_1, 50),
%% filter list of pairs to retain vars (strings starting with '?')
F1 = fun (P) ->
E = element(1,P),
case is_list(E) of
true -> string:chr(E,$?) =:= 1;
false -> false
end
end,
TL2 = lists:filter(F1, TL1),
info_msg(hcs_comp_qn_entries, [get(self), {tp,TP}, {tp_var_list_pair,TL2}, get(state)], debug_comp_qn_entries_2, 50),
%% construct map : var -> [{node-id, index-in-triple-pattern}]
NID = maps:get(node_id, QT),
F2 = fun (E, M) ->
{ V, I } = E,
maps:put(V, [{NID, I}], M)
end,
VP = lists:foldl(F2, #{}, TL2),
%% simplified positions for tp: var -> index only
F3 = fun (_, V1) ->
[{ _, I1 }] = V1,
I1
end,
VP1 = maps:map(F3, VP),
%% save graph pattern (keyed by node id) and var_pos map in query node
QT1 = maps:put(gp, maps:put(NID, TP, #{}), QT),
QT2 = maps:put(var_pos, VP, QT1),
maps:put(vars, VP1, QT2);
hcs_comp_qn_entries(OP, QT)
when (OP =:= join) or (OP =:= hjoin) or (OP =:= mjoin) ->
Inner = maps:get(inner, QT),
Outer = maps:get(outer, QT),
%% get gp and var_pos from outer sub-tree
NO = hcs_comp_qn_entries(maps:get(type, Outer), Outer),
GPO = maps:get(gp, NO),
VPO = maps:get(var_pos, NO),
%% get gp and var_pos from inner sub-tree
NI = hcs_comp_qn_entries(maps:get(type, Inner), Inner),
GPI = maps:get(gp, NI),
VPI = maps:get(var_pos, NI),
%% update QT with new inner and outer
QT1 = maps:put(outer, NO, QT),
QT2 = maps:put(inner, NI, QT1),
%% compute gp, var_pos and join_vars and store them in query node;
%% join_vars are the variables shared by both sub-trees
%% (merge_maps/2 and intersect_lists/2 are presumably helpers defined
%% elsewhere in this module — not visible in this chunk)
QT3 = maps:put(var_pos, merge_maps(VPO, VPI), QT2),
QT4 = maps:put(gp, maps:merge(GPO, GPI), QT3),
L1 = maps:keys(VPO),
L2 = maps:keys(VPI),
maps:put(join_vars, intersect_lists(L1, L2), QT4).
%%
%% hcs_create_processes/2
%%
%% @doc For each query node of QT select optimal rows of columns
%% associated with given predicate of triple-pattern. Spawn query node
%% at selected node. Join node is assigned to one of it's inner
%% query nodes. Therefore, current physical structure of query tree is
%% pipeline.
%%
%% @spec hcs_create_processes( OP::query_node:qn_opsym(), QT::qt_query_node()) -> qt_query_node()
%%
hcs_create_processes( tp, QT ) ->
%% get optimal rows in columns (one data node per selected column)
Nodes = hcs_select_nodes( maps:get(type, QT), QT ),
info_msg(hcs_create_processes, [get(self), {tp,maps:get(gp, QT)},
{nodes,Nodes}], tp_nodes_selected, 50),
%% generic Id for all nodes
Id = generate_long_qnid( maps:get(node_id, QT)),
%% spawn one tp query node process per selected node; the transient
%% 'count' PD key numbers the instances appended to the generic Id
put(count, 1),
F = fun (Node) ->
%% add instance number to generic Id
NId = list_to_atom(Id++"-"++integer_to_list(get(count))),
put(count,get(count)+1),
%% spawn tp query node process at node
tp_query_node:spawn_process(NId, Node)
end,
Pids = lists:map(F, Nodes),
info_msg(hcs_create_processes, [get(self), {pids,Pids}], tp_nodes_created, 50),
%% save pids for node_id in node_pids
NP = get(node_pids),
NP1 = maps:put(maps:get(node_id, QT), Pids, NP),
put(node_pids, NP1),
%% store the spawned process addresses as this node's location
maps:put(location, Pids, QT);
hcs_create_processes(OP, QT)
when (OP =:= join) or (OP =:= hjoin) or (OP =:= mjoin) ->
%% create processes for nodes of subtrees first (bottom-up)
Outer = maps:get(outer, QT),
Inner = maps:get(inner, QT),
ONod = hcs_create_processes( maps:get(type, Outer), Outer ),
INod = hcs_create_processes( maps:get(type, Inner), Inner ),
%% store locs of inner and outer qn-s!
OPids = maps:get(location, ONod),
IPids = maps:get(location, INod),
QT1 = maps:put(inner_pids, IPids, QT),
QT2 = maps:put(outer_pids, OPids, QT1),
%% determine node and id, spawn it, and store
%% [NOTE] join query nodes are not parallelised!
[InnerPid] = hcs_select_nodes( maps:get(type, QT2), QT2 ),
Node = element(2, InnerPid),
Id = list_to_atom(generate_long_qnid( maps:get(node_id, QT2))),
info_msg(hcs_create_processes, [get(self), {gp, maps:get(gp, QT2)}, {location, {Id, Node}}], gp_nodes_selected, 50),
%% spawn the join-type process matching the operator and save its pid
%% NOTE(review): Pid is exported from the case branches — fragile
%% Erlang idiom; consider binding the case result instead
case maps:get(type, QT) of
join -> Pid = join_query_node:spawn_process(Id, Node);
hjoin -> Pid = hj_query_node:spawn_process(Id, Node);
mjoin -> Pid = mj_query_node:spawn_process(Id, Node)
end,
QT3 = maps:put(location, [Pid], QT2),
%% save pid for node_id in node_pids
NP = get(node_pids),
NP1 = maps:put(maps:get(node_id, QT), [Pid], NP),
put(node_pids, NP1),
%% re-link query tree children: their invoker is this new join process
ONod1 = maps:put(invoker, Pid, ONod),
INod1 = maps:put(invoker, Pid, INod),
QT4 = maps:put(outer, ONod1, QT3),
maps:put(inner, INod1, QT4).
%%
%% @doc Generate the long ID string of a query node:
%% "qn-SessionId-TreeId-NodeId". The session id and tree id (both
%% strings) are read from the process dictionary.
%%
generate_long_qnid(Id) ->
    lists:append(["qn-", get(session_id), "-", get(tree_id), "-", Id]).
%%
%% hcs_select_nodes/2
%%
%% @doc Select concrete node(s) (data servers) for a query node. For a
%% triple-pattern node the selection is delegated to the configured
%% distribution algorithm (predicate_based or random, read from the
%% process dictionary). A join query node is, for the time being,
%% co-located with its first inner child, so no separate scheduling is
%% done for joins.
%%
%% @spec hcs_select_nodes( gp | tp, QT::qt_query_node ) -> [node()]
%%
hcs_select_nodes(OP, QT)
  when OP =:= join; OP =:= hjoin; OP =:= mjoin ->
    %% co-locate the join with its first inner child
    %% [TODO] what are alternatives? random selection of inner qn?
    [FirstInner | _] = maps:get(inner_pids, QT),
    info_msg(hcs_select_nodes, [get(self), {gp,maps:get(gp,QT)}, {location,[FirstInner]}], locs_selected_for_gp, 50),
    [FirstInner];
hcs_select_nodes(tp, QT) ->
    case get(distribution_algorithm) of
        predicate_based ->
            hcssn_predicate_based(tp, QT);
        random ->
            hcssn_random(tp, QT);
        Unknown ->
            error_msg(hcs_select_nodes, [tp, QT], {unknown_distribution_algorithm, Unknown}),
            []
    end.
%%
%% @doc Select data-server rows for a triple pattern using the
%% predicate-based distribution: the predicate determines the target
%% column(s) via the pred_clm mapping, then one row of each column is
%% chosen at random. A variable predicate selects all columns; a
%% missing pred_clm mapping falls back to column 1.
%%
hcssn_predicate_based(tp, QT) ->
    %% extract predicate from query (subject/object currently unused)
    TP = maps:get(query, QT),
    P = element(3, TP),
    %% read pred_clm and clm_row_conf cached in the process dictionary
    PredClm = get(pred_clm),
    ClmRowConf = get(clm_row_conf),
    %% bind branch results up front instead of exporting variables out
    %% of case expressions (fragile Erlang idiom in the original)
    IsVarP = is_list(P) andalso string:chr(P, $?) =:= 1,
    PS = case is_integer(P) of
             true  -> string_id:find(P);
             false -> undefined
         end,
    IsUndefP = PredClm =:= undefined,
    %% [NOTE] at this point the distribution function can be changed;
    %% a subset of SPO may be used for selection
    Clms = if
               IsVarP   -> maps:keys(ClmRowConf);
               IsUndefP -> [1];
               true     -> [maps:get(PS, PredClm)]
           end,
    info_msg(hcs_select_nodes, [get(self), {tp,TP}, {predicate,P}, {clms,Clms}], clms_selected_for_tp, 50),
    %% map each column id to one randomly selected row of that column
    F = fun (C) ->
            Rows = maps:get(C, ClmRowConf),
            Rids = maps:keys(Rows),
            %% NOTE(review): uses the random draw directly as a map
            %% key — assumes row ids are contiguous integers starting
            %% at 1; confirm against clm_row_conf construction
            Rid = rand:uniform(length(Rids)),
            maps:get(Rid, Rows)
        end,
    Locs = lists:map(F, Clms),
    info_msg(hcs_select_nodes, [get(self), {tp,TP}, {location,Locs}], locs_selected_for_tp, 50),
    Locs.
%%
%% @doc Random distribution: for every column of clm_row_conf pick one
%% row at random and return the resulting list of locations.
%%
hcssn_random(tp, TP) ->
    ClmRowConf = get(clm_row_conf),
    PickRow = fun (Clm) ->
                  Rows = maps:get(Clm, ClmRowConf),
                  RowIds = maps:keys(Rows),
                  maps:get(rand:uniform(length(RowIds)), Rows)
              end,
    Locs = lists:map(PickRow, maps:keys(ClmRowConf)),
    info_msg(hcs_select_nodes, [get(self), {tp,TP}, {location,Locs}], locs_selected_for_tp, 50),
    Locs.
%%
%% hcs_start_query/2
%%
%% @doc Initiate all query nodes of a given query tree by sending
%% start message beginning with leafs and progressing towards the
%% root of query tree. The start messages are synchronous
%% (gen_server:call), so a node is fully started before its parent.
%%
%% @spec hcs_start_query( Type::atom(), QT::qt_query_node() ) -> ok
%%
hcs_start_query( tp, QT ) ->
%% gather data and start tp query node
QueryNodeId = maps:get(node_id, QT),
QueryId = get(tree_id),
SessionId = get(session_id),
%% a tp node's gp map holds exactly one triple pattern
[TriplePattern] = maps:values(maps:get(gp, QT)),
SelectPred = maps:get(select_pred, QT),
ProjectList = maps:get(project_list, QT),
ParentPid = maps:get(invoker, QT),
VarsPositions = maps:get(vars, QT),
Side = maps:get(side, QT),
%% send start to all tp query nodes (one per selected location)
F = fun (Loc) ->
ST = {start, QueryNodeId, QueryId, SessionId, Loc, TriplePattern, SelectPred, ProjectList, ParentPid, VarsPositions, Side},
info_msg(hcs_start_query, [get(self), {type,tp}, {location,Loc}, {start,ST}], sending_start, 50),
gen_server:call(Loc, ST)
end,
lists:map(F, maps:get(location, QT)),
ok;
hcs_start_query(OP, QT)
when (OP =:= join) or (OP =:= hjoin) or (OP =:= mjoin) ->
%% start query nodes in sub-trees first (leaves before root)
Outer = maps:get(outer, QT),
Inner = maps:get(inner, QT),
hcs_start_query( maps:get(type, Outer), Outer ),
hcs_start_query( maps:get(type, Inner), Inner ),
%% gather data and start this query node
QueryNodeId = maps:get(node_id, QT),
QueryId = get(tree_id),
SessionId = get(session_id),
GraphPattern = maps:get(gp, QT),
SelectPred = maps:get(select_pred, QT),
ProjectList = maps:get(project_list, QT),
ParentPid = maps:get(invoker, QT),
OuterPids = maps:get(outer_pids, QT),
InnerPids = maps:get(inner_pids, QT),
VarsPositions = maps:get(var_pos, QT),
JoinVars = maps:get(join_vars, QT),
%% send start message to join query node (exactly one location)
[Loc] = maps:get(location, QT),
ST = {start, QueryNodeId, QueryId, SessionId, Loc, GraphPattern, SelectPred, ProjectList, ParentPid, OuterPids, InnerPids, VarsPositions, JoinVars},
info_msg(hcs_start_query, [get(self), {type,gp}, {location,Loc}, {start,ST}], sending_start, 50),
gen_server:call(Loc, ST),
ok.
%%
%% hc_start_test/0
%%
%% @doc EUnit test generator for hc_start. The actual test set depends
%% on the b3s test_mode application environment variable; anything
%% other than local1 yields no tests.
%%
hc_start_test_() ->
application:load(b3s),
{ok, TM} = application:get_env(b3s, test_mode),
hcst_site(TM).
%% local1: boot a single-node b3s instance, configure two columns that
%% both map to the local node, then run the hcst_t01/hcst_t02 suites
hcst_site(local1) ->
b3s:start(),
b3s:stop(),
b3s:start(),
b3s:bootstrap(),
NDS = node(),
BSS = {b3s_state, NDS},
CRC = clm_row_conf,
RMS = #{1 => NDS},
CM1 = #{1 => RMS, 2 => RMS},
R01 = [NDS], %%, NDC],
put(self, {'1-1-1', node()}),
{inorder,
[
?_assertMatch(ok, gen_server:call(BSS, {put, CRC, CM1})),
?_assertMatch(R01, gen_server:call(BSS, propagate)),
{generator, fun()-> tp_query_node:hcet_load_db() end},
% {generator, fun()-> join_query_node:hcet_q02() end},
{generator, fun()-> hcst_t01() end},
{generator, fun()-> join_query_node:hcet_load_db() end},
{generator, fun()-> hcst_t02() end},
?_assertMatch(ok, b3s:stop())
]};
%% any other test_mode: no tests
hcst_site(_) ->
[].
%%
%% hcst_t01/0
%%
%% @doc Test 01 of hc_start: runs the pure compilation pipeline
%% (hcs_query_to_tree/1, hcs_ids_to_nodes/3, hcs_comp_qn_entries/2) on
%% a two-join query and checks the size of the resulting query node map.
%%
hcst_t01() ->
info_msg(hcst_t01, [], start, 50),
%% nested hjoin query over three triple patterns
Q = {hjoin, {hjoin, {tp, { "?i1", "?y", "livesIn", "?x" }, none, none},
{tp, { "?i2", "slovenia", "hasCapital", "?x" }, none, none},
none, none},
{tp, { "?i3", "?y", "worksAt", "ijs" }, none, none},
none, none},
T = hcs_query_to_tree(Q),
info_msg(hcst_t01, [{query,Q},{tree,T}], after_query_to_tree, 50),
T1 = hcs_ids_to_nodes(maps:get(type, T), T, 1),
info_msg(hcst_t01, [{query,Q},{tree,T1}], after_ids_to_nodes, 50),
T2 = hcs_comp_qn_entries(maps:get(type, T1), T1),
info_msg(hcst_t01, [{query,Q},{tree,T2}], after_comp_qn_entries, 50),
%% the compiled root node map is expected to have 11 entries
{inorder,
[
?_assertMatch(11, hcst_log_cnt(T2))
]}.
%%
%% hcst_t02/0
%%
%% @doc Test 02 of hc_start: spawns a real query_tree process, sends it
%% a start message for a two-join query, and checks the number of
%% process-dictionary properties of the query_tree process and of each
%% spawned query node process.
%%
hcst_t02() ->
info_msg(hcst_t02, [get()], start, 50),
% tree ids will be handled by session
TreeId = "0",
SessionId = "1",
% spawn query tree process
ProcId = list_to_atom("qt-"++SessionId++"-"++TreeId),
info_msg(hcst_t02, [{ProcId,node()}], query_tree_pid, 50),
% spawn qt process locally
QT = query_tree:spawn_process(ProcId, node()),
% define query (same shape as in hcst_t01)
Q = {hjoin, {hjoin, {tp, { "?i1", "?y", "livesIn", "?x" }, none, none},
{tp, { "?i2", "slovenia", "hasCapital", "?x" }, none, none},
none, none},
{tp, { "?i3", "?y", "worksAt", "ijs" }, none, none},
none, none},
% F1 counts the spawned query node pids and stashes them in 'temp'
% for the per-node assertions below
F1 = fun (NP) ->
Pids = lists:flatten(maps:values(NP)),
put(temp, Pids),
length(Pids)
end,
S1 = {start, Q, QT, self(), TreeId, SessionId},
info_msg(hcst_t02, [QT, S1], before_test, 50),
% NOTE: the numeric expectations below (33, 5, 35, 43) pin the exact
% process-dictionary sizes after hc_start; they change whenever a
% dictionary key is added or removed
{inorder,
[
?_assertMatch(ok, mnesia:start()),
?_assertMatch(ok, timer:sleep(1000)),
?_assertMatch(ok, gen_server:call(QT, S1)),
% check properties of query_tree
?_assertMatch(33, length(gen_server:call(QT, {get, all}))),
% check properties by qnodes
?_assertMatch(5, F1(gen_server:call(QT, {get, node_pids}))),
?_assertMatch(35, length(gen_server:call(lists:nth(1,get(temp)), {get_property, all}))),
?_assertMatch(35, length(gen_server:call(lists:nth(2,get(temp)), {get_property, all}))),
?_assertMatch(43, length(gen_server:call(lists:nth(3,get(temp)), {get_property, all}))),
?_assertMatch(35, length(gen_server:call(lists:nth(4,get(temp)), {get_property, all}))),
?_assertMatch(43, length(gen_server:call(lists:nth(5,get(temp)), {get_property, all}))),
?_assertMatch(ok, timer:sleep(2000))
]}.
%%
%% hcst_log_cnt/1
%%
%% @doc Log whatever you get as parameter and return its number of
%% entries (list length or map size).
%%
hcst_log_cnt(Val) when is_list(Val) ->
    info_msg(hcst_log_cnt, [Val], logging_test, 60),
    length(Val);
hcst_log_cnt(Val) when is_map(Val) ->
    info_msg(hcst_log_cnt, [Val], logging_test, 60),
    %% map_size/1 is O(1); no need to materialize the pair list
    map_size(Val).
%%
%% hc_eval/0
%%
%% @doc Initiate evaluation of query tree by sending eval message
%% to all roots of query tree. In the case query has
%% two or more triple patterns then query tree has one root.
%% In the case that query is one triple pattern then query can
%% have more roots.
%%
%% @spec hc_eval() -> ok
%%
hc_eval() ->
    %% Reset per-evaluation bookkeeping kept in the process dictionary:
    %% result counts per root pid, start timestamps, and elapsed times.
    put(result_freq, #{}),
    put(pid_start, #{}),
    put(pid_elapse, #{}),
    %% For every root query node: start evaluation, record the start
    %% time, and prime the flow control by sending N empty messages.
    F = fun (Pid) ->
		%% send eval to one pid
		gen_server:call(Pid, {eval, []}),
		%% record starting time
		PS = pid_start,
		put(PS, maps:put(Pid, os:timestamp(), get(PS))),
		%% send N empty messages to root of query tree
		send_N_empty(Pid)
	end,
    %% The per-root result is not used, so lists:foreach/2 is the right
    %% iterator (the original used lists:map/2 and discarded the list).
    lists:foreach(F, get(roots)),
    info_msg(hc_eval, [get(self), {query,get(query)}, {roots,get(roots)},
		       {node_pids,get(node_pids)}], eval_done, 10),
    ok.
%%hc_eval() ->
%% error_msg(hc_eval, [get()], forbiden_state),
%% ok.
%%
%% hc_read/0
%%
%% @doc Processing request to read block from resut queue and send it back to caller.
%%
%% @spec hc_read() -> [query_node:qn_graph()] | end_of_stream
%%
hc_read() ->
    %% Read one block from the result queue if one is prepared,
    %% otherwise report no_data. Binding Block to the case result
    %% avoids exporting a variable out of the case clauses (an
    %% error-prone construct the compiler warns about).
    Block = case query_node:queue_prepared(result) of
		true  -> query_node:queue_read(result);
		false -> no_data
	    end,
    %% report and return
    info_msg(hc_read, [get(self), {reply,Block}, {invoker,get(invoker)}, get(state)], block_read, 50),
    {data_block, Block}.
%%
%% hc_data_outer/2
%%
%% @doc Processing data message from root of query tree process.
%%
%% @spec hc_data_outer(From::pid(), Block::[query_node:qn_graph()]) -> ok
%%
%% Clause 1: end_of_stream from a root — record elapsed time for that
%% root, disconnect from the DB and log via the stop_watch server.
hc_data_outer(Pid, end_of_stream) ->
    A = [Pid, end_of_stream],
    PS = pid_start,
    PE = pid_elapse,
    %% elapsed microseconds since hc_eval/0 recorded the start time
    S = maps:get(Pid, get(PS)),
    E = timer:now_diff(os:timestamp(), S),
    put(PE, maps:put(Pid, E, get(PE))),
    db_interface:db_disconnect(),
    R = [get(self), {from, Pid}, {outer_data_stream_terminated, E, Pid}, get(state)],
    SW = stop_watch,
    RS = gen_server:call(SW, {record, R}),
    info_msg(hc_data_outer, A, RS, 10);
%% Clause 2: a whole block (list of graphs) — process each element,
%% conditionally enqueue the block, and return an empty message for
%% flow control.
hc_data_outer(From, Block) when is_list(Block) ->
    F = fun(X) -> hc_data_outer(From, X) end,
    R = lists:foreach(F, Block),
    info_msg(hc_data_outer, [get(self), {from,From}, {block,Block}, {process_each_element,R}, get(state)], block_received, 80),
    %% store block in queue
    %% [TODO] what to do with result exactly?
    %% [TODO] block will be sent directly to session
    Q = get(queue_result),
    %% Only enqueue while every root is still below result_record_max.
    case maps:values(get(result_freq)) of
	[] -> ok;
	VRF ->
	    RRM = get(result_record_max),
	    case lists:max(VRF) of
		MV when MV < RRM ->
		    Q1 = queue:in(Block, Q),
		    put(queue_result, Q1);
		_ -> ok
	    end
    end,
    %% pass the stream to invoker
    % case get(invoker) of
    %     undefined -> error_msg(hc_data_outer, [get(self), {all,get()}, get(state)], data_before_start);
    %     _ -> gen_server:cast(get(invoker), {data_outer, get(self), Block})
    % end,
    %% send empty back to sender for flow control
    gen_server:cast(From, {empty, get(self)}),
    info_msg(hc_data_outer, [get(self), {from,From}, {invoker,get(invoker)}, {result_freq,get(result_freq)},
			     {result_record_max,get(result_record_max)}, {queue_result,get(queue_result)}, get(state)], block_stored, 80),
    ok;
%% Clause 3: a single graph — count it against the sending root pid.
hc_data_outer(Pid, Graph) ->
    MapRF = get(result_freq),
    %% maps:get/3 with a default replaces the original is_key/get
    %% double lookup (and its variable exported from a case).
    Freq = maps:get(Pid, MapRF, 0),
    NewRF = maps:put(Pid, Freq + 1, MapRF),
    put(result_freq, NewRF),
    info_msg(hc_data_outer, [get(self), {from,Pid}, {graph,Graph}, {result_freq, NewRF}, get(state)], graph_processed, 80).
%%
%% send_N_empty/1
%%
%% @doc Send N empty messages to Pid. N is stored in config.
%%
send_N_empty(Pid) ->
    %% N comes from the application environment on every call; the
    %% [TODO] notes it could be cached in the process dictionary.
    {ok, N} = application:get_env(b3s, num_of_empty_msgs), % [TODO] store id PD
    send_N_empty_1(Pid, N),
    info_msg(send_N_empty, [Pid, N], done, 100).
%% Cast {empty, Self} to Pid the requested number of times, counting
%% down to the 0 base case.
send_N_empty_1(_Pid, 0) ->
    ok;
send_N_empty_1(Pid, Remaining) ->
    Self = get(self),
    gen_server:cast(Pid, {empty, Self}),
    send_N_empty_1(Pid, Remaining - 1).
%%
%% Merges maps M1 and M2 into a single map. Keys of resulting map include
%% union of keys from M1 and M2. If key is in both maps M1 and M2 then
%% then the value of key includes union of values from M1 and M2. Values
%% are stored as lists of pairs (query node, and, index of key in triple).
%%
%% Merge maps M1 and M2. The result contains the union of keys; for a
%% key present in both maps the value is M1's list with M2's list
%% appended. Values are lists of {query node, index of key in triple}
%% pairs.
merge_maps(M1, M2) ->
    F = fun (K, V, Acc) ->
		%% Single lookup via maps:find/2; the original did
		%% maps:is_key/2 followed by maps:get/2 (two lookups).
		case maps:find(K, M1) of
		    {ok, V1} ->
			maps:put(K, lists:append(V1, V), Acc);
		    error ->
			maps:put(K, V, Acc)
		end
	end,
    maps:fold(F, M1, M2).
%%
%% Compute intersection of two lists.
%%
%% Return the elements of the second list that also occur in the first
%% list. Order and duplicates of the second list are preserved, exactly
%% as in the original hand-rolled recursion; the comprehension is the
%% idiomatic (and body-recursion-free) equivalent.
intersect_lists(L1, L2) ->
    [X || X <- L2, lists:member(X, L1)].
%%
%% hc_eval_test/0
%%
%% @doc Test function for hc_eval.
%%
hc_eval_test_() ->
    %% EUnit test generator: dispatch on the b3s application's
    %% configured test_mode (only local1 runs tests, see hcet_site/1).
    application:load(b3s),
    {ok, TM} = application:get_env(b3s, test_mode),
    hcet_site(TM).
%% Site-specific test fixture. Runs only in local1 test mode: boots the
%% b3s application on the local node, installs a two-row column/row
%% configuration that maps both rows to this node, loads the test DB
%% and then runs the hcet_t* generators in order.
hcet_site(local1) ->
    NDS = node(),
    BSS = {b3s_state, NDS},
    CRC = clm_row_conf,
    RMS = #{1 => NDS},
    CM1 = #{1 => RMS, 2 => RMS},
    R01 = [NDS], %%, NDC],
    {inorder,
     [
      ?_assertMatch(ok, b3s:start()),
      ?_assertMatch(ok, b3s:bootstrap()),
      ?_assertMatch(ok, gen_server:call(BSS, {put, CRC, CM1})),
      ?_assertMatch(R01, gen_server:call(BSS, propagate)),
      {generator, fun()-> tp_query_node:hcet_load_db() end},
      {generator, fun()-> join_query_node:hcet_load_db() end},
      {generator, fun()-> hcet_t01() end},
      {generator, fun()-> hcet_t01h() end},
      {generator, fun()-> hcet_t02() end},
      {generator, fun()-> hcet_t03() end},
      ?_assertMatch(ok, b3s:stop())
     ]};
%% Any other test mode: no tests.
hcet_site(_) ->
    [].
%%
%% eI/1, eT/1, eTP/1, dT/1m dTMap/1, dTML/1, dTMLL/1
%%
%% @doc Encoding / decoding string, triple and triple pattern.
%%
%% eI/1: encode one string to its integer id via string_id.
eI(X) -> string_id:get_id(X).
%% eT/1: encode a 5-tuple {Tag, Id, S, P, O} — the I/S/P/O fields are
%% encoded, the leading tag (e.g. triple_store) is re-attached as-is.
eT({T,I,S,P,O}) ->
    ET = string_id:encode_triple({I, S, P, O}),
    list_to_tuple([T | tuple_to_list(ET)]).
%% eTP/1: encode a triple pattern (variables pass through).
eTP(X) -> string_id:encode_triple_pattern(X).
%% dT/1: inverse of eT/1 — decode the I/S/P/O fields, keep the tag.
dT({T,I,S,P,O}) ->
    DT = string_id:decode_triple({I, S, P, O}),
    list_to_tuple([T | tuple_to_list(DT)]).
%% dTMap/1: decode every triple value of a map; non-map terms (e.g.
%% the end_of_stream atom in result lists) pass through unchanged.
dTMap(M) when is_map(M) ->
    F = fun ({K, T}) -> {K, dT(T)} end,
    maps:from_list(lists:map(F, maps:to_list(M)));
dTMap(T) ->
    T.
%% dTML/1: decode a list of maps (one result block).
dTML(L) when is_list(L) ->
    lists:map(fun dTMap/1, L).
%% dTMLL/1: decode a list of blocks (whole result queue contents).
dTMLL(L) when is_list(L) ->
    lists:map(fun dTML/1, L).
%%
%% hcet_t01/0
%%
%% @doc Test 01 of hc_eval.
%%
hcet_t01() ->
    info_msg(hcet_t01, [get()], start, 50),
    % tree ids will be handled by session
    TreeId = "1",
    SessionId = "1",
    % spawn query tree process; the registered name is "qt-<session>-<tree>"
    ProcId = list_to_atom("qt-"++SessionId++"-"++TreeId),
    info_msg(hcst_t01, [{ProcId,node()}], query_tree_pid, 50),
    % spawn qt process locally
    QT = query_tree:spawn_process(ProcId, node()),
    % define query: (livesIn ⋈h hasCapital) ⋈ worksAt
    Q = {join, {hjoin, {tp, { "?i1", "?y", "livesIn", "?x" }, none, none},
		{tp, { "?i2", "slovenia", "hasCapital", "?x" }, none, none},
		none, none},
	 {tp, { "?i3", "?y", "worksAt", "ijs" }, none, none},
	 none, none},
    %% data to be returned (encoded triples expected in the result)
    T1 = eT({triple_store, "id22", "slovenia", "hasCapital", "ljubljana"}),
    T2 = eT({triple_store, "id38", "luka", "livesIn", "ljubljana"}),
    T3 = eT({triple_store, "id47", "luka", "worksAt", "ijs"}),
    T4 = eT({triple_store, "id40", "nika", "livesIn", "ljubljana"}),
    T5 = eT({triple_store, "id49", "nika", "worksAt", "ijs"}),
    T6 = eT({triple_store, "id41", "marko", "livesIn", "ljubljana"}),
    T7 = eT({triple_store, "id50", "marko", "worksAt", "ijs"}),
    R1Map = maps:put("4", T3, maps:put("2", T1, maps:put("1", T2, maps:new()))),
    R2Map = maps:put("4", T5, maps:put("2", T1, maps:put("1", T4, maps:new()))),
    R3Map = maps:put("4", T7, maps:put("2", T1, maps:put("1", T6, maps:new()))),
    QR = [[R3Map, R2Map, R1Map, end_of_stream]],
    %% F1 = fun (RQ) ->
    %% 	    info_msg(hcst_t01, [QT, {received, dTMLL(RQ)}, {expected, dTMLL(QR)}], compare_results, 20),
    %% 	    RQ
    %% end,
    %% F2 compares received vs. expected block as sets (order-insensitive)
    F2 = fun ([Rec], [Exp]) ->
		 A = [QT, {received, dTML(Rec)}, {expected, dTML(Exp)}],
		 info_msg(hcst_t01, A, compare_results, 20),
		 sets:from_list(Rec) == sets:from_list(Exp)
	 end,
    S1 = {start, Q, QT, self(), TreeId, SessionId},
    info_msg(hcst_t01, [QT, S1, QR], before_test, 50),
    {inorder,
     [
      ?_assertMatch(ok, mnesia:start()),
      ?_assertMatch(ok, timer:sleep(1000)),
      ?_assertMatch(ok, gen_server:call(QT, S1)),
      % check properties by qnodes
      ?_assertMatch(33, length(gen_server:call(QT, {get, all}))),
      ?_assertMatch(ok, gen_server:call(QT, {eval})),
      ?_assertMatch(ok, timer:sleep(1000)),
      %% ?_assertMatch(QR, F1(queue:to_list(load_queue_result(QT)))),
      ?_assertMatch(true, F2(queue:to_list(load_queue_result(QT)), QR)),
      ?_assertMatch(ok, timer:sleep(1000))
     ]}.
%%
%% hcet_t01h/0
%%
%% @doc Test 01h of hc_eval.
%%
%% Same query and expected data as hcet_t01/0, run under tree id "1h".
%% Fixed copy-paste defect: the log tags now say t01h instead of t01,
%% so log entries from this test are no longer attributed to hcet_t01.
hcet_t01h() ->
    info_msg(hcet_t01h, [get()], start, 50),
    % tree ids will be handled by session
    TreeId = "1h",
    SessionId = "1",
    % spawn query tree process
    ProcId = list_to_atom("qt-"++SessionId++"-"++TreeId),
    info_msg(hcst_t01h, [{ProcId,node()}], query_tree_pid, 50),
    % spawn qt process locally
    QT = query_tree:spawn_process(ProcId, node()),
    % define query
    Q = {join, {hjoin, {tp, { "?i1", "?y", "livesIn", "?x" }, none, none},
		{tp, { "?i2", "slovenia", "hasCapital", "?x" }, none, none},
		none, none},
	 {tp, { "?i3", "?y", "worksAt", "ijs" }, none, none},
	 none, none},
    %% data to be returned
    T1 = eT({triple_store, "id22", "slovenia", "hasCapital", "ljubljana"}),
    T2 = eT({triple_store, "id38", "luka", "livesIn", "ljubljana"}),
    T3 = eT({triple_store, "id47", "luka", "worksAt", "ijs"}),
    T4 = eT({triple_store, "id40", "nika", "livesIn", "ljubljana"}),
    T5 = eT({triple_store, "id49", "nika", "worksAt", "ijs"}),
    T6 = eT({triple_store, "id41", "marko", "livesIn", "ljubljana"}),
    T7 = eT({triple_store, "id50", "marko", "worksAt", "ijs"}),
    R1Map = maps:put("4", T3, maps:put("2", T1, maps:put("1", T2, maps:new()))),
    R2Map = maps:put("4", T5, maps:put("2", T1, maps:put("1", T4, maps:new()))),
    R3Map = maps:put("4", T7, maps:put("2", T1, maps:put("1", T6, maps:new()))),
    QR = [[R3Map, R2Map, R1Map, end_of_stream]],
    %% F2 compares received vs. expected block as sets (order-insensitive)
    F2 = fun ([Rec], [Exp]) ->
		 A = [QT, {received, dTML(Rec)}, {expected, dTML(Exp)}],
		 info_msg(hcst_t01h, A, compare_results, 20),
		 sets:from_list(Rec) == sets:from_list(Exp)
	 end,
    S1 = {start, Q, QT, self(), TreeId, SessionId},
    info_msg(hcst_t01h, [QT, S1, QR], before_test, 50),
    {inorder,
     [
      ?_assertMatch(ok, mnesia:start()),
      ?_assertMatch(ok, timer:sleep(1000)),
      ?_assertMatch(ok, gen_server:call(QT, S1)),
      % check properties by qnodes
      ?_assertMatch(33, length(gen_server:call(QT, {get, all}))),
      ?_assertMatch(ok, gen_server:call(QT, {eval})),
      ?_assertMatch(ok, timer:sleep(1000)),
      ?_assertMatch(true, F2(queue:to_list(load_queue_result(QT)), QR)),
      ?_assertMatch(ok, timer:sleep(1000))
     ]}.
%%
%% load_queue_result/1
%%
%% @doc Load queue_result from query tree QT and check if there is end_of_stram
%% at the end of queue. If end_of_stream found return complete queue queue_result,
%% otherwise wait for more data.
%%
%% Poll query tree QT for its queue_result until the last block in the
%% queue contains end_of_stream, then return the complete queue.
%%
%% Bug fix: the original discarded the recursive call's result in the
%% 'empty' case branch and then executed "{value, B} = Last" with Last
%% still bound to 'empty', crashing with a badmatch instead of
%% returning the recursively obtained queue. The recursion result is
%% now returned directly from each branch.
load_queue_result(QT) ->
    RQ = gen_server:call(QT, {get, queue_result}),
    case queue:peek_r(RQ) of
	empty ->
	    %% nothing queued yet; wait and retry
	    timer:sleep(300),
	    load_queue_result(QT);
	{value, B} ->
	    case lists:member(end_of_stream, B) of
		true  -> RQ;
		false -> timer:sleep(300), load_queue_result(QT)
	    end
    end.
%%
%% hcet_t02/0
%%
%% @doc Test 01 of hc_eval.
%%
hcet_t02() ->
    info_msg(hcst_t02, [get()], start, 50),
    % tree ids will be handled by session
    TreeId = "2",
    SessionId = "1",
    % spawn query tree process localy
    ProcId = list_to_atom("qt-"++SessionId++"-"++TreeId),
    info_msg(hcst_t02, [{ProcId,node()}], query_tree_pid, 50),
    QT = query_tree:spawn_process(ProcId, node()),
    % define query: single hash join of two triple patterns
    Q = {hjoin, {tp, { "?i1", "nika", "?x", "ijs" }, none, none},
	 {tp, { "?i2", "?y", "?x", "yj" }, none, none},
	 none, none},
    %% data to be returned (encoded triples expected in the result)
    T1 = eT({triple_store, "id49", "nika", "worksAt", "ijs"}),
    T2 = eT({triple_store, "id44", "yoshio", "worksAt", "yj"}),
    T3 = eT({triple_store, "id42", "shou", "worksAt", "yj"}),
    R1Map = maps:put("2", T3, maps:put("1", T1, maps:new())),
    R2Map = maps:put("2", T2, maps:put("1", T1, maps:new())),
    %% expected result: two blocks, exact order asserted below
    QR = [[R2Map, R2Map, R1Map, R1Map, R2Map],
	  [R2Map, R1Map, R1Map, end_of_stream]],
    %% F1 only logs received vs. expected (decoded) and passes RQ through
    F1 = fun (RQ) ->
		 info_msg(hcst_t02, [QT, {received, dTMLL(RQ)}, {expected, dTMLL(QR)}], compare_results, 20),
		 RQ
	 end,
    S1 = {start, Q, QT, self(), TreeId, SessionId},
    info_msg(hcst_t02, [QT, S1], before_test, 50),
    {inorder,
     [
      ?_assertMatch(ok, mnesia:start()),
      ?_assertMatch(ok, timer:sleep(1000)),
      ?_assertMatch(ok, gen_server:call(QT, S1)),
      % check properties by qnodes
      ?_assertMatch(33, length(gen_server:call(QT, {get, all}))),
      ?_assertMatch(ok, gen_server:call(QT, {eval})),
      ?_assertMatch(ok, timer:sleep(1000)),
      ?_assertMatch(QR, F1(queue:to_list(load_queue_result(QT)))),
      ?_assertMatch(ok, timer:sleep(1000))
     ]}.
%%
%% hcet_t03/0
%%
%% @doc Test 03 of hc_eval.
%%
hcet_t03() ->
    info_msg(hcst_t03, [get()], start, 50),
    % tree ids will be handled by session
    TreeId = "3",
    SessionId = "1",
    % spawn query tree process localy
    ProcId = list_to_atom("qt-"++SessionId++"-"++TreeId),
    info_msg(hcst_t03, [{ProcId,node()}], query_tree_pid, 50),
    QT = query_tree:spawn_process(ProcId, node()),
    % define query with a filter (20 =< ?y < 30) and projection on ?y
    Q = {join, {hjoin, {tp, { "?i1", "?x", "livesIn", "ljubljana" }, none, none},
		{tp, { "?i2", "?x", "graduatedFrom", "ul" }, none, none},
		none, none },
	 {tp, {"?i3", "?x", "age", "?y"}, none, none},
	 {{"?y", less, 30}, land, {"?y", greatereq, 20}}, ["?y"]},
    %% data to be returned (only the projected age triple survives)
    % T1 = {triple_store, "id40", "nika", "livesIn", "ljubljana"},
    % T2 = {triple_store, "id56", "nika", "graduatedFrom", "ul"},
    T3 = eT({triple_store, "id63", "nika", "age", "22"}),
    R1Map = maps:put("4", T3, maps:new()),
    QR = [[R1Map, end_of_stream]],
    %% F1 only logs received vs. expected (decoded) and passes RQ through
    F1 = fun (RQ) ->
		 info_msg(hcst_t03, [QT, {received, dTMLL(RQ)}, {expected, dTMLL(QR)}], compare_results, 20),
		 RQ
	 end,
    S1 = {start, Q, QT, self(), TreeId, SessionId},
    % info_msg(hcst_t03, [QT, S1], before_test, 50),
    {inorder,
     [
      ?_assertMatch(ok, mnesia:start()),
      ?_assertMatch(ok, timer:sleep(1000)),
      ?_assertMatch(ok, gen_server:call(QT, S1)),
      % check properties by qnodes
      ?_assertMatch(33, length(gen_server:call(QT, {get, all}))),
      ?_assertMatch(ok, gen_server:call(QT, {eval})),
      ?_assertMatch(ok, timer:sleep(1000)),
      ?_assertMatch(QR, F1(queue:to_list(load_queue_result(QT)))),
      ?_assertMatch(ok, timer:sleep(1000))
     ]}.
%% ====> END OF LINE <====
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2018 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(tricks_util).
-author("<NAME> <<EMAIL>>").
-include("tricks.hrl").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% API
-export([binary_join/1,
binary_join/2,
parse_binary/1,
parse_integer/1,
parse_event/1,
parse_pod_data/1,
parse_json/1,
compose_json/1,
dict_find/3]).
%% @doc Join a list of binaries.
-spec binary_join(list(binary())) -> binary().
binary_join(List) ->
    %% Delegate to binary_join/2 with the empty binary as separator.
    binary_join(<<>>, List).
%% @doc Join a list of binaries using a given separator.
-spec binary_join(binary(), list(binary())) -> binary().
%% Empty list joins to the empty binary regardless of separator.
binary_join(_Sep, []) ->
    <<>>;
%% Non-empty list: accumulate into binary_join/3 starting from <<>>.
binary_join(Sep, List) ->
    binary_join(Sep, List, <<>>).
%% @doc Parse a binary.
-spec parse_binary(term()) -> binary().
%% Coerce a term to a binary. Binaries pass through; integers, lists
%% (latin-1 strings / iolists) and atoms are converted. The guards are
%% mutually exclusive, so clause order is free.
parse_binary(Value) when is_binary(Value) ->
    Value;
parse_binary(Value) when is_atom(Value) ->
    atom_to_binary(Value, utf8);
parse_binary(Value) when is_integer(Value) ->
    integer_to_binary(Value);
parse_binary(Value) when is_list(Value) ->
    list_to_binary(Value).
%% @doc Parse an integer.
-spec parse_integer(term()) -> integer().
%% Coerce a term to an integer: binaries are parsed with
%% binary_to_integer/1, integers pass through unchanged.
parse_integer(Value) when is_binary(Value) ->
    binary_to_integer(Value);
parse_integer(Value) when is_integer(Value) ->
    Value.
%% @doc Parse an event.
-spec parse_event(term()) -> event().
%% An event is a {Name, Counter} pair: coerce the name to a binary
%% and the counter to an integer.
parse_event({Name, Counter}) ->
    {parse_binary(Name), parse_integer(Counter)}.
%% @doc Parse pod data.
-spec parse_pod_data(term()) -> pod_data().
%% Pod data is an {Id, Payload} pair: coerce the id to an integer
%% and the payload to a binary (note the inverse order of parse_event).
parse_pod_data({Id, Payload}) ->
    {parse_integer(Id), parse_binary(Payload)}.
%% @doc Parse JSON. Return a map where labels are atoms.
-spec parse_json(binary()) -> maps:map().
parse_json(A) ->
    %% Decode with jsx; return_maps yields a map (not a proplist) and
    %% {labels, atom} converts object keys to atoms.
    jsx:decode(A, [return_maps, {labels, atom}]).
%% @doc Compose JSON.
-spec compose_json(maps:map()) -> binary().
compose_json(A) ->
    %% Encode a map to a JSON binary via jsx.
    jsx:encode(A).
%% @doc Find a key in a dictionary,
%% returning a default in case it's not found.
-spec dict_find(term(), dict:dict(), term()) -> term().
%% Look Key up in Dict, returning Default when absent — a dict
%% counterpart of maps:get/3.
dict_find(Key, Dict, Default) ->
    case dict:find(Key, Dict) of
        {ok, Value} ->
            Value;
        error ->
            Default
    end.
%% @private
%% @private
%% Accumulator clause: the last element is appended without a trailing
%% separator. Appending to a single binary accumulator like this is
%% the efficient append pattern (the runtime grows Bin in place).
binary_join(_Sep, [E], Bin) ->
    EBin = parse_binary(E),
    <<Bin/binary, EBin/binary>>;
binary_join(Sep, [E|Rest], Bin) ->
    EBin = parse_binary(E),
    binary_join(Sep, Rest, <<Bin/binary, EBin/binary, Sep/binary>>).
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
%% EUnit coverage for binary_join/1,2 over empty, one-, two- and
%% three-element lists with empty, single-char and multi-char separators.
binary_join_test() ->
    L0 = [],
    L1 = [<<"a">>],
    L2 = [<<"a">>, <<"b">>],
    L3 = [<<"a">>, <<"b">>, <<"c">>],
    Sep0 = <<>>,
    Sep1 = <<";">>,
    Sep2 = <<"%3D">>,
    ?assertEqual(<<>>, binary_join(L0)),
    ?assertEqual(<<>>, binary_join(Sep0, L0)),
    ?assertEqual(<<>>, binary_join(Sep1, L0)),
    ?assertEqual(<<"a">>, binary_join(Sep0, L1)),
    ?assertEqual(<<"a">>, binary_join(Sep1, L1)),
    ?assertEqual(<<"ab">>, binary_join(Sep0, L2)),
    ?assertEqual(<<"a;b">>, binary_join(Sep1, L2)),
    ?assertEqual(<<"abc">>, binary_join(Sep0, L3)),
    ?assertEqual(<<"a;b;c">>, binary_join(Sep1, L3)),
    ?assertEqual(<<"a%3Db%3Dc">>, binary_join(Sep2, L3)).
-endif.
%%%===================================================================
%% @author <NAME>
%% @copyright 2016 Pundun Labs AB
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
%% implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% -------------------------------------------------------------------
%% @doc
%% Module Description:
%% @end
%%%===================================================================
-module(gb_reg_worker).
-behaviour(gen_server).
-export([start_link/1]).
%% gen_server callbacks
-export([init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3]).
%%%===================================================================
%%% API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Starts the server
%%
%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
start_link(Args) ->
    %% Register the server under the register module's own name.
    Module = proplists:get_value(mod, Args),
    %% When the 'load' option is set, init/1 restores the register
    %% from a previously stored beam; otherwise a fresh register module
    %% is generated. The original duplicated the Module lookup and the
    %% gen_server:start_link call in both branches; only the init mode
    %% tag actually differs.
    Mode = case proplists:get_value(load, Args, false) of
	       true  -> load;
	       false -> new
	   end,
    gen_server:start_link({local, Module}, ?MODULE, [Mode | Args], []).
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Initializes the server
%%
%% @spec init(Args) -> {ok, State} |
%% {ok, State, Timeout} |
%% ignore |
%% {stop, Reason}
%% @end
%%--------------------------------------------------------------------
%% 'load' mode: read a previously generated register beam from disk
%% and load it, making Module's lookup/entries/ref callable again.
init([load | Args]) ->
    File = proplists:get_value(file, Args),
    Module = proplists:get_value(mod, Args),
    Dir = proplists:get_value(dir, Args),
    Filename = filename:join([Dir, File]),
    {ok, Beam} = file:read_file(Filename),
    load_register(Module, Filename, Beam),
    {ok, #{filename => Filename}};
%% 'new' mode: generate a fresh register module (ref counter 0) from
%% the optional initial entries, persist the beam, then load it.
init([new | Args]) ->
    Dir = proplists:get_value(dir, Args),
    Module = proplists:get_value(mod, Args),
    Filename = filename:join([Dir, Module]),
    Entries = proplists:get_value(entries, Args, []),
    {ok, _, Beam} = gen_beam(Module, Entries, 0),
    store_beam(Filename, Beam),
    load_register(Module, Filename, Beam),
    {ok, #{filename => Filename}}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling call messages
%%
%% @spec handle_call(Request, From, State) ->
%% {reply, Reply, State} |
%% {reply, Reply, State, Timeout} |
%% {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, Reply, State} |
%% {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% add_keys: register new keys with auto-generated bidirectional
%% refs (Key->Ref and Ref->Key). Keys already present are skipped,
%% and the module is only regenerated when something was added.
handle_call({add_keys, Mod, Keys}, _From, State = #{filename := Filename}) ->
    Filter = fun(Key) ->
		     case Mod:lookup(Key) of
			 undefined -> true;
			 _ -> false
		     end
	     end,
    NewKeys = lists:filter(Filter, Keys),
    case generate_entries(Mod:ref(), NewKeys, []) of
	{_, Add} when map_size(Add) == 0 ->
	    %% nothing new; avoid a needless recompile/reload
	    {reply, ok, State};
	{Ref, Add} ->
	    Merged = maps:merge(Mod:entries(), Add),
	    Reply = regen_register(Mod, Filename, Merged, Ref),
	    {reply, element(1, Reply), State}
    end;
%% add_kvl: add explicit key/value pairs, but only for keys that do
%% not already have an entry (existing keys are left untouched).
%% Note: regenerates even when NewKvl is empty.
handle_call({add_kvl, Mod, Kvl}, _From, State = #{filename := Filename}) ->
    Filter = fun({Key, _}) ->
		     case Mod:lookup(Key) of
			 undefined -> true;
			 _ -> false
		     end
	     end,
    NewKvl = lists:filter(Filter, Kvl),
    Add = maps:from_list(NewKvl),
    Merged = maps:merge(Mod:entries(), Add),
    Reply = regen_register(Mod, Filename, Merged, Mod:ref()),
    {reply, element(1, Reply), State};
%% insert: upsert a single key/value pair; no-op if the exact pair
%% is already present.
handle_call({insert, Mod, Key, Val}, _From, State = #{filename := Filename}) ->
    case Mod:entries() of
	#{Key := Val} ->
	    {reply, ok, State};
	Entries ->
	    Reply = regen_register(Mod, Filename, Entries#{Key => Val}, Mod:ref()),
	    {reply, element(1,Reply), State}
    end;
%% insert_kvl: upsert a list of key/value pairs (later values win);
%% no-op if the merge leaves the entries unchanged.
handle_call({insert_kvl, Mod, Kvl}, _From, State = #{filename := Filename}) ->
    Add = maps:from_list(Kvl),
    Entries = Mod:entries(),
    case maps:merge(Entries, Add) of
	Entries ->
	    {reply, ok, State};
	Merged ->
	    Reply = regen_register(Mod, Filename, Merged, Mod:ref()),
	    {reply, element(1, Reply), State}
    end;
%% delete: remove a key if present; no-op otherwise.
handle_call({delete, Mod, Key}, _From, State = #{filename := Filename}) ->
    case Mod:entries() of
	#{Key := _} = Entries ->
	    Reply = regen_register(Mod, Filename,
				   maps:remove(Key, Entries), Mod:ref()),
	    {reply, element(1,Reply), State};
	_ ->
	    {reply, ok, State}
    end;
%% purge: unload the register module, delete its stored beam and
%% stop this worker.
handle_call({purge, Mod}, _From, State = #{filename := Filename}) ->
    code:purge(Mod),
    code:delete(Mod),
    file:delete(Filename),
    {stop, normal,ok, State};
%% catch-all: any other request is acknowledged with ok.
handle_call(_Request, _From, State) ->
    Reply = ok,
    {reply, Reply, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling cast messages
%%
%% @spec handle_cast(Msg, State) -> {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% No casts are handled; all are silently ignored.
handle_cast(_Msg, State) ->
    {noreply, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling all non call/cast messages
%%
%% @spec handle_info(Info, State) -> {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% Drain any unexpected raw messages so the mailbox cannot grow.
handle_info(_Info, State) ->
    {noreply, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do any
%% necessary cleaning up. When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%%
%% @spec terminate(Reason, State) -> void()
%% @end
%%--------------------------------------------------------------------
%% No cleanup required; the loaded register module survives the worker.
terminate(_Reason, _State) ->
    ok.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Convert process state when code is changed
%%
%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
%% @end
%%--------------------------------------------------------------------
%% State format is unchanged across releases.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Generate register with given Entries.
%% @end
%%--------------------------------------------------------------------
-spec gen_beam(Mod :: module(),
Tuples :: [{term(), term()}],
Ref :: integer()) ->
{ok, Mod :: module(), Beam :: binary()}.
gen_beam(Mod, Tuples, Ref) ->
    %% Build Core Erlang forms for the register module and compile
    %% them directly to a beam binary (never written to disk here).
    CEForms = make_mod(Mod, maps:from_list(Tuples), Ref),
    compile:forms(CEForms, [from_core, binary]).
%%--------------------------------------------------------------------
%% @doc
%% Re-generate register with given Entries.
%% @end
%%--------------------------------------------------------------------
-spec regen_register(Mod :: module(),
Filename :: string(),
Entries :: map(),
Ref :: integer()) ->
{ok, Beam :: binary()}.
regen_register(Mod, Filename, Entries, Ref) ->
    %% Recompile the register module with the new entries/ref,
    %% persist the beam, then hot-load it so Mod:lookup/1 etc.
    %% reflect the change immediately.
    CEForms = make_mod(Mod, Entries, Ref),
    {ok, _, Beam} = compile:forms(CEForms, [from_core, binary]),
    store_beam(Filename, Beam),
    load_register(Mod, Filename, Beam).
%%--------------------------------------------------------------------
%% @doc
%% Write object code to file to store persistent.
%% @end
%%--------------------------------------------------------------------
-spec store_beam(Filename :: string(),
Bin :: binary()) ->
ok.
store_beam(Filename, Bin) ->
    %% NOTE(review): file:write_file/2 can also return {error, Reason},
    %% which the -spec (-> ok) does not reflect; callers currently
    %% ignore the return value — confirm whether failures should crash.
    file:write_file(Filename, Bin).
%%--------------------------------------------------------------------
%% @doc
%% Load object code of register module code on node().
%% @end
%%--------------------------------------------------------------------
-spec load_register(Mod :: module(),
Filename :: string(),
Bin :: binary()) ->
{ok, Bin :: binary()}.
load_register(Mod, Filename, Bin) ->
    %% Assertive match: crash this worker if the beam cannot be loaded.
    {module, _ } = code:load_binary(Mod, Filename, Bin),
    {ok, Bin}.
%%--------------------------------------------------------------------
%% @doc
%% Make module Mod with lookup function that matches
%% terms in Entries.
%% @end
%%--------------------------------------------------------------------
-spec make_mod(Mod :: module(),
Entries :: map(),
Ref :: integer()) ->
term().
make_mod(Mod, Entries, Ref) ->
    ModuleName = cerl:c_atom(Mod),
    %% Core Erlang module exporting entries/0, ref/0, lookup/1 plus the
    %% mandatory module_info/0,1 functions.
    cerl:c_module(ModuleName,
		  [cerl:c_fname(entries, 0),
		   cerl:c_fname(ref, 0),
		   cerl:c_fname(lookup, 1),
		   cerl:c_fname(module_info, 0),
		   cerl:c_fname(module_info, 1)],
		  [make_entries_fun(Entries),
		   make_ref_fun(Ref),
		   make_lookup_fun(Entries) | mod_info(ModuleName)]).
%%--------------------------------------------------------------------
%% @doc
%% Make entries/0 function.
%% @end
%%--------------------------------------------------------------------
make_entries_fun(Entries) ->
    %% entries/0 returns the whole entries map as a compile-time literal.
    {cerl:c_fname(entries,0), cerl:c_fun([], cerl:abstract(Entries))}.
%%--------------------------------------------------------------------
%% @doc
%% Make ref/0 function.
%% @end
%%--------------------------------------------------------------------
make_ref_fun(Ref) ->
    %% ref/0 returns the next free auto-generated reference counter.
    {cerl:c_fname(ref,0), cerl:c_fun([], cerl:c_int(Ref))}.
%%--------------------------------------------------------------------
%% @doc
%% Make lookup/1 function.
%% @end
%%--------------------------------------------------------------------
make_lookup_fun(Entries) ->
    Arg1 = cerl:c_var('FuncArg1'),
    Else = cerl:c_var('Else'),
    True = cerl:c_atom(true),
    Undefined = cerl:c_atom(undefined),
    %% One case clause per entry; a final catch-all clause returns
    %% 'undefined' for unknown keys.
    Clauses = make_lookup_clauses(Arg1, Entries),
    LastClause = cerl:c_clause([Else], True, Undefined),
    Case = cerl:c_case(Arg1, Clauses ++ [LastClause]),
    {cerl:c_fname(lookup,1), cerl:c_fun([Arg1], Case)}.
%%--------------------------------------------------------------------
%% @doc
%% Make case clauses for lookup/1 function.
%% @end
%%--------------------------------------------------------------------
make_lookup_clauses(Arg1, Entries) ->
    %% Fold each entry into a case clause via make_lookup_clauses/3;
    %% Arg1 is threaded through unchanged.
    {_, Acc} = maps:fold(fun make_lookup_clauses/3, {Arg1, []}, Entries),
    Acc.
%%--------------------------------------------------------------------
%% @doc
%% Make case clauses for lookup/1 function.
%% @end
%%--------------------------------------------------------------------
make_lookup_clauses(Key, Value, {Arg1, Acc}) ->
    %% One clause of the form:  Key (literal pattern) -> Value
    Pattern = [cerl:abstract(Key)],
    Guard = cerl:c_atom(true),
    Body = cerl:abstract(Value),
    Clause = cerl:c_clause(Pattern, Guard, Body),
    {Arg1, [Clause | Acc]}.
%%--------------------------------------------------------------------
%% @doc
%% Make module_info/1 function.
%% @end
%%--------------------------------------------------------------------
mod_info(Name) ->
    %% Standard module_info/0,1 implementations delegating to
    %% erlang:get_module_info — required in every hand-built module.
    M = cerl:c_atom(erlang),
    F = cerl:c_atom(get_module_info),
    Info0 = {cerl:c_fname(module_info, 0),
	     cerl:c_fun([], cerl:c_call(M, F, [Name]))},
    Key = cerl:c_var('Key'),
    Info1 = {cerl:c_fname(module_info, 1),
	     cerl:c_fun([Key], cerl:c_call(M, F, [Name, Key]))},
    [Info0, Info1].
%% Generate bidirectional entries for a list of keys: each Key gets a
%% fresh integer Ref, and both {Key, Ref} and {Ref, Key} are recorded
%% so lookups work in either direction. Returns the next free Ref and
%% the resulting map. (Trailing non-Erlang residue on the final line
%% of the original has been removed.)
-spec generate_entries(Ref :: integer(),
		       Keys :: [term()],
		       Acc :: [{term(), term()}]) ->
    {NewRef :: integer(), Map :: map()}.
generate_entries(Ref, [Key | Rest], Acc) ->
    generate_entries(Ref + 1, Rest, [{Key, Ref}, {Ref, Key} | Acc]);
generate_entries(Ref, [], Acc) ->
    {Ref, maps:from_list(Acc)}.
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
%%
%% Priority queues have essentially the same interface as ordinary
%% queues, except that a) there is an in/3 that takes a priority, and
%% b) we have only implemented the core API we need.
%%
%% Priorities should be integers - the higher the value the higher the
%% priority - but we don't actually check that.
%%
%% in/2 inserts items with priority 0.
%%
%% We optimise the case where a priority queue is being used just like
%% an ordinary queue. When that is the case we represent the priority
%% queue as an ordinary queue. We could just call into the 'queue'
%% module for that, but for efficiency we implement the relevant
%% functions directly in here, thus saving on inter-module calls and
%% eliminating a level of boxing.
%%
%% When the queue contains items with non-zero priorities, it is
%% represented as a sorted kv list with the inverted Priority as the
%% key and an ordinary queue as the value. Here again we use our own
%% ordinary queue implemention for efficiency, often making recursive
%% calls into the same function knowing that ordinary queues represent
%% a base case.
-module(pique).
-export([new/0, is_queue/1, is_empty/1, len/1, to_list/1, from_list/1,
in/2, in/3, out/1, out_p/1, join/2, filter/2, fold/3, highest/1]).
%%----------------------------------------------------------------------------
-export_type([pique/0]).
-type(pique() :: pqueue()).
-type(priority() :: integer() | 'infinity').
-type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
-type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
-spec new() -> pqueue().
-spec is_queue(any()) -> boolean().
-spec is_empty(pqueue()) -> boolean().
-spec len(pqueue()) -> non_neg_integer().
-spec to_list(pqueue()) -> [{priority(), any()}].
-spec from_list([{priority(), any()}]) -> pqueue().
-spec in(any(), pqueue()) -> pqueue().
-spec in(any(), priority(), pqueue()) -> pqueue().
-spec out(pqueue()) -> {empty | {value, any()}, pqueue()}.
-spec out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}.
-spec join(pqueue(), pqueue()) -> pqueue().
-spec filter(fun ((any()) -> boolean()), pqueue()) -> pqueue().
-spec fold
(fun ((any(), priority(), A) -> A), A, pqueue()) -> A.
-spec highest(pqueue()) -> priority() | 'empty'.
%%----------------------------------------------------------------------------
%% Create an empty priority queue. The empty queue uses the plain-queue
%% representation {queue, RearList, FrontList, Length}.
new() ->
    {queue, [], [], 0}.
%% Test whether a term looks like a priority queue (either representation).
%% Note: entries of a pqueue list that are not 2-tuples make the inner fun
%% crash, exactly as in the original implementation.
is_queue({queue, Rear, Front, Count}) ->
    is_list(Rear) andalso is_list(Front) andalso is_integer(Count);
is_queue({pqueue, Entries}) when is_list(Entries) ->
    Valid = fun ({infinity, Q}) -> is_queue(Q);
                ({P, Q}) -> is_integer(P) andalso is_queue(Q)
            end,
    lists:all(Valid, Entries);
is_queue(_) ->
    false.
%% A priority queue is empty exactly when it is the canonical empty
%% plain queue; a pqueue representation always holds at least one item.
is_empty(Q) ->
    Q =:= {queue, [], [], 0}.
%% Number of items in the queue. Plain queues cache their length; a
%% pqueue sums the cached lengths of its sub-queues.
len({queue, _Rear, _Front, Count}) ->
    Count;
len({pqueue, Queues}) ->
    lists:foldl(fun ({_P, Q}, Acc) -> Acc + len(Q) end, 0, Queues).
%% Flatten the queue into a [{Priority, Item}] list in queue order.
%% Plain-queue items all carry priority 0; pqueue priorities are stored
%% negated, so they are un-negated on the way out.
to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) ->
    [{0, Item} || Item <- Out ++ lists:reverse(In)];
to_list({pqueue, Queues}) ->
    lists:append([[{maybe_negate_priority(P), V} || {0, V} <- to_list(Q)]
                  || {P, Q} <- Queues]).
%% Build a priority queue from a [{Priority, Item}] list.
from_list(Entries) ->
    lists:foldl(fun ({Priority, Item}, Acc) -> in(Item, Priority, Acc) end,
                new(), Entries).
%% Insert an item at the default priority, 0.
in(Item, Q) ->
    in(Item, 0, Q).
%% Insert X at the given priority.
%% The first two clauses are the fast path: priority 0 on a plain queue
%% stays in the plain-queue representation. Any non-zero priority promotes
%% the queue to the pqueue representation, whose entries are kept sorted
%% on the negated priority (so the highest priority sorts first), with an
%% optional 'infinity' entry pinned at the head.
%% Uses lists:keyfind/3 rather than the legacy lists:keysearch/3 (which is
%% retained only for backward compatibility and boxes its result).
in(X, 0, {queue, [_] = In, [], 1}) ->
    {queue, [X], In, 2};
in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) ->
    {queue, [X|In], Out, Len + 1};
in(X, Priority, _Q = {queue, [], [], 0}) ->
    in(X, Priority, {pqueue, []});
in(X, Priority, Q = {queue, _, _, _}) ->
    %% Existing plain-queue contents become the priority-0 sub-queue.
    in(X, Priority, {pqueue, [{0, Q}]});
in(X, Priority, {pqueue, Queues}) ->
    P = maybe_negate_priority(Priority),
    {pqueue, case lists:keyfind(P, 1, Queues) of
                 {_, Q} ->
                     %% A sub-queue for this priority exists: append to it.
                     lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
                 false when P == infinity ->
                     %% infinity always goes first.
                     [{P, {queue, [X], [], 1}} | Queues];
                 false ->
                     %% New finite priority: re-sort, keeping any infinity
                     %% entry pinned at the head (the atom 'infinity' would
                     %% otherwise sort after all integers).
                     case Queues of
                         [{infinity, InfQueue} | Queues1] ->
                             [{infinity, InfQueue} |
                              lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])];
                         _ ->
                             lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
                     end
             end}.
%% Remove the highest-priority item.
%% Returns {empty, Q} for an empty queue, otherwise {{value, Item}, NewQ}.
%% Clause order matters: the plain-queue clauses mirror the classic
%% two-list queue, and the pqueue clause collapses the representation back
%% to a plain queue whenever only the priority-0 sub-queue remains.
out({queue, [], [], 0} = Q) ->
    %% Empty queue.
    {empty, Q};
out({queue, [V], [], 1}) ->
    %% Single element, held in the rear list.
    {{value, V}, {queue, [], [], 0}};
out({queue, [Y|In], [], Len}) ->
    %% Front list exhausted: reverse the rear list, keeping the newest
    %% element (Y) in the rear.
    [V|Out] = lists:reverse(In, []),
    {{value, V}, {queue, [Y], Out, Len - 1}};
out({queue, In, [V], Len}) when is_list(In) ->
    %% Taking the last element of the front list: rebuild a well-formed
    %% queue from the rear list via r2f/2.
    {{value,V}, r2f(In, Len - 1)};
out({queue, In,[V|Out], Len}) when is_list(In) ->
    %% Common case: pop the head of the front list.
    {{value, V}, {queue, In, Out, Len - 1}};
out({pqueue, [{P, Q} | Queues]}) ->
    %% The head sub-queue holds the highest priority (keys are sorted on
    %% the negated priority). Pop from it, then simplify the result.
    {R, Q1} = out(Q),
    NewQ = case is_empty(Q1) of
               true -> case Queues of
                           [] -> {queue, [], [], 0};
                           [{0, OnlyQ}] -> OnlyQ;
                           [_|_] -> {pqueue, Queues}
                       end;
               false -> {pqueue, [{P, Q1} | Queues]}
           end,
    {R, NewQ}.
%% Like out/1, but the value tuple also carries the item's priority.
out_p({queue, _, _, _} = Q) ->
    %% Plain-queue representation implies priority 0.
    add_p(out(Q), 0);
out_p({pqueue, [{P, _} | _]} = Q) ->
    %% The head sub-queue carries the (negated) highest priority.
    add_p(out(Q), maybe_negate_priority(P)).
%% Attach a priority to the result tuple produced by out/1.
add_p({empty, Q}, _Priority) ->
    {empty, Q};
add_p({{value, V}, Q}, Priority) ->
    {{value, V, Priority}, Q}.
%% Concatenate two priority queues; items from A come out before items
%% from B at equal priority. When one side is a plain queue it is merged
%% into the other's priority-0 slot. The splitwith predicate selects
%% sub-queues whose stored (negated) key is below 0 or 'infinity', i.e.
%% priorities strictly above 0, which must stay ahead of the new 0 entry.
join(A, {queue, [], [], 0}) ->
    A;
join({queue, [], [], 0}, B) ->
    B;
join({queue, AIn, AOut, ALen}, {queue, BIn, BOut, BLen}) ->
    %% Plain-queue append: A's items (front ++ reversed rear) are spliced
    %% ahead of B's front list.
    {queue, BIn, AOut ++ lists:reverse(AIn, BOut), ALen + BLen};
join(A = {queue, _, _, _}, {pqueue, BPQ}) ->
    {Pre, Post} =
        lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ),
    %% Insert A at priority 0: either create the slot or join with it.
    Post1 = case Post of
                [] -> [ {0, A} ];
                [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
                _ -> [ {0, A} | Post ]
            end,
    {pqueue, Pre ++ Post1};
join({pqueue, APQ}, B = {queue, _, _, _}) ->
    %% Mirror image of the clause above, with B appended after A's items.
    {Pre, Post} =
        lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ),
    Post1 = case Post of
                [] -> [ {0, B} ];
                [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
                _ -> [ {0, B} | Post ]
            end,
    {pqueue, Pre ++ Post1};
join({pqueue, APQ}, {pqueue, BPQ}) ->
    {pqueue, merge(APQ, BPQ, [])}.
%% Merge two sorted pqueue entry lists (keys are negated priorities with
%% an optional leading 'infinity'). Sub-queues with equal keys are joined;
%% the explicit 'PA == infinity' guard is needed because the atom
%% 'infinity' compares greater than any integer in Erlang term order.
merge([], BPQ, Acc) ->
    lists:reverse(Acc, BPQ);
merge(APQ, [], Acc) ->
    lists:reverse(Acc, APQ);
merge([{P, A}|As], [{P, B}|Bs], Acc) ->
    %% Same priority on both sides: join the two sub-queues, A first.
    merge(As, Bs, [ {P, join(A, B)} | Acc ]);
merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
    merge(As, Bs, [ {PA, A} | Acc ]);
merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
    merge(As, Bs, [ {PB, B} | Acc ]).
%% Keep only the items for which Pred(Item) returns true, preserving
%% each kept item's priority.
filter(Pred, Q) ->
    Keep = fun (Value, Priority, Acc) ->
                   case Pred(Value) of
                       true -> in(Value, Priority, Acc);
                       false -> Acc
                   end
           end,
    fold(Keep, new(), Q).
%% Fold Fun(Item, Priority, Acc) over the queue in priority order,
%% consuming the queue via out_p/1.
fold(Fun, Init, Q) ->
    case out_p(Q) of
        {empty, _Rest} ->
            Init;
        {{value, Value, Priority}, Rest} ->
            fold(Fun, Fun(Value, Priority, Init), Rest)
    end.
%% Priority of the next item that out/1 would return, or 'empty'.
highest({queue, [], [], 0}) ->
    empty;
highest({queue, _, _, _}) ->
    %% Plain-queue representation holds only priority-0 items.
    0;
highest({pqueue, [{TopP, _} | _]}) ->
    maybe_negate_priority(TopP).
%% Rebuild a well-formed plain queue from a rear list of the given length
%% (used by out/1 when the front list has just been exhausted). The two
%% newest elements are kept in the rear so subsequent inserts stay cheap.
r2f([], 0) ->
    {queue, [], [], 0};
r2f([_] = Rear, 1) ->
    {queue, [], Rear, 1};
r2f([NewA, NewB], 2) ->
    {queue, [NewA], [NewB], 2};
r2f([NewA, NewB | Rear], Len) ->
    {queue, [NewA, NewB], lists:reverse(Rear, []), Len}.
%% Integer priorities are stored negated so that the ascending order used
%% by lists:keysort/2 yields highest-priority-first; 'infinity' is passed
%% through unchanged and handled specially by the callers.
%% (Also strips the dataset-artifact text that was fused after the final
%% period, which made the module unparsable.)
maybe_negate_priority(infinity) -> infinity;
maybe_negate_priority(P) -> -P.
%
% reia_class: Build modules conforming to the gen_server behavior from Reia classes
% Copyright (C)2008 <NAME>
%
% Redistribution is permitted under the MIT license. See LICENSE for details.
%
-module(reia_class).
-export([build/1, call/2]).
-compile(export_all).
%% Convert a Reia class definition (abstract form) into a module that
%% implements the gen_server behaviour, then compile and load it via
%% reia_module:build/1. Any other term is rejected.
build({class, Line, Name, Functions}) ->
    Forms = process_functions(Name, Functions),
    reia_module:build({module, Line, Name, Forms});
build(_) ->
    {error, "invalid class"}.
%% Synchronously invoke a method on the Reia object living at Pid.
%% A {ok, Value} reply unwraps to Value; an {error, Error} reply is
%% re-raised in the caller via throw/1.
call(Pid, {_Method, _Arguments} = Request) ->
    Reply = gen_server:call(Pid, Request),
    case Reply of
        {ok, Value} -> Value;
        {error, Error} -> throw(Error)
    end.
%% Process the class's functions, substituting user-supplied versions for
%% the defaults where names collide. Returns the flat list of forms for
%% the generated module: start/start_link wrappers, the gen_server
%% callbacks, the rewritten initialize/1, and the dispatch_method/3
%% function built from the remaining methods.
process_functions(Module, Functions) ->
    %% Fold every class function into either the callback dict (when its
    %% name matches a default) or the plain method list.
    {FunctionDict, Methods} = lists:foldr(
        fun process_function/2,
        {dict:from_list(default_functions()), []},
        Functions
    ),
    %% Pull the initialize function out for special-case processing; the
    %% dict always contains one because default_functions/0 provides
    %% "initialize() -> nil." as a fallback.
    {ok, InitializeMethod} = dict:find(initialize, FunctionDict),
    FunctionDict2 = dict:erase(initialize, FunctionDict),
    %% start/start_link take as many arguments as initialize does.
    DefaultFunctions = start_functions(Module, function_arity(InitializeMethod)),
    ImmediateFunctions = [Function || {_, Function} <- dict:to_list(FunctionDict2)],
    lists:flatten([
        DefaultFunctions,
        ImmediateFunctions,
        initialize_method(InitializeMethod),
        process_methods(Methods)
    ]).
%% Fold step: a function whose name matches one of the default callbacks
%% replaces that default in the dict; anything else is collected as an
%% ordinary method (foldr keeps the original order).
process_function({function, _Line, Name, _Arity, _Clauses} = Function, {Dict, Functions}) ->
    case dict:is_key(Name, Dict) of
        true ->
            {dict:store(Name, Function, Dict), Functions};
        false ->
            {Dict, [Function | Functions]}
    end.
%% Rewrite the initialize method into an arity-1 function (its argument
%% list is packed into a single list parameter per clause).
initialize_method({function, Line, Name, _Arity, Clauses}) ->
    Rewritten = lists:map(fun initialize_clause/1, Clauses),
    {function, Line, Name, 1, Rewritten}.
%% Rewrite one clause of initialize: pack the original arguments into a
%% single cons-list parameter, prepend creation of an empty instance
%% variable dict, and append the final (SSA-renamed) instance-variable
%% dict so the clause returns the initial object state.
initialize_clause({clause, Line, Arguments, Guards, Expressions}) ->
    Arguments2 = [argument_list_cons(Arguments, Line)],
    %% __instance_variables_0 = dict:new()
    InitIvars = {match, Line,
        {var, Line, '__instance_variables_0'},
        {call, Line, {remote, Line, {atom, Line, dict}, {atom, Line, new}}, []}
    },
    %% The clause's value is the highest-numbered __instance_variables_N
    %% variable produced by the SSA transform of the body.
    ReturnValue = {var, Line, final_ivars(Expressions)},
    Expressions2 = lists:flatten([InitIvars, Expressions, ReturnValue]),
    {clause, Line, Arguments2, Guards, Expressions2}.
%% Convert the individual method definitions into a single
%% dispatch_method/3 function. The two original clauses duplicated the
%% clause-building and dispatch-building logic; they are merged here, with
%% the generated function's line number taken from the first method when
%% one exists and defaulting to 1 for an empty method list.
process_methods(Methods) ->
    Line = case Methods of
               [{function, FirstLine, _, _, _} | _] -> FirstLine;
               [] -> 1
           end,
    %% Decompose the method clauses (plus the built-in default methods)
    %% into dispatch_method clauses.
    Clauses = lists:flatten([process_method(Method) || Method <- Methods ++ default_methods()]),
    build_method_dispatch_function(Line, Clauses).
%% Generate the abstract form for the class's dispatch_method/3 function
%% from the already-converted method clauses.
build_method_dispatch_function(Line, Clauses) ->
    %% Append a catch-all clause that thunks unknown methods to
    %% method_missing/3; it is obtained by parsing literal Erlang source.
    MethodMissingThunk = "dispatch_method({Method, Args}, _, State) -> method_missing(State, Method, Args).",
    {function, _, _, _, MethodMissingClause} = parse_function(MethodMissingThunk),
    %% Single dispatch_method/3 form containing all clauses.
    [{function, Line, dispatch_method, 3, Clauses ++ MethodMissingClause}].
%% Turn one method's clauses into the corresponding dispatch_method
%% clauses, tagging each with the method name.
process_method({function, _Line, Name, _Arity, Clauses}) ->
    lists:map(fun (Clause) -> process_method_clause(Clause, Name) end, Clauses).
%% Build a dispatch_method/3 clause from an original method clause: the
%% head becomes {MethodName, ArgList}, the gen_server From is ignored,
%% and the state variable is bound as __instance_variables_0.
%% NOTE(review): only guard-free clauses ([] guards) are matched here;
%% a method clause that carries guards would crash — confirm intended.
process_method_clause({clause, Line, Arguments, [], Expressions}, Name) ->
    {clause, Line, [
        {tuple, Line, [{atom, Line, Name}, argument_list_cons(Arguments, Line)]},
        {var, Line, '_From'},
        {var, Line, '__instance_variables_0'}
    ], [], process_return_value(Line, Expressions)}.
%% Convert a method body so its last expression becomes a gen_server
%% reply: {reply, {ok, Value}, NewInstanceVariables}. An empty body is
%% treated as returning the atom 'nil'.
process_return_value(Line, []) ->
    process_return_value(Line, [{atom, Line, 'nil'}]);
process_return_value(Line, Expressions) ->
    [Result|Expressions2] = lists:reverse(Expressions),
    %% Bind the last expression to __method_return_value ...
    Result2 = {match, Line, {var, Line, '__method_return_value'}, Result},
    %% ... then reply with it plus the final SSA-renamed instance
    %% variable dict as the new gen_server state.
    Result3 = {tuple, Line, [
        {atom, Line, reply},
        {tuple, Line, [{atom, Line, ok}, {var, Line, '__method_return_value'}]},
        {var, Line, final_ivars(Expressions)}
    ]},
    lists:reverse([Result3,Result2|Expressions2]).
%% Name of the last SSA-transformed __instance_variables_N variable
%% appearing in the given expressions (e.g. '__instance_variables_3').
final_ivars(Expressions) ->
    {ok, Newest, _} = reia_visitor:transform(Expressions, 0, fun newest_ivars/2),
    list_to_atom("__instance_variables_" ++ integer_to_list(Newest)).
%% Visitor callback: track the highest N seen on any
%% __instance_variables_N variable node. Var nodes stop the walk (they
%% have no children of interest); other nodes are walked into.
newest_ivars(Newest, {var, _Line, Name} = Node) ->
    case atom_to_list(Name) of
        "__instance_variables_" ++ Suffix ->
            {stop, max(list_to_integer(Suffix), Newest), Node};
        _ ->
            {stop, Newest, Node}
    end;
newest_ivars(Newest, Node) ->
    {walk, Newest, Node}.
%% Build the abstract form of a list (nested cons cells terminated by
%% nil) from a list of argument forms.
argument_list_cons(Elements, Line) ->
    lists:foldr(fun (Element, Tail) -> {cons, Line, Element, Tail} end,
                {nil, Line}, Elements).
%% Default gen_server callbacks to incorporate into every Reia class,
%% written as literal Erlang source and parsed by default_function/1.
%% User-supplied functions with the same names override these entries
%% (see process_function/2). The source strings are part of the runtime
%% behaviour and must not be reformatted.
default_functions() ->
    [default_function(Function) || Function <- [
        "init(Args) -> {ok, initialize(Args)}.",
        "method_missing(_State, Method, _Args) -> throw({error, {Method, \"undefined\"}}).",
        "handle_call(Request, From, State) -> try dispatch_method(Request, From, State) catch throw:Error -> {reply, {error, Error}, State} end.",
        "handle_cast(_Msg, State) -> {noreply, State}.",
        "handle_info(_Info, State) -> {noreply, State}.",
        "terminate(_Reason, _State) -> ok.",
        "code_change(_OldVsn, State, _Extra) -> {ok, State}.",
        %% A bit sneaky here as this is an untransformed method: it is the
        %% fallback initialize when the class defines none.
        "initialize() -> nil."
    ]].
%% Parse one default-callback source string into its abstract form and
%% return it keyed by function name, ready for the callback dict.
default_function(Source) ->
    {function, _, Name, _, _} = Form = parse_function(Source),
    {Name, Form}.
%% Default methods that every Reia object responds to, as literal Erlang
%% source. They go through the same method pipeline as user methods, so a
%% class may override them. The source strings are runtime behaviour and
%% must not be reformatted.
default_methods() ->
    [parse_function(Function) || Function <- [
        "to_s() -> {string, <<\"#<Object>\">>}.",
        "inspect() -> {string, <<\"#<Object>\">>}."
    ]].
%% Generate the start/N and start_link/N wrappers for a class, both
%% taking the same number of arguments as the class's initialize.
start_functions(Module, Arity) ->
    [start_function(Module, Name, Arity) || Name <- ["start", "start_link"]].
%% Build one start wrapper by assembling Erlang source text and parsing
%% it, e.g. for Arity 2:
%%   start(Var1,Var2) ->
%%       {ok, Pid} = gen_server:start('Mod', [Var1,Var2], []),
%%       {object, {Pid, 'Mod'}}.
start_function(Module, Function, Arity) ->
    Vars = variable_list(Arity),
    String = [Function, "("] ++ Vars ++
        [") -> {ok, Pid} = gen_server:", Function, "('", Module, "', ["] ++ Vars ++ [
        "], []), {object, {Pid, '", Module, "'}}."],
    parse_function(lists:concat(String)).
%% Produce ["Var1", ",", "Var2", ...] — the comma-separated argument
%% names used when assembling start-wrapper source text.
variable_list(0) ->
    [];
variable_list(Size) ->
    Names = ["Var" ++ integer_to_list(N) || N <- lists:seq(1, Size)],
    lists:join(",", Names).
%% Intersperse "," between the elements of a non-empty list (an empty
%% list crashes with function_clause, as in the original).
add_commas(List) ->
    add_commas(List, []).

add_commas([Last], Acc) ->
    lists:reverse([Last | Acc]);
add_commas([Item | Rest], Acc) ->
    add_commas(Rest, [",", Item | Acc]).
%% Parse a single function definition from Erlang source text into its
%% abstract form; crashes (badmatch) on invalid source.
parse_function(Source) ->
    {ok, Tokens, _EndLocation} = erl_scan:string(Source),
    {ok, AbstractForm} = erl_parse:parse_form(Tokens),
    AbstractForm.
%% Return the arity of a function in Erlang abstract format.
%% (Strips the dataset-artifact text that was fused after the final
%% period, which made the module unparsable.)
function_arity({function, _Line, _Name, Arity, _Clauses}) ->
    Arity.
%%%-------------------------------------------------------------------
%%% @author ngunder
%%% @copyright (C) 2017, <COMPANY>
%%% @doc
%%%
%%% @end
%%% Created : 03. Dec 2017 09.42
%%%-------------------------------------------------------------------
-module(day3).
-author("ngunder").
%% --- Day 3: Spiral Memory ---
%%
%% You come across an experimental new kind of memory stored on an infinite two-dimensional grid.
%%
%% Each square on the grid is allocated in a spiral pattern starting at a location marked 1 and then counting up while
%% spiraling outward. For example, the first few squares are allocated like this:
%%
%% 17 16 15 14 13
%% 18 5 4 3 12
%% 19 6 1 2 11
%% 20 7 8 9 10
%% 21 22 23---> ...
%% While this is very space-efficient (no squares are skipped), requested data must be carried back to square 1 (the
%% location of the only access port for this memory system) by programs that can only move up, down, left, or right.
%% They always take the shortest path: the Manhattan Distance between the location of the data and square 1.
%%
%% For example:
%%
%% Data from square 1 is carried 0 steps, since it's at the access port.
%% Data from square 12 is carried 3 steps, such as: down, left, left.
%% Data from square 23 is carried only 2 steps: up twice.
%% Data from square 1024 must be carried 31 steps.
%% How many steps are required to carry the data from the square identified in your puzzle input all the way to the
%% access port?
%%
%% Your puzzle answer was 371.
%%
%% --- Part Two ---
%%
%% As a stress test on the system, the programs here clear the grid and then store the value 1 in square 1. Then, in
%% the same allocation order as shown above, they store the sum of the values in all adjacent squares, including
%% diagonals.
%%
%% So, the first few squares' values are chosen as follows:
%%
%% Square 1 starts with the value 1.
%% Square 2 has only one adjacent filled square (with value 1), so it also stores 1.
%% Square 3 has both of the above squares as neighbors and stores the sum of their values, 2.
%% Square 4 has all three of the aforementioned squares as neighbors and stores the sum of their values, 4.
%% Square 5 only has the first and fourth squares as neighbors, so it gets the value 5.
%% Once a square is written, its value does not change. Therefore, the first few squares would receive the following
%% values:
%%
%% 147 142 133 122 59
%% 304 5 4 2 57
%% 330 10 1 1 54
%% 351 11 23 25 26
%% 362 747 806---> ...
%% What is the first value written that is larger than your puzzle input?
%%
%% Your puzzle answer was 369601.
%%
%% Both parts of this puzzle are complete! They provide two gold stars: **
%%
%% At this point, you should return to your advent calendar and try another puzzle.
%%
%% Your puzzle input was 368078.
-define(INPUT, 368078).
-record(square, {x, y, val=0, pos}).
%% API
-export([part_a/0, part_b/0, part_b/1, test/0]).
%% Part A for the puzzle input: Manhattan distance of square ?INPUT.
part_a() ->
    part_a(?INPUT).
%% Part B for the puzzle input: first stored value exceeding ?INPUT.
part_b() ->
    part_b(?INPUT).
%% Walk the spiral starting at position 1 until a stored value beats Num.
part_b(Num) ->
    part_b(Num, 1).
%% Check the value stored at spiral position Pos; recurse to the next
%% position until it exceeds Num, giving up after 99 positions.
part_b(_Num, 100) ->
    %% Safety valve so a too-large target cannot loop forever.
    io:format("Stop...Number is too large");
part_b(Num, Pos) ->
    %% gen_map returns the spiral newest-square-first, so its head holds
    %% the value written at position Pos.
    [#square{val = Val} | _] = gen_map(Pos - 1, fun part_b_val/2),
    if
        Val > Num -> Val;
        true -> part_b(Num, Pos + 1)
    end.
%% Manhattan distance from square Num back to the access port (square 1
%% at the origin).
part_a(Num) ->
    [#square{x = X, y = Y} | _] = gen_map(Num - 1, fun part_a_val/2),
    abs(X) + abs(Y).
%% Generate Num spiral squares beyond the centre, computing each value
%% with Fun. The walk is seeded with square 1 (value 1) at the origin,
%% about to step to {1, 0} in ring 1.
gen_map(Num, Fun) ->
    Origin = #square{x = 0, y = 0, val = 1, pos = 1},
    gen_map(Num, 2, 1, {1, 0}, [Origin], Fun).
%% Walk the spiral one square at a time.
%% Args: remaining count, current position number, current ring radius
%% Max, current {X, Y}, accumulator (newest square first), value fun F.
%% Guard order matters: corner/edge tests must precede the catch-all.
gen_map(0, _, _, _, Acc, _) ->
    %% Done: accumulator holds the squares, newest first.
    Acc;
% Bottom-right corner of the ring: grow the ring (Max+1) and step right.
gen_map(Num, Cur, Max, {CPosX, CPosY}, Acc, F) when CPosX =:= Max andalso CPosY =:= -Max ->
    S = #square{x=CPosX, y=CPosY, pos=Cur},
    gen_map(Num-1, Cur+1, Max+1, {CPosX+1, CPosY}, [S#square{val=F(S, Acc)}|Acc], F);
% Right edge: move up until the top-right corner.
gen_map(Num, Cur, Max, {CPosX, CPosY}, Acc, F) when CPosX =:= Max andalso CPosY < Max ->
    S = #square{x=CPosX, y=CPosY, pos=Cur},
    gen_map(Num-1, Cur+1, Max, {CPosX, CPosY+1}, [S#square{val=F(S, Acc)}|Acc], F);
% Top edge: move left until the top-left corner.
gen_map(Num, Cur, Max, {CPosX, CPosY}, Acc, F) when CPosX > -Max andalso CPosY =:= Max ->
    S = #square{x=CPosX, y=CPosY, pos=Cur},
    gen_map(Num-1, Cur+1, Max, {CPosX-1, CPosY}, [S#square{val=F(S, Acc)}|Acc], F);
% Left edge: move down until the bottom-left corner.
gen_map(Num, Cur, Max, {CPosX, CPosY}, Acc, F) when CPosX =:= -Max andalso CPosY > -Max ->
    S = #square{x=CPosX, y=CPosY, pos=Cur},
    gen_map(Num-1, Cur+1, Max, {CPosX, CPosY-1}, [S#square{val=F(S, Acc)}|Acc], F);
% Bottom edge (catch-all): move right towards the bottom-right corner.
gen_map(Num, Cur, Max, {CPosX, CPosY}, Acc, F) ->
    S = #square{x=CPosX, y=CPosY, pos=Cur},
    gen_map(Num-1, Cur+1, Max, {CPosX+1, CPosY}, [S#square{val=F(S, Acc)}|Acc], F).
%% Part A only needs the coordinates of each square, so every generated
%% square stores a dummy value of 0.
part_a_val(_Square, _Acc) ->
    0.
%% Part B stress-test rule: a new square stores the sum of the values of
%% all already-filled adjacent squares (including diagonals).
part_b_val(Square = #square{}, Grid) ->
    Neighbours = get_nabo(Square, Grid, []),
    lists:foldl(fun (#square{val = V}, Sum) -> Sum + V end, 0, Neighbours).
%% Collect every square in the grid within Chebyshev distance 1 of the
%% target square, skipping the square that shares the target's position.
get_nabo(_Target, [], Found) ->
    Found;
get_nabo(Target = #square{pos = Pos}, [#square{pos = Pos} | Rest], Found) ->
    %% Same spiral position: the target itself is not its own neighbour.
    get_nabo(Target, Rest, Found);
get_nabo(Target = #square{x = TX, y = TY}, [Candidate = #square{x = CX, y = CY} | Rest], Found)
  when abs(TX - CX) =< 1, abs(TY - CY) =< 1 ->
    get_nabo(Target, Rest, [Candidate | Found]);
get_nabo(Target, [_Far | Rest], Found) ->
    get_nabo(Target, Rest, Found).
%% Self-test using the known answers from the puzzle description.
%% (Strips the dataset-artifact text that was fused after the final
%% period, which made the module unparsable.)
test() ->
    0 = part_a(1),
    3 = part_a(12),
    2 = part_a(23),
    31 = part_a(1024),
    1 = part_b(1),
    1 = part_b(2),
    2 = part_b(3),
    4 = part_b(4),
    5 = part_b(5),
    806 = part_b(23),
    pass.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.