code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
%%%-------------------------------------------------------------------
%% wrapper implementations for the APIs & RPCs for the gateway service
%% basically this module handles various RPC and function calls from grpcbox_stream
%% and routes it to the required application specific handler module
%% due to issues with the rust grpc client, we have amalgamated what were
%% previously distinct grpc services ( such as state channels and routing )
%% which had module specific implementations
%% into a single service as defined in the gateway proto
%% rather than combining the server side implementations into one
%% single module, this top level module was added instead and simply
%% routes incoming RPCs to their service specific module
%% this way we can maintain functional separation of concerns
%%%-------------------------------------------------------------------
-module(helium_gateway_service).
-behavior(helium_gateway_bhvr).
-include("../grpc/autogen/server/gateway_pb.hrl").
%% common APIs
-export([
init/2,
handle_info/3
]).
%% routing APIs
-export([
routing/2
]).
%% state channel related APIs
-export([
is_active_sc/2,
is_overpaid_sc/2,
close_sc/2,
follow_sc/2
]).
%%%-------------------------------------------------------------------
%% common API implementations
%%%-------------------------------------------------------------------
%% Only the streaming RPCs need a real init, since they are the ones
%% likely to manage their own stream state; the unary RPCs simply
%% return the StreamState they were given, untouched.
-spec init(atom(), grpcbox_stream:t()) -> grpcbox_stream:t().
%%
%% state channel unary APIs: nothing to initialise
%%
init(is_active_sc, StreamState) ->
    StreamState;
init(is_overpaid_sc, StreamState) ->
    StreamState;
init(close_sc, StreamState) ->
    StreamState;
%%
%% routing streaming API
%%
init(routing = RPC, StreamState) ->
    helium_routing_impl:init(RPC, StreamState);
%%
%% state channel streaming API
%%
init(follow_sc = RPC, StreamState) ->
    helium_state_channels_impl:init(RPC, StreamState).
%%
%% Any API can potentially handle info msgs; route each to the
%% handler module responsible for its RPC. Unknown RPCs are logged
%% and otherwise ignored, leaving the stream state untouched.
%%
-spec handle_info(atom(), any(), grpcbox_stream:t()) -> grpcbox_stream:t().
handle_info(routing, Msg, StreamState) ->
    helium_routing_impl:handle_info(Msg, StreamState);
%% all state channel RPCs share one handler module
handle_info(RPC, Msg, StreamState) when RPC =:= is_active_sc;
                                        RPC =:= is_overpaid_sc;
                                        RPC =:= close_sc;
                                        RPC =:= follow_sc ->
    helium_state_channels_impl:handle_info(Msg, StreamState);
handle_info(_RPC, _Msg, StreamState) ->
    lager:warning("got unhandled info msg, RPC ~p, Msg, ~p", [_RPC, _Msg]),
    StreamState.
%%%-------------------------------------------------------------------
%% Routing RPC implementations
%%%-------------------------------------------------------------------
%% @doc Streaming routing RPC: forwarded verbatim to the routing
%% implementation module, which manages the stream state.
-spec routing(gateway_pb:gateway_routing_req_v1_pb(), grpcbox_stream:t()) ->
    {ok, grpcbox_stream:t()} | grpcbox_stream:grpc_error_response().
routing(Msg, StreamState) -> helium_routing_impl:routing(Msg, StreamState).
%%%-------------------------------------------------------------------
%% State channel RPC implementations
%%%-------------------------------------------------------------------
%% @doc Unary RPC: check whether a state channel is active.
%% Delegated to the state channels implementation module.
-spec is_active_sc(
    ctx:ctx(),
    gateway_pb:gateway_sc_is_active_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()} | grpcbox_stream:grpc_error_response().
is_active_sc(Ctx, Message) -> helium_state_channels_impl:is_active_sc(Ctx, Message).
%% @doc Unary RPC: check whether a state channel is overpaid.
%% Delegated to the state channels implementation module.
-spec is_overpaid_sc(
    ctx:ctx(),
    gateway_pb:gateway_sc_is_overpaid_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()} | grpcbox_stream:grpc_error_response().
is_overpaid_sc(Ctx, Message) -> helium_state_channels_impl:is_overpaid_sc(Ctx, Message).
%% @doc Unary RPC: request closing of a state channel. Delegated to
%% the state channels implementation module. Note the spec has no
%% error alternative, unlike the other unary RPCs in this module.
-spec close_sc(
    ctx:ctx(),
    gateway_pb:gateway_sc_close_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()}.
close_sc(Ctx, Message) -> helium_state_channels_impl:close_sc(Ctx, Message).
%% @doc Streaming RPC: follow state channel events. Delegated to the
%% state channels implementation module, which manages the stream.
%% The request type now carries the `_pb' suffix used by every other
%% gateway_pb type reference in this module.
-spec follow_sc(
    gateway_pb:gateway_sc_follow_req_v1_pb(),
    grpcbox_stream:t()
) -> {ok, grpcbox_stream:t()} | grpcbox_stream:grpc_error_response().
follow_sc(Msg, StreamState) -> helium_state_channels_impl:follow_sc(Msg, StreamState).
%% -------------------------------------------------------------------
%%
%% riak_kv_mrc_sink: A simple process to act as a Pipe sink for
%% MapReduce queries
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc This FSM acts as a Riak Pipe sink, and dumbly accumulates
%% messages received from the pipe, until it is asked to send them to
%% its owner. The owner is whatever process started this FSM.
%% This FSM will speak both `raw' and `fsm' sink types (it
%% answers appropriately to each, without parameterization).
%% The FSM enforces a soft cap on the number of results and logs
%% accumulated when receiving `fsm' sink type messages. When the
%% number of results+logs that have been delivered exceeds the cap
%% between calls to {@link next/1}, the sink stops delivering result
%% acks to workers. The value of this cap can be specified by
%% including a `buffer' property in the `Options' parameter of {@link
%% start/2}, or by setting the `mrc_sink_buffer' environment variable
%% in the `riak_kv' application. If neither setting is specified, or
%% they are not specified as non-negative integers, the default
%% (currently 1000) is used.
%% Messages are delivered to the owners as an erlang message that is a
%% `#kv_mrc_pipe{}' record. The `logs' field is a list of log messages
%% received, ordered oldest to youngest, each having the form
%% `{PhaseId, Message}'. The `results' field is an orddict keyed by
%% `PhaseId', with each value being a list of results received from
%% that phase, ordered oldest to youngest. The `ref' field is the
%% reference from the `#pipe{}' record. The `done' field is `true' if
%% the `eoi' message has been received, or `false' otherwise.
%% There should be three states: `which_pipe', `collect_output', and
%% `send_output'.
%% The FSM starts in `which_pipe', and waits there until it
%% is told which pipe to expect output from.
%% From `which_pipe', the FSM moves to `collect_output'. While in
%% `collect_output', the FSM simply collects `#pipe_log{}',
%% `#pipe_result{}', and `#pipe_eoi{}' messages.
%% If the FSM has received logs, results, or the eoi before it
%% receives a `next' event, it sends everything it has accumulated to
%% the owner, wrapped in a `#kv_mrc_sink{}' record, clears its buffers,
%% and returns to collecting pipe messages.
%% If the FSM has not received any logs, results, or the eoi before it
%% receives a `next' event, it enters the `send_output' state. As soon
%% as the FSM receives any log, result, or eoi message in the
%% `send_output' state, it sends that message to the owner process,
%% and then returns to the `collect_output' state.
%% The FSM only exits on its own in three cases. The first is when its
%% owner exits. The second is when the builder of the pipe for which
%% it is consuming messages exits abnormally. The third is after it
%% delivers a `#kv_mrc_sink{}' in which it has marked
%% `done=true'.
-module(riak_kv_mrc_sink).
-export([
start/2,
start_link/2,
use_pipe/2,
next/1,
stop/1,
merge_outputs/1,
init/1,
which_pipe/2, which_pipe/3,
collect_output/2, collect_output/3,
send_output/2, send_output/3,
handle_event/3,
handle_sync_event/4,
handle_info/3,
terminate/3,
code_change/4
]).
-behaviour(gen_fsm).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-include_lib("riak_pipe/include/riak_pipe.hrl").
-include("riak_kv_mrc_sink.hrl").
-define(BUFFER_SIZE_DEFAULT, 1000).
-record(state, {
owner :: pid(),
builder :: pid(),
ref :: reference(),
results=[] :: [{PhaseId::term(), Results::list()}],
delayed_acks=[] :: list(),
logs=[] :: list(),
done=false :: boolean(),
buffer_max :: integer(),
buffer_left :: integer()
}).
%% @doc Start a sink process under the riak_kv_mrc_sink supervisor.
%% `OwnerPid' will receive the `#kv_mrc_sink{}' messages; `Options'
%% may include `{buffer, N}' to set the soft result cap.
start(OwnerPid, Options) ->
    riak_kv_mrc_sink_sup:start_sink(OwnerPid, Options).
%% @doc Start a sink FSM directly, linked to the caller, with the
%% caller as owner.
start_link(OwnerPid, Options) ->
    gen_fsm:start_link(?MODULE, [OwnerPid, Options], []).
%% @doc Tell the sink which pipe to consume output from (synchronous;
%% replies `ok' once the sink has moved to collecting output).
use_pipe(Sink, Pipe) ->
    gen_fsm:sync_send_event(Sink, {use_pipe, Pipe}).
%% @doc Trigger the send of the next result/log/eoi batch received.
%% Asynchronous: the batch arrives later as a `#kv_mrc_sink{}' message
%% to the owner.
next(Sink) ->
    gen_fsm:send_event(Sink, next).
%% @doc Stop the sink via its supervisor.
stop(Sink) ->
    riak_kv_mrc_sink_sup:terminate_sink(Sink).
%% @doc Convenience: If outputs are collected as a list of orddicts,
%% with the first being the most recently received, merge them into
%% one orddict.
%%
%% That is, for one keep, our input should look like:
%%   [ [{0, [G,H,I]}], [{0, [D,E,F]}], [{0, [A,B,C]}] ]
%% And we want it to come out as:
%%   [{0, [A,B,C,D,E,F,G,H,I]}]
-spec merge_outputs([ [{integer(), list()}] ]) -> [{integer(), list()}].
merge_outputs(Batches) ->
    %% each orddict keeps its entries oldest->newest; the fold visits
    %% batches newest->oldest, so each older batch is simply stuck on
    %% the front of what has been merged so far
    Prepend = fun(_Phase, Older, Merged) -> Older ++ Merged end,
    MergeOne = fun(Batch, Merged) -> orddict:merge(Prepend, Batch, Merged) end,
    lists:foldl(MergeOne, orddict:new(), Batches).
%% gen_fsm exports
%% @doc gen_fsm init callback. Monitors the owner so the sink can
%% shut down as soon as the owner dies, and computes the soft buffer
%% cap (see buffer_size/1). Starts in `which_pipe', waiting to be
%% told which pipe to consume via use_pipe/2.
init([OwnerPid, Options]) ->
    erlang:monitor(process, OwnerPid),
    Buffer = buffer_size(Options),
    {ok, which_pipe, #state{owner=OwnerPid,
                            buffer_max=Buffer,
                            buffer_left=Buffer}}.
%%% which_pipe: waiting to find out what pipe we're listening to
%% async events are ignored until a pipe has been assigned
which_pipe(_, State) ->
    {next_state, which_pipe, State}.
%% @doc Accept the pipe whose output we will consume. Monitors the
%% pipe builder (so an abnormal builder exit can stop the sink) and
%% records the sink fitting's reference for matching pipe messages.
which_pipe({use_pipe, #pipe{builder=Builder, sink=Sink}}, _From, State) ->
    erlang:monitor(process, Builder),
    {reply, ok, collect_output,
     State#state{builder=Builder, ref=Sink#fitting.ref}};
which_pipe(_, _, State) ->
    %% any other sync event: stay put (no reply is sent)
    {next_state, which_pipe, State}.
%%% collect_output: buffering results and logs until asked for them
%% @doc Async events: `next' from the owner, or pipe messages
%% delivered with the `raw' sink type (unacked, so no buffer-cap
%% enforcement here beyond counting).
collect_output(next, State) ->
    case State#state.done of
        true ->
            %% eoi already seen: deliver everything and shut down
            NewState = send_to_owner(State),
            {stop, normal, NewState};
        false ->
            case has_output(State) of
                true ->
                    NewState = send_to_owner(State),
                    {next_state, collect_output, NewState};
                false ->
                    %% nothing to send yet, prepare to send as soon as
                    %% there is something
                    {next_state, send_output, State}
            end
    end;
collect_output(#pipe_result{ref=Ref, from=PhaseId, result=Res},
               #state{ref=Ref, results=Acc, buffer_left=Left}=State) ->
    NewAcc = add_result(PhaseId, Res, Acc),
    {next_state, collect_output,
     State#state{results=NewAcc, buffer_left=Left-1}};
collect_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
               #state{ref=Ref, logs=Acc, buffer_left=Left}=State) ->
    %% logs are accumulated newest-first; reversed on delivery
    {next_state, collect_output,
     State#state{logs=[{PhaseId, Msg}|Acc], buffer_left=Left-1}};
collect_output(#pipe_eoi{ref=Ref}, #state{ref=Ref}=State) ->
    %% remember eoi; it is reported to the owner on the next `next'
    {next_state, collect_output, State#state{done=true}};
collect_output(_, State) ->
    %% ignore events that do not match our pipe reference
    {next_state, collect_output, State}.
%% @doc Sync events: pipe messages delivered with the `fsm' sink
%% type. Results and logs are acked via maybe_ack/2 so workers can be
%% held up once the buffer cap has been reached.
collect_output(#pipe_result{ref=Ref, from=PhaseId, result=Res},
               From,
               #state{ref=Ref, results=Acc}=State) ->
    NewAcc = add_result(PhaseId, Res, Acc),
    maybe_ack(From, State#state{results=NewAcc});
collect_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
               From,
               #state{ref=Ref, logs=Acc}=State) ->
    maybe_ack(From, State#state{logs=[{PhaseId, Msg}|Acc]});
collect_output(#pipe_eoi{ref=Ref}, _From, #state{ref=Ref}=State) ->
    %% eoi is always acked immediately; nothing more will arrive
    {reply, ok, collect_output, State#state{done=true}};
collect_output(_, _, State) ->
    %% unknown sync event: no reply is sent, the caller stays blocked
    {next_state, collect_output, State}.
%% @doc Ack a worker's sync delivery immediately while there is
%% buffer room, or delay the ack (blocking the worker) once the cap
%% has been hit. Delayed acks are flushed by send_to_owner/1.
maybe_ack(_From, #state{buffer_left=Left}=State) when Left > 0 ->
    %% there's room for more, tell the worker it can continue
    {reply, ok, collect_output, State#state{buffer_left=Left-1}};
maybe_ack(From, #state{buffer_left=Left, delayed_acks=Delayed}=State) ->
    %% there's no more room, hold up the worker
    %% not actually necessary to update buffer_left, but it could make
    %% for interesting stats
    {next_state, collect_output,
     State#state{buffer_left=Left-1, delayed_acks=[From|Delayed]}}.
%% send_output: waiting for output to send, after having been asked
%% for some while there wasn't any
%% @doc Async (`raw' sink type) pipe messages: forward the first
%% message received straight to the owner, then resume collecting.
send_output(#pipe_result{ref=Ref, from=PhaseId, result=Res},
            #state{ref=Ref, results=Acc}=State) ->
    NewAcc = add_result(PhaseId, Res, Acc),
    NewState = send_to_owner(State#state{results=NewAcc}),
    {next_state, collect_output, NewState};
send_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
            #state{ref=Ref, logs=Acc}=State) ->
    NewState = send_to_owner(State#state{logs=[{PhaseId, Msg}|Acc]}),
    {next_state, collect_output, NewState};
send_output(#pipe_eoi{ref=Ref}, #state{ref=Ref}=State) ->
    %% eoi while the owner is waiting: deliver `done=true' and stop
    NewState = send_to_owner(State#state{done=true}),
    {stop, normal, NewState};
send_output(_, State) ->
    %% ignore events that do not match our pipe reference
    {next_state, send_output, State}.
%% @doc Sync (`fsm' sink type) pipe messages while the owner is
%% waiting: deliver to the owner immediately, ack the worker, and
%% resume collecting (or stop after delivering the eoi).
send_output(#pipe_result{ref=Ref, from=PhaseId, result=Res},
            _From, #state{ref=Ref, results=Acc}=State) ->
    NewAcc = add_result(PhaseId, Res, Acc),
    NewState = send_to_owner(State#state{results=NewAcc}),
    {reply, ok, collect_output, NewState};
send_output(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
            _From, #state{ref=Ref, logs=Acc}=State) ->
    NewState = send_to_owner(State#state{logs=[{PhaseId, Msg}|Acc]}),
    {reply, ok, collect_output, NewState};
send_output(#pipe_eoi{ref=Ref}, _From, #state{ref=Ref}=State) ->
    NewState = send_to_owner(State#state{done=true}),
    %% 4-tuple stop: reply `ok' to the worker before terminating
    {stop, normal, ok, NewState};
send_output(_, _, State) ->
    %% unknown sync event: no reply is sent, the caller stays blocked
    {next_state, send_output, State}.
%% @doc All-state async events are not part of this FSM's protocol;
%% ignore them and remain in the current state.
handle_event(_Event, StateName, State) ->
    {next_state, StateName, State}.
%% @doc All-state sync events are not part of this FSM's protocol;
%% stay in the current state (note: no reply, so the caller waits).
handle_sync_event(_Event, _From, StateName, State) ->
    {next_state, StateName, State}.
%% Clusters containing nodes running Riak version 1.2 and previous
%% will send raw results, regardless of sink type. We can't block
%% these workers from sending raw results, but we can still track
%% these additions, and block other workers because of them.
handle_info(#pipe_result{ref=Ref, from=PhaseId, result=Res},
            StateName,
            #state{ref=Ref, results=Acc, buffer_left=Left}=State) ->
    NewAcc = add_result(PhaseId, Res, Acc),
    info_response(StateName,
                  State#state{results=NewAcc, buffer_left=Left-1});
handle_info(#pipe_log{ref=Ref, from=PhaseId, msg=Msg},
            StateName,
            #state{ref=Ref, logs=Acc, buffer_left=Left}=State) ->
    info_response(StateName,
                  State#state{logs=[{PhaseId, Msg}|Acc],
                              buffer_left=Left-1});
handle_info(#pipe_eoi{ref=Ref},
            StateName, #state{ref=Ref}=State) ->
    info_response(StateName, State#state{done=true});
handle_info({'DOWN', _, process, Pid, _Reason}, _,
            #state{owner=Pid}=State) ->
    %% exit as soon as the owner dies
    {stop, normal, State};
handle_info({'DOWN', _, process, Pid, Reason}, _,
            #state{builder=Pid}=State) when Reason /= normal ->
    %% don't stop when the builder exits 'normal', because that's
    %% probably just the pipe shutting down normally - wait for the
    %% owner to ask for the last outputs
    _ = lager:warning("Pipe builder down. Reason: ~p", [Reason]),
    {stop, normal, State};
handle_info(_, StateName, State) ->
    %% drain anything unexpected to keep the mailbox clean
    {next_state, StateName, State}.
%% continue buffering, unless we've been waiting to reply; stop if we
%% were waiting to reply and we've received eoi
%% @doc Common continuation for raw (handle_info) pipe messages.
info_response(collect_output, State) ->
    {next_state, collect_output, State};
info_response(send_output, #state{done=Done}=State) ->
    %% the owner is waiting: ship the batch now
    NewState = send_to_owner(State),
    if Done -> {stop, normal, NewState};
       true -> {next_state, collect_output, NewState}
    end.
%% @doc gen_fsm terminate callback; no cleanup is required.
terminate(_Reason, _StateName, _State) ->
    ok.
%% @doc gen_fsm code_change callback; the state needs no conversion.
code_change(_OldVsn, StateName, State, _Extra) ->
    {ok, StateName, State}.
%% internal
%% @doc True iff at least one result or log message is buffered.
has_output(#state{results=[], logs=[]}) ->
    false;
has_output(_) ->
    true.
%% also clears buffers
%% @doc Deliver everything buffered to the owner as a #kv_mrc_sink{}
%% message, release any workers whose acks were delayed, and reset
%% the buffers and the remaining-room counter.
send_to_owner(#state{owner=Owner, ref=Ref,
                     results=Results, logs=Logs, done=Done,
                     buffer_max=Max, delayed_acks=Delayed}=State) ->
    Owner ! #kv_mrc_sink{ref=Ref,
                         results=finish_results(Results),
                         logs=lists:reverse(Logs),
                         done=Done},
    _ = [ gen_fsm:reply(From, ok) || From <- Delayed ],
    State#state{results=[], logs=[],
                buffer_left=Max, delayed_acks=[]}.
%% results are kept as lists in a proplist; within each phase's list
%% the newest result is first (reversed on delivery)
add_result(PhaseId, Result, Acc) ->
    case lists:keytake(PhaseId, 1, Acc) of
        false ->
            %% first result seen for this phase
            [{PhaseId, [Result]} | Acc];
        {value, {PhaseId, Existing}, Others} ->
            %% cons onto the phase's list and move it to the front
            [{PhaseId, [Result | Existing]} | Others]
    end.
%% transform the proplist buffers into orddicts time-ordered
%% (sort by phase id, and flip each phase's list to oldest-first)
finish_results(Results) ->
    Fix = fun({PhaseId, Reversed}) -> {PhaseId, lists:reverse(Reversed)} end,
    lists:map(Fix, lists:keysort(1, Results)).
%% choose buffer size, given Options, app env, default
%% @doc Precedence: `{buffer, N}' in Options, then the riak_kv
%% `mrc_sink_buffer' app env, then ?BUFFER_SIZE_DEFAULT. Only
%% non-negative integers are accepted from either source.
-spec buffer_size(list()) -> non_neg_integer().
buffer_size(Options) ->
    case buffer_size_options(Options) of
        {ok, Size} -> Size;
        false ->
            case buffer_size_app_env() of
                {ok, Size} -> Size;
                false ->
                    ?BUFFER_SIZE_DEFAULT
            end
    end.
%% @doc Extract a valid `{buffer, N}' option (N a non-negative
%% integer) from an option list; `false' when absent or malformed.
-spec buffer_size_options(list()) -> {ok, non_neg_integer()} | false.
buffer_size_options(Options) ->
    valid_buffer_option(lists:keyfind(buffer, 1, Options)).

%% accept only a well-formed, non-negative buffer size
valid_buffer_option({buffer, Size}) when is_integer(Size), Size >= 0 ->
    {ok, Size};
valid_buffer_option(_) ->
    false.
%% @doc Read the `mrc_sink_buffer' size from the riak_kv application
%% environment; `false' when unset or not a non-negative integer.
-spec buffer_size_app_env() -> {ok, non_neg_integer()} | false.
buffer_size_app_env() ->
    case application:get_env(riak_kv, mrc_sink_buffer) of
        {ok, Size} when is_integer(Size), Size >= 0 ->
            {ok, Size};
        _ ->
            false
    end.
%% TEST
-ifdef(TEST).
%% @doc EUnit generator: runs the buffer-cap scenario for every
%% combination of size source (option / app env / default) and
%% delivery path (async event / sync event / raw erlang message).
buffer_size_test_() ->
    Tests = [ {"buffer option", 5, [{buffer, 5}], []},
              {"buffer app env", 5, [], [{mrc_sink_buffer, 5}]},
              {"buffer default", ?BUFFER_SIZE_DEFAULT, [], []} ],
    FillFuns = [ {"send_event", fun gen_fsm:send_event/2},
                 {"sync_send_event", fun gen_fsm:sync_send_event/2},
                 {"erlang:send", fun(S, R) -> S ! R, ok end} ],
    {foreach,
     fun() -> application:load(riak_kv) end,
     fun(_) -> application:unload(riak_kv) end,
     [buffer_size_test_helper(Name, FillFun, Size, Options, AppEnv)
      || {Name, Size, Options, AppEnv} <- Tests,
         FillFun <- FillFuns]}.
%% @doc Build one EUnit test: start a sink, fill its buffer via
%% `FillFun' until the cap is reached, verify that one more sync
%% delivery blocks (times out), then drain with next/1 and check
%% that all results (including the blocked one) and the delayed ack
%% arrive at the owner. The stray non-Erlang text that followed the
%% closing -endif. (dataset extraction residue, a syntax error) has
%% been removed.
buffer_size_test_helper(Name, {FillName, FillFun}, Size, Options, AppEnv) ->
    {Name++" "++FillName,
     fun() ->
             application:load(riak_kv),
             [ application:set_env(riak_kv, K, V) || {K, V} <- AppEnv ],
             %% start up our sink
             {ok, Sink} = ?MODULE:start_link(self(), Options),
             Ref = make_ref(),
             Pipe = #pipe{builder=self(),
                          sink=#fitting{pid=Sink, ref=Ref}},
             ?MODULE:use_pipe(Sink, Pipe),
             %% fill its buffer
             [ ok = FillFun(
                      Sink,
                      #pipe_result{from=tester, ref=Ref, result=I})
               || I <- lists:seq(1, Size) ],
             %% ensure extra result will block
             {'EXIT',{timeout,{gen_fsm,sync_send_event,_}}} =
                 (catch gen_fsm:sync_send_event(
                          Sink,
                          #pipe_result{from=tester, ref=Ref, result=Size+1},
                          1000)),
             %% now drain what's there
             ?MODULE:next(Sink),
             %% make sure that all results were received, including
             %% blocked one
             receive
                 #kv_mrc_sink{ref=Ref, results=[{tester,R}]} ->
                     ?assertEqual(Size+1, length(R))
             end,
             %% make sure that the delayed ack was received
             receive
                 {GenFsmRef, ok} when is_reference(GenFsmRef) ->
                     ok
             end
     end}.
-endif.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% @author <NAME> <<EMAIL>>
%% @author <NAME> <<EMAIL>>
%% @version 1.0
%% @doc Functional, extendible arrays. Arrays can have fixed size, or
%% can grow automatically as needed. A default value is used for entries
%% that have not been explicitly set.
%%
%% Arrays uses <b>zero</b> based indexing. This is a deliberate design
%% choice and differs from other erlang datastructures, e.g. tuples.
%%
%% Unless specified by the user when the array is created, the default
%% value is the atom `undefined'. There is no difference between an
%% unset entry and an entry which has been explicitly set to the same
%% value as the default one (cf. {@link reset/2}). If you need to
%% differentiate between unset and set entries, you must make sure that
%% the default value cannot be confused with the values of set entries.
%%
%% The array never shrinks automatically; if an index `I' has been used
%% successfully to set an entry, all indices in the range [0,`I'] will
%% stay accessible unless the array size is explicitly changed by
%% calling {@link resize/2}.
%%
%% Examples:
%% ```
%% %% Create a fixed-size array with entries 0-9 set to 'undefined'
%% A0 = array:new(10).
%% 10 = array:size(A0).
%%
%% %% Create an extendible array and set entry 17 to 'true',
%% %% causing the array to grow automatically
%% A1 = array:set(17, true, array:new()).
%% 18 = array:size(A1).
%%
%% %% Read back a stored value
%% true = array:get(17, A1).
%%
%% %% Accessing an unset entry returns the default value
%% undefined = array:get(3, A1).
%%
%% %% Accessing an entry beyond the last set entry also returns the
%% %% default value, if the array does not have fixed size
%% undefined = array:get(18, A1).
%%
%% %% "sparse" functions ignore default-valued entries
%% A2 = array:set(4, false, A1).
%% [{4, false}, {17, true}] = array:sparse_to_orddict(A2).
%%
%% %% An extendible array can be made fixed-size later
%% A3 = array:fix(A2).
%%
%% %% A fixed-size array does not grow automatically and does not
%% %% allow accesses beyond the last set entry
%% {'EXIT',{badarg,_}} = (catch array:set(18, true, A3)).
%% {'EXIT',{badarg,_}} = (catch array:get(18, A3)).
%% '''
%% @type array(). A functional, extendible array. The representation is
%% not documented and is subject to change without notice. Note that
%% arrays cannot be directly compared for equality.
-module(array).
-export([new/0, new/1, new/2, is_array/1, set/3, get/2, size/1,
sparse_size/1, default/1, reset/2, to_list/1, sparse_to_list/1,
from_list/1, from_list/2, to_orddict/1, sparse_to_orddict/1,
from_orddict/1, from_orddict/2, map/2, sparse_map/2, foldl/3,
foldr/3, sparse_foldl/3, sparse_foldr/3, fix/1, relax/1, is_fix/1,
resize/1, resize/2]).
-export_type([array/0, array/1]).
%%-define(TEST,1).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% Developers:
%%
%% For OTP devs: Both tests and documentation is extracted from this
%% file, keep and update this file,
%% test are extracted with array_SUITE:extract_tests().
%% Doc with docb_gen array.erl
%%
%% The key to speed is to minimize the number of tests, on
%% large input. Always make the most probable path as short as possible.
%% In particular, keep in mind that for large trees, the probability of
%% a leaf node is small relative to that of an internal node.
%%
%% If you try to tweak the set_1 and get_1 loops: Measure, look at the
%% generated Beam code, and measure again! The argument order matters!
%% Representation:
%%
%% A tree is either a leaf, with LEAFSIZE elements (the "base"), an
%% internal node with LEAFSIZE+1 elements, or an unexpanded tree,
%% represented by a single integer: the number of elements that may be
%% stored in the tree when it is expanded. The last element of an
%% internal node caches the number of elements that may be stored in
%% each of its subtrees.
%%
%% Note that to update an entry in a tree of height h = log[b] n, the
%% total number of written words is (b+1)+(h-1)*(b+2), since tuples use
%% a header word on the heap. 4 is the optimal base for minimizing the
%% number of words written, but causes higher trees, which takes time.
%% The best compromise between speed and memory usage seems to lie
%% around 8-10. Measurements indicate that the optimum base for speed is
%% 24 - above that, it gets slower again due to the high memory usage.
%% Base 10 is a good choice, giving 2/3 of the possible speedup from
%% base 4, but only using 1/3 more memory. (Base 24 uses 65% more memory
%% per write than base 10, but the speedup is only 21%.)
-define(DEFAULT, undefined).
-define(LEAFSIZE, 10). % the "base"
-define(NODESIZE, ?LEAFSIZE). % (no reason to have a different size)
-define(NODEPATTERN(S), {_,_,_,_,_,_,_,_,_,_,S}). % NODESIZE+1 elements!
-define(NEW_NODE(S), % beware of argument duplication!
setelement((?NODESIZE+1),erlang:make_tuple((?NODESIZE+1),(S)),(S))).
-define(NEW_LEAF(D), erlang:make_tuple(?LEAFSIZE,(D))).
-define(NODELEAFS, ?NODESIZE*?LEAFSIZE).
%% These make the code a little easier to experiment with.
%% It turned out that using shifts (when NODESIZE=2^n) was not faster.
-define(reduce(X), ((X) div (?NODESIZE))).
-define(extend(X), ((X) * (?NODESIZE))).
%%--------------------------------------------------------------------------
-type element_tuple(T) ::
{T, T, T, T, T, T, T, T, T, T}
| {element_tuple(T), element_tuple(T), element_tuple(T),
element_tuple(T), element_tuple(T), element_tuple(T),
element_tuple(T), element_tuple(T), element_tuple(T),
element_tuple(T), non_neg_integer()}.
-type elements(T) :: non_neg_integer()
| element_tuple(T)
| nil(). % kill reference, for GC
-record(array, {size :: non_neg_integer(), %% number of defined entries
max :: non_neg_integer(), %% maximum number of entries
%% in current tree
default, %% the default value (usually 'undefined')
elements :: elements(_) %% the tuple tree
}).
-type array() :: array(term()).
-opaque array(Type) ::
#array{default :: Type, elements :: elements(Type)}.
%%
%% Types
%%
-type array_indx() :: non_neg_integer().
-type array_opt() :: {'fixed', boolean()} | 'fixed'
| {'default', Type :: term()}
| {'size', N :: non_neg_integer()}
| (N :: non_neg_integer()).
-type array_opts() :: array_opt() | [array_opt()].
-type indx_pair(Type) :: {Index :: array_indx(), Type}.
-type indx_pairs(Type) :: [indx_pair(Type)].
%%--------------------------------------------------------------------------
%% @doc Create a new, extendible array with initial size zero.
%% @equiv new([])
%%
%% @see new/1
%% @see new/2
-spec new() -> array().

new() ->
    %% delegate to new/1 with the empty (all-defaults) option list
    new([]).
%% @doc Create a new array according to the given options. By default,
%% the array is extendible and has initial size zero. Array indices
%% start at 0.
%%
%% `Options' is a single term or a list of terms, selected from the
%% following:
%% <dl>
%% <dt>`N::integer()' or `{size, N::integer()}'</dt>
%% <dd>Specifies the initial size of the array; this also implies
%% `{fixed, true}'. If `N' is not a nonnegative integer, the call
%% fails with reason `badarg'.</dd>
%% <dt>`fixed' or `{fixed, true}'</dt>
%% <dd>Creates a fixed-size array; see also {@link fix/1}.</dd>
%% <dt>`{fixed, false}'</dt>
%% <dd>Creates an extendible (non fixed-size) array.</dd>
%% <dt>`{default, Value}'</dt>
%% <dd>Sets the default value for the array to `Value'.</dd>
%% </dl>
%% Options are processed in the order they occur in the list, i.e.,
%% later options have higher precedence.
%%
%% The default value is used as the value of uninitialized entries, and
%% cannot be changed once the array has been created.
%%
%% Examples:
%% ```array:new(100)''' creates a fixed-size array of size 100.
%% ```array:new({default,0})''' creates an empty, extendible array
%% whose default value is 0.
%% ```array:new([{size,10},{fixed,false},{default,-1}])''' creates an
%% extendible array with initial size 10 whose default value is -1.
%%
%% @see new/0
%% @see new/2
%% @see set/3
%% @see get/2
%% @see from_list/2
%% @see fix/1
-spec new(Options :: array_opts()) -> array().

new(Options) ->
    %% initial size 0, not fixed unless the options say otherwise
    new_0(Options, 0, false).
%% @doc Create a new array according to the given size and options. If
%% `Size' is not a nonnegative integer, the call fails with reason
%% `badarg'. By default, the array has fixed size. Note that any size
%% specifications in `Options' will override the `Size' parameter.
%%
%% If `Options' is a list, this is simply equivalent to `new([{size,
%% Size} | Options]', otherwise it is equivalent to `new([{size, Size} |
%% [Options]]'. However, using this function directly is more efficient.
%%
%% Example:
%% ```array:new(100, {default,0})''' creates a fixed-size array of size
%% 100, whose default value is 0.
%%
%% @see new/1
-spec new(Size :: non_neg_integer(), Options :: array_opts()) -> array().

new(Size, Options) when is_integer(Size), Size >= 0 ->
    %% an explicit size implies fixed=true (Options may override)
    new_0(Options, Size, true);
new(_, _) ->
    %% non-integer or negative size
    erlang:error(badarg).
%% @doc Normalise `Options' to a list before processing it.
new_0(Options, Size, Fixed) when is_list(Options) ->
    new_1(Options, Size, Fixed, ?DEFAULT);
new_0(Options, Size, Fixed) ->
    %% a single bare option term is treated as a one-element list
    new_1([Options], Size, Fixed, ?DEFAULT).
%% @doc Fold over the option list; later options override earlier
%% ones, since each recursive step rebinds the affected parameter.
new_1([fixed | Options], Size, _, Default) ->
    new_1(Options, Size, true, Default);
new_1([{fixed, Fixed} | Options], Size, _, Default)
  when is_boolean(Fixed) ->
    new_1(Options, Size, Fixed, Default);
new_1([{default, Default} | Options], Size, Fixed, _) ->
    new_1(Options, Size, Fixed, Default);
new_1([{size, Size} | Options], _, _, Default)
  when is_integer(Size), Size >= 0 ->
    %% an explicit size also forces the array to be fixed
    new_1(Options, Size, true, Default);
new_1([Size | Options], _, _, Default)
  when is_integer(Size), Size >= 0 ->
    %% a bare integer is shorthand for {size, Size}
    new_1(Options, Size, true, Default);
new_1([], Size, Fixed, Default) ->
    new(Size, Fixed, Default);
new_1(_Options, _Size, _Fixed, _Default) ->
    %% unrecognised option (or a malformed option list)
    erlang:error(badarg).
%% @doc Construct the #array{} record. `max' = 0 marks a fixed-size
%% array; otherwise `max' is the capacity of the current (unexpanded)
%% element tree.
new(0, false, undefined) ->
    %% Constant empty array
    #array{size=0, max=?LEAFSIZE, elements=?LEAFSIZE};
new(Size, Fixed, Default) ->
    %% capacity large enough to hold index Size-1
    E = find_max(Size - 1, ?LEAFSIZE),
    M = if Fixed -> 0;
           true -> E
        end,
    #array{size = Size, max = M, default = Default, elements = E}.
%% Smallest capacity, obtained by repeatedly ?extend-ing M, that is
%% strictly greater than index I (i.e. can address slot I).
-spec find_max(integer(), integer()) -> integer().
find_max(I, M) when I >= M ->
    find_max(I, ?extend(M));
find_max(_I, M) ->
    M.
%% @doc Returns `true' if `X' appears to be an array, otherwise `false'.
%% Note that the check is only shallow; there is no guarantee that `X'
%% is a well-formed array representation even if this function returns
%% `true'.
-spec is_array(X :: term()) -> boolean().
%% Shallow check only: verifies the record tag and that the size and
%% max fields are integers; the element tree is not inspected.
is_array(#array{size = Size, max = Max})
  when is_integer(Size), is_integer(Max) ->
    true;
is_array(_) ->
    false.
%% @doc Get the number of entries in the array. Entries are numbered
%% from 0 to `size(Array)-1'; hence, this is also the index of the first
%% entry that is guaranteed to not have been previously set.
%% @see set/3
%% @see sparse_size/1
-spec size(Array :: array()) -> non_neg_integer().
size(#array{size = N}) -> N;
size(_) -> erlang:error(badarg).
%% @doc Get the value used for uninitialized entries.
%%
%% @see new/2
-spec default(Array :: array(Type)) -> Value :: Type.
default(#array{default = D}) -> D;
default(_) -> erlang:error(badarg).
-ifdef(EUNIT).
%% EUnit generator covering construction (new/0-2), option handling,
%% size/1, default/1, is_array/1 and the internal record layout at
%% leaf/node capacity boundaries.
new_test_() ->
    N0 = ?LEAFSIZE,
    N01 = N0+1,
    N1 = ?NODESIZE*N0,
    N11 = N1+1,
    N2 = ?NODESIZE*N1,
    [?_test(new()),
     ?_test(new([])),
     ?_test(new(10)),
     ?_test(new({size,10})),
     ?_test(new(fixed)),
     ?_test(new({fixed,true})),
     ?_test(new({fixed,false})),
     ?_test(new({default,undefined})),
     ?_test(new([{size,100},{fixed,false},{default,undefined}])),
     ?_test(new([100,fixed,{default,0}])),
     ?_assert(new() =:= new([])),
     ?_assert(new() =:= new([{size,0},{default,undefined},{fixed,false}])),
     ?_assert(new() =:= new(0, {fixed,false})),
     ?_assert(new(fixed) =:= new(0)),
     ?_assert(new(fixed) =:= new(0, [])),
     ?_assert(new(10) =:= new([{size,0},{size,5},{size,10}])),
     ?_assert(new(10) =:= new(0, {size,10})),
     ?_assert(new(10, []) =:= new(10, [{default,undefined},{fixed,true}])),
     ?_assertError(badarg, new(-1)),
     ?_assertError(badarg, new(10.0)),
     ?_assertError(badarg, new(undefined)),
     ?_assertError(badarg, new([undefined])),
     ?_assertError(badarg, new([{default,0} | fixed])),
     ?_assertError(badarg, new(-1, [])),
     ?_assertError(badarg, new(10.0, [])),
     ?_assertError(badarg, new(undefined, [])),
     ?_assertMatch(#array{size=0,max=N0,default=undefined,elements=N0},
                   new()),
     ?_assertMatch(#array{size=0,max=0,default=undefined,elements=N0},
                   new(fixed)),
     ?_assertMatch(#array{size=N0,max=N0,elements=N0},
                   new(N0, {fixed,false})),
     ?_assertMatch(#array{size=N01,max=N1,elements=N1},
                   new(N01, {fixed,false})),
     ?_assertMatch(#array{size=N1,max=N1,elements=N1},
                   new(N1, {fixed,false})),
     ?_assertMatch(#array{size=N11,max=N2,elements=N2},
                   new(N11, {fixed,false})),
     ?_assertMatch(#array{size=N2, max=N2, default=42,elements=N2},
                   new(N2, [{fixed,false},{default,42}])),
     ?_assert(0 =:= array:size(new())),
     ?_assert(17 =:= array:size(new(17))),
     ?_assert(100 =:= array:size(array:set(99,0,new()))),
     ?_assertError(badarg, array:size({bad_data,gives_error})),
     ?_assert(undefined =:= default(new())),
     ?_assert(4711 =:= default(new({default,4711}))),
     ?_assert(0 =:= default(new(10, {default,0}))),
     ?_assertError(badarg, default({bad_data,gives_error})),
     ?_assert(is_array(new())),
     ?_assert(false =:= is_array({foobar, 23, 23})),
     ?_assert(false =:= is_array(#array{size=bad})),
     ?_assert(false =:= is_array(#array{max=bad})),
     ?_assert(is_array(new(10))),
     ?_assert(is_array(new(10, {fixed,false})))
    ].
-endif.
%% @doc Fix the size of the array. This prevents it from growing
%% automatically upon insertion; see also {@link set/3}.
%% @see relax/1
-spec fix(Array :: array(Type)) -> array(Type).
%% max = 0 is the encoding for "fixed size" throughout this module.
fix(#array{}=A) ->
    A#array{max = 0}.
%% @doc Check if the array has fixed size.
%% Returns `true' if the array is fixed, otherwise `false'.
%% @see fix/1
-spec is_fix(Array :: array()) -> boolean().
is_fix(#array{max = 0}) -> true;
is_fix(#array{}) -> false.
-ifdef(EUNIT).
%% EUnit generator for fix/1 and is_fix/1, including the interaction
%% with set/3 beyond the fixed bound (must raise badarg).
fix_test_() ->
    [?_assert(is_array(fix(new()))),
     ?_assert(fix(new()) =:= new(fixed)),
     ?_assertNot(is_fix(new())),
     ?_assertNot(is_fix(new([]))),
     ?_assertNot(is_fix(new({fixed,false}))),
     ?_assertNot(is_fix(new(10, {fixed,false}))),
     ?_assert(is_fix(new({fixed,true}))),
     ?_assert(is_fix(new(fixed))),
     ?_assert(is_fix(new(10))),
     ?_assert(is_fix(new(10, []))),
     ?_assert(is_fix(new(10, {fixed,true}))),
     ?_assert(is_fix(fix(new()))),
     ?_assert(is_fix(fix(new({fixed,false})))),
     ?_test(set(0, 17, new())),
     ?_assertError(badarg, set(0, 17, new(fixed))),
     ?_assertError(badarg, set(1, 42, fix(set(0, 17, new())))),
     ?_test(set(9, 17, new(10))),
     ?_assertError(badarg, set(10, 17, new(10))),
     ?_assertError(badarg, set(10, 17, fix(new(10, {fixed,false}))))
    ].
-endif.
%% @doc Make the array resizable. (Reverses the effects of {@link
%% fix/1}.)
%% @see fix/1
-spec relax(Array :: array(Type)) -> array(Type).
%% Restore max to the real capacity of the current tree, undoing fix/1.
relax(#array{size = N}=A) ->
    A#array{max = find_max(N-1, ?LEAFSIZE)}.
-ifdef(EUNIT).
%% EUnit generator for relax/1: relaxing a fixed array must be
%% equivalent to constructing it non-fixed in the first place.
relax_test_() ->
    [?_assert(is_array(relax(new(fixed)))),
     ?_assertNot(is_fix(relax(fix(new())))),
     ?_assertNot(is_fix(relax(new(fixed)))),
     ?_assert(new() =:= relax(new(fixed))),
     ?_assert(new() =:= relax(new(0))),
     ?_assert(new(17, {fixed,false}) =:= relax(new(17))),
     ?_assert(new(100, {fixed,false})
              =:= relax(fix(new(100, {fixed,false}))))
    ].
-endif.
%% @doc Change the size of the array. If `Size' is not a nonnegative
%% integer, the call fails with reason `badarg'. If the given array has
%% fixed size, the resulting array will also have fixed size.
-spec resize(Size :: non_neg_integer(), Array :: array(Type)) ->
          array(Type).
resize(Size, #array{size = N, max = M, elements = E}=A)
  when is_integer(Size), Size >= 0 ->
    if Size > N ->
           %% Growing: extend the tree so index Size-1 is addressable.
           %% For a fixed array (M == 0) the true capacity of the
           %% current tree must be recomputed to know where to grow
           %% from.
           {E1, M1} = grow(Size-1, E,
                           if M > 0 -> M;
                              true -> find_max(N-1, ?LEAFSIZE)
                           end),
           %% Preserve fixedness: keep max at 0 (= M) when fixed,
           %% otherwise record the new capacity M1.
           A#array{size = Size,
                   max = if M > 0 -> M1;
                            true -> M
                         end,
                   elements = E1};
       Size < N ->
           %% TODO: shrink physical representation when shrinking the array
           A#array{size = Size};
       true ->
           A
    end;
resize(_Size, _) ->
    erlang:error(badarg).
%% @doc Change the size of the array to that reported by {@link
%% sparse_size/1}. If the given array has fixed size, the resulting
%% array will also have fixed size.
%% @equiv resize(sparse_size(Array), Array)
%% @see resize/2
%% @see sparse_size/1
-spec resize(Array :: array(Type)) -> array(Type).
%% Shrink (logically) to the last non-default entry.
resize(Array) ->
    resize(sparse_size(Array), Array).
-ifdef(EUNIT).
%% EUnit generator for resize/1,2: growing, shrinking, and
%% preservation of fixedness across resizing.
resize_test_() ->
    [?_assert(resize(0, new()) =:= new()),
     ?_assert(resize(99, new(99)) =:= new(99)),
     ?_assert(resize(99, relax(new(99))) =:= relax(new(99))),
     ?_assert(is_fix(resize(100, new(10)))),
     ?_assertNot(is_fix(resize(100, relax(new(10))))),
     ?_assert(array:size(resize(100, new())) =:= 100),
     ?_assert(array:size(resize(0, new(100))) =:= 0),
     ?_assert(array:size(resize(99, new(10))) =:= 99),
     ?_assert(array:size(resize(99, new(1000))) =:= 99),
     ?_assertError(badarg, set(99, 17, new(10))),
     ?_test(set(99, 17, resize(100, new(10)))),
     ?_assertError(badarg, set(100, 17, resize(100, new(10)))),
     ?_assert(array:size(resize(new())) =:= 0),
     ?_assert(array:size(resize(new(8))) =:= 0),
     ?_assert(array:size(resize(array:set(7, 0, new()))) =:= 8),
     ?_assert(array:size(resize(array:set(7, 0, new(10)))) =:= 8),
     ?_assert(array:size(resize(array:set(99, 0, new(10,{fixed,false}))))
              =:= 100),
     ?_assert(array:size(resize(array:set(7, undefined, new()))) =:= 0),
     ?_assert(array:size(resize(array:from_list([1,2,3,undefined])))
              =:= 3),
     ?_assert(array:size(
                resize(array:from_orddict([{3,0},{17,0},{99,undefined}])))
              =:= 18),
     ?_assertError(badarg, resize(foo, bad_argument))
    ].
-endif.
%% @doc Set entry `I' of the array to `Value'. If `I' is not a
%% nonnegative integer, or if the array has fixed size and `I' is larger
%% than the maximum index, the call fails with reason `badarg'.
%%
%% If the array does not have fixed size, and `I' is greater than
%% `size(Array)-1', the array will grow to size `I+1'.
%%
%% @see get/2
%% @see reset/2
-spec set(I :: array_indx(), Value :: Type, Array :: array(Type)) -> array(Type).
%% Three regimes: I inside the current size; I inside the allocated
%% capacity (bump size only); I beyond capacity on a non-fixed array
%% (grow the tree first). Anything else is badarg.
set(I, Value, #array{size = N, max = M, default = D, elements = E}=A)
  when is_integer(I), I >= 0 ->
    if I < N ->
           A#array{elements = set_1(I, E, Value, D)};
       I < M ->
           %% (note that this cannot happen if M == 0, since N >= 0)
           A#array{size = I+1, elements = set_1(I, E, Value, D)};
       M > 0 ->
           %% Non-fixed array: extend capacity up to index I.
           {E1, M1} = grow(I, E, M),
           A#array{size = I+1, max = M1,
                   elements = set_1(I, E1, Value, D)};
       true ->
           %% Fixed array (M == 0) and I >= N.
           erlang:error(badarg)
    end;
set(_I, _V, _A) ->
    erlang:error(badarg).
%% See get_1/3 for details about switching and the NODEPATTERN macro.
%% Descend to the leaf holding index I, expanding any unexpanded
%% (integer-encoded) node on the path, and write X into the slot.
set_1(I, E=?NODEPATTERN(S), X, D) ->
    I1 = I div S + 1,
    setelement(I1, E, set_1(I rem S, element(I1, E), X, D));
set_1(I, E, X, D) when is_integer(E) ->
    %% Unexpanded node of capacity E: materialize it around slot I.
    expand(I, E, X, D);
set_1(I, E, X, _D) ->
    %% Leaf tuple: slot I is element I+1.
    setelement(I+1, E, X).
%% Enlarging the array upwards to accommodate an index `I'.
%% Returns {NewElements, NewCapacity}.
grow(I, E, _M) when is_integer(E) ->
    %% Whole tree is unexpanded: just compute the new capacity.
    M1 = find_max(I, E),
    {M1, M1};
grow(I, E, M) ->
    grow_1(I, E, M).
%% Wrap the existing tree in new root nodes (old tree becomes the
%% first child) until index I fits under the root capacity M.
grow_1(I, E, M) when I >= M ->
    grow_1(I, setelement(1, ?NEW_NODE(M), E), ?extend(M));
grow_1(_I, E, M) ->
    {E, M}.
%% Insert an element in an unexpanded node, expanding it as necessary.
%% Only the path down to index I is materialized; siblings stay as
%% integer-encoded (unexpanded) subtrees.
expand(I, S, X, D) when S > ?LEAFSIZE ->
    S1 = ?reduce(S),
    setelement(I div S1 + 1, ?NEW_NODE(S1),
               expand(I rem S1, S1, X, D));
expand(I, _S, X, D) ->
    %% Leaf level: fresh default-filled leaf with X at slot I.
    setelement(I+1, ?NEW_LEAF(D), X).
%% @doc Get the value of entry `I'. If `I' is not a nonnegative
%% integer, or if the array has fixed size and `I' is larger than the
%% maximum index, the call fails with reason `badarg'.
%%
%% If the array does not have fixed size, this function will return the
%% default value for any index `I' greater than `size(Array)-1'.
%% @see set/3
-spec get(I :: array_indx(), Array :: array(Type)) -> Value :: Type.
get(I, #array{size = N, max = M, elements = E, default = D})
  when is_integer(I), I >= 0 ->
    if I < N ->
           get_1(I, E, D);
       M > 0 ->
           %% Non-fixed array: indices beyond size read as the default.
           D;
       true ->
           %% Fixed array (M == 0) and I >= N.
           erlang:error(badarg)
    end;
get(_I, _A) ->
    erlang:error(badarg).
%% The use of NODEPATTERN(S) to select the right clause is just a hack,
%% but it is the only way to get the maximum speed out of this loop
%% (using the Beam compiler in OTP 11).
%% Descend to the leaf holding index I; an integer-encoded
%% (unexpanded) subtree means every entry in it is the default.
get_1(I, E=?NODEPATTERN(S), D) ->
    get_1(I rem S, element(I div S + 1, E), D);
get_1(_I, E, D) when is_integer(E) ->
    D;
get_1(I, E, _D) ->
    element(I+1, E).
%% @doc Reset entry `I' to the default value for the array.
%% If the value of entry `I' is the default value the array will be
%% returned unchanged. Reset will never change size of the array.
%% Shrinking can be done explicitly by calling {@link resize/2}.
%%
%% If `I' is not a nonnegative integer, or if the array has fixed size
%% and `I' is larger than the maximum index, the call fails with reason
%% `badarg'; cf. {@link set/3}
%%
%% @see new/2
%% @see set/3
%% TODO: a reset_range function
-spec reset(I :: array_indx(), Array :: array(Type)) -> array(Type).
reset(I, #array{size = N, max = M, default = D, elements = E}=A)
  when is_integer(I), I >= 0 ->
    if I < N ->
           %% reset_1 throws `default' when the entry already holds
           %% the default, so the array can be returned unchanged.
           try A#array{elements = reset_1(I, E, D)}
           catch throw:default -> A
           end;
       M > 0 ->
           %% Beyond size on a non-fixed array: already the default.
           A;
       true ->
           erlang:error(badarg)
    end;
reset(_I, _A) ->
    erlang:error(badarg).
%% Descend to the leaf holding index I and overwrite the slot with the
%% default D. Throws `default' (caught in reset/2) when the entry is in
%% an unexpanded subtree or already holds the default, so the caller
%% can return the original array without rebuilding the path.
reset_1(I, E=?NODEPATTERN(S), D) ->
    I1 = I div S + 1,
    setelement(I1, E, reset_1(I rem S, element(I1, E), D));
reset_1(_I, E, _D) when is_integer(E) ->
    %% Unexpanded subtree: every entry is already the default.
    throw(default);
reset_1(I, E, D) ->
    Indx = I+1,
    case element(Indx, E) of
        D -> throw(default);
        %% Use the already-computed Indx instead of recomputing I+1.
        _ -> setelement(Indx, E, D)
    end.
-ifdef(EUNIT).
%% EUnit generator for set/3, get/2 and reset/2: growth across leaf
%% and node boundaries, fixed-size bounds checking, and reset to the
%% default value (including a non-undefined default).
set_get_test_() ->
    N0 = ?LEAFSIZE,
    N1 = ?NODESIZE*N0,
    [?_assert(array:get(0, new()) =:= undefined),
     ?_assert(array:get(1, new()) =:= undefined),
     ?_assert(array:get(99999, new()) =:= undefined),
     ?_assert(array:get(0, new(1)) =:= undefined),
     ?_assert(array:get(0, new(1,{default,0})) =:= 0),
     ?_assert(array:get(9, new(10)) =:= undefined),
     ?_assertError(badarg, array:get(0, new(fixed))),
     ?_assertError(badarg, array:get(1, new(1))),
     ?_assertError(badarg, array:get(-1, new(1))),
     ?_assertError(badarg, array:get(10, new(10))),
     ?_assertError(badarg, array:set(-1, foo, new(10))),
     ?_assertError(badarg, array:set(10, foo, no_array)),
     ?_assert(array:size(set(0, 17, new())) =:= 1),
     ?_assert(array:size(set(N1-1, 17, new())) =:= N1),
     ?_assert(array:size(set(0, 42, set(0, 17, new()))) =:= 1),
     ?_assert(array:size(set(9, 42, set(0, 17, new()))) =:= 10),
     ?_assert(array:get(0, set(0, 17, new())) =:= 17),
     ?_assert(array:get(0, set(1, 17, new())) =:= undefined),
     ?_assert(array:get(1, set(1, 17, new())) =:= 17),
     ?_assert(array:get(0, fix(set(0, 17, new()))) =:= 17),
     ?_assertError(badarg, array:get(1, fix(set(0, 17, new())))),
     ?_assert(array:get(N1-2, set(N1-1, 17, new())) =:= undefined),
     ?_assert(array:get(N1-1, set(N1-1, 17, new())) =:= 17),
     ?_assertError(badarg, array:get(N1, fix(set(N1-1, 17, new())))),
     ?_assert(array:get(0, set(0, 42, set(0, 17, new()))) =:= 42),
     ?_assertError(badarg, array:get(0, reset(11, new([{size,10}])))),
     ?_assertError(badarg, array:get(0, reset(-1, new([{size,10}])))),
     ?_assert(array:get(0, reset(0, new())) =:= undefined),
     ?_assert(array:get(0, reset(0, set(0, 17, new()))) =:= undefined),
     ?_assert(array:get(0, reset(9, set(9, 17, new()))) =:= undefined),
     ?_assert(array:get(0, reset(11, set(11, 17, new()))) =:= undefined),
     ?_assert(array:get(0, reset(11, set(12, 17, new()))) =:= undefined),
     ?_assert(array:get(0, reset(1, set(12, 17, new()))) =:= undefined),
     ?_assert(array:get(0, reset(11, new())) =:= undefined),
     ?_assert(array:get(0, reset(0, set(0, 17, new({default,42})))) =:= 42),
     ?_assert(array:get(0, reset(0, new({default,42}))) =:= 42)
    ].
-endif.
%% @doc Converts the array to a list.
%%
%% @see from_list/2
%% @see sparse_to_list/1
-spec to_list(Array :: array(Type)) -> list(Value :: Type).
to_list(#array{size = 0}) ->
    [];
to_list(#array{size = N, elements = E, default = D}) ->
    %% N-1 is the highest used index; entries beyond it are ignored.
    to_list_1(E, D, N - 1);
to_list(_) ->
    erlang:error(badarg).
%% this part handles the rightmost subtrees, which may be partially
%% filled (only indices 0..I are emitted)
to_list_1(E=?NODEPATTERN(S), D, I) ->
    N = I div S,
    %% Recurse into the rightmost used child, then prepend the N full
    %% children to its left.
    to_list_3(N, D, to_list_1(element(N+1, E), D, I rem S), E);
to_list_1(E, D, I) when is_integer(E) ->
    %% Unexpanded subtree: I+1 default values.
    push(I+1, D, []);
to_list_1(E, _D, I) ->
    push_tuple(I+1, E, []).
%% this part handles full trees only
to_list_2(E=?NODEPATTERN(_S), D, L) ->
    to_list_3(?NODESIZE, D, L, E);
to_list_2(E, D, L) when is_integer(E) ->
    %% Unexpanded subtree of E entries, all default.
    push(E, D, L);
to_list_2(E, _D, L) ->
    push_tuple(?LEAFSIZE, E, L).
%% Prepend children N..1 of node E (right to left) onto L.
to_list_3(0, _D, L, _E) ->
    L;
to_list_3(N, D, L, E) ->
    to_list_3(N-1, D, to_list_2(element(N, E), D, L), E).
%% Prepend N copies of the element E onto the list L.
push(N, E, L) ->
    case N of
        0 -> L;
        _ -> push(N - 1, E, [E | L])
    end.
%% Prepend the first N elements of tuple T onto L, preserving order.
push_tuple(N, T, L) ->
    case N of
        0 -> L;
        _ -> push_tuple(N - 1, T, [element(N, T) | L])
    end.
-ifdef(EUNIT).
%% EUnit generator for to_list/1: defaults, set entries, and arrays
%% spanning leaf and multi-leaf boundaries.
to_list_test_() ->
    N0 = ?LEAFSIZE,
    [?_assert([] =:= to_list(new())),
     ?_assert([undefined] =:= to_list(new(1))),
     ?_assert([undefined,undefined] =:= to_list(new(2))),
     ?_assert(lists:duplicate(N0,0) =:= to_list(new(N0,{default,0}))),
     ?_assert(lists:duplicate(N0+1,1) =:= to_list(new(N0+1,{default,1}))),
     ?_assert(lists:duplicate(N0+2,2) =:= to_list(new(N0+2,{default,2}))),
     ?_assert(lists:duplicate(666,6) =:= to_list(new(666,{default,6}))),
     ?_assert([1,2,3] =:= to_list(set(2,3,set(1,2,set(0,1,new()))))),
     ?_assert([3,2,1] =:= to_list(set(0,3,set(1,2,set(2,1,new()))))),
     ?_assert([1|lists:duplicate(N0-2,0)++[1]] =:=
              to_list(set(N0-1,1,set(0,1,new({default,0}))))),
     ?_assert([1|lists:duplicate(N0-1,0)++[1]] =:=
              to_list(set(N0,1,set(0,1,new({default,0}))))),
     ?_assert([1|lists:duplicate(N0,0)++[1]] =:=
              to_list(set(N0+1,1,set(0,1,new({default,0}))))),
     ?_assert([1|lists:duplicate(N0*3,0)++[1]] =:=
              to_list(set((N0*3)+1,1,set(0,1,new({default,0}))))),
     ?_assertError(badarg, to_list(no_array))
    ].
-endif.
%% @doc Converts the array to a list, skipping default-valued entries.
%%
%% @see to_list/1
-spec sparse_to_list(Array :: array(Type)) -> list(Value :: Type).
sparse_to_list(#array{size = 0}) ->
[];
sparse_to_list(#array{size = N, elements = E, default = D}) ->
sparse_to_list_1(E, D, N - 1);
sparse_to_list(_) ->
erlang:error(badarg).
%% see to_list/1 for details; same traversal, but entries equal to the
%% default D are skipped, and unexpanded subtrees contribute nothing.
sparse_to_list_1(E=?NODEPATTERN(S), D, I) ->
    N = I div S,
    sparse_to_list_3(N, D,
                     sparse_to_list_1(element(N+1, E), D, I rem S),
                     E);
sparse_to_list_1(E, _D, _I) when is_integer(E) ->
    [];
sparse_to_list_1(E, D, I) ->
    sparse_push_tuple(I+1, D, E, []).
sparse_to_list_2(E=?NODEPATTERN(_S), D, L) ->
    sparse_to_list_3(?NODESIZE, D, L, E);
sparse_to_list_2(E, _D, L) when is_integer(E) ->
    L;
sparse_to_list_2(E, D, L) ->
    sparse_push_tuple(?LEAFSIZE, D, E, L).
%% Fold children N..1 of node E (right to left) onto L.
sparse_to_list_3(0, _D, L, _E) ->
    L;
sparse_to_list_3(N, D, L, E) ->
    sparse_to_list_3(N-1, D, sparse_to_list_2(element(N, E), D, L), E).
%% Prepend the first N elements of Tuple onto Acc, in order, skipping
%% any element equal (=:=) to Default.
sparse_push_tuple(0, _Default, _Tuple, Acc) ->
    Acc;
sparse_push_tuple(N, Default, Tuple, Acc) ->
    Elem = element(N, Tuple),
    if
        Elem =:= Default ->
            sparse_push_tuple(N - 1, Default, Tuple, Acc);
        true ->
            sparse_push_tuple(N - 1, Default, Tuple, [Elem | Acc])
    end.
-ifdef(EUNIT).
%% EUnit generator for sparse_to_list/1: all-default arrays yield [],
%% and only explicitly set entries survive.
sparse_to_list_test_() ->
    N0 = ?LEAFSIZE,
    [?_assert([] =:= sparse_to_list(new())),
     ?_assert([] =:= sparse_to_list(new(1))),
     ?_assert([] =:= sparse_to_list(new(1,{default,0}))),
     ?_assert([] =:= sparse_to_list(new(2))),
     ?_assert([] =:= sparse_to_list(new(2,{default,0}))),
     ?_assert([] =:= sparse_to_list(new(N0,{default,0}))),
     ?_assert([] =:= sparse_to_list(new(N0+1,{default,1}))),
     ?_assert([] =:= sparse_to_list(new(N0+2,{default,2}))),
     ?_assert([] =:= sparse_to_list(new(666,{default,6}))),
     ?_assert([1,2,3] =:= sparse_to_list(set(2,3,set(1,2,set(0,1,new()))))),
     ?_assert([3,2,1] =:= sparse_to_list(set(0,3,set(1,2,set(2,1,new()))))),
     ?_assert([0,1] =:= sparse_to_list(set(N0-1,1,set(0,0,new())))),
     ?_assert([0,1] =:= sparse_to_list(set(N0,1,set(0,0,new())))),
     ?_assert([0,1] =:= sparse_to_list(set(N0+1,1,set(0,0,new())))),
     ?_assert([0,1,2] =:= sparse_to_list(set(N0*10+1,2,set(N0*2+1,1,set(0,0,new()))))),
     ?_assertError(badarg, sparse_to_list(no_array))
    ].
-endif.
%% @equiv from_list(List, undefined)
-spec from_list(List :: list(Value :: Type)) -> array(Type).
from_list(List) ->
    from_list(List, undefined).
%% @doc Convert a list to an extendible array. `Default' is used as the value
%% for uninitialized entries of the array. If `List' is not a proper list,
%% the call fails with reason `badarg'.
%%
%% @see new/2
%% @see to_list/1
-spec from_list(List :: list(Value :: Type), Default :: term()) -> array(Type).
from_list([], Default) ->
    new({default,Default});
from_list(List, Default) when is_list(List) ->
    %% Build the tree bottom-up; N is the element count and M the
    %% resulting capacity. The result is extendible (not fixed).
    {E, N, M} = from_list_1(?LEAFSIZE, List, Default, 0, [], []),
    #array{size = N, max = M, default = Default, elements = E};
from_list(_, _) ->
    erlang:error(badarg).
%% Note: A cleaner but slower algorithm is to first take the length of
%% the list and compute the max size of the final tree, and then
%% decompose the list. The below algorithm is almost twice as fast,
%% however.
%% Building the leaf nodes (padding the last one as necessary) and
%% counting the total number of elements.
%% I counts down the remaining slots in the current leaf; As holds the
%% (reversed) slots of that leaf; Es collects finished leaf tuples.
from_list_1(0, Xs, D, N, As, Es) ->
    %% Current leaf is full; seal it.
    E = list_to_tuple(lists:reverse(As)),
    case Xs of
        [] ->
            case Es of
                [] ->
                    %% Everything fit in a single leaf.
                    {E, N, ?LEAFSIZE};
                _ ->
                    %% Build internal nodes over the collected leafs.
                    from_list_2_0(N, [E | Es], ?LEAFSIZE)
            end;
        [_|_] ->
            from_list_1(?LEAFSIZE, Xs, D, N, [], [E | Es]);
        _ ->
            %% Improper list tail, detected at a leaf boundary.
            erlang:error(badarg)
    end;
from_list_1(I, Xs, D, N, As, Es) ->
    case Xs of
        [X | Xs1] ->
            from_list_1(I-1, Xs1, D, N+1, [X | As], Es);
        _ ->
            %% List exhausted mid-leaf: pad with the default. N is not
            %% incremented, so padding does not count toward the size.
            from_list_1(I-1, Xs, D, N, [D | As], Es)
    end.
%% Building the internal nodes (note that the input is reversed).
%% Pads the child list to a multiple of ?NODESIZE, then groups the
%% children into nodes one level up, repeating until a single root
%% remains. Each node tuple carries the child span S in its last slot
%% (the layout matched by ?NODEPATTERN).
from_list_2_0(N, Es, S) ->
    from_list_2(?NODESIZE, pad((N-1) div S + 1, ?NODESIZE, S, Es),
                S, N, [S], []).
from_list_2(0, Xs, S, N, As, Es) ->
    %% Current node has all its children (As is in correct order,
    %% since the reversed input was prepended); seal it.
    E = list_to_tuple(As),
    case Xs of
        [] ->
            case Es of
                [] ->
                    {E, N, ?extend(S)};
                _ ->
                    %% Recurse one level up with the finished nodes.
                    from_list_2_0(N, lists:reverse([E | Es]),
                                  ?extend(S))
            end;
        _ ->
            from_list_2(?NODESIZE, Xs, S, N, [S], [E | Es])
    end;
from_list_2(I, [X | Xs], S, N, As, Es) ->
    from_list_2(I-1, Xs, S, N, [X | As], Es).
%% left-padding a list Es with elements P to the nearest multiple of K
%% elements from N (adding 0 to K-1 elements).
pad(N, K, P, Es) ->
    push((K - (N rem K)) rem K, P, Es).
-ifdef(EUNIT).
%% EUnit generator for from_list/1,2: round-trips with to_list/1 at
%% several tree depths, and badarg on improper/non-lists.
from_list_test_() ->
    N0 = ?LEAFSIZE,
    N1 = ?NODESIZE*N0,
    N2 = ?NODESIZE*N1,
    N3 = ?NODESIZE*N2,
    N4 = ?NODESIZE*N3,
    [?_assert(array:size(from_list([])) =:= 0),
     ?_assert(array:is_fix(from_list([])) =:= false),
     ?_assert(array:size(from_list([undefined])) =:= 1),
     ?_assert(array:is_fix(from_list([undefined])) =:= false),
     ?_assert(array:size(from_list(lists:seq(1,N1))) =:= N1),
     ?_assert(to_list(from_list(lists:seq(1,N0))) =:= lists:seq(1,N0)),
     ?_assert(to_list(from_list(lists:seq(1,N0+1))) =:= lists:seq(1,N0+1)),
     ?_assert(to_list(from_list(lists:seq(1,N0+2))) =:= lists:seq(1,N0+2)),
     ?_assert(to_list(from_list(lists:seq(1,N2))) =:= lists:seq(1,N2)),
     ?_assert(to_list(from_list(lists:seq(1,N2+1))) =:= lists:seq(1,N2+1)),
     ?_assert(to_list(from_list(lists:seq(0,N3))) =:= lists:seq(0,N3)),
     ?_assert(to_list(from_list(lists:seq(0,N4))) =:= lists:seq(0,N4)),
     ?_assertError(badarg, from_list([a,b,a,c|d])),
     ?_assertError(badarg, from_list(no_array))
    ].
-endif.
%% @doc Convert the array to an ordered list of pairs `{Index, Value}'.
%%
%% @see from_orddict/2
%% @see sparse_to_orddict/1
-spec to_orddict(Array :: array(Type)) -> indx_pairs(Value :: Type).
to_orddict(#array{size = 0}) ->
    [];
to_orddict(#array{size = N, elements = E, default = D}) ->
    %% I is both the highest used index and the running index of the
    %% rightmost entry during traversal.
    I = N - 1,
    to_orddict_1(E, I, D, I);
to_orddict(_) ->
    erlang:error(badarg).
%% see to_list/1 for comparison; R tracks the absolute index of the
%% rightmost not-yet-emitted entry so each value gets its index.
to_orddict_1(E=?NODEPATTERN(S), R, D, I) ->
    N = I div S,
    I1 = I rem S,
    to_orddict_3(N, R - I1 - 1, D,
                 to_orddict_1(element(N+1, E), R, D, I1),
                 E, S);
to_orddict_1(E, R, D, I) when is_integer(E) ->
    %% Unexpanded subtree: I+1 defaults ending at index R.
    push_pairs(I+1, R, D, []);
to_orddict_1(E, R, _D, I) ->
    push_tuple_pairs(I+1, R, E, []).
%% Full subtrees only (all S or ?LEAFSIZE entries are emitted).
to_orddict_2(E=?NODEPATTERN(S), R, D, L) ->
    to_orddict_3(?NODESIZE, R, D, L, E, S);
to_orddict_2(E, R, D, L) when is_integer(E) ->
    push_pairs(E, R, D, L);
to_orddict_2(E, R, _D, L) ->
    push_tuple_pairs(?LEAFSIZE, R, E, L).
%% Fold children N..1 of node E (right to left), stepping R down by
%% the child span S for each child.
to_orddict_3(0, _R, _D, L, _E, _S) -> %% when is_integer(R) ->
    L;
to_orddict_3(N, R, D, L, E, S) ->
    to_orddict_3(N-1, R - S, D,
                 to_orddict_2(element(N, E), R, D, L),
                 E, S).
-spec push_pairs(non_neg_integer(), array_indx(), term(), indx_pairs(Type)) ->
          indx_pairs(Type).
%% Prepend N pairs {I, E}, {I-1, E}, ... (descending indices, same
%% value) onto L, so the result is in ascending index order.
push_pairs(N, I, E, L) ->
    case N of
        0 -> L;
        _ -> push_pairs(N - 1, I - 1, E, [{I, E} | L])
    end.
-spec push_tuple_pairs(non_neg_integer(), array_indx(), term(), indx_pairs(Type)) ->
          indx_pairs(Type).
%% Prepend the first N elements of tuple T onto L as {Index, Value}
%% pairs, where slot N of T gets absolute index I, slot N-1 gets I-1,
%% and so on.
push_tuple_pairs(N, I, T, L) ->
    case N of
        0 -> L;
        _ -> push_tuple_pairs(N - 1, I - 1, T, [{I, element(N, T)} | L])
    end.
-ifdef(EUNIT).
%% EUnit generator for to_orddict/1: indices must be dense and
%% ascending, with defaults filling unset entries.
to_orddict_test_() ->
    N0 = ?LEAFSIZE,
    [?_assert([] =:= to_orddict(new())),
     ?_assert([{0,undefined}] =:= to_orddict(new(1))),
     ?_assert([{0,undefined},{1,undefined}] =:= to_orddict(new(2))),
     ?_assert([{N,0}||N<-lists:seq(0,N0-1)]
              =:= to_orddict(new(N0,{default,0}))),
     ?_assert([{N,1}||N<-lists:seq(0,N0)]
              =:= to_orddict(new(N0+1,{default,1}))),
     ?_assert([{N,2}||N<-lists:seq(0,N0+1)]
              =:= to_orddict(new(N0+2,{default,2}))),
     ?_assert([{N,6}||N<-lists:seq(0,665)]
              =:= to_orddict(new(666,{default,6}))),
     ?_assert([{0,1},{1,2},{2,3}] =:=
              to_orddict(set(2,3,set(1,2,set(0,1,new()))))),
     ?_assert([{0,3},{1,2},{2,1}] =:=
              to_orddict(set(0,3,set(1,2,set(2,1,new()))))),
     ?_assert([{0,1}|[{N,0}||N<-lists:seq(1,N0-2)]++[{N0-1,1}]]
              =:= to_orddict(set(N0-1,1,set(0,1,new({default,0}))))),
     ?_assert([{0,1}|[{N,0}||N<-lists:seq(1,N0-1)]++[{N0,1}]]
              =:= to_orddict(set(N0,1,set(0,1,new({default,0}))))),
     ?_assert([{0,1}|[{N,0}||N<-lists:seq(1,N0)]++[{N0+1,1}]]
              =:= to_orddict(set(N0+1,1,set(0,1,new({default,0}))))),
     ?_assert([{0,0} | [{N,undefined}||N<-lists:seq(1,N0*2)]] ++
              [{N0*2+1,1} | [{N,undefined}||N<-lists:seq(N0*2+2,N0*10)]] ++
              [{N0*10+1,2}] =:=
              to_orddict(set(N0*10+1,2,set(N0*2+1,1,set(0,0,new()))))),
     ?_assertError(badarg, to_orddict(no_array))
    ].
-endif.
%% @doc Convert the array to an ordered list of pairs `{Index, Value}',
%% skipping default-valued entries.
%%
%% @see to_orddict/1
-spec sparse_to_orddict(Array :: array(Type)) -> indx_pairs(Value :: Type).
sparse_to_orddict(#array{size = 0}) ->
    [];
sparse_to_orddict(#array{size = N, elements = E, default = D}) ->
    %% Same traversal as to_orddict/1, skipping default entries.
    I = N - 1,
    sparse_to_orddict_1(E, I, D, I);
sparse_to_orddict(_) ->
    erlang:error(badarg).
%% see to_orddict/1 for details; default-valued entries and unexpanded
%% subtrees are skipped entirely.
sparse_to_orddict_1(E=?NODEPATTERN(S), R, D, I) ->
    N = I div S,
    I1 = I rem S,
    sparse_to_orddict_3(N, R - I1 - 1, D,
                        sparse_to_orddict_1(element(N+1, E), R, D, I1),
                        E, S);
sparse_to_orddict_1(E, _R, _D, _I) when is_integer(E) ->
    [];
sparse_to_orddict_1(E, R, D, I) ->
    sparse_push_tuple_pairs(I+1, R, D, E, []).
sparse_to_orddict_2(E=?NODEPATTERN(S), R, D, L) ->
    sparse_to_orddict_3(?NODESIZE, R, D, L, E, S);
sparse_to_orddict_2(E, _R, _D, L) when is_integer(E) ->
    L;
sparse_to_orddict_2(E, R, D, L) ->
    sparse_push_tuple_pairs(?LEAFSIZE, R, D, E, L).
%% Fold children N..1 of node E (right to left), stepping R down by S.
sparse_to_orddict_3(0, _R, _D, L, _E, _S) -> % when is_integer(R) ->
    L;
sparse_to_orddict_3(N, R, D, L, E, S) ->
    sparse_to_orddict_3(N-1, R - S, D,
                        sparse_to_orddict_2(element(N, E), R, D, L),
                        E, S).
-spec sparse_push_tuple_pairs(non_neg_integer(), array_indx(),
                              _, _, indx_pairs(Type)) -> indx_pairs(Type).
%% Prepend the first N elements of tuple T onto L as {Index, Value}
%% pairs (slot N has absolute index I), skipping any element equal
%% (=:=) to the default D.
sparse_push_tuple_pairs(0, _I, _D, _T, L) ->
    L;
sparse_push_tuple_pairs(N, I, D, T, L) ->
    Elem = element(N, T),
    if
        Elem =:= D ->
            sparse_push_tuple_pairs(N-1, I-1, D, T, L);
        true ->
            sparse_push_tuple_pairs(N-1, I-1, D, T, [{I, Elem} | L])
    end.
-ifdef(EUNIT).
%% EUnit generator for sparse_to_orddict/1: only explicitly set
%% entries appear, paired with their indices, in ascending order.
sparse_to_orddict_test_() ->
    N0 = ?LEAFSIZE,
    [?_assert([] =:= sparse_to_orddict(new())),
     ?_assert([] =:= sparse_to_orddict(new(1))),
     ?_assert([] =:= sparse_to_orddict(new(1,{default,0}))),
     ?_assert([] =:= sparse_to_orddict(new(2))),
     ?_assert([] =:= sparse_to_orddict(new(2,{default,0}))),
     ?_assert([] =:= sparse_to_orddict(new(N0,{default,0}))),
     ?_assert([] =:= sparse_to_orddict(new(N0+1,{default,1}))),
     ?_assert([] =:= sparse_to_orddict(new(N0+2,{default,2}))),
     ?_assert([] =:= sparse_to_orddict(new(666,{default,6}))),
     ?_assert([{0,1},{1,2},{2,3}] =:=
              sparse_to_orddict(set(2,3,set(1,2,set(0,1,new()))))),
     ?_assert([{0,3},{1,2},{2,1}] =:=
              sparse_to_orddict(set(0,3,set(1,2,set(2,1,new()))))),
     ?_assert([{0,1},{N0-1,1}] =:=
              sparse_to_orddict(set(N0-1,1,set(0,1,new({default,0}))))),
     ?_assert([{0,1},{N0,1}] =:=
              sparse_to_orddict(set(N0,1,set(0,1,new({default,0}))))),
     ?_assert([{0,1},{N0+1,1}] =:=
              sparse_to_orddict(set(N0+1,1,set(0,1,new({default,0}))))),
     ?_assert([{0,0},{N0*2+1,1},{N0*10+1,2}] =:=
              sparse_to_orddict(set(N0*10+1,2,set(N0*2+1,1,set(0,0,new()))))),
     ?_assertError(badarg, sparse_to_orddict(no_array))
    ].
-endif.
%% @equiv from_orddict(Orddict, undefined)
-spec from_orddict(Orddict :: indx_pairs(Value :: Type)) -> array(Type).
from_orddict(Orddict) ->
    from_orddict(Orddict, undefined).
%% @doc Convert an ordered list of pairs `{Index, Value}' to a
%% corresponding extendible array. `Default' is used as the value for
%% uninitialized entries of the array. If `List' is not a proper,
%% ordered list of pairs whose first elements are nonnegative
%% integers, the call fails with reason `badarg'.
%%
%% @see new/2
%% @see to_orddict/1
-spec from_orddict(Orddict :: indx_pairs(Value :: Type), Default :: Type) ->
          array(Type).
from_orddict([], Default) ->
    new({default,Default});
from_orddict(List, Default) when is_list(List) ->
    %% Build leafs (with hole markers for sparse ranges) and then the
    %% tree above them; N is the size, M the resulting capacity.
    {E, N, M} = from_orddict_0(List, 0, ?LEAFSIZE, Default, []),
    #array{size = N, max = M, default = Default, elements = E};
from_orddict(_, _) ->
    erlang:error(badarg).
%% 2 pass implementation, first pass builds the needed leaf nodes
%% and adds hole sizes.
%% (inserts default elements for missing list entries in the leafs
%% and pads the last tuple if necessary).
%% Second pass builds the tree from the leafs and the holes.
%%
%% Doesn't build/expand unnecessary leaf nodes which costs memory
%% and time for sparse arrays.
from_orddict_0([], N, _Max, _D, Es) ->
%% Finished, build the resulting tree
case Es of
[E] ->
{E, N, ?LEAFSIZE};
_ ->
collect_leafs(N, Es, ?LEAFSIZE)
end;
from_orddict_0(Xs=[{Ix1, _}|_], Ix, Max0, D, Es0)
when Ix1 > Max0, is_integer(Ix1) ->
%% We have a hole larger than a leaf
Hole = Ix1-Ix,
Step = Hole - (Hole rem ?LEAFSIZE),
Next = Ix+Step,
from_orddict_0(Xs, Next, Next+?LEAFSIZE, D, [Step|Es0]);
from_orddict_0(Xs0=[{_, _}|_], Ix0, Max, D, Es) ->
%% Fill a leaf
{Xs,E,Ix} = from_orddict_1(Ix0, Max, Xs0, Ix0, D, []),
from_orddict_0(Xs, Ix, Ix+?LEAFSIZE, D, [E|Es]);
from_orddict_0(Xs, _, _, _,_) ->
erlang:error({badarg, Xs}).
from_orddict_1(Ix, Ix, Xs, N, _D, As) ->
%% Leaf is full
E = list_to_tuple(lists:reverse(As)),
{Xs, E, N};
from_orddict_1(Ix, Max, Xs, N0, D, As) ->
case Xs of
[{Ix, Val} | Xs1] ->
N = Ix+1,
from_orddict_1(N, Max, Xs1, N, D, [Val | As]);
[{Ix1, _} | _] when is_integer(Ix1), Ix1 > Ix ->
N = Ix+1,
from_orddict_1(N, Max, Xs, N, D, [D | As]);
[_ | _] ->
erlang:error({badarg, Xs});
_ ->
from_orddict_1(Ix+1, Max, Xs, N0, D, [D | As])
end.
%% Es is reversed i.e. starting from the largest leafs
collect_leafs(N, Es, S) ->
I = (N-1) div S + 1,
Pad = ((?NODESIZE - (I rem ?NODESIZE)) rem ?NODESIZE) * S,
case Pad of
0 ->
collect_leafs(?NODESIZE, Es, S, N, [S], []);
_ -> %% Pad the end
collect_leafs(?NODESIZE, [Pad|Es], S, N, [S], [])
end.
collect_leafs(0, Xs, S, N, As, Es) ->
E = list_to_tuple(As),
case Xs of
[] ->
case Es of
[] ->
{E, N, ?extend(S)};
_ ->
collect_leafs(N, lists:reverse([E | Es]),
?extend(S))
end;
_ ->
collect_leafs(?NODESIZE, Xs, S, N, [S], [E | Es])
end;
collect_leafs(I, [X | Xs], S, N, As0, Es0)
when is_integer(X) ->
%% A hole, pad accordingly.
Step0 = (X div S),
if
Step0 < I ->
As = push(Step0, S, As0),
collect_leafs(I-Step0, Xs, S, N, As, Es0);
I =:= ?NODESIZE ->
Step = Step0 rem ?NODESIZE,
As = push(Step, S, As0),
collect_leafs(I-Step, Xs, S, N, As, [X|Es0]);
I =:= Step0 ->
As = push(I, S, As0),
collect_leafs(0, Xs, S, N, As, Es0);
true ->
As = push(I, S, As0),
Step = Step0 - I,
collect_leafs(0, [Step*S|Xs], S, N, As, Es0)
end;
collect_leafs(I, [X | Xs], S, N, As, Es) ->
collect_leafs(I-1, Xs, S, N, [X | As], Es);
collect_leafs(?NODESIZE, [], S, N, [_], Es) ->
collect_leafs(N, lists:reverse(Es), ?extend(S)).
-ifdef(EUNIT).
%% Tests for from_orddict/1,2: round-trips via to_orddict /
%% sparse_to_orddict across leaf/node size boundaries, plus badarg
%% cases for malformed input.
from_orddict_test_() ->
    N0 = ?LEAFSIZE,
    N1 = ?NODESIZE*N0,
    N2 = ?NODESIZE*N1,
    N3 = ?NODESIZE*N2,
    N4 = ?NODESIZE*N3,
    [?_assert(array:size(from_orddict([])) =:= 0),
     ?_assert(array:is_fix(from_orddict([])) =:= false),
     ?_assert(array:size(from_orddict([{0,undefined}])) =:= 1),
     ?_assert(array:is_fix(from_orddict([{0,undefined}])) =:= false),
     ?_assert(array:size(from_orddict([{N0-1,undefined}])) =:= N0),
     ?_assert(array:size(from_orddict([{N,0}||N<-lists:seq(0,N1-1)]))
              =:= N1),
     ?_assertError({badarg,_}, from_orddict([foo])),
     ?_assertError({badarg,_}, from_orddict([{200,foo},{1,bar}])),
     ?_assertError({badarg,_}, from_orddict([{N,0}||N<-lists:seq(0,N0-1)] ++ not_a_list)),
     ?_assertError(badarg, from_orddict(no_array)),
     ?_assert(?LET(L, [{N,0}||N<-lists:seq(0,N0-1)],
                   L =:= to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N,0}||N<-lists:seq(0,N0)],
                   L =:= to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N,0}||N<-lists:seq(0,N2-1)],
                   L =:= to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N,0}||N<-lists:seq(0,N2)],
                   L =:= to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N,0}||N<-lists:seq(0,N3-1)],
                   L =:= to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N,0}||N<-lists:seq(0,N4-1)],
                   L =:= to_orddict(from_orddict(L)))),
     %% Hole in the beginning
     ?_assert(?LET(L, [{0,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N0,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N3,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N4,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N0-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N1-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N3-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{N4-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     %% Hole in middle
     ?_assert(?LET(L, [{0,0},{N0,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{0,0},{N1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{0,0},{N3,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{0,0},{N4,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{0,0},{N0-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{0,0},{N1-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{0,0},{N3-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L)))),
     ?_assert(?LET(L, [{0,0},{N4-1,0}],
                   L =:= sparse_to_orddict(from_orddict(L))))
    ].
-endif.
%% Function = (Index::integer(), Value::term()) -> term()
%% @doc Map the given function onto each element of the array. The
%% elements are visited in order from the lowest index to the highest.
%% If `Function' is not a function, the call fails with reason `badarg'.
%%
%% @see foldl/3
%% @see foldr/3
%% @see sparse_map/2

-spec map(Function, Array :: array(Type1)) -> array(Type2) when
      Function :: fun((Index :: array_indx(), Type1) -> Type2).

map(Function, Array=#array{size = N, elements = E, default = D})
  when is_function(Function, 2) ->
    if N > 0 ->
            A = Array#array{elements = []}, % kill reference, for GC
            A#array{elements = map_1(N-1, E, 0, Function, D)};
       true ->
            Array
    end;
map(_, _) ->
    erlang:error(badarg).

%% It might be simpler to traverse the array right-to-left, as done e.g.
%% in the to_orddict/1 function, but it is better to guarantee
%% left-to-right application over the elements - that is more likely to
%% be a generally useful property.

%% Map over a subtree: N is the highest used index within it, Ix the
%% external index of its first slot.
map_1(N, E=?NODEPATTERN(S), Ix, F, D) ->
    list_to_tuple(lists:reverse([S | map_2(1, E, Ix, F, D, [],
                                           N div S + 1, N rem S, S)]));
map_1(N, E, Ix, F, D) when is_integer(E) ->
    %% Unexpanded (default-only) subtree: expand it one level first,
    %% since map/2 must visit every element.
    map_1(N, unfold(E, D), Ix, F, D);
map_1(N, E, Ix, F, D) ->
    list_to_tuple(lists:reverse(map_3(1, E, Ix, F, D, N+1, []))).

%% Map the children of a node: full children up to slot N, then a
%% partial last child covering R+1 entries.
map_2(I, E, Ix, F, D, L, I, R, _S) ->
    map_2_1(I+1, E, [map_1(R, element(I, E), Ix, F, D) | L]);
map_2(I, E, Ix, F, D, L, N, R, S) ->
    map_2(I+1, E, Ix + S, F, D,
          [map_1(S-1, element(I, E), Ix, F, D) | L],
          N, R, S).

%% Copy the remaining (unused) child slots of the node unchanged.
map_2_1(I, E, L) when I =< ?NODESIZE ->
    map_2_1(I+1, E, [element(I, E) | L]);
map_2_1(_I, _E, L) ->
    L.

-spec map_3(pos_integer(), _, array_indx(),
            fun((array_indx(),_) -> _), _, non_neg_integer(), [X]) -> [X].

%% Map over a leaf tuple: apply F to the first N used slots, keep the
%% default D for the remaining slots.
map_3(I, E, Ix, F, D, N, L) when I =< N ->
    map_3(I+1, E, Ix+1, F, D, N, [F(Ix, element(I, E)) | L]);
map_3(I, E, Ix, F, D, N, L) when I =< ?LEAFSIZE ->
    map_3(I+1, E, Ix+1, F, D, N, [D | L]);
map_3(_I, _E, _Ix, _F, _D, _N, L) ->
    L.

%% Expand an unexpanded subtree placeholder (an integer size S) one
%% level: into a node of the next smaller size, or a default leaf.
unfold(S, _D) when S > ?LEAFSIZE ->
    ?NEW_NODE(?reduce(S));
unfold(_S, D) ->
    ?NEW_LEAF(D).
-ifdef(EUNIT).
%% Tests for map/2: identity and arithmetic functions over lists of
%% various sizes, default handling, and badarg for a non-function.
map_test_() ->
    N0 = ?LEAFSIZE,
    Id = fun (_,X) -> X end,
    Plus = fun(N) -> fun (_,X) -> X+N end end,
    Default = fun(_K,undefined) -> no_value;
                 (K,V) -> K+V
              end,
    [?_assertError(badarg, map([], new())),
     ?_assertError(badarg, map([], new(10))),
     ?_assert(to_list(map(Id, new())) =:= []),
     ?_assert(to_list(map(Id, new(1))) =:= [undefined]),
     ?_assert(to_list(map(Id, new(5,{default,0}))) =:= [0,0,0,0,0]),
     ?_assert(to_list(map(Id, from_list([1,2,3,4]))) =:= [1,2,3,4]),
     ?_assert(to_list(map(Plus(1), from_list([0,1,2,3]))) =:= [1,2,3,4]),
     ?_assert(to_list(map(Plus(-1), from_list(lists:seq(1,11))))
              =:= lists:seq(0,10)),
     ?_assert(to_list(map(Plus(11), from_list(lists:seq(0,99999))))
              =:= lists:seq(11,100010)),
     ?_assert([{0,0},{N0*2+1,N0*2+1+1},{N0*100+1,N0*100+1+2}] =:=
              sparse_to_orddict((map(Default,
                                     set(N0*100+1,2,
                                         set(N0*2+1,1,
                                             set(0,0,new())))))#array{default = no_value}))
    ].
-endif.
%% Function = (Index::integer(), Value::term()) -> term()
%% @doc Map the given function onto each element of the array, skipping
%% default-valued entries. The elements are visited in order from the
%% lowest index to the highest. If `Function' is not a function, the
%% call fails with reason `badarg'.
%%
%% @see map/2

-spec sparse_map(Function, Array :: array(Type1)) -> array(Type2) when
      Function :: fun((Index :: array_indx(), Type1) -> Type2).

sparse_map(Function, Array=#array{size = N, elements = E, default = D})
  when is_function(Function, 2) ->
    if N > 0 ->
            A = Array#array{elements = []}, % kill reference, for GC
            A#array{elements = sparse_map_1(N-1, E, 0, Function, D)};
       true ->
            Array
    end;
sparse_map(_, _) ->
    erlang:error(badarg).

%% see map/2 for details
%% TODO: we can probably optimize away the use of div/rem here

%% Map over a subtree, leaving unexpanded (default-only) subtrees
%% untouched - unlike map_1, which unfolds them.
sparse_map_1(N, E=?NODEPATTERN(S), Ix, F, D) ->
    list_to_tuple(lists:reverse([S | sparse_map_2(1, E, Ix, F, D, [],
                                                  N div S + 1,
                                                  N rem S, S)]));
sparse_map_1(_N, E, _Ix, _F, _D) when is_integer(E) ->
    %% Unexpanded subtree: all defaults, so nothing to map.
    E;
sparse_map_1(_N, E, Ix, F, D) ->
    list_to_tuple(lists:reverse(sparse_map_3(1, E, Ix, F, D, []))).

%% Map the children of a node: full children up to slot N, then a
%% partial last child covering R+1 entries.
sparse_map_2(I, E, Ix, F, D, L, I, R, _S) ->
    sparse_map_2_1(I+1, E,
                   [sparse_map_1(R, element(I, E), Ix, F, D) | L]);
sparse_map_2(I, E, Ix, F, D, L, N, R, S) ->
    sparse_map_2(I+1, E, Ix + S, F, D,
                 [sparse_map_1(S-1, element(I, E), Ix, F, D) | L],
                 N, R, S).

%% Copy the remaining (unused) child slots of the node unchanged.
sparse_map_2_1(I, E, L) when I =< ?NODESIZE ->
    sparse_map_2_1(I+1, E, [element(I, E) | L]);
sparse_map_2_1(_I, _E, L) ->
    L.
-ifdef(EUNIT).
%% Tests for sparse_map/2: default entries must be skipped (not passed
%% to the function) while non-default entries are transformed.
sparse_map_test_() ->
    N0 = ?LEAFSIZE,
    Id = fun (_,X) -> X end,
    Plus = fun(N) -> fun (_,X) -> X+N end end,
    KeyPlus = fun (K,X) -> K+X end,
    [?_assertError(badarg, sparse_map([], new())),
     ?_assertError(badarg, sparse_map([], new(10))),
     ?_assert(to_list(sparse_map(Id, new())) =:= []),
     ?_assert(to_list(sparse_map(Id, new(1))) =:= [undefined]),
     ?_assert(to_list(sparse_map(Id, new(5,{default,0}))) =:= [0,0,0,0,0]),
     ?_assert(to_list(sparse_map(Id, from_list([1,2,3,4]))) =:= [1,2,3,4]),
     ?_assert(to_list(sparse_map(Plus(1), from_list([0,1,2,3])))
              =:= [1,2,3,4]),
     ?_assert(to_list(sparse_map(Plus(-1), from_list(lists:seq(1,11))))
              =:= lists:seq(0,10)),
     ?_assert(to_list(sparse_map(Plus(11), from_list(lists:seq(0,99999))))
              =:= lists:seq(11,100010)),
     ?_assert(to_list(sparse_map(Plus(1), set(1,1,new({default,0}))))
              =:= [0,2]),
     ?_assert(to_list(sparse_map(Plus(1),
                                 set(3,4,set(0,1,new({default,0})))))
              =:= [2,0,0,5]),
     ?_assert(to_list(sparse_map(Plus(1),
                                 set(9,9,set(1,1,new({default,0})))))
              =:= [0,2,0,0,0,0,0,0,0,10]),
     ?_assert([{0,0},{N0*2+1,N0*2+1+1},{N0*100+1,N0*100+1+2}] =:=
              sparse_to_orddict(sparse_map(KeyPlus,
                                           set(N0*100+1,2,
                                               set(N0*2+1,1,
                                                   set(0,0,new()))))))
    ].
-endif.
%% @doc Fold the elements of the array using the given function and
%% initial accumulator value. The elements are visited in order from the
%% lowest index to the highest. If `Function' is not a function, the
%% call fails with reason `badarg'.
%%
%% @see foldr/3
%% @see map/2
%% @see sparse_foldl/3

-spec foldl(Function, InitialAcc :: A, Array :: array(Type)) -> B when
      Function :: fun((Index :: array_indx(), Value :: Type, Acc :: A) -> B).

foldl(Function, A, #array{size = N, elements = E, default = D})
  when is_function(Function, 3) ->
    if N > 0 ->
            foldl_1(N-1, E, A, 0, Function, D);
       true ->
            A
    end;
foldl(_, _, _) ->
    erlang:error(badarg).

%% Fold left over a subtree: N is the highest used index within it,
%% Ix the external index of its first slot.
foldl_1(N, E=?NODEPATTERN(S), A, Ix, F, D) ->
    foldl_2(1, E, A, Ix, F, D, N div S + 1, N rem S, S);
foldl_1(N, E, A, Ix, F, D) when is_integer(E) ->
    %% Unexpanded subtree: expand so every (default) element is folded.
    foldl_1(N, unfold(E, D), A, Ix, F, D);
foldl_1(N, E, A, Ix, F, _D) ->
    foldl_3(1, E, A, Ix, F, N+1).

%% Fold left over the children of a node: full children up to slot N,
%% then a partial last child covering R+1 entries.
foldl_2(I, E, A, Ix, F, D, I, R, _S) ->
    foldl_1(R, element(I, E), A, Ix, F, D);
foldl_2(I, E, A, Ix, F, D, N, R, S) ->
    foldl_2(I+1, E, foldl_1(S-1, element(I, E), A, Ix, F, D),
            Ix + S, F, D, N, R, S).
%% Fold left over a single leaf tuple: I is the 1-based slot, Ix the
%% external array index of that slot, N the number of used slots.
%% NOTE: the callback's first argument type was previously written as
%% the atom type `array_indx' (missing parentheses); fixed to the
%% intended user-defined type `array_indx()', matching foldr_3/5.
-spec foldl_3(pos_integer(), _, A, array_indx(),
              fun((array_indx(), _, A) -> B), integer()) -> B.

foldl_3(I, E, A, Ix, F, N) when I =< N ->
    foldl_3(I+1, E, F(Ix, element(I, E), A), Ix+1, F, N);
foldl_3(_I, _E, A, _Ix, _F, _N) ->
    A.
-ifdef(EUNIT).
%% Tests for foldl/3: counting, summing and list-building folds, and
%% left-to-right visiting order.
foldl_test_() ->
    N0 = ?LEAFSIZE,
    Count = fun (_,_,N) -> N+1 end,
    Sum = fun (_,X,N) -> N+X end,
    Reverse = fun (_,X,L) -> [X|L] end,
    Vals = fun(_K,undefined,{C,L}) -> {C+1,L};
              (K,X,{C,L}) -> {C,[K+X|L]}
           end,
    [?_assertError(badarg, foldl([], 0, new())),
     ?_assertError(badarg, foldl([], 0, new(10))),
     ?_assert(foldl(Count, 0, new()) =:= 0),
     ?_assert(foldl(Count, 0, new(1)) =:= 1),
     ?_assert(foldl(Count, 0, new(10)) =:= 10),
     ?_assert(foldl(Count, 0, from_list([1,2,3,4])) =:= 4),
     ?_assert(foldl(Count, 10, from_list([0,1,2,3,4,5,6,7,8,9])) =:= 20),
     ?_assert(foldl(Count, 1000, from_list(lists:seq(0,999))) =:= 2000),
     ?_assert(foldl(Sum, 0, from_list(lists:seq(0,10))) =:= 55),
     ?_assert(foldl(Reverse, [], from_list(lists:seq(0,1000)))
              =:= lists:reverse(lists:seq(0,1000))),
     ?_assert({999,[N0*100+1+2,N0*2+1+1,0]} =:=
              foldl(Vals, {0,[]},
                    set(N0*100+1,2,
                        set(N0*2+1,1,
                            set(0,0,new())))))
    ].
-endif.
%% @doc Fold the elements of the array using the given function and
%% initial accumulator value, skipping default-valued entries. The
%% elements are visited in order from the lowest index to the highest.
%% If `Function' is not a function, the call fails with reason `badarg'.
%%
%% @see foldl/3
%% @see sparse_foldr/3

-spec sparse_foldl(Function, InitialAcc :: A, Array :: array(Type)) -> B when
      Function :: fun((Index :: array_indx(), Value :: Type, Acc :: A) -> B).

sparse_foldl(Function, A, #array{size = N, elements = E, default = D})
  when is_function(Function, 3) ->
    if N > 0 ->
            sparse_foldl_1(N-1, E, A, 0, Function, D);
       true ->
            A
    end;
sparse_foldl(_, _, _) ->
    erlang:error(badarg).

%% see foldl/3 for details
%% TODO: this can be optimized

%% Fold left over a subtree, skipping unexpanded (default-only)
%% subtrees entirely - unlike foldl_1, which unfolds them.
sparse_foldl_1(N, E=?NODEPATTERN(S), A, Ix, F, D) ->
    sparse_foldl_2(1, E, A, Ix, F, D, N div S + 1, N rem S, S);
sparse_foldl_1(_N, E, A, _Ix, _F, _D) when is_integer(E) ->
    %% Unexpanded subtree: all defaults, nothing to fold.
    A;
sparse_foldl_1(N, E, A, Ix, F, D) ->
    sparse_foldl_3(1, E, A, Ix, F, D, N+1).

%% Fold left over the children of a node: full children up to slot N,
%% then a partial last child covering R+1 entries.
sparse_foldl_2(I, E, A, Ix, F, D, I, R, _S) ->
    sparse_foldl_1(R, element(I, E), A, Ix, F, D);
sparse_foldl_2(I, E, A, Ix, F, D, N, R, S) ->
    sparse_foldl_2(I+1, E, sparse_foldl_1(S-1, element(I, E), A, Ix, F, D),
                   Ix + S, F, D, N, R, S).
%% Fold left over one leaf tuple, skipping slots that hold the default
%% value D.  Slot is the 1-based tuple position, Ix the external array
%% index of that slot, N the number of used slots in this leaf.
sparse_foldl_3(Slot, Leaf, Acc0, Ix, F, D, N) when Slot =< N ->
    Elem = element(Slot, Leaf),
    Acc =
        if
            Elem =:= D -> Acc0;            % default entry: skip
            true -> F(Ix, Elem, Acc0)      % live entry: fold in
        end,
    sparse_foldl_3(Slot + 1, Leaf, Acc, Ix + 1, F, D, N);
sparse_foldl_3(_Slot, _Leaf, Acc, _Ix, _F, _D, _N) ->
    Acc.
-ifdef(EUNIT).
%% Tests for sparse_foldl/3: default entries must not be passed to the
%% folding function.
sparse_foldl_test_() ->
    N0 = ?LEAFSIZE,
    Count = fun (_,_,N) -> N+1 end,
    Sum = fun (_,X,N) -> N+X end,
    Reverse = fun (_,X,L) -> [X|L] end,
    Vals = fun(_K,undefined,{C,L}) -> {C+1,L};
              (K,X,{C,L}) -> {C,[K+X|L]}
           end,
    [?_assertError(badarg, sparse_foldl([], 0, new())),
     ?_assertError(badarg, sparse_foldl([], 0, new(10))),
     ?_assert(sparse_foldl(Count, 0, new()) =:= 0),
     ?_assert(sparse_foldl(Count, 0, new(1)) =:= 0),
     ?_assert(sparse_foldl(Count, 0, new(10,{default,1})) =:= 0),
     ?_assert(sparse_foldl(Count, 0, from_list([0,1,2,3,4],0)) =:= 4),
     ?_assert(sparse_foldl(Count, 0, from_list([0,1,2,3,4,5,6,7,8,9,0],0))
              =:= 9),
     ?_assert(sparse_foldl(Count, 0, from_list(lists:seq(0,999),0))
              =:= 999),
     ?_assert(sparse_foldl(Sum, 0, from_list(lists:seq(0,10), 5)) =:= 50),
     ?_assert(sparse_foldl(Reverse, [], from_list(lists:seq(0,1000), 0))
              =:= lists:reverse(lists:seq(1,1000))),
     ?_assert({0,[N0*100+1+2,N0*2+1+1,0]} =:=
              sparse_foldl(Vals, {0,[]},
                           set(N0*100+1,2,
                               set(N0*2+1,1,
                                   set(0,0,new())))))
    ].
-endif.
%% @doc Fold the elements of the array right-to-left using the given
%% function and initial accumulator value. The elements are visited in
%% order from the highest index to the lowest. If `Function' is not a
%% function, the call fails with reason `badarg'.
%%
%% @see foldl/3
%% @see map/2

-spec foldr(Function, InitialAcc :: A, Array :: array(Type)) -> B when
      Function :: fun((Index :: array_indx(), Value :: Type, Acc :: A) -> B).

foldr(Function, A, #array{size = N, elements = E, default = D})
  when is_function(Function, 3) ->
    if N > 0 ->
            I = N - 1,
            foldr_1(I, E, I, A, Function, D);
       true ->
            A
    end;
foldr(_, _, _) ->
    erlang:error(badarg).

%% this is based on to_orddict/1

%% Fold right over a subtree: I is the highest used index within it,
%% Ix the external index of that last element.
foldr_1(I, E=?NODEPATTERN(S), Ix, A, F, D) ->
    foldr_2(I div S + 1, E, Ix, A, F, D, I rem S, S-1);
foldr_1(I, E, Ix, A, F, D) when is_integer(E) ->
    %% Unexpanded subtree: expand so every (default) element is folded.
    foldr_1(I, unfold(E, D), Ix, A, F, D);
foldr_1(I, E, Ix, A, F, _D) ->
    I1 = I+1,
    foldr_3(I1, E, Ix-I1, A, F).

%% Fold right over the children of a node, from slot I down to 1.
%% R is the highest used index within the current child; R0 within
%% every remaining (full) child.
foldr_2(0, _E, _Ix, A, _F, _D, _R, _R0) ->
    A;
foldr_2(I, E, Ix, A, F, D, R, R0) ->
    foldr_2(I-1, E, Ix - R - 1,
            foldr_1(R, element(I, E), Ix, A, F, D),
            F, D, R0, R0).

-spec foldr_3(array_indx(), term(), integer(), A,
              fun((array_indx(), _, A) -> B)) -> B.

%% Fold right over a leaf tuple, from slot I down to 1; Ix+I is the
%% external index of slot I.
foldr_3(0, _E, _Ix, A, _F) ->
    A;
foldr_3(I, E, Ix, A, F) ->
    foldr_3(I-1, E, Ix, F(Ix+I, element(I, E), A), F).
-ifdef(EUNIT).
%% Tests for foldr/3: counting, summing and list-building folds, and
%% right-to-left visiting order.
foldr_test_() ->
    N0 = ?LEAFSIZE,
    Count = fun (_,_,N) -> N+1 end,
    Sum = fun (_,X,N) -> N+X end,
    List = fun (_,X,L) -> [X|L] end,
    Vals = fun(_K,undefined,{C,L}) -> {C+1,L};
              (K,X,{C,L}) -> {C,[K+X|L]}
           end,
    [?_assertError(badarg, foldr([], 0, new())),
     ?_assertError(badarg, foldr([], 0, new(10))),
     ?_assert(foldr(Count, 0, new()) =:= 0),
     ?_assert(foldr(Count, 0, new(1)) =:= 1),
     ?_assert(foldr(Count, 0, new(10)) =:= 10),
     ?_assert(foldr(Count, 0, from_list([1,2,3,4])) =:= 4),
     ?_assert(foldr(Count, 10, from_list([0,1,2,3,4,5,6,7,8,9])) =:= 20),
     ?_assert(foldr(Count, 1000, from_list(lists:seq(0,999))) =:= 2000),
     ?_assert(foldr(Sum, 0, from_list(lists:seq(0,10))) =:= 55),
     ?_assert(foldr(List, [], from_list(lists:seq(0,1000)))
              =:= lists:seq(0,1000)),
     ?_assert({999,[0,N0*2+1+1,N0*100+1+2]} =:=
              foldr(Vals, {0,[]},
                    set(N0*100+1,2,
                        set(N0*2+1,1,
                            set(0,0,new())))))
    ].
-endif.
%% @doc Fold the elements of the array right-to-left using the given
%% function and initial accumulator value, skipping default-valued
%% entries. The elements are visited in order from the highest index to
%% the lowest. If `Function' is not a function, the call fails with
%% reason `badarg'.
%%
%% @see foldr/3
%% @see sparse_foldl/3

-spec sparse_foldr(Function, InitialAcc :: A, Array :: array(Type)) -> B when
      Function :: fun((Index :: array_indx(), Value :: Type, Acc :: A) -> B).

sparse_foldr(Function, A, #array{size = N, elements = E, default = D})
  when is_function(Function, 3) ->
    if N > 0 ->
            I = N - 1,
            sparse_foldr_1(I, E, I, A, Function, D);
       true ->
            A
    end;
sparse_foldr(_, _, _) ->
    erlang:error(badarg).

%% see foldr/3 for details
%% TODO: this can be optimized

%% Fold right over a subtree, skipping unexpanded (default-only)
%% subtrees entirely - unlike foldr_1, which unfolds them.
sparse_foldr_1(I, E=?NODEPATTERN(S), Ix, A, F, D) ->
    sparse_foldr_2(I div S + 1, E, Ix, A, F, D, I rem S, S-1);
sparse_foldr_1(_I, E, _Ix, A, _F, _D) when is_integer(E) ->
    %% Unexpanded subtree: all defaults, nothing to fold.
    A;
sparse_foldr_1(I, E, Ix, A, F, D) ->
    I1 = I+1,
    sparse_foldr_3(I1, E, Ix-I1, A, F, D).

%% Fold right over the children of a node, from slot I down to 1.
sparse_foldr_2(0, _E, _Ix, A, _F, _D, _R, _R0) ->
    A;
sparse_foldr_2(I, E, Ix, A, F, D, R, R0) ->
    sparse_foldr_2(I-1, E, Ix - R - 1,
                   sparse_foldr_1(R, element(I, E), Ix, A, F, D),
                   F, D, R0, R0).

-spec sparse_foldr_3(array_indx(), _, array_indx(), A,
                     fun((array_indx(), _, A) -> B), _) -> B.

%% Fold right over a leaf tuple, skipping slots that hold the default
%% value D; Ix+I is the external index of slot I.
sparse_foldr_3(0, _T, _Ix, A, _F, _D) ->
    A;
sparse_foldr_3(I, T, Ix, A, F, D) ->
    case element(I, T) of
        D -> sparse_foldr_3(I-1, T, Ix, A, F, D);
        E -> sparse_foldr_3(I-1, T, Ix, F(Ix+I, E, A), F, D)
    end.
%% @doc Get the number of entries in the array up until the last
%% non-default valued entry. In other words, returns `I+1' if `I' is the
%% last non-default valued entry in the array, or zero if no such entry
%% exists.
%% @see size/1
%% @see resize/1

-spec sparse_size(Array :: array()) -> non_neg_integer().

sparse_size(A) ->
    %% sparse_foldr/3 visits the highest non-default entry first, so
    %% throwing on the first call gives its index directly; reaching
    %% the fold's result [] means there were no non-default entries.
    F = fun (I, _V, _A) -> throw({value, I}) end,
    try sparse_foldr(F, [], A) of
        [] -> 0
    catch
        {value, I} ->
            I + 1
    end.
-ifdef(EUNIT).
%% Tests for sparse_foldr/3 and sparse_size/1: default entries are
%% skipped, and sparse_size reports one past the last non-default index.
sparse_foldr_test_() ->
    N0 = ?LEAFSIZE,
    Count = fun (_,_,N) -> N+1 end,
    Sum = fun (_,X,N) -> N+X end,
    List = fun (_,X,L) -> [X|L] end,
    Vals = fun(_K,undefined,{C,L}) -> {C+1,L};
              (K,X,{C,L}) -> {C,[K+X|L]}
           end,
    [?_assertError(badarg, sparse_foldr([], 0, new())),
     ?_assertError(badarg, sparse_foldr([], 0, new(10))),
     ?_assert(sparse_foldr(Count, 0, new()) =:= 0),
     ?_assert(sparse_foldr(Count, 0, new(1)) =:= 0),
     ?_assert(sparse_foldr(Count, 0, new(10,{default,1})) =:= 0),
     ?_assert(sparse_foldr(Count, 0, from_list([0,1,2,3,4],0)) =:= 4),
     ?_assert(sparse_foldr(Count, 0, from_list([0,1,2,3,4,5,6,7,8,9,0],0))
              =:= 9),
     ?_assert(sparse_foldr(Count, 0, from_list(lists:seq(0,999),0))
              =:= 999),
     ?_assert(sparse_foldr(Sum, 0, from_list(lists:seq(0,10),5)) =:= 50),
     ?_assert(sparse_foldr(List, [], from_list(lists:seq(0,1000),0))
              =:= lists:seq(1,1000)),
     ?_assert(sparse_size(new()) =:= 0),
     ?_assert(sparse_size(new(8)) =:= 0),
     ?_assert(sparse_size(array:set(7, 0, new())) =:= 8),
     ?_assert(sparse_size(array:set(7, 0, new(10))) =:= 8),
     ?_assert(sparse_size(array:set(99, 0, new(10,{fixed,false})))
              =:= 100),
     ?_assert(sparse_size(array:set(7, undefined, new())) =:= 0),
     ?_assert(sparse_size(array:from_list([1,2,3,undefined])) =:= 3),
     ?_assert(sparse_size(array:from_orddict([{3,0},{17,0},{99,undefined}]))
              =:= 18),
     ?_assert({0,[0,N0*2+1+1,N0*100+1+2]} =:=
              sparse_foldr(Vals, {0,[]},
                           set(N0*100+1,2,
                               set(N0*2+1,1,
                                   set(0,0,new())))))
    ].
-endif.
-endif.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_debug).
-export([
help/0,
help/1
]).
-export([
opened_files/0,
opened_files_by_regexp/1,
opened_files_contains/1
]).
-export([
process_name/1,
link_tree/1,
link_tree/2,
mapfold_tree/3,
map_tree/2,
fold_tree/3,
linked_processes_info/2,
print_linked_processes/1
]).
%% @doc List the help topics understood by help/1.
%% Fixed: the list previously advertised `mapfold', `map' and `fold',
%% but the matching help/1 clauses are `mapfold_tree', `map_tree' and
%% `fold_tree', so e.g. `help(map)' fell through to the Unknown clause.
%% Keep this list in sync with the clauses of help/1.
help() ->
    [
        opened_files,
        opened_files_by_regexp,
        opened_files_contains,
        process_name,
        link_tree,
        mapfold_tree,
        map_tree,
        fold_tree,
        linked_processes_info,
        print_linked_processes
    ].
-spec help(Function :: atom()) -> ok.

%% @doc Print usage information for one exported debug helper.
%% Text fixes only: spelling/grammar in the help strings, and the
%% fold_tree callback signature, which previously showed a 3-argument
%% fun although fold_tree/3 invokes Fun(Key, Value, Pos, Acc).
help(opened_files) ->
    io:format("
    opened_files()
    --------------

    Returns list of currently opened files
    It iterates through `erlang:ports()` and filters out all ports which are not efile.
    It uses `process_info(Pid, dictionary)` to get info about couch_file properties.
    ---
    ", []);
help(opened_files_by_regexp) ->
    io:format("
    opened_files_by_regexp(FileRegExp)
    ----------------------------------
    Returns list of currently opened files which name match the provided regular expression.
    It iterates through `erlang:ports()` and filters out all ports which are not efile.
    It uses `process_info(Pid, dictionary)` to get info about couch_file properties.
    ---
    ", []);
help(opened_files_contains) ->
    io:format("
    opened_files_contains(SubString)
    --------------------------------
    Returns list of currently opened files whose names contain the provided SubString.
    It iterates through `erlang:ports()` and filters out all ports which are not efile.
    It uses `process_info(Pid, dictionary)` to get info about couch_file properties.
    ---
    ", []);
help(process_name) ->
    io:format("
    process_name(Pid)
    -----------------
    Uses heuristics to figure out the process name.
    The heuristic is based on the following information about the process:
    - process_info(Pid, registered_name)
    - '$initial_call' key in process dictionary
    - process_info(Pid, initial_call)
    ---
    ", []);
help(link_tree) ->
    io:format("
    link_tree(Pid)
    --------------
    Returns a tree which represents a cluster of linked processes.
    This function receives the initial Pid to start from.
    The function doesn't recurse to pids older than the initial one.
    The Pids which are smaller than the initial Pid are still shown in the output.
    This is an analogue of `link_tree(RootPid, []).`
    link_tree(Pid, Info)
    --------------------
    Returns a tree which represents a cluster of linked processes.
    This function receives the initial Pid to start from.
    The function doesn't recurse to pids older than the initial one.
    The Pids which are smaller than the initial Pid are still shown in the output.
    The info argument is a list of process_info_item() as documented in
    erlang:process_info/2. We make no attempt to reject dangerous items.
    Be warned that passing some of them, such as `messages` for example,
    can be dangerous in a very busy system.
    ---
    ", []);
help(mapfold_tree) ->
    io:format("
    mapfold_tree(Tree, Acc, Fun)
    -----------------------
    Traverses all nodes of the tree. It is a combination of a map and fold.
    It calls a user provided callback for every node of the tree.
    `Fun(Key, Value, Pos, Acc) -> {NewValue, NewAcc}`.
    Where:
    - Key of the node (usually Pid of a process)
    - Value of the node (usually information collected by link_tree)
    - Pos - depth from the root of the tree
    - Acc - user's accumulator
    ---
    ", []);
help(map_tree) ->
    io:format("
    map_tree(Tree, Fun)
    -----------------------
    Traverses all nodes of the tree in order to modify them.
    It calls a user provided callback
    `Fun(Key, Value, Pos) -> NewValue`
    Where:
    - Key of the node (usually Pid of a process)
    - Value of the node (usually information collected by link_tree)
    - Pos - depth from the root of the tree
    ---
    ", []);
help(fold_tree) ->
    io:format("
    fold_tree(Tree, Acc, Fun)
    Traverses all nodes of the tree in order to collect some aggregated information
    about the tree. It calls a user provided callback
    `Fun(Key, Value, Pos, Acc) -> NewAcc`
    Where:
    - Key of the node (usually Pid of a process)
    - Value of the node (usually information collected by link_tree)
    - Pos - depth from the root of the tree
    - Acc - user's accumulator
    ---
    ", []);
help(linked_processes_info) ->
    io:format("
    linked_processes_info(Pid, Info)
    --------------------------------
    Convenience function which reduces the amount of typing compared to direct
    use of link_tree.
    - Pid: initial Pid to start from
    - Info: a list of process_info_item() as documented
      in erlang:process_info/2.
    ---
    ", []);
help(print_linked_processes) ->
    io:format("
    - print_linked_processes(Pid)
    - print_linked_processes(RegisteredName)
    - print_linked_processes(couch_index_server)
    ---------------------------
    Print cluster of linked processes. This function receives the
    initial Pid to start from. The function doesn't recurse to pids
    older than the initial one. The output would look similar to:
    ```
    couch_debug:print_linked_processes(whereis(couch_index_server)).
    name                                         | reductions | message_queue_len |  memory
    couch_index_server[<0.288.0>]                |   478240   |         0         |  109696
      couch_index:init/1[<0.3520.22>]            |    4899    |         0         |  109456
        couch_file:init/1[<0.886.22>]            |   11973    |         0         |  67984
          couch_index:init/1[<0.3520.22>]        |    4899    |         0         |  109456
    ```
    ---
    ", []);
help(Unknown) ->
    io:format("Unknown function: `~p`. Please try one of the following:~n", [Unknown]),
    [io:format("    - ~s~n", [Function]) || Function <- help()],
    io:format("    ---~n", []),
    ok.
-spec opened_files() ->
    [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].

%% @doc Return info tuples for every currently opened couch_file.
%% Scans all efile ports and keeps only those whose connected process
%% couch_file:process_info/1 recognizes.
opened_files() ->
    IsEfile = fun(Port) ->
        {name, "efile"} =:= erlang:port_info(Port, name)
    end,
    lists:filtermap(
        fun(Port) ->
            case couch_file_port_info(Port) of
                undefined -> false;
                PortInfo -> {true, PortInfo}
            end
        end,
        lists:filter(IsEfile, erlang:ports())
    ).
%% Build the {Port, Pid, Fd, FilePath} tuple for one efile port, or
%% return undefined when couch_file:process_info/1 does not recognize
%% the port's connected process as a couch_file.
couch_file_port_info(Port) ->
    {connected, Pid} = erlang:port_info(Port, connected),
    case couch_file:process_info(Pid) of
        {Fd, FilePath} ->
            {Port, Pid, Fd, FilePath};
        undefined ->
            undefined
    end.
-spec opened_files_by_regexp(FileRegExp :: iodata()) ->
    [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].

%% @doc Like opened_files/0, but keeps only files whose path matches
%% the given regular expression.
opened_files_by_regexp(FileRegExp) ->
    {ok, RegExp} = re:compile(FileRegExp),
    PathMatches = fun({_Port, _Pid, _Fd, Path}) ->
        nomatch =/= re:run(Path, RegExp)
    end,
    lists:filter(PathMatches, couch_debug:opened_files()).
-spec opened_files_contains(FileNameFragment :: iodata()) ->
    [{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].

%% @doc Like opened_files/0, but keeps only files whose path contains
%% the given substring.
%% Modernized: string:str/2 belongs to the obsolete string API; the
%% Unicode-aware string:find/2 (returns `nomatch' when absent) is the
%% recommended replacement.
opened_files_contains(FileNameFragment) ->
    lists:filter(fun({_Port, _Pid, _Fd, Path}) ->
        string:find(Path, FileNameFragment) =/= nomatch
    end, couch_debug:opened_files()).
%% @doc Render a human-readable name for a process (or any other term).
%% For a registered process this is "Name[Pid]"; otherwise "M:F/A[Pid]"
%% using the '$initial_call' dictionary entry when present, falling
%% back to process_info(Pid, initial_call).
process_name(Pid) when is_pid(Pid) ->
    case process_info(Pid, registered_name) of
        {registered_name, Name} ->
            iolist_to_list(io_lib:format("~s[~p]", [Name, Pid]));
        _ ->
            {Mod, Fun, Arity} = initial_mfa(Pid),
            iolist_to_list(io_lib:format("~p:~p/~p[~p]", [Mod, Fun, Arity, Pid]))
    end;
process_name(Other) ->
    iolist_to_list(io_lib:format("~p", [Other])).

%% Pick the most informative {M, F, A} for an unregistered process:
%% prefer the '$initial_call' dictionary entry (set by proc_lib), and
%% fall back to the raw initial_call info item.
initial_mfa(Pid) ->
    {dictionary, Dict} = process_info(Pid, dictionary),
    case proplists:get_value('$initial_call', Dict) of
        undefined ->
            {initial_call, MFA} = process_info(Pid, initial_call),
            MFA;
        {_M, _F, _A} = MFA ->
            MFA
    end.
%% Flatten iodata into a plain character list.
iolist_to_list(IoData) ->
    Bin = iolist_to_binary(IoData),
    binary_to_list(Bin).
%% @doc Build the link tree rooted at RootPid, collecting no extra
%% process info items.  Equivalent to link_tree(RootPid, []).
link_tree(RootPid) ->
    link_tree(RootPid, []).
%% @doc Build the link tree rooted at RootPid, collecting the given
%% process info items and keeping each node's properties unmodified.
link_tree(RootPid, Info) ->
    KeepProps = fun(_Pid, Props) -> Props end,
    link_tree(RootPid, Info, KeepProps).
%% @doc Build the link tree rooted at RootPid; Fun(Pid, Props) maps
%% each node's collected properties before they are stored.  The
%% `links' item is always requested since the traversal follows it.
link_tree(RootPid, Info, Fun) ->
    Visited0 = gb_trees:empty(),
    {_Visited, Tree} =
        link_tree(RootPid, [links | Info], Visited0, 0, [RootPid], Fun),
    Tree.
%% Depth-first walk over linked processes/ports.  Visited0 is a
%% gb_tree of already-seen pids (to cut cycles); Pos is the current
%% depth.  Only pids >= RootPid are recursed into, so the walk does
%% not descend into processes older than the root (they still appear
%% as childless nodes).
link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) ->
    case gb_trees:lookup(Pid, Visited0) of
        {value, Props} ->
            %% Already visited: emit the node without children.
            {Visited0, [{Pos, {Pid, Fun(Pid, Props), []}}]};
        none when RootPid =< Pid ->
            %% New pid at or after the root: record it and recurse
            %% into its links, then continue with the siblings.
            Props = info(Pid, Info),
            Visited1 = gb_trees:insert(Pid, Props, Visited0),
            {links, Children} = lists:keyfind(links, 1, Props),
            {Visited2, NewTree} = link_tree(
                RootPid, Info, Visited1, Pos + 1, Children, Fun),
            {Visited3, Result} = link_tree(
                RootPid, Info, Visited2, Pos, Rest, Fun),
            {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result};
        none ->
            %% Pid older than the root: record it but do not descend.
            Props = info(Pid, Info),
            Visited1 = gb_trees:insert(Pid, Props, Visited0),
            {Visited2, Result} = link_tree(
                RootPid, Info, Visited1, Pos, Rest, Fun),
            {Visited2, [{Pos, {Pid, Fun(Pid, Props), []}}] ++ Result}
    end;
link_tree(_RootPid, _Info, Visited, _Pos, [], _Fun) ->
    {Visited, []}.
%% Fetch the requested info items for a process or a port.  Items not
%% on the corresponding whitelist are silently dropped before calling
%% the underlying BIF, and the remaining items are deduplicated/sorted.
info(Pid, Info) when is_pid(Pid) ->
    %% Whitelist of keys accepted by erlang:process_info/2.
    ValidProps = [
        backtrace,
        binary,
        catchlevel,
        current_function,
        current_location,
        current_stacktrace,
        dictionary,
        error_handler,
        garbage_collection,
        garbage_collection_info,
        group_leader,
        heap_size,
        initial_call,
        links,
        last_calls,
        memory,
        message_queue_len,
        messages,
        min_heap_size,
        min_bin_vheap_size,
        monitored_by,
        monitors,
        message_queue_data,
        priority,
        reductions,
        registered_name,
        sequential_trace_token,
        stack_size,
        status,
        suspending,
        total_heap_size,
        trace,
        trap_exit
    ],
    Validated = lists:filter(fun(P) -> lists:member(P, ValidProps) end, Info),
    process_info(Pid, lists:usort(Validated));
info(Port, Info) when is_port(Port) ->
    %% Whitelist of keys accepted by erlang:port_info/2.
    ValidProps = [
        registered_name,
        id,
        connected,
        links,
        name,
        input,
        output,
        os_pid
    ],
    Validated = lists:filter(fun(P) -> lists:member(P, ValidProps) end, Info),
    port_info(Port, lists:usort(Validated)).
%% Collect the requested erlang:port_info/2 items for Port, skipping
%% any item that does not yield an {Item, Value} tuple or that raises
%% (e.g. badarg).  Result is in reverse order of Items, as before.
%% Modernized: the old-style `catch Expr' (which conflates values with
%% {'EXIT', _} tuples) is replaced by an explicit try...of...catch.
port_info(Port, Items) ->
    lists:foldl(fun(Item, Acc) ->
        try erlang:port_info(Port, Item) of
            {Item, _Value} = Info -> [Info | Acc];
            _Else -> Acc
        catch
            _:_ -> Acc
        end
    end, [], Items).
%% @doc Combined map/fold over a link tree.  Nodes are visited
%% depth-first, left to right: a node's own value is transformed
%% before its subtree, then its right siblings.  Fun(Key, Value, Pos,
%% Acc) must return {NewValue, NewAcc}.
mapfold_tree(Nodes, Acc, Fun) ->
    lists:mapfoldl(
        fun({Pos, {Key, Value0, Children0}}, AccIn) ->
            {Value1, Acc1} = Fun(Key, Value0, Pos, AccIn),
            {Children1, Acc2} = mapfold_tree(Children0, Acc1, Fun),
            {{Pos, {Key, Value1, Children1}}, Acc2}
        end,
        Acc,
        Nodes
    ).
%% @doc Map over every node of a link tree.  Fun(Key, Value, Pos)
%% returns the node's new value; the tree shape is preserved.
map_tree(Tree, Fun) ->
    Wrapped = fun(Key, Value, Pos, Acc) ->
        {Fun(Key, Value, Pos), Acc}
    end,
    {Mapped, nil} = mapfold_tree(Tree, nil, Wrapped),
    Mapped.
%% @doc Fold over every node of a link tree.  Fun(Key, Value, Pos,
%% AccIn) returns the new accumulator; node values are left unchanged.
fold_tree(Tree, Acc, Fun) ->
    Wrapped = fun(Key, Value, Pos, AccIn) ->
        {Value, Fun(Key, Value, Pos, AccIn)}
    end,
    {_Tree, Result} = mapfold_tree(Tree, Acc, Wrapped),
    Result.
%% @doc Build the link tree rooted at Pid, annotating each node with a
%% human-readable process name alongside the collected Info items.
linked_processes_info(Pid, Info) ->
    Annotate = fun(P, Props) -> {process_name(P), Props} end,
    link_tree(Pid, Info, Annotate).
%% @doc Print the cluster of processes linked to the given process as
%% an indented table.  Accepts a pid, a registered name, or the atom
%% couch_index_server (which gets extra per-file detail).
print_linked_processes(couch_index_server) ->
    print_couch_index_server_processes();
print_linked_processes(Name) when is_atom(Name) ->
    %% Resolve a registered name; report an error tuple if unknown.
    case whereis(Name) of
        undefined -> {error, {unknown, Name}};
        Pid -> print_linked_processes(Pid)
    end;
print_linked_processes(Pid) when is_pid(Pid) ->
    Info = [reductions, message_queue_len, memory],
    %% Column widths/alignments for print_tree/2.
    TableSpec = [
        {50, left, name}, {12, centre, reductions},
        {19, centre, message_queue_len}, {10, centre, memory}
    ],
    Tree = linked_processes_info(Pid, Info),
    print_tree(Tree, TableSpec).
%% Derive an identifying string for a process row. For couch_file
%% processes we render "Fd:Port:Path" (path shortened relative to the
%% configured data directories); every other process gets an empty id.
id("couch_file:init" ++ _, Pid, _Props) ->
    case couch_file:process_info(Pid) of
        {{file_descriptor, prim_file, {Port, Fd}}, FilePath} ->
            Parts = [
                term2str(Fd), ":",
                term2str(Port), ":",
                shorten_path(FilePath)
            ],
            term2str(Parts);
        undefined ->
            ""
    end;
id(_IdStr, _Pid, _Props) ->
    "".
%% Specialized report for couch_index_server: same columns as
%% print_linked_processes/1 plus an extra `id` column identifying the
%% underlying couch_file (fd:port:path) where applicable.
print_couch_index_server_processes() ->
    InfoProps = [reductions, message_queue_len, memory],
    Spec = [
        {50, left, name},
        {12, centre, reductions},
        {19, centre, message_queue_len},
        {14, centre, memory},
        {id}
    ],
    Annotate = fun(P, Props) ->
        IdStr = process_name(P),
        {IdStr, [{id, id(IdStr, P, Props)} | Props]}
    end,
    Tree = link_tree(whereis(couch_index_server), InfoProps, Annotate),
    print_tree(Tree, Spec).
%% Strip the longest matching configured data-directory prefix
%% (view_index_dir or database_dir) from Path, for compact display.
%% If neither prefix matches, the path is returned unchanged.
shorten_path(Path) ->
    Roots = [
        list_to_binary(config:get("couchdb", "view_index_dir")),
        list_to_binary(config:get("couchdb", "database_dir"))
    ],
    File = list_to_binary(Path),
    PrefixLen = lists:max(
        [binary:longest_common_prefix([File, Root]) || Root <- Roots]),
    <<_Prefix:PrefixLen/binary, Rest/binary>> = File,
    binary_to_list(Rest).
%% Pretty print functions
%% Limitations:
%% - The first column has to be specified as {Width, left, Something}
%% The TableSpec is a list of either:
%% - {Value}
%% - {Width, Align, Value}
%% Align is one of the following:
%% - left
%% - centre
%% - right
%% Print the annotated tree as a table: a header row built from the
%% TableSpec, then one row per node, indented two spaces per tree level.
print_tree(Tree, TableSpec) ->
    io:format("~s~n", [format(TableSpec)]),
    PrintRow = fun(_Key, {Id, Props}, Pos) ->
        io:format("~s~n", [table_row(Id, Pos * 2, Props, TableSpec)])
    end,
    map_tree(Tree, PrintRow),
    ok.
%% Render a full table row (or header) by formatting each column of the
%% spec and joining them with "|" separators.
format(Spec) ->
    string:join([format_value(Column) || Column <- Spec], "|").
%% Render one column. {Value} is printed unpadded; {Width, Align, Value}
%% is padded to Width via the string module, where Align names the
%% padding function to call (string:left/2, string:centre/2 or
%% string:right/2 — a deliberate dynamic dispatch on the spec atom).
format_value({Value}) -> term2str(Value);
format_value({Width, Align, Value}) -> string:Align(term2str(Value), Width).
%% Replace the column key in a spec entry with the corresponding value
%% from Props, keeping the spec's shape ({K} or {Width, Align, K}).
bind_value({Key}, Props) when is_list(Props) ->
    {prop_value(Key, Props)};
bind_value({Width, Align, Key}, Props) when is_list(Props) ->
    {Width, Align, prop_value(Key, Props)}.

%% Look up Key in a property list; a missing key crashes with badarg
%% (element/2 on `false`), matching the original strictness.
prop_value(Key, Props) ->
    element(2, lists:keyfind(Key, 1, Props)).
%% Convert an arbitrary term to a flat string for table display.
%% Lists are assumed to be iolists/strings and are flattened as-is.
term2str(Atom) when is_atom(Atom) -> atom_to_list(Atom);
term2str(Binary) when is_binary(Binary) -> binary_to_list(Binary);
term2str(Integer) when is_integer(Integer) -> integer_to_list(Integer);
term2str(Float) when is_float(Float) -> float_to_list(Float);
term2str(String) when is_list(String) -> lists:flatten(String);
% Fallback: pretty-print any other term. NOTE(review): iolist_to_list/1
% is not a stdlib function — presumably a helper defined elsewhere in
% this module; confirm it exists before relying on this clause.
term2str(Term) -> iolist_to_list(io_lib:format("~p", [Term])).
%% Render one data row: the first column is the Key, indented by Indent
%% spaces; the remaining columns are pulled from Props using the keys
%% named in the spec tail. The key column width is reduced by Indent so
%% nested rows keep the overall table aligned. Requires the first spec
%% entry to be of the {Width, Align, _} form (see module comment above).
table_row(Key, Indent, Props, [{KeyWidth, Align, _} | Spec]) ->
    Values = [bind_value(Format, Props) || Format <- Spec],
    KeyStr = string:Align(term2str(Key), KeyWidth - Indent),
    [string:copies(" ", Indent), KeyStr, "|" | format(Values)].
-ifdef(TEST).
-include_lib("couch/include/couch_eunit.hrl").
%% Test fixture entry point: build a random tree of linked processes of
%% the given depth and return the sorted list of all spawned pids.
random_processes(Depth) ->
    random_processes([], Depth).
%% Recursively spawn a random tree of linked processes (and sometimes a
%% port) for link_tree tests. Each spawned child first reports its own
%% subtree of pids back via the unique Ref, then parks in a receive so
%% it stays alive until teardown. Returns the deduplicated pid list.
random_processes(Pids, 0) ->
    lists:usort(Pids);
random_processes(Acc, Depth) ->
    Caller = self(),
    Ref = make_ref(),
    % oneof/1 only ever yields spawn_link or open_port here, so the
    % former spawn_monitor/spawn case branches were unreachable dead
    % code and have been removed.
    Pid = case oneof([spawn_link, open_port]) of
        spawn_link ->
            spawn_link(fun() ->
                Caller ! {Ref, random_processes(Depth - 1)},
                receive looper -> ok end
            end);
        open_port ->
            % Same as spawn_link, but the child also opens and links a
            % port so port vertices appear in the tree.
            spawn_link(fun() ->
                Port = erlang:open_port({spawn, "sleep 10"}, []),
                true = erlang:link(Port),
                Caller ! {Ref, random_processes(Depth - 1)},
                receive looper -> ok end
            end)
    end,
    receive
        {Ref, Pids} -> random_processes([Pid | Pids] ++ Acc, Depth - 1)
    end.
%% Pick one element of Options uniformly at random (test helper).
oneof(Options) ->
    lists:nth(couch_rand:uniform(length(Options)), Options).
%% Build a test fixture: a random process tree of depth 5, returning the
%% root pid, all spawned pids, and the computed link tree.
tree() ->
    [InitialPid | _] = Processes = random_processes(5),
    {InitialPid, Processes, link_tree(InitialPid)}.
%% eunit foreach fixture: create a fresh process tree per test.
setup() ->
    tree().
%% eunit foreach cleanup: send every fixture process a normal exit
%% signal (harmless to non-trapping processes still parked in receive;
%% they are garbage-collected when the test node unwinds).
teardown({_InitialPid, Processes, _Tree}) ->
    lists:map(fun(Pid) -> exit(Pid, normal) end, Processes).
%% eunit generator: run each property against a fresh process-tree
%% fixture (setup/0 builds it, teardown/1 kills it after each test).
link_tree_test_() ->
    {
        "link_tree tests",
        {
            foreach,
            fun setup/0, fun teardown/1,
            [
                fun should_have_same_shape/1,
                fun should_include_extra_info/1
            ]
        }
    }.
%% An info tree requested with no properties must have exactly the same
%% shape (positions and pids) as the raw link tree of the fixture.
should_have_same_shape({InitialPid, _Processes, Tree}) ->
    ?_test(begin
        InfoTree = linked_processes_info(InitialPid, []),
        % is_equal/2 crashes (function_clause) on a shape mismatch,
        % which also fails the test.
        ?assert(is_equal(InfoTree, Tree)),
        ok
    end).
%% Every process vertex of an info tree requested with extra properties
%% must carry those properties; port vertices are ignored (port_info
%% does not support these items). Fixed an "unused variable 'Port'"
%% compiler warning by renaming the catch-all binding to _Port.
should_include_extra_info({InitialPid, _Processes, _Tree}) ->
    Info = [reductions, message_queue_len, memory],
    ?_test(begin
        InfoTree = linked_processes_info(InitialPid, Info),
        map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) ->
            case Key of
                Pid when is_pid(Pid) ->
                    ?assert(lists:keymember(reductions, 1, Props)),
                    ?assert(lists:keymember(message_queue_len, 1, Props)),
                    ?assert(lists:keymember(memory, 1, Props));
                _Port ->
                    ok
            end,
            Props
        end),
        ok
    end).
%% Structural comparison of two link trees: positions and pids must
%% match at every vertex (annotations are ignored). Note that a shape
%% mismatch does not return false — the clause heads simply fail to
%% match and the caller crashes with function_clause, as before.
is_equal([], []) ->
    true;
is_equal([{Pos, {Pid, _, SubA}} | RestA], [{Pos, {Pid, _, SubB}} | RestB]) ->
    is_equal(RestA, RestB) andalso is_equal(SubA, SubB).
-endif.
% Both garbage collection (GC) and re-replication (RR) require working
% from a consistent snapshot of DDFS. Since computing this snapshot
% is expensive in both time and memory, once computed, the snapshot is
% used for both purposes, and GC and RR are implemented together. GC
% is performed before RR, so that GC can free up disk space that can
% be used by RR.
%
% GC performs the following operations:
%
% GC1) Remove leftover !partial. files (from failed PUT operations)
% GC2) Remove orphaned tags (old versions and deleted tags)
% GC3) Remove orphaned blobs (blobs not referred by any tag)
% GC4) Recover lost replicas for non-orphaned blobs (from lost tag updates)
% GC5) Delete old deleted tags from the +deleted metatag
%
% while RR does the following:
%
% RR1) Re-replicate blobs that don't have enough replicas
% RR2) Update tags that contain blobs that were re-replicated, and/or
% re-replicate tags that don't have enough replicas.
%
% GC and RR are performed in the following phases:
%
% - Startup and initialization (phase = start)
%
% The various master and node processes are started up, and a
% current listing of tags in the system is retrieved from
% ddfs_master. Each node, meanwhile, traverses all its volumes
% and creates a cache of all existing blobs and tags it finds,
% deleting any partial files along the way (GC1).
%
% - Build snapshot (phase = build_map/map_wait)
%
% Two maps (gc_tag_map, gc_blob_map) of the current tags and blobs
% in use (or referenced) in DDFS is built up. These maps are
% built on the master, which iterates over the current list of
% tags in DDFS, and checks whether each blob referenced by a tag
% is present on the node that is supposed to host it. The node
% keeps track of (touches) these in-use blobs.
%
% To handle transient node disconnections, the master keeps state
% so that it can resume this process when a node connection comes
% back. In this case, the master re-sends any in-use requests to
% which it was expecting responses from the node. On the node,
% its list of in-use tags and blobs will be incomplete after a
% re-connect, since the master only re-sends pending requests, not
% all requests. To handle this (and other cases, described
% below), before deleting an object, a node needs to check with
% the master whether it is in use.
%
% GC cannot proceed safely unless this snapshot is built, since
% otherwise it might delete data that is still in-use. Hence, GC
% aborts if the snapshot cannot be built. For simplicity, we also
% do not proceed with RR in this case.
%
% Once the gc maps are built, the master notifies the nodes, which
% enter the GC phase.
%
% - GC (phase = gc)
%
% Each node now processes the untouched blobs and tags in its
% cache, which are now potential orphans. However, before
% deleting any (GC2 and GC3), each node now checks with the master
% whether each untouched tag or blob in its cache is still in use
% at the master. Also, to handle recent objects added to DDFS
% after the snapshot was built, which would otherwise erroneously
% be considered as orphans, it only deletes objects that are older
% than ORPHANED_{BLOB,TAG}_EXPIRES.
%
% The deletion of each potential orphan object is preceded by a
% two-way check (tag -> node, node -> master). As described
% above, the node needs to re-do the check in the case it was
% restarted during GC, since it will then have an incomplete
% knowledge of in-use objects.
%
% Another reason is the following: if a node deleted its potential
% orphans right away, without checking their status on the master,
% objects would be deleted every time the node's hostname changes
% and the first tag->node check fails. If all hostnames were
% changed at once, all files would be deleted without a warning.
%
% Another reason is that during RR, tags need to be updated after
% any RR of an in-use blob. However, if the master goes down
% after a blob RR, but before the tag is updated, then the blob RR
% is lost. Having the node check with the master before deleting
% a blob allows the recovery of such lost blob replicas (GC4).
%
% Once a node is done processing its potential orphans, it informs
% the master. Once all nodes are done with GC, the master then
% deletes old tags from the +deleted metatag (GC5), and proceeds
% to RR.
%
% If a node connection is reestablished during this phase, the
% node rebuilds its cache, and treats each object as a potential
% orphan. This will make GC take longer, but is still safe.
%
% - RR (phase = rr_blobs/rr_blobs_wait/rr_tags)
%
% Re-replication is done in two parts. In the first part
% (rr_blobs, rr_blobs_wait), the master first re-replicates each
% blob that has fewer replicas than DDFS_BLOB_REPLICAS (RR1).
% Then, in the second part (rr_tags), it processes tags that need
% to be updated.
%
% Tags may need to be updated (RR2) for one or more reasons: (i)
% it has fewer than DDFS_TAG_MIN_REPLICAS, (ii) it contains blobs
% that have been re-replicated, (iii) lost blob replicas have been
% recovered for blobs in the tag.
%
% Tag updates are performed by sending an update message to the
% ddfs_tag process responsible for the tag. This is done because
% the tag may have been updated by the user after the snapshot was
% built, in which case the update needs to be merged with the
% current contents of the tag.
%
% RR continues as long as possible, regardless of the loss of node
% connections, since it is an inherently safe process. However,
% tag RR may fail due to too_many_failed_nodes if too many node
% connections are lost.
%
% RR completes once all blobs and tags are processed.
%
%
% DDFS node removal
%
% For a DDFS node to be safely removed from the cluster, the following
% conditions have to be satisfied:
%
% (1) no new data or metadata, or replicas of existing data or
% metadata, should be written to the node while the removal is in
% progress
%
% (2) the data and metadata already on the node needs to be replicated
% to the other nodes in the cluster, so that blob and tag replica
% quotas can be met without counting the replicas hosted on the
% node
%
% (3) all references to blobs on the node should be removed from their
% containing tags
%
% The third step is safety-critical: for instance, it should not
% result in a reference to the last available blob being removed, in
% case the other replicas of the blob are on nodes that are currently
% down.
%
% Implementation:
%
% A node pending removal is put on a 'blacklist'. This blacklist is
% removed from the set of the writable DDFS nodes by ddfs_master when
% new blobs or tags are created, or when existing ones are
% re-replicated (1).
%
% Any blob or tag replicas found on a blacklisted node are not counted
% towards satisfying their replica quotas (2), and blob and tag
% replication is initiated if those quotas are not met (in phase
% rr_blobs and rr_tags). Once enough backup blob replicas are ensured
% to be available, a 'filter' update message is sent to the tag (in
% phase rr_tags) to remove any references to blob replicas hosted on
% the blacklisted node (3). The actual removal is performed by the
% corresponding ddfs_tag.
%
% The blob reference removal requires an invariant: that any operations
% on a tag do not modify the replica set for a blob in the tag, other
% than perhaps removing the replica set completely. This is because
% the safety computation is not atomic with the reference removal. If
% the replica set is modified (e.g. by removing some replicas from the
% set that were relied on by the safety check) after the safety check
% but before the reference removal, the removal becomes unsafe.
%
% This invariant is ensured by comparing the tag id used for the
% safety check, with the tag id at the time of the filter operation.
% If the two differ, the filter operation is not performed.
%
% In the rr_tags phase, as the tags are scanned for updates, we also
% track whether there exist references to blob replicas on gc
% blacklisted nodes, and whether sufficient tag replicas exist on
% non-blacklisted nodes. At the end of the phase, this helps to
% compute the set of blacklisted nodes that can be safely removed from
% DDFS.
-module(ddfs_gc_main).
-behaviour(gen_server).
-export([start_link/2, gc_status/2]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3, format_status/2]).
-export([is_orphan/4, node_gc_done/2]).
-include("common_types.hrl").
-include("config.hrl").
-include("ddfs.hrl").
-include("ddfs_tag.hrl").
-include("ddfs_gc.hrl").
-include("gs_util.hrl").
-define(NODE_RETRY_WAIT, 30000).
-type node_info() :: {node(), {non_neg_integer(), non_neg_integer()}}.
-type rr_next() :: object_name().
-type node_map() :: disco_gbtree(node(), {pid(), non_neg_integer()}).
-type tag_set() :: disco_gbset(tagname()).
-record(state, {
% dynamic state
deleted_ages :: ets:tab(),
phase = start :: phase(),
gc_peers = gb_trees:empty() :: node_map(),
last_response_time = now() :: erlang:timestamp(),
progress_timer = undefined :: 'undefined' | timer:tref(),
gc_stats = init_gc_stats() :: gc_run_stats(),
num_pending_reqs = 0 :: non_neg_integer(), % build_map/map_wait
pending_nodes = gb_sets:empty() :: disco_gbset(node()), % gc
rr_reqs = 0 :: non_neg_integer(), % rr_blobs
rr_pid = undefined :: 'undefined' | pid(), % rr_blobs
safe_blacklist = gb_sets:empty() :: tag_set(), % rr_tags
nodestats = [] :: [node_info()],
overused_nodes = [] :: [node()],
underused_nodes = [] :: [node()],
most_overused_node = undefined :: 'undefined' | node(),
% static state
tags = [] :: [object_name()],
root :: string(),
blacklist = [] :: [node()],
tagk :: non_neg_integer(),
blobk :: non_neg_integer()}).
-type state() :: #state{}.
-type object_location() :: {node(), volume_name()}.
%% ===================================================================
%% external API
-spec start_link(string(), ets:tab()) -> {ok, pid()} | {error, term()}.
%% Start the GC/RR coordinator for the DDFS tree rooted at Root.
%% DeletedAges is the ets table tracking ages of +deleted entries,
%% owned by the caller so it survives across GC runs.
start_link(Root, DeletedAges) ->
    gen_server:start_link(?MODULE, {Root, DeletedAges}, []).
-spec gc_status(pid(), pid()) -> ok.
%% Ask the GC master for its current phase; the reply {ok, Phase} is
%% sent directly to the From pid (see the gc_status cast handler).
gc_status(Master, From) ->
    gen_server:cast(Master, {gc_status, From}).
%% ===================================================================
%% internal API
-spec is_orphan(pid(), object_type(), object_name(), volume_name())
               -> {ok, boolean() | unknown}.
%% Called from a gc node process (runs on the node: note node()) to ask
%% the master whether a locally-found object is safe to delete; this is
%% the node->master half of the two-way orphan check described above.
is_orphan(Master, Type, ObjName, Vol) ->
    gen_server:call(Master, {is_orphan, Type, ObjName, node(), Vol}).
-spec node_gc_done(pid(), gc_run_stats()) -> ok.
%% Called from a gc node process when it has finished its local GC
%% phase, reporting its per-node statistics to the master.
node_gc_done(Master, GCStats) ->
    gen_server:cast(Master, {gc_done, node(), GCStats}).
-spec add_replicas(pid(), object_name(), [url()]) -> ok.
%% Called by the replicator process to report newly-created replica
%% locations for a blob during the rr_blobs phase.
add_replicas(Master, BlobName, NewUrls) ->
    gen_server:cast(Master, {add_replicas, BlobName, NewUrls}).
%% ===================================================================
%% gen_server callbacks
-spec init({string(), ets:tab()}) -> gs_init().
init({Root, DeletedAges}) ->
    % Ensure only one gc process is running at a time. We don't use a
    % named process to implement uniqueness so that our message queue
    % isn't corrupted by stray delayed messages. Registration throws
    % badarg if another GC run already holds the lock.
    register(gc_lock, self()),
    State = #state{deleted_ages = DeletedAges,
                   root = Root,
                   tagk = list_to_integer(disco:get_setting("DDFS_TAG_REPLICAS")),
                   blobk = list_to_integer(disco:get_setting("DDFS_BLOB_REPLICAS"))},
    % In-use blobs and tags are tracked differently. Blobs are
    % immutable objects, and need to be explicitly re-replicated.
    % Tags are mutable objects, whose incarnation ids are updated on
    % every mutation; they are also implicitly re-replicated, since the
    % storage of every mutation creates the appropriate number of
    % replicas.
    %
    % Since blobs need to be re-replicated explicitly, we track all
    % their current locations. Since tags are mutable, we don't track
    % their locations, but track their current incarnation: all older
    % incarnations will be garbage. During the rr_tags phase, the
    % number of tag locations will be used to decide whether they need
    % re-replication.
    % gc_blob_map: {Key :: {object_name(), node()},
    %               State :: 'pending' | 'missing' | check_blob_result()}
    % gc_tag_map:  {Key :: tagname(),
    %               Id :: erlang:timestamp()}
    _ = ets:new(gc_blob_map, [named_table, set, private]),
    _ = ets:new(gc_tag_map, [named_table, set, private]),
    % Trap exits so 'EXIT' messages from gc node peers and the
    % replicator reach handle_info/2 instead of killing us.
    process_flag(trap_exit, true),
    % Defer the heavy startup work to the 'start' cast handler.
    gen_server:cast(self(), start),
    {ok, State}.
-type is_orphan_msg() :: {is_orphan, object_type(), object_name(),
                          node(), volume_name()}.
-spec handle_call(is_orphan_msg(), from(), state()) -> gs_reply(boolean() | unknown);
                 (dbg_state_msg(), from(), state()) -> gs_reply(state()).
%% Answer a node's node->master orphan check; any such query counts as
%% forward progress, so refresh last_response_time for the progress
%% timer. NOTE(review): now() is deprecated in modern OTP; the module
%% uses it consistently, so it is left as-is here.
handle_call({is_orphan, Type, ObjName, Node, Vol}, _, S) ->
    S1 = S#state{last_response_time = now()},
    {reply, check_is_orphan(S, Type, ObjName, Node, Vol), S1};
% Debug hook: expose the full server state.
handle_call(dbg_get_state, _, S) ->
    {reply, S, S}.
-type gc_status_msg() :: {gc_status, pid()}.
-type retry_node_msg() :: {retry_node, node()}.
-type build_map_msg() :: {build_map, [tagname()]}.
-type gc_done_msg() :: {gc_done, node(), gc_run_stats()}.
-type rr_blob_msg() :: {rr_blob, term()}.
-type add_replicas_msg() :: {add_replicas, object_name(), [url()]}.
-type rr_tags_msg() :: {rr_tags, [tagname()], non_neg_integer()}.
-spec handle_cast(gc_status_msg() | retry_node_msg() | build_map_msg()
| gc_done_msg() | rr_blob_msg() | add_replicas_msg()
| rr_tags_msg(),
state()) -> gs_noreply() | gs_stop(stop_requested | shutdown).
% Report the current GC phase directly to the requesting pid.
handle_cast({gc_status, From}, #state{phase = P} = S) when is_pid(From) ->
    From ! {ok, P},
    {noreply, S};
% Deferred startup (cast from init/1): fetch the blacklist and the tag
% listing, spawn a gc peer on every reachable node, and kick off the
% build_map phase. Aborts the whole run if the tag listing fails.
handle_cast(start, #state{phase = start} = S) ->
    lager:info("GC: initializing"),
    {ok, Blacklist} = ddfs_master:gc_blacklist(),
    case get_all_tags() of
        {ok, Tags, OkNodes} ->
            Phase = build_map,
            Peers = start_gc_peers(OkNodes, self(), now(), Phase),
            % We iterate over the tags by messaging ourselves, so that
            % we keep processing our message queue, which would
            % otherwise fill up when we process very large numbers of
            % blobs and tags, causing potential for deadlock. This
            % way, we can also react to node disconnects during the
            % tag processing.
            gen_server:cast(self(), {build_map, Tags}),
            {noreply, S#state{phase = Phase,
                              gc_peers = Peers,
                              tags = Tags,
                              blacklist = Blacklist}};
        E ->
            lager:error("GC: unable to start: ~p", [E]),
            cleanup_for_exit(S),
            {stop, shutdown, S}
    end;
% Reconnect a gc peer after a disconnect (scheduled by schedule_retry/1).
% A node that was being rebalanced keeps its 'overused' mode.
handle_cast({retry_node, Node},
            #state{phase = Phase, gc_peers = GCPeers,
                   num_pending_reqs = NumPendingReqs, most_overused_node = Overused} = S) ->
    lager:info("GC: retrying connection to ~p (in phase ~p)", [Node, Phase]),
    Mode = case Node of
               Overused -> overused;
               _ -> normal
           end,
    Pid = ddfs_gc_node:start_gc_node(Node, self(), now(), Phase, Mode),
    Peers = update_peer(GCPeers, Node, Pid),
    case Phase of
        P when P =:= build_map; P =:= map_wait ->
            % Re-send the in-use checks this node never answered.
            Pending = resend_pending(Node, Pid),
            % Assert an invariant.
            true = (Pending =< NumPendingReqs),
            ok;
        _ ->
            ok
    end,
    {noreply, S#state{gc_peers = Peers}};
% build_map: process one tag, sending in-use checks to its nodes, then
% queue the rest back to ourselves. Failure to read a tag aborts GC
% (its blobs could otherwise be misclassified as orphans).
handle_cast({build_map, [T|Tags]},
            #state{phase = build_map, num_pending_reqs = PendingReqs} = S) ->
    Check = try check_tag(T, S, ?MAX_TAG_OP_RETRIES)
            catch K:V -> {error, {K,V}}
            end,
    case Check of
        {ok, Sent} ->
            gen_server:cast(self(), {build_map, Tags}),
            Pending = PendingReqs + Sent,
            {noreply, S#state{num_pending_reqs = Pending}};
        E ->
            % We failed to handle this tag; we cannot safely proceed
            % since otherwise we might erroneously consider its blobs
            % as orphans and delete them.
            lager:error("GC: stopping, unable to get tag ~p: ~p", [T, E]),
            cleanup_for_exit(S),
            {stop, shutdown, S}
    end;
% All tags dispatched: either enter the gc phase immediately (nothing
% pending) or wait for outstanding responses in map_wait.
handle_cast({build_map, []}, #state{phase = build_map} = S) ->
    S1 = case num_pending_objects() of
             0 ->
                 % We have no more responses to wait for, we can enter
                 % the GC phase directly.
                 start_gc_phase(S#state{num_pending_reqs = 0});
             Pending ->
                 % We've sent out all our status requests, start a
                 % progress tracker to avoid stalling indefinitely.
                 %
                 % We currently use a timer that tracks gc protocol
                 % messages. This will not catch spurious progress
                 % caused by protocol messages from repeated
                 % disconnects/reconnects of the same node. A better
                 % way to ensure progress is for the timer to ensure
                 % that pending_msgs/pending_nodes decreases at each
                 % wakeup. However, ensuring pending_nodes decreases
                 % requires estimating how long the gc phase at a node
                 % would last.
                 %
                 % Initialize last_response_time, which will now be
                 % updated whenever we get a gc protocol message.
                 lager:info("GC: all build_map requests sent, entering map_wait"),
                 {ok, ProgressTimer} =
                     timer:send_after(?GC_PROGRESS_INTERVAL, check_progress),
                 S#state{phase = map_wait,
                         last_response_time = now(),
                         progress_timer = ProgressTimer,
                         num_pending_reqs = Pending}
         end,
    {noreply, S1};
% A node finished its local GC. When the last node reports in, finish
% the gc phase (GC5, stats, cache refresh), estimate post-RR disk usage,
% optionally rebalance, and enter the rr_blobs phase.
handle_cast({gc_done, Node, GCNodeStats}, #state{phase = gc,
                                                 gc_stats = Stats,
                                                 pending_nodes = Pending,
                                                 deleted_ages = DeletedAges,
                                                 rr_reqs = RReqs,
                                                 nodestats = NS,
                                                 tags = Tags,
                                                 blacklist = BL} = S) ->
    print_gc_stats(Node, GCNodeStats),
    NewStats = add_gc_stats(Stats, GCNodeStats),
    NewPending = gb_sets:delete(Node, Pending),
    NewNS = case lists:member(Node, BL) of
                true ->
                    NS;
                false ->
                    update_nodestats(Node, GCNodeStats, NS)
            end,
    S1 = case gb_sets:size(NewPending) of
             0 ->
                 % This was the last node we were waiting for to
                 % finish GC. Update the deleted tag.
                 process_deleted(Tags, DeletedAges),
                 % Due to tag deletion by GC, we might be able to free
                 % up some entries from the tag cache in ddfs_master.
                 ddfs_master:refresh_tag_cache(),
                 % Update stats.
                 print_gc_stats(all, NewStats),
                 ddfs_master:update_gc_stats(NewStats),
                 {_, _} = find_unstable_nodes(NewNS), % print the current stats.
                 FutureNS = estimate_rr_blobs(S#state{nodestats = NewNS}),
                 {NewUnderused, NewOverused} = find_unstable_nodes(FutureNS),
                 ok = case disco:has_setting("DDFS_SPACE_AWARE") of
                          false -> ok;
                          true ->
                              case NewOverused of
                                  [] -> ok;
                                  _ -> rebalance(NewOverused, BL, FutureNS)
                              end
                      end,
                 lager:info("GC: entering rr_blobs phase"),
                 % Start the replicator process which will
                 % synchronously replicate any blobs it is told to,
                 % and then iterate over all the blobs.
                 Sr = S#state{rr_pid = start_replicator(self())},
                 Start = ets:first(gc_blobs),
                 Reqs = rereplicate_blob(Sr, Start),
                 Sr#state{phase = rr_blobs,
                          rr_reqs = RReqs + Reqs,
                          nodestats = FutureNS,
                          underused_nodes = NewUnderused,
                          overused_nodes = NewOverused};
             Remaining ->
                 lager:info("GC: ~p nodes pending in gc", [Remaining]),
                 S#state{nodestats = NewNS}
         end,
    {noreply, S1#state{pending_nodes = NewPending,
                       gc_stats = NewStats,
                       last_response_time = now()}};
% rr_blobs iteration exhausted the gc_blobs table.
handle_cast({rr_blob, '$end_of_table'},
            #state{phase = rr_blobs, rr_pid = RR, rr_reqs = RReqs} = S) ->
    % We are done with sending replication requests; we now wait for
    % the replicator to terminate.
    lager:info("GC: sent ~p blob replication requests, entering rr_blobs_wait",
               [RReqs]),
    stop_replicator(RR),
    {noreply, S#state{phase = rr_blobs_wait}};
% rr_blobs iteration: re-replicate the next blob in gc_blobs.
handle_cast({rr_blob, Next}, #state{phase = rr_blobs, rr_reqs = RReqs} = S) ->
    Reqs = rereplicate_blob(S, Next),
    {noreply, S#state{rr_reqs = RReqs + Reqs}};
% Replicator reported new replica locations for a blob; record them so
% the rr_tags phase can fold them into the owning tags.
handle_cast({add_replicas, BlobName, NewUrls}, #state{phase = Phase,
                                                      rr_reqs = RReqs} = S)
  when Phase =:= rr_blobs; Phase =:= rr_blobs_wait ->
    update_replicas(S, BlobName, NewUrls),
    lager:info("GC: ~p replication requests pending", [RReqs - 1]),
    {noreply, S#state{rr_reqs = RReqs - 1}};
% add_replicas arriving outside the rr_blobs phases is a stale message.
handle_cast({add_replicas, _BlobName, _NewUrls} = M, #state{phase = Phase,
                                                            rr_reqs = RReqs} = S) ->
    lager:info("GC: ignoring late response ~p (~p,~p)", [M, RReqs, Phase]),
    {noreply, S};
% rr_tags iteration: update/re-replicate one tag, then queue the rest.
handle_cast({rr_tags, [T|Tags], Count}, #state{phase = rr_tags} = S) ->
    S1 = update_tag(S, Count, T, ?MAX_TAG_OP_RETRIES),
    gen_server:cast(self(), {rr_tags, Tags, Count + 1}),
    {noreply, S1};
% All tags processed: notify peers, publish the safe blacklist, and end
% this GC/RR run.
handle_cast({rr_tags, [], Count}, #state{phase = rr_tags, gc_peers = Peers,
                                         safe_blacklist = Blacklist} = S) ->
    % We are done with the RR phase, and hence with GC!
    lager:info("GC: ~p tags updated/replication done, done with GC!", [Count]),
    node_broadcast(Peers, end_rr),
    % Update ddfs_master with the safe_blacklist.
    ddfs_master:safe_gc_blacklist(Blacklist),
    cleanup_for_exit(S),
    {stop, shutdown, S}.
-type check_blob_result_msg() :: {check_blob_result, local_object(),
check_blob_result()}.
-spec handle_info({diskinfo, node(), diskinfo()}
| check_blob_result_msg() | check_progress
| {'EXIT', pid(), term()} | {reference(), term()},
state()) -> gs_noreply() | gs_stop(shutdown).
% Periodic disk usage report from a gc node; blacklisted nodes are
% excluded from nodestats since their space is being drained.
handle_info({diskinfo, Node, {Free, Used}},
            #state{nodestats = NodeStats, blacklist = BL} = S) ->
    lager:info("GC: disk information for ~p (free: ~p bytes, used: ~p bytes)",
               [Node, Free, Used]),
    case lists:member(Node, BL) of
        true ->
            {noreply, S};
        false ->
            {noreply, S#state{nodestats = lists:keystore(Node, 1, NodeStats,
                                                         {Node, {Free, Used}})}}
    end;
% A node answered an in-use check sent during build_map; when the last
% pending answer arrives in map_wait, move on to the gc phase.
handle_info({check_blob_result, LocalObj, Status},
            #state{phase = Phase, num_pending_reqs = NumPendingReqs} = S)
  when Phase =:= build_map;
       Phase =:= map_wait ->
    Checked = check_blob_result(LocalObj, Status),
    Pending = NumPendingReqs - Checked,
    % Assert an invariant.
    true = (Pending >= 0),
    S1 = case Pending of
             0 when Phase =:= map_wait ->
                 % That was the last result we were waiting for; we
                 % can now enter the GC phase.
                 start_gc_phase(S);
             _ ->
                 S#state{num_pending_reqs = Pending,
                         last_response_time = now()}
         end,
    {noreply, S1};
% Progress watchdog: abort the run if no gc protocol message has been
% seen for a full interval during the map/gc phases.
handle_info(check_progress, #state{phase = Phase, last_response_time = LRT} = S)
  when Phase =:= build_map; Phase =:= map_wait; Phase =:= gc ->
    Since = timer:now_diff(now(), LRT),
    case Since < ?GC_PROGRESS_INTERVAL of
        true ->
            % We have been making forward progress, restart the
            % progress timer.
            {ok, T} = timer:send_after(?GC_PROGRESS_INTERVAL, check_progress),
            {noreply, S#state{progress_timer = T}};
        false ->
            % We haven't made progress. Stop GC.
            lager:error("GC: progress timeout in ~p", [Phase]),
            cleanup_for_exit(S),
            {stop, shutdown, S}
    end;
handle_info(check_progress, S) ->
    % We don't need this timer in the RR phases.
    {noreply, S#state{progress_timer = undefined}};
% An exit signal addressed to ourselves (we trap exits): shut down.
handle_info({'EXIT', Pid, Reason}, S) when Pid =:= self() ->
    lager:error("GC: dying on error: ~p", [Reason]),
    cleanup_for_exit(S),
    {stop, stop_requested, S};
% Replicator finished cleanly: proceed to tag updates (rr_tags), seeding
% the safe blacklist with the full blacklist (tags will veto entries).
handle_info({'EXIT', RR, normal},
            #state{phase = rr_blobs_wait, rr_pid = RR,
                   blacklist = BlackList, tags = Tags} = S) ->
    % The RR process has finished normally, and we can proceed to the
    % second phase of RR: start updating the tags. Also, we
    % initialize the safe blacklist here.
    lager:info("GC: done with blob replication, replicating tags (~p pending)",
               [length(Tags)]),
    gen_server:cast(self(), {rr_tags, Tags, 0}),
    {noreply, S#state{rr_pid = undefined,
                      safe_blacklist = gb_sets:from_list(BlackList),
                      phase = rr_tags}};
handle_info({'EXIT', RR, Reason}, #state{phase = Phase, rr_pid = RR} = S) ->
    % Unexpected exit of RR process; exit.
    lager:error("GC: unexpected exit of replicator in ~p: ~p", [Phase, Reason]),
    cleanup_for_exit(S),
    {stop, shutdown, S};
% Any other exit should be a gc peer: log it and schedule a reconnect.
handle_info({'EXIT', Pid, Reason},
            #state{phase = Phase, gc_peers = Peers} = S) ->
    case find_node(Peers, Pid) of
        undefined ->
            {noreply, S};
        {Node, Failures} ->
            lager:warning("GC: Node ~p disconnected (~p): ~p (~p)",
                          [Node, Failures, Reason, Phase]),
            schedule_retry(Node),
            {noreply, S}
    end;
% handle late replies to "catch gen_server:call" (via ddfs_master).
handle_info({Ref, _Msg}, S) when is_reference(Ref) ->
    {noreply, S}.
%% ===================================================================
%% gen_server callback stubs
%% gen_server terminate/2 callback. Cleanup is performed via
%% cleanup_for_exit/1 before a stop is returned, so nothing is left
%% to do here.
-spec terminate(term(), state()) -> ok.
terminate(_Reason, _S) -> ok.
%% gen_server code_change/3 callback: no state migration is needed,
%% the state term is carried over unchanged.
-spec code_change(term(), state(), term()) -> {ok, state()}.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% gen_server format_status/2 callback: expose only the current GC
%% phase instead of the full state term, keeping crash and sys
%% reports small.
-spec format_status(term(), [term()]) -> [term()].
format_status(_Opt, [_PDict, S]) ->
    [{data, [{"State", [{phase, S#state.phase}]}]}].
%% ===================================================================
%% peer connection management and messaging protocol
%% Schedule a retry of the GC-peer connection to Node after
%% ?NODE_RETRY_WAIT milliseconds. A throwaway process sleeps and then
%% casts {retry_node, Node} back to this gen_server; the spawn is
%% deliberately unlinked, as its failure is harmless.
-spec schedule_retry(node()) -> ok.
schedule_retry(Node) ->
    Master = self(),
    Retry = fun() ->
                    timer:sleep(?NODE_RETRY_WAIT),
                    gen_server:cast(Master, {retry_node, Node})
            end,
    _ = spawn(Retry),
    ok.
%% Start one GC peer process (via ddfs_gc_node) for each node, and
%% build the node map: a gb_tree from node name to {PeerPid,
%% FailureCount}, with the failure count starting at zero.
-spec start_gc_peers([node()], pid(), erlang:timestamp(), phase()) -> node_map().
start_gc_peers(Nodes, Self, Now, Phase) ->
    lists:foldl(
      fun(N, Peers) ->
              Pid = ddfs_gc_node:start_gc_node(N, Self, Now, Phase, normal),
              gb_trees:insert(N, {Pid, 0}, Peers)
      end, gb_trees:empty(), Nodes).
%% Look up the peer pid registered for Node in the node map;
%% 'undefined' when the node has no known peer process.
-spec find_peer(node_map(), node()) -> pid() | 'undefined'.
find_peer(Peers, Node) ->
    case gb_trees:lookup(Node, Peers) of
        {value, {PeerPid, _Failures}} -> PeerPid;
        none -> undefined
    end.
%% Reverse lookup in the node map: find which node the peer pid Pid
%% belongs to, returning {Node, FailureCount}, or 'undefined' when
%% the pid is not a known peer. The node map is keyed by node, so we
%% scan its (key-ordered) entries.
-spec find_node(node_map(), pid()) -> {node(), non_neg_integer()} | 'undefined'.
find_node(Peers, Pid) ->
    Matches = [{Node, Failures}
               || {Node, {P, Failures}} <- gb_trees:to_list(Peers),
                  P =:= Pid],
    case Matches of
        [] -> undefined;
        [Found | _] -> Found
    end.
%% Record a fresh peer pid for Node after a reconnect, bumping the
%% node's failure counter by one. The node must already be present
%% in the map (gb_trees:get/2 crashes otherwise, by design).
-spec update_peer(node_map(), node(), pid()) -> node_map().
update_peer(Peers, Node, NewPid) ->
    {_PrevPid, FailCount} = gb_trees:get(Node, Peers),
    gb_trees:enter(Node, {NewPid, FailCount + 1}, Peers).
%% After Node reconnects, re-send a check_blob request (to the new
%% peer Pid) for every object on that node whose status in
%% gc_blob_map is still 'pending'; returns the number of requests
%% re-sent.
-spec resend_pending(node(), pid()) -> non_neg_integer().
resend_pending(Node, Pid) ->
    Objects = ets:match(gc_blob_map, {{'$1', Node}, pending}),
    lists:foreach(fun([ObjName]) ->
                          node_send(Pid, {check_blob, ObjName})
                  end, Objects),
    length(Objects).
%% Send Msg to every peer process recorded in the node map.
-spec node_broadcast(node_map(), protocol_msg()) -> ok.
node_broadcast(Peers, Msg) ->
    lists:foreach(fun({Pid, _}) ->
                          node_send(Pid, Msg)
                  end, gb_trees:values(Peers)).
%% Deliver a protocol message to a peer process. Fire-and-forget:
%% always returns ok, regardless of whether the pid is alive.
-spec node_send(pid(), protocol_msg()) -> ok.
node_send(Pid, Msg) ->
    _ = erlang:send(Pid, Msg),
    ok.
%% Tear down all helper processes before this server stops: kill
%% every GC peer, kill the replicator if one is running, and cancel
%% the progress timer.
-spec cleanup_for_exit(state()) -> ok.
cleanup_for_exit(#state{gc_peers = Peers, progress_timer = Timer, rr_pid = RR}) ->
    lists:foreach(fun({Pid, _}) -> exit(Pid, terminate)
                  end, gb_trees:values(Peers)),
    % rr_pid is 'undefined' outside the RR phases, hence the is_pid check.
    case is_pid(RR) of
        true -> exit(RR, terminate);
        _ -> ok
    end,
    % Cancel result intentionally ignored: the timer may have fired already.
    _ = timer:cancel(Timer),
    ok.
%% ===================================================================
%% build_map and map_wait phases
%% Fetch the complete tag list (and the nodes that answered) from
%% ddfs_master, retrying up to ?MAX_TAG_OP_RETRIES times when the
%% call raises; after the retries are exhausted, {error, timeout} is
%% returned for any exception.
-spec get_all_tags() -> {ok, [tagname()], [node()]} | {error, term()}.
get_all_tags() ->
    get_all_tags(?MAX_TAG_OP_RETRIES).

get_all_tags(Retries) ->
    try case ddfs_master:get_tags(gc) of
            {ok, _Tags, _OkNodes} = AllTags -> AllTags;
            E ->                 E
        end
    catch _:_ when Retries =/= 0 -> get_all_tags(Retries - 1);
          _:_ -> {error, timeout}
    end.
%% Retrieve one tag from ddfs_master and process it with check_tag/5,
%% returning {ok, NumRequestsSent}. Deleted tags count as zero
%% requests. Exceptions are retried up to Retries times, then mapped
%% to {error, timeout}.
-spec check_tag(tagname(), state(), non_neg_integer()) ->
                       {ok, non_neg_integer()} | {error, term()}.
check_tag(Tag, S, Retries) ->
    try case ddfs_master:tag_operation(gc_get, Tag, ?GET_TAG_TIMEOUT) of
            {{missing, deleted}, false} ->
                {ok, 0};
            {TagId, TagUrls, TagReplicas} ->
                {ok, check_tag(S, Tag, TagId, TagUrls, TagReplicas)};
            E ->
                E
        end
    catch _:_ when Retries =/= 0 -> check_tag(Tag, S, Retries - 1);
          _:_ -> {error, timeout}
    end.
%% Record the tag in gc_tag_map and issue check_blob requests for
%% every blob url the tag references; returns the number of requests
%% sent.
-spec check_tag(state(), tagname(), tagid(), [[url()]], [node()])
               -> non_neg_integer().
check_tag(S, <<"+deleted">> = Tag, TagId, _TagUrls, TagReplicas) ->
    record_tag(S, Tag, TagId, TagReplicas),
    % Optimize out the list traversal over TagUrls, since it contains
    % tag:// urls only.
    0;
check_tag(S, Tag, TagId, TagUrls, TagReplicas) ->
    record_tag(S, Tag, TagId, TagReplicas),
    lists:foldl(fun(BlobSet, Sent) -> check_blobset(S, BlobSet, Sent) end,
                0, TagUrls).
%% Record the timestamp of a tag's current incarnation in gc_tag_map,
%% keyed by the bare tag name.
-spec record_tag(state(), object_name(), tagid(), [node()]) -> ok.
record_tag(_S, Tag, TagId, _TagReplicas) ->
    % Assert that TagId embeds the specified Tag name.
    {Tag, Tstamp} = ddfs_util:unpack_objname(TagId),
    % This should be the first and only entry for this tag.
    true = ets:insert_new(gc_tag_map, {Tag, Tstamp}),
    ok.
%% Check the status of every blob url in a blobset, accumulating the
%% number of status requests sent on top of the running Pending count.
-spec check_blobset(state(), [url()], non_neg_integer())
                   -> non_neg_integer().
check_blobset(S, BlobSet, Pending) ->
    lists:foldl(
      fun(Blob, Acc) -> Acc + check_blob_status(S, ddfs_url(Blob)) end,
      Pending, BlobSet).
%% Map a DDFS url to a {BlobName, Node} pair. Tag urls and urls that
%% do not parse as DDFS blobs map to {ignore, unknown}; blobs hosted
%% on hosts that are not known slaves get the node 'unknown'.
-spec ddfs_url(url()) -> {'ignore', 'unknown'} | local_object().
ddfs_url(<<"tag://", _/binary>>) -> {ignore, unknown};
ddfs_url(Url) ->
    case ddfs_util:parse_url(Url) of
        not_ddfs ->
            {ignore, unknown};
        {Host, _Vol, _Type, _Hash, BlobName} ->
            case disco:slave_safe(Host) of
                false ->
                    lager:warning("GC: Unknown host ~p", [Host]),
                    {BlobName, unknown};
                Node ->
                    {BlobName, Node}
            end
    end.
%% Record a referenced blob in gc_blob_map and, for new objects on a
%% connected node, send the node a check_blob status request.
%% Returns the number of requests sent (0 or 1).
-spec check_blob_status(state(), {'ignore', 'unknown'} | local_object())
                       -> non_neg_integer().
check_blob_status(_S, {ignore, _}) -> 0;
check_blob_status(#state{gc_peers = Peers}, {ObjName, Node} = Key) ->
    case {find_peer(Peers, Node), ets:lookup(gc_blob_map, Key)} of
        {_, [{_, _}]} -> % Previously seen object
            0;
        {undefined, []} -> % Unknown node, new object
            % No peer to ask; record the blob as missing on this node.
            ets:insert(gc_blob_map, {Key, missing}),
            0;
        {Pid, []} -> % Known node, new object
            % Mark the object as pending, and send a status request.
            ets:insert(gc_blob_map, {Key, pending}),
            node_send(Pid, {check_blob, ObjName}),
            1
    end.
%% Count the entries in gc_blob_map whose status is still 'pending',
%% i.e. blobs whose status request has not been answered yet.
-spec num_pending_objects() -> non_neg_integer().
num_pending_objects() ->
    Pending = ets:match(gc_blob_map, {'_', pending}),
    length(Pending).
%% Fold a node's check_blob response into gc_blob_map: a 'pending'
%% entry is overwritten with the reported Status and counts as one
%% answered request; anything else (a duplicate reply) counts as zero.
%% Crashes (badarg) if the object was never recorded, by design.
-spec check_blob_result(local_object(), check_blob_result()) -> non_neg_integer().
check_blob_result(LocalObj, Status) ->
    PrevStatus = ets:lookup_element(gc_blob_map, LocalObj, 2),
    case PrevStatus of
        pending ->
            _ = ets:update_element(gc_blob_map, LocalObj, {2, Status}),
            1;
        _Answered ->
            0
    end.
%% ===================================================================
%% gc phase and replica recovery
%% Transition from map_wait to the gc phase: convert the completed
%% gc_blob_map (per-node status) into gc_blobs (per-blob location
%% summary), compute node disk-usage balance, and tell all peers to
%% start their local GC. The most over-utilized node (when
%% DDFS_SPACE_AWARE is set) is started in 'overused' mode.
-spec start_gc_phase(state()) -> state().
start_gc_phase(#state{gc_peers = Peers, nodestats = NodeStats} = S) ->
    % We are done with building the in-use map.  Now, we need to
    % collect the nodes that host a replica of each known in-use blob,
    % and also add the nodes that host any recovered replicas.  We
    % store this in a new ETS, gc_blobs, which will store for each
    % blob,
    %   (1) key: the blob name,
    %   (2) the known locations,
    %   (3) Recovered locations,
    %   (4) New replica locations,
    %   (5) Size of the blob in kilobytes
    %   (6) rebalance | norebalance
    %   (7) nodes that do not host the blob, but received a request about it
    % gc_blobs: {Key :: object_name(),
    %            Present :: [object_location()],
    %            Recovered :: [object_location()],
    %            Update :: rep_update(),
    %            Size :: 'undefined' | non_neg_integer(),
    %            Rebalance :: rebalance(),
    %            Update_missing :: [node()]}

    _ = ets:new(gc_blobs, [named_table, set, private]),

    _ = ets:foldl(
          % There is no clause for the 'pending' status, so that we
          % can assert if we start gc with any still-pending entries.
          fun({{BlobName, Node}, {true, Vol, Size}}, _) ->
                  case ets:lookup(gc_blobs, BlobName) of
                      [] ->
                          SizeKB = trunc(Size / ?KB),
                          Entry = {BlobName, [{Node, Vol}], [],
                                   noupdate, SizeKB, norebalance, []},
                          ets:insert(gc_blobs, Entry);
                      [{_, Present, _, _, _, _, _}] ->
                          Acc = [{Node, Vol} | Present],
                          ets:update_element(gc_blobs, BlobName, {2, Acc})
                  end;
             ({{BlobName, _Node}, missing}, _) ->
                  % Create an entry for missing blobs.  This allows
                  % us to recover them from other nodes if present
                  % (e.g. after hostname changes).
                  case ets:lookup(gc_blobs, BlobName) of
                      [] ->
                          Entry = {BlobName, [], [], noupdate, undefined, norebalance, []},
                          ets:insert(gc_blobs, Entry);
                      [{_, _, _, _, _, _, _}] ->
                          true
                  end;
             ({{BlobName, Node}, false}, _) ->
                  % Save node in the list of locations to be deleted
                  % from tags containing references to this blob.
                  case ets:lookup(gc_blobs, BlobName) of
                      [] ->
                          Entry = {BlobName, [], [], noupdate,
                                   undefined, norebalance, [Node]},
                          ets:insert(gc_blobs, Entry);
                      [{_, _, _, _, _, _, Delete}] ->
                          ets:update_element(gc_blobs, BlobName, {7, [Node | Delete]})
                  end
          end, true, gc_blob_map),
    % The per-node map is no longer needed once gc_blobs is built.
    ets:delete(gc_blob_map),
    {UnderusedNodes, OverusedNodes} = find_unstable_nodes(NodeStats),
    Utilization = [{N, ddfs_rebalance:utility(Node)} || {N, _} = Node <- NodeStats,
                                                        lists:member(N, OverusedNodes)],
    % Only select a "most overused" node when space-aware GC is enabled.
    MostOverused = case disco:has_setting("DDFS_SPACE_AWARE") of
                       false -> undefined;
                       true ->
                           SortedUtilization = [N || {N, _} <- lists:reverse(lists:keysort(2, Utilization))],
                           case SortedUtilization of
                               [] -> undefined;
                               [N | _] -> N
                           end
                   end,
    lager:info("GC: entering gc phase"),
    OverusedPeer = find_peer(Peers, MostOverused),
    NewPeers = case OverusedPeer of
                   undefined ->
                       Peers;
                   _ ->
                       % The most overused node runs GC in 'overused'
                       % mode and is excluded from the normal broadcast.
                       node_send(OverusedPeer, {start_gc, overused}),
                       gb_trees:delete(MostOverused, Peers)
               end,
    node_broadcast(NewPeers, {start_gc, normal}),
    % Update the last_response_time to indicate forward progress.
    S#state{num_pending_reqs   = 0,
            pending_nodes      = gb_sets:from_list(gb_trees:keys(Peers)),
            phase              = gc,
            last_response_time = now(),
            overused_nodes     = OverusedNodes,
            underused_nodes    = UnderusedNodes,
            most_overused_node = MostOverused}.
%% Fold one node's GC run statistics into the node-info list: the
%% bytes deleted (tags + blobs, converted to KB) move from the node's
%% used-space figure to its free-space figure.
-spec update_nodestats(node(), gc_run_stats(), [node_info()]) -> [node_info()].
update_nodestats(Node, {TagStats, BlobStats}, NodeStats) ->
    {_, {_, DelTagBytes}} = TagStats,
    {_, {_, DelBlobBytes}} = BlobStats,
    {_, {Free, Used}} = lists:keyfind(Node, 1, NodeStats),
    DeletedKB = trunc((DelTagBytes + DelBlobBytes) / ?KB),
    lists:keystore(Node, 1, NodeStats, {Node, {Free + DeletedKB, Used - DeletedKB}}).
%% Estimate the disk-space impact of the upcoming re-replication
%% phase: every blob with fewer than BlobK usable (non-blacklisted)
%% replicas is marked for update in gc_blobs, and its size is charged
%% against a weighted-randomly selected destination node in the
%% returned node stats.
-spec estimate_rr_blobs(state()) -> [node_info()].
estimate_rr_blobs(#state{blacklist = BL, nodestats = NS, blobk = BlobK}) ->
    ets:foldl(
      fun({BlobName, Present, Recovered, _, Size, _, _}, NodeStats) ->
              PresentNodes = [N || {N, _V} <- Present],
              SafePresent = find_usable(BL, PresentNodes),
              SafeRecovered = [N || {N, _V} <- Recovered, not lists:member(N, BL)],
              case {length(SafePresent), length(SafeRecovered)} of
                  {NumPresent, NumRecovered}
                    when NumPresent + NumRecovered >= BlobK ->
                      % Enough usable replicas; nothing to replicate.
                      NodeStats;
                  {0, 0}
                    when Present =:= [], Recovered =:= [] ->
                      % No copies at all; nothing we can do here.
                      NodeStats;
                  {_NumPresent, _NumRecovered} ->
                      % Too few replicas: mark for update and charge the
                      % blob's size to a chosen destination node.
                      ets:update_element(gc_blobs, BlobName, {4, {update, []}}),
                      Exclude = SafeRecovered ++ PresentNodes,
                      OtherNodes = [{N, S} || {N, S} <- NodeStats, not lists:member(N, Exclude)],
                      case ddfs_rebalance:weighted_select_from_nodes(OtherNodes, 1) of
                          error ->
                              lager:warning("Could not replicate ~s.", [BlobName]),
                              NodeStats;
                          [Node] ->
                              {Node, {Free, Used}} = lists:keyfind(Node, 1,
                                                                   NodeStats),
                              case Size of
                                  undefined -> NodeStats;
                                  _ -> lists:keyreplace(Node, 1, NodeStats,
                                                        {Node, {Free - Size, Used + Size}})
                              end
                      end
              end
      end, NS, gc_blobs).
%% Mark blobs that live on over-utilized nodes for re-replication so
%% that disk usage gets balanced across the cluster. Per node, the
%% amount of data selected for rebalancing is bounded by the
%% rebalance threshold.
-spec rebalance([node()], [node()], [node_info()]) -> ok.
rebalance(Overused, BL, NodeStats) ->
    % [{Node, {Total disk space, Bytes to replicate}}]
    RebalanceStats = [{N, {F + U, 0}} || {N, {F, U}} <- NodeStats,
                                         lists:member(N, Overused)],
    Threshold = ddfs_rebalance:threshold(),
    ets:foldl(
      fun({BlobName, Present, Recovered, Update, Size, _, _}, Stats) ->
              PresentNodes = [N || {N, _V} <- Present],
              SafePresent = find_usable(BL, PresentNodes),
              SafeRecovered = [N || {N, _V} <- Recovered, not lists:member(N, BL)],
              OverusedPresent = [N || N <- SafePresent ++ SafeRecovered,
                                      lists:member(N, Overused)],
              case {Update, length(OverusedPresent)} of
                  {noupdate, NumPresent}
                    when NumPresent > 0 ->
                      % The blob is present on an over-utilized node and not already
                      % marked for rereplication due to insuffcient number of replicas.
                      % Possibly mark the blob for replication to balance the cluster.
                      PresentStats = [{N, S} || {N, S} <- Stats,
                                                lists:member(N, OverusedPresent)],
                      % Pick the hosting over-utilized node that has so
                      % far contributed the least to rebalancing.
                      [{N, {DiskSpace, Balanced}} | _] =
                          lists:sort(
                            fun({_, {DS1, B1}}, {_, {DS2, B2}}) ->
                                    ddfs_rebalance:less(B1, DS1, B2, DS2)
                            end, PresentStats),
                      case ddfs_rebalance:is_balanced(Balanced, DiskSpace, Threshold) of
                          true ->
                              % The node has passed the threshold for how much of
                              % its diskspace that can be selected to replicate
                              % for balancing.
                              Stats;
                          false ->
                              % The blob is present on an over-utilized node which
                              % has not passed the rebalancing threshold. Mark the
                              % blob for replication to balance the cluster and
                              % update the stats.
                              case Size of
                                  undefined -> Stats;
                                  _ ->
                                      ets:update_element(gc_blobs, BlobName, {6, rebalance}),
                                      lists:keyreplace(N, 1, Stats,
                                                       {N, {DiskSpace, Balanced + Size}})
                              end
                      end;
                  {_, _} ->
                      Stats
              end
      end, RebalanceStats, gc_blobs),
    ok.
%% Decide whether an object reported by a node is an orphan (safe to
%% delete). For tags, this is a timestamp comparison against the
%% snapshot in gc_tag_map. For blobs, it depends on the replica
%% count, the blacklist, and whether the reporting node is the most
%% over-utilized node; newly discovered replicas are recorded in
%% gc_blobs as a side effect. 'unknown' means the object was not in
%% our snapshot and recent objects must not be deleted by the node.
-spec check_is_orphan(state(), object_type(), object_name(), node(), volume_name())
                     -> {ok, boolean() | unknown}.
check_is_orphan(_S, tag, Tag, _Node, _Vol) ->
    {TagName, Tstamp} = ddfs_util:unpack_objname(Tag),
    case ets:lookup(gc_tag_map, TagName) of
        [] ->
            % This tag was not present in our snapshot, but could have
            % been newly created.  Mark it as unknown, and the node
            % will not delete it if it is recent.
            {ok, unknown};
        [{_, GcTstamp}] when Tstamp < GcTstamp ->
            % This is an older incarnation of the tag, hence
            % definitely an orphan.
            {ok, true};
        [{_, _GcTstamp}] ->
            % This is a current or newer incarnation.
            {ok, false}
    end;
check_is_orphan(#state{blobk = BlobK, blacklist = BlackList,
                       most_overused_node = MostOverused}, blob, BlobName, Node, Vol) ->
    MaxReps = BlobK + ?NUM_EXTRA_REPLICAS,
    % The gc mark/in-use protocol is resumable, but the node loses its
    % in-use knowledge when it goes down.  On reconnect, it might
    % perform orphan-checks on blobs that were already marked by the
    % master in a previous session with that node.  Similarly, we
    % might re-recover blobs again after a reconnect.
    case ets:lookup(gc_blobs, BlobName) of
        [] ->
            % This blob was not present in our snapshot, but could
            % have been newly created.  Mark it as unknown, but the
            % node will not delete it if it is recent.
            {ok, unknown};
        [{_, Present, Recovered, _, _, _, _}] ->
            PresentNodes = [N || {N, _V} <- Present],
            case {lists:member(Node, PresentNodes),
                  lists:member({Node, Vol}, Recovered),
                  Node =:= MostOverused} of
                {true, _, false} ->
                    % Re-check of an already marked blob.
                    {ok, false};
                {_, true, false} ->
                    % Re-check of an already recovered blob.
                    {ok, false};
                % Use a fast path for the normal case when there is no
                % blacklist.
                {false, false, false}
                  when BlackList =:= [],
                       length(Present) + length(Recovered) > MaxReps ->
                    % This is a newly recovered replica, but we have
                    % more than enough replicas, so we can afford
                    % marking this as an orphan.
                    lager:info("GC: discarding replica of ~p on ~p/~p",
                               [BlobName, Node, Vol]),
                    {ok, true};
                {false, false, false}
                  when BlackList =:= [] ->
                    % This is a usable, newly-recovered, lost replica;
                    % record the volume for later use.
                    lager:info("GC: recovering replica of ~p from ~p/~p",
                               [BlobName, Node, Vol]),
                    NewRecovered = [{Node, Vol} | Recovered],
                    ets:update_element(gc_blobs, BlobName, {3, NewRecovered}),
                    {ok, false};
                {false, false, false} ->
                    % Blacklist in effect: count only usable replicas.
                    {RepNodes, _RepVols} = lists:unzip(Recovered),
                    Usable = find_usable(BlackList, lists:usort(RepNodes ++ PresentNodes)),
                    case length(Usable) > MaxReps of
                        true ->
                            {ok, true};
                        false ->
                            % Note that Node could belong to the blacklist; we
                            % still record the replica so that we can use it for
                            % re-replication if needed.
                            lager:info("GC: recovering replica of ~p from ~p/~p",
                                       [BlobName, Node, Vol]),
                            NewRecovered = [{Node, Vol} | Recovered],
                            ets:update_element(gc_blobs, BlobName, {3, NewRecovered}),
                            {ok, false}
                    end;
                {IsPresent, IsRecovered, true} ->
                    % The reporting node is the most over-utilized one:
                    % prefer shedding replicas from it when safe.
                    {RepNodes, _RepVols} = lists:unzip(Recovered),
                    Usable = find_usable(BlackList, lists:usort(RepNodes ++ PresentNodes)),
                    case {IsPresent orelse IsRecovered, length(Usable) > BlobK} of
                        {_, true} ->
                            % This blob can be deleted since it has more than BlobK
                            % replicas and is located on the most over-utilized node.
                            lager:info("GC: discarding replica of ~p on ~p/~p",
                                       [BlobName, Node, Vol]),
                            {ok, true};
                        {true, false} ->
                            {ok, false};
                        {false, false} ->
                            lager:info("GC: recovering replica of ~p from ~p/~p",
                                       [BlobName, Node, Vol]),
                            NewRecovered = [{Node, Vol} | Recovered],
                            ets:update_element(gc_blobs, BlobName, {3, NewRecovered}),
                            {ok, false}
                    end
            end
    end.
%% Zeroed GC run statistics:
%% {TagStats, BlobStats}, each {Kept, Deleted}, each {Files, Bytes}.
-spec init_gc_stats() -> gc_run_stats().
init_gc_stats() ->
    Zero = {0, 0},
    ObjZero = {Zero, Zero},
    {ObjZero, ObjZero}.
%% Element-wise sum of two GC run statistics: tag stats and blob
%% stats are combined independently, each splitting into kept/deleted
%% {Files, Bytes} pairs.
-spec add_gc_stats(gc_run_stats(), gc_run_stats()) -> gc_run_stats().
add_gc_stats({Tags1, Blobs1}, {Tags2, Blobs2}) ->
    {add_obj_stats(Tags1, Tags2), add_obj_stats(Blobs1, Blobs2)}.

%% Sum per-object-type stats: kept and deleted counters separately.
-spec add_obj_stats(obj_stats(), obj_stats()) -> obj_stats().
add_obj_stats({Kept1, Deleted1}, {Kept2, Deleted2}) ->
    {add_gc_stat(Kept1, Kept2), add_gc_stat(Deleted1, Deleted2)}.

%% Sum a single {Files, Bytes} counter pair.
-spec add_gc_stat(gc_stat(), gc_stat()) -> gc_stat().
add_gc_stat({Files1, Bytes1}, {Files2, Bytes2}) ->
    {Files1 + Files2, Bytes1 + Bytes2}.
%% Log GC statistics, either the cluster-wide total (first clause,
%% Node =:= all) or a single node's figures.
-spec print_gc_stats(node(), gc_run_stats()) -> ok.
print_gc_stats(all, {Tags, Blobs}) ->
    lager:info("Total GC Stats: ~p ~p",
               [obj_stats(tag, Tags), obj_stats(blob, Blobs)]);
print_gc_stats(Node, {Tags, Blobs}) ->
    lager:info("Node GC Stats for ~p: ~p ~p",
               [Node, obj_stats(tag, Tags), obj_stats(blob, Blobs)]).
%% Render per-type GC statistics as a labelled tuple suitable for
%% logging: {Type, "kept", {Files, Bytes}, "deleted", {Files, Bytes}}.
-spec obj_stats(object_type(), obj_stats()) -> term().
obj_stats(Type, {Kept, Deleted}) ->
    {Type, "kept", Kept, "deleted", Deleted}.
% GC5) Delete old deleted tags from the +deleted metatag
%
% We don't want to accumulate deleted tags in the +deleted list
% infinitely. The downside of removing a tag from the list too early
% is that there might be a node still hosting a version of the tag
% file, which we just haven't seen yet. If this node reappears and
% the tag has been already removed from +deleted, the tag will come
% back from dead.
%
% To prevent this from happening, we wait until all known entries of
% the tag have been garbage collected and the ?DELETED_TAG_EXPIRES
% quarantine period has passed. We assume that the quarantine is long
% enough so that all temporarily unavailable nodes have time to
% resurrect during that time, i.e. no nodes can re-appear after being
% gone for ?DELETED_TAG_EXPIRES milliseconds.
%
% The Ages table persists the time of death for each deleted tag.
%% Prune the +deleted metatag (see the GC5 comment above): record a
%% time of death for newly deleted tags in the Ages table, and remove
%% from +deleted those tags whose replicas are all gone and whose
%% quarantine period (?DELETED_TAG_EXPIRES) has elapsed.
%% Tags is the list of tags still known to exist somewhere.
-spec process_deleted([object_name()], ets:tab()) -> ok.
process_deleted(Tags, Ages) ->
    lager:info("GC: Pruning +deleted"),
    % NOTE(review): erlang:now/0 is deprecated in modern OTP; it is
    % used throughout this module, so kept here for consistency.
    Now = now(),

    % Let's start with the current list of deleted tags
    {ok, Deleted} = ddfs_master:tag_operation(get_tagnames,
                                              <<"+deleted">>,
                                              ?NODEOP_TIMEOUT),

    % Update the time of death for newly deleted tags
    gb_sets:fold(fun(Tag, none) ->
                         ets:insert_new(Ages, {Tag, Now}), none
                 end, none, Deleted),
    % Remove those tags from the candidate set which still have active
    % copies around.
    DelSet = gb_sets:subtract(Deleted, gb_sets:from_ordset(Tags)),

    % Build up a list of tags ready to be removed from +deleted.
    ExpiredTags =
        lists:foldl(
          fun({Tag, Age}, Acc) ->
                  Diff = timer:now_diff(Now, Age) / 1000,
                  case gb_sets:is_member(Tag, DelSet) of
                      false ->
                          % Copies of tag still alive, remove from Ages.
                          ets:delete(Ages, Tag),
                          Acc;
                      true when Diff > ?DELETED_TAG_EXPIRES ->
                          % Tag ready to be removed from +deleted.
                          lager:info("GC: tag ~p ready for removal from +DELETED", [Tag]),
                          [Tag | Acc];
                      true ->
                          % Tag hasn't been dead long enough to be
                          % removed from +deleted.
                          Acc
                  end
          end, [], ets:tab2list(Ages)),

    case ExpiredTags of
        [] -> ok;
        _ -> case ddfs_master:tag_operation({delete_tagnames, ExpiredTags},
                                            <<"+deleted">>,
                                            ?TAG_UPDATE_TIMEOUT)
             of   {ok, _} ->
                      lager:info("GC: ~p dead tags removed from +DELETED",
                                 [length(ExpiredTags)]);
                  {error, E} ->
                      lager:info("GC: error removing dead tags from +DELETED: ~p", [E])
             end
    end.
%% ===================================================================
%% blacklist utilities
% This is more dialyzer friendly than an inline call.
%% Keep only the nodes that are NOT on the blacklist BL, preserving
%% the input order.
-spec find_usable([node()], [node()]) -> [node()].
find_usable(BL, Nodes) ->
    lists:filter(fun(Node) -> not lists:member(Node, BL) end, Nodes).
%% Keep only the nodes that ARE on the blacklist BL, preserving the
%% input order. Complement of find_usable/2.
-spec find_unusable([node()], [node()]) -> [node()].
find_unusable(BL, Nodes) ->
    lists:filter(fun(Node) -> lists:member(Node, BL) end, Nodes).
%% ===================================================================
%% blob rereplication
-type rep_result() :: 'noupdate' | {'update', [url()]}.
% Rereplicate at most one blob, and then return.
%% Process one blob from gc_blobs: attempt its re-replication, store
%% the resulting update marker back into the table, and schedule the
%% next blob (or the end marker) via a cast to self. Returns the
%% number of put requests issued (0 or 1).
-spec rereplicate_blob(state(), rr_next() | '$end_of_table') -> non_neg_integer().
rereplicate_blob(_S, '$end_of_table' = End) ->
    % Loop the end marker back so the gen_server can finish the phase.
    gen_server:cast(self(), {rr_blob, End}),
    0;
rereplicate_blob(S, BlobName) ->
    [{_, Present, Recovered, _, _, Rebalance, _}] = ets:lookup(gc_blobs, BlobName),
    {FinalReps, Reqs} = rereplicate_blob(S, BlobName, Present, Recovered, Rebalance, S#state.blobk),
    ets:update_element(gc_blobs, BlobName, {4, FinalReps}),
    Next = ets:next(gc_blobs, BlobName),
    gen_server:cast(self(), {rr_blob, Next}),
    Reqs.
% RR1) Re-replicate blobs that don't have enough replicas
%% Decide whether BlobName needs another replica and, when it does,
%% initiate a single transfer via try_put_blob/4. Returns the update
%% marker to store in gc_blobs ('noupdate' or {update, Urls}) and the
%% number of put requests issued (0 or 1).
-spec rereplicate_blob(state(), object_name(), [object_location()],
                       [object_location()], rebalance(), non_neg_integer())
                      -> {rep_result(), non_neg_integer()}.
rereplicate_blob(#state{blacklist = BL} = S,
                 BlobName, Present, Recovered, Rebalance, Blobk) ->
    PresentNodes = [N || {N, _V} <- Present],
    SafePresent = find_usable(BL, PresentNodes),
    SafeRecovered = [{N, V} || {N, V} <- Recovered, not lists:member(N, BL)],
    case {length(SafePresent), length(SafeRecovered), Rebalance} of
        {NumPresent, NumRecovered, norebalance}
          when NumRecovered =:= 0, NumPresent >= Blobk ->
            % No need for replication or blob update.
            {noupdate, 0};
        {NumPresent, NumRecovered, norebalance}
          when NumRecovered > 0, NumPresent + NumRecovered >= Blobk ->
            % No need for new replication; containing tags need updating to
            % recover lost blob replicas.
            {{update, []}, 0};
        {0, 0, _}
          when Present =:= [], Recovered =:= [] ->
            % We have no good copies from which to generate new replicas;
            % we have no option but to live with the current information.
            lager:warning("GC: all replicas missing for ~p!!!", [BlobName]),
            {noupdate, 0};
        {NumPresent, NumRecovered, _} ->
            % Extra replicas are needed; we generate one new replica at a
            % time, in a single-shot way.  We use any available replicas as
            % sources, including those from blacklisted nodes.
            {RepNodes, _RepVols} = lists:unzip(Recovered),
            OkNodes = RepNodes ++ PresentNodes,
            case {try_put_blob(S, BlobName, OkNodes, BL), NumRecovered} of
                {{error, E}, 0} ->
                    lager:info("GC: rr for ~p "
                               "(with ~p replicas recorded) "
                               "failed: ~p",
                               [BlobName, NumPresent, E]),
                    {noupdate, 0};
                {{error, E}, _} ->
                    lager:info("GC: rr for ~p "
                               "(with ~p/~p replicas recorded/recovered) "
                               "failed: ~p",
                               [BlobName, NumPresent, NumRecovered, E]),
                    % We should record the usable recovered replicas.
                    {{update, []}, 0};
                {pending, _} ->
                    lager:info("GC: rr for ~p "
                               "(with ~p/~p replicas recorded/recovered) "
                               "initiated",
                               [BlobName, NumPresent, NumRecovered]),
                    % Mark the blob as updatable (see update_replicas/3).
                    {{update, []}, 1}
            end
    end.
%% Ask ddfs_master for one new put location for BlobName (excluding
%% the nodes that already host it and the blacklist), then hand the
%% transfer over to the replicator process, choosing a random source
%% replica. Returns 'pending' when the transfer was handed off.
-spec try_put_blob(state(), object_name(), [node(),...], [node()]) ->
                          pending | {error, term()}.
try_put_blob(#state{rr_pid = RR, gc_peers = Peers}, BlobName, OkNodes, BL) ->
    case ddfs_master:new_blob(BlobName, 1, [], OkNodes ++ BL) of
        {ok, [PutUrl]} ->
            Srcs = [{find_peer(Peers, N), N} || N <- OkNodes],
            {SrcPeer, SrcNode} = disco_util:choose_random(Srcs),
            RealPutUrl = ddfs_util:cluster_url(PutUrl, put),
            % The replicator performs the transfer asynchronously.
            RR ! {put_blob, BlobName, SrcPeer, SrcNode, RealPutUrl},
            pending;
        {ok, []} ->
            {error, "not enough replicas."};
        E ->
            {error, E}
    end.
% gen_server update handler.
%% Record newly created replica urls for BlobName by prepending them
%% to the url list inside the blob's {update, Urls} marker in
%% gc_blobs. The match on {update, _} is an assertion: an update can
%% only arrive for a blob previously marked for replication (see
%% rereplicate_blob/6).
-spec update_replicas(state(), object_name(), [url()]) -> ok.
update_replicas(_S, BlobName, NewUrls) ->
    [{_, _, _, {update, ExistingUrls}, _, _, _}] = ets:lookup(gc_blobs, BlobName),
    Updated = {update, NewUrls ++ ExistingUrls},
    _ = ets:update_element(gc_blobs, BlobName, {4, Updated}),
    ok.
% Coordinate blob transfers in a separate process, which can wait for
% the result of the attempted replication, and reports the result back
% to the main gen_server. If a peer goes down during the transfer, it
% is not retried, but a timeout error is reported instead.
%% State of the replicator helper process.
-record(rep_state, {
          master :: pid(),                     % GC/RR gen_server to report results to
          ref = 0 :: non_neg_integer(),        % correlation id for put_blob requests
          timeouts = 0 :: non_neg_integer()}). % running count of put timeouts
-type rep_state() :: #rep_state{}.
%% Spawn the replicator helper, linked to the caller; Master is the
%% GC/RR gen_server that receives replication results.
-spec start_replicator(pid()) -> pid().
start_replicator(Master) ->
    spawn_link(fun() -> replicator(#rep_state{master = Master}) end).
%% Main loop of the replicator process: perform put_blob transfers
%% one at a time (synchronously, see the comment in do_put_blob/5),
%% bumping the request ref after each, until an 'rr_end' message
%% terminates the loop.
-spec replicator(rep_state()) -> no_return().
replicator(#rep_state{ref = Ref, timeouts = TO} = S) ->
    receive
        {put_blob, BlobName, SrcPeer, SrcNode, PutUrl} ->
            S1 = do_put_blob(S, BlobName, SrcPeer, SrcNode, PutUrl),
            replicator(S1#rep_state{ref = Ref + 1});
        rr_end ->
            lager:info("GC: replication ending with Ref ~p, TO ~p", [Ref, TO]),
            ok
    end.
%% Ask the replicator process to finish: it exits its loop once it
%% has drained the put_blob requests queued before this message.
-spec stop_replicator(pid()) -> ok.
stop_replicator(RR) ->
    _ = erlang:send(RR, rr_end),
    ok.
%% Send a put_blob request to the GC-node peer on SrcNode (via its
%% registered name) and synchronously wait for the outcome.
-spec do_put_blob(rep_state(), object_name(), pid(), node(), binary())
                 -> rep_state().
do_put_blob(#rep_state{ref = Ref} = S, BlobName, _SrcPeer, SrcNode, PutUrl) ->
    % We don't use SrcPeer, but instead use the named pid on SrcNode
    % to send the put_blob to the peer.  This is because the pid in
    % SrcPeer might now be invalid, as the SrcNode might have been
    % restarted (say due to a transient network issue), and the
    % restarted GC-node peer will have a pid different from SrcPeer.
    % This can happen since the message queue for the replicator is
    % typically quite deep, with a significant interval between the
    % time the put_blob entered our (i.e. the replicator's) message
    % queue, and the time (now) that we process it.  This latency in
    % the message handling is caused by the fact that the replicator
    % functions in a synchronous manner, while the main GC/RR
    % gen_server process functions asynchronously (and generates
    % messages faster than the replicator can process).
    {ddfs_gc_node, SrcNode} ! {put_blob, self(), Ref, BlobName, PutUrl},
    wait_put_blob(S, SrcNode, PutUrl).
%% Wait up to ?GC_PUT_TIMEOUT for the result of the current put_blob
%% request. Replies are correlated by Ref; replies carrying an older
%% ref are delayed answers to a request that already timed out, so
%% they decrement the timeout count their timeout had incremented,
%% and we keep waiting for the current reply.
-spec wait_put_blob(rep_state(), node(), url()) -> rep_state().
wait_put_blob(#rep_state{ref = Ref, timeouts = TO, master = Master} = S,
              SrcNode, PutUrl) ->
    receive
        {Ref, _B, _PU, {ok, BlobName, NewUrls}} ->
            lager:info("GC: replicated ~p (~p) to ~p", [BlobName, Ref, NewUrls]),
            add_replicas(Master, BlobName, NewUrls),
            S;
        {Ref, B, PU, E} ->
            lager:info("GC: error replicating ~p (~p) to ~p: ~p",
                       [B, Ref, PU, E]),
            S;
        {OldRef, _B, PU, {ok, BlobName, NewUrls}} ->
            % Delayed response.
            lager:info("GC: delayed replication of ~p (~p/~p) to ~p: ~p",
                       [BlobName, OldRef, Ref, PU, NewUrls]),
            add_replicas(Master, BlobName, NewUrls),
            wait_put_blob(S#rep_state{timeouts = TO - 1}, SrcNode, PutUrl);
        {OldRef, B, PU, OldResult} ->
            % Delayed error response.
            lager:info("GC: error replicating ~p (~p/~p) to ~p: ~p",
                       [B, OldRef, Ref, PU, OldResult]),
            wait_put_blob(S#rep_state{timeouts = TO - 1}, SrcNode, PutUrl)
    after ?GC_PUT_TIMEOUT ->
            lager:info("GC: replication timeout on ~p (~p) for ~p",
                       [SrcNode, Ref, PutUrl]),
            S#rep_state{timeouts = TO + 1}
    end.
%% ===================================================================
%% tag updates
%% Fetch tag T from ddfs_master and send it any needed updates via
%% update_tag_body/6 (Cnt is only used for logging). Exceptions are
%% retried; when Retries reaches 0 the tag is skipped and the state
%% returned unchanged.
-spec update_tag(state(), non_neg_integer(), object_name(),
                 non_neg_integer()) -> state().
update_tag(S, _Cnt, _T, 0) ->
    S;
update_tag(S, Cnt, T, Retries) ->
    try case ddfs_master:tag_operation(gc_get, T, ?GET_TAG_TIMEOUT) of
            {{missing, deleted}, false} ->
                S;
            {TagId, TagUrls, TagReplicas} ->
                update_tag_body(S, Cnt, T, TagId, TagUrls, TagReplicas);
            _E ->
                % If there is any error, we cannot ensure safety in
                % removing any blacklisted nodes; reset the safe
                % blacklist.
                lager:info("GC: Unable to retrieve tag ~p (rr_tags)", [T]),
                S#state{safe_blacklist = gb_sets:empty()}
        end
    catch _:_ -> update_tag(S, Cnt, T, Retries - 1)
    end.
%% Log when processing Tag shrank the safe blacklist; comparing set
%% sizes suffices since the new set is always a subset of the old.
-spec log_blacklist_change(tagname(), tag_set(), tag_set()) -> ok.
log_blacklist_change(Tag, Old, New) ->
    OldSize = gb_sets:size(Old),
    NewSize = gb_sets:size(New),
    if
        OldSize =:= NewSize ->
            ok;
        true ->
            lager:info("GC: safe blacklist shrunk from ~p to ~p while processing ~p",
                       [gb_sets:to_list(Old), gb_sets:to_list(New), Tag])
    end.
% RR2) Update tags that contain blobs that were re-replicated, and/or
% re-replicate tags that don't have enough replicas.
%% Compute the blob updates a tag needs (recovered/new replica
%% locations, blacklist filtering) and notify the tag when there is
%% anything to change or its own replication factor is too low;
%% maintains the safe blacklist along the way.
-spec update_tag_body(state(), non_neg_integer(),
                      tagname(), tagid(), [[url()]], [node()]) -> state().
update_tag_body(#state{safe_blacklist = SBL, blacklist = BL, tagk = TagK} = S,
                Cnt, Tag, Id, TagUrls, TagReplicas) ->
    % Collect the blobs that need updating, and compute their new
    % replica locations.
    {Updates, SBL1} = collect_updates(S, TagUrls, SBL),
    UsableTagReplicas = find_usable(BL, TagReplicas),
    case {Updates, length(UsableTagReplicas)} of
        {[], NumTagReps} when NumTagReps >= TagK ->
            % There are no blob updates, and there are the requisite
            % number of tag replicas; tag doesn't need update.
            log_blacklist_change(Tag, SBL, SBL1),
            S#state{safe_blacklist = SBL1};
        _ ->
            % In all other cases, send the tag an update, and update
            % the safe_blacklist.
            SBL2 = gb_sets:subtract(SBL1, gb_sets:from_list(TagReplicas)),
            Msg = {gc_rr_update, Updates, BL, Id},
            lager:info("Updating tag ~p (~p) with ~p (blacklist ~p)",
                       [Id, Cnt, Updates, BL]),
            ddfs_master:tag_notify(Msg, Tag),
            log_blacklist_change(Tag, SBL, SBL2),
            S#state{safe_blacklist = SBL2}
    end.
%% Walk all blobsets of a tag, computing the blob updates to send to
%% the tag, while removing every still-referenced node from the safe
%% blacklist. Thin wrapper around collect/3.
-spec collect_updates(state(), [[url()]], tag_set()) ->
                             {[blob_update()], tag_set()}.
collect_updates(S, BlobSets, SafeBlacklist) ->
    collect(S, BlobSets, {[], SafeBlacklist}).
%% Accumulator loop behind collect_updates/3: for each blobset,
%% decide whether the tag needs a {NewLocations, Remove} update, a
%% blacklist 'filter' update, or nothing, and shrink the safe
%% blacklist by the nodes the blobset references.
collect(_S, [], {_Updates, _SBL} = Result) ->
    Result;
collect(S, [[]|Rest], {_Updates, _SBL} = Acc) ->
    collect(S, Rest, Acc);
collect(#state{blacklist = BL, blobk = BlobK} = S,
        [BlobSet|Rest], {Updates, SBL} = Acc) ->
    {[BlobName|_], Nodes} = lists:unzip([ddfs_url(Url) || Url <- BlobSet]),
    case BlobName of
        ignore ->
            collect(S, Rest, Acc);
        _ ->
            % Any referenced nodes are not safe to be removed from DDFS.
            NewSBL = gb_sets:subtract(SBL, gb_sets:from_list(Nodes)),

            Remove = removable_locations(BlobSet),
            NewLocations = usable_locations(S, BlobName) -- BlobSet,
            BListed = find_unusable(BL, Nodes),
            Usable  = find_usable(BL, Nodes),
            % Blacklisted nodes can only be dropped when enough
            % non-blacklisted replicas remain.
            CanFilter = [] =/= BListed andalso length(Usable) >= BlobK,
            case {NewLocations ++ Remove, CanFilter} of
                {[], false} ->
                    % Blacklist filtering is not needed, and there are
                    % no updates.
                    collect(S, Rest, {Updates, NewSBL});
                {[], true} ->
                    % Safely remove blacklisted nodes from the
                    % blobset.  (The tag will perform another safety
                    % check before removal using the tag id.)
                    Update = {BlobName, filter},
                    collect(S, Rest, {[Update | Updates], NewSBL});
                {[_|_], _} ->
                    % There are new usable locations for the blobset.
                    Update = {BlobName, {NewLocations, Remove}},
                    collect(S, Rest, {[Update | Updates], NewSBL})
            end
    end.
%% Given a tag's blobset, return the urls whose node reported the
%% blob as missing (recorded in column 7 of gc_blobs), i.e. the
%% locations the tag should drop.
-spec removable_locations([url()]) -> [url()].
removable_locations(BlobSet) ->
    [{BlobName, _} | _] = Locations = [ddfs_url(Url) || Url <- BlobSet],
    case ets:lookup(gc_blobs, BlobName) of
        [] ->
            % New blob added after GC/RR started.
            [];
        [{_, _, _, _, _, _, []}] ->
            % No locations need to be removed
            [];
        [{_, _, _, _, _, _, UpdateMissing}] ->
            Remove = [{B, N} || {B, N} <- Locations, lists:member(N, UpdateMissing)],
            [Url || Url <- BlobSet, lists:member(ddfs_url(Url), Remove)]
    end.
%% Compute all currently usable urls for BlobName from its gc_blobs
%% entry: present and recovered locations, plus any urls created by
%% re-replication.
-spec usable_locations(state(), object_name()) -> [url()].
usable_locations(S, BlobName) ->
    case ets:lookup(gc_blobs, BlobName) of
        [] ->
            % New blob added after GC/RR started.
            [];
        [{_, P, R, noupdate, _, _, _}] ->
            usable_locations(S, BlobName, P ++ R, []);
        [{_, P, R, {update, NewUrls}, _, _, _}] ->
            usable_locations(S, BlobName, P ++ R, NewUrls)
    end.
%% Convert the blob's known {Node, Volume} locations into urls,
%% dropping blacklisted nodes, and merge (sorted, deduplicated) with
%% the urls produced by re-replication.
-spec usable_locations(state(), object_name(), [object_location()],
                       [url()]) -> [url()].
usable_locations(#state{blacklist = BL, root = Root},
                 BlobName, Locations, NewUrls) ->
    CurUrls = [url(N, V, BlobName, Root) || {N, V} <- Locations,
                                            not lists:member(N, BL)],
    lists:usort(CurUrls ++ NewUrls).
%% Construct the blob url for Blob hosted on node N, volume V, under
%% the DDFS root Root.
url(N, V, Blob, Root) ->
    {ok, _Local, Url} = ddfs_util:hashdir(Blob, disco:host(N), "blob", Root, V),
    Url.
%% Split the cluster's nodes into under- and over-utilized sets
%% relative to the average disk usage, logging a summary.
%% Returns {UnderUsed, OverUsed}.
find_unstable_nodes(NS) ->
    DiskUsage = ddfs_rebalance:avg_disk_usage(NS),
    UnderUsed = find_unstable_nodes(underused, NS, DiskUsage),
    OverUsed  = find_unstable_nodes(overused, NS, DiskUsage),
    lager:info("GC: average disk utilization: ~p, "
               "over utilized nodes: ~p, "
               "under utilized nodes: ~p",
               [DiskUsage, length(OverUsed), length(UnderUsed)]),
    {UnderUsed, OverUsed}.
%% Select the nodes whose utility deviates from the average usage by
%% more than the rebalance threshold, on the requested side.
-spec find_unstable_nodes(underused | overused, [node_info()], non_neg_integer())
                         -> [node()].
find_unstable_nodes(underused, NodeStats, AvgUsage) ->
    Threshold = ddfs_rebalance:threshold(),
    [N || {N, _} = Node <- NodeStats, ddfs_rebalance:utility(Node) < AvgUsage - Threshold];
find_unstable_nodes(overused, NodeStats, AvgUsage) ->
    Threshold = ddfs_rebalance:threshold(),
    [N || {N, _} = Node <- NodeStats, ddfs_rebalance:utility(Node) > AvgUsage + Threshold].
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
-module(cache_shards_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-compile([export_all]).
-compile(nowarn_export_all).
%% Every exported function of arity 1 is a test case, except the
%% common_test framework callbacks themselves.
all() ->
    NotTests = [module_info, init_per_suite, end_per_suite],
    [Name || {Name, 1} <- ?MODULE:module_info(exports),
             not lists:member(Name, NotTests)].
%% Suite-level setup: the cache application must be running for every
%% test case in this suite.
init_per_suite(Config) ->
    ok = application:start(cache),
    Config.

end_per_suite(_Config) ->
    ok.

%% Derive a per-testcase cache name from the test case name so that
%% cases cannot interfere with one another.
init_per_testcase(TestCase, Config) ->
    CacheName = list_to_atom("cache_" ++ atom_to_list(TestCase)),
    [{cache_name, CacheName} | Config].

end_per_testcase(_TestCase, Config) ->
    proplists:delete(cache_name, Config).
%%
%%
%% Start/stop lifecycle of sharded caches: starting twice yields
%% already_started, shard counts are tracked in the application
%% environment, and dropping removes the bookkeeping entry.
lifecycle_sharded_cache(_Config) ->
    % The cache application's supervisor must already be running.
    ?assert(is_pid(whereis(cache_sup))),
    ?assertMatch({ok, _}, cache_shards:start(cache1, 4)),
    ?assertMatch({error, {already_started, _}}, cache_shards:start(cache1, 4)),
    ?assertMatch({ok, _}, cache_shards:start(cache2, 8)),
    ?assertMatch({error, {already_started, _}}, cache_shards:start(cache2, 8)),
    % Both caches and their shard counts are recorded in the app env.
    {ok, CacheShards1} = application:get_env(cache, cache_shards),
    ?assertEqual(2, maps:size(CacheShards1)),
    ?assertMatch(#{cache1 := 4, cache2 := 8}, CacheShards1),
    ?assertEqual(ok, cache_shards:drop(cache1)),
    ?assertEqual({error, invalid_cache}, cache_shards:drop(cache1)),
    ?assertEqual(ok, cache_shards:drop(cache2)),
    ?assertEqual({error, invalid_cache}, cache_shards:drop(cache2)),
    ?assertEqual({error, invalid_cache}, cache_shards:drop(some_invalid_cache_name)),
    % After dropping everything the bookkeeping map is empty again.
    {ok, CacheShards2} = application:get_env(cache, cache_shards),
    ?assertEqual(0, maps:size(CacheShards2)),
    ?assertMatch(#{}, CacheShards2),
    ok.
%% Shard resolution: every key maps to one of the N started shard
%% tables, and unknown caches are rejected.
get_shard(Config) ->
    CacheName = ?config(cache_name, Config),
    {ok, _} = cache_shards:start(CacheName, 4),
    % NOTE(review): these shard names appear tied to this test case's
    % name ("cache_get_shard_" ++ N) -- confirm against cache_shards'
    % naming scheme if this case is ever renamed.
    Shards = [cache_get_shard_1, cache_get_shard_2, cache_get_shard_3, cache_get_shard_4],
    lists:foreach(
        fun(ID) ->
            {ok, Shard} = cache_shards:get_shard(CacheName, ID),
            ?assert(lists:member(Shard, Shards))
        end,
        lists:seq(1, 100)
    ),
    ?assertEqual({error, invalid_cache}, cache_shards:get_shard(some_invalid_cache_name, 1)),
    ok = cache_shards:drop(CacheName),
    ok.
%% put/get/delete round-trip: a value written through the sharded API
%% must be readable back (also directly from the owning shard table)
%% and gone after delete. (Also strips dataset residue that was fused
%% onto the final line and broke the file's syntax.)
get_put_delete(Config) ->
    CacheName = ?config(cache_name, Config),
    {ok, _} = cache_shards:start(CacheName, 4),
    ?assertEqual({error, not_found}, cache_shards:get(CacheName, key1)),
    ?assertEqual(ok, cache_shards:put(CacheName, key1, value1)),
    ?assertEqual({ok, value1}, cache_shards:get(CacheName, key1)),
    ?assertEqual({error, invalid_cache}, cache_shards:get(some_invalid_cache_name, key1)),
    % The entry must live in exactly the shard the key hashes to.
    {ok, Shard} = cache_shards:get_shard(CacheName, key1),
    ?assertEqual(value1, cache:get(Shard, key1)),
    ?assertEqual(ok, cache_shards:delete(CacheName, key1)),
    ?assertEqual({error, not_found}, cache_shards:get(CacheName, key1)),
    ok = cache_shards:drop(CacheName),
    ok.
% @doc Digilent Pmod_ALS module
% This component provides ambient light-to-digital
% sensing through the SPI2 interface on a 6-pin connector.
%
% For further information about the Digilent Pmod_ALS
% and its components :
% https://store.digilentinc.com/pmod-als-ambient-light-sensor/
%
% Texas Instrument's ADC081S021 analog-to-digital converter
% http://www.ti.com/lit/ds/symlink/adc081s021.pdf
%
% Vishay Semiconductor's TEMT6000X01.
% http://www.vishay.com/docs/81579/temt6000.pdf
%
% Start the driver with
% ```
% 1> grisp:add_device(spi2, pmod_als).
% '''
% @end
% Created : 06. Nov 2018 21:47
-module(pmod_als).
-behaviour(gen_server).
-include("grisp.hrl").
% API
-export([start_link/2]).
-export([read/0]).
-export([percentage/0]).
% gen_server callbacks
-export([init/1]).
-export([handle_call/3]).
-export([handle_cast/2]).
-export([handle_info/2]).
-export([terminate/2]).
-export([code_change/3]).
%===================================================================
% Macros
%===================================================================
-define(SPI_MODE, #{cpol => high, cpha => trailing}).
%===================================================================
% Records
%===================================================================
-record(state , {
options = []
}).
%===================================================================
% API
%===================================================================
% @private
%% Start the ALS driver process for the given Pmod slot. Called by the
%% grisp device machinery; the server is not registered by name.
start_link(Slot, Opts) ->
    gen_server:start_link(?MODULE, [Slot, Opts], []).
% @doc Returns the ambient light value that is currently sensed
% by the ALS module. On success, the return value is a number
% in the 0..255 range that is proportional to the luminous
% intensity.
%
% Technically, the values are representative of the
% power perceived by the phototransistor from the light source
% incoming at an angle of ±60°. It is a model of the response
% of the human eye to the same light source that is obtained
% by calculating the luminosity function.
%
% The peak wavelength sensitivity of the module is at 570nm
% making it close to the human eye (555nm). This implies that
% return values will be the highest when the ALS is exposed to
% wavelengths of green light with a slight yellow tint.
-spec read() -> 0..255 | no_return().
read() ->
    % Resolve the registered default pmod_als device, then ask its
    % server process for one sample; raise on any driver-level error.
    Dev = grisp_devices:default(?MODULE),
    case gen_server:call(Dev#device.pid, {read, Dev#device.slot}) of
        {error, Reason} -> error(Reason);
        Result -> Result
    end.
% @doc Returns a the percentage of current ambient light
% based on the {@link pmod_als:read/0} function. The value
% is rounded to the closest integer.
-spec percentage() -> 0..100.
percentage() ->
    % Scale the raw 8-bit sample (0..255) to a 0..100 percentage,
    % rounded to the nearest integer.
    Reading = read(),
    round((Reading / 255) * 100).
%===================================================================
% gen_server callbacks
%===================================================================
% @private
init([Slot = spi2, Opts]) ->
    % The ALS Pmod is only supported on SPI slot 2; register with the
    % grisp device registry so clients can find this server.
    ok = grisp_devices:register(Slot, ?MODULE),
    {ok, #state{options = Opts}};
init(Slot) ->
    % Any other slot is a wiring/configuration error.
    error({incompatible_slot, Slot}).
%--------------------------------------------------------------------
% @private
%% Only the spi2 read request is part of the protocol; anything else
%% is a programming error and crashes the server.
handle_call({read, spi2 = Slot}, _From, State) ->
    Reading = get_value(Slot),
    {reply, Reading, State};
handle_call(Request, _From, _State) ->
    error({unknown_call, Request}).
% @private
% No casts are part of this module's protocol; crash on any.
handle_cast(Request , _State) ->
    error({unknown_cast, Request}).

% @private
% No out-of-band messages are expected either.
handle_info(Info , _State) ->
    error({unknown_info, Info}).

% @private
% Nothing to clean up: registration is handled by grisp_devices.
terminate(_Reason , _State) ->
    ok.

% @private
code_change(_OldVsn , State , _Extra) ->
    {ok , State}.
%===================================================================
% Internal functions
%===================================================================
% @private
% Perform one conversion on the ADC over SPI: clock out a zero byte
% and receive 16 bits back; the 8-bit sample sits between 3 leading
% null bits and 5 trailing padding bits of the response.
% (Also strips dataset residue fused onto the final line.)
get_value(Slot) ->
    <<_:3,Resp:8,_Pad:5>> = grisp_spi:send_recv(Slot, ?SPI_MODE, <<0:8>>, 0, 1),
    Resp.
%% Copyright (c) Facebook, Inc. and its affiliates.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(erlfmt_algebra_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").
-include_lib("proper/include/proper.hrl").
%% Test server callbacks
-export([
suite/0,
all/0,
groups/0,
init_per_suite/1,
end_per_suite/1,
init_per_group/2,
end_per_group/2,
init_per_testcase/2,
end_per_testcase/2
]).
%% Test cases
-export([
string_append_case/1,
string_spaces_case/1,
lines_combine_case/1,
lines_unit/1,
metric_combine_case/1,
metric_unit/1,
document80_combine_case/1,
document20_combine_case/1,
document80_choice_case/1,
document20_choice_case/1,
document_unit/1,
document_fail/1,
document_prepend/1,
document_paper_example/1
]).
-define(alg, erlfmt_algebra).
%% Per-testcase timetrap: fail any case that runs longer than 10s.
suite() ->
    [{timetrap, {seconds, 10}}].

init_per_suite(Config) ->
    % ct_property_test reads this to pick the property-testing tool.
    [{property_test_tool, proper} | Config].

end_per_suite(_Config) ->
    ok.

init_per_group(_GroupName, Config) ->
    Config.

end_per_group(_GroupName, _Config) ->
    ok.

init_per_testcase(_TestCase, Config) ->
    Config.

end_per_testcase(_TestCase, _Config) ->
    ok.
%% Group the cases by API under test; each group runs its cases in
%% parallel.
groups() ->
    [
        {string_api, [parallel], [string_append_case, string_spaces_case]},
        {lines_api, [parallel], [lines_combine_case, lines_unit]},
        {metric_api, [parallel], [metric_combine_case, metric_unit]},
        {document_api, [parallel], [
            document80_combine_case,
            document20_combine_case,
            document80_choice_case,
            document20_choice_case,
            document_unit,
            document_fail,
            document_prepend,
            document_paper_example
        ]}
    ].

all() ->
    [{group, string_api}, {group, lines_api}, {group, metric_api}, {group, document_api}].
%%--------------------------------------------------------------------
%% TEST CASESS
%% Property: appending two algebra strings is equivalent to appending
%% their underlying texts.
string_append_equal_prop() ->
    ?FORALL({Left, Right}, {str(), str()}, begin
        Appended = ?alg:string_append(Left, Right),
        string:equal(?alg:string_text(Appended), [
            ?alg:string_text(Left) | ?alg:string_text(Right)
        ])
    end).

%% Property: the cached length of an appended string matches the real
%% grapheme length of its text.
string_append_length_prop() ->
    ?FORALL({Left, Right}, {str(), str()}, begin
        Appended = ?alg:string_append(Left, Right),
        ?alg:string_length(Appended) =:= string:length(?alg:string_text(Appended))
    end).
%% Match on 'true' so a failing property actually fails the test case
%% (ct_property_test:quickcheck/2 returns true | {fail, _}); this is
%% consistent with the document_* cases below.
string_append_case(Config) when is_list(Config) ->
    true = ct_property_test:quickcheck(string_append_equal_prop(), Config),
    true = ct_property_test:quickcheck(string_append_length_prop(), Config).
%% Property: string_spaces(N) produces text of exactly N characters.
string_spaces_prop() ->
    ?FORALL(Count, non_neg_integer(), begin
        string:length(?alg:string_text(?alg:string_spaces(Count))) =:= Count
    end).
%% Match on 'true' so a failing property fails the test case
%% (consistent with the document_* cases below).
string_spaces_case(Config) when is_list(Config) ->
    true = ct_property_test:quickcheck(string_spaces_prop(), Config).
%% Bundles one layout-algebra implementation: constructor (new),
%% flush (terminate line), combine (composition) and render.
-record(layout, {new, flush, combine, render}).
%% Law: combination is associative (compared on rendered text).
combine_assoc_prop(#layout{combine = Combine, render = Render} = Layout) ->
    Gen = layout(Layout),
    ?FORALL({L1, L2, L3}, {Gen, Gen, Gen}, begin
        Combined1 = Combine(L1, Combine(L2, L3)),
        Combined2 = Combine(Combine(L1, L2), L3),
        string:equal(Render(Combined1), Render(Combined2))
    end).

%% Law: combining two flushed layouts equals flushing the combination
%% of a flushed left operand with the right operand.
combine_flush_prop(#layout{combine = Combine, render = Render, flush = Flush} = Layout) ->
    Gen = layout(Layout),
    ?FORALL({L1, L2}, {Gen, Gen}, begin
        Combined1 = Combine(Flush(L1), Flush(L2)),
        Combined2 = Flush(Combine(Flush(L1), L2)),
        string:equal(Render(Combined1), Render(Combined2))
    end).
%% Match on 'true' so a failing property fails the test case
%% (consistent with the document_* cases below).
lines_combine_case(Config) when is_list(Config) ->
    Lines = lines_layout(),
    true = ct_property_test:quickcheck(combine_assoc_prop(Lines), Config),
    true = ct_property_test:quickcheck(combine_flush_prop(Lines), Config).
%% Concrete lines_combine example: combining glues the first line of
%% the right block onto the last (open) line of the left block and
%% indents the remaining right-hand lines to that column, as in the
%% diagram below.
lines_unit(Config) when is_list(Config) ->
    % xxxxxxxx yyyyyyyy xxxxxxxx
    % xxx <> yyyy = xxx
    % xxxxxxx xxxxxxx
    % xxxxx xxxxxyyyyyyyy
    % yyyy
    New = fun (Text) -> ?alg:lines_new(?alg:string_new(Text)) end,
    #layout{flush = Flush, combine = Combine, render = Render} = lines_layout(),
    % Left block: three flushed (newline-terminated) lines plus an
    % open last line "xxxxx".
    Left = Combine(
        Flush(New("xxxxxxxx")),
        Combine(Flush(New("xxx")), Combine(Flush(New("xxxxxxx")), New("xxxxx")))
    ),
    ?assertEqual(
        "xxxxxxxx\n"
        "xxx\n"
        "xxxxxxx\n"
        "xxxxx",
        unicode:characters_to_list(Render(Left))
    ),
    Right = Combine(Flush(New("yyyyyyyy")), New("yyyy")),
    ?assertEqual(
        "yyyyyyyy\n"
        "yyyy",
        unicode:characters_to_list(Render(Right))
    ),
    % "yyyyyyyy" lands after "xxxxx"; "yyyy" is indented to column 5.
    Combined = Combine(Left, Right),
    ?assertEqual(
        "xxxxxxxx\n"
        "xxx\n"
        "xxxxxxx\n"
        "xxxxxyyyyyyyy\n"
        " yyyy",
        unicode:characters_to_list(Render(Combined))
    ).
%% Match on 'true' so a failing property fails the test case
%% (consistent with the document_* cases below).
metric_combine_case(Config) when is_list(Config) ->
    Metric = metric_layout(),
    true = ct_property_test:quickcheck(combine_assoc_prop(Metric), Config),
    true = ct_property_test:quickcheck(combine_flush_prop(Metric), Config).
%% Same example as lines_unit, but with the metric layout, which only
%% tracks dimensions: rendering paints every character as 'x', so
%% only the shape (line widths and line count) is meaningful.
metric_unit(Config) when is_list(Config) ->
    % xxxxxxxx yyyyyyyy xxxxxxxxxxxxx
    % xxx <> yyyy = xxxxxxxxxxxxx
    % xxxxxxx xxxxxxxxxxxxx
    % xxxxx xxxxxxxxxxxxx
    % xxxxxxxxx
    New = fun (Text) -> ?alg:metric_new(?alg:string_new(Text)) end,
    #layout{flush = Flush, combine = Combine, render = Render} = metric_layout(),
    Left = Combine(
        Flush(New("xxxxxxxx")),
        Combine(Flush(New("xxx")), Combine(Flush(New("xxxxxxx")), New("xxxxx")))
    ),
    % NOTE: widths collapse to the maximum per the metric semantics.
    ?assertEqual(
        "xxxxxxxx\n"
        "xxxxxxxx\n"
        "xxxxxxxx\n"
        "xxxxx",
        unicode:characters_to_list(Render(Left))
    ),
    Right = Combine(Flush(New("yyyyyyyy")), New("yyyy")),
    % The y-inputs also render as x's: only dimensions survive.
    ?assertEqual(
        "xxxxxxxx\n"
        "xxxx",
        unicode:characters_to_list(Render(Right))
    ),
    Combined = Combine(Left, Right),
    ?assertEqual(
        "xxxxxxxxxxxxx\n"
        "xxxxxxxxxxxxx\n"
        "xxxxxxxxxxxxx\n"
        "xxxxxxxxxxxxx\n"
        "xxxxxxxxx",
        unicode:characters_to_list(Render(Combined))
    ).
%% Law: choice is associative (compared up to rendered height; the
%% chosen alternatives may differ textually).
choice_assoc_prop(Choice, #layout{render = Render} = Layout) ->
    Gen = layout(Layout),
    ?FORALL({L1, L2, L3}, {Gen, Gen, Gen}, begin
        Alternative1 = Choice(L1, Choice(L2, L3)),
        Alternative2 = Choice(Choice(L1, L2), L3),
        equal_height(Render(Alternative1), Render(Alternative2))
    end).

%% Law: combine distributes over choice from the left.
choice_combine_left_distribute_prop(
    Choice,
    #layout{combine = Combine, render = Render} = Layout
) ->
    Gen = layout(Layout),
    ?FORALL({L1, L2, L3}, {Gen, Gen, Gen}, begin
        Combined1 = Combine(Choice(L1, L2), L3),
        Combined2 = Choice(Combine(L1, L3), Combine(L2, L3)),
        equal_height(Render(Combined1), Render(Combined2))
    end).

%% Law: combine distributes over choice from the right.
choice_combine_right_distribute_prop(
    Choice,
    #layout{combine = Combine, render = Render} = Layout
) ->
    Gen = layout(Layout),
    ?FORALL({L1, L2, L3}, {Gen, Gen, Gen}, begin
        Combined1 = Combine(L3, Choice(L1, L2)),
        Combined2 = Choice(Combine(L3, L1), Combine(L3, L2)),
        equal_height(Render(Combined1), Render(Combined2))
    end).

%% Law: flush distributes over choice.
choice_flush_distribute_prop(Choice, #layout{flush = Flush, render = Render} = Layout) ->
    Gen = layout(Layout),
    ?FORALL({L1, L2}, {Gen, Gen}, begin
        Combined1 = Flush(Choice(L1, L2)),
        Combined2 = Choice(Flush(L1), Flush(L2)),
        equal_height(Render(Combined1), Render(Combined2))
    end).

%% Law: choice is commutative (up to rendered height).
choice_commutative_prop(Choice, #layout{render = Render} = Layout) ->
    Gen = layout(Layout),
    ?FORALL({L1, L2}, {Gen, Gen}, begin
        Combined1 = Choice(L1, L2),
        Combined2 = Choice(L2, L1),
        equal_height(Render(Combined1), Render(Combined2))
    end).
%% Combine laws for the document layout at a wide page (80 columns).
document80_combine_case(Config) when is_list(Config) ->
    Document80 = document_layout(80),
    true = ct_property_test:quickcheck(combine_assoc_prop(Document80), Config),
    true = ct_property_test:quickcheck(combine_flush_prop(Document80), Config).

%% Combine laws at a narrow page (20), where layout choices matter.
document20_combine_case(Config) when is_list(Config) ->
    Document20 = document_layout(20),
    true = ct_property_test:quickcheck(combine_assoc_prop(Document20), Config),
    true = ct_property_test:quickcheck(combine_flush_prop(Document20), Config).

%% Choice laws at page width 80.
document80_choice_case(Config) when is_list(Config) ->
    Document80 = document_layout(80),
    Choice = fun ?alg:document_choice/2,
    true = ct_property_test:quickcheck(choice_assoc_prop(Choice, Document80), Config),
    true = ct_property_test:quickcheck(
        choice_combine_left_distribute_prop(Choice, Document80),
        Config
    ),
    true = ct_property_test:quickcheck(
        choice_combine_right_distribute_prop(Choice, Document80),
        Config
    ),
    true = ct_property_test:quickcheck(
        choice_flush_distribute_prop(Choice, Document80),
        Config
    ),
    true = ct_property_test:quickcheck(choice_commutative_prop(Choice, Document80), Config).

%% Choice laws at page width 20.
document20_choice_case(Config) when is_list(Config) ->
    Document20 = document_layout(20),
    Choice = fun ?alg:document_choice/2,
    true = ct_property_test:quickcheck(choice_assoc_prop(Choice, Document20), Config),
    true = ct_property_test:quickcheck(
        choice_combine_left_distribute_prop(Choice, Document20),
        Config
    ),
    true = ct_property_test:quickcheck(
        choice_combine_right_distribute_prop(Choice, Document20),
        Config
    ),
    true = ct_property_test:quickcheck(
        choice_flush_distribute_prop(Choice, Document20),
        Config
    ),
    true = ct_property_test:quickcheck(choice_commutative_prop(Choice, Document20), Config).
%% The document layout must agree with the lines layout on a concrete
%% example when the page (width 20) fits the result (same diagram as
%% lines_unit).
document_unit(Config) when is_list(Config) ->
    % xxxxxxxx yyyyyyyy xxxxxxxx
    % xxx <> yyyy = xxx
    % xxxxxxx xxxxxxx
    % xxxxx xxxxxyyyyyyyy
    % yyyy
    New = fun (Text) -> ?alg:document_text(Text) end,
    #layout{flush = Flush, combine = Combine, render = Render} = document_layout(20),
    Left = Combine(
        Flush(New("xxxxxxxx")),
        Combine(Flush(New("xxx")), Combine(Flush(New("xxxxxxx")), New("xxxxx")))
    ),
    ?assertEqual(
        "xxxxxxxx\n"
        "xxx\n"
        "xxxxxxx\n"
        "xxxxx",
        unicode:characters_to_list(Render(Left))
    ),
    Right = Combine(Flush(New("yyyyyyyy")), New("yyyy")),
    ?assertEqual(
        "yyyyyyyy\n"
        "yyyy",
        unicode:characters_to_list(Render(Right))
    ),
    Combined = Combine(Left, Right),
    ?assertEqual(
        "xxxxxxxx\n"
        "xxx\n"
        "xxxxxxx\n"
        "xxxxxyyyyyyyy\n"
        " yyyy",
        unicode:characters_to_list(Render(Combined))
    ).
%% A document with no alternatives at all cannot be rendered.
document_fail(Config) when is_list(Config) ->
    Doc = ?alg:document_fail(),
    ?assertError(no_viable_layout, ?alg:document_render(Doc, [])).
%% document_prepend glues Left in front of the document's first line
%% without indenting the following lines (unlike combine, which does
%% indent -- note "])" below sits at column 0).
document_prepend(Config) when is_list(Config) ->
    New = fun (Text) -> ?alg:document_text(Text) end,
    #layout{flush = Flush, combine = Combine, render = Render} = document_layout(20),
    Left = New("foo("),
    Right = New(")"),
    Doc = Combine(Flush(New("[")), Combine(Flush(New(" X,")), New("]"))),
    Combined = ?alg:document_prepend(Left, Combine(Doc, Right)),
    ?assertEqual(
        "foo([\n"
        " X,\n"
        "])",
        unicode:characters_to_list(Render(Combined))
    ).
%% End-to-end s-expression example: the same document chooses
%% progressively more vertical layouts as the page width shrinks
%% (80 -> 50 -> 20 columns).
document_paper_example(Config) when is_list(Config) ->
    Abcd = [a, b, c, d],
    Abcd4 = [Abcd, Abcd, Abcd, Abcd],
    Sexpr = pretty_sexpr([[abcde, Abcd4], [abcdefghi, Abcd4]]),
    ?assertEqual(
        "((abcde ((a b c d) (a b c d) (a b c d) (a b c d)))\n"
        " (abcdefghi ((a b c d) (a b c d) (a b c d) (a b c d))))",
        render_sexpr(Sexpr, 80)
    ),
    ?assertEqual(
        "((abcde ((a b c d) (a b c d) (a b c d) (a b c d)))\n"
        " (abcdefghi\n"
        " ((a b c d) (a b c d) (a b c d) (a b c d))))",
        render_sexpr(Sexpr, 50)
    ),
    ?assertEqual(
        "((abcde ((a b c d)\n"
        " (a b c d)\n"
        " (a b c d)\n"
        " (a b c d)))\n"
        " (abcdefghi\n"
        " ((a b c d)\n"
        " (a b c d)\n"
        " (a b c d)\n"
        " (a b c d))))",
        render_sexpr(Sexpr, 20)
    ).
%% Render Document at the given page width and flatten the resulting
%% chardata into a plain list for comparison with string literals.
render_sexpr(Document, PageWidth) ->
    unicode:characters_to_list(
        ?alg:document_render(Document, [{page_width, PageWidth}])).
%% Build a document for an s-expression. A list offers two layouts --
%% all elements on one line separated by spaces, or one element per
%% line -- and the algebra picks per page width. Atoms render as
%% their name.
pretty_sexpr(List) when is_list(List) ->
    Lparen = ?alg:document_text("("),
    Rparen = ?alg:document_text(")"),
    Space = ?alg:document_text(" "),
    % Horizontal alternative: E1 E2 ... En separated by single spaces.
    HorizontalFold = fun (Elem, Acc) ->
        ?alg:document_combine(Elem, ?alg:document_combine(Space, Acc))
    end,
    % Vertical alternative: each element flushed onto its own line.
    VerticalFold = fun (Elem, Acc) ->
        ?alg:document_combine(?alg:document_flush(Elem), Acc)
    end,
    Rendered = lists:map(fun pretty_sexpr/1, List),
    Horizontal = ?alg:document_reduce(HorizontalFold, Rendered),
    Vertical = ?alg:document_reduce(VerticalFold, Rendered),
    Elements = ?alg:document_choice(Horizontal, Vertical),
    ?alg:document_combine(Lparen, ?alg:document_combine(Elements, Rparen));
pretty_sexpr(Atom) when is_atom(Atom) ->
    ?alg:document_text(atom_to_binary(Atom, utf8)).
%% The three algebra implementations under test, bundled as #layout{}
%% records so the generic property helpers can drive any of them.
lines_layout() ->
    #layout{
        new = fun ?alg:lines_new/1,
        flush = fun ?alg:lines_flush/1,
        combine = fun ?alg:lines_combine/2,
        render = fun ?alg:lines_render/1
    }.

metric_layout() ->
    #layout{
        new = fun ?alg:metric_new/1,
        flush = fun ?alg:metric_flush/1,
        combine = fun ?alg:metric_combine/2,
        render = fun ?alg:metric_render/1
    }.

%% The document layout needs a page width; rendering is wrapped so
%% documents that cannot fit render as "" (see document_render/2).
document_layout(PageWidth) ->
    #layout{
        new = fun (String) -> ?alg:document_text(?alg:string_text(String)) end,
        flush = fun ?alg:document_flush/1,
        combine = fun ?alg:document_combine/2,
        render = fun (Document) -> document_render(Document, PageWidth) end
    }.
%% The properties don't hold if we start accepting too wide documents,
%% so render strictly (allow_unfit false) and treat "no viable
%% layout" as the empty rendering.
document_render(Document, PageWidth) ->
    try ?alg:document_render(Document, [{page_width, PageWidth}, {allow_unfit, false}])
    catch
        error:no_viable_layout -> ""
    end.
%% Two rendered texts are considered equivalent for the layout laws
%% when they span the same number of lines (the laws are stated up to
%% height, since different alternatives may be chosen).
equal_height(TextA, TextB) ->
    line_count(TextA) =:= line_count(TextB).

%% Number of lines: one more than the number of newline separators.
line_count(Text) ->
    length(string:split(Text, "\n", all)).
%% It's possible for the utf8 generator to produce strings that start or end with
%% a decomposed accent or something else like this - this means that when appended
%% it composes into one grapheme with the other string and lengths are off.
str() ->
    % Keep only strings whose measured length changes when a
    % neighbouring character is added on either side, i.e. reject
    % strings starting/ending in a combining codepoint that would
    % merge into one grapheme with adjacent text (see comment above).
    ClosedUTF8 = ?SUCHTHAT(Str, utf8(), begin
        Length = string:length(Str),
        string:length([" " | Str]) =/= Length andalso string:length([Str | " "]) =/= Length
    end),
    % Strip whitespace/newlines: the algebra strings model single
    % lines of text.
    ?LET(
        Str,
        ClosedUTF8,
        ?alg:string_new(binary:replace(Str, [<<" ">>, <<"\n">>, <<"\r">>], <<>>))
    ).
%% Generator for a random layout built with the given implementation,
%% sized via proper's ?SIZED mechanism.
layout(Layout) ->
    ?SIZED(Size, limited_layout(Size, Layout)).

%% Depth-bounded layout generator: at size =< 1 produce a plain
%% string leaf; otherwise pick a leaf, a flush of a smaller layout,
%% or a combination of two smaller layouts. (This block also strips
%% dataset residue fused onto the final line, which broke the file's
%% syntax.)
limited_layout(Size, #layout{new = New}) when Size =< 1 ->
    ?LET(Str, str(), New(Str));
limited_layout(Size, #layout{new = New, flush = Flush, combine = Combine} = Layout) ->
    Self = ?LAZY(limited_layout(Size - 1, Layout)),
    union([
        ?LET(Str, str(), New(Str)),
        ?LET(Lines, Self, Flush(Lines)),
        ?LET({Left, Right}, {Self, Self}, Combine(Left, Right))
    ]).
%% This module documents the callbacks that an FMKE driver for a Key-Value Store must implement.
%%
%% A brief explanation about FMKe adapters and drivers:
%%
%% An adapter is an Erlang module that implements the complete FMKe callback set, but that is able to make assumptions
%% about the data model, connection pool or any other configurable parameter. Adapters don't communicate directly with
%% client libraries for databases, but instead do it through drivers.
%%
%% A driver is a simple wrapper over a database's client library that exposes a common interface to all databases.
%% When implementing a driver it is necessary to implement additional logic required to maintain correct application
%% state, such as keeping track of previously read values within a transaction. Failure to implement the additional
%% logic may result in anomalies which will should be documented. The performance to correctness trade-off is common
%% in these types of storage systems, and the documentation of the presented anomalies along with performance values
%% is paramount.
%%
%% Since adapters do not make assumptions about the capabilities of the database, the drivers will need to export
%% callbacks related to transactions (e.g. start_transaction/1, commit_transaction/1). These functions are expected to
%% return opaque state that is passed in to further operations, meaning that you can add contextual information by
%% returning {ok, term()}, or just {ok, []} if there is no need for context in order to perform the operations.
%%
%% Drivers might also need to set up additional components and state for themselves, which is why the start/0
%% hooks exist. In these functions you may open a pool of connections to the database (but for that purpose you can
%% already use the fmke_db_conn_manager module), create an ETS table for caching results, etc.
%% Conversely, the stop/0 function will allow you to terminate gracefully and perform any teardown you feel necessary.
%%
%% The get and put functions that drivers need to implement contain extra parameters in order to give operation context
%% to the drivers. This is to avoid all possible overhead from using a generic approach (for instance, having to derive
%% which entity is being obtained from the key passed in get/3, if you used separate buckets in the database for each
%% one) as well as trying to provide optimal compatibility with other storage systems that may require extra context to
%% perform operations.
-module(gen_fmke_kv_driver).
-include("fmke.hrl").
-type value() :: term().
-type context() :: term().
-type options() :: list({atom(), term()}).
-type txn_result() :: ok | {error, term()}.
% -type data_model() :: nested | non_nested.
%% ---------------------------------------------------------------------------------------------------------------------
%% Setup and teardown callbacks
%% ---------------------------------------------------------------------------------------------------------------------
%% Startup hook that provides information about whether the current benchmark execution is using a normalized or nested
%% data layout. It is the driver's responsibility to implement the logic for both data layouts, although the code should
%% not change significantly between them. (See example below)
%% A typical way of storing objects in CRDT databases would be to nest every field inside a top level record, which has
%% so far proved to have worse performance, since each CRDT state size will increase over multiple operations.
%% Furthermore, application records such as patients will need to store their associated prescriptions, which are
%% separate entities/records, further increasing CRDT state size. One way to bypass this is to store a reference to the
%% prescription key inside the patient, and we consider this to be a "normalized" (non-nested) data layout.
%% Implementing a driver may be done for a single data layout, ignoring the other completely. When test executions are
%% run, only valid data model implementations are considered for performance results.
% -callback start(DataModel::data_model()) -> {error, term()} | {ok, pid()}.
%% Teardown hook, called when the application is stopped.
% -callback stop() -> ok.
%% ---------------------------------------------------------------------------------------------------------------------
%% Transactional support callbacks
%% ---------------------------------------------------------------------------------------------------------------------
%% Starts a transaction while providing some context of the type of operations that are going to be performed.
%% A proplist of options (Options) is passed in, with the following values considered valid options:
%% {entity, Entity :: entity()} ->
%% The following operations that are going to be performed in this transaction only concern one entity, Entity.
%%
%% Returns any erlang term containing the state that is required by the driver to execute each operation related with a
%% transaction. It is common for the returned state to include a Pid that contains a connection to the database and
%% possibly identifier(s) for the transaction. Any erlang term is considered valid and will be passed in to subsequent
%% operations related to the same transaction.
-callback start_transaction(Options::options()) -> {ok, OperationContext::context()}.
%% Signals the end of a transaction, passing in the current operation context as well as a list of options that
%% currently serves no purpose. A typical implementation of commit_transaction includes calling commit_transaction on
%% client library (if supported) and returning the Pid to the connection pool.
%%
%% See some implementations in the fmke_db_adapter_driver_antidote.erl and fmke_db_adapter_driver_riak.erl modules.
-callback commit_transaction(OperationContext::context(), Options::options()) -> Result::txn_result().
%% ---------------------------------------------------------------------------------------------------------------------
%% Key value callbacks
%% ---------------------------------------------------------------------------------------------------------------------
%% get/2 - Fetches a list of keys from the database.
%% To provide context, some information about the entity being retrieved is included, and additionally the operation
%% context is also passed in from a previous get/3, put/4, or start_transaction/1.
%%
%% Returns a triple with {ok, GetResult, NextOperationContext} if the operation was executed successfully or
%% {error, Reason, NextOperationContext} otherwise.
-callback get(list({Key::key(), Type::entity()}), OperationContext::context()) ->
{list(app_record() | {error, term()}), context()}.
%% put/3 - Adds a list of key-value entries to the database.
%% To provide context, some information about the each entry being added is included, and additionally the operation
%% context is also passed in from a previous get/3, put/4, or start_transaction/1.
%%
%% Returns a pair with {list(put_results()), NextOperationContext} if the operation was executed successfully or
%% {error, Reason, NextOperationContext} otherwise.
%%
%% The Key to be written is passed in binary string format, as that is currently universally supported by all libraries.
%% The Value to be written is a value that the driver is able to recognize, which means that the adapters need to pass
%% valid values that the drivers are able to recognize and convert to a proper internal representation.
%%
%% A more in-depth explanation of what key() and value() should be:
%% 1. key() is a binary string representation of the key that is going to be written.
%% 2. value() is either an application record (in which case it is considered that every field is supposed to stay under
%% the same key, )
%% (Strips dataset residue fused onto the final line, which broke the
%% file's syntax; the callback itself is documented in the prose
%% above.)
-callback put(list({Key::key(), Type::entity(), Value::value()}), OperationContext::context()) ->
    {list(ok | {error, term()}), context()}.
%% -*- mode: erlang; indent-tabs-mode: nil; -*-
%%=============================================================================
%% Copyright 2014 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%=============================================================================
%% @doc Setup utility for erlang applications
%%
%% This API contains:
%% * Support functions for system install ({@link find_hooks/0},
%% {@link run_hooks/0}, {@link lib_dirs/0}).
%% * Functions for managing and inspecting the system environment
%% ({@link home/0}, {@link log_dir/0}, {@link data_dir/0},
%% {@link verify_directories/0}, {@link verify_dir/0}).
%% * Support functions for application environments ({@link get_env/2},
%% {@link get_all_env/1}, {@link find_env_vars/1}, {@link expand_value/2}).
%% * Functions for controlling dynamic load/upgrade of applications
%% ({@link find_app/1}, {@link pick_vsn/3}, {@link reload_app/1},
%% {@link patch_app/1}).
%%
%% == Variable expansion ==
%%
%% Setup supports variable substitution in application environments. It provides
%% some global variables, `"$HOME", "$DATA_DIR", "$LOG_DIR"', corresponding to
%% the API functions {@link home/0}, {@link data_dir/0} and {@link log_dir},
%% as well as some application-specific variables, `"$APP", "$PRIV_DIR",
%% "$LIB_DIR".
%%
%% The normal way to use these variables is by embedding them in file names,
%% e.g. `{my_logs, "$LOG_DIR/$APP"}', but a variable can also be referenced as:
%% * ``{'$value',Var}'' - The variable's value is used as-is (which means that
%% ``{'$value', "$APP"}'' expands to an atom corresponding to the current
%% app name.)
%% * ``{'$string', Var}'' - The value is represented as a string (list). If the
%% value isn't a "string type", `io_lib:format("~w",[Value])' is used.
%% * ``{'$binary', Var}'' - Like ``'$string''', but using binary representation.
%%
%% Custom variables can be defined by using either:
%% * *global scope* - The `setup' environment variable `vars', containing a
%% list of `{VarName, Definition}' tuples
%% * *application-local scope* - Defining an application-local environment
%% variable ``'$setup_vars''', on the same format as above.
%%
%% The `VarName' shall be a string, e.g. `"MYVAR"' (no `$' prefix).
%% `Definition' can be one of:
%% * `{value, Val}' - the value of the variable is exactly `Val'
%% * `{expand, Val}' - `Val' is expanded in its turn
%% * `{apply, M, F, A}' - Use the return value of `apply(M, F, A)'.
%%
%% When using a variable expansion, either insert the variable reference in
%% a string (or binary), or use one of the following formats:
%% * ``'{'$value', Var}''' - Use value as-is
%% * ``'{'$string', Var}''' - Use the string representation of the value
%% * ``'{'$binary', Var}''' - Use the binary representation of the value.
%%
%% Example:
%% <pre lang="erlang">
%% 2> application:set_env(setup, vars, [{"PLUS", {apply,erlang,'+',[1,2]}},
%% 2> {"FOO", {value, {foo,1}}}]).
%% ok
%% 3> application:set_env(stdlib, '$setup_vars',
%% 3> [{"MINUS", {apply,erlang,'-',[4,3]}},
%% 3> {"BAR", {value, "bar"}}]).
%% ok
%% 4> application:set_env(setup, v1, "/$BAR/$PLUS/$MINUS/$FOO").
%% ok
%% 5> setup:get_env(setup,v1).
%% {ok,"/$BAR/3/$MINUS/{foo,1}"}
%% 6> application:set_env(stdlib, v1, "/$BAR/$PLUS/$MINUS/$FOO").
%% ok
%% 7> setup:get_env(stdlib,v1).
%% {ok,"/bar/3/1/{foo,1}"}
%% </pre>
%%
%% In the above example, the first expansion (command no. 5), leaves `$BAR'
%% and `$MINUS' unexpanded, since they are defined in the `stdlib' application,
%% and thus not known to `setup'. In command no. 6, however, they <em>are</em>
%% in context, and are expanded. The variables `$PLUS' and `$FOO' have global
%% context and are expanded in both cases.
%%
%% It is also possible to refer to environment variables in the same
%% application. These are referenced as `"$env(VarName)"'. The corresponding
%% values are expanded in turn - take care not to create expansion loops!
%% The same rules for expansion as above apply.
%%
%% Example:
%% <pre lang="erlang">
%% 2> application:set_env(setup,foo,"foo").
%% ok
%% 3> application:set_env(setup,foo_dir,"$HOME/$env(foo)").
%% ok
%% 4> setup:get_env(setup,foo_dir).
%% {ok,"/Users/uwiger/git/setup/foo"}
%% </pre>
%%
%% == Customizing setup ==
%% The following environment variables can be used to customize `setup':
%% * `{home, Dir}' - The topmost directory of the running system. This should
%% be a writeable area.
%% * `{data_dir, Dir}' - A directory where applications are allowed to create
%% their own subdirectories and save data. Default is `Home/data.Node'.
%% * `{log_dir, Dir}' - A directory for logging. Default is `Home/log.Node'.
%% * `{stop_when_done, true|false}' - When invoking `setup' for an install,
%% `setup' normally remains running, allowing for other operations to be
%% performed from the shell or otherwise. If `{stop_when_done, true}', the
%% node is shut down once `setup' is finished.
%% * `{stop_delay, Millisecs}' - If `stop_when_done' is true, and the node
%% is going to shut down, setup will first wait for a specified number of
%% milliseconds (default: 5000). This can be useful in order to allow
%% asynchronous operations to complete before shutting down.
%% * `{abort_on_error, true|false}' - When running install or upgrade hooks,
%% `setup' will normally keep going even if some hooks fail. A more strict
%% semantics can be had by setting `{abort_on_error, true}', in which case
%% `setup' will raise an exception if an error occurs.
%% * `{mode, atom()}' - Specifies the context for running 'setup'. Default is
%% `normal'. The `setup' mode has special significance, since it's the default
%% mode for setup hooks, if no other mode is specified and the node has been
%% started with the setup-generated `install.boot' script. In theory, one may
%% specify any atom value, but it's probably wise to stick to the values
%% 'normal', 'setup' and 'upgrade' as global contexts, and instead trigger
%% other mode hooks by explicitly calling {@link run_hooks/1}.
%% * `{verify_directories, boolean()}' - At startup, setup will normally ensure that
%% the directories used by setup actually exist. This behavior can be disabled through
%% the environment variable `{verify_directories, false}'. This can be desirable
%% if setup is used mainly e.g. for environment variable expansion, but not for
%% disk storage.
%% * `{run_timeout, Millisecs}' - Set a time limit for how long it may take for
%% setup to process the setup hooks. Default is `infinity'. If the timeout
%% is exceeded, the application start sequence will be aborted, which will
%% cause a (rather inelegant) boot sequence failure.
%% @end
-module(setup).
-export([home/0,
log_dir/0,
data_dir/0,
verify_directories/0,
verify_dir/1,
mode/0,
find_hooks/0, find_hooks/1, find_hooks/2,
run_hooks/0, run_hooks/1, run_hooks/2,
find_env_vars/1,
get_env/2, get_env/3,
get_all_env/1,
expand_value/2, % expand_value/3 recommended instead
expand_value/3,
patch_app/1,
find_app/1, find_app/2,
pick_vsn/3,
reload_app/1, reload_app/2, reload_app/3,
keep_release/1,
lib_dirs/0, lib_dirs/1]).
-export([read_config_script/3, % (Name, F, Opts)
read_config_script/4]). % (Name, F, Vars, Opts)
-export([ok/1]).
-export([run_setup/0]).
-export([main/1]). % new escript entry point
-include_lib("kernel/include/file.hrl").
-ifdef(TEST).
-compile([export_all, nowarn_export_all]).
-include_lib("eunit/include/eunit.hrl").
-endif.
-ifdef(OTP_RELEASE). %% this implies 21 or higher
-define(EXCEPTION(Class, Reason, Stacktrace), Class:Reason:Stacktrace).
-define(GET_STACK(Stacktrace), Stacktrace).
-else.
-define(EXCEPTION(Class, Reason, _), Class:Reason).
-define(GET_STACK(_), erlang:get_stacktrace()).
-endif.
-define(THROW(E), {'___SETUP_THROW___', E}).
-define(if_verbose(Expr),
case get(verbose) of
true -> Expr;
_ -> ok
end).
%% @spec home() -> Directory
%% @doc Returns the configured `home' directory, or a best guess (`$CWD')
%% @end
%%
home() ->
    home_([]).

%% Internal version threading `Vis', the list of env keys currently being
%% expanded (used for loop detection in get_env_v/3).
home_(Vis) ->
    case get_env_v(setup, home, Vis) of
        undefined ->
            %% Unset: default to the absolute current working directory,
            %% and cache the result in the app env for subsequent calls.
            CWD = cwd(),
            D = filename:absname(CWD),
            application:set_env(setup, home, D),
            D;
        {ok, D} when is_binary(D) ->
            binary_to_list(D);
        {ok, D} when is_list(D) ->
            D;
        {error,_} = Error ->
            Error;
        Other ->
            {error, Other}
    end.
%% @spec log_dir() -> Directory
%% @doc Returns the configured log dir, or a best guess (`home()/log.Node')
%% @end
%%
log_dir() ->
    log_dir_([]).

%% Internal version threading the loop-detection list `Vis'.
log_dir_(Vis) ->
    setup_dir(log_dir, "log." ++ atom_to_list(node()), Vis).

%% @spec data_dir() -> Directory
%% @doc Returns the configured data dir, or a best guess (`home()/data.Node').
%%
%% @end
%%
data_dir() ->
    data_dir_([]).

%% Internal version threading the loop-detection list `Vis'.
data_dir_(Vis) ->
    setup_dir(data_dir, "data." ++ atom_to_list(node()), Vis).
%% Resolve a directory-valued `setup' environment variable (log_dir,
%% data_dir). Falls back to `home()/Default' when unset, caching the
%% derived value in the app env for subsequent calls.
setup_dir(Key, Default, Vis) ->
    case get_env_v(setup, Key, Vis) of
        undefined ->
            %% Not configured: derive from home() and cache it.
            D = filename:absname(filename:join(home(), Default)),
            application:set_env(setup, Key, D),
            D;
        {ok, D} when is_binary(D) ->
            binary_to_list(D);
        {ok, D} when is_list(D) ->
            D;
        {error, _} = Error ->
            %% FIX: pass expansion errors through as-is, consistent with
            %% home_/1. Previously an {error, Reason} from get_env_v/3 fell
            %% into the catch-all and was wrapped as {error, {error, Reason}}.
            Error;
        Other ->
            {error, Other}
    end.
%% Verify (create) the setup directories at startup unless the user has
%% opted out via `{verify_directories, false}' in the setup app env.
maybe_verify_directories() ->
    case get_env(setup, verify_directories, true) of
        true ->
            verify_directories();
        false ->
            ok
    end.
%% @spec verify_directories() -> ok
%% @doc Ensures that essential directories exist and are writable.
%% Currently, the directories corresponding to {@link home/0},
%% {@link log_dir/0} and {@link data_dir/0} are verified.
%% @end
%%
verify_directories() ->
    _ = verify_dir(home()),
    _ = verify_dir(log_dir()),
    _ = verify_dir(data_dir()),
    ok.

%% @spec verify_dir(Dir) -> Dir
%% @doc Ensures that the directory Dir exists and is writable.
%% @end
%%
verify_dir(Directory) ->
    %% ensure_dir/1 only creates the parents of its argument, so joining
    %% with a dummy leaf makes it create `Directory' itself.
    ok = filelib:ensure_dir(filename:join(Directory, "dummy")),
    Directory.
%% Unwrap an `{ok, Value}' tuple, aborting the setup run on anything else.
%% Useful for asserting success in setup scripts.
ok({ok, Result}) ->
    Result;
ok(Other) ->
    %% FIX: the previous format string ("Expected {ok, Value}~n") had no
    %% control sequence consuming `Other', so the underlying
    %% io_lib:format/2 call would fail with badarg instead of reporting
    %% the offending term.
    setup_lib:abort("Expected {ok, Value}, got: ~p~n", [Other]).
%% @spec find_env_vars(Env) -> [{AppName, Value}]
%% @doc Searches all loaded apps for instances of the `Env' environment variable.
%%
%% The environment variables are expanded according to the rules outlined in
%% {@section Variable expansion}
%% @end
find_env_vars(Env) ->
    GEnv = global_env(),
    lists:flatmap(
      fun({A,_,_}) ->
              case app_get_env(A, Env) of
                  {ok, Val} when Val =/= undefined ->
                      %% Expand with the app's own variables layered on top
                      %% of the global ones; [Env] seeds loop detection.
                      NewEnv = private_env(A, GEnv),
                      [{A, expand_env(NewEnv, Val, A, [Env])}];
                  _ ->
                      []
              end
      end, application:loaded_applications()).
%% Like application:get_env/2, but with variable expansion applied to the
%% value (see module doc, section "Variable expansion").
get_env(A, Key) ->
    case app_get_env(A, Key) of
        {ok, Val} ->
            {ok, expand_value(A, Val)};
        Other ->
            Other
    end.

%% Like application:get_env/3: returns the expanded value, or `Default'
%% when the key is unset.
get_env(A, Key, Default) ->
    case get_env(A, Key) of
        {ok, Val} ->
            Val;
        _ ->
            Default
    end.
%% Loop-safe variant of get_env/2: `V' is the list of keys already under
%% expansion. A detected expansion loop is returned as an {error, _} value
%% rather than raised.
get_env_v(A, Key, V) ->
    try get_env_v_(A, Key, V)
    catch
        throw:?THROW(Error) ->
            Error
    end.

%% Throwing version, used internally by the expansion machinery.
get_env_v_(A, Key, V) ->
    case lists:member(Key, V) of
        false ->
            case app_get_env(A, Key) of
                {ok, Val} ->
                    {ok, expand_value_v(A, Key, Val, V)};
                Other ->
                    Other
            end;
        true ->
            %% `Key' is already on the expansion stack - expanding it
            %% again would recurse forever.
            throw(?THROW({error, {loop_detected, Key}}))
    end.
-spec get_all_env(atom()) -> [{atom(), any()}].
%% @doc Like `application:get_all_env/1', but with variable expansion.
%%
%% The variable expansion is performed according to the rules outlined in
%% {@section Variable expansion}.
%% @end
get_all_env(A) ->
    Vars = private_env(A, global_env()),
    %% Each key seeds its own loop-detection list.
    [{K, expand_env(Vars, V, A, [K])} ||
        {K, V} <- application:get_all_env(A)].
-spec expand_value(atom(), any()) -> any().
%% @doc Expand `Value' using global variables and the variables of `App'
%%
%% The variable expansion is performed according to the rules outlined in
%% {@section Variable expansion}. If a loop is detected (a variable ends
%% up referencing itself), an exception is raised.
%% Use of {@link expand_value/3} (also providing the initial key name) is
%% recommended; this function is primarily here for backward compatibility
%% purposes.
%% @end
expand_value(App, Value) ->
    expand_env(private_env(App, global_env()), Value, App, []).

-spec expand_value(atom(), atom(), any()) -> any().
%% @doc Expand `Value' using global variables and the variables of `App'
%%
%% The variable expansion is performed according to the rules outlined in
%% {@section Variable expansion}. The `Key' name as second argument is used
%% for loop detection, in which case an exception will be raised.
%% @end
expand_value(App, Key, Value) ->
    try expand_value_v(App, Key, Value, [])
    catch
        throw:?THROW(Error) ->
            %% Convert the internal throw into a proper error exception.
            error(Error)
    end.

%% Expand `Value' with `K' pushed onto the loop-detection list `V'.
expand_value_v(App, K, Value, V) ->
    expand_env(private_env(App), Value, App, [K|V]).
%% Build the global variable bindings: the built-in "DATA_DIR", "LOG_DIR"
%% and "HOME" variables plus any user-defined ones from the setup env
%% variable `vars'. Each binding maps a name to a fun taking the
%% loop-detection list, so values are computed lazily.
global_env() ->
    Acc = [{K, fun(V1) -> env_value(K, V1) end} ||
              K <- ["DATA_DIR", "LOG_DIR", "HOME"]],
    custom_global_env(Acc).

%% Layer custom global variables (setup env `vars') on top of `Acc'.
custom_global_env(Acc) ->
    lists:foldl(fun(E, Acc1) ->
                        custom_env1(E, Acc1, setup)
                end, Acc,
                [{K,V} || {K,V} <- app_get_env(setup, vars, []),
                          is_list(K)]).

private_env(A) ->
    private_env(A, global_env()).

%% Application-local bindings ("APP", "PRIV_DIR", "LIB_DIR" plus the app's
%% '$setup_vars'), shadowing the global bindings in `GEnv'.
private_env(A, GEnv) ->
    Acc = [{K, fun(Vis1) -> env_value(K, A, Vis1) end} ||
              K <- ["APP", "PRIV_DIR", "LIB_DIR"]],
    custom_private_env(A, Acc ++ GEnv).

custom_private_env(A, Acc) ->
    lists:foldl(fun(E, Acc1) ->
                        custom_env1(E, Acc1, A)
                end, Acc,
                [{K, V} ||
                    {K,V} <- app_get_env(A, '$setup_vars', []),
                    is_list(K)]).
%% Wrapped for tracing purposes
app_get_env(A, K) ->
    application:get_env(A, K).

%% Wrapped for tracing purposes.
%% Implemented via get_env/2 rather than application:get_env/3 -
%% apparently, some still use setup on R15B ...
app_get_env(A, K, Default) ->
    case application:get_env(A, K) of
        {ok, Val} -> Val;
        _ ->
            Default
    end.

%% Wrapped for tracing purposes
app_get_key(A, K) ->
    application:get_key(A, K).
%% Wrap a custom {Name, Definition} variable as a binding whose value is
%% computed lazily by custom_env_value/5 against the bindings seen so far.
custom_env1({K, V}, Acc, A) ->
    [{K, fun(Vis1) -> custom_env_value(K, V, Acc, A, Vis1) end} | Acc].
%% Recursively expand variable references in an arbitrary term.
%% `Vs' are the variable bindings, `A' the context application and
%% `Vis' the loop-detection list of env keys currently under expansion.
expand_env(_, {T,"$env(" ++ S} = X, A, Vis)
  when T=='$value'; T=='$string'; T=='$binary' ->
    %% {'$value'|'$string'|'$binary', "$env(Key)"}: look up another env
    %% variable of the same app and coerce it to the requested shape.
    %% Anything unexpected (bad name, trailing chars after the ")") makes
    %% the inner case fail, and the term is returned untouched.
    try Res = case get_env_name_l(S) of
                  false -> undefined;
                  {Name,[]} ->
                      get_env_v_(A, Name, Vis)
              end,
        case {Res, T} of
            {undefined, '$value'} -> undefined;
            {undefined, '$string'} -> "";
            {undefined, '$binary'} -> <<>>;
            {{ok,V} , '$value'} -> V;
            {{ok,V} , '$string'} -> binary_to_list(stringify(V));
            {{ok,V} , '$binary'} -> stringify(V)
        end
    catch
        error:_ -> X
    end;
expand_env(Vs, {T,"$" ++ S}, _, Vis)
  when T=='$value'; T=='$string'; T=='$binary' ->
    %% Same coercion for a plain "$Var" reference resolved against `Vs'.
    %% The bound value is a fun taking the loop-detection list.
    case {lists:keyfind(S, 1, Vs), T} of
        {false, '$value'} -> undefined;
        {false, '$string'} -> "";
        {false, '$binary'} -> <<>>;
        {{_,V}, '$value'} -> V(Vis);
        {{_,V}, '$string'} -> binary_to_list(stringify(V(Vis)));
        {{_,V}, '$binary'} -> stringify(V(Vis))
    end;
expand_env(Vs, T, A, V) when is_tuple(T) ->
    %% Tuples, lists and maps are traversed structurally.
    list_to_tuple([expand_env(Vs, X, A, V) || X <- tuple_to_list(T)]);
expand_env(Vs, L, A, V) when is_list(L) ->
    case setup_lib:is_string(L) of
        true ->
            %% Strings get textual "$Var"/"$env(...)" substitution.
            do_expand_env(L, Vs, A, list, V);
        false ->
            %% Not a string: expand each element (list may be improper).
            expand_env_l(Vs, L, A, V)
    end;
expand_env(Vs, M, A, V) when is_map(M) ->
    maps:from_list(expand_env_l(Vs, maps:to_list(M), A, V));
expand_env(Vs, B, A, V) when is_binary(B) ->
    do_expand_env(B, Vs, A, binary, V);
expand_env(_, X, _, _) ->
    X.
-spec expand_env_l(list(), maybe_improper_list(), any(), any()) ->
          maybe_improper_list().
%% Expand every element of a (possibly improper) list, preserving an
%% improper tail if there is one.
expand_env_l(_Vs, [], _A, _V) ->
    [];
expand_env_l(Vs, [H|T], A, V) when is_list(T) ->
    [expand_env(Vs, H, A, V) | expand_env_l(Vs, T, A, V)];
expand_env_l(Vs, [H|T], A, V) ->
    %% Improper tail: expand it as a plain term.
    [expand_env(Vs, H, A, V) | expand_env(Vs, T, A, V)].
%% Expand "$Var" and "$env(Key)" references embedded in string/binary data.
%% All the work is done on a binary; `list' results are converted back.
do_expand_env(X, Vs, A, binary, V) ->
    do_expand_env_b(iolist_to_binary(X), Vs, A, V);
do_expand_env(X, Vs, A, list, V) ->
    binary_to_list(do_expand_env_b(iolist_to_binary(X), Vs, A, V)).

%% Byte-by-byte scanner over the binary, substituting references in place.
do_expand_env_b(<<"$env(", T/binary>>, Vs, A, Vis) ->
    case get_env_name_b(T) of
        {K, T1} ->
            case get_env_v_(A, K, Vis) of
                {ok, V} ->
                    %% Substitute the (recursively expanded) value and
                    %% continue scanning after the closing paren.
                    Res = expand_env(Vs, V, A, Vis),
                    <<(stringify(Res))/binary,
                      (do_expand_env_b(T1, Vs, A, Vis))/binary>>;
                undefined ->
                    %% Unknown key: keep the literal "$env(" and scan on.
                    <<"$env(", (do_expand_env_b(T, Vs, A, Vis))/binary>>
            end;
        false ->
            do_expand_env_b(T, Vs, A, Vis)
    end;
do_expand_env_b(<<"$", T/binary>>, Vs, A, Vis) ->
    case match_var_b(Vs, T, Vis) of
        {Res, T1} ->
            <<Res/binary, (do_expand_env_b(T1, Vs, A, Vis))/binary>>;
        false ->
            %% No binding matched at this position: keep the literal "$".
            <<"$", (do_expand_env_b(T, Vs, A, Vis))/binary>>
    end;
do_expand_env_b(<<H, T/binary>>, Vs, A, Vis) ->
    <<H, (do_expand_env_b(T, Vs, A, Vis))/binary>>;
do_expand_env_b(<<>>, _, _, _) ->
    <<>>.
%% Extract an env variable name from the head of `B', up to the first ")".
%% Returns {NameAtom, Rest} or `false' if there is no ")" or the name is
%% not an existing atom.
get_env_name_b(Bin) ->
    case binary:split(Bin, <<")">>) of
        [Name, Rest] ->
            try {binary_to_existing_atom(Name, latin1), Rest}
            catch
                error:_ -> false
            end;
        [_NoParen] ->
            false
    end.
%% List (string) counterpart of get_env_name_b/1: extract the env variable
%% name up to the first ")". Returns {NameAtom, Rest} or `false'.
get_env_name_l(Str) ->
    case lists:splitwith(fun(C) -> C =/= $) end, Str) of
        {_, []} ->
            %% No closing paren anywhere in the string.
            false;
        {Name, [$) | Rest]} ->
            try {list_to_existing_atom(Name), Rest}
            catch
                error:_ -> false
            end
    end.
%% Try each binding in turn against the head of binary `B'.
%% Returns {StringifiedValue, Rest} for the first binding whose name is a
%% prefix of `B', or `false' if none matches.
match_var_b([{K,V}|T], B, Vis) ->
    %% Anchored split: yields [<<>>, Rest] only if `B' starts with `K'.
    case re:split(B, "^" ++ K, [{return, binary}]) of
        [_] ->
            match_var_b(T, B, Vis);
        [<<>>, Rest] ->
            {stringify(V(Vis)), Rest}
    end;
match_var_b([], _, _) ->
    false.
%% Values of the built-in global variables.
env_value("LOG_DIR" , Vis) -> log_dir_(Vis);
env_value("DATA_DIR", Vis) -> data_dir_(Vis);
env_value("HOME"    , Vis) -> home_(Vis).

%% Values of the built-in application-local variables.
env_value("APP"     , A, _Vis) -> A;
env_value("PRIV_DIR", A, _Vis) -> priv_dir(A);
env_value("LIB_DIR" , A, _Vis) -> lib_dir(A).
%% Compute the value of a user-defined variable (see module doc):
%% {value, V} is used literally, {expand, V} is expanded recursively
%% (with loop detection), and {apply, M, F, A} calls arbitrary code.
custom_env_value(_K, {value, V}, _Vs, _A, _Vis) ->
    V;
custom_env_value(K, {expand, V}, Vs, A, Vis) ->
    expand_env(Vs, V, A, [K|Vis]);
custom_env_value(K, {apply, M, F, As}, _Vs, _A, _Vis) ->
    %% Not ideal, but don't want to introduce exceptions in get_env()
    try apply(M, F, As)
    catch
        error:_ ->
            {error, {custom_setup_env, K}}
    end.
%% Render any term as a binary: iodata is flattened as-is, any other term
%% is rendered via "~w". (More general than to_string/1 below.)
stringify(Bin) when is_binary(Bin) ->
    Bin;
stringify(Term) ->
    try
        iolist_to_binary(Term)
    catch
        error:badarg ->
            %% Not iodata - fall back to the term's printed form.
            iolist_to_binary(io_lib:format("~w", [Term]))
    end.
%% Return the priv dir of `A', falling back to `CWD/priv' when the app is
%% not in the code path but we appear to be standing in its source dir.
priv_dir(A) ->
    case code:priv_dir(A) of
        {error, bad_name} ->
            case is_cur_dir(A) of
                true ->
                    filename:join(cwd(), "priv");
                false ->
                    error({cannot_get_priv_dir, A})
            end;
        D -> D
    end.

%% Return the lib dir of `A', with the same CWD fallback as priv_dir/1.
lib_dir(A) ->
    case code:lib_dir(A) of
        {error, bad_name} ->
            case is_cur_dir(A) of
                true ->
                    cwd();
                false ->
                    error({cannot_get_lib_dir, A})
            end;
        D -> D
    end.

%% Current working directory (crashes if it cannot be determined).
cwd() ->
    {ok, CWD} = file:get_cwd(),
    CWD.

%% True if the current directory's basename equals the app name.
is_cur_dir(A) ->
    As = atom_to_list(A),
    filename:basename(cwd()) == As.
%% @spec patch_app(AppName::atom()) -> true | {error, Reason}
%%
%% @doc Adds an application's "development" path to a target system
%%
%% This function locates the given application (`AppName') along the `$ERL_LIBS'
%% path, and prepends it to the code path of the existing system. This is useful
%% not least when one wants to add e.g. a debugging or trace application to a
%% target system.
%%
%% The function will not add the same path again, if the new path is already
%% the 'first' path entry for the application `A'.
%% @end
patch_app(A) when is_atom(A) ->
    patch_app(A, latest).

%% Like patch_app/1, selecting a specific version (see pick_vsn/3).
patch_app(A, Vsn) ->
    patch_app(A, Vsn, lib_dirs()).

%% Like patch_app/2, searching the given LibDirs.
patch_app(A, Vsn, LibDirs) ->
    case find_app(A, LibDirs) of
        [_|_] = Found ->
            {_ActualVsn, Dir} = pick_vsn(A, Found, Vsn),
            error_logger:info_msg("[~p vsn ~p] code:add_patha(~s)", [A, _ActualVsn, Dir]),
            code:add_patha(Dir);
        [] ->
            error(no_matching_vsn)
    end.
%% @spec pick_vsn(App::atom(), Dirs::[{Vsn::string(),Dir::string()}], Which) ->
%% {Vsn, Dir}
%% where
%% Which = 'latest' | 'next' | Regexp
%%
%% @doc Picks the specified version out of a list returned by {@link find_app/1}
%%
%% * If `Which' is a string, it will be used as a `re' regexp pattern, and the
%% first matching version will be returned.
%%
%% * If `Which = latest', the last entry in the list will be returned (assumes
%% that the list is sorted in ascending version order).
%%
%% * If `Which = next', the next version following the current version of the
%% application `A' is returned, assuming `A' is loaded; if `A' is not loaded,
%% the first entry in the list is returned.
%%
%% If no matching version is found, the function raises an exception.
%% @end
pick_vsn(_, Dirs, latest) ->
    lists:last(Dirs);
pick_vsn(A, Dirs, next) ->
    case app_get_key(A, vsn) of
        {ok, Cur} ->
            %% Take the entry immediately after the currently loaded vsn.
            case lists:dropwhile(fun({V, _}) -> V =/= Cur end, Dirs) of
                [_, {_, _} = Next |_] -> Next;
                _ -> error(no_matching_vsn)
            end;
        _ ->
            hd(Dirs)
    end;
pick_vsn(_, Dirs, Vsn) ->
    %% `Vsn' is treated as a regexp; the first matching version wins.
    case [X || {V, _} = X <- Dirs,
               re:run(V, Vsn) =/= nomatch] of
        [Found|_] ->
            Found;
        [] ->
            error(no_matching_vsn)
    end.
%% @spec find_app(A::atom()) -> [{Vsn, Dir}]
%% @equiv find_app(A, lib_dirs())
find_app(A) ->
    find_app(A, lib_dirs()).

%% @spec find_app(A::atom(), LibDirs::[string()]) -> [{Vsn, Dir}]
%%
%% @doc Locates application `A' along LibDirs (see {@link lib_dirs/0} and
%% {@link lib_dirs/1}) or under the OTP root, returning all found candidates.
%% The version is extracted from the `.app' file; thus, no version suffix
%% in the path name is required.
%% @end
find_app(A, LibDirs) ->
    Astr = to_string(A),
    %% The currently loaded location (if any), ...
    CurDir = case code:lib_dir(A) of
                 {error,_} -> [];
                 D ->
                     [filename:join(D, "ebin")]
             end,
    CurRoots = current_roots(),
    %% ... candidates found directly in LibDirs, ...
    InLib = [P || P <- LibDirs,
                  is_app_dir(Astr, P)],
    %% ... and versioned candidates under the roots of the code path.
    InRoots = lists:append([in_root(A, R) || R <- CurRoots]),
    setup_lib:sort_vsns(
      lists:usort(CurDir ++ InRoots ++ InLib), atom_to_list(A)).
%% Coerce an atom or string to a string; other types are not supported.
to_string(S) when is_list(S) -> S;
to_string(A) when is_atom(A) -> atom_to_list(A).
%% True if `D' looks like an ebin directory of application `A', i.e.
%% ".../A[-Vsn]/ebin" (the version suffix, if any, starts at the first "-").
is_app_dir(A, D) ->
    case lists:reverse(filename:split(D)) of
        ["ebin", AppDir | _] ->
            %% Compare the part before the first "-" with the app name.
            [Base | _] = re:split(AppDir, <<"-">>, [{return, list}]),
            Base =:= A;
        _ ->
            false
    end.
%% Roots of the current code path: each ".../Root" sitting above an
%% "App/ebin" pair.
current_roots() ->
    roots_of(code:get_path()).

%% Derive the sorted, deduplicated set of roots from a list of ebin paths.
roots_of(Path) ->
    lists:usort(lists:foldr(fun add_root/2, [], Path)).

%% Fold helper: ".../Root/App/ebin" contributes ".../Root"; paths with no
%% component above the app dir (e.g. "app/ebin") are skipped.
add_root(Dir, Acc) ->
    case lists:reverse(filename:split(Dir)) of
        ["ebin", _ | [_ | _] = RevRoot] ->
            [filename:join(lists:reverse(RevRoot)) | Acc];
        _ ->
            Acc
    end.
%% Versioned ebin dirs ("A-Vsn/ebin") for app `A' directly under root `R'.
in_root(A, R) ->
    Paths = filelib:wildcard(filename:join([R, "*", "ebin"])),
    Pat = atom_to_list(A) ++ "-[\\.0-9]+/ebin\$",
    [P || P <- Paths,
          re:run(P, Pat) =/= nomatch].
%% @spec reload_app(AppName::atom()) -> {ok, NotPurged} | {error, Reason}
%%
%% @equiv reload_app(AppName, latest)
reload_app(A) ->
    reload_app(A, latest).

%% @spec reload_app(AppName::atom(), ToVsn) -> {ok,UnPurged} | {error,Reason}
%%
%% @equiv reload_app(AppName, latest, lib_dirs())
reload_app(A, ToVsn) ->
    reload_app(A, ToVsn, lib_dirs()).
%% @spec reload_app(AppName::atom(), ToVsn, LibDirs) ->
%% {ok, Unpurged} | {error, Reason}
%% where
%% ToVsn = 'latest' | 'next' | Vsn,
%% LibDirs = [string()]
%% Vsn = string()
%%
%% @doc Loads or upgrades an application to the specified version
%%
%% This function is a convenient function for 'upgrading' an application.
%% It locates the given version (using {@link find_app/1} and {@link pick_vsn/3})
%% and loads it in the most appropriate way:
%%
%% * If the application isn't already loaded, it loads the application and
%% all its modules.
%%
%% * If the application is loaded, it generates an appup script and performs
%% a soft upgrade. If the new version of the application has an `.appup' script
%% on-disk, that script is used instead.
%%
%% The application is searched for along the existing path (that is, under
%% the roots of the existing code path, allowing for e.g. $ROOT/lib/app-1.0
%% and $ROOT/lib/app-1.2 to be found and tested against the version condition),
%% and also along `LibDirs' (see {@link lib_dirs/0} an {@link lib_dirs/1}).
%%
%% The generated appup script is of the form:
%%
%% * add modules not present in the previous version of the application
%%
%% * do a soft upgrade on pre-existing modules, using suspend-code_change-resume
%%
%% * delete modules that existed in the old version, but not in the new.
%%
%% The purge method used is `brutal_purge' - see {@link //sasl/appup}.
%%
%% For details on how the new version is chosen, see {@link find_app/1} and
%% {@link pick_vsn/3}.
%% @end
reload_app(A, ToVsn0, LibDirs) ->
    case app_get_key(A, vsn) of
        undefined ->
            %% Not loaded yet: just load the app and all of its modules.
            ok = application:load(A),
            {ok, Modules} = app_get_key(A, modules),
            _ = [c:l(M) || M <- Modules],
            {ok, []};
        {ok, FromVsn} ->
            {ToVsn, NewPath} = pick_vsn(A, find_app(A, LibDirs), ToVsn0),
            if ToVsn == FromVsn ->
                    {error, same_version};
               true ->
                    error_logger:info_msg("[~p vsn ~p] soft upgrade from ~p",
                                          [A, ToVsn, FromVsn]),
                    reload_app(
                      A, FromVsn, filename:join(code:lib_dir(A), "ebin"),
                      NewPath, ToVsn)
            end
    end.
%% Generate (or read from disk) an appup script for the upgrade, then
%% evaluate it via the release handler.
reload_app(A, OldVsn, OldPath, NewPath, NewVsn) ->
    {_NewVsn, Script, NewApp} = make_appup_script(A, OldVsn, NewPath),
    reload_app(A, OldVsn, OldPath, NewPath, NewVsn, Script, NewApp).

reload_app(A, _OldVsn, _OldPath, NewPath, NewVsn, Script, _NewApp) ->
    LibDir = filename:dirname(NewPath),
    %% Drop any previous path entry for the app (unless it's the only one)
    %% before evaluating the upgrade script.
    _ = remove_path(NewPath, A),
    case release_handler:eval_appup_script(A, NewVsn, LibDir, Script) of
        {ok, Unpurged} ->
            %% brutal_purge entries can be purged right away; report the
            %% remaining unpurged modules to the caller.
            _ = [code:purge(M) || {M, brutal_purge} <- Unpurged],
            {ok, [U || {_, Mode} = U <- Unpurged, Mode =/= brutal_purge]};
        Other ->
            Other
    end.
%% Remove `P' from the code path, but only if at least one other path
%% entry for application `A' remains afterwards.
remove_path(P, A) ->
    CurPath = code:get_path(),
    case lists:member(P, CurPath) of
        true ->
            %% don't remove if it's the only path
            case [Px || Px <- path_entries(A, CurPath),
                        Px =/= P] of
                [] ->
                    true;
                [_|_] ->
                    code:set_path([Px || Px <- CurPath,
                                         Px =/= P])
            end;
        false ->
            true
    end.
%% All entries in `Path' that look like an ebin dir of application `A'
%% (with or without a version suffix).
path_entries(A, Path) ->
    Pattern = atom_to_list(A) ++ "[^/]*/ebin\$",
    lists:filter(fun(P) -> re:run(P, Pattern) =/= nomatch end, Path).
%% Build an upgrade script for `A' from `OldVsn' to whatever version lives
%% at `NewPath'. An on-disk .appup (via find_script/4) takes precedence;
%% otherwise a generic add/soft-upgrade/delete script is generated.
make_appup_script(A, OldVsn, NewPath) ->
    {application, _, NewAppTerms} = NewApp =
        read_app(filename:join(NewPath, atom_to_list(A) ++ ".app")),
    case find_script(A, NewPath, OldVsn, up) of
        {NewVsn, Script} ->
            {NewVsn, Script, NewApp};
        false ->
            {ok, OldMods} = app_get_key(A, modules),
            {modules, NewMods} = lists:keyfind(modules, 1, NewAppTerms),
            {vsn, NewVsn} = lists:keyfind(vsn, 1, NewAppTerms),
            {DelMods,AddMods,ChgMods} = {OldMods -- NewMods,
                                         NewMods -- OldMods,
                                         intersection(NewMods, OldMods)},
            %% Generic appup: load all new object code, then add new
            %% modules, soft-upgrade changed ones
            %% (suspend - load - code_change - resume), and finally remove
            %% modules dropped in the new version.
            {NewVsn,
             [{load_object_code,{A, NewVsn, NewMods}}]
             ++ [point_of_no_return]
             ++ [{load, {M, brutal_purge, brutal_purge}} || M <- AddMods]
             ++ [{suspend, ChgMods} || ChgMods =/= []]
             ++ [{load, {M, brutal_purge,brutal_purge}} || M <- ChgMods]
             ++ [{code_change, up, [{M, setup} || M <- ChgMods]} ||
                    ChgMods =/= []]
             ++ [{resume, ChgMods} || ChgMods =/= []]
             ++ [{remove, {M, brutal_purge,brutal_purge}} || M <- DelMods]
             ++ [{purge, DelMods} || DelMods =/= []],
             NewApp}
    end.
%% Read and return the single {application, ...} term from a .app file,
%% raising an error (annotated with the file name) on failure.
read_app(F) ->
    case file:consult(F) of
        {ok, [App]} ->
            App;
        {error,_} = Error ->
            error(Error, [F])
    end.
%% slightly modified (and corrected!) version of release_handler:find_script/4.
%% Returns {NewVsn, Script} from an on-disk .appup whose OldVsn pattern
%% matches, or `false' if there is no (matching) .appup file.
find_script(App, Dir, OldVsn, UpOrDown) ->
    Appup = filename:join([Dir, "ebin", atom_to_list(App)++".appup"]),
    case file:consult(Appup) of
        {ok, [{NewVsn, UpFromScripts, _DownToScripts}]} ->
            Scripts = case UpOrDown of
                          up -> UpFromScripts
                          %% down -> DownToScripts
                      end,
            %% The OldVsn entries are regexps; the first match wins.
            case lists:dropwhile(fun({Re,_}) ->
                                         re:run(OldVsn, Re) == nomatch
                                 end, Scripts) of
                [{_OldVsn, Script}|_] ->
                    {NewVsn, Script};
                [] ->
                    false
            end;
        {error, enoent} ->
            false;
        {error, _} ->
            false
    end.
%% find_procs(Mods) ->
%% Ps = release_handler_1:get_supervised_procs(),
%% lists:flatmap(
%% fun({P,_,_,Ms}) ->
%% case intersection(Ms, Mods) of
%% [] -> [];
%% I -> [{P, I}]
%% end
%% end, Ps).
%% Elements of `A' that also occur in `B', preserving the order of `A'
%% (multiset semantics: each occurrence in `B' can cancel one in `A').
intersection(A, B) ->
    A -- (A -- B).
%% @hidden
%%
%% Called from the start function. Will verify directories, then call
%% all setup hooks in all applications, and execute them in order.
%% Afterwards, setup will either finish and leave the system running, or
%% stop, terminating all nodes automatically.
%%
run_setup() ->
    error_logger:info_msg("Setup running ...", []),
    AbortOnError = check_abort_on_error(),
    try run_setup_()
    catch
        ?EXCEPTION(error, Error, Stacktrace) ->
            error_logger:error_msg("Caught exception:~n"
                                   "~p~n"
                                   "~p~n", [Error, ?GET_STACK(Stacktrace)]),
            %% Only rethrow (failing the application start) when
            %% abort_on_error is configured.
            if AbortOnError ->
                    erlang:error(Error);
               true ->
                    ok
            end
    end.
%% Body of the setup run: verify directories (unless disabled), then
%% execute all hooks for the current mode.
run_setup_() ->
    Res = maybe_verify_directories(),
    error_logger:info_msg("Directories verified. Res = ~p", [Res]),
    Mode = mode(),
    Hooks = find_hooks(Mode),
    run_selected_hooks(Hooks),
    error_logger:info_msg(
      "Setup finished processing hooks (Mode=~p)...", [Mode]),
    ok.
%% @hidden
%% Escript entry point; delegates to setup_gen.
main(Args) ->
    setup_gen:main(Args).
%% @spec find_hooks() -> [{PhaseNo, [{M,F,A}]}]
%% @doc Finds all custom setup hooks in all applications.
%% The setup hooks must be of the form
%% ``{'$setup_hooks', [{PhaseNo, {M, F, A}} | {Mode, [{PhaseNo, {M,F,A}}]}]}'',
%% where PhaseNo should be (but doesn't have to be) an integer.
%% If `Mode' is not specified, the hook will pertain to the `setup' mode.
%%
%% The hooks will be called in order:
%% - The phase numbers will be sorted.
%% - All hooks for a specific PhaseNo will be called in sequence,
%% in the same order as the applications appear in the boot script
%% (and, if included applications exist, in preorder traversal order).
%%
%% A suggested convention is:
%% - Create the database at phase 100
%% - Create tables (or configure schema) at 200
%% - Populate the database at 300
%%
%% Using the `setup' environment variable `modes', it is possible to
%% define a mode that includes all hooks from different modes.
%% The format is `[{M1, [M2,...]}]'. The expansion is done recursively,
%% so a mode entry in the right-hand side of a pair can expand into other
%% modes. In order to be included in the final list of modes, an expanding
%% mode needs to include itself in the right-hand side. For example:
%%
%% - Applying `a' to `[{a, [b]}]' returns `[b]'
%% - Applying `a' to `[{a, [a,b]}]' returns `[a,b]'
%% - Applying `a' to `[{a, [a,b]},{b,[c,d]}]' returns `[a,c,d]'
%%
%% A typical application of this would be `[{test, [normal, test]}]', where
%% starting in the `test' mode would cause all `normal' and all `test' hooks
%% to be executed.
%% @end
%%
find_hooks() ->
    find_hooks(mode()).

%% @spec find_hooks(Mode) -> [{PhaseNo, [{M, F, A}]}]
%% @doc Find all setup hooks for `Mode' in all applications
%% @end
find_hooks(Mode) when is_atom(Mode) ->
    Applications = applications(),
    find_hooks(Mode, Applications).
%% @spec find_hooks(Mode, Applications) -> [{PhaseNo, [{M, F, A}]}]
%% @doc Find all setup hooks for `Mode' in `Applications'.
%% @end
find_hooks(Mode, Applications) ->
    %% `Mode' is first expanded through the setup env `modes' mapping.
    find_hooks_(Mode, maybe_expand_mode(Mode), Applications).
%% Expand `Mode' through the setup env `modes' mapping (see the
%% find_hooks/0 doc). The result is an ordset of concrete modes.
maybe_expand_mode(Mode) ->
    maybe_expand_mode(Mode, app_get_env(setup, modes, [])).

maybe_expand_mode(Mode, Modes) ->
    maybe_expand_mode(Mode, Modes, ordsets:new()).

maybe_expand_mode(Mode, Modes, Acc) ->
    case lists:keyfind(Mode, 1, Modes) of
        {_, Ms} ->
            %% Expanding mode: recurse on the right-hand side, removing
            %% the current entry first so the expansion terminates.
            Modes1 = lists:keydelete(Mode, 1, Modes),
            lists:foldl(
              fun(M, Acc1) ->
                      maybe_expand_mode(M, Modes1, Acc1)
              end, Acc, Ms);
        false ->
            ordsets:add_element(Mode, Acc)
    end.
%% Collect all matching '$setup_hooks' entries of `Applications' into an
%% orddict keyed on phase number (so phases come out sorted).
find_hooks_(Mode, Modes, Applications) ->
    lists:foldl(
      fun(A, Acc) ->
              case app_get_env(A, '$setup_hooks') of
                  {ok, Hooks} ->
                      lists:foldl(
                        fun(H, Acc1) ->
                                f_find_hooks_(H, A, Mode, Modes, Acc1)
                        end, Acc, Hooks);
                  _ ->
                      Acc
              end
      end, orddict:new(), Applications).
%% Classify a single '$setup_hooks' entry. Entries come in two shapes:
%% mode-qualified {Mode1, [{Phase, MFA} | {Phase, [MFA]}]}, or bare
%% {Phase, MFA} / {Phase, [MFA]}, which pertain to the `setup' mode only.
%% Non-matching or malformed entries are silently skipped here.
f_find_hooks_(Hook, A, Mode, Modes, Acc) ->
    IsSetup = lists:member(setup, Modes),
    case Hook of
        {Mode1, [{_, {_,_,_}}|_] = L} ->
            %% Mode-qualified list of {Phase, MFA} entries.
            case lists:member(Mode1, Modes) of
                true -> find_hooks_1(Mode1, A, L, Acc);
                false -> Acc
            end;
        {Mode1, [{_, [{_, _, _}|_]}|_] = L} ->
            %% Mode-qualified list of {Phase, [MFA]} entries.
            case lists:member(Mode1, Modes) of
                true -> find_hooks_1(Mode1, A, L, Acc);
                false -> Acc
            end;
        {N, {_, _, _} = MFA} when IsSetup ->
            orddict:append(N, MFA, Acc);
        {N, [{_, _, _}|_] = L} when IsSetup ->
            lists:foldl(
              fun(MFA, Acc1) ->
                      orddict:append(N, MFA, Acc1)
              end, Acc, L);
        _ ->
            Acc
    end.
%% Append all {Phase, MFA} (or {Phase, [MFA]}) pairs in `L' to the phase
%% orddict, logging and skipping malformed MFA entries.
find_hooks_1(Mode, A, L, Acc1) ->
    lists:foldl(
      fun({N, {_,_,_} = MFA}, Acc2) ->
              orddict:append(N, MFA, Acc2);
         ({N, [{_,_,_}|_] = MFAs}, Acc2) ->
              lists:foldl(
                fun({_,_,_} = MFA1, Acc3) ->
                        orddict:append(
                          N, MFA1, Acc3);
                   (Other1, Acc3) ->
                        error_logger:info_msg(
                          "Invalid hook: ~p~n"
                          " App : ~p~n"
                          " Mode : ~p~n"
                          " Phase: ~p~n",
                          [Other1, A, Mode, N]),
                        Acc3
                end, Acc2, MFAs)
      end, Acc1, L).
-spec mode() -> normal | atom().
%% @doc Returns the current "setup mode".
%%
%% The mode can be defined using the `setup' environment variable `mode'.
%% The default value is `normal'. The mode is used to select which setup
%% hooks to execute when starting the `setup' application.
%% @end
mode() ->
    case app_get_env(setup, mode) of
        {ok, M} ->
            M;
        _ ->
            %% No explicit mode: infer `setup' when the node was booted
            %% via the setup-generated "install" boot script.
            case init:get_argument(boot) of
                {ok, [[Boot]]} ->
                    case filename:basename(Boot) of
                        "install" -> setup;
                        _ -> normal
                    end;
                _ ->
                    normal
            end
    end.
%% @spec run_hooks() -> ok
%% @doc Execute all setup hooks for current mode in order.
%%
%% See {@link find_hooks/0} for details on the order of execution.
%% @end
run_hooks() ->
    run_hooks(applications()).

%% @spec run_hooks(Applications) -> ok
%% @doc Execute setup hooks for current mode in `Applications' in order.
%%
%% See {@link find_hooks/0} for details on the order of execution.
%% @end
run_hooks(Apps) ->
    run_hooks(mode(), Apps).

%% @spec run_hooks(Mode, Applications) -> ok
%% @doc Execute setup hooks for `Mode' in `Applications' in order
%%
%% Note that no assumptions can be made about which process each setup hook
%% runs in, nor whether it runs in the same process as the previous hook.
%% See {@link find_hooks/0} for details on the order of execution.
%% @end
run_hooks(Mode, Apps) ->
    Hooks = find_hooks(Mode, Apps),
    run_selected_hooks(Hooks).
%% @spec run_selected_hooks(Hooks) -> ok
%% @doc Execute specified setup hooks in order
%%
%% Exceptions are caught and printed. This might/should be improved, but the
%% general idea is to complete as much as possible of the setup, and perhaps
%% repair afterwards. However, the fact that something went wrong should be
%% remembered and reflected at the end.
%% @end
%%
run_selected_hooks(Hooks) ->
    AbortOnError = check_abort_on_error(),
    lists:foreach(
      fun({Phase, MFAs}) ->
              error_logger:info_msg("Setup phase ~p~n", [Phase]),
              lists:foreach(fun({M, F, A}) ->
                                    try_apply(M, F, A, AbortOnError)
                            end, MFAs)
      end, Hooks).
%% Read and validate the `abort_on_error' flag (defaults to false).
%% A non-boolean value is itself treated as a fatal configuration error.
check_abort_on_error() ->
    case app_get_env(setup, abort_on_error) of
        {ok, F} when is_boolean(F) -> F;
        {ok, Other} ->
            error_logger:error_msg("Invalid abort_on_error flag (~p)~n"
                                   "Aborting...~n", [Other]),
            error({invalid_abort_on_error, Other});
        _ -> false
    end.
%% Run one hook MFA in a short-lived monitored process, so a crashing hook
%% cannot take down the setup process itself. The outcome is logged; if
%% `Abort' is true, a failing hook terminates the whole hook sequence.
try_apply(M, F, A, Abort) ->
    {_Pid, Ref} = spawn_monitor(
                    fun() ->
                            %% Encode success/failure in the exit reason,
                            %% which arrives here in the 'DOWN' message.
                            exit(try {ok, apply(M, F, A)}
                                 catch
                                     ?EXCEPTION(Type, Exception, Stacktrace) ->
                                         {error, {Type, Exception, ?GET_STACK(Stacktrace)}}
                                 end)
                    end),
    receive
        {'DOWN', Ref, _, _, Return} ->
            case Return of
                {ok, Result} ->
                    report_result(Result, M, F, A);
                {error, {Type, Exception, Stacktrace}} ->
                    report_error(Type, Exception, Stacktrace, M, F, A),
                    if Abort ->
                            error_logger:error_msg(
                              "Abort on error is set. Terminating sequence~n",[]),
                            error(Exception);
                       true ->
                            ok
                    end
            end
    end.
%% Log the successful result of a hook invocation.
report_result(Result, M, F, A) ->
    MFAString = format_mfa(M, F, A),
    error_logger:info_msg(MFAString ++ "-> ~p~n", [Result]).

%% Log a failed hook invocation, including exception class and stacktrace.
report_error(Type, Error, Stacktrace, M, F, A) ->
    ErrTypeStr = case Type of
                     error -> "ERROR: ";
                     throw -> "THROW: ";
                     exit -> "EXIT: "
                 end,
    MFAString = format_mfa(M, F, A),
    error_logger:error_msg(MFAString ++ "-> " ++ ErrTypeStr ++ "~p~n~p~n",
                           [Error, Stacktrace]).
%% @doc Render `M:F(Args)' as a flat string for log output.
format_mfa(Mod, Fun, Args) ->
    Rendered = [atom_to_list(Mod), $:, atom_to_list(Fun),
                $(, format_args(Args), $)],
    lists:flatten(Rendered).

%% Render an argument list as a comma-separated iolist.
format_args(Args) ->
    lists:join(",", [format_arg(A) || A <- Args]).

%% Pretty-print a single argument.
format_arg(Arg) ->
    io_lib:fwrite("~p", [Arg]).
%% @spec applications() -> [atom()]
%% @doc Find all applications - either from the boot script or all loaded apps.
%% @end
%%
applications() ->
    Apps = [A || {A, _, _} <- application:loaded_applications()],
    group_applications(Apps).

%% Sort apps in preorder traversal order.
%% That is, for each "top application", all included apps follow before the
%% next top application. Normally, there will be no included apps, in which
%% case the list will maintain its original order.
%%
group_applications(Apps) ->
    group_applications(Apps, []).

group_applications([H | T], Acc) ->
    case app_get_key(H, included_applications) of
        {ok, []} ->
            %% No included apps: keep H in its original position.
            group_applications(T, [{H,[]}|Acc]);
        {ok, Incls} ->
            %% H includes other apps: pull the whole (transitive) include
            %% set out of both the remaining input and the accumulator, so
            %% each included app appears exactly once, right after H.
            AllIncls = all_included(Incls),
            group_applications(T -- AllIncls,
                               [{H, AllIncls}
                                | lists:foldl(
                                    fun(A,Acc1) ->
                                            lists:keydelete(A,1,Acc1)
                                    end, Acc, AllIncls)])
    end;
group_applications([], Acc) ->
    unfold(lists:reverse(Acc)).

%% Flatten [{App, IncludedApps}] into a single preorder list of app names.
unfold([{A,Incl}|T]) ->
    [A|Incl] ++ unfold(T);
unfold([]) ->
    [].

%% Transitive closure of included applications, in preorder.
%% NOTE(review): assumes every included app is loaded so app_get_key/2
%% returns {ok, _}; an unloaded include would crash here — confirm intended.
all_included([H | T]) ->
    case app_get_key(H, included_applications) of
        {ok, []} ->
            [H | all_included(T)];
        {ok, Incls} ->
            [H | all_included(Incls)] ++ all_included(T)
    end;
all_included([]) ->
    [].
%% @spec keep_release(RelVsn) -> ok
%% @doc Generates a release based on what's running in the current node.
%% @end
keep_release(RelVsn) ->
    %% 0. Check
    RelDir = setup_lib:releases_dir(),
    case filelib:is_dir(TargetDir = filename:join(RelDir, RelVsn)) of
        true -> error({target_dir_exists, TargetDir});
        false -> verify_dir(TargetDir)
    end,
    %% 1. Collect info
    Loaded = application:loaded_applications(),
    LoadedNames = [element(1,A) || A <- Loaded],
    Running = application:which_applications(),
    RunningNames = [element(1,A) || A <- Running],
    %% Apps loaded but not started are marked 'load' in the release.
    OnlyLoaded = LoadedNames -- RunningNames,
    Included = lists:flatmap(
                 fun(A) ->
                         case app_get_key(A, included_applications) of
                             {ok, []} ->
                                 [];
                             {ok, As} ->
                                 [{A, As}]
                         end
                 end, LoadedNames),
    {Name,_} = init:script_id(),
    %% 2. Write a setup.conf for the new release and generate it.
    Conf = [
            {name, Name},
            {apps, app_list(OnlyLoaded, Loaded, Included)}
            | [{root, R} || R <- current_roots() -- [otp_root()]]
           ]
        ++ [{env, env_diff(LoadedNames)}],
    setup_lib:write_script(
      ConfF = filename:join(TargetDir, "setup.conf"), [Conf]),
    setup_gen:run([{name, Name}, {outdir, TargetDir}, {conf, ConfF}]).
%% {loaded, Loaded},
%% {running, Running},
%% {only_loaded, OnlyLoaded},
%% {included, Included},
%% {env, env_diff(LoadedNames)},
%% {roots, current_roots() -- [otp_root()]},
%% {rel_dir, setup_lib:releases_dir()}].
%% @doc Build the `apps' entries for a generated release.
%%
%% Each loaded application becomes `{App, Vsn}' (started) or
%% `{App, Vsn, load}' (loaded only); apps with included applications get
%% the include list appended as a final element.
app_list(OnlyLoaded, AllLoaded, Included) ->
    [app_entry(App, Vsn,
               lists:member(App, OnlyLoaded),
               lists:keyfind(App, 1, Included))
     || {App, _Desc, Vsn} <- AllLoaded].

%% One release entry per app; load flag and include info pre-computed above.
app_entry(App, Vsn, true,  false)  -> {App, Vsn, load};
app_entry(App, Vsn, true,  {_, I}) -> {App, Vsn, load, I};
app_entry(App, Vsn, false, false)  -> {App, Vsn};
app_entry(App, Vsn, false, {_, I}) -> {App, Vsn, I}.
%% Compute, per application, the difference between the live environment
%% (application:get_all_env/1) and the environment recorded in the app's
%% .app file on disk. Only apps with a non-empty diff appear in the result.
env_diff([A|As]) ->
    AppF = filename:join([code:lib_dir(A), "ebin", atom_to_list(A) ++ ".app"]),
    %% included_applications is injected at load time; not a real env var.
    LiveEnv = lists:keydelete(included_applications, 1,
                              application:get_all_env(A)),
    DiskEnv = fetch_env(AppF),
    case LiveEnv -- DiskEnv of
        [_|_] = Diff ->
            [{A, Diff}|env_diff(As)];
        [] ->
            env_diff(As)
    end;
env_diff([]) ->
    [].

%% Read the env section from an .app file; crashes with a descriptive
%% reason when the file cannot be consulted.
fetch_env(AppF) ->
    case file:consult(AppF) of
        {ok, [{application,_,Terms}]} ->
            proplists:get_value(env, Terms, []);
        {error, Reason} ->
            error({reading_app_file, [AppF, Reason]})
    end.
%% Root of the OTP installation's lib directory (where kernel, stdlib etc.
%% live), derived from the emulator's -root argument.
otp_root() ->
    {ok, [[Root]]} = init:get_argument(root),
    filename:join(Root, "lib").

%% Modified from code_server:get_user_lib_dirs():
%% @spec lib_dirs() -> [string()]
%% @equiv union(lib_dirs("ERL_SETUP_LIBS"), lib_dirs("ERL_LIBS"))
lib_dirs() ->
    %% Order-preserving union: ERL_SETUP_LIBS entries come first.
    A = lib_dirs("ERL_SETUP_LIBS"),
    B = lib_dirs("ERL_LIBS"),
    A ++ (B -- A).

%% @spec lib_dirs(Env::string()) -> [string()]
%% @doc Returns an expanded list of application directories under a lib path
%%
%% This function expands the (ebin/) directories under e.g. `$ERL_SETUP_LIBS' or
%% `$ERL_LIBS'. `$ERL_SETUP_LIB' has the same syntax and semantics as
%% `$ERL_LIBS', but is (hopefully) only recognized by the `setup' application.
%% This can be useful e.g. when keeping a special 'extensions' or 'plugin'
%% root that is handled via `setup', but not treated as part of the normal
%% 'automatic code loading path'.
%% @end
lib_dirs(Env) ->
    case os:getenv(Env) of
        L when is_list(L) ->
            LibDirs = split_paths(L, path_separator(), [], []),
            get_user_lib_dirs_1(LibDirs);
        false ->
            []
    end.
%% OS-specific separator between entries of a lib path variable:
%% ';' on Windows, ':' everywhere else.
path_separator() ->
    {OsFamily, _Flavor} = os:type(),
    case OsFamily of
        win32 -> $;;
        _ -> $:
    end.
%% For each directory on the lib path, expand the application bundles
%% underneath it and keep only the resulting */ebin directories.
%% Unreadable directories are silently skipped.
get_user_lib_dirs_1([Dir|DirList]) ->
    case erl_prim_loader:list_dir(Dir) of
        {ok, Dirs} ->
            {Paths,_Libs} = make_path(Dir, Dirs),
            %% Only add paths trailing with ./ebin.
            [P || P <- Paths, filename:basename(P) =:= "ebin"] ++
                get_user_lib_dirs_1(DirList);
        error ->
            get_user_lib_dirs_1(DirList)
    end;
get_user_lib_dirs_1([]) -> [].
%% Split a path string on the separator character Sep.
%% CurAcc holds the (reversed) characters of the segment being built and
%% SegAcc the (reversed) list of completed segments. Empty segments are
%% kept, matching $PATH-style semantics.
split_paths([Sep | Rest], Sep, CurAcc, SegAcc) ->
    %% Separator hit: close the current segment and start a new one.
    split_paths(Rest, Sep, [], [lists:reverse(CurAcc) | SegAcc]);
split_paths([Ch | Rest], Sep, CurAcc, SegAcc) ->
    split_paths(Rest, Sep, [Ch | CurAcc], SegAcc);
split_paths([], _Sep, CurAcc, SegAcc) ->
    %% End of input: flush the trailing segment.
    lists:reverse(SegAcc, [lists:reverse(CurAcc)]).
%% Given a lib directory and the names found in it, compute the load path.
%% Mirrors code_server: pick the best version of each bundle, then locate
%% the ebin directory of each chosen bundle.
make_path(BundleDir, Bundles0) ->
    Bundles = choose_bundles(Bundles0),
    make_path(BundleDir, Bundles, [], []).

%% Keep only the highest version of each Name-Vsn bundle.
choose_bundles(Bundles) ->
    ArchiveExt = archive_extension(),
    Bs = lists:sort([create_bundle(B, ArchiveExt) || B <- Bundles]),
    [FullName || {_Name,_NumVsn,FullName} <-
                     choose(lists:reverse(Bs), [], ArchiveExt)].

%% Parse "Name-Vsn[.ez]" into {Name, NumericVsn, FullName}. Entries that
%% do not follow the Name-Vsn convention are kept under their full name
%% with version [0], so they are still considered.
create_bundle(FullName, ArchiveExt) ->
    BaseName = filename:basename(FullName, ArchiveExt),
    case split(BaseName, "-") of
        [_, _|_] = Toks ->
            VsnStr = lists:last(Toks),
            case vsn_to_num(VsnStr) of
                {ok, VsnNum} ->
                    Name = join(lists:sublist(Toks, length(Toks)-1),"-"),
                    {Name,VsnNum,FullName};
                false ->
                    {FullName,[0],FullName}
            end;
        _ ->
            {FullName,[0],FullName}
    end.
%% Convert "X.Y.Z. ..." to [K, L, M| ...]
vsn_to_num(Vsn) ->
case is_vsn(Vsn) of
true ->
{ok, [list_to_integer(S) || S <- split(Vsn, ".")]};
_ ->
false
end.
is_vsn(Str) when is_list(Str) ->
Vsns = split(Str, "."),
lists:all(fun is_numstr/1, Vsns).
is_numstr(Cs) ->
lists:all(fun (C) when $0 =< C, C =< $9 -> true;
(_) -> false
end, Cs).
split(Cs, S) ->
split1(Cs, S, []).
split1([C|S], Seps, Toks) ->
case lists:member(C, Seps) of
true -> split1(S, Seps, Toks);
false -> split2(S, Seps, Toks, [C])
end;
split1([], _Seps, Toks) ->
lists:reverse(Toks).
split2([C|S], Seps, Toks, Cs) ->
case lists:member(C, Seps) of
true -> split1(S, Seps, [lists:reverse(Cs)|Toks]);
false -> split2(S, Seps, Toks, [C|Cs])
end;
split2([], _Seps, Toks, Cs) ->
lists:reverse([lists:reverse(Cs)|Toks]).
%% Join a list of strings with separator S (["a","b"] -> "a" ++ S ++ "b").
%% Returns [] for the empty list and the sole element unchanged for a
%% singleton list.
join(Parts, Sep) ->
    lists:append(lists:join(Sep, Parts)).
%% Pick one entry per bundle name from a list sorted descending on version.
%% The first (highest) version wins; a plain directory is preferred over an
%% archive carrying the same version.
choose([{Name,NumVsn,NewFullName}=New|Bs], Acc, ArchiveExt) ->
    case lists:keyfind(Name, 1, Acc) of
        {_, NV, OldFullName} when NV =:= NumVsn ->
            case filename:extension(OldFullName) =:= ArchiveExt of
                false ->
                    %% Same version already chosen as a plain dir: keep it.
                    choose(Bs,Acc, ArchiveExt);
                true ->
                    %% Same version chosen as an archive: replace with New.
                    Acc2 = lists:keystore(Name, 1, Acc, New),
                    choose(Bs,Acc2, ArchiveExt)
            end;
        {_, _, _} ->
            %% A higher version was already chosen for this name.
            choose(Bs,Acc, ArchiveExt);
        false ->
            choose(Bs,[{Name,NumVsn,NewFullName}|Acc], ArchiveExt)
    end;
choose([],Acc, _ArchiveExt) ->
    Acc.

%% Resolve each chosen bundle to its ebin directory, trying a plain
%% <Bundle>/ebin first and archive layouts second.
make_path(_,[],Res,Bs) ->
    {Res,Bs};
make_path(BundleDir,[Bundle|Tail],Res,Bs) ->
    Dir = filename:append(BundleDir,Bundle),
    Ebin = filename:append(Dir,"ebin"),
    %% First try with /ebin
    case erl_prim_loader:read_file_info(Ebin) of
        {ok,#file_info{type=directory}} ->
            make_path(BundleDir,Tail,[Ebin|Res],[Bundle|Bs]);
        _ ->
            %% Second try with archive
            Ext = archive_extension(),
            Base = filename:basename(Dir, Ext),
            Ebin2 = filename:join([filename:dirname(Dir), Base ++ Ext,
                                   Base, "ebin"]),
            Ebins =
                case split(Base, "-") of
                    [_, _|_] = Toks ->
                        %% Archives may also store the app under its bare
                        %% name (without the -Vsn suffix).
                        AppName = join(lists:sublist(Toks, length(Toks)-1),"-"),
                        Ebin3 = filename:join([filename:dirname(Dir), Base ++ Ext, AppName, "ebin"]),
                        [Ebin3, Ebin2, Dir];
                    _ ->
                        [Ebin2, Dir]
                end,
            try_ebin_dirs(Ebins,BundleDir,Tail,Res,Bundle, Bs)
    end.

%% Use the first candidate that exists as a directory; otherwise continue
%% without adding anything for this bundle.
try_ebin_dirs([Ebin | Ebins],BundleDir,Tail,Res,Bundle,Bs) ->
    case erl_prim_loader:read_file_info(Ebin) of
        {ok,#file_info{type=directory}} ->
            make_path(BundleDir,Tail,[Ebin|Res],[Bundle|Bs]);
        _ ->
            try_ebin_dirs(Ebins,BundleDir,Tail,Res,Bundle,Bs)
    end;
try_ebin_dirs([],BundleDir,Tail,Res,_Bundle,Bs) ->
    make_path(BundleDir,Tail,Res,Bs).

%% Archive file extension recognized by the code loader (normally ".ez").
archive_extension() ->
    init:archive_extension().
%% Evaluate a config script file with Name/SCRIPT/CWD/OPTIONS (plus any
%% extra Vars) bound in the evaluation environment, then expand include
%% directives in the result.
read_config_script(F, Name, Opts) ->
    read_config_script(F, Name, [], Opts).

read_config_script(F, Name, Vars, Opts) ->
    Dir = filename:dirname(F),
    Absname = filename:absname(F),
    case file_script(F, script_vars([{'Name', Name},
                                     {'SCRIPT', Absname},
                                     {'CWD', filename:absname(Dir)},
                                     {'OPTIONS', Opts} | Vars])) of
        {ok, Conf} when is_list(Conf) ->
            expand_config_script(Conf, Name, [], Opts);
        Error ->
            setup_lib:abort("Error reading conf (~s): ~p~n", [F, Error])
    end.

%% Walk the config terms, recursively inlining {include, F [, Vars]} and
%% {include_lib, F [, Vars]} entries; all other terms are kept as-is.
expand_config_script([{include, F}|T], Name, Acc, Opts) ->
    Incl = read_config_script(F, Name, [], Opts),
    expand_config_script(T, Name, [Incl|Acc], Opts);
expand_config_script([{include, F, Vars}|T], Name, Acc, Opts) ->
    Incl = read_config_script(F, Name, Vars, Opts),
    expand_config_script(T, Name, [Incl|Acc], Opts);
expand_config_script([{include_lib, LibF}|T], Name, Acc, Opts) ->
    ?if_verbose(io:fwrite("include_lib: ~s~n", [LibF])),
    expand_include_lib(LibF, [], T, Name, Acc, Opts);
expand_config_script([{include_lib, LibF, Vars}|T], Name, Acc, Opts) ->
    ?if_verbose(io:fwrite("include_lib: ~s (~p)~n", [LibF, Vars])),
    expand_include_lib(LibF, Vars, T, Name, Acc, Opts);
expand_config_script([H|T], Name, Acc, Opts) ->
    expand_config_script(T, Name, [H|Acc], Opts);
expand_config_script([], _, Acc, _) ->
    %% Included scripts were accumulated as nested lists; flatten into a
    %% single config list.
    lists:flatten(lists:reverse(Acc)).

%% Resolve an include_lib path relative to an application's lib dir and
%% inline the referenced script.
expand_include_lib(LibF, Vars, T, Name, Acc, Opts) ->
    Fullname = find_lib_script(LibF),
    Incl = read_config_script(Fullname, Name, Vars, Opts),
    expand_config_script(T, Name, [Incl|Acc], Opts).

%% Translate "App/rest/of/path" into an absolute path under App's lib dir,
%% aborting with a message when the application cannot be found.
find_lib_script(LibF) ->
    case filename:split(LibF) of
        [App|Tail] ->
            ?if_verbose(io:fwrite("lib: ~s~n", [App])),
            try code_lib_dir(App) of
                {error, bad_name} ->
                    setup_lib:abort(
                      "Error including conf (~s): no such lib (~s)~n",
                      [LibF, App]);
                LibDir when is_list(LibDir) ->
                    filename:join([LibDir | Tail])
            catch
                error:_ ->
                    setup_lib:abort(
                      "Error including conf (~s): no such lib (~s)~n",
                      [LibF, App])
            end;
        [] ->
            setup_lib:abort("Invalid include conf: no file specified~n", [])
    end.
code_lib_dir("setup") ->
IsEscript = setup_lib:is_escript(),
case IsEscript of
true ->
filename:dirname(
filename:absname(
escript:script_name()));
false ->
code:lib_dir(setup)
end;
code_lib_dir(App) when is_list(App); is_binary(App) ->
try code:lib_dir(binary_to_existing_atom(
iolist_to_binary(App), latin1))
catch error:_ -> undefined
end.
%% -- a modified version of file:script/2
%% -- The main difference: call erl_eval:exprs() with a local_function handler
file_script(File, Bs) ->
    case file:open(File, [read]) of
        {ok, Fd} ->
            R = eval_stream(Fd, return, Bs),
            _ = file:close(Fd),
            R;
        Error ->
            Error
    end.

%% Evaluate all expressions read from Fd, threading the bindings through.
eval_stream(Fd, Handling, Bs) ->
    _ = epp:set_encoding(Fd),
    eval_stream(Fd, Handling, 1, undefined, [], Bs).

eval_stream(Fd, H, Line, Last, E, Bs) ->
    eval_stream2(io:parse_erl_exprs(Fd, '', Line), Fd, H, Last, E, Bs).

%% Evaluate one parsed expression group; collect errors (parse or eval)
%% in E and remember the last successful value in Last.
eval_stream2({ok,Form,EndLine}, Fd, H, Last, E, Bs0) ->
    try erl_eval:exprs(Form, Bs0, local_func_handler()) of
        {value,V,Bs} ->
            eval_stream(Fd, H, EndLine, {V}, E, Bs)
    catch
        ?EXCEPTION(Class, Reason, Stacktrace) ->
            Error = {EndLine,?MODULE,{Class,Reason, ?GET_STACK(Stacktrace)}},
            eval_stream(Fd, H, EndLine, Last, [Error|E], Bs0)
    end;
eval_stream2({error,What,EndLine}, Fd, H, Last, E, Bs) ->
    eval_stream(Fd, H, EndLine, Last, [What | E], Bs);
eval_stream2({eof,EndLine}, _Fd, H, Last, E, _Bs) ->
    %% At eof: return the last value if everything succeeded, otherwise the
    %% first recorded error.
    case {H, Last, E} of
        {return, {Val}, []} ->
            {ok, Val};
        {return, undefined, E} ->
            {error, hd(lists:reverse(E, [{EndLine,?MODULE,undefined_script}]))};
        %% {ignore, _, []} ->
        %%     ok;
        {_, _, [_|_] = E} ->
            {error, hd(lists:reverse(E))}
    end.
%% -- end file:script/2 copy-paste

%% Local functions callable from config scripts: b(), eval/1,2, eval_lib/1,2.
local_func_handler() ->
    {eval, fun local_func/3}.

%% b() -> current bindings (mirrors the shell's b()).
local_func(b, [], Bs) ->
    {value, erl_eval:bindings(Bs), Bs};
%% eval(File [, Vars]) -> evaluate another script file with extended bindings.
local_func(eval, Params, Bs) ->
    [F|T] = [erl_parse:normalise(P) || P <- Params],
    Vars = case T of
               [] -> [];
               [Vs] -> Vs
           end,
    Absname = filename:absname(F),
    {value, file_script(F, script_vars(Vars ++ [{'SCRIPT', Absname}|Bs])), Bs};
%% eval_lib(LibFile [, Vars]) -> like eval, but resolved via lib dirs;
%% returns {error, enoent} when the lib cannot be found.
local_func(eval_lib, Params, Bs) ->
    [LibF|T] = [erl_parse:normalise(P) || P <- Params],
    try find_lib_script(LibF) of
        Fullname ->
            Vars = case T of
                       [] -> [];
                       [Vs] -> Vs
                   end,
            Res = file_script(Fullname, script_vars(
                                          Vars ++ [{'SCRIPT', Fullname}|Bs])),
            {value, Res, Bs}
    catch
        error:_ ->
            {value, {error, enoent}, Bs}
    end;
local_func(F, A, _) ->
    erlang:error({script_undef, F, A, []}).

%% Build an erl_eval binding structure from a {Key, Value} list.
script_vars(Vs) ->
    lists:foldl(fun({K,V}, Acc) ->
                        erl_eval:add_binding(K, V, Acc)
                end, erl_eval:new_bindings(), Vs).
%% Unit tests
-ifdef(TEST).

%% Top-level eunit generator: every test runs with the setup application
%% loaded (not started) and gets it stopped/unloaded afterwards.
setup_test_() ->
    {foreach,
     fun() ->
             application:load(setup)
     end,
     fun(_) ->
             application:stop(setup),
             application:unload(setup)
     end,
     [
      ?_test(t_expand_modes()),
      ?_test(t_find_hooks()),
      ?_test(t_find_hooks_1()),
      ?_test(t_expand_vars()),
      ?_test(t_nested_includes())
     ]}.

%% Mode expansion: identity, self-reference, chains and cycles.
t_expand_modes() ->
    [a] = maybe_expand_mode(a, []),
    [a] = maybe_expand_mode(a, [{a, [a]}]),
    [a,b,c] = maybe_expand_mode(a, [{a, [a,b]},
                                    {b, [b,c]}]),
    [b] = maybe_expand_mode(a, [{a, [b]}]),
    [a,b,c] = maybe_expand_mode(a, [{a, [a,b]},
                                    {b, [b,c]},
                                    {c, [c,a]}]),
    [c,d] = maybe_expand_mode(a, [{a, [b]},
                                  {b, [c,d]}]),
    ok.

%% Hooks are grouped per phase number; mode-specific hooks are merged in.
t_find_hooks() ->
    application:set_env(setup, '$setup_hooks',
                        [{100, [{a, hook, [100,1]},
                                {a, hook, [100,2]}]},
                         {200, [{a, hook, [200,1]}]},
                         {upgrade, [{100, [{a, upgrade_hook, [100,1]}]}]},
                         {setup, [{100, [{a, hook, [100,3]}]}]},
                         {normal, [{300, {a, normal_hook, [300,1]}}]}
                        ]),
    NormalHooks = find_hooks(normal),
    [{300, [{a, normal_hook, [300,1]}]}] = NormalHooks,
    UpgradeHooks = find_hooks(upgrade),
    [{100, [{a, upgrade_hook, [100,1]}]}] = UpgradeHooks,
    SetupHooks = find_hooks(setup),
    [{100, [{a,hook,[100,1]},
            {a,hook,[100,2]},
            {a,hook,[100,3]}]},
     {200, [{a,hook,[200,1]}]}] = SetupHooks,
    ok.

%% Like t_find_hooks/0, but with a custom mode expanding to several modes.
t_find_hooks_1() ->
    application:set_env(setup, modes, [{test, [setup, normal, test]}]),
    application:set_env(setup, '$setup_hooks',
                        [{100, [{a, hook, [100,1]},
                                {a, hook, [100,2]}]},
                         {200, [{a, hook, [200,1]}]},
                         {upgrade, [{100, [{a, upgrade_hook, [100,1]}]}]},
                         {setup, [{100, [{a, hook, [100,3]}]}]},
                         {normal, [{300, {a, normal_hook, [300,1]}}]},
                         {test, [{400, {a, test_hook, [400,1]}}]}
                        ]),
    NormalHooks = find_hooks(normal),
    [{300, [{a, normal_hook, [300,1]}]}] = NormalHooks,
    UpgradeHooks = find_hooks(upgrade),
    [{100, [{a, upgrade_hook, [100,1]}]}] = UpgradeHooks,
    SetupHooks = find_hooks(setup),
    [{100, [{a,hook,[100,1]},
            {a,hook,[100,2]},
            {a,hook,[100,3]}]},
     {200, [{a,hook,[200,1]}]}] = SetupHooks,
    TestHooks = find_hooks(test),
    [{100, [{a,hook,[100,1]},
            {a,hook,[100,2]},
            {a,hook,[100,3]}]},
     {200, [{a,hook,[200,1]}]},
     {300, [{a,normal_hook, [300,1]}]},
     {400, [{a,test_hook, [400,1]}]}] = TestHooks,
    ok.

%% Variable expansion in env values: global vars, per-app '$setup_vars',
%% $env() references and the '$value'/'$string'/'$binary' wrappers.
t_expand_vars() ->
    %% global env
    application:set_env(setup, vars, [{"PLUS", {apply,erlang,'+',[1,2]}},
                                      {"FOO", {value, {foo,1}}}]),
    %% private env, stdlib
    application:set_env(stdlib, '$setup_vars',
                        [{"MINUS", {apply,erlang,'-',[4,3]}},
                         {"BAR", {value, "bar"}}]),
    application:set_env(setup, envy, 17),
    application:set_env(setup, v1, "/$BAR/$PLUS/$MINUS/$FOO/$env(envy)"),
    application:set_env(setup, v2, {'$value', "$FOO"}),
    application:set_env(setup, v3, {'$string', "$env(envy)"}),
    application:set_env(stdlib, v1, {'$string', "$FOO"}),
    application:set_env(stdlib, v2, {'$binary', "$FOO"}),
    application:set_env(stdlib, v3, {"$PLUS", "$MINUS", "$BAR"}),
    application:set_env(stdlib, v4, [a|b]),
    %% $BAR and $MINUS are not in setup's context
    {ok, "/$BAR/3/$MINUS/{foo,1}/17"} = setup:get_env(setup, v1),
    {ok, {foo,1}} = setup:get_env(setup, v2),
    {ok, "17"} = setup:get_env(setup, v3),
    {ok, "{foo,1}"} = setup:get_env(stdlib, v1),
    {ok, <<"{foo,1}">>} = setup:get_env(stdlib,v2),
    {ok, {"3", "1", "bar"}} = setup:get_env(stdlib,v3),
    {ok, [a|b]} = setup:get_env(stdlib, v4),
    ok.

%% Nested {include, File} directives are expanded depth-first and flattened.
t_nested_includes() ->
    to_file_("a.config", [{apps,[kernel,stdlib,setup]},
                          {env,[{setup,[{a,1}]}]}]),
    to_file_("b.config", [{include,"a.config"},
                          {set_env, [{setup, [{a,2}]}]}]),
    to_file_("c.config", [{include, "b.config"},
                          {set_env, [{setup, [{a,3}]}]}]),
    [{apps,[kernel,stdlib,setup]},
     {env, [{setup, [{a,1}]}]},
     {set_env, [{setup, [{a,2}]}]},
     {set_env, [{setup, [{a,3}]}]}] =
        setup:read_config_script("c.config", nested, []).

%% Write one term to file F, always closing the file descriptor.
to_file_(F, Term) ->
    {ok, Fd} = file:open(F, [write]),
    try io:fwrite(Fd, "~p.~n", [Term])
    after
        file:close(Fd)
    end.
-endif. | src/setup.erl | 0.655777 | 0.452052 | setup.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Riak: A lightweight, decentralized key-value store.
%%
%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc This module provides a Webmachine resource that lists the
%% URLs for other resources available on this host.
%%
%% Links to Riak resources will be added to the Link header in
%% the form:
%%```
%% <URL>; rel="RESOURCE_NAME"
%%'''
%% HTML output of this resource is a list of link tags like:
%%```
%% <a href="URL">RESOURCE_NAME</a>
%%'''
%% JSON output of this resource in an object with elements like:
%%```
%% "RESOURCE_NAME":"URL"
%%'''
-module(riak_core_wm_urlmap).
-export([
init/1,
resource_exists/2,
content_types_provided/2,
to_html/2,
to_json/2
]).
-include_lib("webmachine/include/webmachine.hrl").
%% @doc Webmachine init: the resource state is the list of known services.
init([]) ->
    {ok, service_list()}.

%% @doc Always exists; also advertises the services via the Link header.
resource_exists(RD, Services) ->
    {true, add_link_header(RD, Services), Services}.
%% @doc Add a Link response header advertising every service as
%% `<URL>; rel="NAME"', comma-separated.
add_link_header(RD, Services) ->
    Links = [ ["<", Uri, ">; rel=\"", Resource, "\""]
              || {Resource, Uri} <- Services ],
    wrq:set_resp_header("Link", string:join(Links, ","), RD).
%% @doc Offer both HTML and JSON renderings of the service map.
content_types_provided(RD, Services) ->
    {[{"text/html", to_html},{"application/json", to_json}], RD, Services}.
%% @doc Render the service list as a minimal HTML page of links.
%% The body is returned as an iolist; webmachine flattens it on output.
to_html(RD, Services) ->
    Items = [ ["<li><a href=\"", Uri, "\">", Resource, "</a></li>"]
              || {Resource, Uri} <- Services ],
    Body = ["<html><body><ul>", Items, "</ul></body></html>"],
    {Body, RD, Services}.
%% @doc Render the service list as a JSON object of NAME:URL members.
to_json(RD, Services) ->
    {mochijson:encode({struct, Services}), RD, Services}.
service_list() ->
Dispatch = webmachine_router:get_routes(),
lists:usort(
[{atom_to_list(Resource), "/"++UriBase}
|| {[UriBase|_], Resource, _} <- Dispatch]). | src/riak_core_wm_urlmap.erl | 0.563618 | 0.49292 | riak_core_wm_urlmap.erl | starcoder |
% @copyright 2008-2011 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%%% @author <NAME> <<EMAIL>>
%%% @doc Unit tests for src/util.erl.
%%% @end
%% @version $Id$
-module(util_SUITE).
-author('<EMAIL>').
-vsn('$Id$').
-compile(export_all).
-include("unittest.hrl").
%% List of all test cases run by this suite.
all() ->
    [min_max, largest_smaller_than, gb_trees_foldl,
     s_repeat_test, s_repeatAndCollect_test, s_repeatAndAccumulate_test,
     p_repeat_test, p_repeatAndCollect_test, p_repeatAndAccumulate_test].

%% Per-testcase timeout.
suite() ->
    [
     {timetrap, {seconds, 20}}
    ].

init_per_suite(Config) ->
    unittest_helper:init_per_suite(Config).

end_per_suite(Config) ->
    _ = unittest_helper:end_per_suite(Config),
    ok.

%% util:min/2 and util:max/2 with both orderings and equal arguments.
min_max(_Config) ->
    ?equals(util:min(1, 2), 1),
    ?equals(util:min(2, 1), 1),
    ?equals(util:min(1, 1), 1),
    ?equals(util:max(1, 2), 2),
    ?equals(util:max(2, 1), 2),
    ?equals(util:max(1, 1), 1),
    ok.

%% gb_trees_largest_smaller_than: strictly-smaller lookup, including the
%% below-minimum (nil) and above-maximum cases.
largest_smaller_than(_Config) ->
    KVs = [{1, 1}, {2, 2}, {4, 4}, {8, 8}, {16, 16}, {32, 32}, {64, 64}],
    Tree = gb_trees:from_orddict(KVs),
    ?equals(util:gb_trees_largest_smaller_than(0, Tree), nil),
    ?equals(util:gb_trees_largest_smaller_than(1, Tree), nil),
    ?equals(util:gb_trees_largest_smaller_than(2, Tree), {value, 1, 1}),
    ?equals(util:gb_trees_largest_smaller_than(3, Tree), {value, 2, 2}),
    ?equals(util:gb_trees_largest_smaller_than(7, Tree), {value, 4, 4}),
    ?equals(util:gb_trees_largest_smaller_than(9, Tree), {value, 8, 8}),
    ?equals(util:gb_trees_largest_smaller_than(31, Tree), {value, 16, 16}),
    ?equals(util:gb_trees_largest_smaller_than(64, Tree), {value, 32, 32}),
    ?equals(util:gb_trees_largest_smaller_than(65, Tree), {value, 64, 64}),
    ?equals(util:gb_trees_largest_smaller_than(1000, Tree), {value, 64, 64}),
    ok.

%% Fold over all key/value pairs of a gb_tree; keys equal values here,
%% so the sum of keys (127) is expected.
gb_trees_foldl(_Config) ->
    KVs = [{1, 1}, {2, 2}, {4, 4}, {8, 8}, {16, 16}, {32, 32}, {64, 64}],
    Tree = gb_trees:from_orddict(KVs),
    ?assert(util:gb_trees_foldl(fun (K, K, Acc) ->
                                        Acc + K
                                end,
                                0,
                                Tree) =:= 127).

%% Sequential repetition; success is verified manually via printed output.
s_repeat_test(_) ->
    util:s_repeat(fun() -> io:format("#s_repeat#~n") end, [], 5),
    io:format("s_repeat_test successful if #s_repeat# was printed 5 times~n"),
    ok.

s_repeatAndCollect_test(_) ->
    Times = 3,
    Result = util:s_repeatAndCollect(fun(X) -> X * X end, [Times], Times),
    ?equals(Result, [9, 9, 9]),
    ok.

s_repeatAndAccumulate_test(_) ->
    Times = 5,
    Result = util:s_repeatAndAccumulate(fun(X) -> X * X end,
                                        [Times],
                                        Times,
                                        fun(X, Y) -> X + Y end,
                                        0),
    ?equals(Result, Times*Times*Times),
    Result2 = util:s_repeatAndAccumulate(fun(X) -> X * X end,
                                         [Times],
                                         Times,
                                         fun(X, Y) -> X + Y end,
                                         1000),
    ?equals(Result2, 1000 + Times*Times*Times),
    ok.

%% Parallel repetition; success is verified manually via printed output.
p_repeat_test(_) ->
    Times = 5,
    util:p_repeat(fun(Caller) ->
                          io:format("~w #p_repeat_test# called by ~w",
                                    [self(), Caller])
                  end,
                  [self()],
                  Times),
    io:format("p_repeat_test successful if ~B different pids printed #p_repeat#.", [Times]),
    ok.

p_repeatAndCollect_test(_) ->
    Times = 3,
    Result = util:p_repeatAndCollect(fun(X) -> X * X end, [Times], Times),
    ?equals(Result, [9, 9, 9]),
    ok.
p_repeatAndAccumulate_test(_) ->
Times = 15,
Result = util:p_repeatAndAccumulate(fun(X) ->
R = X * X,
io:format("pid ~w result ~B", [self(), R]),
R
end,
[Times],
Times,
fun(X, Y) -> X + Y end,
0),
?equals(Result, Times*Times*Times),
Result2 = util:p_repeatAndAccumulate(fun(X) -> X * X end,
[Times],
Times,
fun(X, Y) -> X + Y end,
1000),
?equals(Result2, 1000 + Times*Times*Times),
ok. | test/util_SUITE.erl | 0.627152 | 0.489259 | util_SUITE.erl | starcoder |
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-------------------------------------------------------------------------
-module(ot_ctx_pdict).
-behaviour(ot_ctx).
-export([set_value/4,
get_value/3,
get_value/4,
get_current/2,
set_current/3,
clear/2,
remove/3]).
-spec set_value(ot_ctx:context_manager(), term(), term(), term()) -> ok.
%% @doc Store Key => Value in the context map kept in the process
%% dictionary under Namespace, creating the map if needed.
set_value(_ContextManager, Namespace, Key, Value) ->
    Ctx = case erlang:get(Namespace) of
              Existing when is_map(Existing) -> Existing;
              _Other -> #{}
          end,
    _ = erlang:put(Namespace, Ctx#{Key => Value}),
    ok.
-spec get_value(ot_ctx:context_manager(), term(), term()) -> term().
%% @doc Fetch Key from the Namespace context map; undefined when absent.
get_value(ContextManager, Namespace, Key) ->
    get_value(ContextManager, Namespace, Key, undefined).

-spec get_value(ot_ctx:context_manager(), term(), term(), term()) -> term().
%% @doc Fetch Key from the Namespace context map, returning Default when
%% the namespace holds no map or the key is absent.
get_value(_ContextManager, Namespace, Key, Default) ->
    case erlang:get(Namespace) of
        Ctx when is_map(Ctx) ->
            maps:get(Key, Ctx, Default);
        _NoCtx ->
            Default
    end.
-spec clear(ot_ctx:context_manager(), term()) -> ok.
%% @doc Drop the whole context map stored under Namespace.
%%
%% Fixed to actually return `ok' as the spec promises: erlang:erase/1
%% returns the previously stored value (or `undefined'), which this
%% function used to leak to its callers.
clear(_ContextManager, Namespace) ->
    _ = erlang:erase(Namespace),
    ok.
-spec remove(ot_ctx:context_manager(), term(), term()) -> ok.
%% @doc Delete Key from the Namespace context map, if such a map exists.
remove(_ContextManager, Namespace, Key) ->
    case erlang:get(Namespace) of
        Ctx when is_map(Ctx) ->
            _ = erlang:put(Namespace, maps:remove(Key, Ctx)),
            ok;
        _NoCtx ->
            ok
    end.
-spec get_current(ot_ctx:context_manager(), term()) -> map().
%% @doc Return the whole context map for Namespace; empty map when unset.
get_current(_ContextManager, Namespace) ->
    case erlang:get(Namespace) of
        Ctx when is_map(Ctx) -> Ctx;
        _Unset -> #{}
    end.
-spec set_current(ot_ctx:context_manager(), term(), map()) -> ok.
%% @doc Replace the whole context map stored under Namespace.
%%
%% Fixed to return `ok' as the spec promises: erlang:put/2 returns the
%% previously stored value (or `undefined'), which this function used
%% to return to its callers.
set_current(_ContextManager, Namespace, Ctx) ->
    _ = erlang:put(Namespace, Ctx),
    ok.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
-module(inet_udp).
-export([open/1, open/2, close/1]).
-export([send/2, send/4, recv/2, recv/3, connect/3]).
-export([controlling_process/2]).
-export([fdopen/2]).
-export([getserv/1, getaddr/1, getaddr/2, translate_ip/1]).
-include("inet_int.hrl").
-define(FAMILY, inet).
-define(PROTO, udp).
-define(TYPE, dgram).
-define(RECBUF, (8*1024)).
%% inet_udp port lookup
getserv(Port) when is_integer(Port) -> {ok, Port};
getserv(Name) when is_atom(Name) -> inet:getservbyname(Name, ?PROTO).
%% inet_udp address lookup
getaddr(Address) -> inet:getaddr(Address, ?FAMILY).
getaddr(Address, Timer) -> inet:getaddr(Address, ?FAMILY, Timer).
%% inet_udp special this side addresses
translate_ip(IP) -> inet:translate_ip(IP, ?FAMILY).
-spec open(_) -> {ok, inet:socket()} | {error, atom()}.
open(Port) -> open(Port, []).
-spec open(_, _) -> {ok, inet:socket()} | {error, atom()}.
%% Open a UDP socket on Port, merging Opts with the module default of a
%% larger receive buffer. Note: invalid options exit/1 the calling
%% process (historical behaviour) rather than returning {error, Reason}.
open(Port, Opts) ->
    case inet:udp_options(
           [{port,Port}, {recbuf, ?RECBUF} | Opts],
           ?MODULE) of
        {error, Reason} -> exit(Reason);
        {ok,
         #udp_opts{
            fd = Fd,
            ifaddr = BAddr = {A,B,C,D},
            port = BPort,
            opts = SockOpts}}
          when ?ip(A,B,C,D), ?port(BPort) ->
            %% Only a plain 4-tuple IPv4 bind address is accepted here
            inet:open(
              Fd, BAddr, BPort, SockOpts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE);
        {ok, _} -> exit(badarg)
    end.
%% Send Data to an explicit IPv4 destination. Accepted destination
%% shapes: separate IP and Port; an {IP, Port} pair with ancillary
%% data; or a family-tagged {inet, {IP | loopback, Port}} address.
send(S, {A,B,C,D} = IP, Port, Data)
  when ?ip(A,B,C,D), ?port(Port) ->
    prim_inet:sendto(S, {IP, Port}, [], Data);
send(S, {{A,B,C,D}, Port} = Addr, AncData, Data)
  when ?ip(A,B,C,D), ?port(Port), is_list(AncData) ->
    prim_inet:sendto(S, Addr, AncData, Data);
send(S, {?FAMILY, {{A,B,C,D}, Port}} = Address, AncData, Data)
  when ?ip(A,B,C,D), ?port(Port), is_list(AncData) ->
    prim_inet:sendto(S, Address, AncData, Data);
send(S, {?FAMILY, {loopback, Port}} = Address, AncData, Data)
  when ?port(Port), is_list(AncData) ->
    prim_inet:sendto(S, Address, AncData, Data).
%% Send on a connected socket: the destination was fixed by connect/3,
%% so the wildcard address {any, 0} and no ancillary data are passed.
send(Socket, Packet) ->
    prim_inet:sendto(Socket, {any, 0}, [], Packet).
%% Associate the socket with a fixed IPv4 peer address and port.
connect(Socket, {A,B,C,D} = Peer, Port)
  when ?ip(A,B,C,D), ?port(Port) ->
    prim_inet:connect(Socket, Peer, Port).
%% Receive one packet (blocking).
recv(Socket, Length) ->
    prim_inet:recvfrom(Socket, Length).
%% Receive one packet, waiting at most Timeout milliseconds.
recv(Socket, Length, Timeout) ->
    prim_inet:recvfrom(Socket, Length, Timeout).
-spec close(inet:socket()) -> ok.
%% Close the socket through inet's generic UDP close path.
close(Socket) ->
    inet:udp_close(Socket).
%%
%% Set controlling process:
%% 1) First sync socket into a known state
%% 2) Move all messages onto the new owners message queue
%% 3) Commit the owner
%% 4) Wait for ack of new Owner (since socket does some link and unlink)
%%
%% Delegate the owner transfer to inet's generic UDP implementation.
controlling_process(Socket, NewOwner) ->
    inet:udp_controlling_process(Socket, NewOwner).
%%
%% Create a port/socket from a file descriptor
%%
%% Wrap an existing file descriptor in a UDP socket. The default
%% receive buffer is prepended; optuniquify/1 keeps the *last*
%% occurrence of duplicated options, so a caller-supplied recbuf wins.
fdopen(Fd, Opts) ->
    inet:fdopen(
      Fd, optuniquify([{recbuf, ?RECBUF} | Opts]),
      ?PROTO, ?FAMILY, ?TYPE, ?MODULE).
%% Remove all duplicate options from an option list.
%% The last occurring duplicate is used, and the order is preserved.
%%
%% Here's how:
%% Reverse the list.
%% For each head option go through the tail and remove
%% all occurrences of the same option from the tail.
%% Store that head option and iterate using the new tail.
%% Return the list of stored head options.
%% Deduplicate an option list, keeping the last occurrence of each
%% option while preserving relative order (see comment above).
optuniquify(Opts) ->
    optuniquify(lists:reverse(Opts), []).
%% Walk the reversed option list: for each head option, strip its
%% duplicates from the tail via optuniquify/4.
optuniquify([], Result) ->
    Result;
optuniquify([Opt | Tail], Result) ->
    %% Remove all occurrences of Opt in Tail,
    %% prepend Opt to Result,
    %% then iterate back here.
    optuniquify(Opt, Tail, [], Result).
%% Scan Tail for duplicates of Opt, keeping non-duplicates in Rest
%% (accumulated in reverse). Two options are duplicates when they are
%% equal terms, or tuples of equal size with the same first element.
%% (The trailing dataset-metadata garbage fused onto the final line of
%% this clause group has been removed.)
%% All duplicates of current option are now removed
optuniquify(Opt, [], Rest, Result) ->
    %% Store unique option, then resume scanning the remaining tail
    optuniquify(lists:reverse(Rest), [Opt | Result]);
%% Duplicate option tuple
optuniquify(Opt0, [Opt1 | Tail], Rest, Result)
  when tuple_size(Opt0) =:= tuple_size(Opt1),
       element(1, Opt0) =:= element(1, Opt1) ->
    %% Waste duplicate
    optuniquify(Opt0, Tail, Rest, Result);
%% Duplicate option atom or other term
optuniquify(Opt, [Opt | Tail], Rest, Result) ->
    %% Waste duplicate
    optuniquify(Opt, Tail, Rest, Result);
%% Non-duplicate option
optuniquify(Opt, [X | Tail], Rest, Result) ->
    %% Keep non-duplicate
    optuniquify(Opt, Tail, [X | Rest], Result).
%% @copyright 2021 Driebit BV
%% @doc Extract properties from a compact RDF document encoded by zotonic_rdf.
%% Copyright 2021 Driebit BV
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(z_rdf_props).
-author("<NAME> <<EMAIL>>").
-export([
mapping/1,
extract_resource/2,
extract_props/1,
extract_edges/2,
map_values/1,
to_simple_value/1
]).
-include("../../include/zotonic.hrl").
%% @doc Check if a property can be mapped to a standard property.
%% This is used in m_rsc:p/3 for fetching namespaced properties.
-spec mapping( binary() ) -> binary() | undefined.
mapping(Prop) ->
    case maps:find(Prop, mapping()) of
        {ok, Mapped} ->
            Mapped;
        error ->
            %% Fall back to the date-property mapping table
            maps:get(Prop, mapping_dates(), undefined)
    end.
%% @doc Extract standard Zotonic src import from an RDF document. The document
%% is a "compact" document returned by zotonic_rdf. Requires a binary
%% "@id" (the resource URI); otherwise {error, id} is returned.
-spec extract_resource( RDFDoc, z:context() ) -> {ok, Import} | {error, term()}
    when RDFDoc :: map(),
         Import :: map().
extract_resource(#{ <<"@id">> := Uri } = RDFDoc, Context) when is_binary(Uri) ->
    %% Map known RDF predicates onto Zotonic resource properties
    RDFDoc1 = extract_props(RDFDoc),
    Props1 = RDFDoc1#{
        <<"uri">> => Uri,
        <<"name">> => undefined,
        <<"is_published">> => true,
        <<"is_authoritative">> => true
    },
    %% Predicates whose objects are resources become edges; drop those
    %% keys from the property map so they are not imported twice.
    Edges = extract_edges(RDFDoc, Context),
    Props2 = maps:without(maps:keys(Edges), Props1),
    {Medium, MediumUrl} = extract_medium(Props2),
    Props3 = maps:without([
        <<"@id">>,
        <<"zotonic:medium_url">>,
        <<"zotonic:medium">>
    ], Props2),
    Rsc = #{
        <<"uri">> => Uri,
        %% Default category when the document carried no rdf:type mapping
        <<"is_a">> => [ maps:get(<<"category_id">>, Props3, <<"other">>) ],
        <<"resource">> => Props3,
        <<"edges">> => Edges,
        <<"medium">> => Medium,
        <<"medium_url">> => MediumUrl
    },
    {ok, Rsc};
extract_resource(_, _) ->
    {error, id}.
%% @doc Extract the medium record for importing an image or other media.
%% Returns {Medium, MediumUrl}; either element may be undefined.
-spec extract_medium( RDFDoc ) -> {Medium, MediumUrl}
    when RDFDoc :: map(),
         Medium :: map() | undefined,
         MediumUrl :: binary() | undefined.
extract_medium(#{ <<"zotonic:medium_url">> := MediumUrl } = RDFDoc) when MediumUrl =/= <<>> ->
    %% Only a download URL is given: build a minimal medium record,
    %% dated from the document (modified, else created, else "now").
    Created = maps:get(<<"modified">>, RDFDoc,
                       maps:get(<<"created">>, RDFDoc, erlang:universaltime())),
    Medium = #{
        <<"created">> => Created
    },
    {Medium, to_value(MediumUrl)};
extract_medium(#{ <<"zotonic:medium">> := Medium } = RDFDoc) when is_map(Medium) ->
    %% A full medium map is embedded: keep it, filling in the created
    %% date. Dates on the medium itself win over the document's dates.
    CreatedDoc = maps:get(<<"modified">>, RDFDoc,
                          maps:get(<<"created">>, RDFDoc, erlang:universaltime())),
    CreatedMedium = maps:get(<<"modified">>, Medium,
                             maps:get(<<"created">>, Medium, CreatedDoc)),
    Medium1 = Medium#{
        <<"created">> => CreatedMedium
    },
    MediumUrl = maps:get(<<"download_url">>, Medium1, undefined),
    {Medium1, MediumUrl};
extract_medium(_) ->
    {undefined, undefined}.
%% @doc Extract standard Zotonic edges from an RDF document. The document
%% is a "compact" document returned by zotonic_rdf. This collects the @id
%% attributes in the top-level predicates of the document.
-spec extract_edges( RDFDoc, Context ) -> Edges
    when RDFDoc :: map(),
         Edges :: #{ Predicate := [ Edge ]},
         Predicate :: binary(),
         Edge :: map(),
         Context :: z:context().
extract_edges(RDFDoc, Context) ->
    %% First collect the object list per predicate, then wrap each
    %% predicate into the importable predicate/objects structure.
    Es = maps:fold(
        fun(K, V, Acc) -> extract_edge(K, V, Acc, Context) end,
        #{},
        RDFDoc),
    maps:fold(
        fun(P, Os, Acc) ->
            Acc#{
                P => #{
                    <<"predicate">> => #{
                        <<"is_a">> => [ <<"meta">>, <<"predicate">> ],
                        <<"uri">> => P
                    },
                    <<"objects">> => Os
                }
            }
        end,
        #{},
        Es).
%% Accumulate edge objects for one top-level predicate. rdf:type is
%% category information, not an edge. Only predicates that resolve to a
%% resource of category 'predicate' in the local system are kept, and
%% only objects that carry an @id.
extract_edge(<<"rdf:type">>, _, Acc, _Context) ->
    Acc;
extract_edge(K, Vs, Acc, Context) when is_list(Vs) ->
    %% foldr preserves the object order of the source document
    lists:foldr(
        fun(V, VAcc) ->
            extract_edge(K, V, VAcc, Context)
        end,
        Acc,
        Vs);
extract_edge(K, #{ <<"@id">> := Uri }, Acc, Context) ->
    case m_rsc:rid(K, Context) of
        undefined ->
            Acc;
        RId ->
            case m_rsc:is_a(RId, predicate, Context) of
                true ->
                    Es = maps:get(K, Acc, []),
                    Obj = #{
                        <<"object_id">> => #{
                            <<"is_a">> => [ <<"other">> ],
                            <<"uri">> => Uri
                        }
                    },
                    Acc#{ K => lists:flatten([Es, Obj]) };
                false ->
                    Acc
            end
    end;
extract_edge(_K, _, Acc, _Context) ->
    Acc.
%% @doc Extract standard Zotonic properties from an RDF document. The document
%% is a "compact" document returned by zotonic_rdf. All predicates in the
%% document are of the form "namespace:term", for example: "dc:title".
%% The namespaces are the ones defined in zotonic_rdf.
-spec extract_props( RDFDoc ) -> Props
    when RDFDoc :: map(),
         Props :: map().
extract_props(RDFDoc) ->
    %% Date properties first (they need datetime parsing), then the
    %% simple renames, finally simplify all remaining nested values.
    Ps1 = map(RDFDoc, mapping_dates(), fun to_date/1, RDFDoc),
    Ps2 = map(RDFDoc, mapping(), fun to_simple_value/1, Ps1),
    maps:fold(
        fun(K, V, Acc) ->
            Acc#{
                K => map_nested_values(V)
            }
        end,
        #{},
        Ps2).
%% For every Mapping key present in Doc, apply Fun to its value and
%% rename the key to its mapped property name in DocAcc. A Fun
%% returning 'error' leaves DocAcc unchanged for that key.
map(Doc, Mapping, Fun, DocAcc) ->
    maps:fold(
        fun(K, P, Acc) ->
            case maps:find(K, Doc) of
                {ok, V} ->
                    case Fun(V) of
                        error ->
                            Acc;
                        V1 ->
                            %% Replace the namespaced key with the
                            %% Zotonic property name
                            Acc1 = maps:remove(K, Acc),
                            Acc1#{ P => V1 }
                    end;
                error ->
                    Acc
            end
        end,
        DocAcc,
        Mapping).
%% Recursively simplify @value / @language constructs inside a term.
%% @id references are left untouched; values that fail conversion are
%% kept as-is.
map_nested_values(#{ <<"@id">> := _ } = V) ->
    V;
map_nested_values(#{ <<"@value">> := _ } = V) ->
    case to_value(V) of
        error -> V;
        V1 -> V1
    end;
map_nested_values([ #{ <<"@language">> := _ } | _ ] = V) ->
    %% A list of language-tagged values collapses into one translation
    case to_value(V) of
        error -> V;
        V1 -> V1
    end;
map_nested_values(V) when is_list(V) ->
    lists:map(fun map_nested_values/1, V);
map_nested_values(V) when is_map(V) ->
    maps:fold(
        fun(K1, V1, Acc) ->
            Acc#{ K1 => map_nested_values(V1) }
        end,
        #{},
        V);
map_nested_values(V) ->
    V.
%% Parse a (possibly @value-wrapped, possibly listed) date into a
%% datetime; only the first element of a list is used. Returns 'error'
%% instead of raising, so map/4 can skip unparsable properties.
to_date([ #{ <<"@value">> := V } | _ ]) ->
    try z_datetime:to_datetime(V)
    catch _:_ -> error
    end;
to_date(#{ <<"@value">> := V }) ->
    try z_datetime:to_datetime(V)
    catch _:_ -> error
    end;
to_date([ V | _ ]) when is_binary(V) ->
    try z_datetime:to_datetime(V)
    catch _:_ -> error
    end;
to_date(V) when is_binary(V) ->
    try z_datetime:to_datetime(V)
    catch _:_ -> error
    end;
to_date(_) ->
    error.
%% @doc Translate the given value to a simpler value. Unlike to_value/1
%% this falls back to the raw @value payload when conversion fails.
-spec to_simple_value( map() ) -> error | term().
to_simple_value(#{ <<"@value">> := Val } = V) when is_binary(Val); is_number(Val); is_boolean(Val) ->
    case to_value(V) of
        error -> Val;
        V1 -> V1
    end;
to_simple_value(V) ->
    to_value(V).
%% @doc Map all values in the RDF doc to values we can handle in the system.
%% Walks maps and lists recursively; values that fail conversion are
%% kept unchanged.
map_values(#{ <<"@language">> := _ } = V) ->
    case to_value(V) of
        error -> V;
        V1 -> V1
    end;
map_values([ #{ <<"@language">> := _ } | _ ] = V) ->
    case to_value(V) of
        error -> V;
        V1 -> V1
    end;
map_values([ #{ <<"@value">> := _ } | _ ] = V) ->
    case to_value(V) of
        error -> V;
        V1 -> V1
    end;
map_values(Doc) when is_map(Doc) ->
    maps:fold(
        fun(K, V, Acc) ->
            Acc#{ K => map_values(V) }
        end,
        #{},
        Doc);
map_values(L) when is_list(L) ->
    lists:map(fun map_values/1, L);
map_values(V) ->
    V.
%% Convert a JSON-LD value term to an Erlang value, using the @type
%% annotation where present. Returns 'error' when conversion fails.
% See https://www.w3.org/TR/xmlschema-2/#built-in-datatypes
to_value(V) when is_binary(V); is_number(V); is_boolean(V) ->
    V;
to_value(null) ->
    undefined;
to_value(undefined) ->
    undefined;
to_value(#{
    <<"@id">> := URI
}) ->
    %% A reference is simplified to its URI
    try z_convert:to_binary(URI)
    catch _:_ -> error
    end;
to_value(#{
    <<"@value">> := V,
    <<"@type">> := <<"xsd:integer">>
}) ->
    try z_convert:to_integer(V)
    catch _:_ -> error
    end;
to_value(#{
    <<"@value">> := V,
    <<"@type">> := <<"xsd:boolean">>
}) ->
    try z_convert:to_bool(V)
    catch _:_ -> error
    end;
to_value(#{
    <<"@value">> := V,
    <<"@type">> := <<"xsd:float">>
}) ->
    try z_convert:to_float(V)
    catch _:_ -> error
    end;
to_value(#{
    <<"@value">> := V,
    <<"@type">> := <<"xsd:double">>
}) ->
    try z_convert:to_float(V)
    catch _:_ -> error
    end;
to_value(#{
    <<"@value">> := V,
    <<"@type">> := <<"xsd:datetime">>
}) ->
    %% NOTE(review): XSD spells this type "xsd:dateTime" (capital T);
    %% confirm the producing side really emits the lowercase form
    %% matched here.
    to_date(V);
to_value(#{
    <<"@language">> := Lang,
    <<"@value">> := V
}) ->
    %% A single language-tagged string becomes a one-entry translation
    case z_language:to_language_atom(Lang) of
        {ok, LangAtom} ->
            try
                V1 = z_convert:to_binary(V),
                #trans{ tr = simplify_langs([ {LangAtom, V1} ])}
            catch _:_ ->
                error
            end;
        {error, _} ->
            error
    end;
to_value([ #{ <<"@language">> := _ } | _ ] = Vs) ->
    %% A list of language-tagged strings: keep the convertible ones
    Trans = lists:filtermap(
        fun
            (#{
                <<"@language">> := Lang,
                <<"@value">> := V
            }) ->
                case z_language:to_language_atom(Lang) of
                    {ok, LangAtom} ->
                        try
                            V1 = z_convert:to_binary(V),
                            {true, {LangAtom, V1}}
                        catch _:_ ->
                            false
                        end;
                    {error, _} ->
                        false
                end;
            (_) ->
                false
        end,
        Vs),
    #trans{ tr = simplify_langs(Trans) };
to_value(_) ->
    error.
%% @doc Cleanup languages, map "en-gb" to "en" if there is no base
%% version of that language in the input. The fold prepends, so the
%% result order is the reverse of the input order.
simplify_langs(Tr) ->
    lists:foldl(
        fun({Iso, V}, Acc) ->
            case atom_to_binary(Iso, utf8) of
                <<A, B, $-, _/binary>> ->
                    Base = binary_to_atom(<<A, B>>, utf8),
                    %% Bug fix: this used lists:member(Base, Acc), which
                    %% could never match because Acc holds {Lang, Text}
                    %% pairs — regional tags were therefore always
                    %% rewritten to the base language, duplicating any
                    %% existing base entry. Check the key column of the
                    %% full input instead.
                    case lists:keymember(Base, 1, Tr) of
                        false -> [ {Base, V} | Acc ];
                        true -> [ {Iso, V} | Acc ]
                    end;
                _ ->
                    [ {Iso, V} | Acc ]
            end
        end,
        [],
        Tr).
%% RDF predicates whose values must be parsed as datetimes, mapped to
%% the Zotonic date property they populate.
mapping_dates() ->
    maps:from_list(
        [{<<"schema:dateCreated">>, <<"created">>},
         {<<"schema:dateModified">>, <<"modified">>},
         {<<"schema:datePublished">>, <<"publication_start">>},
         {<<"schema:startDate">>, <<"date_start">>},
         {<<"schema:endDate">>, <<"date_end">>},
         {<<"dcterms:created">>, <<"created">>},
         {<<"dcterms:modified">>, <<"modified">>}]).
%% RDF predicate -> Zotonic property name mapping for simple values.
%% (Trailing dataset-metadata garbage on the closing line removed.)
mapping() ->
    #{
        <<"rdf:type">> => <<"category_id">>,
        <<"schema:givenName">> => <<"name_first">>,
        <<"schema:familyName">> => <<"name_surname">>,
        <<"schema:telephone">> => <<"phone">>,
        <<"schema:license">> => <<"license">>,
        <<"schema:headline">> => <<"title">>,
        %% Bug fix: this entry was inverted (<<"schema:subtitle">> =>
        %% <<"alternativeHeadline">>). The schema.org property is
        %% "alternativeHeadline"; the Zotonic field is "subtitle".
        <<"schema:alternativeHeadline">> => <<"subtitle">>,
        <<"schema:text">> => <<"body">>,
        <<"schema:url">> => <<"website">>,
        <<"dcterms:title">> => <<"title">>,
        <<"dcterms:description">> => <<"summary">>,
        <<"dc:title">> => <<"title">>,
        <<"dc:description">> => <<"summary">>,
        <<"foaf:name">> => <<"title">>,
        <<"foaf:givenName">> => <<"name_first">>,
        <<"foaf:familyName">> => <<"name_surname">>,
        <<"foaf:firstName">> => <<"name_first">>,
        <<"foaf:lastName">> => <<"name_surname">>,
        <<"foaf:gender">> => <<"gender">>,
        <<"foaf:homepage">> => <<"website">>,
        <<"foaf:mbox">> => <<"email">>,
        <<"foaf:phone">> => <<"phone">>,
        <<"geo:lat">> => <<"location_lat">>,
        <<"geo:long">> => <<"location_lng">>
    }.
%%%----------------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright 2012 University of St Andrews (See LICENCE)
%%% @headerfile "skel.hrl"
%%%
%%% @doc This module contains the Map skeleton initialisation logic.
%%%
%%% The Map skeleton is a parallel map. The skeleton applies a given function
%%% to the elements within one or more lists.
%%%
%%% This implementation assumes a list of lists as input, where the
%%% decomposition of said input may be expressed as the identity function.
%%% Whilst this implementation of Map usually determines the number of worker
%%% processes it needs automatically, the developer may explicitly set this,
%%% as in {@link sk_farm}.
%%%
%%%
%%% === Example ===
%%%
%%% ```skel:do([{map, [{seq, fun ?MODULE:f/1}]}], Input).'''
%%%
%%% Here we use a Map skeleton to perform a function `f/1' over all
%%% elements for all lists represented by `Input'. Returned, we receive a
%%% list of lists the same as `Input' itself, bar that the elements of
%%% each are the result of their application to `f/1'.
%%%
%%% In this example we note that the number of worker processes the Map
%%% skeleton uses is determined by the length of the longest list in
%%% `Input'. To constrain, or otherwise set this value, we might add an
%%% extra term to the Map tuple.
%%%
%%% ```skel:do([{map, [{seq, fun ?MODULE:f/1}], 10}], Input).'''
%%%
%%% Using the same example, we now note that the number of worker
%%% processes used is set to ten. Performance comparisons between these
%%% two depends heavily on the chosen `Input', and the machine on which it
%%% runs.
%%%
%%% @end
%%%----------------------------------------------------------------------------
-module(sk_map).
-export([make/1, make/2, make_hyb/4]).
-include("skel.hrl").
-spec make(workflow()) -> maker_fun().
%% @doc Initialises an instance of the Map skeleton ready to receive inputs,
%% where the number of worker processes is automatic. The function or
%% functions to be applied to said inputs are given under `WorkFlow'.
%%
%% A combiner, or recomposition, process is created and acts as a sink for the
%% workers. These workers are generated and managed from within a
%% {@link sk_map_partitioner} process.
make(WorkFlow) ->
    fun(NextPid) ->
        %% The combiner recomposes partition results and forwards them
        %% to the next stage (NextPid) of the pipeline.
        CombinerPid = spawn(sk_map_combiner, start, [NextPid]),
        %% 'auto': the partitioner decides how many workers to spawn.
        spawn(sk_map_partitioner, start, [auto, WorkFlow, CombinerPid])
    end.
-spec make(workflow(), pos_integer()) -> maker_fun().
%% @doc Initialises an instance of the Map skeleton ready to receive inputs,
%% using a given number of worker processes. This number is specified under
%% `NWorkers', and the function or functions to be applied to any and all
%% inputs are given by `WorkFlow'.
%%
%% A combiner, or recomposition, process is created, and acts as a sink for
%% the workers. These workers are initialised with the specified workflow, and
%% their Pids passed to a {@link sk_map_partitioner} process.
make(WorkFlow, NWorkers) ->
    fun(NextPid) ->
        %% The combiner is told the fixed worker count up front
        CombinerPid = spawn(sk_map_combiner, start, [NextPid, NWorkers]),
        WorkerPids = sk_utils:start_workers(NWorkers, WorkFlow, CombinerPid),
        %% 'man': manual mode, the partitioner reuses these workers.
        spawn(sk_map_partitioner, start, [man, WorkerPids, CombinerPid])
    end.
-spec make_hyb(workflow(), workflow(), pos_integer(), pos_integer()) -> maker_fun().
%% @doc Initialises an instance of the Hybrid Map skeleton ready to receive inputs,
%% using a given number of CPU and GPU worker processes. These numbers are specified under
%% `NCPUWorkers' and `NGPUWorkers', and the CPU and GPU versions of the function
%% to be applied to inputs are given by `WorkFlowCPU' and `WorkFlowGPU'.
%%
%% A combiner, or recomposition, process is created, and acts as a sink for
%% the workers. These workers are initialised with the specified workflow, and
%% their Pids passed to a {@link sk_map_partitioner} process.
make_hyb(WorkFlowCPU, WorkFlowGPU, NCPUWorkers, NGPUWorkers) ->
    fun(NextPid) ->
        %% The combiner expects results from the full worker population
        CombinerPid = spawn(sk_map_combiner, start, [NextPid, NCPUWorkers+NGPUWorkers]),
        {CPUWorkerPids, GPUWorkerPids} = sk_utils:start_workers_hyb(NCPUWorkers, NGPUWorkers, WorkFlowCPU, WorkFlowGPU, CombinerPid),
        spawn(sk_map_partitioner, start_hyb, [man, CPUWorkerPids, GPUWorkerPids, CombinerPid])
    end.
%make_hyb(WorkFlowCPU, WorkFlowGPU) ->
% fun(NextPid) ->
% CombinerPid = spawn(sk_map_combiner, start, [NextPid]),
% spawn(sk_map_partitioner, start, [auto, [{seq, fun(X) -> sk_utils:hyb_worker(WorkFlowCPU, WorkFlowGPU, X) end}],
% CombinerPid])
% end.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(porkrind_fun_calls).
-include("porkrind_internal.hrl").
-export([
calling/3,
does_not_raise/0,
raises/0,
raises/1,
raises/2
]).
%% Package an MFA as a tagged tuple instead of wrapping it in an
%% anonymous fun, so failed matches print readably in test output.
calling(Module, Function, Args)
  when is_atom(Module), is_atom(Function), is_list(Args) ->
    {apply, Module, Function, Args}.
%% Matcher asserting that a zero-arity fun (or a calling/3 tuple) runs
%% to completion; any throw/error/exit fails the match.
does_not_raise() ->
    #'porkrind.matcher'{
        name = does_not_raise,
        args = [],
        match = fun(Value) ->
            Fun = to_fun(Value),
            try Fun() of
                _ ->
                    ok
            catch T:R ->
                ?PR_FAIL({raised, Value, T, R})
            end
        end,
        %% Format a human-readable failure description
        reason = fun
            ({bad_fun, Value}) ->
                io_lib:format("~p is not a zero arity function", [Value]);
            ({raised, Value, T, R}) ->
                FunStr = format_fun(Value),
                Raised = format_exc(T, R),
                io_lib:format("~s raised ~s", [FunStr, Raised])
        end
    }.
%% Matcher asserting that evaluating the subject raises an exception.
%% raises/0 matches any exception; raises/1 constrains the class
%% (throw | error | exit); raises/2 additionally matches the reason,
%% either literally or against a nested matcher.
raises() ->
    raises('_', '_').
raises(Type) ->
    raises(Type, '_').
raises(Type, Reason)
  when Type == throw; Type == error; Type == exit; Type == '_' ->
    ReasonMatcher = porkrind_util:maybe_wrap(Reason),
    #'porkrind.matcher'{
        name = raises,
        args = [Type, Reason],
        match = fun(Value) ->
            Fun = to_fun(Value),
            try Fun() of
                _ ->
                    ?PR_FAIL({did_not_raise, Value})
            catch
                T:R when (T == Type orelse Type == '_') ->
                    %% Right class (or wildcard): reason must also match
                    case porkrind:check(R, ReasonMatcher) of
                        ok ->
                            ok;
                        {assertion_failed, _} ->
                            ?PR_FAIL({raised, Value, T, R})
                    end;
                T:R ->
                    %% Wrong exception class
                    ?PR_FAIL({raised, Value, T, R})
            end
        end,
        %% Format a human-readable failure description
        reason = fun
            ({bad_fun, Value}) ->
                io_lib:format("~p is not a zero arity function", [Value]);
            ({did_not_raise, Value}) ->
                FunStr = format_fun(Value),
                ExcStr = format_exc(Type, Reason),
                io_lib:format("~s did not raise ~s", [FunStr, ExcStr]);
            ({raised, Value, T, R}) ->
                FunStr = format_fun(Value),
                Raised = format_exc(T, R),
                ExpStr = format_exc(Type, Reason),
                io_lib:format("~s raised ~s, not ~s", [FunStr, Raised, ExpStr])
        end
    }.
%% Render the test subject for error messages: a calling/3 tuple shows
%% its literal args, a fun shows module:name/arity from fun_info.
format_fun({apply, Mod, Fun, Args}) ->
    porkrind_util:mfa_to_string(Mod, Fun, Args);
format_fun(Fun) ->
    Info = erlang:fun_info(Fun),
    {module, Mod} = lists:keyfind(module, 1, Info),
    {name, Name} = lists:keyfind(name, 1, Info),
    {arity, Arity} = lists:keyfind(arity, 1, Info),
    porkrind_util:mfa_to_string(Mod, Name, Arity).
%% Describe an exception specification ('_' acting as a wildcard for
%% class and/or reason) for use in failure messages.
format_exc('_', '_') ->
    "any exception";
format_exc(Type, '_') ->
    io_lib:format("a ~w exception", [Type]);
format_exc(Type, Reason) when ?IS_MATCHER(Reason) ->
    %% Nested matcher: delegate the reason description to porkrind
    Prefix = io_lib:format("~w with reason matching ", [Type]),
    [Prefix, porkrind:describe(Reason)];
format_exc(Type, Reason) ->
    io_lib:format("~w:~w", [Type, Reason]).
%% Normalize a test subject to a zero-arity fun. Accepts either the
%% {apply, M, F, A} tuple produced by calling/3 or a plain fun of
%% arity 0; anything else fails the assertion with a bad_fun reason.
%% (Trailing dataset-metadata garbage on the final line removed.)
to_fun({apply, Mod, Fun, Args}) ->
    fun() -> erlang:apply(Mod, Fun, Args) end;
to_fun(Fun) when is_function(Fun, 0) ->
    Fun;
to_fun(Else) ->
    ?PR_FAIL({bad_fun, Else}).
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc This module has the behaviour that each exporter must implement
%% and creates the buffer of trace spans to be exported.
%%
%% The exporter process can be configured to export the current finished
%% spans based on timeouts and the size of the finished spans table.
%%
%% Timeouts:
%% exporting_timeout_ms: How long to let the exports run before killing.
%% check_table_size_ms: Timeout to check the size of the export table.
%% scheduled_delay_ms: How often to trigger running the exporters.
%%
%% The size limit of the current table where finished spans are stored can
%% be configured with the `max_queue_size' option.
%% @end
%%%-----------------------------------------------------------------------
-module(ot_exporter).
-behaviour(gen_statem).
-compile({no_auto_import, [register/2]}).
-export([start_link/1,
store_span/1,
register/1,
register/2]).
-export([init/1,
callback_mode/0,
idle/3,
exporting/3,
terminate/3]).
-include("opentelemetry.hrl").
-include_lib("kernel/include/logger.hrl").
%% behaviour for exporters to implement
-type opts() :: term().
%% Do any initialization of the exporter here and return configuration
%% that will be passed along with a list of spans to the `export' function.
-callback init(term()) -> opts().
%% This function is called when the configured interval expires with any
%% spans that have been collected so far and the configuration returned in `init'.
%% Do whatever needs to be done to export each span here, the caller will block
%% until it returns.
-callback export(ets:tid(), opts()) -> ok | success | failed_not_retryable | failed_retryable.
-record(data, {exporters :: [{module(), term()}],
handed_off_table :: atom() | undefined,
runner_pid :: pid() | undefined,
max_queue_size :: integer() | infinity,
exporting_timeout_ms :: integer(),
check_table_size_ms :: integer() | infinity,
scheduled_delay_ms :: integer()}).
-define(CURRENT_TABLES_KEY, {?MODULE, current_table}).
-define(TABLE_1, ot_export_table1).
-define(TABLE_2, ot_export_table2).
-define(CURRENT_TABLE, persistent_term:get(?CURRENT_TABLES_KEY)).
-define(DEFAULT_MAX_QUEUE_SIZE, 2048).
-define(DEFAULT_SCHEDULED_DEPLAY_MS, timer:seconds(5)).
-define(DEFAULT_EXPORTING_TIMEOUT, timer:minutes(5)).
-define(DEFAULT_CHECK_TABLE_SIZE_INTERVAL, infinity).
%% Start the exporter state machine, registered locally as ?MODULE.
start_link(Opts) ->
    gen_statem:start_link({local, ?MODULE}, ?MODULE, [Opts], []).
%% @equiv register(Exporter, []).
register(Exporter) ->
    register(Exporter, []).
%% @doc Register new traces exporter `Exporter' with `Config'.
%% Note: init_exporter/1 runs in the caller's process (it is evaluated
%% before the gen_statem:call is made).
-spec register(module(), term()) -> ok.
register(Exporter, Options) ->
    gen_statem:call(?MODULE, {register, init_exporter({Exporter, Options})}).
%% Store a finished span in the current export table. Returns
%% {error, no_export_buffer} when the table does not exist (yet), and
%% {error, invalid_span} for anything that is not a #span{} record.
-spec store_span(opencensus:span()) -> true | {error, invalid_span} | {error, no_export_buffer}.
store_span(Span=#span{}) ->
    try
        ets:insert(?CURRENT_TABLE, Span)
    catch
        %% ets:insert raises badarg when the named table is missing
        error:badarg ->
            {error, no_export_buffer}
    end;
store_span(_) ->
    {error, invalid_span}.
%% gen_statem init: read configuration, initialize configured
%% exporters, create both span buffer tables, and point the
%% persistent_term current-table key at the first one.
init([Args]) ->
    %% Trap exits so runner 'EXIT' messages arrive as info events
    process_flag(trap_exit, true),
    SizeLimit = proplists:get_value(max_queue_size, Args, ?DEFAULT_MAX_QUEUE_SIZE),
    ExportingTimeout = proplists:get_value(exporting_timeout_ms, Args, ?DEFAULT_EXPORTING_TIMEOUT),
    ScheduledDelay = proplists:get_value(scheduled_delay_ms, Args, ?DEFAULT_SCHEDULED_DEPLAY_MS),
    CheckTableSize = proplists:get_value(check_table_size_ms, Args, ?DEFAULT_CHECK_TABLE_SIZE_INTERVAL),
    Exporters = [init_exporter(Config) || Config <- proplists:get_value(exporters, Args, [])],
    _Tid1 = new_export_table(?TABLE_1),
    _Tid2 = new_export_table(?TABLE_2),
    persistent_term:put(?CURRENT_TABLES_KEY, ?TABLE_1),
    {ok, idle, #data{exporters=Exporters,
                     handed_off_table=undefined,
                     %% ets:info(_, memory) reports words; convert the
                     %% configured limit accordingly — TODO confirm the
                     %% config option's intended unit (bytes?)
                     max_queue_size=case SizeLimit of
                                        infinity -> infinity;
                                        _ -> SizeLimit div erlang:system_info(wordsize)
                                    end,
                     exporting_timeout_ms=ExportingTimeout,
                     check_table_size_ms=CheckTableSize,
                     scheduled_delay_ms=ScheduledDelay}}.
%% gen_statem configuration: one callback function per state, plus
%% state-enter calls so each state can schedule its own timers.
callback_mode() ->
    [state_functions, state_enter].
%% idle state: on entry, schedule the next export; when the
%% export_spans timer fires, move to the exporting state.
idle(enter, _OldState, #data{scheduled_delay_ms=SendInterval}) ->
    {keep_state_and_data, [{{timeout, export_spans}, SendInterval, export_spans}]};
idle(_, export_spans, Data) ->
    {next_state, exporting, Data};
idle(EventType, Event, Data) ->
    %% Everything else is handled by the shared event handler
    handle_event_(idle, EventType, Event, Data).
%% exporting state: hand off the current table to a runner process and
%% wait for it to finish (or time out). A new export request arriving
%% while one is in flight is postponed until we return to idle.
exporting({timeout, export_spans}, export_spans, _) ->
    {keep_state_and_data, [postpone]};
exporting(enter, _OldState, Data=#data{exporting_timeout_ms=ExportingTimeout,
                                       scheduled_delay_ms=SendInterval}) ->
    {OldTableName, RunnerPid} = export_spans(Data),
    {keep_state, Data#data{runner_pid=RunnerPid,
                           handed_off_table=OldTableName},
     [{state_timeout, ExportingTimeout, exporting_timeout},
      {{timeout, export_spans}, SendInterval, export_spans}]};
exporting(state_timeout, exporting_timeout, Data=#data{handed_off_table=ExportingTable}) ->
    %% kill current exporting process because it is taking too long
    %% which deletes the exporting table, so create a new one and
    %% repeat the state to force another span exporting immediately
    Data1 = kill_runner(Data),
    new_export_table(ExportingTable),
    {repeat_state, Data1};
%% important to verify runner_pid and FromPid are the same in case it was sent
%% after kill_runner was called but before it had done the unlink
exporting(info, {'EXIT', FromPid, _}, Data=#data{runner_pid=FromPid}) ->
    complete_exporting([], Data);
%% important to verify runner_pid and FromPid are the same in case it was sent
%% after kill_runner was called but before it had done the unlink
exporting(info, {completed, FromPid, FailedExporters}, Data=#data{runner_pid=FromPid}) ->
    complete_exporting(FailedExporters, Data);
exporting(EventType, Event, Data) ->
    handle_event_(exporting, EventType, Event, Data).
%% Events common to both states: periodic table-size checks and
%% exporter registration.
handle_event_(_State, {timeout, check_table_size}, check_table_size, #data{max_queue_size=infinity}) ->
    keep_state_and_data;
handle_event_(State, {timeout, check_table_size}, check_table_size, Data=#data{max_queue_size=SizeLimit}) ->
    %% NOTE(review): kill_runner/1 unlinks runner_pid, which is
    %% undefined while idle — this branch would badarg if the
    %% check_table_size timer were armed. No code visible in this
    %% module arms it; confirm before enabling.
    case ets:info(?CURRENT_TABLE, memory) of
        M when M >= SizeLimit, State =:= idle ->
            Data1 = kill_runner(Data),
            {next_state, exporting, Data1};
        M when M >= SizeLimit, State =:= exporting ->
            %% Over the limit mid-export: restart the export cycle
            Data1 = kill_runner(Data),
            {repeat_state, Data1};
        _ ->
            keep_state_and_data
    end;
handle_event_(_, {call, From}, {register, Exporter}, Data=#data{exporters=Exporters}) ->
    {keep_state, Data#data{exporters=[Exporter | Exporters]}, [{reply, From, ok}]};
handle_event_(_, _, _, _) ->
    %% Drain anything unexpected so the mailbox cannot grow
    keep_state_and_data.
%% Nothing to clean up explicitly: the export tables and any runner
%% die together with this process.
terminate(_Reason, _State, _Data) ->
    ok.
%%
%% The runner finished (or died): drop the exporters it reported as
%% failed, recreate the table that was handed off, and go idle.
complete_exporting(FailedExporters, Data=#data{exporters=Exporters,
                                               handed_off_table=ExportingTable})
  when ExportingTable =/= undefined ->
    new_export_table(ExportingTable),
    {next_state, idle, Data#data{exporters=Exporters--FailedExporters,
                                 runner_pid=undefined,
                                 handed_off_table=undefined}}.
%% Unlink and kill the current runner process. Only valid while a
%% runner is alive (runner_pid =/= undefined): erlang:unlink/1 on
%% undefined raises badarg.
kill_runner(Data=#data{runner_pid=RunnerPid}) ->
    %% Unlink first so the 'EXIT' from the kill is not delivered to us
    erlang:unlink(RunnerPid),
    erlang:exit(RunnerPid, kill),
    Data#data{runner_pid=undefined,
              handed_off_table=undefined}.
%% Create a named public duplicate_bag table for buffering spans;
%% write_concurrency lets many processes insert in parallel.
new_export_table(Name) ->
    ets:new(Name, [public, named_table, {write_concurrency, true}, duplicate_bag]).
%% Normalize an exporter spec to {ExportFun, Config}. Module-based
%% exporters have their init/1 callback run here; plain funs are
%% passed through unchanged.
init_exporter({Exporter, Config}) when is_atom(Exporter) ->
    {fun Exporter:export/2, Exporter:init(Config)};
init_exporter(Exporter) when is_atom(Exporter) ->
    {fun Exporter:export/2, Exporter:init([])};
init_exporter(Exporter) when is_function(Exporter) ->
    {Exporter, []};
init_exporter({Exporter, Config}) when is_function(Exporter) ->
    {Exporter, Config}.
%% Swap the current table (so new spans go to the other buffer), spawn
%% a linked runner, and hand the filled table over to it via
%% ets:give_away/3. Returns {HandedOffTable, RunnerPid}.
export_spans(#data{exporters=Exporters}) ->
    CurrentTable = ?CURRENT_TABLE,
    NewCurrentTable = case CurrentTable of
                          ?TABLE_1 ->
                              ?TABLE_2;
                          ?TABLE_2 ->
                              ?TABLE_1
                      end,
    %% an atom is a single word so this does not trigger a global GC
    persistent_term:put(?CURRENT_TABLES_KEY, NewCurrentTable),
    Self = self(),
    RunnerPid = erlang:spawn_link(fun() -> send_spans(Self, Exporters) end),
    ets:give_away(CurrentTable, RunnerPid, export),
    {CurrentTable, RunnerPid}.
%% Additional benefit of using a separate process is calls to `register` won't
%% timeout if the actual exporting takes longer than the call timeout
send_spans(FromPid, Exporters) ->
receive
{'ETS-TRANSFER', Table, FromPid, export} ->
TableName = ets:rename(Table, current_send_table),
FailedExporters = lists:filtermap(fun({Exporter, Config}) ->
export(Exporter, TableName, Config)
end, Exporters),
ets:delete(TableName),
completed(FromPid, FailedExporters)
end.
%% Notify the exporter statem that this runner is done, reporting the
%% exporters that must be dropped.
completed(FromPid, FailedExporters) ->
    erlang:send(FromPid, {completed, self(), FailedExporters}).
%% Run one exporter over the spans table. Returns true when the
%% exporter must be dropped (it raised, or reported
%% failed_not_retryable) and false when it may be kept — matching
%% lists:filtermap/2 usage in send_spans/2. (Trailing dataset-metadata
%% garbage on the final line removed.)
export(undefined, _, _) ->
    true;
export(Exporter, SpansTid, Config) ->
    %% don't let an exporter exception crash us,
    %% and return true if the exporter failed
    try
        Exporter(SpansTid, Config) =:= failed_not_retryable
    catch
        Class:Exception:StackTrace ->
            ?LOG_INFO("dropping exporter that threw exception: exporter=~p ~p:~p stacktrace=~p",
                      [Exporter, Class, Exception, StackTrace]),
            true
    end.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2012 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(verify_claimant).
-behavior(riak_test).
-export([confirm/0]).
-include_lib("eunit/include/eunit.hrl").
-import(rt, [build_cluster/1,
start/1,
stop/1,
down/2,
claimant_according_to/1,
wait_until_unpingable/1,
wait_until_ring_converged/1,
status_of_according_to/2,
wait_until_nodes_ready/1]).
%% Exercise claimant failover on a 3-node cluster: stop the claimant,
%% mark it down, verify claimancy moves to the next node, then restart
%% the old claimant and verify it rejoins without reclaiming.
%% (Trailing dataset-metadata garbage on the final line removed.)
confirm() ->
    Nodes = build_cluster(3),
    [Node1, Node2, _Node3] = Nodes,
    %% Ensure all nodes believe node1 is the claimant
    lager:info("Ensure all nodes believe ~p is the claimant", [Node1]),
    [?assertEqual(Node1, claimant_according_to(Node)) || Node <- Nodes],
    %% Stop node1
    lager:info("Stop ~p", [Node1]),
    stop(Node1),
    ?assertEqual(ok, wait_until_unpingable(Node1)),
    %% Ensure all nodes still believe node1 is the claimant
    lager:info("Ensure all nodes still believe ~p is the claimant", [Node1]),
    Remaining = Nodes -- [Node1],
    [?assertEqual(Node1, claimant_according_to(Node)) || Node <- Remaining],
    %% Mark node1 as down and wait for ring convergence
    lager:info("Mark ~p as down", [Node1]),
    down(Node2, Node1),
    ?assertEqual(ok, wait_until_ring_converged(Remaining)),
    [?assertEqual(down, status_of_according_to(Node1, Node)) || Node <- Remaining],
    %% Ensure all nodes now believe node2 to be the claimant
    lager:info("Ensure all nodes now believe ~p is the claimant", [Node2]),
    [?assertEqual(Node2, claimant_according_to(Node)) || Node <- Remaining],
    %% Restart node1 and wait for ring convergence
    lager:info("Restart ~p and wait for ring convergence", [Node1]),
    start(Node1),
    ?assertEqual(ok, wait_until_nodes_ready([Node1])),
    ?assertEqual(ok, rt:wait_until_all_members(Nodes)),
    ?assertEqual(ok, wait_until_ring_converged(Nodes)),
    timer:sleep(1000),
    %% Ensure node has rejoined and is no longer down
    lager:info("Ensure ~p has rejoined and is no longer down", [Node1]),
    [?assertEqual(valid, status_of_according_to(Node1, Node)) || Node <- Nodes],
    %% Ensure all nodes still believe node2 is the claimant
    lager:info("Ensure all nodes still believe ~p is the claimant", [Node2]),
    [?assertEqual(Node2, claimant_according_to(Node)) || Node <- Nodes],
    pass.
%%------------------------------------------------------------------------------
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%%-----------------------------------------------------------------------------
%%
%% @author Infoblox Inc <<EMAIL>>
%% @copyright 2013 Infoblox Inc
%% @doc Tapestry. A Network Complexity Index Calculator
%%
-module(nci).
-export([compute_from_communities/1]).
-include("tap_logger.hrl").
%% === compute_from_communities ===
%% Compute the NCI from a list of communities
%%
%% Input: list where each element of the list of a tuple of two elements,
%% the Community and the number of vertices in that community.
%%
%% Output: The Network Complexity Index for the input graph described in
%% <NAME>. A Network Complexity Index for Networks of
%% Networks. 2013
%% http://www.flowforwarding.org/docs/Bailey%20-%20Grossman%20article%20on%20network%20complexity.pdf
%%
%% @doc Compute the NCI from an (unordered) list of {Community, Size} pairs.
compute_from_communities(C) ->
    %% At this point in the code C refers to an unordered list of community
    %% names with associated sizes.
    %% For example if C = [{a,10},{f,3},{c,55}], it is a representation
    %% of three communities a, f, and c
    %% which have 10, 3, and 55 members respectively
    SC = ?LOGDURATION(rev_sort_labels(C)),
    %% At this point, SC refers to a sorted list of communities with
    %% sizes which have been sorted by size
    %% such that the largest communities are at the head of the list.
    %% Using the example C above, SC = [{c,55},{a,10},{f,3}].
    %% Compute NCI from the sorted list of communities.
    ?LOGDURATION(calc_nci(SC, 0)).
%%
%% WARNING: Recursive function
%%
%% calc_nci(SortedList,NCI) is a recursive internal function called by
%% the non-recursive calc_nci(LablelGraph) function above
%%
%% This is a simple algorithm which just recursively walks a sorted
%% list of {community,size} elements and returns the balance point
%% between sizes and the numbers of the communities.
%%
%% For example the input list of
%% [{b,100},{c,100},{a,50},{z,43},{f,20},{g,3},{x,3},{p,2}]
%% returns 5: "There are exactly 5 communities that are
%% greater than or equal to 5 in size."
%%
%% Input: A sorted list of communities with sizes which have been
%% sorted by size such that the largest communities are at the
%% head of the list. See example above.
%%
%% Output: The Network Complexity Index for the input graph described in
%% <NAME>. A Network Complexity Index for Networks of
%% Networks. 2013
%% http://www.flowforwarding.org/docs/Bailey%20-%20Grossman%20article%20on%20network%20complexity.pdf
%%
-spec calc_nci(SortedList :: list(), Count :: integer()) -> NCI :: integer().
%% Walk the size-descending list of {Community, Size} pairs, incrementing
%% the count until the count is at least as large as the size of the next
%% community; that balance point is the NCI.
%% (Rewritten idiomatically: the head destructuring and boolean `case`
%% are replaced by pattern matching in the clause heads plus a guard;
%% the spec no longer reuses the name `NCI` for both argument and result.)
calc_nci([], NCI) ->
    NCI;
calc_nci([{_Community, Size} | _Rest], NCI) when NCI >= Size ->
    %% NCI communities of size >= NCI have been counted: balance point found
    NCI;
calc_nci([_Head | Rest], NCI) ->
    calc_nci(Rest, NCI + 1).
%% Sort the {Community, Size} list into descending order of community size.
%% (Fix: stray dataset metadata appended after the closing `.` made the
%% module uncompilable; it has been removed. A spec has been added.)
-spec rev_sort_labels([{term(), number()}]) -> [{term(), number()}].
rev_sort_labels(List) ->
    lists:sort(fun({_, Size1}, {_, Size2}) -> Size1 > Size2 end, List).
%% @doc Implements a data structure for cryptographically signed transactions.
%% This is the envelope around transactions to make them cryptographically safe.
%% The transactions normally also have keys of the "signers" in the transaction,
%% which are extracted using the signers/1 function in the respective transaction
%% handler.
%%
%% The purpose of this module is to provide an API for cryptograpically signed
%% transactions and hide all implementation details. Therefore, the record
%% #signed_tx{} should be kept private and considered an abstract type.
%%
%% A transaction can be signed by one or several signers. Each transaction can
%% determine its own signers by the transaction callback 'signers'. Since we do not
%% want to depend upon transaction types in this module, the user of
%% {@module} should first obtain the signers of the transaction and then call this
%% {@link sign/2} with these signers. There is a {@link sign/3} function that can sign
%% with respect to a certain block height. This is handy whenever the governance
%% variables on what crypto to use would change.
-module(aetx_sign).
%% API
-export([new/2,
hash/1,
add_signatures/2,
tx/1,
innermost_tx/1,
verify/3,
verify_w_env/3,
verify_half_signed/3,
verify_one_pubkey/3,
from_db_format/1,
signatures/1]).
%% API that should be avoided to be used
-export([serialize_for_client/2,
serialize_for_client_pending/1,
serialize_for_client_inner/2,
meta_data_from_client_serialized/1,
serialize_to_binary/1,
deserialize_from_binary/1]).
-ifdef(TEST).
-export([set_tx/2]).
-endif.
-export_type([signed_tx/0,
binary_signed_tx/0]).
-include("../../aecore/include/blocks.hrl").
-include("../../aecontract/include/hard_forks.hrl").
-record(signed_tx,
{ tx :: aetx:tx(),
signatures = ordsets:new() :: ordsets:ordset(binary()) }).
-opaque signed_tx() :: #signed_tx{}.
-type binary_signed_tx() :: binary().
-define(VALID_PUBK(K), byte_size(K) =:= 32).
-spec new(aetx:tx(), [binary()]) -> signed_tx().
%% @doc Construct a signed transaction from a transaction and a list of
%% binary signatures; signatures are deduplicated and stored sorted.
%% NOTE(review): the first clause accepts an already-serialised binary and
%% returns a plain {Bin, Sigs} tuple rather than a #signed_tx{} record, and
%% it skips assert_sigs_size/1 - this is inconsistent with the -spec above;
%% confirm whether any caller relies on the binary form.
new(Bin, Signatures) when is_binary(Bin) ->
    true = lists:all(fun is_binary/1, Signatures),
    {Bin, lists:usort(Signatures)};
new(Tx, Signatures) ->
    %% Crashes unless Tx is a valid aetx:tx() (assertive style)
    _ = aetx:specialize_type(Tx),
    true = lists:all(fun is_binary/1, Signatures),
    assert_sigs_size(Signatures),
    #signed_tx{tx = Tx, signatures = lists:usort(Signatures)}.
-spec hash(signed_tx()) -> binary().
%% @doc Hash of the canonical binary serialisation of the signed tx.
hash(#signed_tx{} = SignedTx) ->
    SerialisedTx = serialize_to_binary(SignedTx),
    aec_hash:hash(signed_tx, SerialisedTx).
-spec add_signatures(signed_tx(), list(binary())) -> signed_tx().
%% @doc Merge new signatures (size-checked) into the existing, keeping the
%% stored set sorted and deduplicated.
add_signatures(#signed_tx{signatures = Existing} = SignedTx, NewSigs)
  when is_list(NewSigs) ->
    assert_sigs_size(NewSigs),
    Combined = lists:usort(NewSigs ++ Existing),
    SignedTx#signed_tx{signatures = Combined}.
-spec tx(signed_tx()) -> aetx:tx().
%% @doc Get the original transaction from a signed transaction.
%% Note that no verification is performed, it just returns the transaction.
%% We have no type yet for any transaction, and spend_tx()
%% seems restricted as type.
tx(#signed_tx{tx = Tx}) ->
    Tx.
-spec innermost_tx(signed_tx()) -> aetx:tx().
%% @doc Unwrap nested generalized-account meta transactions until the
%% innermost (non-meta) transaction is reached.
innermost_tx(SignedTx) ->
    Tx = aetx_sign:tx(SignedTx),
    case aetx:specialize_callback(Tx) of
        {aega_meta_tx, MetaTx} ->
            innermost_tx(aega_meta_tx:tx(MetaTx));
        {_OtherCallback, _InnerTx} ->
            Tx
    end.
-spec from_db_format(tuple()) -> signed_tx().
%% @doc Upgrade the inner transaction from its database representation,
%% returning the record unchanged when no conversion was needed.
from_db_format(#signed_tx{tx = Tx} = SignedTx) ->
    case aetx:from_db_format(Tx) of
        Tx ->
            %% Already in the current format
            SignedTx;
        ConvertedTx ->
            SignedTx#signed_tx{tx = ConvertedTx}
    end.
%% @doc Get the signatures of a signed transaction.
%% The stored list is sorted and deduplicated (see new/2, add_signatures/2).
-spec signatures(signed_tx()) -> list(binary()).
signatures(#signed_tx{signatures = Sigs}) ->
    Sigs.
-spec verify_w_env(signed_tx(), aec_trees:trees(), aetx_env:env()) ->
    ok | {error, signature_verification_failed}.
%% @doc Verify the signatures against the signers required by the inner tx,
%% first discounting any ids already authenticated via generalized accounts
%% in the tx environment.
verify_w_env(#signed_tx{tx = Tx, signatures = Sigs}, Trees, TxEnv) ->
    Bin = aetx:serialize_to_binary(Tx),
    Height = aetx_env:height(TxEnv),
    case aetx:signers(Tx, Trees) of
        {ok, Signers} ->
            %% GA-authenticated ids do not need a plain signature
            RemainingSigners = Signers -- aetx_env:ga_auth_ids(TxEnv),
            verify_signatures(RemainingSigners, Bin, Sigs, Height);
        {error, _Reason} ->
            {error, signature_verification_failed}
    end.
%% this function is strict and does not allow having more signatures than
%% the ones being checked
-spec verify(signed_tx(), aec_trees:trees(), aec_blocks:height()) ->
    ok | {error, signature_check_failed}.
verify(#signed_tx{tx = Tx, signatures = Sigs}, Trees, Height) ->
    Bin = aetx:serialize_to_binary(Tx),
    case aetx:signers(Tx, Trees) of
        {ok, Signers} ->
            verify_signatures(Signers, Bin, Sigs, Height);
        {error, _Reason} ->
            {error, signature_check_failed}
    end.
%% this function allows having more signatures than the one being checked
-spec verify_one_pubkey(aec_keys:pubkey(), signed_tx(), aec_blocks:height()) ->
    ok | {error, signature_check_failed}.
verify_one_pubkey(Signer, #signed_tx{tx = Tx, signatures = Sigs}, Height) ->
    Bin = aetx:serialize_to_binary(Tx),
    case verify_one_pubkey(Sigs, Signer, Bin, Height) of
        {ok, _} -> ok;
        error -> {error, signature_check_failed}
    end.
%% @doc Verify that every given signer has a valid signature over the tx;
%% strict - no signatures may remain unmatched (see verify_signatures/4).
-spec verify_half_signed(aec_keys:pubkey() | [aec_keys:pubkey()],
                         signed_tx(), aec_blocks:height()) ->
    ok | {error, signature_check_failed}.
verify_half_signed(Signer, SignedTx, Height) when is_binary(Signer) ->
    verify_half_signed([Signer], SignedTx, Height);
verify_half_signed(Signers, #signed_tx{tx = Tx, signatures = Sigs}, Height) ->
    verify_signatures(Signers, aetx:serialize_to_binary(Tx), Sigs, Height).
%% Succeeds only when the signer list and the signature list are exhausted
%% together; the final clause catches leftover signatures (logged at debug).
verify_signatures([], _Bin, [], _Height) ->
    ok;
verify_signatures([PubKey|Left], Bin, Sigs, Height) ->
    case verify_one_pubkey(Sigs, PubKey, Bin, Height) of
        {ok, SigsLeft} -> verify_signatures(Left, Bin, SigsLeft, Height);
        error -> {error, signature_check_failed}
    end;
verify_signatures(PubKeys,_Bin, Sigs, _Height) ->
    lager:debug("Signature check failed: ~p ~p", [PubKeys, Sigs]),
    {error, signature_check_failed}.
%% From the LIMA protocol version onwards a signature over the tx hash is
%% also accepted (HashSign), in addition to one over the full serialised tx.
verify_one_pubkey(Sigs, PubKey, Bin, Height) when ?VALID_PUBK(PubKey) ->
    Protocol = aec_hard_forks:protocol_effective_at_height(Height),
    HashSign = Protocol >= ?LIMA_PROTOCOL_VSN,
    verify_one_pubkey(Sigs, PubKey, Bin, HashSign, []);
verify_one_pubkey(_Sigs, _PubKey, _Bin, _Height) ->
    error. %% invalid pubkey
%% Try each signature in turn; on success return {ok, RemainingSigs} so the
%% caller can continue matching the remaining signers against them.
verify_one_pubkey([Sig|Left], PubKey, Bin, HashSign, Acc) ->
    BinForNetwork = aec_governance:add_network_id(Bin),
    case enacl:sign_verify_detached(Sig, BinForNetwork, PubKey) of
        {ok, _} ->
            {ok, Acc ++ Left};
        {error, _} when HashSign ->
            %% Fall back to verifying against the tx hash (post-LIMA)
            TxHash = aec_hash:hash(signed_tx, Bin),
            BinForNetwork2 = aec_governance:add_network_id(TxHash),
            case enacl:sign_verify_detached(Sig, BinForNetwork2, PubKey) of
                {ok, _} -> {ok, Acc ++ Left};
                {error, _} -> verify_one_pubkey(Left, PubKey, Bin, HashSign, [Sig | Acc])
            end;
        {error, _} ->
            verify_one_pubkey(Left, PubKey, Bin, HashSign, [Sig|Acc])
    end;
verify_one_pubkey([], _PubKey, _Bin, _HashSign, _Acc) -> % no more signatures
    error.
-define(SIG_TX_TYPE, signed_tx).
-define(SIG_TX_VSN, 1).
%% deterministic canonical serialization.
-spec serialize_to_binary(signed_tx()) -> binary_signed_tx().
%% @doc Serialize the signed transaction (signatures sorted) to the
%% canonical binary form used for hashing and storage.
serialize_to_binary(#signed_tx{tx = Tx, signatures = Sigs}) ->
    %% TODO: The original binary should be kept
    %% around since that is what was signed
    aeser_chain_objects:serialize(
      ?SIG_TX_TYPE,
      ?SIG_TX_VSN,
      serialization_template(?SIG_TX_VSN),
      [ {signatures, lists:sort(Sigs)}
      , {transaction, aetx:serialize_to_binary(Tx)}
      ]).
-spec deserialize_from_binary(binary()) -> signed_tx().
%% @doc Inverse of serialize_to_binary/1; signature sizes are asserted
%% before the record is rebuilt.
deserialize_from_binary(SignedTxBin) when is_binary(SignedTxBin) ->
    [ {signatures, Sigs}
    , {transaction, TxBin}
    ] = aeser_chain_objects:deserialize(
          ?SIG_TX_TYPE,
          ?SIG_TX_VSN,
          serialization_template(?SIG_TX_VSN),
          SignedTxBin),
    assert_sigs_size(Sigs),
    #signed_tx{ tx = aetx:deserialize_from_binary(TxBin)
              , signatures = Sigs
              }.
%% Field layout for version 1 of the serialization format.
serialization_template(?SIG_TX_VSN) ->
    [ {signatures, [binary]}
    , {transaction, binary}
    ].
-spec serialize_for_client(aec_headers:header(), aetx_sign:signed_tx()) -> binary() | map().
%% @doc Client serialisation for a tx included in a block: annotate with the
%% block's height and hash, plus the tx hash.
serialize_for_client(Header, #signed_tx{} = SignedTx) ->
    {ok, BlockHash} = aec_headers:hash_header(Header),
    Height = aec_headers:height(Header),
    TxHash = hash(SignedTx),
    serialize_for_client(SignedTx, Height, BlockHash, TxHash).
-spec serialize_for_client_pending(aetx_sign:signed_tx()) -> binary() | map().
%% @doc Client serialisation for a tx not yet in a block: height -1 and an
%% empty block hash mark it as pending.
serialize_for_client_pending(#signed_tx{} = SignedTx) ->
    TxHash = hash(SignedTx),
    serialize_for_client(SignedTx, -1, <<>>, TxHash).
-spec serialize_for_client(aetx_sign:signed_tx(), integer(), binary(), binary()) ->
                              binary() | map().
%% @doc Build the client-facing structure for a signed tx, annotated with
%% block height, (encoded) block hash and (encoded) tx hash.
serialize_for_client(#signed_tx{} = SigTx, BlockHeight, RawBlockHash, TxHash) ->
    EncodedBlockHash =
        case RawBlockHash of
            %% An empty hash means "not in any block"
            <<>> -> <<"none">>;
            _    -> aeser_api_encoder:encode(micro_block_hash, RawBlockHash)
        end,
    MetaData = #{<<"block_height">> => BlockHeight,
                 <<"block_hash">>   => EncodedBlockHash,
                 <<"hash">>         => aeser_api_encoder:encode(tx_hash, TxHash)},
    serialize_for_client_inner(SigTx, MetaData).
%% For inner transactions we leave out block height etc and for generalized
%% accounts even signatures make no sense.
-spec serialize_for_client_inner(aetx_sign:signed_tx(), map()) -> binary() | map().
%% @doc Add the client-serialised tx to the metadata map; signatures are
%% included only for non-ga_meta transactions.
serialize_for_client_inner(#signed_tx{tx = Tx, signatures = Sigs}, MetaData) ->
    case aetx:specialize_type(Tx) of
        {ga_meta_tx, _} ->
            MetaData#{<<"tx">> => aetx:serialize_for_client(Tx)};
        _ ->
            EncodedSigs = [aeser_api_encoder:encode(signature, Sig) || Sig <- Sigs],
            MetaData#{<<"tx">> => aetx:serialize_for_client(Tx),
                      <<"signatures">> => EncodedSigs}
    end.
%% Extract (and decode) the block height, block hash and tx hash from a
%% client-serialised signed transaction.  The <<"tx">> and <<"signatures">>
%% keys are required to be present, although their values are unused here.
meta_data_from_client_serialized(Serialized) ->
    #{<<"tx">>           := _EncodedTx,
      <<"block_height">> := BlockHeight,
      <<"block_hash">>   := EncBlockHash,
      <<"hash">>         := EncTxHash,
      <<"signatures">>   := _Sigs} = Serialized,
    {block_hash, BlockHash} = aeser_api_encoder:decode(EncBlockHash),
    {tx_hash, TxHash} = aeser_api_encoder:decode(EncTxHash),
    #{block_height => BlockHeight, block_hash => BlockHash, hash => TxHash}.
%% Crash (badmatch carrying the offending signature) unless every signature
%% has exactly the byte size the encoder expects for the signature type.
assert_sigs_size(Sigs) ->
    AllowedByteSize = aeser_api_encoder:byte_size_for_type(signature),
    CheckOne =
        fun(Sig) ->
                {AllowedByteSize, _} = {byte_size(Sig), Sig}
        end,
    lists:foreach(CheckOne, Sigs).
-ifdef(TEST).
%% Test-only helper: swap the inner transaction, keeping the signatures.
%% (Fix: stray dataset metadata appended after `-endif.` made the module
%% uncompilable; it has been removed.)
set_tx(SignedTx, Aetx) ->
    SignedTx#signed_tx{tx = Aetx}.
-endif.
-module(atom_gotchas_app).
-behaviour(application).
-export([start/2, stop/1]). % callbacks
-export([atom5/0]). % exposed function
%% "atom bomb" gotcha demo function
-export([generate_atoms/1]).
%% "attempt_atom" gotcha demo functions
-export([json/1,
decode/1,
check/1]).
%% This section, together with the `atom_bomb.escript` represents the solution
%% We create a new function (and export it, since it's not used internally)
-export([load_all_atoms/0]).
%% We include the `.hrl` file containing the `?ALL_ATOMS` macro
-include("all_atoms.hrl").
%% @doc Return the list of every atom literal used in this application.
%% Because the ?ALL_ATOMS macro embeds those literals in this module, they
%% are all created as soon as the module is loaded.
load_all_atoms() ->
    %% We use the `?ALL_ATOMS` macro (which is filled with all atom literals used
    %% in the current application, by running `atom_bomb.escript`)
    %% Thus, all atom literals are created when this module is loaded
    ?ALL_ATOMS.
%% The `application` callback functions
%% @doc Application start callback: starts the (dummy) top supervisor.
start(_StartType, _StartArgs) ->
    %% Starts a dummy supervisor
    atom_gotchas_sup:start_link().
%% @doc Application stop callback; also demonstrates that atoms referenced
%% by internal functions exist from module load, independent of call time.
stop(_State) ->
    %% The following is to prove that atom literals used in internal
    %% functions are created, regardless of when the functions are called
    atom6(),
    load_atom7().
%% The function to demonstrate the problem with dynamically generating atoms
-spec generate_atoms(ThousandsOfAtoms :: integer()) -> ok.
generate_atoms(ThousandsOfAtoms) ->
%% Have a unique prefix, so that everytime we call this function, we get new atoms
UniquePart = integer_to_list(os:system_time()),
%% Generate and decode `ThousandsOfAtoms` maps, each with 1K unique keys
lists:foreach(
fun(Index) ->
Json = json(UniquePart, Index * 1000, 1000),
decode(Json, [{keys, atom}])
end, lists:seq(1, ThousandsOfAtoms)),
io:format("~w atoms generated~n", [ThousandsOfAtoms * 1000]).
%% The functions used to demonstrate the problem posed by decoding with `attempt_atom`
-spec json(NumberOfKeys :: integer()) -> binary().
%% @doc Convenience wrapper: encoded JSON object with keys atom1..atomN.
json(NumberOfKeys) ->
    json("atom", 1, NumberOfKeys).
-spec json(KeyPrefix :: string(),
           First :: integer(),
           NumberOfKeys :: integer()) -> binary().
%% @doc Build a test JSON object with `NumberOfKeys' keys named
%% `<KeyPrefix><N>' for N in [First, First + NumberOfKeys - 1], each mapped
%% to its integer N, and return the jsone-encoded binary.
json(KeyPrefix, First, NumberOfKeys) ->
    Last = First + NumberOfKeys - 1,
    Pairs = [{list_to_binary(KeyPrefix ++ integer_to_list(N)), N}
             || N <- lists:seq(First, Last)],
    jsone:encode(maps:from_list(Pairs)).
-spec decode(binary()) -> map().
%% @doc Decode JSON with the `attempt_atom' key option, i.e. a key becomes
%% an atom only when that atom already exists - presumably no new atoms are
%% created (NOTE(review): confirm against the jsone version in use).
decode(Encoded) ->
    %% Decode the input with `attempt_atom` option
    decode(Encoded, [{keys, attempt_atom}]).
%% Decode with caller-supplied jsone options.
decode(Encoded, Opts) ->
    jsone:decode(Encoded, Opts).
-spec check(map()) -> ok.
%% @doc For each index 1..N (N being the number of keys in the decoded
%% map), print whether the corresponding `atom<N>' key was decoded to an
%% atom, as reported by internal:atom_exists/2.
check(Decoded) ->
    %% map_size/1 is O(1) and avoids materialising the key list just to
    %% count it (was: length(maps:keys(Decoded)))
    NrKeys = map_size(Decoded),
    lists:foreach(
      fun(N) ->
              case internal:atom_exists(N, Decoded) of
                  true ->
                      io:format("atom~w found~n", [N]),
                      ok;
                  false ->
                      io:format("atom~w NOT FOUND!!!~n", [N])
              end
      end, lists:seq(1, NrKeys)).
%% internal functions
%% These trivial functions exist purely to demonstrate atom creation:
%% Being the name of a function, literal `atom5` exists
%% as soon as the module is loaded
atom5() ->
    ok.
%% Being the name of a function, literal `atom6` exists
%% as soon as the module is loaded
atom6() ->
    ok.
%% Being used as a literal here (even if as part of a macro)
%% `atom7` exists as soon as the module is loaded
%% (Fix: stray dataset metadata appended after `?ATOMS.` made the module
%% uncompilable; it has been removed.)
-define(ATOMS, [atom7]).
load_atom7() ->
    ?ATOMS.
%% -------- SST (Variant) ---------
%%
%% A FSM module intended to wrap a persisted, ordered view of Keys and Values
%%
%% The persisted view is built from a list (which may be created by merging
%% multiple lists). The list is built first, then the view is created in bulk.
%%
%% -------- Slots ---------
%%
%% The view is built from sublists referred to as slot. Each slot is up to 128
%% keys and values in size. Three strategies have been benchmarked for the
%% slot: a skiplist, a gb-tree, four blocks of flat lists with an index.
%%
%% Skiplist:
%% build and serialise slot - 3233 microseconds
%% de-serialise and check * 128 - 14669 microseconds
%% flatten back to list - 164 microseconds
%%
%% GBTree:
%% build and serialise tree - 1433 microseconds
%% de-serialise and check * 128 - 15263 microseconds
%% flatten back to list - 175 microseconds
%%
%% Indexed Blocks:
%% build and serialise slot 342 microseconds
%% de-serialise and check * 128 - 6746 microseconds
%% flatten back to list - 187 microseconds
%%
%% The negative side of using Indexed Blocks is the storage of the index. In
%% the original implementation this was stored on fadvised disk (the index in
%% this case was a rice-encoded view of which block the object is in). In this
%% implementation it is cached in memory -requiring 2-bytes per key to be kept
%% in memory.
%%
%% -------- Blooms ---------
%%
%% There is a bloom for each slot - based on two hashes and 8 bits per key.
%%
%% Hashing for blooms is a challenge, as the slot is a slice of an ordered
%% list of keys with a fixed format. It is likely that the keys may vary by
%% only one or two ascii characters, and there is a desire to avoid the
%% overhead of cryptographic hash functions that may be able to handle this.
%%
%% -------- Summary ---------
%%
%% Each file has a summary - which is the 128 keys at the top of each slot in
%% a skiplist, with some basic metadata about the slot stored as the value.
%%
%% The summary is stored separately to the slots (within the same file).
%%
%% -------- CRC Checks ---------
%%
%% Every attempt to either read a summary or a slot off disk will also include
%% a CRC check. If the CRC check fails non-presence is assumed (the data
%% within is assumed to be entirely lost). The data can be recovered by either
%% using a recoverable strategy in transaction log compaction, and triggering
%% the transaction log replay; or by using a higher level for of anti-entropy
%% (i.e. make Riak responsible).
-module(leveled_sst).
-behaviour(gen_fsm).
-ifdef(fsm_deprecated).
-compile({nowarn_deprecated_function,
[{gen_fsm, start_link, 3},
{gen_fsm, sync_send_event, 3},
{gen_fsm, send_event, 2},
{gen_fsm, send_all_state_event, 2}]}).
-endif.
-include("include/leveled.hrl").
-define(MAX_SLOTS, 256).
-define(LOOK_SLOTSIZE, 128). % Maximum of 128
-define(LOOK_BLOCKSIZE, {24, 32}). % 4x + y = ?LOOK_SLOTSIZE
-define(NOLOOK_SLOTSIZE, 256).
-define(NOLOOK_BLOCKSIZE, {56, 32}). % 4x + y = ?NOLOOK_SLOTSIZE
-define(COMPRESSION_LEVEL, 1).
-define(BINARY_SETTINGS, [{compressed, ?COMPRESSION_LEVEL}]).
-define(MERGE_SCANWIDTH, 16).
-define(DISCARD_EXT, ".discarded").
-define(DELETE_TIMEOUT, 10000).
-define(TREE_TYPE, idxt).
-define(TREE_SIZE, 4).
-define(TIMING_SAMPLECOUNTDOWN, 10000).
-define(TIMING_SAMPLESIZE, 100).
-define(CACHE_SIZE, 32).
-define(BLOCK_LENGTHS_LENGTH, 20).
-define(LMD_LENGTH, 4).
-define(FLIPPER32, 4294967295).
-define(COMPRESS_AT_LEVEL, 1).
-define(INDEX_MODDATE, true).
-define(USE_SET_FOR_SPEED, 64).
-include_lib("eunit/include/eunit.hrl").
-export([init/1,
handle_sync_event/4,
handle_event/3,
handle_info/3,
terminate/3,
code_change/4,
starting/2,
starting/3,
reader/3,
delete_pending/2,
delete_pending/3]).
-export([sst_new/6,
sst_new/8,
sst_newlevelzero/7,
sst_open/2,
sst_get/2,
sst_get/3,
sst_expandpointer/5,
sst_getmaxsequencenumber/1,
sst_setfordelete/2,
sst_clear/1,
sst_checkready/1,
sst_deleteconfirmed/1,
sst_close/1]).
-export([tune_seglist/1, extract_hash/1, member_check/2]).
-export([in_range/3]).
-record(slot_index_value, {slot_id :: integer(),
start_position :: integer(),
length :: integer()}).
-record(summary, {first_key :: tuple(),
last_key :: tuple(),
index :: tuple() | undefined,
size :: integer(),
max_sqn :: integer()}).
-type press_method()
:: lz4|native|none.
-type range_endpoint()
:: all|leveled_codec:ledger_key().
-type slot_pointer()
:: {pointer, pid(), integer(), range_endpoint(), range_endpoint()}.
-type sst_pointer()
% Used in sst_new
:: {next,
leveled_pmanifest:manifest_entry(),
range_endpoint()}.
-type sst_closed_pointer()
% used in expand_list_by_pointer
% (close point is added by maybe_expand_pointer
:: {next,
leveled_pmanifest:manifest_entry(),
range_endpoint(),
range_endpoint()}.
-type expandable_pointer()
:: slot_pointer()|sst_closed_pointer().
-type expanded_pointer()
:: leveled_codec:ledger_kv()|expandable_pointer().
-type binaryslot_element()
:: {tuple(), tuple()}|{binary(), integer(), tuple(), tuple()}.
%% yield_blockquery is used to determine if the work necessary to process a
%% range query beyond the fetching the slot should be managed from within
%% this process, or should be handled by the calling process.
%% Handling within the calling process may lead to extra binary heap garbage
%% see Issue 52. Handling within the SST process may lead to contention and
%% extra copying. Files at the top of the tree yield, those lower down don't.
-record(state,
{summary,
handle :: file:fd() | undefined,
penciller :: pid() | undefined,
root_path,
filename,
yield_blockquery = false :: boolean(),
blockindex_cache,
compression_method = native :: press_method(),
index_moddate = ?INDEX_MODDATE :: boolean(),
timings = no_timing :: sst_timings(),
timings_countdown = 0 :: integer(),
fetch_cache = array:new([{size, ?CACHE_SIZE}])}).
-record(sst_timings,
{sample_count = 0 :: integer(),
index_query_time = 0 :: integer(),
lookup_cache_time = 0 :: integer(),
slot_index_time = 0 :: integer(),
fetch_cache_time = 0 :: integer(),
slot_fetch_time = 0 :: integer(),
noncached_block_time = 0 :: integer(),
lookup_cache_count = 0 :: integer(),
slot_index_count = 0 :: integer(),
fetch_cache_count = 0 :: integer(),
slot_fetch_count = 0 :: integer(),
noncached_block_count = 0 :: integer()}).
-record(build_timings,
{slot_hashlist = 0 :: integer(),
slot_serialise = 0 :: integer(),
slot_finish = 0 :: integer(),
fold_toslot = 0 :: integer()}).
-type sst_state() :: #state{}.
-type sst_timings() :: no_timing|#sst_timings{}.
-type build_timings() :: no_timing|#build_timings{}.
-export_type([expandable_pointer/0]).
%%%============================================================================
%%% API
%%%============================================================================
-spec sst_open(string(), string())
            -> {ok, pid(),
                {leveled_codec:ledger_key(), leveled_codec:ledger_key()},
                binary()}.
%% @doc
%% Open an SST file at a given path and filename.  The first and last keys
%% are returned in response to the request - so that those keys can be used
%% in manifests to understand what range of keys are covered by the SST file.
%% All keys in the file should be between the first and last key in erlang
%% term order.
%%
%% The filename should include the file extension.
sst_open(RootPath, Filename) ->
    {ok, Pid} = gen_fsm:start_link(?MODULE, [], []),
    %% Only a successful reply is matched; anything else raises case_clause
    %% in the caller (assertive style)
    case gen_fsm:sync_send_event(Pid,
                                 {sst_open, RootPath, Filename},
                                 infinity) of
        {ok, {SK, EK}, Bloom} ->
            {ok, Pid, {SK, EK}, Bloom}
    end.
-spec sst_new(string(), string(), integer(),
              list(leveled_codec:ledger_kv()),
              integer(), press_method())
            -> {ok, pid(),
                {leveled_codec:ledger_key(), leveled_codec:ledger_key()},
                binary()}.
%% @doc
%% Start a new SST file at the assigned level passing in a list of Key, Value
%% pairs. This should not be used for basement levels or unexpanded Key/Value
%% lists as merge_lists will not be called.
sst_new(RootPath, Filename, Level, KVList, MaxSQN, PressMethod) ->
    sst_new(RootPath, Filename, Level, KVList, MaxSQN, PressMethod,
            ?INDEX_MODDATE).
%% As sst_new/6, but with explicit control of last-modified-date indexing
%% rather than the compiled default.
sst_new(RootPath, Filename, Level, KVList, MaxSQN, PressMethod,
        IndexModDate) ->
    {ok, Pid} = gen_fsm:start_link(?MODULE, [], []),
    %% Compression may be disabled for upper levels (see compress_level)
    PressMethod0 = compress_level(Level, PressMethod),
    {[], [], SlotList, FK} =
        merge_lists(KVList, PressMethod0, IndexModDate),
    case gen_fsm:sync_send_event(Pid,
                                 {sst_new,
                                  RootPath,
                                  Filename,
                                  Level,
                                  {SlotList, FK},
                                  MaxSQN,
                                  PressMethod0,
                                  IndexModDate},
                                 infinity) of
        {ok, {SK, EK}, Bloom} ->
            {ok, Pid, {SK, EK}, Bloom}
    end.
-spec sst_new(string(), string(),
              list(leveled_codec:ledger_kv()|sst_pointer()),
              list(leveled_codec:ledger_kv()|sst_pointer()),
              boolean(), integer(),
              integer(), press_method())
            -> empty|{ok, pid(),
                      {{list(leveled_codec:ledger_kv()),
                        list(leveled_codec:ledger_kv())},
                       leveled_codec:ledger_key(),
                       leveled_codec:ledger_key()},
                      binary()}.
%% @doc
%% Start a new SST file at the assigned level passing in two lists of
%% {Key, Value} pairs to be merged. The merge_lists function will use the
%% IsBasement boolean to determine if expired keys or tombstones can be
%% deleted.
%%
%% The remainder of the lists is returned along with the StartKey and EndKey
%% so that the remainder can be used in the next file in the merge. It might
%% be that merge_lists returns nothing (for example when a basement file is
%% all tombstones) - and the atom empty is returned in this case so that the
%% file is not added to the manifest.
sst_new(RootPath, Filename,
        KVL1, KVL2, IsBasement, Level,
        MaxSQN, PressMethod) ->
    sst_new(RootPath, Filename,
            KVL1, KVL2, IsBasement, Level,
            MaxSQN, PressMethod, ?INDEX_MODDATE).
%% As sst_new/8, but with explicit control of last-modified-date indexing.
sst_new(RootPath, Filename,
        KVL1, KVL2, IsBasement, Level,
        MaxSQN, PressMethod, IndexModDate) ->
    PressMethod0 = compress_level(Level, PressMethod),
    {Rem1, Rem2, SlotList, FK} =
        merge_lists(KVL1, KVL2, {IsBasement, Level},
                    PressMethod0, IndexModDate),
    case SlotList of
        [] ->
            %% Merge produced nothing (e.g. a basement merge where all
            %% entries were tombstones): no process started, no file written
            empty;
        _ ->
            {ok, Pid} = gen_fsm:start_link(?MODULE, [], []),
            case gen_fsm:sync_send_event(Pid,
                                         {sst_new,
                                          RootPath,
                                          Filename,
                                          Level,
                                          {SlotList, FK},
                                          MaxSQN,
                                          PressMethod0,
                                          IndexModDate},
                                         infinity) of
                {ok, {SK, EK}, Bloom} ->
                    {ok, Pid, {{Rem1, Rem2}, SK, EK}, Bloom}
            end
    end.
-spec sst_newlevelzero(string(), string(),
                       integer(), fun(), pid()|undefined, integer(),
                       press_method()) ->
                                        {ok, pid(), noreply}.
%% @doc
%% Start a new file at level zero. At this level the file size is not fixed -
%% it will be as big as the input. Also the KVList is not passed in, it is
%% fetched slot by slot using the FetchFun
sst_newlevelzero(RootPath, Filename,
                 Slots, FetchFun, Penciller,
                 MaxSQN, PressMethod) ->
    PressMethod0 = compress_level(0, PressMethod),
    {ok, Pid} = gen_fsm:start_link(?MODULE, [], []),
    %% Asynchronous: the file is built in the background (see the
    %% sst_newlevelzero clause of starting/2); the caller gets noreply
    gen_fsm:send_event(Pid,
                       {sst_newlevelzero,
                        RootPath,
                        Filename,
                        Slots,
                        FetchFun,
                        Penciller,
                        MaxSQN,
                        PressMethod0,
                        ?INDEX_MODDATE}),
    {ok, Pid, noreply}.
-spec sst_get(pid(), leveled_codec:ledger_key())
            -> leveled_codec:ledger_kv()|not_present.
%% @doc
%% Return a Key, Value pair matching a Key or not_present if the Key is not in
%% the store. The segment_hash function is used to accelerate the seeking of
%% keys, sst_get/3 should be used directly if this has already been calculated
sst_get(Pid, LedgerKey) ->
    sst_get(Pid, LedgerKey, leveled_codec:segment_hash(LedgerKey)).
-spec sst_get(pid(), leveled_codec:ledger_key(), leveled_codec:segment_hash())
            -> leveled_codec:ledger_kv()|not_present.
%% @doc
%% Return a Key, Value pair matching a Key or not_present if the Key is not in
%% the store (with the magic hash precalculated).
sst_get(Pid, LedgerKey, Hash) ->
    gen_fsm:sync_send_event(Pid, {get_kv, LedgerKey, Hash}, infinity).
-spec sst_getmaxsequencenumber(pid()) -> integer().
%% @doc
%% Get the maximum sequence number for this SST file
sst_getmaxsequencenumber(Pid) ->
    gen_fsm:sync_send_event(Pid, get_maxsequencenumber, infinity).
-spec sst_expandpointer(expandable_pointer(),
                        list(expandable_pointer()),
                        pos_integer(),
                        leveled_codec:segment_list(),
                        non_neg_integer())
            -> list(expanded_pointer()).
%% @doc
%% Expand out a list of pointers to return a list of Keys and Values with a
%% tail of pointers (once the ScanWidth has been satisfied).
%% Folding over keys in a store uses this function, although this function
%% does not directly call the gen_server - it does so by sst_getfilteredslots
%% or sst_getfilteredrange depending on the nature of the pointer.
sst_expandpointer(Pointer, MorePointers, ScanWidth, SegmentList, LowLastMod) ->
    expand_list_by_pointer(Pointer, MorePointers, ScanWidth,
                           SegmentList, LowLastMod).
-spec sst_setfordelete(pid(), pid()|false) -> ok.
%% @doc
%% If the SST is no longer in use in the active ledger it can be set for
%% delete. Once set for delete it will poll the Penciller pid to see if
%% it is yet safe to be deleted (i.e. because all snapshots which depend
%% on it have finished). No polling will be done if the Penciller pid
%% is 'false'
sst_setfordelete(Pid, Penciller) ->
    gen_fsm:sync_send_event(Pid, {set_for_delete, Penciller}, infinity).
-spec sst_clear(pid()) -> ok.
%% @doc
%% Force this file to be closed and deleted (set for delete without a
%% penciller to poll, then close).
sst_clear(Pid) ->
    gen_fsm:sync_send_event(Pid, {set_for_delete, false}, infinity),
    gen_fsm:sync_send_event(Pid, close, 1000).
-spec sst_deleteconfirmed(pid()) -> ok.
%% @doc
%% Allows a penciller to confirm to a SST file that it can be cleared, as it
%% is no longer in use
sst_deleteconfirmed(Pid) ->
    gen_fsm:send_event(Pid, close).
-spec sst_checkready(pid()) -> {ok, string(),
                                leveled_codec:ledger_key(),
                                leveled_codec:ledger_key()}.
%% @doc
%% If a file has been set to be built, check that it has been built. Returns
%% the filename and the {startKey, EndKey} for the manifest.
sst_checkready(Pid) ->
    %% Only used in test
    gen_fsm:sync_send_event(Pid, background_complete, 100).
-spec sst_close(pid()) -> ok.
%% @doc
%% Close the file
sst_close(Pid) ->
    gen_fsm:sync_send_event(Pid, close, 2000).
-spec sst_printtimings(pid()) -> ok.
%% @doc
%% The state of the FSM keeps track of timings of operations, and this can
%% be forced to be printed.
%% Used in unit tests to force the printing of timings
%% NOTE(review): sst_printtimings/1 does not appear in the -export lists
%% visible in this file - confirm it is exported (or remove it), otherwise
%% the compiler will flag it as unused.
sst_printtimings(Pid) ->
    gen_fsm:sync_send_event(Pid, print_timings, 1000).
%%%============================================================================
%%% gen_server callbacks
%%%============================================================================
%% gen_fsm init: begin in the 'starting' state with an empty #state{};
%% the real set-up happens on the first sst_open / sst_new event.
init([]) ->
    {ok, starting, #state{}}.
%% Synchronous events in the 'starting' state: either open an existing file,
%% or persist a pre-merged slot list as a new file.  Both reply with the
%% file's key range and bloom, then transition to the 'reader' state.
starting({sst_open, RootPath, Filename}, _From, State) ->
    {UpdState, Bloom} =
        read_file(Filename, State#state{root_path=RootPath}),
    Summary = UpdState#state.summary,
    {reply,
        {ok, {Summary#summary.first_key, Summary#summary.last_key}, Bloom},
        reader,
        UpdState};
starting({sst_new,
            RootPath, Filename, Level,
            {SlotList, FirstKey}, MaxSQN,
            PressMethod, IdxModDate}, _From, State) ->
    SW = os:timestamp(),
    {Length, SlotIndex, BlockIndex, SlotsBin, Bloom} =
        build_all_slots(SlotList),
    SummaryBin =
        build_table_summary(SlotIndex, Level, FirstKey, Length, MaxSQN, Bloom),
    ActualFilename =
        write_file(RootPath, Filename, SummaryBin, SlotsBin,
                    PressMethod, IdxModDate),
    %% Files in the top two levels yield block queries back to the caller
    YBQ = Level =< 2,
    %% NOTE(review): Bloom is already bound above, so this match asserts
    %% that the bloom read back from the written file equals the bloom
    %% just built - confirm this is intended rather than accidental reuse.
    {UpdState, Bloom} =
        read_file(ActualFilename,
                    State#state{root_path=RootPath, yield_blockquery=YBQ}),
    Summary = UpdState#state.summary,
    leveled_log:log_timer("SST08",
                            [ActualFilename, Level, Summary#summary.max_sqn],
                            SW),
    {reply,
        {ok, {Summary#summary.first_key, Summary#summary.last_key}, Bloom},
        reader,
        UpdState#state{blockindex_cache = BlockIndex}}.
% Asynchronous build of a new Level 0 file from the Penciller's in-memory
% cache. Each phase is timed (fetch, merge, slot build, summary, write)
% and the timings logged via SST11
starting({sst_newlevelzero, RootPath, Filename,
Slots, FetchFun, Penciller, MaxSQN,
PressMethod, IdxModDate}, State) ->
SW0 = os:timestamp(),
KVList = leveled_pmem:to_list(Slots, FetchFun),
Time0 = timer:now_diff(os:timestamp(), SW0),
SW1 = os:timestamp(),
% A single-source merge - so no remainders are expected ([], [])
{[], [], SlotList, FirstKey} =
merge_lists(KVList, PressMethod, IdxModDate),
Time1 = timer:now_diff(os:timestamp(), SW1),
SW2 = os:timestamp(),
{SlotCount, SlotIndex, BlockIndex, SlotsBin,Bloom} =
build_all_slots(SlotList),
Time2 = timer:now_diff(os:timestamp(), SW2),
SW3 = os:timestamp(),
SummaryBin =
build_table_summary(SlotIndex, 0, FirstKey, SlotCount, MaxSQN, Bloom),
Time3 = timer:now_diff(os:timestamp(), SW3),
SW4 = os:timestamp(),
ActualFilename =
write_file(RootPath, Filename, SummaryBin, SlotsBin,
PressMethod, IdxModDate),
% Bloom is already bound - the match asserts the bloom read back from
% the file equals the bloom just built
{UpdState, Bloom} =
read_file(ActualFilename,
State#state{root_path=RootPath, yield_blockquery=true}),
Summary = UpdState#state.summary,
Time4 = timer:now_diff(os:timestamp(), SW4),
leveled_log:log_timer("SST08",
[ActualFilename, 0, Summary#summary.max_sqn],
SW0),
leveled_log:log("SST11", [Time0, Time1, Time2, Time3, Time4]),
case Penciller of
undefined ->
{next_state,
reader,
UpdState#state{blockindex_cache = BlockIndex}};
_ ->
% Tell the Penciller the L0 file is complete so the manifest can
% be updated
leveled_penciller:pcl_confirml0complete(Penciller,
UpdState#state.filename,
Summary#summary.first_key,
Summary#summary.last_key,
Bloom),
{next_state,
reader,
UpdState#state{blockindex_cache = BlockIndex}}
end.
% Normal read-serving state of the FSM
reader({get_kv, LedgerKey, Hash}, _From, State) ->
% Get a KV value and potentially take sample timings
{Result, UpdState, UpdTimings} =
fetch(LedgerKey, Hash, State, State#state.timings),
{UpdTimings0, CountDown} =
update_statetimings(UpdTimings, State#state.timings_countdown),
{reply, Result, reader, UpdState#state{timings = UpdTimings0,
timings_countdown = CountDown}};
reader({get_kvrange, StartKey, EndKey, ScanWidth, SegList, LowLastMod},
_From, State) ->
{SlotsToFetchBinList, SlotsToPoint} = fetch_range(StartKey,
EndKey,
ScanWidth,
SegList,
LowLastMod,
State),
PressMethod = State#state.compression_method,
IdxModDate = State#state.index_moddate,
case State#state.yield_blockquery of
true ->
% Yield the slot binaries so the deserialisation happens in the
% calling process, avoiding copying the expanded terms out of
% this process
{reply,
{yield,
SlotsToFetchBinList,
SlotsToPoint,
PressMethod,
IdxModDate},
reader,
State};
false ->
{L, BIC} =
binaryslot_reader(SlotsToFetchBinList,
PressMethod, IdxModDate, SegList),
% Cache any block index headers discovered during the read
FoldFun =
fun(CacheEntry, Cache) ->
case CacheEntry of
{_ID, none} ->
Cache;
{ID, Header} ->
array:set(ID - 1, Header, Cache)
end
end,
BlockIdxC0 = lists:foldl(FoldFun, State#state.blockindex_cache, BIC),
{reply,
L ++ SlotsToPoint,
reader,
State#state{blockindex_cache = BlockIdxC0}}
end;
reader({get_slots, SlotList, SegList, LowLastMod}, _From, State) ->
% Return raw slot binaries - caller deserialises via binaryslot_reader
PressMethod = State#state.compression_method,
IdxModDate = State#state.index_moddate,
SlotBins =
read_slots(State#state.handle,
SlotList,
{SegList, LowLastMod, State#state.blockindex_cache},
State#state.compression_method,
State#state.index_moddate),
{reply, {SlotBins, PressMethod, IdxModDate}, reader, State};
reader(get_maxsequencenumber, _From, State) ->
Summary = State#state.summary,
{reply, Summary#summary.max_sqn, reader, State};
reader(print_timings, _From, State) ->
log_timings(State#state.timings),
{reply, ok, reader, State};
reader({set_for_delete, Penciller}, _From, State) ->
% Move to delete_pending - the ?DELETE_TIMEOUT drives the periodic
% poll of the Penciller via the timeout event
leveled_log:log("SST06", [State#state.filename]),
{reply,
ok,
delete_pending,
State#state{penciller=Penciller},
?DELETE_TIMEOUT};
reader(background_complete, _From, State) ->
Summary = State#state.summary,
{reply,
{ok,
State#state.filename,
Summary#summary.first_key,
Summary#summary.last_key},
reader,
State};
reader(close, _From, State) ->
ok = file:close(State#state.handle),
{stop, normal, ok, State}.
% Reads are still served while the file waits to be deleted, but every
% reply re-arms the ?DELETE_TIMEOUT so deletion continues to be polled for
delete_pending({get_kv, LedgerKey, Hash}, _From, State) ->
% no_timing - timing samples are not collected in this state
{Result, UpdState, _Ts} = fetch(LedgerKey, Hash, State, no_timing),
{reply, Result, delete_pending, UpdState, ?DELETE_TIMEOUT};
delete_pending({get_kvrange, StartKey, EndKey, ScanWidth, SegList, LowLastMod},
_From, State) ->
{SlotsToFetchBinList, SlotsToPoint} = fetch_range(StartKey,
EndKey,
ScanWidth,
SegList,
LowLastMod,
State),
% Always yield as about to clear and de-reference
PressMethod = State#state.compression_method,
IdxModDate = State#state.index_moddate,
{reply,
{yield, SlotsToFetchBinList, SlotsToPoint, PressMethod, IdxModDate},
delete_pending,
State,
?DELETE_TIMEOUT};
delete_pending({get_slots, SlotList, SegList, LowLastMod}, _From, State) ->
PressMethod = State#state.compression_method,
IdxModDate = State#state.index_moddate,
SlotBins =
read_slots(State#state.handle,
SlotList,
{SegList, LowLastMod, State#state.blockindex_cache},
PressMethod,
IdxModDate),
{reply,
{SlotBins, PressMethod, IdxModDate},
delete_pending,
State,
?DELETE_TIMEOUT};
delete_pending(close, _From, State) ->
% Close and remove the file from disk
leveled_log:log("SST07", [State#state.filename]),
ok = file:close(State#state.handle),
ok = file:delete(filename:join(State#state.root_path,
State#state.filename)),
{stop, normal, ok, State}.
% On timeout, ask the Penciller whether the file can now be deleted; the
% Penciller replies (if ready) with an asynchronous close event
delete_pending(timeout, State) ->
ok = leveled_penciller:pcl_confirmdelete(State#state.penciller,
State#state.filename,
self()),
% If the next thing is another timeout - may be long-running snapshot, so
% back-off
{next_state, delete_pending, State, leveled_rand:uniform(10) * ?DELETE_TIMEOUT};
delete_pending(close, State) ->
% Asynchronous close (sent by sst_deleteconfirmed) - remove from disk
leveled_log:log("SST07", [State#state.filename]),
ok = file:close(State#state.handle),
ok = file:delete(filename:join(State#state.root_path,
State#state.filename)),
{stop, normal, State}.
% No all-state sync events are expected - reply undefined to any received
handle_sync_event(_Msg, _From, StateName, State) ->
{reply, undefined, StateName, State}.
% No all-state async events are expected - ignore any received
handle_event(_Msg, StateName, State) ->
{next_state, StateName, State}.
% Drain unexpected raw messages without changing state
handle_info(_Msg, StateName, State) ->
{next_state, StateName, State}.
% A normal stop from delete_pending has already been logged (SST07), so
% requires no further logging here
terminate(normal, delete_pending, _State) ->
ok;
terminate(Reason, _StateName, State) ->
leveled_log:log("SST04", [Reason, State#state.filename]).
% No state transformation required on upgrade
code_change(_OldVsn, StateName, State, _Extra) ->
{ok, StateName, State}.
%%%============================================================================
%%% External Functions
%%%============================================================================
-spec expand_list_by_pointer(expandable_pointer(),
list(expandable_pointer()),
pos_integer())
-> list(expanded_pointer()).
%% @doc
%% Expand a list of pointers, maybe ending up with a list of keys and values
%% with a tail of pointers
%% By default will not have a segment filter, or a low last_modified_date, but
%% they can be used. Range checking a last modified date must still be made on
%% the output - at this stage the low last_modified_date has been used to bulk
%% skip those slots not containing any information over the low last modified
%% date
expand_list_by_pointer(Pointer, Tail, Width) ->
% false -> no segment filter
expand_list_by_pointer(Pointer, Tail, Width, false).
%% TODO until leveled_penciller updated
%% Intermediate arity kept for backward compatibility with callers which
%% do not pass a low last-modified date (0 -> no date floor)
expand_list_by_pointer(Pointer, Tail, Width, SegList) ->
expand_list_by_pointer(Pointer, Tail, Width, SegList, 0).
-spec expand_list_by_pointer(expandable_pointer(),
                                list(expandable_pointer()),
                                pos_integer(),
                                leveled_codec:segment_list(),
                                non_neg_integer())
                                    -> list(expanded_pointer()).
%% @doc
%% With filters (as described in expand_list_by_pointer/3
%% Slot pointers for the same SST file are batched (up to Width) and
%% resolved in one call to sst_getfilteredslots/4; anything else is kept
%% back as a tail of unexpanded pointers.
expand_list_by_pointer({pointer, SSTPid, Slot, StartKey, EndKey},
                        Tail, Width, SegList, LowLastMod) ->
    % The accumulator carries an explicit count (rather than calling
    % length/1 on every fold step) and builds both lists by prepending,
    % reversing once at the end - avoiding O(n^2) list appends.
    FoldFun =
        fun(X, {Count, Pointers, Remainder}) ->
            case {Count < Width, X} of
                {true, {pointer, SSTPid, S, SK, EK}} ->
                    % Same SST file and still under Width - batch it
                    {Count + 1, [{pointer, S, SK, EK}|Pointers], Remainder};
                _ ->
                    % Different file, not a slot pointer, or batch full
                    {Count, Pointers, [X|Remainder]}
            end
        end,
    InitAcc = {1, [{pointer, Slot, StartKey, EndKey}], []},
    {_Count, AccPointers, AccTail} = lists:foldl(FoldFun, InitAcc, Tail),
    ExpPointers = sst_getfilteredslots(SSTPid,
                                        lists:reverse(AccPointers),
                                        SegList,
                                        LowLastMod),
    lists:append(ExpPointers, lists:reverse(AccTail));
expand_list_by_pointer({next, ManEntry, StartKey, EndKey},
                        Tail, Width, SegList, LowLastMod) ->
    % A 'next' pointer references another SST file via its manifest entry
    SSTPid = ManEntry#manifest_entry.owner,
    leveled_log:log("SST10", [SSTPid, is_process_alive(SSTPid)]),
    ExpPointer = sst_getfilteredrange(SSTPid,
                                        StartKey,
                                        EndKey,
                                        Width,
                                        SegList,
                                        LowLastMod),
    ExpPointer ++ Tail.
-spec sst_getkvrange(pid(),
range_endpoint(),
range_endpoint(),
integer())
-> list(leveled_codec:ledger_kv()|slot_pointer()).
%% @doc
%% Get a range of {Key, Value} pairs as a list between StartKey and EndKey
%% (inclusive). The ScanWidth is the maximum size of the range, a pointer
%% will be placed on the tail of the resulting list if results expand beyond
%% the Scan Width
sst_getkvrange(Pid, StartKey, EndKey, ScanWidth) ->
% Unfiltered - no segment list (false) and no last-modified floor (0)
sst_getfilteredrange(Pid, StartKey, EndKey, ScanWidth, false, 0).
-spec sst_getfilteredrange(pid(),
range_endpoint(),
range_endpoint(),
integer(),
leveled_codec:segment_list(),
non_neg_integer())
-> list(leveled_codec:ledger_kv()|slot_pointer()).
%% @doc
%% Get a range of {Key, Value} pairs as a list between StartKey and EndKey
%% (inclusive). The ScanWidth is the maximum size of the range, a pointer
%% will be placed on the tail of the resulting list if results expand beyond
%% the Scan Width
%%
%% To make the range open-ended (either to start, end or both) the all atom
%% can be used in place of the Key tuple.
%%
%% A segment list can also be passed, which indicates a subset of segment
%% hashes of interest in the query.
%%
%% TODO: Optimise this so that passing a list of segments that tune to the
%% same hash is faster - perhaps provide an exportable function in
%% leveled_tictac
sst_getfilteredrange(Pid, StartKey, EndKey, ScanWidth, SegList, LowLastMod) ->
SegList0 = tune_seglist(SegList),
case gen_fsm:sync_send_event(Pid,
{get_kvrange,
StartKey, EndKey,
ScanWidth, SegList0, LowLastMod},
infinity) of
{yield, SlotsToFetchBinList, SlotsToPoint, PressMethod, IdxModDate} ->
% The file process yielded the slot binaries - deserialise them
% here, in the calling process
{L, _BIC} =
binaryslot_reader(SlotsToFetchBinList,
PressMethod, IdxModDate, SegList0),
L ++ SlotsToPoint;
Reply ->
Reply
end.
-spec sst_getslots(pid(), list(slot_pointer()))
-> list(leveled_codec:ledger_kv()).
%% @doc
%% Get a list of slots by their ID. The slot will be converted from the binary
%% to term form outside of the FSM loop, this is to stop the copying of the
%% converted term to the calling process.
sst_getslots(Pid, SlotList) ->
% Unfiltered - no segment list (false) and no last-modified floor (0)
sst_getfilteredslots(Pid, SlotList, false, 0).
-spec sst_getfilteredslots(pid(),
list(slot_pointer()),
leveled_codec:segment_list(),
non_neg_integer())
-> list(leveled_codec:ledger_kv()).
%% @doc
%% Get a list of slots by their ID. The slot will be converted from the binary
%% to term form outside of the FSM loop
%%
%% A list of 16-bit integer Segment IDs can be passed to filter the keys
%% returned (not precisely - with false results returned in addition). Use
%% false as a SegList to not filter.
%% An integer can be provided which gives a floor for the LastModified Date
%% of the object, if the object is to be covered by the query
sst_getfilteredslots(Pid, SlotList, SegList, LowLastMod) ->
SegL0 = tune_seglist(SegList),
{SlotBins, PressMethod, IdxModDate} =
gen_fsm:sync_send_event(Pid,
{get_slots, SlotList, SegL0, LowLastMod},
infinity),
% Deserialisation happens here in the calling process, not in the FSM
{L, _BIC} = binaryslot_reader(SlotBins, PressMethod, IdxModDate, SegL0),
L.
-spec find_pos(binary(),
                non_neg_integer()|
                    {list, list(non_neg_integer())}|
                    {sets, sets:set(non_neg_integer())},
                list(non_neg_integer()),
                non_neg_integer()) -> list(non_neg_integer()).
%% @doc
%% Find a list of positions where there is an element with a matching segment
%% ID to the expected segments (which can either be a single segment, a list
%% of segments or a set of segments depending on size).
%% Positions are returned in ascending order.  The accumulator is built by
%% prepending and reversed on completion (rather than appended to on each
%% hit, which was O(n^2)) - assumes callers seed PosList with [] as all
%% call sites in this module do; TODO confirm no external caller passes a
%% non-empty seed.
find_pos(<<>>, _Checker, PosList, _Count) ->
    lists:reverse(PosList);
find_pos(<<1:1/integer, PotentialHit:15/integer, T/binary>>,
                                                Checker, PosList, Count) ->
    % A 1-bit leads a 15-bit hash for a fetchable position
    case member_check(PotentialHit, Checker) of
        true ->
            find_pos(T, Checker, [Count|PosList], Count + 1);
        false ->
            find_pos(T, Checker, PosList, Count + 1)
    end;
find_pos(<<0:1/integer, NHC:7/integer, T/binary>>, Checker, PosList, Count) ->
    % A 0-bit leads a 7-bit count of non-hashed positions to skip over
    find_pos(T, Checker, PosList, Count + NHC + 1).
-spec member_check(non_neg_integer(),
                    non_neg_integer()|
                        {list, list(non_neg_integer())}|
                        {sets, sets:set(non_neg_integer())}) -> boolean().
%% @doc
%% Test a segment hash for membership of the checker, where the checker is
%% either a single hash, a tagged list of hashes, or a tagged set of
%% hashes.  Anything else is a miss.
member_check(Hash, Checker) ->
    case Checker of
        Hash ->
            true;
        {list, HashList} ->
            lists:member(Hash, HashList);
        {sets, HashSet} ->
            sets:is_element(Hash, HashSet);
        _ ->
            false
    end.
% Extract the segment hash from a {SegHash, ExtraHash} pair and mask it
% down to the 15 bits of interest; anything else (e.g. no_lookup) is
% passed through unchanged
extract_hash({SegHash, _ExtraHash}) when is_integer(SegHash) ->
tune_hash(SegHash);
extract_hash(NotHash) ->
NotHash.
% Map the extra hash onto a fetch-cache array slot (0..?CACHE_SIZE-1);
% ?CACHE_SIZE is assumed to be a power of two for the band to work as a
% modulus
cache_hash({_SegHash, ExtraHash}) when is_integer(ExtraHash) ->
ExtraHash band (?CACHE_SIZE - 1).
-spec tune_hash(non_neg_integer()) -> non_neg_integer().
%% @doc
%% Mask a segment hash down to the low 15 bits - the only bits which are
%% of interest to the slot position index.
tune_hash(SegHash) ->
    SegHash band 16#7FFF.
-spec tune_seglist(leveled_codec:segment_list())
                    -> leveled_codec:segment_list().
%% @doc
%% Mask each segment in the list down to the 15 bits of interest, dedupe,
%% and tag the result.  Longer lists become sets for faster membership
%% checks; a non-list (i.e. false, meaning no filter) passes through as
%% false.
tune_seglist(SegList) when is_list(SegList) ->
    Tuned = lists:usort([tune_hash(S) || S <- SegList]),
    case length(Tuned) > ?USE_SET_FOR_SPEED of
        true ->
            {sets, sets:from_list(Tuned)};
        false ->
            {list, Tuned}
    end;
tune_seglist(_NotAList) ->
    false.
%%%============================================================================
%%% Internal Functions
%%%============================================================================
-spec fetch(tuple(),
{integer(), integer()}|integer(),
sst_state(), sst_timings())
-> {not_present|tuple(), sst_state(), sst_timings()}.
%% @doc
%%
%% Fetch a key from the store, potentially taking timings. Result should be
%% not_present if the key is not in the store.
fetch(LedgerKey, Hash, State, Timings0) ->
SW0 = os:timestamp(),
Summary = State#state.summary,
PressMethod = State#state.compression_method,
IdxModDate = State#state.index_moddate,
% Find which slot should hold the key
Slot = lookup_slot(LedgerKey, Summary#summary.index),
{SW1, Timings1} = update_timings(SW0, Timings0, index_query, true),
SlotID = Slot#slot_index_value.slot_id,
CachedBlockIdx =
array:get(SlotID - 1, State#state.blockindex_cache),
{SW2, Timings2} = update_timings(SW1, Timings1, lookup_cache, true),
case extract_header(CachedBlockIdx, IdxModDate) of
none ->
% No cached header for this slot - read the whole slot from
% disk, and cache its header for subsequent fetches
SlotBin = read_slot(State#state.handle, Slot),
{Result, Header} =
binaryslot_get(SlotBin, LedgerKey, Hash, PressMethod, IdxModDate),
BlockIndexCache =
array:set(SlotID - 1, Header, State#state.blockindex_cache),
{_SW3, Timings3} =
update_timings(SW2, Timings2, noncached_block, false),
{Result,
State#state{blockindex_cache = BlockIndexCache},
Timings3};
{BlockLengths, _LMD, PosBin} ->
% Cached header available - check the position index for
% potential hits before touching the disk
PosList = find_pos(PosBin, extract_hash(Hash), [], 0),
case PosList of
[] ->
% Position index miss - the key is not in this file
{_SW3, Timings3} =
update_timings(SW2, Timings2, slot_index, false),
{not_present, State, Timings3};
_ ->
{SW3, Timings3} =
update_timings(SW2, Timings2, slot_index, true),
FetchCache = State#state.fetch_cache,
CacheHash = cache_hash(Hash),
case array:get(CacheHash, FetchCache) of
{LedgerKey, V} ->
% Fetch-cache hit for this exact key
{_SW4, Timings4} =
update_timings(SW3,
Timings3,
fetch_cache,
false),
{{LedgerKey, V}, State, Timings4};
_ ->
% Read only the candidate blocks from disk, and
% cache the result (which may be not_present)
StartPos = Slot#slot_index_value.start_position,
Result =
check_blocks(PosList,
{State#state.handle, StartPos},
BlockLengths,
byte_size(PosBin),
LedgerKey,
PressMethod,
IdxModDate,
not_present),
FetchCache0 =
array:set(CacheHash, Result, FetchCache),
{_SW4, Timings4} =
update_timings(SW3,
Timings3,
slot_fetch,
false),
{Result,
State#state{fetch_cache = FetchCache0},
Timings4}
end
end
end.
-spec fetch_range(tuple(), tuple(), integer(),
leveled_codec:segment_list(), non_neg_integer(),
sst_state()) -> {list(), list()}.
%% @doc
%% Fetch the contents of the SST file for a given key range. This will
%% pre-fetch some results, and append pointers for additional results.
%%
%% A filter can be provided based on the Segment ID (usable for hashable
%% objects not no_lookup entries) to accelerate the query if the 5-arity
%% version is used
fetch_range(StartKey, EndKey, ScanWidth, SegList, LowLastMod, State) ->
Summary = State#state.summary,
Handle = State#state.handle,
% RTrim is true when the last slot's end key is within the range (i.e.
% the range should be right-trimmed at EndKey)
{Slots, RTrim} = lookup_slots(StartKey, EndKey, Summary#summary.index),
Self = self(),
SL = length(Slots),
% Convert the matched slots into pointers: only the first slot needs
% trimming at StartKey, only the last at EndKey (when RTrim), and all
% middle slots are taken whole (all, all)
ExpandedSlots =
case SL of
1 ->
[Slot] = Slots,
case RTrim of
true ->
[{pointer, Self, Slot, StartKey, EndKey}];
false ->
[{pointer, Self, Slot, StartKey, all}]
end;
N ->
{LSlot, MidSlots, RSlot} =
case N of
2 ->
[Slot1, Slot2] = Slots,
{Slot1, [], Slot2};
N ->
[Slot1|_Rest] = Slots,
SlotN = lists:last(Slots),
{Slot1, lists:sublist(Slots, 2, N - 2), SlotN}
end,
MidSlotPointers = lists:map(fun(S) ->
{pointer, Self, S, all, all}
end,
MidSlots),
case RTrim of
true ->
[{pointer, Self, LSlot, StartKey, all}] ++
MidSlotPointers ++
[{pointer, Self, RSlot, all, EndKey}];
false ->
[{pointer, Self, LSlot, StartKey, all}] ++
MidSlotPointers ++
[{pointer, Self, RSlot, all, all}]
end
end,
% Fetch up to ScanWidth slots now; leave the remainder as pointers for
% the caller to expand later
{SlotsToFetch, SlotsToPoint} =
case ScanWidth of
SW when SW >= SL ->
{ExpandedSlots, []};
_ ->
lists:split(ScanWidth, ExpandedSlots)
end,
SlotsToFetchBinList =
read_slots(Handle,
SlotsToFetch,
{SegList, LowLastMod, State#state.blockindex_cache},
State#state.compression_method,
State#state.index_moddate),
{SlotsToFetchBinList, SlotsToPoint}.
-spec compress_level(integer(), press_method()) -> press_method().
%% @doc
%% disable compression at higher levels for improved performance
%% (note: "higher" here means levels nearer the top of the tree, i.e.
%% below ?COMPRESS_AT_LEVEL)
compress_level(Level, _PressMethod) when Level < ?COMPRESS_AT_LEVEL ->
none;
compress_level(_Level, PressMethod) ->
PressMethod.
%% @doc
%% Write the SST file to disk: a version byte, the slots and summary
%% lengths, then the slots binary followed by the summary binary.  The
%% file is written under a pending (.pnd) name and renamed to the final
%% (.sst) name on completion; any pre-existing final file is renamed
%% aside with the discard extension first.  Returns the final filename.
write_file(RootPath, Filename, SummaryBin, SlotsBin,
            PressMethod, IdxModDate) ->
    SummaryLength = byte_size(SummaryBin),
    SlotsLength = byte_size(SlotsBin),
    {PendingName, FinalName} = generate_filenames(Filename),
    FileVersion = gen_fileversion(PressMethod, IdxModDate),
    ok = file:write_file(filename:join(RootPath, PendingName),
                            <<FileVersion:8/integer,
                                SlotsLength:32/integer,
                                SummaryLength:32/integer,
                                SlotsBin/binary,
                                SummaryBin/binary>>,
                            [raw]),
    case filelib:is_file(filename:join(RootPath, FinalName)) of
        true ->
            % A file already exists under the final name - move it aside
            % rather than overwrite it
            AltName = filename:join(RootPath, filename:basename(FinalName))
                        ++ ?DISCARD_EXT,
            leveled_log:log("SST05", [FinalName, AltName]),
            ok = file:rename(filename:join(RootPath, FinalName), AltName);
        false ->
            ok
    end,
    % Assert the rename succeeded - previously the result was ignored,
    % which would have deferred any failure to a confusing missing-file
    % error when the final name was later opened
    ok = file:rename(filename:join(RootPath, PendingName),
                        filename:join(RootPath, FinalName)),
    FinalName.
%% @doc
%% Open the file and read its table summary, returning the updated state
%% (with summary, handle, filename and an empty block index cache) and
%% the bloom stored in the summary
read_file(Filename, State) ->
{Handle, FileVersion, SummaryBin} =
open_reader(filename:join(State#state.root_path, Filename)),
% The version byte determines compression method and whether the slot
% index carries last-modified dates
UpdState0 = imp_fileversion(FileVersion, State),
{Summary, Bloom, SlotList} = read_table_summary(SummaryBin),
% Fresh (empty) block index cache, one entry per slot
BlockIndexCache = array:new([{size, Summary#summary.size},
{default, none}]),
UpdState1 = UpdState0#state{blockindex_cache = BlockIndexCache},
SlotIndex = from_list(SlotList),
UpdSummary = Summary#summary{index = SlotIndex},
leveled_log:log("SST03", [Filename,
Summary#summary.size,
Summary#summary.max_sqn]),
{UpdState1#state{summary = UpdSummary,
handle = Handle,
filename = Filename},
Bloom}.
%% @doc
%% Encode the file options as a version byte of flags: bit 1 set for lz4
%% compression (native and none can be treated the same once written, as
%% the reader does not need to know - compression info will be in the
%% header of each block), bit 2 set when the slot index carries
%% last-modified dates.
gen_fileversion(PressMethod, IdxModDate) ->
    CompressBit =
        case PressMethod of
            lz4 -> 1;
            native -> 0;
            none -> 0
        end,
    ModDateBit =
        case IdxModDate of
            true -> 2;
            false -> 0
        end,
    CompressBit bor ModDateBit.
%% @doc
%% Decode the version byte written by gen_fileversion/2 into state: bit 1
%% selects the compression method (lz4 when set, otherwise native), bit 2
%% whether the slot index includes last-modified dates.
imp_fileversion(VersionInt, State) ->
    PressMethod =
        case VersionInt band 1 of
            0 -> native;
            1 -> lz4
        end,
    IdxModDate =
        case VersionInt band 2 of
            0 -> false;
            2 -> true
        end,
    State#state{compression_method = PressMethod,
                index_moddate = IdxModDate}.
%% @doc
%% Open the file and read the 9-byte header (1-byte version, 4-byte slots
%% length, 4-byte summary length), then pread the summary binary which
%% sits after the slots section
open_reader(Filename) ->
{ok, Handle} = file:open(Filename, [binary, raw, read]),
{ok, Lengths} = file:pread(Handle, 0, 9),
<<FileVersion:8/integer,
SlotsLength:32/integer,
SummaryLength:32/integer>> = Lengths,
{ok, SummaryBin} = file:pread(Handle, SlotsLength + 9, SummaryLength),
{Handle, FileVersion, SummaryBin}.
%% @doc
%% Serialise the table summary, prefixed with a CRC of the serialised
%% term.  The SlotIndex arrives in reverse order (built by prepending),
%% so its head holds the overall last key, and it is re-reversed before
%% serialisation
build_table_summary(SlotIndex, _Level, FirstKey, SlotCount, MaxSQN, Bloom) ->
[{LastKey, _LastV}|_Rest] = SlotIndex,
Summary = #summary{first_key = FirstKey,
last_key = LastKey,
size = SlotCount,
max_sqn = MaxSQN},
SummBin =
term_to_binary({Summary, Bloom, lists:reverse(SlotIndex)},
?BINARY_SETTINGS),
SummCRC = hmac(SummBin),
<<SummCRC:32/integer, SummBin/binary>>.
%% @doc
%% Check the CRC prefix and deserialise the table summary.  A CRC
%% mismatch deliberately crashes (if_clause) - let it crash
read_table_summary(BinWithCheck) ->
<<SummCRC:32/integer, SummBin/binary>> = BinWithCheck,
CRCCheck = hmac(SummBin),
if
CRCCheck == SummCRC ->
% If the CRC check fails it might be possible to rebuild the
% summary from all the slots - but for now, crash
binary_to_term(SummBin)
end.
%% @doc
%% Build the slot index, block index cache, combined slots binary and
%% bloom from the list of slot details.  The initial position is 9 to
%% account for the file header (1-byte version plus two 4-byte lengths),
%% and slot IDs start at 1
build_all_slots(SlotList) ->
SlotCount = length(SlotList),
{SlotIndex, BlockIndex, SlotsBin, HashLists} =
build_all_slots(SlotList,
9,
1,
[],
array:new([{size, SlotCount},
{default, none}]),
<<>>,
[]),
Bloom = leveled_ebloom:create_bloom(HashLists),
{SlotCount, SlotIndex, BlockIndex, SlotsBin, Bloom}.
%% @doc
%% Accumulate per slot: the slot index entry (keyed by the slot's last
%% key, prepended - so the result is in reverse slot order), the block
%% index header (cached by slot ID), the combined slots binary, and the
%% hash lists for the bloom.  Hash lists are gathered as a list of lists
%% (prepended) and flattened once in the base clause - preserving the
%% original slot order but avoiding an O(n^2) lists:append/2 on every
%% iteration.  Assumes the hash-list accumulator is seeded with [] (as
%% build_all_slots/1 does).
build_all_slots([], _Pos, _SlotID,
                SlotIdxAcc, BlockIdxAcc, SlotBinAcc, HashLists) ->
    {SlotIdxAcc, BlockIdxAcc, SlotBinAcc,
        lists:append(lists:reverse(HashLists))};
build_all_slots([SlotD|Rest], Pos, SlotID,
                SlotIdxAcc, BlockIdxAcc, SlotBinAcc, HashLists) ->
    {BlockIdx, SlotBin, HashList, LastKey} = SlotD,
    Length = byte_size(SlotBin),
    SlotIndexV = #slot_index_value{slot_id = SlotID,
                                    start_position = Pos,
                                    length = Length},
    build_all_slots(Rest,
                    Pos + Length,
                    SlotID + 1,
                    [{LastKey, SlotIndexV}|SlotIdxAcc],
                    array:set(SlotID - 1, BlockIdx, BlockIdxAcc),
                    <<SlotBinAcc/binary, SlotBin/binary>>,
                    [HashList|HashLists]).
%% @doc
%% From a root filename produce the pending (.pnd) and final (.sst)
%% filenames as a pair.  Any existing extension is stripped first.
generate_filenames(RootFilename) ->
    case filename:extension(RootFilename) of
        [] ->
            Joined = filename:join(filename:split(RootFilename)),
            {Joined ++ ".pnd", Joined ++ ".sst"};
        Ext ->
            Stem = filename:join(filename:dirname(RootFilename),
                                    filename:basename(RootFilename, Ext)),
            {Stem ++ ".pnd", Stem ++ ".sst"}
    end.
-spec serialise_block(any(), press_method()) -> binary().
%% @doc
%% Convert term to binary
%% Function split out to make it easier to experiment with different
%% compression methods. Also, perhaps standardise application of CRC
%% checks
%% In all cases a 4-byte CRC of the (possibly compressed) payload is
%% appended, to be verified by deserialise_block/2
serialise_block(Term, lz4) ->
{ok, Bin} = lz4:pack(term_to_binary(Term)),
CRC32 = hmac(Bin),
<<Bin/binary, CRC32:32/integer>>;
serialise_block(Term, native) ->
Bin = term_to_binary(Term, ?BINARY_SETTINGS),
CRC32 = hmac(Bin),
<<Bin/binary, CRC32:32/integer>>;
serialise_block(Term, none) ->
Bin = term_to_binary(Term),
CRC32 = hmac(Bin),
<<Bin/binary, CRC32:32/integer>>.
-spec deserialise_block(binary(), press_method()) -> any().
%% @doc
%% Convert binary to term
%% Function split out to make it easier to experiment with different
%% compression methods.
%%
%% If CRC check fails we treat all the data as missing
deserialise_block(Bin, PressMethod) ->
% The trailing 4 bytes are the CRC appended by serialise_block/2
BinS = byte_size(Bin) - 4,
<<TermBin:BinS/binary, CRC32:32/integer>> = Bin,
case hmac(TermBin) of
CRC32 ->
deserialise_checkedblock(TermBin, PressMethod);
_ ->
% CRC failure - return an empty block rather than crash
[]
end.
% Deserialise a block whose CRC has already been verified
deserialise_checkedblock(Bin, lz4) ->
{ok, Bin0} = lz4:unpack(Bin),
binary_to_term(Bin0);
deserialise_checkedblock(Bin, _Other) ->
% native or none can be treated the same
binary_to_term(Bin).
-spec hmac(binary()|integer()) -> integer().
%% @doc
%% Perform a CRC check on an input
%% For integers a simple xor with ?FLIPPER32 is used instead of a CRC
hmac(Bin) when is_binary(Bin) ->
erlang:crc32(Bin);
hmac(Int) when is_integer(Int) ->
Int bxor ?FLIPPER32.
%%%============================================================================
%%% SlotIndex Implementation
%%%============================================================================
%% The Slot Index is stored as a flat (sorted) list of {Key, Slot} where Key
%% is the last key within the slot.
%%
%% This implementation of the SlotIndex uses leveled_tree
% Build the slot index tree from an ordered {LastKey, Slot} list
from_list(SlotList) ->
leveled_tree:from_orderedlist(SlotList, ?TREE_TYPE, ?TREE_SIZE).
% Find the slot which may contain Key
lookup_slot(Key, Tree) ->
StartKeyFun =
fun(_V) ->
all
end,
% The penciller should never ask for presence out of range - so will
% always return a slot (As we don't compare to StartKey)
{_LK, Slot} = leveled_tree:search(Key, Tree, StartKeyFun),
Slot.
% Find all slots which may contain keys in the range, returning the slot
% list and a boolean indicating whether the last slot needs trimming at
% EndKey (true when the range ends before the last slot's last key)
lookup_slots(StartKey, EndKey, Tree) ->
StartKeyFun =
fun(_V) ->
all
end,
MapFun =
fun({_LK, Slot}) ->
Slot
end,
SlotList = leveled_tree:search_range(StartKey, EndKey, Tree, StartKeyFun),
{EK, _EndSlot} = lists:last(SlotList),
{lists:map(MapFun, SlotList), not leveled_codec:endkey_passed(EK, EndKey)}.
%%%============================================================================
%%% Slot Implementation
%%%============================================================================
%% Implementing a slot has gone through numerous iterations. One of the most
%% critical considerations has been the cost of the binary_to_term and
%% term_to_binary calls for different sizes of slots and different data types.
%%
%% Microbenchmarking indicated that flat lists were the fastest at sst build
%% time. However, the lists need scanning at query time - and so give longer
%% lookups. Bigger slots did better at term_to_binary time. However
%% binary_to_term is an often repeated task, and this is better with smaller
%% slots.
%%
%% The outcome has been to divide the slot into four small blocks to minimise
%% the binary_to_term time. A binary index is provided for the slot for all
%% Keys that are directly fetchable (i.e. standard keys not index keys).
%%
%% The division and use of a list saves about 100 microseconds per fetch when
%% compared to using a 128-member gb:tree.
%%
%% The binary index is cacheable and doubles as a not_present filter, as it is
%% based on a 17-bit hash (so 0.0039 fpr).
-spec accumulate_positions(leveled_codec:ledger_kv(),
{binary(),
non_neg_integer(),
list(non_neg_integer()),
leveled_codec:last_moddate()}) ->
{binary(),
non_neg_integer(),
list(non_neg_integer()),
leveled_codec:last_moddate()}.
%% @doc
%% Fold function use to accumulate the position information needed to
%% populate the summary of the slot
%% The accumulator holds: the position index binary (built by prepending,
%% so intended for use with lists:foldr), a count of consecutive
%% non-hashed positions not yet flushed into the binary, the list of
%% hashes for the bloom, and the running max last-modified date
accumulate_positions({K, V}, {PosBinAcc, NoHashCount, HashAcc, LMDAcc}) ->
{_SQN, H1, LMD} = leveled_codec:strip_to_indexdetails({K, V}),
LMDAcc0 = take_max_lastmoddate(LMD, LMDAcc),
PosH1 = extract_hash(H1),
case is_integer(PosH1) of
true ->
case NoHashCount of
0 ->
% Hashed position: a 1-bit flag then the 15-bit hash
{<<1:1/integer, PosH1:15/integer,PosBinAcc/binary>>,
0,
[H1|HashAcc],
LMDAcc0};
N ->
% The No Hash Count is an integer between 0 and 127
% and so at read time should count NHC + 1
NHC = N - 1,
{<<1:1/integer,
PosH1:15/integer,
0:1/integer,
NHC:7/integer,
PosBinAcc/binary>>,
0,
HashAcc,
LMDAcc0}
end;
false ->
% Non-hashed (no_lookup) position - just bump the skip count
{PosBinAcc, NoHashCount + 1, HashAcc, LMDAcc0}
end.
-spec take_max_lastmoddate(leveled_codec:last_moddate(),
leveled_codec:last_moddate()) ->
leveled_codec:last_moddate().
%% @doc
%% Get the last modified date. If no Last Modified Date on any object, can't
%% add the accelerator and should check each object in turn
%% (?FLIPPER32 acts as the sentinel for "no usable LMD")
take_max_lastmoddate(undefined, _LMDAcc) ->
?FLIPPER32;
take_max_lastmoddate(LMD, LMDAcc) ->
max(LMD, LMDAcc).
-spec generate_binary_slot(leveled_codec:maybe_lookup(),
list(leveled_codec:ledger_kv()),
press_method(),
boolean(),
build_timings()) ->
{{binary(),
binary(),
list(integer()),
leveled_codec:ledger_key()},
build_timings()}.
%% @doc
%% Generate the serialised slot to be used when storing this sublist of keys
%% and values
%% Returns the slot header, the full slot binary, the hash list for the
%% bloom, and the last key in the slot - plus updated build timings
generate_binary_slot(Lookup, KVL, PressMethod, IndexModDate, BuildTimings0) ->
SW0 = os:timestamp(),
% Build the position index binary (foldr so positions appear in key
% order), the hash list, and the max last-modified date
{HashL, PosBinIndex, LMD} =
case Lookup of
lookup ->
InitAcc = {<<>>, 0, [], 0},
{PosBinIndex0, NHC, HashL0, LMD0} =
lists:foldr(fun accumulate_positions/2, InitAcc, KVL),
PosBinIndex1 =
case NHC of
0 ->
PosBinIndex0;
_ ->
% Flush any trailing run of non-hashed positions
N = NHC - 1,
<<0:1/integer, N:7/integer, PosBinIndex0/binary>>
end,
{HashL0, PosBinIndex1, LMD0};
no_lookup ->
% No fetchable keys - a single "skip 128" entry
{[], <<0:1/integer, 127:7/integer>>, 0}
end,
BuildTimings1 = update_buildtimings(SW0, BuildTimings0, slot_hashlist),
SW1 = os:timestamp(),
{SideBlockSize, MidBlockSize} =
case Lookup of
lookup ->
?LOOK_BLOCKSIZE;
no_lookup ->
?NOLOOK_BLOCKSIZE
end,
% Divide the KV list into up to 5 blocks (2 side blocks, a mid block,
% then 2 more side blocks), serialising each independently; unused
% blocks are empty binaries
{B1, B2, B3, B4, B5} =
case length(KVL) of
L when L =< SideBlockSize ->
{serialise_block(KVL, PressMethod),
<<0:0>>,
<<0:0>>,
<<0:0>>,
<<0:0>>};
L when L =< 2 * SideBlockSize ->
{KVLA, KVLB} = lists:split(SideBlockSize, KVL),
{serialise_block(KVLA, PressMethod),
serialise_block(KVLB, PressMethod),
<<0:0>>,
<<0:0>>,
<<0:0>>};
L when L =< (2 * SideBlockSize + MidBlockSize) ->
{KVLA, KVLB_Rest} = lists:split(SideBlockSize, KVL),
{KVLB, KVLC} = lists:split(SideBlockSize, KVLB_Rest),
{serialise_block(KVLA, PressMethod),
serialise_block(KVLB, PressMethod),
serialise_block(KVLC, PressMethod),
<<0:0>>,
<<0:0>>};
L when L =< (3 * SideBlockSize + MidBlockSize) ->
{KVLA, KVLB_Rest} = lists:split(SideBlockSize, KVL),
{KVLB, KVLC_Rest} = lists:split(SideBlockSize, KVLB_Rest),
{KVLC, KVLD} = lists:split(MidBlockSize, KVLC_Rest),
{serialise_block(KVLA, PressMethod),
serialise_block(KVLB, PressMethod),
serialise_block(KVLC, PressMethod),
serialise_block(KVLD, PressMethod),
<<0:0>>};
L when L =< (4 * SideBlockSize + MidBlockSize) ->
{KVLA, KVLB_Rest} = lists:split(SideBlockSize, KVL),
{KVLB, KVLC_Rest} = lists:split(SideBlockSize, KVLB_Rest),
{KVLC, KVLD_Rest} = lists:split(MidBlockSize, KVLC_Rest),
{KVLD, KVLE} = lists:split(SideBlockSize, KVLD_Rest),
{serialise_block(KVLA, PressMethod),
serialise_block(KVLB, PressMethod),
serialise_block(KVLC, PressMethod),
serialise_block(KVLD, PressMethod),
serialise_block(KVLE, PressMethod)}
end,
BuildTimings2 = update_buildtimings(SW1, BuildTimings1, slot_serialise),
SW2 = os:timestamp(),
% B1P is the offset from the header to the first block (the position
% index plus block lengths, plus the LMD when present)
B1P =
case IndexModDate of
true ->
byte_size(PosBinIndex) + ?BLOCK_LENGTHS_LENGTH + ?LMD_LENGTH;
false ->
byte_size(PosBinIndex) + ?BLOCK_LENGTHS_LENGTH
end,
CheckB1P = hmac(B1P),
B1L = byte_size(B1),
B2L = byte_size(B2),
B3L = byte_size(B3),
B4L = byte_size(B4),
B5L = byte_size(B5),
% Header: the five block lengths, the (optional) LMD, then the
% position index binary
Header =
case IndexModDate of
true ->
<<B1L:32/integer,
B2L:32/integer,
B3L:32/integer,
B4L:32/integer,
B5L:32/integer,
LMD:32/integer,
PosBinIndex/binary>>;
false ->
<<B1L:32/integer,
B2L:32/integer,
B3L:32/integer,
B4L:32/integer,
B5L:32/integer,
PosBinIndex/binary>>
end,
CheckH = hmac(Header),
SlotBin = <<CheckB1P:32/integer, B1P:32/integer,
CheckH:32/integer, Header/binary,
B1/binary, B2/binary, B3/binary, B4/binary, B5/binary>>,
{LastKey, _LV} = lists:last(KVL),
BuildTimings3 = update_buildtimings(SW2, BuildTimings2, slot_finish),
{{Header, SlotBin, HashL, LastKey}, BuildTimings3}.
-spec check_blocks(list(integer()),
binary()|{file:io_device(), integer()},
binary(),
integer(),
leveled_codec:ledger_key()|false,
press_method(),
boolean(),
list()|not_present) -> list()|not_present.
%% @doc
%% Acc should start as not_present if LedgerKey is a key, and a list if
%% LedgerKey is false
%% When checking for a specific key, returns that {K, V} as soon as found;
%% when LedgerKey is false, returns every {K, V} at the given positions
check_blocks([], _BlockPointer, _BlockLengths, _PosBinLength,
_LedgerKeyToCheck, _PressMethod, _IdxModDate, not_present) ->
not_present;
check_blocks([], _BlockPointer, _BlockLengths, _PosBinLength,
_LedgerKeyToCheck, _PressMethod, _IdxModDate, Acc) ->
% Acc was built by prepending - restore original position order
lists:reverse(Acc);
check_blocks([Pos|Rest], BlockPointer, BlockLengths, PosBinLength,
LedgerKeyToCheck, PressMethod, IdxModDate, Acc) ->
% Convert the flat position into a block number and an offset within
% that block
{BlockNumber, BlockPos} = revert_position(Pos),
BlockBin =
read_block(BlockPointer,
BlockLengths,
PosBinLength,
BlockNumber,
additional_offset(IdxModDate)),
BlockL = deserialise_block(BlockBin, PressMethod),
{K, V} = lists:nth(BlockPos, BlockL),
case K of
LedgerKeyToCheck ->
% Exact key found - return it immediately
{K, V};
_ ->
case LedgerKeyToCheck of
false ->
check_blocks(Rest, BlockPointer,
BlockLengths, PosBinLength,
LedgerKeyToCheck, PressMethod, IdxModDate,
[{K, V}|Acc]);
_ ->
% Segment-hash false positive - skip this position
check_blocks(Rest, BlockPointer,
BlockLengths, PosBinLength,
LedgerKeyToCheck, PressMethod, IdxModDate,
Acc)
end
end.
-spec additional_offset(boolean()) -> pos_integer().
%% @doc
%% Offset from the start of a slot binary to the first block:
%% 4-byte CRC, 4-byte pos, 4-byte CRC, 5x4 byte lengths, and - only when
%% the last-modified-date is indexed - a further 4 byte LMD field.
additional_offset(false) ->
    ?BLOCK_LENGTHS_LENGTH + 4 + 4 + 4;
additional_offset(true) ->
    % As without the LMD index, plus the LMD field itself
    additional_offset(false) + ?LMD_LENGTH.
%% Fetch a single block's binary.  Either a pread is issued against an open
%% file (when given a {Handle, StartPos} pointer), or the block is matched
%% out of a slot binary already held in memory.
read_block({Handle, StartPos}, BlockLengths, PosBinLength, BlockID, AO) ->
    {Offset, Length} = block_offsetandlength(BlockLengths, BlockID),
    BlockPosition = StartPos + Offset + PosBinLength + AO,
    {ok, BlockBin} = file:pread(Handle, BlockPosition, Length),
    BlockBin;
read_block(SlotBin, BlockLengths, PosBinLength, BlockID, AO) ->
    {Offset, Length} = block_offsetandlength(BlockLengths, BlockID),
    SkipLength = Offset + PosBinLength + AO,
    <<_Skipped:SkipLength/binary, BlockBin:Length/binary, _Tail/binary>> =
        SlotBin,
    BlockBin.
%% Read a whole slot's binary from disk, using the position and length
%% recorded in the slot_index_value record.
read_slot(Handle, Slot) ->
    {ok, SlotBin} = file:pread(Handle,
                                Slot#slot_index_value.start_position,
                                Slot#slot_index_value.length),
    SlotBin.
%% Convert a slot pointer - with or without a leading Pid element - into a
%% flat tuple of {StartPosition, Length, SlotID, StartKey, EndKey}.
pointer_mapfun({pointer, _Pid, Slot, SK, EK}) ->
    % Strip the Pid and handle as a plain pointer
    pointer_mapfun({pointer, Slot, SK, EK});
pointer_mapfun({pointer, Slot, SK, EK}) ->
    {Slot#slot_index_value.start_position,
        Slot#slot_index_value.length,
        Slot#slot_index_value.slot_id,
        SK,
        EK}.
-spec binarysplit_mapfun(binary(), integer()) -> fun().
%% @doc
%% Return a function that can pull individual slot binaries from a binary
%% covering multiple slots.  StartPos is the file offset at which the
%% multi-slot binary began, so each slot's file position can be converted
%% into an offset within the binary.
binarysplit_mapfun(MultiSlotBin, StartPos) ->
    fun({SP, L, ID, SK, EK}) ->
        Offset = SP - StartPos,
        <<_Skip:Offset/binary, SlotBin:L/binary, _Tail/binary>> =
            MultiSlotBin,
        {SlotBin, ID, SK, EK}
    end.
-spec read_slots(file:io_device(), list(),
                    {false|list(), non_neg_integer(), binary()},
                    press_method(), boolean()) -> list(binaryslot_element()).
%% @doc
%% The reading of slots will return a list of either 2-tuples containing
%% {K, V} pairs - or 3-tuples containing {Binary, SK, EK}.  The 3 tuples
%% can be exploded into lists of {K, V} pairs using the binaryslot_reader/4
%% function
%%
%% Reading slots is generally unfiltered, but in the special case when
%% querying across slots when only matching segment IDs are required the
%% BlockIndexCache can be used
%%
%% Note that false positives will be passed through.  It is important that
%% any key comparison between levels should allow for a non-matching key to
%% be considered as superior to a matching key - as otherwise a matching key
%% may be intermittently removed from the result set
read_slots(Handle, SlotList, {false, 0, _BlockIndexCache},
                _PressMethod, _IdxModDate) ->
    % No list of segments passed or useful Low LastModified Date
    % Just read slots in SlotList
    read_slotlist(SlotList, Handle);
read_slots(Handle, SlotList, {SegList, LowLastMod, BlockIndexCache},
                PressMethod, IdxModDate) ->
    % List of segments passed so only {K, V} pairs matching those segments
    % should be returned.  This required the {K, V} pair to have been added
    % with the appropriate hash - if the pair were added with no_lookup as
    % the hash value this will fail unexpectedly.
    BinMapFun =
        fun(Pointer, Acc) ->
            {SP, _L, ID, SK, EK} = pointer_mapfun(Pointer),
            % Block index cache is zero-based on slot ID
            CachedHeader = array:get(ID - 1, BlockIndexCache),
            case extract_header(CachedHeader, IdxModDate) of
                none ->
                    % If there is an attempt to use the seg list query and the
                    % index block cache isn't cached for any part this may be
                    % slower as each slot will be read in turn
                    Acc ++ read_slotlist([Pointer], Handle);
                {BlockLengths, LMD, BlockIdx} ->
                    % If there is a BlockIndex cached then we can use it to
                    % check to see if any of the expected segments are
                    % present without lifting the slot off disk. Also the
                    % fact that we know position can be used to filter out
                    % other keys
                    %
                    % Note that LMD will be 0 if the indexing of last mod
                    % date was not enable at creation time.  So in this
                    % case the filter should always map
                    case LowLastMod > LMD of
                        true ->
                            % The highest LMD on the slot was before the
                            % LowLastMod date passed in the query - therefore
                            % there are no interesting modifications in this
                            % slot - it is all too old
                            Acc;
                        false ->
                            case SegList of
                                false ->
                                    % Need all the slot now
                                    Acc ++ read_slotlist([Pointer], Handle);
                                _SL ->
                                    % Need to find just the right keys
                                    PositionList =
                                        find_pos(BlockIdx, SegList, [], 0),
                                    % Note check_blocks should return [] if
                                    % PositionList is empty (which it may be)
                                    KVL =
                                        check_blocks(PositionList,
                                                        {Handle, SP},
                                                        BlockLengths,
                                                        byte_size(BlockIdx),
                                                        false,
                                                        PressMethod,
                                                        IdxModDate,
                                                        []),
                                    % There is no range passed through to the
                                    % binaryslot_reader, so these results need
                                    % to be filtered
                                    FilterFun =
                                        fun(KV) -> in_range(KV, SK, EK) end,
                                    Acc ++ lists:filter(FilterFun, KVL)
                            end
                    end
            end
        end,
    lists:foldl(BinMapFun, [], SlotList).
-spec in_range(leveled_codec:ledger_kv(),
                range_endpoint(), range_endpoint()) -> boolean().
%% @doc
%% Is the ledger key within the query range, where the atom all represents
%% an unbounded endpoint on that side.
in_range({_LK, _LV}, all, all) ->
    true;
in_range({LK, _LV}, all, EK) ->
    not leveled_codec:endkey_passed(EK, LK);
in_range({LK, _LV} = KV, SK, EK) ->
    % Check the start bound directly, then re-use the end-bound clause
    (LK >= SK) and in_range(KV, all, EK).
%% Read every slot pointed at in SlotList from disk with a single pread,
%% then split the combined binary back into the individual slot binaries.
read_slotlist(SlotList, Handle) ->
    LengthList = [pointer_mapfun(Pointer) || Pointer <- SlotList],
    {MultiSlotBin, StartPos} = read_length_list(Handle, LengthList),
    SplitFun = binarysplit_mapfun(MultiSlotBin, StartPos),
    [SplitFun(LengthEntry) || LengthEntry <- LengthList].
-spec binaryslot_reader(list(binaryslot_element()),
                            press_method(),
                            boolean(),
                            leveled_codec:segment_list())
                                -> {list({tuple(), tuple()}),
                                    list({integer(), binary()})}.
%% @doc
%% Read the binary slots converting them to {K, V} pairs if they were not
%% already {K, V} pairs.  If they are already {K, V} pairs it is assumed
%% that they have already been range checked before extraction.
%%
%% Keys which are still to be extracted from the slot, are accompanied at
%% this function by the range against which the keys need to be checked.
%% This range is passed with the slot to binaryslot_trimmedlist which should
%% open the slot block by block, filtering individual keys where the endpoints
%% of the block are outside of the range, and leaving blocks already proven to
%% be outside of the range unopened.
binaryslot_reader(SlotBinsToFetch, PressMethod, IdxModDate, SegList) ->
    % Two accumulators are added.
    % One to collect the list of keys and values found in the binary slots
    % (subject to range filtering if the slot is still deserialised at this
    % stage.
    % The second accumulator extracts the header information from the slot,
    % so that the cache can be built for that slot.  This is used by the
    % handling of get_kvreader calls.  This means that slots which are only
    % used in range queries can still populate their block_index caches (on
    % the FSM loop state), and those caches can be used for future queries.
    binaryslot_reader(SlotBinsToFetch,
                        PressMethod, IdxModDate, SegList, [], []).
%% Fold over the fetched slot elements.  The {K, V} accumulator is built in
%% reverse and flipped once in the base clause, rather than appending with
%% ++ on every pass (which would make the fold quadratic in the number of
%% entries).  The header accumulator keeps its original (reversed) order.
binaryslot_reader([], _PressMethod, _IdxModDate, _SegList, Acc, BIAcc) ->
    {lists:reverse(Acc), BIAcc};
binaryslot_reader([{SlotBin, ID, SK, EK}|Tail],
                    PressMethod, IdxModDate, SegList, Acc, BIAcc) ->
    % The start key and end key here, may not be the start key and end key
    % the application passed into the query.  If the slot is known to lie
    % entirely inside the range, on either or both sides, the SK and EK may
    % be substituted for the 'all' keyword to indicate there is no need for
    % entries in this slot to be trimmed from either or both sides.
    {TrimmedL, BICache} =
        binaryslot_trimmedlist(SlotBin,
                                SK, EK,
                                PressMethod,
                                IdxModDate,
                                SegList),
    % lists:reverse/2 prepends TrimmedL in reverse; the final reverse in
    % the base clause restores the overall order
    binaryslot_reader(Tail,
                        PressMethod,
                        IdxModDate,
                        SegList,
                        lists:reverse(TrimmedL, Acc),
                        [{ID, BICache}|BIAcc]);
binaryslot_reader([{K, V}|Tail],
                    PressMethod, IdxModDate, SegList, Acc, BIAcc) ->
    % These entries must already have been filtered for membership inside
    % any range used in the query.
    binaryslot_reader(Tail,
                        PressMethod, IdxModDate, SegList,
                        [{K, V}|Acc], BIAcc).
%% Issue a single pread spanning from the start of the first slot in the
%% list to the end of the last (the entries are {Position, Length, ...}
%% tuples, assumed contiguous and ordered by position).
read_length_list(Handle, LengthList) ->
    FirstEntry = hd(LengthList),
    LastEntry = lists:last(LengthList),
    StartPos = element(1, FirstEntry),
    EndPos = element(1, LastEntry) + element(2, LastEntry),
    {ok, MultiSlotBin} = file:pread(Handle, StartPos, EndPos - StartPos),
    {MultiSlotBin, StartPos}.
-spec extract_header(binary()|none, boolean()) ->
                        {binary(), integer(), binary()}|none.
%% @doc
%% Helper for extracting the binaries from the header ignoring the missing
%% LMD if LMD is not indexed.  Returns {BlockLengths, LMD, PosBinIndex},
%% with LMD defaulted to 0 when not indexed.
extract_header(none, _IdxModDate) ->
    none; % used when the block cache has returned none
extract_header(Header, true) ->
    % LMD is indexed - a 32-bit LMD field follows the block lengths
    BL = ?BLOCK_LENGTHS_LENGTH,
    <<BlockLengths:BL/binary, LMD:32/integer, PosBinIndex/binary>> = Header,
    {BlockLengths, LMD, PosBinIndex};
extract_header(Header, false) ->
    % No LMD indexed - report 0 so date filters always pass
    BL = ?BLOCK_LENGTHS_LENGTH,
    <<BlockLengths:BL/binary, PosBinIndex/binary>> = Header,
    {BlockLengths, 0, PosBinIndex}.
%% Fetch a single Key's {K, V} from a slot binary, using the position index
%% in the header to limit which block positions need be checked.  Returns
%% {Result, Header} where Result is the pair or not_present, and Header can
%% be cached by the caller (none if the CRC check failed).
binaryslot_get(FullBin, Key, Hash, PressMethod, IdxModDate) ->
    case crc_check_slot(FullBin) of
        {Header, Blocks} ->
            {BlockLengths, _LMD, PosBinIndex} =
                extract_header(Header, IdxModDate),
            PosList = find_pos(PosBinIndex,
                                extract_hash(Hash),
                                [],
                                0),
            {fetch_value(PosList, BlockLengths, Blocks, Key, PressMethod),
                Header};
        crc_wonky ->
            % Corrupt slot - treat as a miss rather than crash
            {not_present,
                none}
    end.
%% Expand a whole slot binary into its full (ordered) list of {K, V} pairs,
%% deserialising each of the five blocks in turn.  Returns [] if the CRC
%% check fails.
binaryslot_tolist(FullBin, PressMethod, IdxModDate) ->
    BlockFetchFun =
        fun(Length, {Acc, Bin}) ->
            case Length of
                0 ->
                    % Empty block - nothing to deserialise
                    {Acc, Bin};
                _ ->
                    <<Block:Length/binary, Rest/binary>> = Bin,
                    {Acc ++ deserialise_block(Block, PressMethod), Rest}
            end
        end,
    {Out, _Rem} =
        case crc_check_slot(FullBin) of
            {Header, Blocks} ->
                {BlockLengths, _LMD, _PosBinIndex} =
                    extract_header(Header, IdxModDate),
                <<B1L:32/integer,
                    B2L:32/integer,
                    B3L:32/integer,
                    B4L:32/integer,
                    B5L:32/integer>> = BlockLengths,
                lists:foldl(BlockFetchFun,
                                {[], Blocks},
                                [B1L, B2L, B3L, B4L, B5L]);
            crc_wonky ->
                {[], <<>>}
        end,
    Out.
%% Expand a slot binary into the {K, V} pairs within the given range,
%% opening as few blocks as possible.  Returns {KVList, HeaderOrNone} where
%% the header is only returned when it was extracted (for cache population).
binaryslot_trimmedlist(FullBin, all, all,
                        PressMethod, IdxModDate, false) ->
    % Unbounded range with no segment filter - take the whole slot
    {binaryslot_tolist(FullBin, PressMethod, IdxModDate), none};
binaryslot_trimmedlist(FullBin, StartKey, EndKey,
                        PressMethod, IdxModDate, SegList) ->
    LTrimFun = fun({K, _V}) -> K < StartKey end,
    RTrimFun = fun({K, _V}) -> not leveled_codec:endkey_passed(EndKey, K) end,
    % Fold a block (either a still-serialised binary or an already
    % deserialised list) into the accumulator, trimming at the range
    % endpoints.  Continue is false once the end of the range is passed.
    BlockCheckFun =
        fun(Block, {Acc, Continue}) ->
            case {Block, Continue} of
                {<<>>, _} ->
                    % Empty block terminates the scan
                    {Acc, false};
                {_, true} ->
                    BlockList =
                        case is_binary(Block) of
                            true ->
                                deserialise_block(Block, PressMethod);
                            false ->
                                Block
                        end,
                    case fetchend_rawblock(BlockList) of
                        {LastKey, _LV} when StartKey > LastKey ->
                            % Whole block before the range - skip it
                            {Acc, true};
                        {LastKey, _LV} ->
                            {_LDrop, RKeep} = lists:splitwith(LTrimFun,
                                                                BlockList),
                            case leveled_codec:endkey_passed(EndKey,
                                                                LastKey) of
                                true ->
                                    % Range ends inside this block - trim
                                    % the right side and stop
                                    {LKeep, _RDrop}
                                        = lists:splitwith(RTrimFun, RKeep),
                                    {Acc ++ LKeep, false};
                                false ->
                                    {Acc ++ RKeep, true}
                            end;
                        _ ->
                            {Acc, true}
                    end;
                {_ , false} ->
                    {Acc, false}
            end
        end,
    case {crc_check_slot(FullBin), SegList} of
        % It will be more efficient to check a subset of blocks.  To work out
        % the best subset we always look in the middle block of 5, and based on
        % the first and last keys of that middle block when compared to the Start
        % and EndKey of the query determines a subset of blocks
        %
        % This isn't perfectly efficient, especially if the query overlaps Block2
        % and Block3 (as Block 1 will also be checked), but finessing this last
        % scenario is hard to do in concise code
        {{Header, Blocks}, false} ->
            {BlockLengths, _LMD, _PosBinIndex} =
                extract_header(Header, IdxModDate),
            <<B1L:32/integer,
                B2L:32/integer,
                B3L:32/integer,
                B4L:32/integer,
                B5L:32/integer>> = BlockLengths,
            <<Block1:B1L/binary, Block2:B2L/binary,
                MidBlock:B3L/binary,
                Block4:B4L/binary, Block5:B5L/binary>> = Blocks,
            BlocksToCheck =
                case B3L of
                    0 ->
                        % No middle block - at most two blocks exist
                        [Block1, Block2];
                    _ ->
                        MidBlockList =
                            deserialise_block(MidBlock, PressMethod),
                        {MidFirst, _} = lists:nth(1, MidBlockList),
                        {MidLast, _} = lists:last(MidBlockList),
                        Split = {StartKey > MidLast,
                                    StartKey >= MidFirst,
                                    leveled_codec:endkey_passed(EndKey,
                                                                MidFirst),
                                    leveled_codec:endkey_passed(EndKey,
                                                                MidLast)},
                        case Split of
                            {true, _, _, _} ->
                                [Block4, Block5];
                            {false, true, false, true} ->
                                [MidBlockList];
                            {false, true, false, false} ->
                                [MidBlockList, Block4, Block5];
                            {false, false, true, true} ->
                                [Block1, Block2];
                            {false, false, false, true} ->
                                [Block1, Block2, MidBlockList];
                            _ ->
                                [Block1, Block2, MidBlockList, Block4, Block5]
                        end
                end,
            {Acc, _Continue} = lists:foldl(BlockCheckFun, {[], true}, BlocksToCheck),
            {Acc, none};
        {{Header, _Blocks}, SegList} ->
            % Segment filter present - use the position index to check only
            % the matching positions, then let the caller range-filter
            {BlockLengths, _LMD, BlockIdx} = extract_header(Header, IdxModDate),
            PosList = find_pos(BlockIdx, SegList, [], 0),
            KVL = check_blocks(PosList,
                                FullBin,
                                BlockLengths,
                                byte_size(BlockIdx),
                                false,
                                PressMethod,
                                IdxModDate,
                                []),
            {KVL, Header};
        {crc_wonky, _} ->
            {[], none}
    end.
%% Validate the two CRCs at the head of a slot binary - one covering the
%% header-length field and one covering the header itself.  Returns the
%% split {Header, Blocks} on success, or crc_wonky (with a log) on failure.
crc_check_slot(FullBin) ->
    <<CRC32PBL:32/integer,
        PosBL:32/integer,
        CRC32H:32/integer,
        Rest/binary>> = FullBin,
    PosBL0 = min(PosBL, byte_size(FullBin) - 12),
    % If the position has been bit-flipped to beyond the maximum possible
    % length, use the maximum possible length
    <<Header:PosBL0/binary, Blocks/binary>> = Rest,
    case {hmac(Header), hmac(PosBL0)} of
        {CRC32H, CRC32PBL} ->
            {Header, Blocks};
        _ ->
            leveled_log:log("SST09", []),
            crc_wonky
    end.
%% From the 5x32-bit block lengths binary, give back the offset of block
%% BlockID from the start of the blocks section (i.e. the sum of the
%% preceding block lengths) together with that block's own length.
block_offsetandlength(BlockLengths, BlockID) ->
    <<B1L:32/integer,
        B2L:32/integer,
        B3L:32/integer,
        B4L:32/integer,
        B5L:32/integer>> = BlockLengths,
    case BlockID of
        1 ->
            {0, B1L};
        2 ->
            {B1L, B2L};
        3 ->
            {B1L + B2L, B3L};
        4 ->
            {B1L + B2L + B3L, B4L};
        5 ->
            {B1L + B2L + B3L + B4L, B5L}
    end.
%% Check each candidate position in turn (candidates come from a hash
%% match, so may be false positives), deserialising the block holding the
%% position and comparing the key found there with the sought Key.
fetch_value([], _BlockLengths, _Blocks, _Key, _PressMethod) ->
    not_present;
fetch_value([Pos|Rest], BlockLengths, Blocks, Key, PressMethod) ->
    {BlockNumber, BlockPos} = revert_position(Pos),
    {Offset, Length} = block_offsetandlength(BlockLengths, BlockNumber),
    <<_Pre:Offset/binary, Block:Length/binary, _Rest/binary>> = Blocks,
    RawBlock = deserialise_block(Block, PressMethod),
    case fetchfrom_rawblock(BlockPos, RawBlock) of
        {K, V} when K == Key ->
            {K, V};
        _ ->
            % Hash collision or not_present - try the next candidate
            fetch_value(Rest, BlockLengths, Blocks, Key, PressMethod)
    end.
%% Fetch the {K, V} pair at a given (1-based) position within an already
%% deserialised block; an empty block can never hold the position.
fetchfrom_rawblock(_BlockPos, []) ->
    not_present;
fetchfrom_rawblock(BlockPos, [_|_] = RawBlock) ->
    lists:nth(BlockPos, RawBlock).
%% Fetch the final {K, V} pair of an already deserialised block, or
%% not_present when the block is empty.
fetchend_rawblock([]) ->
    not_present;
fetchend_rawblock([LastKV]) ->
    LastKV;
fetchend_rawblock([_KV|Rest]) ->
    fetchend_rawblock(Rest).
%% Convert a flat position (an index across the whole slot) back into a
%% {BlockNumber, PositionInBlock} pair, both 1-based.  The slot layout is
%% two side-sized blocks, then a mid-sized block, then two more side-sized
%% blocks (sizes from ?LOOK_BLOCKSIZE).
revert_position(Pos) ->
    {SideBlockSize, MidBlockSize} = ?LOOK_BLOCKSIZE,
    case Pos < 2 * SideBlockSize of
        true ->
            % Position falls in block 1 or 2
            {(Pos div SideBlockSize) + 1, (Pos rem SideBlockSize) + 1};
        false ->
            case Pos < (2 * SideBlockSize + MidBlockSize) of
                true ->
                    % Position falls in the middle block (3)
                    {3, ((Pos - 2 * SideBlockSize) rem MidBlockSize) + 1};
                false ->
                    % Position falls in block 4 or 5
                    TailPos = Pos - 2 * SideBlockSize - MidBlockSize,
                    {(TailPos div SideBlockSize) + 4,
                        (TailPos rem SideBlockSize) + 1}
            end
    end.
%%%============================================================================
%%% Merge Functions
%%%============================================================================
%% The source lists are merged into lists of slots before the file is created
%% At Level zero, there will be a single source list - and this will always be
%% split into standard size slots
%%
%% At lower levels there will be two source lists and they will need to be
%% merged to ensure that the best conflicting answer survives and compactable
%% KV pairs are discarded.
%%
%% At lower levels slots can be larger if there are no lookup keys present in
%% the slot. This is to slow the growth of the manifest/number-of-files when
%% large numbers of index keys are present - as well as improving compression
%% ratios in the Ledger.
%%
%% The outcome of merge_lists/3 and merge_lists/5 should be a list of slots.
%% Each slot should be ordered by Key and be of the form {Flag, KVList}, where
%% Flag can either be lookup or no-lookup. The list of slots should also be
%% ordered by Key (i.e. the first key in the slot)
%%
%% For merging ...
%% Compare the keys at the head of the list, and either skip that "best" key or
%% identify as the next key.
%%
%% The logic needs to change if the file is in the basement level, as keys with
%% expired timestamps need not be written at this level
%%
%% The best key is considered to be the lowest key in erlang term order. If
%% there are matching keys then the highest sequence number must be chosen and
%% any lower sequence numbers should be compacted out of existence
-spec merge_lists(list(), press_method(), boolean())
                    -> {list(), list(), list(tuple()), tuple()|null}.
%% @doc
%%
%% Merge from a single list (i.e. at Level 0)
merge_lists(KVList1, PressMethod, IdxModDate) ->
    SlotCount = length(KVList1) div ?LOOK_SLOTSIZE,
    % NOTE(review): lists:nth(1, KVList1) assumes KVList1 is non-empty -
    % confirm callers never pass an empty Level 0 list
    {[],
        [],
        split_lists(KVList1, [], SlotCount, PressMethod, IdxModDate),
        element(1, lists:nth(1, KVList1))}.
%% Split a single sorted KV list into ?LOOK_SLOTSIZE-sized slots, with any
%% final partial slot ("LastPuff") forming the last slot.  Each slot is
%% serialised into its binary form as it is cut.
split_lists([], SlotLists, 0, _PressMethod, _IdxModDate) ->
    lists:reverse(SlotLists);
split_lists(LastPuff, SlotLists, 0, PressMethod, IdxModDate) ->
    % Remaining keys after the full slots have been cut
    {SlotD, _} =
        generate_binary_slot(lookup, LastPuff, PressMethod, IdxModDate, no_timing),
    lists:reverse([SlotD|SlotLists]);
split_lists(KVList1, SlotLists, N, PressMethod, IdxModDate) ->
    {Slot, KVListRem} = lists:split(?LOOK_SLOTSIZE, KVList1),
    {SlotD, _} =
        generate_binary_slot(lookup, Slot, PressMethod, IdxModDate, no_timing),
    split_lists(KVListRem, [SlotD|SlotLists], N - 1, PressMethod, IdxModDate).
-spec merge_lists(list(), list(), tuple(), press_method(), boolean()) ->
                    {list(), list(), list(tuple()), tuple()|null}.
%% @doc
%% Merge lists when merging across more than one file.  KVLists that are
%% provided may include pointers to fetch more Keys/Values from the source
%% file
merge_lists(KVList1, KVList2, LevelInfo, PressMethod, IndexModDate) ->
    % Seed the fold with an empty slot list, no first key, a zero slot
    % count and fresh build timings
    merge_lists(KVList1, KVList2,
                    LevelInfo,
                    [], null, 0,
                    PressMethod,
                    IndexModDate,
                    #build_timings{}).
%% Fold over the two source lists forming slots until either the file is
%% full (?MAX_SLOTS) or both sources are exhausted.  Returns the remaining
%% source lists, the ordered slot list, and the first key of the file.
merge_lists(KVL1, KVL2, LI, SlotList, FirstKey, ?MAX_SLOTS,
                _PressMethod, _IdxModDate, T0) ->
    % This SST file is full, move to complete file, and return the
    % remainder
    log_buildtimings(T0, LI),
    {KVL1, KVL2, lists:reverse(SlotList), FirstKey};
merge_lists([], [], LI, SlotList, FirstKey, _SlotCount,
                _PressMethod, _IdxModDate, T0) ->
    % the source files are empty, complete the file
    log_buildtimings(T0, LI),
    {[], [], lists:reverse(SlotList), FirstKey};
merge_lists(KVL1, KVL2, LI, SlotList, FirstKey, SlotCount,
                PressMethod, IdxModDate, T0) ->
    % Form a slot by merging the two lists until the next 128 K/V pairs have
    % been determined
    SW = os:timestamp(),
    {KVRem1, KVRem2, Slot, FK0} =
        form_slot(KVL1, KVL2, LI, no_lookup, 0, [], FirstKey),
    T1 = update_buildtimings(SW, T0, fold_toslot),
    case Slot of
        {_, []} ->
            % There were no actual keys in the slot (maybe some expired)
            merge_lists(KVRem1,
                        KVRem2,
                        LI,
                        SlotList,
                        FK0,
                        SlotCount,
                        PressMethod,
                        IdxModDate,
                        T1);
        {Lookup, KVL} ->
            % Convert the list of KVs for the slot into a binary, and related
            % metadata
            {SlotD, T2} =
                generate_binary_slot(Lookup, KVL, PressMethod, IdxModDate, T1),
            merge_lists(KVRem1,
                        KVRem2,
                        LI,
                        [SlotD|SlotList],
                        FK0,
                        SlotCount + 1,
                        PressMethod,
                        IdxModDate,
                        T2)
    end.
%% Form a single slot by repeatedly taking the dominant key from the heads
%% of the two source lists.  A slot starts as no_lookup and switches to
%% lookup (with the smaller ?LOOK_SLOTSIZE limit) as soon as a lookup-able
%% key is seen.  FK tracks the first key added across the whole file.
form_slot([], [], _LI, Type, _Size, Slot, FK) ->
    % Both sources exhausted - return whatever has accumulated
    {[], [], {Type, lists:reverse(Slot)}, FK};
form_slot(KVList1, KVList2, _LI, lookup, ?LOOK_SLOTSIZE, Slot, FK) ->
    % Lookup slot is full
    {KVList1, KVList2, {lookup, lists:reverse(Slot)}, FK};
form_slot(KVList1, KVList2, _LI, no_lookup, ?NOLOOK_SLOTSIZE, Slot, FK) ->
    % No-lookup slot is full
    {KVList1, KVList2, {no_lookup, lists:reverse(Slot)}, FK};
form_slot(KVList1, KVList2, {IsBasement, TS}, lookup, Size, Slot, FK) ->
    case {key_dominates(KVList1, KVList2, {IsBasement, TS}), FK} of
        {{{next_key, TopKV}, Rem1, Rem2}, _} ->
            form_slot(Rem1,
                        Rem2,
                        {IsBasement, TS},
                        lookup,
                        Size + 1,
                        [TopKV|Slot],
                        FK);
        {{skipped_key, Rem1, Rem2}, _} ->
            % Key was compacted/expired - size unchanged
            form_slot(Rem1, Rem2, {IsBasement, TS}, lookup, Size, Slot, FK)
    end;
form_slot(KVList1, KVList2, {IsBasement, TS}, no_lookup, Size, Slot, FK) ->
    case key_dominates(KVList1, KVList2, {IsBasement, TS}) of
        {{next_key, {TopK, TopV}}, Rem1, Rem2} ->
            % Record the very first key of the file if not yet set
            FK0 =
                case FK of
                    null ->
                        TopK;
                    _ ->
                        FK
                end,
            case leveled_codec:to_lookup(TopK) of
                no_lookup ->
                    form_slot(Rem1,
                                Rem2,
                                {IsBasement, TS},
                                no_lookup,
                                Size + 1,
                                [{TopK, TopV}|Slot],
                                FK0);
                lookup ->
                    % A lookup key arrived - if the slot has already grown
                    % beyond the lookup limit, close it as no_lookup rather
                    % than switching type over-size
                    case Size >= ?LOOK_SLOTSIZE of
                        true ->
                            {KVList1,
                                KVList2,
                                {no_lookup, lists:reverse(Slot)},
                                FK};
                        false ->
                            form_slot(Rem1,
                                        Rem2,
                                        {IsBasement, TS},
                                        lookup,
                                        Size + 1,
                                        [{TopK, TopV}|Slot],
                                        FK0)
                    end
            end;
        {skipped_key, Rem1, Rem2} ->
            form_slot(Rem1, Rem2, {IsBasement, TS}, no_lookup, Size, Slot, FK)
    end.
%% Determine the next dominant key across the heads of the two source
%% lists, first expanding any leading pointers into real entries.
key_dominates(KL1, KL2, Level) ->
    Expanded1 = maybe_expand_pointer(KL1),
    Expanded2 = maybe_expand_pointer(KL2),
    key_dominates_expanded(Expanded1, Expanded2, Level).
%% Choose between the heads of the two (pointer-free) lists.  Returns
%% {{next_key, KV}, Rem1, Rem2} for the key to keep, or
%% {skipped_key, Rem1, Rem2} when the head should be compacted away
%% (dominated by the other side, or reapable as expired at this Level).
key_dominates_expanded([H1|T1], [], Level) ->
    case leveled_codec:maybe_reap_expiredkey(H1, Level) of
        true ->
            {skipped_key, T1, []};
        false ->
            {{next_key, H1}, T1, []}
    end;
key_dominates_expanded([], [H2|T2], Level) ->
    case leveled_codec:maybe_reap_expiredkey(H2, Level) of
        true ->
            {skipped_key, [], T2};
        false ->
            {{next_key, H2}, [], T2}
    end;
key_dominates_expanded([H1|T1], [H2|T2], Level) ->
    case leveled_codec:key_dominates(H1, H2) of
        left_hand_first ->
            % H1 sorts first - still check it for expiry before keeping
            case leveled_codec:maybe_reap_expiredkey(H1, Level) of
                true ->
                    {skipped_key, T1, [H2|T2]};
                false ->
                    {{next_key, H1}, T1, [H2|T2]}
            end;
        right_hand_first ->
            case leveled_codec:maybe_reap_expiredkey(H2, Level) of
                true ->
                    {skipped_key, [H1|T1], T2};
                false ->
                    {{next_key, H2}, [H1|T1], T2}
            end;
        left_hand_dominant ->
            % Same key - H1 is the newer version, so drop H2
            {skipped_key, [H1|T1], T2};
        right_hand_dominant ->
            % Same key - H2 is the newer version, so drop H1
            {skipped_key, T1, [H2|T2]}
    end.
%% When a list is provided it may include a pointer to gain another batch
%% of entries from the same file, or a new batch of entries from another
%% file
%%
%% This resultant list should include the Tail of any pointers added at the
%% end of the list
maybe_expand_pointer([]) ->
    [];
maybe_expand_pointer([{pointer, SSTPid, Slot, StartKey, all}|Tail]) ->
    % Same-file pointer - expand a batch of ?MERGE_SCANWIDTH slots
    expand_list_by_pointer({pointer, SSTPid, Slot, StartKey, all},
                            Tail,
                            ?MERGE_SCANWIDTH);
maybe_expand_pointer([{next, ManEntry, StartKey}|Tail]) ->
    % Pointer to the next file in the manifest
    expand_list_by_pointer({next, ManEntry, StartKey, all},
                            Tail,
                            ?MERGE_SCANWIDTH);
maybe_expand_pointer(List) ->
    % Head is a real {K, V} - nothing to expand
    List.
%%%============================================================================
%%% Timing Functions
%%%============================================================================
-spec update_buildtimings(erlang:timestamp(), build_timings(), atom())
                                                        -> build_timings().
%% @doc
%%
%% Timings taken from the build of a SST file.  Adds the microseconds
%% elapsed since SW to the counter for the given Stage.
%%
%% There is no sample window, but the no_timing status is still used for
%% level zero files where we're not breaking down the build time in this
%% way.
update_buildtimings(_SW, no_timing, _Stage) ->
    no_timing;
update_buildtimings(SW, Timings, Stage) ->
    Timer = timer:now_diff(os:timestamp(), SW),
    case Stage of
        slot_hashlist ->
            HLT = Timings#build_timings.slot_hashlist + Timer,
            Timings#build_timings{slot_hashlist = HLT};
        slot_serialise ->
            SST = Timings#build_timings.slot_serialise + Timer,
            Timings#build_timings{slot_serialise = SST};
        slot_finish ->
            SFT = Timings#build_timings.slot_finish + Timer,
            Timings#build_timings{slot_finish = SFT};
        fold_toslot ->
            FST = Timings#build_timings.fold_toslot + Timer,
            Timings#build_timings{fold_toslot = FST}
    end.
-spec log_buildtimings(build_timings(), tuple()) -> ok.
%% @doc
%%
%% Log out the time spent during the merge lists part of the SST build
log_buildtimings(Timings, LI) ->
    #build_timings{fold_toslot = FoldTime,
                    slot_hashlist = HashListTime,
                    slot_serialise = SerialiseTime,
                    slot_finish = FinishTime} = Timings,
    leveled_log:log("SST13", [FoldTime,
                                HashListTime,
                                SerialiseTime,
                                FinishTime,
                                element(1, LI),
                                element(2, LI)]).
-spec update_statetimings(sst_timings(), integer())
                                            -> {sst_timings(), integer()}.
%% @doc
%%
%% The timings state is either in countdown to the next set of samples or
%% we are actively collecting a sample.  Active collection takes place
%% when the countdown is 0.  Once the sample has reached the expected count
%% then there is a log of that sample, and the countdown is restarted.
%%
%% Outside of sample windows the timings object should be set to the atom
%% no_timing.  no_timing is a valid state for the sst_timings type.
update_statetimings(no_timing, 0) ->
    % Countdown complete - start collecting a fresh sample
    {#sst_timings{}, 0};
update_statetimings(Timings, 0) ->
    case Timings#sst_timings.sample_count of
        SC when SC >= ?TIMING_SAMPLESIZE ->
            % Sample complete - log it and restart a randomised countdown
            log_timings(Timings),
            {no_timing, leveled_rand:uniform(2 * ?TIMING_SAMPLECOUNTDOWN)};
        _SC ->
            {Timings, 0}
    end;
update_statetimings(no_timing, N) ->
    {no_timing, N - 1}.
%% Log out the accumulated fetch-timing sample; a no-op outside of a
%% sample window.
log_timings(no_timing) ->
    ok;
log_timings(Timings) ->
    leveled_log:log("SST12", [Timings#sst_timings.sample_count,
                                Timings#sst_timings.index_query_time,
                                Timings#sst_timings.lookup_cache_time,
                                Timings#sst_timings.slot_index_time,
                                Timings#sst_timings.fetch_cache_time,
                                Timings#sst_timings.slot_fetch_time,
                                Timings#sst_timings.noncached_block_time,
                                Timings#sst_timings.slot_index_count,
                                Timings#sst_timings.fetch_cache_count,
                                Timings#sst_timings.slot_fetch_count,
                                Timings#sst_timings.noncached_block_count]).
%% Add the elapsed time since SW to the timing counter for the given fetch
%% Stage.  If Continue is true the fetch has further stages, so a new
%% timestamp is returned; otherwise the stage's count and the overall
%% sample count are incremented and the timer is dropped (no_timing).
update_timings(_SW, no_timing, _Stage, _Continue) ->
    {no_timing, no_timing};
update_timings(SW, Timings, Stage, Continue) ->
    Timer = timer:now_diff(os:timestamp(), SW),
    Timings0 =
        case Stage of
            index_query ->
                IQT = Timings#sst_timings.index_query_time,
                Timings#sst_timings{index_query_time = IQT + Timer};
            lookup_cache ->
                TBT = Timings#sst_timings.lookup_cache_time,
                Timings#sst_timings{lookup_cache_time = TBT + Timer};
            slot_index ->
                SIT = Timings#sst_timings.slot_index_time,
                Timings#sst_timings{slot_index_time = SIT + Timer};
            fetch_cache ->
                FCT = Timings#sst_timings.fetch_cache_time,
                Timings#sst_timings{fetch_cache_time = FCT + Timer};
            slot_fetch ->
                SFT = Timings#sst_timings.slot_fetch_time,
                Timings#sst_timings{slot_fetch_time = SFT + Timer};
            noncached_block ->
                NCT = Timings#sst_timings.noncached_block_time,
                Timings#sst_timings{noncached_block_time = NCT + Timer}
        end,
    case Continue of
        true ->
            {os:timestamp(), Timings0};
        false ->
            % Terminal stage - bump that stage's occurrence count
            Timings1 =
                case Stage of
                    slot_index ->
                        SIC = Timings#sst_timings.slot_index_count,
                        Timings0#sst_timings{slot_index_count = SIC + 1};
                    fetch_cache ->
                        FCC = Timings#sst_timings.fetch_cache_count,
                        Timings0#sst_timings{fetch_cache_count = FCC + 1};
                    slot_fetch ->
                        SFC = Timings#sst_timings.slot_fetch_count,
                        Timings0#sst_timings{slot_fetch_count = SFC + 1};
                    noncached_block ->
                        NCC = Timings#sst_timings.noncached_block_count,
                        Timings0#sst_timings{noncached_block_count = NCC + 1}
                end,
            SC = Timings1#sst_timings.sample_count,
            {no_timing, Timings1#sst_timings{sample_count = SC + 1}}
    end.
%%%============================================================================
%%% Test
%%%============================================================================
-ifdef(TEST).
%% Test-only wrappers around sst_new, fixing the index-mod-date option to
%% false.
testsst_new(RootPath, Filename, Level, KVList, MaxSQN, PressMethod) ->
    sst_new(RootPath, Filename, Level, KVList, MaxSQN, PressMethod, false).
testsst_new(RootPath, Filename,
            KVL1, KVL2, IsBasement, Level, MaxSQN, PressMethod) ->
    sst_new(RootPath, Filename, KVL1, KVL2, IsBasement, Level, MaxSQN,
            PressMethod, false).
%% Test helper - generate Count random object keys (with metadata values)
%% across a range of zero-padded bucket names, with ascending sequence
%% numbers.
generate_randomkeys(Seqn, Count, BucketRangeLow, BucketRangeHigh) ->
    generate_randomkeys(Seqn,
                        Count,
                        [],
                        BucketRangeLow,
                        BucketRangeHigh).
generate_randomkeys(_Seqn, 0, Acc, _BucketLow, _BucketHigh) ->
    Acc;
generate_randomkeys(Seqn, Count, Acc, BucketLow, BRange) ->
    BRand = leveled_rand:uniform(BRange),
    % string:right/3 (legacy string API) zero-pads on the left
    BNumber = string:right(integer_to_list(BucketLow + BRand), 4, $0),
    KNumber = string:right(integer_to_list(leveled_rand:uniform(1000)), 6, $0),
    LK = leveled_codec:to_ledgerkey("Bucket" ++ BNumber, "Key" ++ KNumber, o),
    Chunk = leveled_rand:rand_bytes(64),
    {_B, _K, MV, _H, _LMs} =
        leveled_codec:generate_ledgerkv(LK, Seqn, Chunk, 64, infinity),
    generate_randomkeys(Seqn + 1,
                        Count - 1,
                        [{LK, MV}|Acc],
                        BucketLow,
                        BRange).
%% Test helper - generate Count index (no-lookup) keys with random terms.
generate_indexkeys(Count) ->
    generate_indexkeys(Count, []).
generate_indexkeys(0, IndexList) ->
    IndexList;
generate_indexkeys(Count, IndexList) ->
    Changes = generate_indexkey(leveled_rand:uniform(8000), Count),
    % ++ on the accumulator is quadratic, but acceptable at test sizes
    generate_indexkeys(Count - 1, IndexList ++ Changes).
%% Test helper - build the index specs for a single add of Term against
%% the t1_int index, keyed off the Count.
generate_indexkey(Term, Count) ->
    IndexSpecs = [{add, "t1_int", Term}],
    leveled_codec:idx_indexspecs(IndexSpecs,
                                    "Bucket",
                                    "Key" ++ integer_to_list(Count),
                                    Count,
                                    infinity).
form_slot_test() ->
    % If a skip key happens, mustn't switch to lookup by accident as could
    % be over the expected size
    SkippingKV = {{o, "B1", "K9999", null}, {9999, tomb, 1234567, {}}},
    Slot = [{{o, "B1", "K5", null}, {5, active, 99234567, {}}}],
    R1 = form_slot([SkippingKV], [],
                    {true, 99999999},
                    no_lookup,
                    ?LOOK_SLOTSIZE + 1,
                    Slot,
                    {o, "B1", "K5", null}),
    % Tombstone in the basement is skipped, so slot and first key unchanged
    ?assertMatch({[], [], {no_lookup, Slot}, {o, "B1", "K5", null}}, R1).
merge_tombstonelist_test() ->
    % Merge lists with nothing but tombstones - all should be reaped in a
    % basement merge, leaving an empty slot list
    SkippingKV1 = {{o, "B1", "K9995", null}, {9995, tomb, 1234567, {}}},
    SkippingKV2 = {{o, "B1", "K9996", null}, {9996, tomb, 1234567, {}}},
    SkippingKV3 = {{o, "B1", "K9997", null}, {9997, tomb, 1234567, {}}},
    SkippingKV4 = {{o, "B1", "K9998", null}, {9998, tomb, 1234567, {}}},
    SkippingKV5 = {{o, "B1", "K9999", null}, {9999, tomb, 1234567, {}}},
    R = merge_lists([SkippingKV1, SkippingKV3, SkippingKV5],
                        [SkippingKV2, SkippingKV4],
                        {true, 9999999},
                        native,
                        ?INDEX_MODDATE),
    ?assertMatch({[], [], [], null}, R).
indexed_list_test() ->
    % Build a lookup slot from random keys, then check individual keys can
    % be fetched back out of the serialised slot binary
    io:format(user, "~nIndexed list timing test:~n", []),
    N = 150,
    KVL0 = lists:ukeysort(1, generate_randomkeys(1, N, 1, 4)),
    KVL1 = lists:sublist(KVL0, ?LOOK_SLOTSIZE),
    SW0 = os:timestamp(),
    {{_PosBinIndex1, FullBin, _HL, _LK}, no_timing} =
        generate_binary_slot(lookup, KVL1, native, ?INDEX_MODDATE, no_timing),
    io:format(user,
                "Indexed list created slot in ~w microseconds of size ~w~n",
                [timer:now_diff(os:timestamp(), SW0), byte_size(FullBin)]),
    % Sample five keys spread through the slot
    {TestK1, TestV1} = lists:nth(20, KVL1),
    MH1 = leveled_codec:segment_hash(TestK1),
    {TestK2, TestV2} = lists:nth(40, KVL1),
    MH2 = leveled_codec:segment_hash(TestK2),
    {TestK3, TestV3} = lists:nth(60, KVL1),
    MH3 = leveled_codec:segment_hash(TestK3),
    {TestK4, TestV4} = lists:nth(80, KVL1),
    MH4 = leveled_codec:segment_hash(TestK4),
    {TestK5, TestV5} = lists:nth(100, KVL1),
    MH5 = leveled_codec:segment_hash(TestK5),
    test_binary_slot(FullBin, TestK1, MH1, {TestK1, TestV1}),
    test_binary_slot(FullBin, TestK2, MH2, {TestK2, TestV2}),
    test_binary_slot(FullBin, TestK3, MH3, {TestK3, TestV3}),
    test_binary_slot(FullBin, TestK4, MH4, {TestK4, TestV4}),
    test_binary_slot(FullBin, TestK5, MH5, {TestK5, TestV5}).
indexed_list_mixedkeys_test() ->
    % As indexed_list_test, but with index (no-lookup) keys mixed in with
    % the object keys - the object keys must still be individually
    % fetchable
    KVL0 = lists:ukeysort(1, generate_randomkeys(1, 50, 1, 4)),
    KVL1 = lists:sublist(KVL0, 33),
    Keys = lists:ukeysort(1, generate_indexkeys(60) ++ KVL1),
    {{_PosBinIndex1, FullBin, _HL, _LK}, no_timing} =
        generate_binary_slot(lookup, Keys, native, ?INDEX_MODDATE, no_timing),
    {TestK1, TestV1} = lists:nth(4, KVL1),
    MH1 = leveled_codec:segment_hash(TestK1),
    {TestK2, TestV2} = lists:nth(8, KVL1),
    MH2 = leveled_codec:segment_hash(TestK2),
    {TestK3, TestV3} = lists:nth(12, KVL1),
    MH3 = leveled_codec:segment_hash(TestK3),
    {TestK4, TestV4} = lists:nth(16, KVL1),
    MH4 = leveled_codec:segment_hash(TestK4),
    {TestK5, TestV5} = lists:nth(20, KVL1),
    MH5 = leveled_codec:segment_hash(TestK5),
    test_binary_slot(FullBin, TestK1, MH1, {TestK1, TestV1}),
    test_binary_slot(FullBin, TestK2, MH2, {TestK2, TestV2}),
    test_binary_slot(FullBin, TestK3, MH3, {TestK3, TestV3}),
    test_binary_slot(FullBin, TestK4, MH4, {TestK4, TestV4}),
    test_binary_slot(FullBin, TestK5, MH5, {TestK5, TestV5}).
indexed_list_mixedkeys2_test() ->
    % As indexed_list_mixedkeys_test, but with index keys both before and
    % after the object keys, and every object key checked
    KVL0 = lists:ukeysort(1, generate_randomkeys(1, 50, 1, 4)),
    KVL1 = lists:sublist(KVL0, 33),
    IdxKeys1 = lists:ukeysort(1, generate_indexkeys(30)),
    IdxKeys2 = lists:ukeysort(1, generate_indexkeys(30)),
    % this isn't actually ordered correctly
    Keys = IdxKeys1 ++ KVL1 ++ IdxKeys2,
    {{_Header, FullBin, _HL, _LK}, no_timing} =
        generate_binary_slot(lookup, Keys, native, ?INDEX_MODDATE, no_timing),
    lists:foreach(fun({K, V}) ->
                        MH = leveled_codec:segment_hash(K),
                        test_binary_slot(FullBin, K, MH, {K, V})
                    end,
                    KVL1).
indexed_list_allindexkeys_test() ->
    % A slot of only index keys - check the expected header shape with and
    % without the last-modified-date indexed, and that the slot expands
    % back to the original key list
    Keys = lists:sublist(lists:ukeysort(1, generate_indexkeys(150)),
                            ?LOOK_SLOTSIZE),
    {{HeaderT, FullBinT, _HL, _LK}, no_timing} =
        generate_binary_slot(lookup, Keys, native, true, no_timing),
    {{HeaderF, FullBinF, _HL, _LK}, no_timing} =
        generate_binary_slot(lookup, Keys, native, false, no_timing),
    EmptySlotSize = ?LOOK_SLOTSIZE - 1,
    LMD = ?FLIPPER32,
    ?assertMatch(<<_BL:20/binary, LMD:32/integer, EmptySlotSize:8/integer>>,
                    HeaderT),
    ?assertMatch(<<_BL:20/binary, EmptySlotSize:8/integer>>,
                    HeaderF),
    % SW = os:timestamp(),
    BinToListT = binaryslot_tolist(FullBinT, native, true),
    BinToListF = binaryslot_tolist(FullBinF, native, false),
    % io:format(user,
    %             "Indexed list flattened in ~w microseconds ~n",
    %             [timer:now_diff(os:timestamp(), SW)]),
    ?assertMatch(Keys, BinToListT),
    ?assertMatch({Keys, none}, binaryslot_trimmedlist(FullBinT,
                                                        all, all,
                                                        native,
                                                        true,
                                                        false)),
    ?assertMatch(Keys, BinToListF),
    ?assertMatch({Keys, none}, binaryslot_trimmedlist(FullBinF,
                                                        all, all,
                                                        native,
                                                        false,
                                                        false)).
%% As the lookup-slot variant above, but the slot is built with the
%% no_lookup option and the larger ?NOLOOK_SLOTSIZE key count.
indexed_list_allindexkeys_nolookup_test() ->
    Keys = lists:sublist(lists:ukeysort(1, generate_indexkeys(1000)),
                         ?NOLOOK_SLOTSIZE),
    {{Header, FullBin, _HL, _LK}, no_timing} =
        generate_binary_slot(no_lookup, Keys, native, ?INDEX_MODDATE, no_timing),
    %% Header ends with the slot-size marker byte (127 - presumably
    %% ?NOLOOK_SLOTSIZE - 1; confirm against the macro definition)
    ?assertMatch(<<_BL:20/binary, _LMD:32/integer, 127:8/integer>>, Header),
    % SW = os:timestamp(),
    BinToList = binaryslot_tolist(FullBin, native, ?INDEX_MODDATE),
    % io:format(user,
    %             "Indexed list flattened in ~w microseconds ~n",
    %             [timer:now_diff(os:timestamp(), SW)]),
    %% Flattening, or trimming across the full range, returns every key
    ?assertMatch(Keys, BinToList),
    ?assertMatch({Keys, none}, binaryslot_trimmedlist(FullBin,
                                                      all, all,
                                                      native,
                                                      ?INDEX_MODDATE,
                                                      false)).
%% Trimmed-list queries on an all-index-key lookup slot: first a range
%% covering the whole t1_int index, then three sub-ranges - a wide one,
%% a narrow one, and one right at the end of the slot.
indexed_list_allindexkeys_trimmed_test() ->
    Keys = lists:sublist(lists:ukeysort(1, generate_indexkeys(150)),
                         ?LOOK_SLOTSIZE),
    {{Header, FullBin, _HL, _LK}, no_timing} =
        generate_binary_slot(lookup, Keys, native, ?INDEX_MODDATE, no_timing),
    EmptySlotSize = ?LOOK_SLOTSIZE - 1,
    ?assertMatch(<<_BL:20/binary, _LMD:32/integer, EmptySlotSize:8/integer>>,
                 Header),
    %% A range spanning the entire index returns all keys
    ?assertMatch({Keys, none}, binaryslot_trimmedlist(FullBin,
                                                      {i,
                                                       "Bucket",
                                                       {"t1_int", 0},
                                                       null},
                                                      {i,
                                                       "Bucket",
                                                       {"t1_int", 99999},
                                                       null},
                                                      native,
                                                      ?INDEX_MODDATE,
                                                      false)),
    %% Sub-range: keys 10..100 inclusive (91 keys)
    {SK1, _} = lists:nth(10, Keys),
    {EK1, _} = lists:nth(100, Keys),
    R1 = lists:sublist(Keys, 10, 91),
    {O1, none} = binaryslot_trimmedlist(FullBin, SK1, EK1,
                                        native, ?INDEX_MODDATE, false),
    ?assertMatch(91, length(O1)),
    ?assertMatch(R1, O1),
    %% Sub-range: keys 10..20 inclusive (11 keys)
    {SK2, _} = lists:nth(10, Keys),
    {EK2, _} = lists:nth(20, Keys),
    R2 = lists:sublist(Keys, 10, 11),
    {O2, none} = binaryslot_trimmedlist(FullBin, SK2, EK2,
                                        native, ?INDEX_MODDATE, false),
    ?assertMatch(11, length(O2)),
    ?assertMatch(R2, O2),
    %% Sub-range: final two keys of the slot
    {SK3, _} = lists:nth(?LOOK_SLOTSIZE - 1, Keys),
    {EK3, _} = lists:nth(?LOOK_SLOTSIZE, Keys),
    R3 = lists:sublist(Keys, ?LOOK_SLOTSIZE - 1, 2),
    {O3, none} = binaryslot_trimmedlist(FullBin, SK3, EK3,
                                        native, ?INDEX_MODDATE, false),
    ?assertMatch(2, length(O3)),
    ?assertMatch(R3, O3).
%% Corrupt a slot binary in various places (key blocks, position index,
%% CRC/header regions) and check reads degrade safely: corrupted lookups
%% return not_present and list/trim queries return empty lists rather
%% than crashing or returning bad data.
%% Fix: the final trimmedlist check (O5) previously queried SlotBin4 a
%% second time instead of SlotBin5, so the SlotBin5 trimmedlist path was
%% never exercised.
indexed_list_mixedkeys_bitflip_test() ->
    KVL0 = lists:ukeysort(1, generate_randomkeys(1, 50, 1, 4)),
    KVL1 = lists:sublist(KVL0, 33),
    Keys = lists:ukeysort(1, generate_indexkeys(60) ++ KVL1),
    {{Header, SlotBin, _HL, LK}, no_timing} =
        generate_binary_slot(lookup, Keys, native, ?INDEX_MODDATE, no_timing),
    ?assertMatch(LK, element(1, lists:last(Keys))),
    %% Header: five 32-bit block lengths, 32-bit LMD, then position index
    <<B1L:32/integer,
      _B2L:32/integer,
      _B3L:32/integer,
      _B4L:32/integer,
      _B5L:32/integer,
      _LMD:32/integer,
      PosBin/binary>> = Header,
    TestKey1 = element(1, lists:nth(1, KVL1)),
    TestKey2 = element(1, lists:nth(33, KVL1)),
    MH1 = leveled_codec:segment_hash(TestKey1),
    MH2 = leveled_codec:segment_hash(TestKey2),
    %% Sanity: both keys fetchable from the uncorrupted slot
    test_binary_slot(SlotBin, TestKey1, MH1, lists:nth(1, KVL1)),
    test_binary_slot(SlotBin, TestKey2, MH2, lists:nth(33, KVL1)),
    ToList = binaryslot_tolist(SlotBin, native, ?INDEX_MODDATE),
    ?assertMatch(Keys, ToList),
    %% Locate the block holding each test key, then flip a byte in it
    [Pos1] = find_pos(PosBin, extract_hash(MH1), [], 0),
    [Pos2] = find_pos(PosBin, extract_hash(MH2), [], 0),
    {BN1, _BP1} = revert_position(Pos1),
    {BN2, _BP2} = revert_position(Pos2),
    {Offset1, Length1} = block_offsetandlength(Header, BN1),
    {Offset2, Length2} = block_offsetandlength(Header, BN2),
    SlotBin1 = flip_byte(SlotBin, byte_size(Header) + 12 + Offset1, Length1),
    SlotBin2 = flip_byte(SlotBin, byte_size(Header) + 12 + Offset2, Length2),
    %% A key in an uncorrupted block still reads; a key in the corrupted
    %% block returns not_present
    test_binary_slot(SlotBin2, TestKey1, MH1, lists:nth(1, KVL1)),
    test_binary_slot(SlotBin1, TestKey2, MH2, lists:nth(33, KVL1)),
    test_binary_slot(SlotBin1, TestKey1, MH1, not_present),
    test_binary_slot(SlotBin2, TestKey2, MH2, not_present),
    %% Listing a part-corrupted slot returns a shortened (non-empty) list
    ToList1 = binaryslot_tolist(SlotBin1, native, ?INDEX_MODDATE),
    ToList2 = binaryslot_tolist(SlotBin2, native, ?INDEX_MODDATE),
    ?assertMatch(true, is_list(ToList1)),
    ?assertMatch(true, is_list(ToList2)),
    ?assertMatch(true, length(ToList1) > 0),
    ?assertMatch(true, length(ToList2) > 0),
    ?assertMatch(true, length(ToList1) < length(Keys)),
    ?assertMatch(true, length(ToList2) < length(Keys)),
    %% Corrupt the first block then trim a range falling inside it
    SlotBin3 = flip_byte(SlotBin, byte_size(Header) + 12, B1L),
    {SK1, _} = lists:nth(10, Keys),
    {EK1, _} = lists:nth(20, Keys),
    {O1, none} = binaryslot_trimmedlist(SlotBin3, SK1, EK1,
                                        native, ?INDEX_MODDATE, false),
    ?assertMatch([], O1),
    %% Corrupt the leading 20 bytes, and the rest of the header
    SlotBin4 = flip_byte(SlotBin, 0, 20),
    SlotBin5 = flip_byte(SlotBin, 20, byte_size(Header) - 20 - 12),
    test_binary_slot(SlotBin4, TestKey1, MH1, not_present),
    test_binary_slot(SlotBin5, TestKey1, MH1, not_present),
    ToList4 = binaryslot_tolist(SlotBin4, native, ?INDEX_MODDATE),
    ToList5 = binaryslot_tolist(SlotBin5, native, ?INDEX_MODDATE),
    ?assertMatch([], ToList4),
    ?assertMatch([], ToList5),
    {O4, none} = binaryslot_trimmedlist(SlotBin4, SK1, EK1,
                                        native, ?INDEX_MODDATE, false),
    %% Fixed: was SlotBin4, leaving SlotBin5 untested here
    {O5, none} = binaryslot_trimmedlist(SlotBin5, SK1, EK1,
                                        native, ?INDEX_MODDATE, false),
    ?assertMatch([], O4),
    ?assertMatch([], O5).
%% Pick a random byte within [Offset, Offset + Length) of Binary and
%% invert it (0 -> 255, anything else -> 0), returning the mutated binary.
flip_byte(Binary, Offset, Length) ->
    TargetPos = Offset + leveled_rand:uniform(Length) - 1,
    <<Prefix:TargetPos/binary, Byte:8/integer, Suffix/binary>> = Binary,
    Flipped =
        case Byte of
            0 -> 255;
            _ -> 0
        end,
    <<Prefix:TargetPos/binary, Flipped:8/integer, Suffix/binary>>.
%% Fetch Key from the slot binary and assert the result equals
%% ExpectedValue (pass not_present to assert a miss).
test_binary_slot(FullBin, Key, Hash, ExpectedValue) ->
    {Fetched, _Header} =
        binaryslot_get(FullBin, Key, Hash, native, ?INDEX_MODDATE),
    ?assertMatch(ExpectedValue, Fetched).
%% Run the merge test against both file-creation variants.
merge_test() ->
    lists:foreach(
        fun({SingleFun, MergeFun}) -> merge_tester(SingleFun, MergeFun) end,
        [{fun testsst_new/6, fun testsst_new/8},
         {fun sst_new/6, fun sst_new/8}]).
%% Build two source files (levels 1 and 2) from overlapping key ranges,
%% merge them into a new level-2 file via NewFunM, then confirm the merged
%% file spans both key ranges and serves every key from either source.
merge_tester(NewFunS, NewFunM) ->
    N = 3000,
    KVL1 = lists:ukeysort(1, generate_randomkeys(N + 1, N, 1, 20)),
    KVL2 = lists:ukeysort(1, generate_randomkeys(1, N, 1, 20)),
    KVL3 = lists:ukeymerge(1, KVL1, KVL2),
    SW0 = os:timestamp(),
    {ok, P1, {FK1, LK1}, _Bloom1} =
        NewFunS("../test/", "level1_src", 1, KVL1, 6000, native),
    {ok, P2, {FK2, LK2}, _Bloom2} =
        NewFunS("../test/", "level2_src", 2, KVL2, 3000, native),
    %% Reported first/last keys must match the input extremes
    ExpFK1 = element(1, lists:nth(1, KVL1)),
    ExpLK1 = element(1, lists:last(KVL1)),
    ExpFK2 = element(1, lists:nth(1, KVL2)),
    ExpLK2 = element(1, lists:last(KVL2)),
    ?assertMatch(ExpFK1, FK1),
    ?assertMatch(ExpFK2, FK2),
    ?assertMatch(ExpLK1, LK1),
    ?assertMatch(ExpLK2, LK2),
    ML1 = [{next, #manifest_entry{owner = P1}, FK1}],
    ML2 = [{next, #manifest_entry{owner = P2}, FK2}],
    NewR =
        NewFunM("../test/", "level2_merge", ML1, ML2, false, 2, N * 2, native),
    {ok, P3, {{Rem1, Rem2}, FK3, LK3}, _Bloom3} = NewR,
    %% Nothing should remain unmerged; merged range spans both inputs
    ?assertMatch([], Rem1),
    ?assertMatch([], Rem2),
    ?assertMatch(true, FK3 == min(FK1, FK2)),
    io:format("LK1 ~w LK2 ~w LK3 ~w~n", [LK1, LK2, LK3]),
    ?assertMatch(true, LK3 == max(LK1, LK2)),
    io:format(user,
              "Created and merged two files of size ~w in ~w microseconds~n",
              [N, timer:now_diff(os:timestamp(), SW0)]),
    %% Every key from either source must be fetchable from the merged file
    SW1 = os:timestamp(),
    lists:foreach(fun({K, V}) ->
                      ?assertMatch({K, V}, sst_get(P3, K))
                  end,
                  KVL3),
    io:format(user,
              "Checked presence of all ~w objects in ~w microseconds~n",
              [length(KVL3), timer:now_diff(os:timestamp(), SW1)]),
    %% Tidy up all three files
    ok = sst_close(P1),
    ok = sst_close(P2),
    ok = sst_close(P3),
    ok = file:delete("../test/level1_src.sst"),
    ok = file:delete("../test/level2_src.sst"),
    ok = file:delete("../test/level2_merge.sst").
%% Run the range test against both file-creation variants.
simple_persisted_range_test() ->
    lists:foreach(fun simple_persisted_range_tester/1,
                  [fun testsst_new/6, fun sst_new/6]).
%% Range queries on a persisted file: first a range strictly beyond the
%% last key (must be empty), then ranges whose start/end keys sit either
%% side of positions 127-130 and 255-258 - presumably straddling slot
%% boundaries (confirm against ?LOOK_SLOTSIZE) - each of which must
%% start and end exactly on the requested keys.
simple_persisted_range_tester(SSTNewFun) ->
    {RP, Filename} = {"../test/", "simple_test"},
    KVList0 = generate_randomkeys(1, ?LOOK_SLOTSIZE * 16, 1, 20),
    KVList1 = lists:ukeysort(1, KVList0),
    [{FirstKey, _FV}|_Rest] = KVList1,
    {LastKey, _LV} = lists:last(KVList1),
    {ok, Pid, {FirstKey, LastKey}, _Bloom} =
        SSTNewFun(RP, Filename, 1, KVList1, length(KVList1), native),
    %% A range wholly beyond the last key returns nothing
    {o, B, K, null} = LastKey,
    SK1 = {o, B, K, 0},
    EK1 = {o, B, K, 1},
    FetchListA1 = sst_getkvrange(Pid, SK1, EK1, 1),
    ?assertMatch([], FetchListA1),
    SK2 = element(1, lists:nth(127, KVList1)),
    SK3 = element(1, lists:nth(128, KVList1)),
    SK4 = element(1, lists:nth(129, KVList1)),
    SK5 = element(1, lists:nth(130, KVList1)),
    EK2 = element(1, lists:nth(255, KVList1)),
    EK3 = element(1, lists:nth(256, KVList1)),
    EK4 = element(1, lists:nth(257, KVList1)),
    EK5 = element(1, lists:nth(258, KVList1)),
    %% Each range must begin with its start key and end with its end key
    TestFun =
        fun({SK, EK}) ->
            FetchList = sst_getkvrange(Pid, SK, EK, 4),
            ?assertMatch(SK, element(1, lists:nth(1, FetchList))),
            ?assertMatch(EK, element(1, lists:last(FetchList)))
        end,
    TL2 = lists:map(fun(EK) -> {SK2, EK} end, [EK2, EK3, EK4, EK5]),
    TL3 = lists:map(fun(EK) -> {SK3, EK} end, [EK2, EK3, EK4, EK5]),
    TL4 = lists:map(fun(EK) -> {SK4, EK} end, [EK2, EK3, EK4, EK5]),
    TL5 = lists:map(fun(EK) -> {SK5, EK} end, [EK2, EK3, EK4, EK5]),
    lists:foreach(TestFun, TL2 ++ TL3 ++ TL4 ++ TL5).
%% Run the seg-filtered range test against both file-creation variants.
simple_persisted_rangesegfilter_test() ->
    lists:foreach(fun simple_persisted_rangesegfilter_tester/1,
                  [fun testsst_new/6, fun sst_new/6]).
%% Check seg-filtered range queries: each queried range must include its
%% own start and end keys and exclude keys outside the range, even though
%% all candidate keys' segments are in the filter list.
%% Refactor: the four near-identical lists:foldl blocks at the end are
%% factored into a single parameterised fun - behaviour is unchanged.
simple_persisted_rangesegfilter_tester(SSTNewFun) ->
    {RP, Filename} = {"../test/", "range_segfilter_test"},
    KVList0 = generate_randomkeys(1, ?LOOK_SLOTSIZE * 16, 1, 20),
    KVList1 = lists:ukeysort(1, KVList0),
    [{FirstKey, _FV}|_Rest] = KVList1,
    {LastKey, _LV} = lists:last(KVList1),
    {ok, Pid, {FirstKey, LastKey}, _Bloom} =
        SSTNewFun(RP, Filename, 1, KVList1, length(KVList1), native),
    %% Candidate start and end keys, spaced through the ordered key list
    SK1 = element(1, lists:nth(124, KVList1)),
    SK2 = element(1, lists:nth(126, KVList1)),
    SK3 = element(1, lists:nth(128, KVList1)),
    SK4 = element(1, lists:nth(130, KVList1)),
    SK5 = element(1, lists:nth(132, KVList1)),
    EK1 = element(1, lists:nth(252, KVList1)),
    EK2 = element(1, lists:nth(254, KVList1)),
    EK3 = element(1, lists:nth(256, KVList1)),
    EK4 = element(1, lists:nth(258, KVList1)),
    EK5 = element(1, lists:nth(260, KVList1)),
    %% Segment filter covering every chosen start/end key
    GetSegFun =
        fun(LK) ->
            extract_hash(
                leveled_codec:strip_to_segmentonly(
                    lists:keyfind(LK, 1, KVList1)))
        end,
    SegList =
        lists:map(GetSegFun,
                  [SK1, SK2, SK3, SK4, SK5, EK1, EK2, EK3, EK4, EK5]),
    %% A filtered range must contain its boundary keys and none of OutList
    TestFun =
        fun(StartKey, EndKey, OutList) ->
            RangeKVL =
                sst_getfilteredrange(Pid, StartKey, EndKey, 4, SegList, 0),
            RangeKL = lists:map(fun({LK0, _LV0}) -> LK0 end, RangeKVL),
            ?assertMatch(true, lists:member(StartKey, RangeKL)),
            ?assertMatch(true, lists:member(EndKey, RangeKL)),
            CheckOutFun =
                fun(OutKey) ->
                    ?assertMatch(false, lists:member(OutKey, RangeKL))
                end,
            lists:foreach(CheckOutFun, OutList)
        end,
    %% For a fixed end key, test every start key in turn; start keys used
    %% by earlier (wider) ranges must be absent from later (narrower) ones.
    FoldOverStartKeys =
        fun(EndKey, OutKeys) ->
            lists:foldl(fun(SK0, Acc) ->
                            TestFun(SK0, EndKey, OutKeys ++ Acc),
                            [SK0|Acc]
                        end,
                        [],
                        [SK1, SK2, SK3, SK4, SK5])
        end,
    FoldOverStartKeys(EK1, [EK2, EK3, EK4, EK5]),
    FoldOverStartKeys(EK2, [EK3, EK4, EK5]),
    FoldOverStartKeys(EK3, [EK4, EK5]),
    FoldOverStartKeys(EK4, [EK5]),
    ok = sst_clear(Pid).
additional_range_test() ->
    % Test fetching ranges that fall into odd situations with regards to the
    % summary index
    % - ranges which fall between entries in summary
    % - ranges which go beyond the end of the range of the sst
    % - ranges which match to an end key in the summary index
    IK1 = lists:foldl(fun(X, Acc) ->
                          Acc ++ generate_indexkey(X, X)
                      end,
                      [],
                      lists:seq(1, ?NOLOOK_SLOTSIZE)),
    Gap = 2,
    %% Second batch of keys leaves a 2-key gap after the first slot's worth
    IK2 = lists:foldl(fun(X, Acc) ->
                          Acc ++ generate_indexkey(X, X)
                      end,
                      [],
                      lists:seq(?NOLOOK_SLOTSIZE + Gap + 1,
                                2 * ?NOLOOK_SLOTSIZE + Gap)),
    {ok, P1, {{Rem1, Rem2}, SK, EK}, _Bloom1} =
        sst_new("../test/", "range1_src", IK1, IK2, false, 1, 9999, native),
    ?assertMatch([], Rem1),
    ?assertMatch([], Rem2),
    ?assertMatch(SK, element(1, lists:nth(1, IK1))),
    ?assertMatch(EK, element(1, lists:last(IK2))),
    % Basic test - checking scanwidth
    R1 = sst_getkvrange(P1, SK, EK, 1),
    ?assertMatch(?NOLOOK_SLOTSIZE + 1, length(R1)),
    QR1 = lists:sublist(R1, ?NOLOOK_SLOTSIZE),
    ?assertMatch(IK1, QR1),
    %% Wider scanwidth returns both batches fully expanded
    R2 = sst_getkvrange(P1, SK, EK, 2),
    ?assertMatch(?NOLOOK_SLOTSIZE * 2, length(R2)),
    QR2 = lists:sublist(R2, ?NOLOOK_SLOTSIZE),
    QR3 = lists:sublist(R2, ?NOLOOK_SLOTSIZE + 1, 2 * ?NOLOOK_SLOTSIZE),
    ?assertMatch(IK1, QR2),
    ?assertMatch(IK2, QR3),
    % Testing the gap - a range wholly inside the gap must be empty
    [GapSKV] = generate_indexkey(?NOLOOK_SLOTSIZE + 1, ?NOLOOK_SLOTSIZE + 1),
    [GapEKV] = generate_indexkey(?NOLOOK_SLOTSIZE + 2, ?NOLOOK_SLOTSIZE + 2),
    R3 = sst_getkvrange(P1, element(1, GapSKV), element(1, GapEKV), 1),
    ?assertMatch([], R3),
    % Testing beyond the range - end key past the file's last key
    [PastEKV] = generate_indexkey(2 * ?NOLOOK_SLOTSIZE + Gap + 1,
                                  2 * ?NOLOOK_SLOTSIZE + Gap + 1),
    R4 = sst_getkvrange(P1, element(1, GapSKV), element(1, PastEKV), 2),
    ?assertMatch(IK2, R4),
    R5 = sst_getkvrange(P1, SK, element(1, PastEKV), 2),
    IKAll = IK1 ++ IK2,
    ?assertMatch(IKAll, R5),
    %% Start mid-way through the second batch, end past the file
    [MidREKV] = generate_indexkey(?NOLOOK_SLOTSIZE + Gap + 2,
                                  ?NOLOOK_SLOTSIZE + Gap + 2),
    io:format(user, "Mid second range to past range test~n", []),
    R6 = sst_getkvrange(P1, element(1, MidREKV), element(1, PastEKV), 2),
    Exp6 = lists:sublist(IK2, 2, length(IK2)),
    ?assertMatch(Exp6, R6),
    % Testing at a slot end - end key equal to the last key of batch one
    Slot1EK = element(1, lists:last(IK1)),
    R7 = sst_getkvrange(P1, SK, Slot1EK, 2),
    ?assertMatch(IK1, R7).
    % Testing beyond end (should never happen if manifest behaves)
    % Test blows up anyway
    % R8 = sst_getkvrange(P1, element(1, PastEKV), element(1, PastEKV), 2),
    % ?assertMatch([], R8).
%% Run the single-slot test against both file-creation variants.
simple_persisted_slotsize_test() ->
    lists:foreach(fun simple_persisted_slotsize_tester/1,
                  [fun testsst_new/6, fun sst_new/6]).
%% Build a file containing exactly one lookup slot's worth of keys,
%% confirm every key can be fetched, then tidy up the file.
simple_persisted_slotsize_tester(SSTNewFun) ->
    {RootPath, FileName} = {"../test/", "simple_slotsize_test"},
    RawKVL = generate_randomkeys(1, ?LOOK_SLOTSIZE * 2, 1, 20),
    SlotKVL = lists:sublist(lists:ukeysort(1, RawKVL), ?LOOK_SLOTSIZE),
    [{FirstKey, _FV}|_] = SlotKVL,
    {LastKey, _LV} = lists:last(SlotKVL),
    {ok, Pid, {FirstKey, LastKey}, _Bloom} =
        SSTNewFun(RootPath, FileName, 1, SlotKVL, length(SlotKVL), native),
    CheckFun = fun({K, V}) -> ?assertMatch({K, V}, sst_get(Pid, K)) end,
    lists:foreach(CheckFun, SlotKVL),
    ok = sst_close(Pid),
    ok = file:delete(filename:join(RootPath, FileName ++ ".sst")).
%% EUnit test generator: the full persisted test is slow, so run it with
%% an extended 60-second timeout.
simple_persisted_test_() ->
    {timeout, 60, fun simple_persisted_test_bothformats/0}.
%% Run the full persisted test against both file-creation variants.
simple_persisted_test_bothformats() ->
    lists:foreach(fun simple_persisted_tester/1,
                  [fun testsst_new/6, fun sst_new/6]).
%% Core persisted-file test: build an SST from ?LOOK_SLOTSIZE * 32 random
%% keys, then check timed fetches (single and repeated), misses for absent
%% in-range keys, and a series of range queries at various scan widths.
simple_persisted_tester(SSTNewFun) ->
    {RP, Filename} = {"../test/", "simple_test"},
    KVList0 = generate_randomkeys(1, ?LOOK_SLOTSIZE * 32, 1, 20),
    KVList1 = lists:ukeysort(1, KVList0),
    [{FirstKey, _FV}|_Rest] = KVList1,
    {LastKey, _LV} = lists:last(KVList1),
    {ok, Pid, {FirstKey, LastKey}, _Bloom} =
        SSTNewFun(RP, Filename, 1, KVList1, length(KVList1), native),
    %% Every written key should be fetchable (timed - once, then twice)
    SW0 = os:timestamp(),
    lists:foreach(fun({K, V}) ->
                      ?assertMatch({K, V}, sst_get(Pid, K))
                  end,
                  KVList1),
    io:format(user,
              "Checking for ~w keys (once) in file with cache hit took ~w "
                  ++ "microseconds~n",
              [length(KVList1), timer:now_diff(os:timestamp(), SW0)]),
    SW1 = os:timestamp(),
    lists:foreach(fun({K, V}) ->
                      ?assertMatch({K, V}, sst_get(Pid, K)),
                      ?assertMatch({K, V}, sst_get(Pid, K))
                  end,
                  KVList1),
    io:format(user,
              "Checking for ~w keys (twice) in file with cache hit took ~w "
                  ++ "microseconds~n",
              [length(KVList1), timer:now_diff(os:timestamp(), SW1)]),
    ok = sst_printtimings(Pid),
    %% Keys strictly inside the file's range but absent from it must all
    %% return not_present
    KVList2 = generate_randomkeys(1, ?LOOK_SLOTSIZE * 32, 1, 20),
    MapFun =
        fun({K, V}, Acc) ->
            In = lists:keymember(K, 1, KVList1),
            case {K > FirstKey, LastKey > K, In} of
                {true, true, false} ->
                    [{K, leveled_codec:segment_hash(K), V}|Acc];
                _ ->
                    Acc
            end
        end,
    KVList3 = lists:foldl(MapFun, [], KVList2),
    SW2 = os:timestamp(),
    lists:foreach(fun({K, H, _V}) ->
                      ?assertMatch(not_present, sst_get(Pid, K, H))
                  end,
                  KVList3),
    io:format(user,
              "Checking for ~w missing keys took ~w microseconds~n",
              [length(KVList3), timer:now_diff(os:timestamp(), SW2)]),
    ok = sst_printtimings(Pid),
    %% Full-range query: expand any slot pointers, compare with the input
    FetchList1 = sst_getkvrange(Pid, all, all, 2),
    FoldFun = fun(X, Acc) ->
                  case X of
                      {pointer, P, S, SK, EK} ->
                          Acc ++ sst_getslots(P, [{pointer, P, S, SK, EK}]);
                      _ ->
                          Acc ++ [X]
                  end end,
    FetchedList1 = lists:foldl(FoldFun, [], FetchList1),
    ?assertMatch(KVList1, FetchedList1),
    %% Sub-range keys 10..300 at scan width 2 (last element is a pointer)
    {TenthKey, _v10} = lists:nth(10, KVList1),
    {Three000Key, _v300} = lists:nth(300, KVList1),
    SubKVList1 = lists:sublist(KVList1, 10, 291),
    SubKVList1L = length(SubKVList1),
    FetchList2 = sst_getkvrange(Pid, TenthKey, Three000Key, 2),
    ?assertMatch(pointer, element(1, lists:last(FetchList2))),
    FetchedList2 = lists:foldl(FoldFun, [], FetchList2),
    ?assertMatch(SubKVList1L, length(FetchedList2)),
    ?assertMatch(SubKVList1, FetchedList2),
    %% Sub-range keys 10..800 at scan widths 2 and 4
    {Eight000Key, _v800} = lists:nth(800, KVList1),
    SubKVListA1 = lists:sublist(KVList1, 10, 791),
    SubKVListA1L = length(SubKVListA1),
    FetchListA2 = sst_getkvrange(Pid, TenthKey, Eight000Key, 2),
    ?assertMatch(pointer, element(1, lists:last(FetchListA2))),
    FetchedListA2 = lists:foldl(FoldFun, [], FetchListA2),
    ?assertMatch(SubKVListA1L, length(FetchedListA2)),
    ?assertMatch(SubKVListA1, FetchedListA2),
    FetchListB2 = sst_getkvrange(Pid, TenthKey, Eight000Key, 4),
    ?assertMatch(pointer, element(1, lists:last(FetchListB2))),
    FetchedListB2 = lists:foldl(FoldFun, [], FetchListB2),
    ?assertMatch(SubKVListA1L, length(FetchedListB2)),
    ?assertMatch(SubKVListA1, FetchedListB2),
    %% Open-ended query from the 800th key to beyond the last key
    FetchListB3 = sst_getkvrange(Pid,
                                 Eight000Key,
                                 {o, null, null, null},
                                 4),
    FetchedListB3 = lists:foldl(FoldFun, [], FetchListB3),
    SubKVListA3 = lists:nthtail(800 - 1, KVList1),
    SubKVListA3L = length(SubKVListA3),
    io:format("Length expected ~w~n", [SubKVListA3L]),
    ?assertMatch(SubKVListA3L, length(FetchedListB3)),
    ?assertMatch(SubKVListA3, FetchedListB3),
    io:format("Eight hundredth key ~w~n", [Eight000Key]),
    %% Single-key range (start key == end key)
    FetchListB4 = sst_getkvrange(Pid,
                                 Eight000Key,
                                 Eight000Key,
                                 4),
    FetchedListB4 = lists:foldl(FoldFun, [], FetchListB4),
    ?assertMatch([{Eight000Key, _v800}], FetchedListB4),
    ok = sst_close(Pid),
    ok = file:delete(filename:join(RP, Filename ++ ".sst")).
%% Unit tests for key_dominates/3 - which of two key lists supplies the
%% next key, and when a key is skipped (dominated by a newer version, or
%% a reapable tombstone).  Third argument is {BasementFlag, Level}.
key_dominates_test() ->
    %% Active keys - KV5 is an older version of KV1's key
    KV1 = {{o, "Bucket", "Key1", null}, {5, {active, infinity}, 0, []}},
    KV2 = {{o, "Bucket", "Key3", null}, {6, {active, infinity}, 0, []}},
    KV3 = {{o, "Bucket", "Key2", null}, {3, {active, infinity}, 0, []}},
    KV4 = {{o, "Bucket", "Key4", null}, {7, {active, infinity}, 0, []}},
    KV5 = {{o, "Bucket", "Key1", null}, {4, {active, infinity}, 0, []}},
    %% Tombstones for Key1 - with and without an expiry value
    KV6 = {{o, "Bucket", "Key1", null}, {99, {tomb, 999}, 0, []}},
    KV7 = {{o, "Bucket", "Key1", null}, {99, tomb, 0, []}},
    KL1 = [KV1, KV2],
    KL2 = [KV3, KV4],
    %% Lowest key wins regardless of which argument holds it
    ?assertMatch({{next_key, KV1}, [KV2], KL2},
                 key_dominates(KL1, KL2, {undefined, 1})),
    ?assertMatch({{next_key, KV1}, KL2, [KV2]},
                 key_dominates(KL2, KL1, {undefined, 1})),
    %% The older version of Key1 (KV5) is dominated and skipped
    ?assertMatch({skipped_key, KL2, KL1},
                 key_dominates([KV5|KL2], KL1, {undefined, 1})),
    ?assertMatch({{next_key, KV1}, [KV2], []},
                 key_dominates(KL1, [], {undefined, 1})),
    %% Tombstone with expiry: dominated when an older version exists
    ?assertMatch({skipped_key, [KV6|KL2], [KV2]},
                 key_dominates([KV6|KL2], KL1, {undefined, 1})),
    ?assertMatch({{next_key, KV6}, KL2, [KV2]},
                 key_dominates([KV6|KL2], [KV2], {undefined, 1})),
    ?assertMatch({skipped_key, [KV6|KL2], [KV2]},
                 key_dominates([KV6|KL2], KL1, {true, 1})),
    ?assertMatch({skipped_key, [KV6|KL2], [KV2]},
                 key_dominates([KV6|KL2], KL1, {true, 1000})),
    ?assertMatch({{next_key, KV6}, KL2, [KV2]},
                 key_dominates([KV6|KL2], [KV2], {true, 1})),
    %% In the basement at a deep level the expired tombstone is reaped
    ?assertMatch({skipped_key, KL2, [KV2]},
                 key_dominates([KV6|KL2], [KV2], {true, 1000})),
    ?assertMatch({skipped_key, [], []},
                 key_dominates([KV6], [], {true, 1000})),
    ?assertMatch({skipped_key, [], []},
                 key_dominates([], [KV6], {true, 1000})),
    ?assertMatch({{next_key, KV6}, [], []},
                 key_dominates([KV6], [], {true, 1})),
    ?assertMatch({{next_key, KV6}, [], []},
                 key_dominates([], [KV6], {true, 1})),
    %% Plain tomb (no expiry): reaped whenever in the basement
    ?assertMatch({skipped_key, [], []},
                 key_dominates([KV7], [], {true, 1})),
    ?assertMatch({skipped_key, [], []},
                 key_dominates([], [KV7], {true, 1})),
    ?assertMatch({skipped_key, [KV7|KL2], [KV2]},
                 key_dominates([KV7|KL2], KL1, {undefined, 1})),
    ?assertMatch({{next_key, KV7}, KL2, [KV2]},
                 key_dominates([KV7|KL2], [KV2], {undefined, 1})),
    ?assertMatch({skipped_key, [KV7|KL2], [KV2]},
                 key_dominates([KV7|KL2], KL1, {true, 1})),
    ?assertMatch({skipped_key, KL2, [KV2]},
                 key_dominates([KV7|KL2], [KV2], {true, 1})).
%% Coverage-only test: exercise gen_fsm callbacks (handle_info/3,
%% code_change/4, handle_sync_event/4) that the normal flow never hits,
%% plus the test helper flip_byte/3.
nonsense_coverage_test() ->
    {ok, Pid} = gen_fsm:start_link(?MODULE, [], []),
    ok = gen_fsm:send_all_state_event(Pid, nonsense),
    ?assertMatch({next_state, reader, #state{}}, handle_info(nonsense,
                                                             reader,
                                                             #state{})),
    ?assertMatch({ok, reader, #state{}}, code_change(nonsense,
                                                     reader,
                                                     #state{},
                                                     nonsense)),
    ?assertMatch({reply, undefined, reader, #state{}},
                 handle_sync_event("hello", self(), reader, #state{})),
    %% flip_byte/3 must always mutate the binary it is given
    SampleBin = <<0:128/integer>>,
    FlippedBin = flip_byte(SampleBin, 0, 16),
    ?assertMatch(false, FlippedBin == SampleBin).
%% Build a slot from 128 keys carrying Riak-style metadata values, then
%% check every key's tictac segment resolves to at least one position in
%% the slot's position index at both small and medium tree sizes.
hashmatching_bytreesize_test() ->
    B = <<"Bucket">>,
    V = leveled_codec:riak_metadata_to_binary(term_to_binary([{"actor1", 1}]),
                                              <<1:32/integer,
                                                0:32/integer,
                                                0:32/integer>>),
    %% Generate {LedgerKey, MetaValue} pairs KeyN for N in 1..128
    GenKeyFun =
        fun(X) ->
            LK =
                {?RIAK_TAG,
                 B,
                 list_to_binary("Key" ++ integer_to_list(X)),
                 null},
            LKV = leveled_codec:generate_ledgerkv(LK,
                                                  X,
                                                  V,
                                                  byte_size(V),
                                                  {active, infinity}),
            {_Bucket, _Key, MetaValue, _Hashes, _LastMods} = LKV,
            {LK, MetaValue}
        end,
    KVL = lists:map(GenKeyFun, lists:seq(1, 128)),
    {{PosBinIndex1, _FullBin, _HL, _LK}, no_timing} =
        generate_binary_slot(lookup, KVL, native, ?INDEX_MODDATE, no_timing),
    check_segment_match(PosBinIndex1, KVL, small),
    check_segment_match(PosBinIndex1, KVL, medium).
%% For each key in KVL, derive its tictac segment at the given TreeSize
%% and assert the position index yields at least one candidate position.
check_segment_match(PosBinIndex1, KVL, TreeSize) ->
    lists:foreach(
        fun({{_Tag, Bucket, Key, null}, _MetaValue}) ->
            Seg =
                leveled_tictac:get_segment(
                    leveled_tictac:keyto_segment32(<<Bucket/binary, Key/binary>>),
                    TreeSize),
            PosList = find_pos(PosBinIndex1, tune_seglist([Seg]), [], 0),
            ?assertMatch(true, length(PosList) >= 1)
        end,
        KVL).
%% Check update_timings/4: with the final flag false the call returns
%% no_timing while still accumulating into the record; once true, counts
%% and the slot_fetch time must reflect all samples taken.
timings_test() ->
    SW = os:timestamp(),
    timer:sleep(1),
    {no_timing, T1} = update_timings(SW, #sst_timings{}, slot_index, false),
    {no_timing, T2} = update_timings(SW, T1, slot_fetch, false),
    {no_timing, T3} = update_timings(SW, T2, noncached_block, false),
    timer:sleep(1),
    {_, T4} = update_timings(SW, T3, slot_fetch, true),
    %% Three prior samples, of which one was a slot_fetch
    ?assertMatch(3, T4#sst_timings.sample_count),
    ?assertMatch(1, T4#sst_timings.slot_fetch_count),
    %% The extra sleep means the later slot_fetch accumulated more time
    ?assertMatch(true, T4#sst_timings.slot_fetch_time >
                        T3#sst_timings.slot_fetch_time).
%% Coverage-only check, kept to keep dialyzer happy until last-modified
%% dates are actually used (see original TODO to remove this test).
take_max_lastmoddate_test() ->
    ?assertEqual(1, take_max_lastmoddate(0, 1)).
-endif.
%%==============================================================================
%% @copyright 2019-2020 Erlang Solutions Ltd.
%% Licensed under the Apache License, Version 2.0 (see LICENSE file)
%% @end
%%
%% @doc
%% In this scenario users are sending multiple messages to their neighbours in
%% intervals.
%%
%% == User steps: ==
%%
%% 1. Connect to the XMPP host given by the `mim_host' variable.
%%
%% 2. Set filter on incoming stanzas so that only messages are received.
%%
%% 3. Send presence `available' and wait for 5 seconds.
%%
%% 4. Select neighbouring users with lower and greater IDs defined by the
%% `number_of_prev_neighbours' and `number_of_next_neighbours' values.
%%
%% 5. Send messages to every neighbour multiple times (defined by
%% `number_of_send_message_repeats') in a round-robin fashion. After each
%% message wait for `sleep_time_after_every_message'.
%%
%% 6. Having sent all messages wait for 10 seconds before sending presence
%% `unavailable' and disconnect.
%%
%% == Metrics exposed by this scenario: ==
%%
%% === Counters: ===
%% - messages_sent - it is updated with every sent message by the
%% `amoc_xmpp_handlers:measure_sent_messages/0' handler.
%%
%% === Times: ===
%% - message_ttd - it is updated with every received message by the
%% `amoc_xmpp_handlers:measure_ttd/3' handler.
%%
%% @end
%%==============================================================================
-module(mongoose_one_to_one).
-behaviour(amoc_scenario).
-include_lib("exml/include/exml.hrl").
-include_lib("kernel/include/logger.hrl").
-define(SLEEP_TIME_AFTER_SCENARIO, 10000). %% wait 10s after scenario before disconnecting
-define(V(X), (fun amoc_config_validation:X/1)).
-required_variable([
#{name => number_of_prev_neighbours, default_value => 4, verification => ?V(nonnegative_integer),
description => "Number of users before current one to use (def: 4)"},
#{name => number_of_next_neighbours, default_value => 4, verification => ?V(nonnegative_integer),
description => "Number of users after current one to use (def: 4)"},
#{name => number_of_send_message_repeats, default_value => 73, verification => ?V(positive_integer),
description => "Number of send message (to all neighours) repeats (def: 73)"},
#{name => sleep_time_after_every_message, default_value => 20, verification => ?V(nonnegative_integer),
description => "Wait time between sent messages (seconds, def: 20)"},
#{name => mim_host, default_value => <<"localhost">>, verification => ?V(binary),
description => "The virtual host served by the server (def: <<\"localhost\">>)"}
]).
-export([start/1]).
-export([init/0]).
-spec init() -> ok.
%% Scenario-level initialisation: register the metrics that the stanza
%% handlers update (messages_sent counter, message_ttd time).
init() ->
    ?LOG_INFO("init metrics"),
    amoc_metrics:init(counters, messages_sent),
    amoc_metrics:init(times, message_ttd),
    ok.
-spec start(amoc_scenario:user_id()) -> any().
%% Entry point for each simulated user: connect to the configured host
%% with the measuring stanza handlers installed, run the messaging
%% scenario, then linger before signing off and disconnecting.
start(MyId) ->
    ExtraProps = [{server, amoc_config:get(mim_host)}, {socket_opts, socket_opts()}] ++
                 amoc_xmpp:pick_server([[{host, "127.0.0.1"}]]) ++
                 send_and_recv_escalus_handlers(),
    {ok, Client, _} = amoc_xmpp:connect_or_exit(MyId, ExtraProps),
    do(MyId, Client),
    %% Stay connected briefly so late messages can still be received
    escalus_connection:wait(Client, ?SLEEP_TIME_AFTER_SCENARIO),
    escalus_session:send_presence_unavailable(Client),
    escalus_connection:stop(Client).
-spec do(amoc_scenario:user_id(), escalus:client()) -> any().
%% Scenario body: filter incoming stanzas to messages only, announce
%% presence, then send repeated rounds of messages to the neighbours.
do(MyId, Client) ->
    escalus_connection:set_filter_predicate(Client, fun escalus_pred:is_message/1),
    escalus_session:send_presence_available(Client),
    escalus_connection:wait(Client, 5000),
    %% Neighbours are ids in [MyId - Prev, MyId + Next], excluding MyId
    %% itself, with the lower bound clamped at user id 1
    PrevNeighbours = amoc_config:get(number_of_prev_neighbours),
    NextNeighbours = amoc_config:get(number_of_next_neighbours),
    NeighbourIds = lists:delete(MyId, lists:seq(max(1, MyId - PrevNeighbours),
                                                MyId + NextNeighbours)),
    SleepTimeAfterMessage = amoc_config:get(sleep_time_after_every_message),
    send_messages_many_times(Client, timer:seconds(SleepTimeAfterMessage), NeighbourIds).
-spec send_messages_many_times(escalus:client(), timeout(), [amoc_scenario:user_id()]) -> ok.
%% Repeat the full round of neighbour messages the configured number
%% of times (number_of_send_message_repeats).
send_messages_many_times(Client, MessageInterval, NeighbourIds) ->
    Repeats = amoc_config:get(number_of_send_message_repeats),
    lists:foreach(
        fun(_Round) ->
            send_messages_to_neighbors(Client, NeighbourIds, MessageInterval)
        end,
        lists:seq(1, Repeats)).
-spec send_messages_to_neighbors(escalus:client(), [amoc_scenario:user_id()], timeout()) -> list().
%% One round: send a single message to each neighbour in turn, pausing
%% for SleepTime after each send.
send_messages_to_neighbors(Client, TargetIds, SleepTime) ->
    lists:map(
        fun(TargetId) -> send_message(Client, TargetId, SleepTime) end,
        TargetIds).
-spec send_message(escalus:client(), amoc_scenario:user_id(), timeout()) -> ok.
%% Send one chat message with a random base64 body (80-120 random bytes)
%% to the user with id ToId, then wait SleepTime before returning.
send_message(Client, ToId, SleepTime) ->
    RandomPayload = crypto:strong_rand_bytes(80 + rand:uniform(40)),
    Body = base64:encode(<<"Message_random_", RandomPayload/binary>>),
    Stanza = escalus_stanza:chat_to_with_id_and_timestamp(
                 amoc_xmpp_users:make_jid(ToId), Body),
    escalus_connection:send(Client, Stanza),
    escalus_connection:wait(Client, SleepTime).
-spec send_and_recv_escalus_handlers() -> [{atom(), any()}].
%% Escalus handler options: received messages update the message_ttd
%% time metric; sent messages update the messages_sent counter.
send_and_recv_escalus_handlers() ->
    ReceivedHandlers =
        amoc_xmpp_handlers:make_stanza_handlers(
            [{fun escalus_pred:is_message/1, fun amoc_xmpp_handlers:measure_ttd/3}]),
    SentHandlers =
        amoc_xmpp_handlers:make_stanza_handlers(
            [{fun escalus_pred:is_message/1,
              fun amoc_xmpp_handlers:measure_sent_messages/0}]),
    [{received_stanza_handlers, ReceivedHandlers},
     {sent_stanza_handlers, SentHandlers}].
-spec socket_opts() -> [gen_tcp:option()].
%% TCP options for the XMPP connection: binary mode, no address reuse,
%% Nagle disabled for lower message latency.
socket_opts() ->
    [binary, {reuseaddr, false}, {nodelay, true}].
%% Copyright (c) 2008-2013 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : lfe_parse.erl
%% Author : <NAME>
%% Purpose : A simple Sexpr parser.
%% A simple sexpr parser. It is both re-entrant and returns excess
%% tokens. The main engine is pretty naively coded at the moment.
-module(lfe_parse).
-export([sexpr/1,sexpr/2,format_error/1]).
-import(lists, [reverse/1,reverse/2]).
%% Define IS_MAP/1 macro for is_map/1 bif.
-ifdef(HAS_MAPS).
-define(IS_MAP(T), is_map(T)).
-else.
-define(IS_MAP(T), false).
-endif.
%% We define the syntax as an LL(1) and write/generate a parser for
%% it. We also define the grammar with the same form as for yecc even
%% though we have no automatic generator.
%%
%% Terminals
%% symbol number string fun '(' ')' '[' ']' '.' '\'' ',' '@' ',@' '`' '#('
%% '#B(' '#M(' '#\''.
%%
%% Nonterminals form sexpr list list_tail proper_list .
%%
%% 0 form -> sexpr : '$1'.
%% 1 sexpr -> symbol : val('$1').
%% 2 sexpr -> number : val('$1').
%% 3 sexpr -> string : val('$1').
%% 4 sexpr -> '#\'' : make_fun(val('$1')).
%% 5 sexpr -> '\'' sexpr : [quote,'$2'].
%% 6 sexpr -> '`' sexpr : [backquote,'$2'].
%% 7 sexpr -> ',' sexpr : [unquote,'$2'].
%% 8 sexpr -> ',@' sexpr : ['unquote-splicing','$2'].
%% 9 sexpr -> ( list ) : '$2'.
%% 10 sexpr -> [ list ] : '$2'.
%% 11 sexpr -> '#(' proper_list ')' : list_to_tuple('$2').
%% 12 sexpr -> '#B(' proper_list ')' :
%% case catch lfe_eval:expr([binary|'$2']) of
%% Bin when is_bitstring(Bin) -> Bin;
%% _ -> return_error(line('$1'))
%% end
%% 13 sexpr -> '#M(' proper_list ')' :
%% case catch maps:from_list(pair_list('$2')) of
%% Map when is_map(Map) -> Map;
%% _ -> return_error(line('$1'))
%% end
%% 14 list -> sexpr list_tail : ['$1'|'$2].
%% 15 list -> empty : [].
%% 16 list_tail -> sexpr list_tail : ['$1'|'$2'].
%% 17 list_tail -> . sexpr : '$2'.
%% 18 list_tail -> empty : [].
%% 19 proper_list -> sexpr proper_list : ['$1'|'$2'].
%% 20 proper_list -> empty : [].
%% The computed First and Follow sets for the productions. This is the
%% only really tricky bit.
%%
%% First(f) = {symbol number string #' ( [ ' ` , ,@ #( #B( #M(}
%% First(s) = {symbol number string #' ( [ ' ` , ,@ #( #B( #M(}
%% First(l) = {symbol number string #' ( [ ' ` , ,@ #( #B( #M( empty}
%% First(t) = {symbol number string #' ( [ . ' ` , ,@ #( #B( #M( empty}
%% First(p) = {symbol number string #' ( [ ' ` , ,@ #( #B( #M( empty}
%% Follow(f) = empty
%% Follow(s) = {symbol number string #' ( [ ) ] ' ` , ,@ #( #B( #M(}
%% Follow(l) = {symbol number string #' ( [ ) ] ' ` , ,@ #( #B( #M(}
%% Follow(t) = {symbol number string #' ( [ ) ] ' ` , ,@ #( #B( #M(}
%% Follow(p) = {symbol number string #' ( [ ) ] ' ` , ,@ #( #B( #M(}
%% The table (tedious).
%% Top symbol ( ) [ ] . '`,,@ #(#B(#M(
%% f f->s f->s f->s f->s f->s
%% s s->sym s->( l ) s->[ s ] s->' s s->( p )
%% l l->s t l->s t l->e l->s t l->e l->s t l->s t
%% t t->s t t->s t t->e t->s t t->e t->. s t->s t t->s t
%% p p->s p p->s p p->e p->s p p->e p->s p p->s p
%% The non-terminal types.
-define(FORM, 0).
-define(EXPR, 1).
-define(LIST, 2).
-define(TAIL, 3).
-define(PROP, 4).
%% Start non-terminal state.
%% The parser's initial non-terminal (a form), pushed onto the parse
%% stack before any tokens are consumed.
start() -> ?FORM.
%% The reductions - naive and straightforward here.
%% reduce(RuleNumber, ValueStack) -> ValueStack | {error,Line,ErrorInfo}.
%%  Apply the numbered grammar production (see the rule list in the
%%  module header comment), replacing the right-hand-side values on top
%%  of the stack with the reduced term.  Rules 12 and 13 build binary
%%  and map literals by evaluating the parsed forms, and so may return
%%  an error tuple instead of a stack.
reduce(0, Vs) -> Vs;                            %f->s
reduce(1, [T|Vs]) -> [val(T)|Vs];               %s->symbol
reduce(2, [T|Vs]) -> [val(T)|Vs];               %s->number
reduce(3, [T|Vs]) -> [val(T)|Vs];               %s->string
reduce(4, [T|Vs]) ->                            %s->fun
    [make_fun(val(T))|Vs];
reduce(5, [S,_|Vs]) -> [[quote,S]|Vs];          %s->' s
reduce(6, [S,_|Vs]) -> [[backquote,S]|Vs];      %s->` s
reduce(7, [S,_|Vs]) -> [[unquote,S]|Vs];        %s->, s
reduce(8, [S,_|Vs]) ->                          %s->,@ s
    [['unquote-splicing',S]|Vs];
reduce(9, [_,L,_|Vs]) -> [L|Vs];                %s->( s )
reduce(10, [_,L,_|Vs]) -> [L|Vs];               %s->[ s ]
reduce(11, [_,L,_|Vs]) ->                       %s->#( p )
    [list_to_tuple(L)|Vs];
reduce(12, [_,L,B|Vs]) ->                       %s->#B( p )
    %% Evaluate the binary-forming forms; a bad spec is a parse error
    case catch lfe_eval:literal([binary|L]) of
        Bin when is_bitstring(Bin) -> [Bin|Vs];
        _ -> {error,line(B),{illegal,binary}}
    end;
reduce(13, [_,L,B|Vs]) ->                       %s->#M( p )
    %% Pair up the elements to build a map; an odd list is a parse error
    case catch maps:from_list(pair_list(L)) of
        Map when ?IS_MAP(Map) -> [Map|Vs];
        _ -> {error,line(B),{illegal,map}}
    end;
reduce(14, [T,H|Vs]) -> [[H|T]|Vs];             %l->s t
reduce(15, Vs) -> [[]|Vs];                      %l->empty
reduce(16, [T,H|Vs]) -> [[H|T]|Vs];             %t->s t
reduce(17, [T,_|Vs]) -> [T|Vs];                 %t->. s
reduce(18, Vs) -> [[]|Vs];                      %t->empty
reduce(19, [T,H|Vs]) -> [[H|T]|Vs];             %p->s t
reduce(20, Vs) -> [[]|Vs].                      %p->empty
%% table(NonTerminal, TokenType) -> [StackEntry] | error.
%%  The LL(1) prediction table, this gets pretty big but is very
%%  straight forward. Given the non-terminal on top of the state stack
%%  and the type of the next input token, return the list of stack
%%  entries (terminals, non-terminals and {reduce,N} markers) that
%%  replace the non-terminal. Returns 'error' for any token that cannot
%%  start/continue the non-terminal; the caller reports it as illegal.
table(?FORM, symbol) -> [?EXPR];
table(?FORM, number) -> [?EXPR];
table(?FORM, string) -> [?EXPR];
table(?FORM, '#\'') -> [?EXPR];
table(?FORM, '\'') -> [?EXPR];
table(?FORM, '`') -> [?EXPR];
table(?FORM, ',') -> [?EXPR];
table(?FORM, ',@') -> [?EXPR];
table(?FORM, '(') -> [?EXPR];
table(?FORM, '[') -> [?EXPR];
table(?FORM, '#(') -> [?EXPR];
table(?FORM, '#B(') -> [?EXPR];
table(?FORM, '#M(') -> [?EXPR];
%% For ?EXPR the predicted entries also contain the terminals to match
%% and the reduction rule that builds the expression value.
table(?EXPR, symbol) -> [symbol,{reduce,1}];
table(?EXPR, number) -> [number,{reduce,2}];
table(?EXPR, string) -> [string,{reduce,3}];
table(?EXPR, '#\'') -> ['#\'',{reduce,4}];
table(?EXPR, '\'') -> ['\'',?EXPR,{reduce,5}];
table(?EXPR, '`') -> ['`',?EXPR,{reduce,6}];
table(?EXPR, ',') -> [',',?EXPR,{reduce,7}];
table(?EXPR, ',@') -> [',@',?EXPR,{reduce,8}];
table(?EXPR, '(') -> ['(',?LIST,')',{reduce,9}];
table(?EXPR, '[') -> ['[',?LIST,']',{reduce,10}];
table(?EXPR, '#(') -> ['#(',?PROP,')',{reduce,11}];
table(?EXPR, '#B(') -> ['#B(',?PROP,')',{reduce,12}];
table(?EXPR, '#M(') -> ['#M(',?PROP,')',{reduce,13}];
table(?LIST, symbol) -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, number) -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, string) -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '#\'') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '\'') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '`') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, ',') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, ',@') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '(') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '[') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '#(') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '#B(') -> [?EXPR,?TAIL,{reduce,14}];
table(?LIST, '#M(') -> [?EXPR,?TAIL,{reduce,14}];
%% Closing brackets are in Follow(l): reduce to the empty list.
table(?LIST, ')') -> [{reduce,15}];
table(?LIST, ']') -> [{reduce,15}];
table(?TAIL, symbol) -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, number) -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, string) -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '#\'') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '\'') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '`') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, ',') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, ',@') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '(') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '[') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '#(') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '#B(') -> [?EXPR,?TAIL,{reduce,16}];
table(?TAIL, '#M(') -> [?EXPR,?TAIL,{reduce,16}];
%% A dot introduces the improper-list ("dotted pair") tail.
table(?TAIL, '.') -> ['.',?EXPR,{reduce,17}];
table(?TAIL, ')') -> [{reduce,18}];
table(?TAIL, ']') -> [{reduce,18}];
table(?PROP, symbol) -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, number) -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, string) -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '#\'') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '\'') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '`') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, ',') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, ',@') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '(') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '[') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '#(') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '#B(') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, '#M(') -> [?EXPR,?PROP,{reduce,19}];
table(?PROP, ')') -> [{reduce,20}];
table(?PROP, ']') -> [{reduce,20}];
table(_, _) -> error.
%% sexpr(Tokens) ->
%%     {ok,Line,Sexpr,Rest} | {more,Continuation} | {error,Error,Rest}.
%% sexpr(Continuation, Tokens) ->
%%     {ok,Line,Sexpr,Rest} | {more,Continuation} | {error,Error,Rest}.
%%  Entry points: parse one s-expression from a token list, optionally
%%  resuming from the continuation returned by a previous {more,...}.
sexpr(Ts) -> sexpr([], Ts).                     %Start with empty state
sexpr(Cont, Ts) -> parse1(Cont, Ts).

%% Parser continuation state: the line of the first token of the current
%% expression, the state stack and the value stack.
-record(lp, {l=none,st=[],vs=[]}).              %Line, States, Values
%% parse1(Tokens) ->
%%     {ok,Line,Sexpr,Rest} | {more,Continuation} | {error,Error,Rest}.
%% parse1(Continuation, Tokens) ->
%%     {ok,Line,Sexpr,Rest} | {more,Continuation} | {error,Error,Rest}.
%%  This is the top-level of the LL engine. It
%%  initialises/packs/unpacks the continuation information so that
%%  parsing can be suspended when the token stream runs dry and resumed
%%  later with more tokens.
parse1([], Ts) ->                               %First call
    Start = start(),                            %The start state.
    parse1(#lp{l=none,st=[Start],vs=[]}, Ts);
parse1(#lp{l=none}=Lp, [T|_]=Ts) ->             %Guarantee a start line
    %% Record the line of the first token so results/errors can carry it.
    parse1(Lp#lp{l=line(T)}, Ts);
parse1(#lp{l=L,st=St0,vs=Vs0}, Ts) ->
    case parse2(Ts, St0, Vs0) of
        %% Empty state stack and exactly one value: a complete sexpr.
        {done,Rest,[],[V]} -> {ok,L,V,Rest};
        {more,[],St1,Vs1} -> {more,#lp{l=L,st=St1,vs=Vs1}};
        {error,Line,Error,Rest,_,_} ->
            %% Can't really continue from errors here.
            {error,{Line,?MODULE,Error},Rest}
    end.
%% parse2(Tokens, StateStack, ValueStack) ->
%%     {done,Ts,Sstack,Vstack} | {more,Ts,Sstack,Vstack} |
%%     {error,Line,Error,Ts,Sstack,Vstack}.
%%  Main loop of the parser engine. Handle any reductions on the top of
%%  the StateStack, then try to match type of next token with top
%%  state. If we have a match, it is a terminal, then push token onto
%%  value stack, else try to find new state(s) from table using current
%%  state and token type and push them onto state stack. Continue until
%%  no states left.
parse2(Ts, [{reduce,R}|St], Vs0) ->
    %% io:fwrite("p: ~p\n", [{R,Vs}]),
    %% Try to reduce values and push value on value stack.
    case reduce(R, Vs0) of
        {error,L,E} -> {error,L,E,Ts,St,Vs0};
        Vs1 -> parse2(Ts, St, Vs1)
    end;
parse2(Ts, [], Vs) -> {done,Ts,[],Vs};          %All done
parse2([T|Ts]=Ts0, [S|St]=St0, Vs) ->
    %% io:fwrite("p: ~p\n", [{St0,Ts0}]),
    %% Try to match token type against state on stack.
    case type(T) of
        S -> parse2(Ts, St, [T|Vs]);            %Match
        Type ->                                 %Try to predict
            %% Top of stack is a non-terminal (or a mismatched
            %% terminal); consult the prediction table for expansions.
            case table(S, Type) of
                error -> {error,line(T),{illegal,Type},Ts0,St0,Vs};
                Top -> parse2(Ts0, Top ++ St, Vs)
            end
    end;
parse2([], St, Vs) ->                           %Need more tokens
    {more,[],St,Vs};
%% The scanner hands us an {eof,Line} marker instead of a list when the
%% input really ended; running out mid-expression is an error.
parse2({eof,L}=Ts, St, Vs) ->                   %No more tokens
    {error,L,{missing,token},Ts,St,Vs}.
%% Token accessors. Scanner tokens are tuples shaped {Type,Line} or
%% {Type,Line,Value}, so a plain element/2 lookup serves both arities.
type(Token) ->
    element(1, Token).

line(Token) ->
    element(2, Token).

val(Token) ->
    element(3, Token).
%% make_fun(String) -> FunList.
%%  Convert a fun string into a 'fun' sexpr:
%%  "F/A"   -> ['fun', F, A]
%%  "M:F/A" -> ['fun', M, F, A]
make_fun("=:=/2") ->
    %% Special-cased: the ':' inside the '=:=' operator name would
    %% otherwise be mistaken for a module separator below.
    ['fun', '=:=', 2];
make_fun(Str) ->
    Slash = string:rchr(Str, $/),
    Arity = list_to_integer(string:substr(Str, Slash + 1)),
    case string:chr(Str, $:) of
        0 ->
            %% No module qualifier: everything before '/' is the name.
            Name = list_to_atom(string:substr(Str, 1, Slash - 1)),
            ['fun', Name, Arity];
        Colon ->
            Mod = list_to_atom(string:substr(Str, 1, Colon - 1)),
            Name = list_to_atom(string:substr(Str, Colon + 1, Slash - Colon - 1)),
            ['fun', Mod, Name, Arity]
    end.
%% pair_list(List) -> [{A,B}].
%%  Pair up consecutive elements as 2-tuples, preserving order. A list
%%  with an odd number of elements crashes with function_clause, which
%%  is the intended error signal for a malformed map literal.
pair_list(Elems) ->
    pair_list(Elems, []).

pair_list([A, B | Rest], Acc) ->
    pair_list(Rest, [{A, B} | Acc]);
pair_list([], Acc) ->
    lists:reverse(Acc).
%% format_error(Error) -> Chars.
%%  Render a parser error reason as printable chardata (io_lib:format/2
%%  is the documented equivalent of io_lib:fwrite/2).
format_error({missing, Token}) ->
    io_lib:format("missing ~p", [Token]);
format_error({illegal, What}) ->
    io_lib:format("illegal ~p", [What]).
%% @doc
%% This module defines `intcode' instruction set along with providing tools to read `intcode' programs from a string.
-module(intcode).
-include_lib("eunit/include/eunit.hrl").
-include("intcode.hrl").
-export([
% Actual public API
read_program/1,
% API for VM implementations
instruction/1,
increment_pc/3,
advance/3,
read_instruction/1
]).
-export_type([
memory/0, input/0, output/0, value/0, address/0, instruction/0
]).
%% @doc
%% Reads an intcode program from a single unquoted CSV line, returning
%% the list of integer memory cells in source order.
-spec read_program(Line :: string()) -> list(value()).
read_program(Line) ->
    Cells = string:tokens(Line, ","),
    lists:map(fun erlang:list_to_integer/1, Cells).
%% @doc
%% Defines the `intcode' instruction set.
%%
%% This function is intended to be used only by VM implementations.
%%
%% <table>
%% <thead>
%% <tr>
%% <th>Code</th>
%% <th>Instruction name</th>
%% <th>Description</th>
%% </tr>
%% </thead>
%% <tbody>
%% <tr>
%% <td>1</td>
%% <td>ADD</td>
%% <td>Set `#C=A+B'</td>
%% </tr>
%% <tr>
%% <td>2</td>
%% <td>MUL</td>
%% <td>Set `#C=A*B'.</td>
%% </tr>
%% <tr>
%% <td>3</td>
%% <td>INP</td>
%% <td>Read from input and store to `#A'</td>
%% </tr>
%% <tr>
%% <td>4</td>
%% <td>OUT</td>
%% <td>Write `A' to output</td>
%% </tr>
%% <tr>
%% <td>5</td>
%% <td>JNZ</td>
%% <td>Set `PC=#B' when `A=/=0'</td>
%% </tr>
%% <tr>
%% <td>6</td>
%% <td>JZ</td>
%% <td>Set `PC=#B' when `A==0'</td>
%% </tr>
%% <tr>
%% <td>7</td>
%% <td>TLT</td>
%% <td>Set `#C=1' when `A<B', otherwise set `#C=0'</td>
%% </tr>
%% <tr>
%% <td>8</td>
%% <td>TEQ</td>
%% <td>Set `#C=1' when `A==B', otherwise set `#C=0'</td>
%% </tr>
%% <tr>
%% <td>9</td>
%% <td>ARB</td>
%% <td>Adjust the relative base by `A'</td>
%% </tr>
%% <tr>
%% <td>99</td>
%% <td>HLT</td>
%% <td>Halt the VM</td>
%% </tr>
%% </tbody>
%% </table>
%% @param Code The instruction to look up
%% @returns An instruction representation as defined by the {@type instruction()} type:
%% a pair of the instruction's arity and its implementation function.
-spec instruction(Code :: value()) -> instruction().
instruction(1) -> alu(fun(A, B) -> A + B end);
instruction(2) -> alu(fun(A, B) -> A * B end);
instruction(3) -> {1, fun input/3};
instruction(4) -> {1, fun output/3};
instruction(5) -> jumpwhen(fun(A) -> A =/= 0 end);
instruction(6) -> jumpwhen(fun(A) -> A == 0 end);
instruction(7) -> alu(fun(A, B) when A < B -> 1; (_, _) -> 0 end);
instruction(8) -> alu(fun(A, B) when A == B -> 1; (_, _) -> 0 end);
%% ARB: shift the relative base register by the (resolved) first argument.
instruction(9) -> {1, fun([{_, A}], #machine_state{relbase = RB}, VmState) -> {continue, #machine_state{relbase = A + RB}, VmState} end};
%% HLT: keep the PC where it is so 'advance' does not step past it.
instruction(99) -> {0, fun(_, #machine_state{pc = Pc}, VmState) -> {halt, #machine_state{pc = Pc}, VmState} end}.
%% @doc Increments the program counter to the next instruction, caching
%% the opcode found at the new position.
%%
%% This function is intended to be used only by VM implementations.
-spec increment_pc(pc(), memory(), instruction_arity()) -> pc().
increment_pc(#pc{pc = Current}, Memory, Arity) ->
    %% Skip the opcode cell itself plus its Arity argument cells.
    Next = Current + Arity + 1,
    #pc{pc = Next, instruction = array:get(Next, Memory)}.
%% @doc Updates `OldMachineState' with the deltas provided in
%% `NewMachineStateDelta' and optionally advances the program counter with
%% `Arity'.
%%
%% A `nil' field in the delta means "keep the old value". When the delta
%% carries no explicit program counter, the counter is advanced past the
%% current instruction (using the delta memory when one was supplied).
%%
%% This function is intended to be used only by VM implementations.
-spec advance(OldMachineState :: machine_state(), NewMachineStateDelta :: partial_machine_state(), Arity :: non_neg_integer()) -> machine_state().
advance(#machine_state{pc = OldPc, mem = OldMem, output = OldOutp, relbase = OldRelBase},
        #machine_state{pc = NewPc, mem = NewMem, output = NewOutp, relbase = NewRelBase},
        Arity) ->
    %% Keep the delta value unless it is nil, in which case keep the old.
    Pick = fun(nil, Old) -> Old; (New, _Old) -> New end,
    Mem = Pick(NewMem, OldMem),
    Pc = case NewPc of
             nil -> increment_pc(OldPc, Mem, Arity);
             _ -> NewPc
         end,
    #machine_state{
        pc = Pc,
        mem = Mem,
        output = Pick(NewOutp, OldOutp),
        relbase = Pick(NewRelBase, OldRelBase)
    }.
%% @doc Decodes the instruction in the given program counter.
%%
%% This function is intended to be used only by VM implementations.
%%
%% The opcode is the two low decimal digits of the cell; the remaining
%% higher digits are per-argument addressing modes. Reversing the digit
%% string aligns mode digits with argument order, and any missing digits
%% are padded with $0 (position mode), so the Modes list always has
%% exactly Arity entries for the zip below.
-spec read_instruction(machine_state()) ->
    {
        Arity :: non_neg_integer(),
        Function :: fun((list(instruction_argument()), machine_state(), vm_state()) -> {continuation_method(), partial_machine_state(), vm_state()}),
        InstructionArguments :: list(instruction_argument())
    }.
read_instruction(#machine_state{pc=#pc{pc = Pos, instruction = I}, mem=Memory, relbase = Relbase}) ->
    {Arity, Function} = instruction(I rem 100),
    %% Mode digits, least-significant (first argument) first.
    XModes = lists:reverse(
        lists:sublist(
            integer_to_list(I),
            max(0, length(integer_to_list(I)) - 2))
    ),
    Modes = XModes ++ [$0 || _ <- lists:seq(1, Arity - length(XModes))],
    %% Resolve each argument to {EffectiveAddress, Value}:
    %% $1 = immediate, $2 = relative-base, default = position mode.
    Vs = [
        case M of
            $1 -> {array:get(O + Pos, Memory), array:get(O + Pos, Memory)};
            $2 -> {array:get(O + Pos, Memory) + Relbase, array:get(array:get(O + Pos, Memory) + Relbase, Memory)};
            _ -> {array:get(O + Pos, Memory), array:get(array:get(O + Pos, Memory), Memory)}
        end || {M, O} <- lists:zip(Modes, lists:seq(1, Arity))],
    {Arity, Function, Vs}.
%% @doc Sets the value at the given address in memory, returning the
%% updated memory array.
-spec memset(address(), value(), memory()) -> memory().
memset(Address, NewValue, Memory) ->
    array:set(Address, NewValue, Memory).
%% @doc Returns the value at the given address in memory. Reads past the
%% allocated end yield 0, giving the machine its "infinite
%% zero-initialised memory" semantics.
-spec memget(address(), memory()) -> value().
memget(Address, Memory) ->
    Size = array:size(Memory),
    if
        Address >= Size -> 0;
        true -> array:get(Address, Memory)
    end.
%% @doc Returns an instruction specification for an ALU-like operation:
%% a 3-argument instruction that reads the values of its first two
%% arguments, applies Fun, and stores the result at the effective
%% address of the third argument.
-spec alu(fun((value(), value()) -> value())) -> instruction().
alu(Fun) ->
    {3,
        %% Coff is the write target: the effective address, not the value.
        fun([{_, A}, {_, B}, {Coff, _}], #machine_state{mem = Memory}, VmState) ->
            {continue, #machine_state{
                mem = memset(Coff, Fun(A, B), Memory)
            }, VmState}
        end
    }.
%% @doc Reads a value from the input to the address specified by the first
%% argument to this function. When no input is available the VM is told
%% to sleep (the machine state is left unchanged, so the instruction is
%% retried once input arrives).
-spec input(
    InstructionArguments :: list(instruction_argument()),
    CurrentMachineState :: machine_state(),
    CurrentVmState :: vm_state()) ->
        {ContinuationMethod :: continuation_method(), NewMachineStateDelta :: partial_machine_state(), NewVmState :: vm_state()}.
input([{Aoff, _}], #machine_state{mem = Memory} = MachineState, VmState) ->
    case read_input(MachineState, VmState) of
        {ok, X, NewVmState} -> {continue,
            #machine_state{
                mem = memset(Aoff, X, Memory)
            },
            NewVmState
        };
        %% No input yet: suspend without advancing the program counter.
        {sleep, NewVmState} -> {sleep,
            MachineState,
            NewVmState
        }
    end.
%% @doc Outputs the value of the first argument to this function by
%% pushing it onto the machine's output sink.
-spec output(
    InstructionArguments :: list(instruction_argument()),
    CurrentMachineState :: machine_state(),
    CurrentVmState :: VmState) ->
        {continuation_method(), NewMachineStateDeltas :: partial_machine_state(), NewVmState :: VmState}
        when VmState :: vm_state().
output([{_, Value}], #machine_state{output = Out}, VmState) ->
    {continue, #machine_state{output = write_output(Out, Value)}, VmState}.
%% @doc Jumps when `Fun' returns ``'true' '' for the value of the first
%% argument to this instruction; the jump target is the value of the
%% second argument. When the predicate fails, an empty delta is returned
%% so the caller simply advances past the instruction.
-spec jumpwhen(fun((value()) -> boolean())) -> instruction().
jumpwhen(Fun) -> {2,
    fun([{_, A}, {_, B}], #machine_state{mem = Memory}, VmState) ->
        case Fun(A) of
            true -> {continue,
                #machine_state{
                    pc = #pc{
                        pc = B,
                        %% Pre-fetch the opcode at the jump target.
                        instruction = memget(B, Memory)
                    }
                },
                VmState
            };
            _ -> {continue, #machine_state{}, VmState}
        end
    end}.
%% @doc Appends the given value to the given output sink via the
%% intcode_io abstraction.
-spec write_output(output(), value()) -> output().
write_output(Out, Value) ->
    intcode_io:push(Out, Value).
%% @doc Attempts to read input from the VM state, and if that fails,
%% requests a notification (via the configured input callback) for when
%% input becomes available, telling the caller to sleep in the meantime.
-spec read_input(CurrentMachineState :: machine_state(), CurrentVmState :: vm_state()) ->
    {sleep, NewVmState :: vm_state()} | {ok, Result :: value(), NewVmState :: vm_state()}.
read_input(MachineState, #vm_state{input = Input, input_callback = Callback} = VmState) ->
    Notify = fun() -> Callback(MachineState, VmState) end,
    case intcode_io:poll_or_notify(Input, Notify) of
        %% nil/wait both mean "nothing to read yet".
        {Pending, NewInput} when Pending == nil orelse Pending == wait ->
            {sleep, VmState#vm_state{input = NewInput}};
        {Value, NewInput} ->
            {ok, Value, VmState#vm_state{input = NewInput}}
    end.
%%%=============================================================================
%%% @doc Worker process to handle hackney metrics
%%%
%%% A worker process has two jobs:
%%%
%%% (1) Calculate the metric value.
%%%
%%% Hackney does not keep the state of its metrics, but instead emits events to
%%% the metrics engine, like "+1 on this counter", "set X on this gauge", "add
%%% Y on this histogram". The job of a metric worker is to process these events
%%% and keep an up-to-date state that represent the real value of the tracked
%%% metric. State updates runs on constant time - aka O(1) complexity, which is
%%% important since each single request generates about 9 metric updates.
%%%
%%% (2) Send the metric value to Telemetry
%%%
%%% Telemetry will apply backpressure if we send too many events - which may
%%% cause the process inbox to grow if we try to send the metric value after
%%% every update. But because the state already has the most up-to-date value,
%%% we can send this value on intervals - this does cause the metric to be less
%%% accurate but allows us to use telemetry.
%%%
%%% Start options:
%%%
%%% - metric: the name of the metric, as a list of atoms. It's required.
%%% - report_interval: the interval, in milliseconds, that a worker reports data
%%% to telemetry. If set to 0, scheduled reports are disabled and the worker
%%% will report after every update. Defaults to the value configured in the
%%% hackney_telemetry/report_interval config.
%%%
%%% @end
%%%=============================================================================
-module(hackney_telemetry_worker).
-behaviour(gen_server).
% gen_server callbacks
-export(
[code_change/3, init/1, handle_call/3, handle_cast/2, handle_info/2, start_link/1, terminate/2]
).
% public functions
-export([child_spec/1, update/3, worker_name/1]).
-include("hackney_telemetry.hrl").
-include("hackney_telemetry_worker.hrl").
-define(DEFAULT_REPORT_INTERVAL, 5000).
%%------------------------------------------------------------------------------
%% @doc Generates a supervisor child spec for a metric worker.
%%
%% Args is the proplist of start options; the required `metric' key (the
%% metric name, a list of atoms) makes the child id unique per metric.
%% @end
%%------------------------------------------------------------------------------
%% Spec fixed: the argument is the options proplist, not a bare
%% hackney_metric().
-spec child_spec([{atom(), term()}]) -> map().
child_spec(Args) ->
  Metric = proplists:get_value(metric, Args),
  #{id => {?MODULE, Metric}, start => {?MODULE, start_link, [Args]}}.
%%------------------------------------------------------------------------------
%% @doc Returns the globally-registered name of the worker process that
%% owns the given metric on this node.
%% @end
%%------------------------------------------------------------------------------
-spec worker_name(hackney_metric()) -> {global, {atom(), hackney_metric()}}.
worker_name(Metric) ->
  {global, {node(), Metric}}.
%%-----------------------------------------------------------------------------
%% @doc Updates a metric by casting an update event (the raw event value
%% plus the fold function to apply to the worker's accumulator) to the
%% metric's worker process. Fire-and-forget: always returns ok.
%% @end
%%-----------------------------------------------------------------------------
-spec update(hackney_metric(), any(), transform_fun()) -> ok.
update(Metric, EventValue, TransformFun) ->
  gen_server:cast(worker_name(Metric), {update_event, EventValue, TransformFun}).
%%-----------------------------------------------------------------------------
%% @doc Starts the server, registering it globally under the name derived
%% from the `metric' start option (see worker_name/1).
%% @end
%%-----------------------------------------------------------------------------
start_link(Args) ->
  Metric = proplists:get_value(metric, Args),
  gen_server:start_link(worker_name(Metric), ?MODULE, Args, []).
%%-----------------------------------------------------------------------------
%% @doc Initialize the state of the server: resolve the telemetry event
%% settings from the metric name, start with an accumulator of 0 and,
%% when periodic reporting is enabled, schedule the first report.
%% Stops with an error for unsupported metric names.
%% @end
%%-----------------------------------------------------------------------------
init(Args) ->
  case telemetry_settings(Args) of
    {ok, TelemetrySettings} ->
      State =
        #worker_state{
          value = 0,
          report_interval = fetch_report_interval(Args),
          telemetry_settings = TelemetrySettings
        },
      maybe_schedule_report(State),
      {ok, State};
    {error, Error} -> {stop, Error}
  end.
%% @private Derive the telemetry event name, measurement key and metadata
%% from the configured metric name. Only the `hackney' and
%% `hackney_pool' metric families are supported.
telemetry_settings(Args) ->
  case proplists:get_value(metric, Args) of
    [hackney, Key] ->
      {ok, {[hackney], Key, #{}}};
    [hackney_pool, Pool, Key] ->
      {ok, {[hackney_pool], Key, #{pool => Pool}}};
    _Other ->
      {error, unsupported_metric}
  end.
%%-----------------------------------------------------------------------------
%% @doc gen_server handle_call implementation. No synchronous requests
%% are supported; everything is acknowledged with `ok' and the state is
%% left untouched.
%% @end
%%-----------------------------------------------------------------------------
handle_call(_Request, _From, State) ->
  {reply, ok, State}.
%%-----------------------------------------------------------------------------
%% @doc Handles update events: folds the event value into the current
%% accumulator with the caller-supplied transform function. When
%% scheduled reporting is disabled (interval 0) the new value is pushed
%% to telemetry immediately.
%% @end
%%-----------------------------------------------------------------------------
handle_cast({update_event, EventValue, TransformFun}, State) ->
  NewValue = TransformFun(State#worker_state.value, EventValue),
  UpdatedState = State#worker_state{value = NewValue},
  if
    UpdatedState#worker_state.report_interval == 0 -> report(UpdatedState);
    true -> ok
  end,
  {noreply, UpdatedState}.
%%-----------------------------------------------------------------------------
%% @doc Handles the periodic `report' timer message: push the current
%% value to telemetry and re-arm the timer (send_after is one-shot).
%% @end
%%-----------------------------------------------------------------------------
handle_info(report, State) ->
  report(State),
  maybe_schedule_report(State),
  {noreply, State}.
%%------------------------------------------------------------------------------
%% @doc gen_server terminate callback. No cleanup is required.
%% @end
%%------------------------------------------------------------------------------
terminate(_Reason, _State) ->
  ok.
%%------------------------------------------------------------------------------
%% @doc gen_server code_change callback. The state shape is unchanged
%% across releases, so the state is passed through as-is.
%% @end
%%------------------------------------------------------------------------------
code_change(_OldVersion, State, _Extra) ->
  {ok, State}.
%%------------------------------------------------------------------------------
%% @doc Evaluates the current state of the process and reports the value
%% to telemetry, using the event name / measurement key / metadata that
%% were resolved from the metric name at init time.
%% @end
%%------------------------------------------------------------------------------
-spec report(#worker_state{}) -> ok.
report(State) ->
  {Metric, MeasurementKey, Metadata} = State#worker_state.telemetry_settings,
  Measurement = #{MeasurementKey => State#worker_state.value},
  telemetry:execute(Metric, Measurement, Metadata),
  ok.
%%------------------------------------------------------------------------------
%% @doc Schedules the next `report' message when periodic reporting is
%% enabled (report_interval > 0). A zero interval means the worker
%% reports inline after every update instead, so nothing is scheduled.
%% @end
%%------------------------------------------------------------------------------
-spec maybe_schedule_report(#worker_state{}) -> ok.
maybe_schedule_report(#worker_state{report_interval = Interval}) when Interval > 0 ->
  _ = erlang:send_after(Interval, self(), report),
  ok;
maybe_schedule_report(_State) ->
  ok.
%%------------------------------------------------------------------------------
%% @doc Fetches the interval (in milliseconds) at which this worker
%% reports its metric value to telemetry.
%%
%% Precedence: the `report_interval' start option, then the
%% `hackney_telemetry' application environment, then the compiled-in
%% ?DEFAULT_REPORT_INTERVAL.
%% @end
%%------------------------------------------------------------------------------
%% Spec fixed: the argument is the start-options proplist (not a
%% #worker_state{}) and the result is the interval (not `ok').
-spec fetch_report_interval([{atom(), term()}]) -> non_neg_integer().
fetch_report_interval(Args) ->
  case proplists:get_value(report_interval, Args) of
    undefined ->
      %% application:get_env/3 falls back to the given default when the
      %% key is absent from the application environment.
      application:get_env(hackney_telemetry, report_interval, ?DEFAULT_REPORT_INTERVAL);
    Value ->
      Value
  end.
%% -----------------------------------------------------------------------------
%%
%% Hamcrest Erlang.
%%
%% Copyright (c) 2017 <NAME> (<EMAIL>)
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
%% THE SOFTWARE.
%% -----------------------------------------------------------------------------
%% @author <NAME> <<EMAIL>>
%% @copyright 2017 <NAME>.
%% @doc Hamcrest parse transform.
%% @reference See <a href="http://code.google.com/p/hamcrest/">Hamcrest</a>
%% for more information.
%% -----------------------------------------------------------------------------
-module(hamcrest_transform).
-export([parse_transform/2]).
%% @doc
%% This parse transform imports the hamcrest matchers if they are not
%% imported yet. It also expands `assertThat' and `assertThat_' calls to
%% the corresponding assertions.
%%
%% Note: the generated-variable counter lives in the process dictionary
%% and is reset once per transformed module, before traversal starts.
parse_transform(Forms, _Opts) ->
    ok = init_var_names(),
    FormsWithImport = ensure_import(Forms),
    lists:map(fun transform_asserts_form/1, FormsWithImport).
%% @private
%% Returns true when the form list already contains an import attribute
%% for hamcrest_matchers.
%%
have_import(Forms) ->
    lists:any(
        fun({attribute, _Line, import, {hamcrest_matchers, _Imported}}) -> true;
           (_Other) -> false
        end,
        Forms).
%% @private
%% Adds an import for the matchers when one is missing; otherwise
%% returns the forms unchanged.
%%
ensure_import(Forms) ->
    case have_import(Forms) of
        false -> add_import_form(Forms);
        true -> Forms
    end.
%% @private
%% Add import after the module attribute. The imported list is the full
%% export list of hamcrest_matchers minus the module_info functions, so
%% the transformed module can call any matcher unqualified.
%%
add_import_form([]) ->
    [];
add_import_form([{attribute, L, module, _Name} = Form | Other]) ->
    Imported = hamcrest_matchers:module_info(exports) -- [{module_info,0}, {module_info,1}],
    ImportForm = {attribute, L, import, {hamcrest_matchers, Imported}},
    [Form, ImportForm | Other];
add_import_form([Form | Other]) ->
    %% Keep scanning until the module attribute is found.
    [Form | add_import_form(Other)].
%% @private
%% Transform `assertThat' calls: descend into function forms (clause by
%% clause); all other top-level forms pass through untouched.
%%
transform_asserts_form({function, Line, Name, Arity, Clauses}) ->
    {function, Line, Name, Arity, lists:map(fun transform_asserts_clause/1, Clauses)};
transform_asserts_form(Form) ->
    Form.
%% @private
%% Transform `assertThat' calls within a single function clause: the
%% head and guards cannot contain calls, so only the body is traversed.
%%
transform_asserts_clause({clause, Line, Head, Guard, Exprs}) ->
    {clause, Line, Head, Guard, lists:map(fun transform_asserts_expr/1, Exprs)}.
%% @private
%% Transform `assertThat' calls, traversing expressions recursively.
%% assertThat/2 expands to an inline assertion; assertThat_/2 expands to
%% a zero-arity fun wrapping the same assertion (for deferred checks).
%% Any other tuple/list AST node is walked structurally.
%%
transform_asserts_expr({call, LineC, {atom, _LineA, assertThat}, [_Value, _Expected] = Args}) ->
    transform_assert_call(LineC, Args);
transform_asserts_expr({call, LineC, {atom, _LineA, assertThat_}, [_Value, _Expected] = Args}) ->
    {'fun', LineC, {clauses, [{clause, LineC, [], [], [
        transform_assert_call(LineC, Args)
    ]}]}};
transform_asserts_expr(Expr) when is_tuple(Expr) ->
    list_to_tuple(lists:map(fun transform_asserts_expr/1, tuple_to_list(Expr)));
transform_asserts_expr(Expr) when is_list(Expr) ->
    lists:map(fun transform_asserts_expr/1, Expr);
transform_asserts_expr(Expr) ->
    Expr.
%% @private
%%
%% Transforms `assertThat(Value, MatchSpec)' to
%% ```
%% case hamcrest:check(Value, MatchSpec) of
%%     true -> true;
%%     {assertion_failed, __V_X} -> erlang:error({assertion_failed, __V_X});
%%     __V_X -> erlang:error({assertion_failed, __V_X})
%% end
%% '''
%%
%% The transformation differs from the `?assertThat(Value, MatchSpec)' macro
%% in order to have a stack trace without a temporary function on top of it.
%% A freshly generated variable name (next_var_name/0) is used in both
%% failure clauses so it cannot collide with user variables in scope.
%%
transform_assert_call(Line, Args) ->
    VarName = next_var_name(),
    %% AST for: erlang:error({assertion_failed, <VarName>})
    CallErlangError = {call, Line, {remote, Line, {atom, Line, erlang}, {atom, Line, error}}, [
        {tuple, Line, [
            {atom, Line, assertion_failed},
            {var, Line, VarName}
        ]}
    ]},
    {'case', Line, {call,Line, {remote,Line,{atom,Line,hamcrest},{atom,Line,check}}, Args}, [
        {clause, Line, [{atom, Line, true}], [], [
            {atom, Line, true}
        ]},
        {clause, Line, [{tuple, Line, [{atom, Line, assertion_failed}, {var, Line, VarName}]}], [], [
            CallErlangError
        ]},
        %% Catch-all: any other return value is also an assertion failure.
        {clause, Line, [{var, Line, VarName}], [], [
            CallErlangError
        ]}
    ]}.
%% @private
%% Resets the per-process counter used to generate unique variable names.
%%
init_var_names() ->
    erlang:put({?MODULE, var_num}, 0),
    ok.
%% @private
%% Bumps the process-dictionary counter and returns a fresh generated
%% variable name of the form '__V_<N>'. Requires init_var_names/0 to
%% have been called first in this process.
%%
next_var_name() ->
    Next = erlang:get({?MODULE, var_num}) + 1,
    erlang:put({?MODULE, var_num}, Next),
    list_to_atom("__V_" ++ integer_to_list(Next)).
%%--------------------------------------------------------------------
%% Copyright (c) 2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(mria_membership_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include("mria.hrl").
-include_lib("eunit/include/eunit.hrl").
%% Run every t_* function in this module as a test case.
all() -> mria_ct:all(?MODULE).

%% Per-case setup: stub mria_mnesia's cluster status, start the
%% membership server and seed it with three fake members.
init_per_testcase(_TestCase, Config) ->
    ok = meck:new(mria_mnesia, [non_strict, passthrough, no_history]),
    ok = meck:expect(mria_mnesia, cluster_status, fun(_) -> running end),
    {ok, _} = mria_membership:start_link(),
    ok = init_membership(3),
    Config.

%% Per-case teardown: stop the server and remove the mock.
end_per_testcase(_TestCase, Config) ->
    ok = mria_membership:stop(),
    ok = meck:unload(mria_mnesia),
    Config.
%% Unknown nodes yield false; seeded members come back as 'up' records.
t_lookup_member(_) ->
    false = mria_membership:lookup_member('node@127.0.0.1'),
    #member{node = 'n1@127.0.0.1', status = up}
        = mria_membership:lookup_member('n1@127.0.0.1').

%% The coordinator is the local node by default, or the first of an
%% explicit node list.
t_coordinator(_) ->
    ?assertEqual(node(), mria_membership:coordinator()),
    Nodes = ['n1@127.0.0.1', 'n2@127.0.0.1', 'n3@127.0.0.1'],
    ?assertEqual('n1@127.0.0.1', mria_membership:coordinator(Nodes)).

%% node_down/node_up events flip the member status (the sleeps let the
%% async casts be processed before asserting).
t_node_down_up(_) ->
    ok = meck:expect(mria_mnesia, is_node_in_cluster, fun(_) -> true end),
    ok = mria_membership:node_down('n2@127.0.0.1'),
    ok = timer:sleep(100),
    #member{status = down} = mria_membership:lookup_member('n2@127.0.0.1'),
    ok = mria_membership:node_up('n2@127.0.0.1'),
    ok = timer:sleep(100),
    #member{status = up} = mria_membership:lookup_member('n2@127.0.0.1').

%% mnesia_down/mnesia_up events flip the member's mnesia field.
t_mnesia_down_up(_) ->
    ok = mria_membership:mnesia_down('n2@127.0.0.1'),
    ok = timer:sleep(100),
    #member{mnesia = stopped} = mria_membership:lookup_member('n2@127.0.0.1'),
    ok = mria_membership:mnesia_up('n2@127.0.0.1'),
    ok = timer:sleep(100),
    #member{status = up, mnesia = running} = mria_membership:lookup_member('n2@127.0.0.1').

%% Partition notifications are accepted without error.
t_partition_occurred(_) ->
    ok = mria_membership:partition_occurred('n2@127.0.0.1').

t_partition_healed(_) ->
    ok = mria_membership:partition_healed(['n2@127.0.0.1']).

%% Announcements (e.g. leave) are accepted without error.
t_announce(_) ->
    ok = mria_membership:announce(leave).

%% With only fake remote members, the local node is the leader.
t_leader(_) ->
    ?assertEqual(node(), mria_membership:leader()).

t_is_all_alive(_) ->
    ?assert(mria_membership:is_all_alive()).

%% Local node + 3 seeded fakes = 4 members.
t_members(_) ->
    ?assertEqual(4, length(mria_membership:members())).

t_nodelist(_) ->
    Nodes = lists:sort([node(),
                        'n1@127.0.0.1',
                        'n2@127.0.0.1',
                        'n3@127.0.0.1'
                       ]),
    ?assertEqual(Nodes, lists:sort(mria_membership:nodelist())).

t_is_member(_) ->
    ?assert(mria_membership:is_member('n1@127.0.0.1')),
    ?assert(mria_membership:is_member('n2@127.0.0.1')),
    ?assert(mria_membership:is_member('n3@127.0.0.1')).

%% The local member record always describes the running node.
t_local_member(_) ->
    #member{node = Node} = mria_membership:local_member(),
    ?assertEqual(node(), Node).
%% Cluster test: after two of three core nodes leave voluntarily, only
%% the remaining node must be reported as running. try/after ensures
%% the slave nodes are torn down even if an assertion fails.
t_leave(_) ->
Cluster = mria_ct:cluster([core, core, core], []),
try
[N0, N1, N2] = mria_ct:start_cluster(mria, Cluster),
?assertMatch([N0, N1, N2], rpc:call(N0, mria, info, [running_nodes])),
ok = rpc:call(N1, mria, leave, []),
ok = rpc:call(N2, mria, leave, []),
?assertMatch([N0], rpc:call(N0, mria, info, [running_nodes]))
after
mria_ct:teardown_cluster(Cluster)
end.
%% Cluster test: force_leave/1 issued from N0 must evict N1 and N2 from
%% the cluster, leaving only N0 in running_nodes. As in t_leave, the
%% try/after guarantees slave-node teardown.
t_force_leave(_) ->
Cluster = mria_ct:cluster([core, core, core], []),
try
[N0, N1, N2] = mria_ct:start_cluster(mria, Cluster),
?assertMatch(true, rpc:call(N0, mria_node, is_running, [N1])),
true = rpc:call(N0, mria_node, is_running, [N2]),
?assertMatch([N0, N1, N2], rpc:call(N0, mria, info, [running_nodes])),
?assertMatch(ok, rpc:call(N0, mria, force_leave, [N1])),
?assertMatch(ok, rpc:call(N0, mria, force_leave, [N2])),
?assertMatch([N0], rpc:call(N0, mria, info, [running_nodes]))
after
mria_ct:teardown_cluster(Cluster)
end.
%%--------------------------------------------------------------------
%% Helper functions
%%--------------------------------------------------------------------
%% Seed the membership table with N fake peers (n1..nN) by simulating a
%% pong reply from each of them, then announce the local node joining.
init_membership(N) ->
    RegisterPeer = fun(I) ->
                           ok = mria_membership:pong(node(), member(I))
                   end,
    lists:foreach(RegisterPeer, lists:seq(1, N)),
    mria_membership:announce(join).
%% Build a fake #member{} record for peer nI@127.0.0.1; the address and
%% hash are derived from I so that each seeded entry is distinguishable.
member(I) ->
    PeerName = lists:concat(["n", I, "@127.0.0.1"]),
    #member{node   = list_to_atom(PeerName),
            addr   = {{127, 0, 0, 1}, 5000 + I},
            guid   = mria_guid:gen(),
            hash   = 1000 * I,
            status = up,
            mnesia = running,
            ltime  = erlang:timestamp()}.
%% -------------------------------------------------------------------
%%
%% merge_index: main interface to merge_index library.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc The merge_index application is an index from Index/Field/Term
%% (IFT) tuples to Values. The values are document IDs or some form
%% of identification for the object which contains the IFT.
%% Furthermore, each IFT/Value pair has an associated proplist (Props)
%% and timestamp (Timestamp) which are used to describe where the IFT
%% was found in the Value and at what time the entry was written,
%% respectively.
-module(merge_index).
-author("<NAME> <<EMAIL>>").
-include("merge_index.hrl").
-include_lib("kernel/include/file.hrl").
-export([
%% API
start_link/1,
stop/1,
index/2,
lookup/5, lookup_sync/5,
range/7, range_sync/7,
iterator/2,
info/4,
is_empty/1,
fold/3,
drop/1,
compact/1
]).
-type(index() :: any()).
-type(field() :: any()).
-type(mi_term() :: any()).
-type(size() :: all | integer()).
-type(posting() :: {Index::index(),
Field::field(),
Term::mi_term(),
Value::any(),
Props::[proplists:property()],
Timestamp::integer()}).
-type(iterator() :: fun(() -> {any(), iterator()}
| eof
| {error, Reason::any()})).
-define(LOOKUP_TIMEOUT, 60000).
%% @doc Start a new merge_index server. `Root' is the directory used as
%% the root path for the server's state.
-spec start_link(string()) -> {ok, Pid::pid()} | ignore | {error, Error::any()}.
start_link(Root) -> mi_server:start_link(Root).
%% @doc Stop the merge_index server.
-spec stop(pid()) -> ok.
stop(Server) -> mi_server:stop(Server).
%% @doc Index `Postings'. Delegates to mi_server:index/2.
-spec index(pid(), [posting()]) -> ok.
index(Server, Postings) -> mi_server:index(Server, Postings).
%% @doc Return a `Weight' for the given IFT. Delegates to mi_server:info/4.
-spec info(pid(), index(), field(), mi_term()) ->
{ok, [{Term::any(), Weight::integer()}]}.
info(Server, Index, Field, Term) -> mi_server:info(Server, Index, Field, Term).
%% @doc Return an `Iterator' over the entire index using the given
%% `Filter'.
%% The returned fun speaks the back-pressure variant of the result
%% protocol (it replies {continue, Ref} to {waiting, From, Ref}
%% messages) - see result_iterator_bp/1.
-spec iterator(pid(), function()) -> Iterator::iterator().
iterator(Server, Filter) ->
{ok, Ref} = mi_server:iterator(Server, Filter),
make_result_iterator_bp(Ref).
%% @doc Lookup the results for IFT and return an iterator. This
%% allows the caller to process data as it comes in/wants it.
%%
%% @throws lookup_timeout (after ?LOOKUP_TIMEOUT ms of server silence)
%%
%% `Server' - Pid of the server instance.
%%
%% `Filter' - Function used to filter the results.
-spec lookup(pid(), index(), field(), mi_term(), function()) -> iterator().
lookup(Server, Index, Field, Term, Filter) ->
{ok, Ref} = mi_server:lookup(Server, Index, Field, Term, Filter),
make_result_iterator(Ref).
%% @doc Lookup the results for IFT and return a list. The caller will
%% block until the result list is built.
%%
%% @throws lookup_timeout (after ?LOOKUP_TIMEOUT ms of server silence)
%%
%% `Server' - Pid of the server instance.
%%
%% `Filter' - Function used to filter the results.
-spec lookup_sync(pid(), index(), field(), mi_term(), function()) ->
list() | {error, Reason::any()}.
lookup_sync(Server, Index, Field, Term, Filter) ->
{ok, Ref} = mi_server:lookup(Server, Index, Field, Term, Filter),
make_result_list(Ref).
%% @doc Much like `lookup' except allows one to specify a range of
%% terms. The range is a closed interval meaning that both
%% `StartTerm' and `EndTerm' are included.
%%
%% `StartTerm' - The start of the range.
%%
%% `EndTerm' - The end of the range.
%%
%% `Size' - The size of the term in bytes.
%%
%% @see lookup/5.
-spec range(pid(), index(), field(), mi_term(), mi_term(),
size(), function()) -> iterator().
range(Server, Index, Field, StartTerm, EndTerm, Size, Filter) ->
{ok, Ref} = mi_server:range(Server, Index, Field, StartTerm, EndTerm,
Size, Filter),
make_result_iterator(Ref).
%% @doc Much like `lookup_sync' except allows one to specify a range
%% of terms. The range is a closed interval meaning that both
%% `StartTerm' and `EndTerm' are included. Blocks until the full
%% result list is built.
%%
%% `StartTerm' - The start of the range.
%%
%% `EndTerm' - The end of the range.
%%
%% `Size' - The size of the term in bytes.
%%
%% @see lookup_sync/5.
-spec range_sync(pid(), index(), field(), mi_term(), mi_term(),
size(), function()) -> list() | {error, Reason::any()}.
range_sync(Server, Index, Field, StartTerm, EndTerm, Size, Filter) ->
{ok, Ref} = mi_server:range(Server, Index, Field, StartTerm, EndTerm,
Size, Filter),
make_result_list(Ref).
%% @doc Predicate to determine if the buffers AND segments are empty.
%% Delegates to mi_server:is_empty/1.
-spec is_empty(pid()) -> boolean().
is_empty(Server) -> mi_server:is_empty(Server).
%% @doc Fold over all IFTs in the index.
%%
%% `Fun' - Function to fold over data. It takes 7 args. 1-6 are `I',
%% `F', `T', `Value', `Props', `Timestamp' and the 7th is the
%% accumulator.
%%
%% `Acc' - The accumulator to seed the fold with.
%%
%% `Acc2' - The final accumulator.
-spec fold(pid(), function(), any()) -> {ok, Acc2::any()}.
fold(Server, Fun, Acc) -> mi_server:fold(Server, Fun, Acc).
%% @doc Drop all current state and start from scratch.
-spec drop(pid()) -> ok.
drop(Server) -> mi_server:drop(Server).
%% @doc Perform compaction of segments if needed.
%%
%% `Segs' - The number of segments compacted.
%%
%% `Bytes' - The number of bytes compacted.
-spec compact(pid()) ->
{ok, Segs::integer(), Bytes::integer()}
| {error, Reason::any()}.
compact(Server) -> mi_server:compact(Server).
%%%===================================================================
%%% Internal Functions
%%%===================================================================
%% @private
%% Wrap Ref in a zero-arity fun that implements the iterator() contract
%% by receiving result messages tagged with Ref.
make_result_iterator(Ref) ->
fun() -> result_iterator(Ref) end.
%% @private
%% Wait for the next message tagged with Ref:
%% - {results, Results, Ref}: return the batch plus a continuation fun
%% - {eof, Ref}: end of the result stream
%% - {error, Ref, Reason}: propagate the error to the caller
%% Throws lookup_timeout if nothing arrives within ?LOOKUP_TIMEOUT ms.
result_iterator(Ref) ->
receive
{results, Results, Ref} ->
{Results, fun() -> result_iterator(Ref) end};
{eof, Ref} ->
eof;
{error, Ref, Reason} ->
{error, Reason}
after
?LOOKUP_TIMEOUT ->
throw(lookup_timeout)
end.
%% @private
%%
%% @doc Make an iterator with back pressure.
make_result_iterator_bp(Ref) ->
fun() -> result_iterator_bp(Ref) end.
%% @private
%% Same protocol as result_iterator/1, plus back pressure: when the
%% sender pauses with {waiting, From, Ref} we acknowledge with
%% {continue, Ref} so it resumes sending. Throws lookup_timeout after
%% ?LOOKUP_TIMEOUT ms of silence.
result_iterator_bp(Ref) ->
receive
{results, Results, Ref} ->
{Results, fun() -> result_iterator_bp(Ref) end};
{waiting, From, Ref} ->
From ! {continue, Ref},
result_iterator_bp(Ref);
{eof, Ref} ->
eof;
{error, Ref, Reason} ->
{error, Reason}
after
?LOOKUP_TIMEOUT ->
throw(lookup_timeout)
end.
%% @private
%% Collect every result batch tagged with Ref into one flat list.
make_result_list(Ref) ->
make_result_list(Ref, []).
%% @private
%% Accumulate batches newest-first; on eof, reverse to restore arrival
%% order and flatten the batches into a single list. Propagates
%% {error, Reason} and throws lookup_timeout after ?LOOKUP_TIMEOUT ms
%% of server silence.
make_result_list(Ref, Acc) ->
receive
{results, Results, Ref} ->
make_result_list(Ref, [Results|Acc]);
{eof, Ref} ->
lists:flatten(lists:reverse(Acc));
{error, Ref, Reason} ->
{error, Reason}
after
?LOOKUP_TIMEOUT ->
throw(lookup_timeout)
end.
%%------------------------------------------------------------------------------
%% Copyright 2012 FlowForwarding.org
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%-----------------------------------------------------------------------------
%% @author Erlang Solutions Ltd. <<EMAIL>>
%% @copyright 2012 FlowForwarding.org
%% @doc Implementation of sliding window for calculating average transfer over
%% a period of time.
%%
%% Implementation note: sliding window is implemented as a queue
%% using two lists (stacks). Its implementation is simpler,
%% because we always have the same number of entries in the queues.
%%
%% We operate it by modifying the last added entry (tail), or
%% adding new tail and dropping element from the head (stepping). Once
%% per Len steps we perform an O(N) wrap - switch stacks, which
%% allows us to be amortized O(1) on all operations. Because wrapping
%% is always succeeded by stepping, a nice property holds - tail list
%% is never empty, thus queue's youngest element - hd(Tail) is
%% always available in O(1) time.
-module(linc_us4_sliding_window).
-export([new/2,
refresh/1,
bump_transfer/2,
total_transfer/1,
length_ms/1]).
-compile(export_all).
-record(bucket, {start :: erlang:timestamp(),
transfer = 0 :: integer()}).
%% Properties (hold between steps and bumps):
%% window's tail list is nonempty
%% window's start is equal to oldest element's start
%% window's total_transfer is sum of window's transfers
%% TODO: quickcheck
-record(sliding_window, {size :: integer(),
bucket_size_us :: integer(),
total_transfer = 0 :: integer(),
start :: erlang:timestamp(),
head :: [#bucket{}],
tail :: [#bucket{}]}).
-type sliding_window() :: #sliding_window{}.
-export_type([sliding_window/0]).
%% 10^12 and 10^6 for micro-, milli-, mega- manipulation
-define(E12, 1000000000000).
-define(E6, 1000000).
%% @doc Create a sliding window of BucketCount buckets, each covering
%% BucketSize milliseconds. Buckets are pre-created with start times
%% spaced backwards from the current time, so the window starts "full"
%% and spans BucketCount * BucketSize ms overall.
%% Fix: erlang:now/0 is deprecated since OTP 18; os:timestamp/0 returns
%% the same {MegaSecs, Secs, MicroSecs} shape and works with
%% timer:now_diff/2.
new(BucketCount, BucketSize) when BucketCount > 3, BucketSize > 0 ->
    Now = os:timestamp(),
    BucketSizeUs = BucketSize * 1000,
    Buckets = [#bucket{start = now_add(Now, -N * BucketSizeUs)} ||
                  N <- lists:seq(0, BucketCount - 1)],
    #sliding_window{size = BucketCount,
                    bucket_size_us = BucketSizeUs,
                    total_transfer = 0,
                    start = now_add(Now, -(BucketCount - 1) * BucketSizeUs),
                    head = [],
                    tail = Buckets}.
%% @doc Add Transfer (a transfer amount, e.g. bytes) to the current
%% (youngest) bucket, first refreshing the window so stale buckets are
%% rotated out. The running total_transfer is kept in sync with the
%% sum of the bucket transfers.
bump_transfer(Queue0, Transfer) ->
Queue1 = refresh(Queue0),
#sliding_window{tail = [TailH | TailT],
total_transfer = TotalTransfer} = Queue1,
NewTailH = TailH#bucket{transfer = TailH#bucket.transfer + Transfer},
Queue1#sliding_window{tail = [NewTailH | TailT],
total_transfer = TotalTransfer + Transfer}.
%% @doc Total transfer currently accounted across all buckets in the
%% window (maintained incrementally by bump_transfer/2 and step/1).
total_transfer(#sliding_window{total_transfer = TotalTransfer}) ->
    TotalTransfer.

%% @doc Age of the window in milliseconds, i.e. the elapsed time since
%% the oldest bucket was started.
%% Fix: now/0 (erlang:now/0) is deprecated since OTP 18; os:timestamp/0
%% is the drop-in replacement for use with timer:now_diff/2.
length_ms(#sliding_window{start = Start}) ->
    timer:now_diff(os:timestamp(), Start) div 1000.
%%--------------------------------------------------------------------
%% Queue helpers
%%--------------------------------------------------------------------
%% @doc Rotate the window forward (one step/1 at a time) until the
%% youngest bucket's interval covers the current time.
%% Fix: now/0 (erlang:now/0) is deprecated since OTP 18; os:timestamp/0
%% is the drop-in replacement for use with timer:now_diff/2.
refresh(#sliding_window{tail = [#bucket{start = T} | _],
                        bucket_size_us = BucketSizeUs} = Queue) ->
    case timer:now_diff(os:timestamp(), T) > BucketSizeUs of
        true ->
            refresh(step(Queue));
        false ->
            Queue
    end.
%% Advance the queue by one bucket: drop the oldest bucket (head of the
%% head list), push a fresh bucket whose start immediately follows the
%% youngest bucket, update the window start to the new oldest element,
%% and subtract the dropped bucket's transfer from the running total.
step(#sliding_window{head = [HeadH | HeadT],
tail = OldTail,
bucket_size_us = BucketSizeUs,
total_transfer = OldTotalXfer} = Queue) ->
NewStart = now_add(get_start(OldTail, HeadT), BucketSizeUs),
Queue#sliding_window{head = HeadT,
tail = [#bucket{start = NewStart} | OldTail],
start = get_start(HeadT, OldTail),
total_transfer = OldTotalXfer - HeadH#bucket.transfer};
step(Queue) -> %% head is too short - wrap
step(wrap(Queue)).
%% Switch stacks (two-list queue trick): when the head list is empty,
%% move the reversed tail into the head so stepping can continue. This
%% O(N) operation amortises to O(1) per step.
wrap(#sliding_window{head = [], tail = Tail} = Queue) ->
Queue#sliding_window{head = lists:reverse(Tail),
tail = []}.
%% Start time of the oldest element given the two stacks: if the
%% oldest-first (head) list is non-empty its first element is the
%% oldest; otherwise fall back to the last element of the newest-first
%% (tail) list.
get_start([#bucket{start = Start} | _], _) ->
Start;
get_start([], Tail) ->
#bucket{start = Start} = lists:last(Tail),
Start.
%%--------------------------------------------------------------------
%% Misc. helpers
%%--------------------------------------------------------------------
%% Add a (possibly negative) microsecond delta to an Erlang timestamp
%% {MegaSecs, Secs, MicroSecs}, renormalising the result so that
%% Secs and MicroSecs each stay below 10^6.
now_add({MegaSecs, Secs, MicroSecs}, DeltaUs) ->
    TotalUs = ?E12 * MegaSecs + ?E6 * Secs + MicroSecs + DeltaUs,
    {TotalUs div ?E12, (TotalUs rem ?E12) div ?E6, TotalUs rem ?E6}.
-module(mat).
-export([tr/1, inv/1]).
-export(['+'/2, '-'/2, '=='/2, '*'/2, '*´'/2]).
-export([row/2, col/2, get/3]).
-export([zeros/2, eye/1, diag/1]).
-export([eval/1]).
-export_type([matrix/0]).
-type matrix() :: [[number(), ...], ...].
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% API
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% transpose matrix
%% @doc Transpose M: element (i,j) of the result is element (j,i) of M.
%% Delegates to the accumulator-based tr/2.
-spec tr(M) -> Transposed when
M :: matrix(),
Transposed :: matrix().
tr(M) ->
tr(M, []).
%% matrix addition (M3 = M1 + M2)
%% Element-wise addition; crashes (in lists:zipwith) if dimensions
%% differ.
-spec '+'(M1, M2) -> M3 when
M1 :: matrix(),
M2 :: matrix(),
M3:: matrix().
'+'(M1, M2) ->
element_wise_op(fun erlang:'+'/2, M1, M2).
%% matrix subtraction (M3 = M1 - M2)
%% Element-wise subtraction; crashes (in lists:zipwith) if dimensions
%% differ.
-spec '-'(M1, M2) -> M3 when
M1 :: matrix(),
M2 :: matrix(),
M3 :: matrix().
'-'(M1, M2) ->
element_wise_op(fun erlang:'-'/2, M1, M2).
%% matrix multiplication (M3 = Op1 * M2)
%% First clause: scalar multiplication when Op1 is a number.
%% Second clause: matrix product, implemented as M1 * tr(M2) via '*´'/2
%% so both operands are traversed row-wise.
-spec '*'(Op1, M2) -> M3 when
Op1 :: number() | matrix(),
M2 :: matrix(),
M3 :: matrix().
'*'(N, M) when is_number(N) ->
[[N*X|| X <- Row] || Row <- M];
'*'(M1, M2) ->
'*´'(M1, tr(M2)).
%% transposed matrix multiplication (M3 = M1 * tr(M2))
%% The second operand is supplied already transposed, so each result
%% cell is a dot product of a row of M1 with a row of M2.
-spec '*´'(M1, M2) -> M3 when
    M1 :: matrix(),
    M2 :: matrix(),
    M3 :: matrix().
'*´'(Left, RightT) ->
    Dot = fun(RowL, RowR) ->
                  lists:sum([A * B || {A, B} <- lists:zip(RowL, RowR)])
          end,
    [[Dot(RowL, RowR) || RowR <- RightT] || RowL <- Left].
%% return true if M1 equals M2 using 1e-6 precision
%% Fix: the previous version crashed with a case_clause error when the
%% row counts matched but the column counts differed (the `true' branch
%% carried a guard and there was no alternative `true' clause). Now any
%% dimension mismatch returns false.
-spec '=='(M1, M2) -> boolean() when
    M1 :: matrix(),
    M2 :: matrix().
'=='(M1, M2) ->
    %% matrix() is non-empty by definition, so hd/1 is safe here
    case {length(M1), length(hd(M1))} == {length(M2), length(hd(M2))} of
        true ->
            RoundFloat = fun(F) -> round(F*1000000)/1000000 end,
            CmpFloat = fun(F1, F2) -> RoundFloat(F1) == RoundFloat(F2) end,
            Eq = element_wise_op(CmpFloat, M1, M2),
            lists:all(fun(Row) -> lists:all(fun(B) -> B end, Row) end, Eq);
        false ->
            false
    end.
%% return the row I of M
%% The result is itself a 1xN matrix (a list containing one row).
-spec row(I, M) -> Row when
    I :: pos_integer(),
    M :: matrix(),
    Row :: matrix().
row(RowIdx, Matrix) ->
    TheRow = lists:nth(RowIdx, Matrix),
    [TheRow].
%% return the column J of M
%% The result is an Nx1 matrix (each output row holds one element).
-spec col(J, M) -> Col when
    J :: pos_integer(),
    M :: matrix(),
    Col :: matrix().
col(ColIdx, Matrix) ->
    lists:map(fun(Row) -> [lists:nth(ColIdx, Row)] end, Matrix).
%% return the element at index (I,J) in M
%% I selects the row, J the column (both 1-based).
-spec get(I, J, M) -> Elem when
    I :: pos_integer(),
    J :: pos_integer(),
    M :: matrix(),
    Elem :: number().
get(RowIdx, ColIdx, Matrix) ->
    Row = lists:nth(RowIdx, Matrix),
    lists:nth(ColIdx, Row).
%% return a null matrix of size NxM
%% Idiom: lists:duplicate/2 replaces the comprehension over lists:seq;
%% sharing one row term is safe because Erlang terms are immutable.
-spec zeros(N, M) -> Zeros when
    N :: pos_integer(),
    M :: pos_integer(),
    Zeros :: matrix().
zeros(N, M) ->
    Row = lists:duplicate(M, 0),
    lists:duplicate(N, Row).
%% return an identity matrix of size NxN
%% Cell (I,J) is 1 on the main diagonal and 0 elsewhere.
-spec eye(N) -> Identity when
    N :: pos_integer(),
    Identity :: matrix().
eye(N) ->
    Indices = lists:seq(1, N),
    [[case I =:= J of true -> 1; false -> 0 end || J <- Indices]
     || I <- Indices].
%% return a square diagonal matrix with the elements of L on the main diagonal
%% Builds an NxN zero matrix (N = length(L)) and fills the diagonal via
%% the diag/4 helper.
-spec diag(L) -> Diag when
L :: [number(), ...],
Diag :: matrix().
diag(L) ->
N = length(L),
diag(L, zeros(N, N), 0, []).
%% compute the inverse of a square matrix
%% Augments M with the NxN identity ([M | I]), runs Gauss-Jordan
%% elimination, and returns the right half of the reduced matrix.
%% NOTE(review): singular matrices are not rejected here - gauss_jordan
%% simply skips columns without a usable pivot, so the caller should
%% validate invertibility if it matters.
-spec inv(M) -> Invert when
M :: matrix(),
Invert :: matrix().
inv(M) ->
N = length(M),
A = lists:zipwith(fun lists:append/2, M, eye(N)),
Gj = gauss_jordan(A, N, 0, 1),
[lists:nthtail(N, Row) || Row <- Gj].
%% evaluate a list of matrix operations
%% Reduces the expression left-to-right: [A, '+', B, '*', C] computes
%% (A + B) * C.
%% Fix: the previous `fun mat:O/2' dynamic dispatch defeats xref and
%% dialyzer, breaks if the module is renamed, and accepts arbitrary
%% atoms; an explicit static dispatch restricts O to the supported
%% operators (anything else now fails with function_clause).
-spec eval(Expr) -> Result when
    Expr :: [T],
    T :: matrix() | '+' | '-' | '*' | '*´',
    Result :: matrix().
eval([L | [O | [R | T]]]) ->
    eval([apply_op(O, L, R) | T]);
eval([Res]) ->
    Res.

%% Map an operator atom to the corresponding local operation.
apply_op('+', L, R) -> '+'(L, R);
apply_op('-', L, R) -> '-'(L, R);
apply_op('*', L, R) -> '*'(L, R);
apply_op('*´', L, R) -> '*´'(L, R).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Internal functions
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% transpose matrix with accumulator
%% Each pass peels the first element off every remaining row (via tr/3),
%% producing one output row; Rows accumulates output rows in reverse.
%% Terminates when the rows are exhausted (first row becomes []).
tr([[]|_], Rows) ->
lists:reverse(Rows);
tr(M, Rows) ->
{Row, Cols} = tr(M, [], []),
tr(Cols, [Row|Rows]).
%% transpose the first row of a matrix with accumulators
%% Returns {Column, Remainders}: the heads of all rows (one transposed
%% row) and the rows with their heads removed.
tr([], Col, Cols) ->
{lists:reverse(Col), lists:reverse(Cols)};
tr([[H|T]|Rows], Col, Cols) ->
tr(Rows, [H|Col], [T|Cols]).
%% apply Op element wise on matrices M1 and M2
%% Crashes if the matrices do not have matching dimensions.
element_wise_op(Op, M1, M2) ->
    [lists:zipwith(Op, Row1, Row2) || {Row1, Row2} <- lists:zip(M1, M2)].
%% Gauss-Jordan method from
%% https://fr.wikipedia.org/wiki/%C3%89limination_de_Gauss-Jordan#Pseudocode
%% A: augmented matrix, N: number of columns to eliminate, R: number of
%% pivot rows fixed so far, J: current column (1-based).
gauss_jordan(A, N, _, J) when J > N ->
A;
gauss_jordan(A, N, R, J) ->
%% search rows R+1..end of column J for the largest-magnitude pivot
case pivot(col(J, lists:nthtail(R, A)), R+1, {0, 0}) of
{_, 0} ->
%% no usable pivot in this column - skip to the next column
gauss_jordan(A, N, R, J+1);
{K, Pivot} ->
%% swap the pivot row into position, normalise it to a leading 1,
%% then eliminate column J from every other row
A2 = swap(K, R+1, A),
[Row] = row(R+1, A2),
Norm = lists:map(fun(E) -> E/Pivot end, Row),
A3 = gauss_jordan_aux(A2, {R+1, J}, Norm, 1, []),
gauss_jordan(A3, N, R+1, J+1)
end.
%% Matrix(i, :) -= Matrix(i, j)/Pivot * Matrix(R, :) forall i\{R}
%% Matrix(R, :) *= 1/Pivot
%% with Pivot = Matrix(R, j)
%% L is the already-normalised pivot row; it replaces row R directly,
%% and is scaled and subtracted from every other row to zero column J.
gauss_jordan_aux([], _, _, _, Acc) ->
lists:reverse(Acc);
gauss_jordan_aux([_|Rows], {I, J}, L, I, Acc)->
gauss_jordan_aux(Rows, {I, J}, L, I+1, [L|Acc]);
gauss_jordan_aux([Row|Rows], {R, J}, L, I, Acc) ->
F = lists:nth(J, Row),
NewRow = lists:zipwith(fun(A, B) -> A-F*B end, Row, L),
gauss_jordan_aux(Rows, {R, J}, L, I+1, [NewRow|Acc]).
%% find the gauss jordan pivot of a column
%% Scans a column (as produced by col/2, i.e. a list of single-element
%% rows), tracking {RowIndex, Value} of the largest absolute value.
%% Ties are won by the later row because the comparison uses >=.
pivot([], _Row, Best) ->
    Best;
pivot([[Value] | Rest], Row, {_BestRow, BestValue})
  when abs(Value) >= abs(BestValue) ->
    pivot(Rest, Row + 1, {Row, Value});
pivot([_Skipped | Rest], Row, Best) ->
    pivot(Rest, Row + 1, Best).
%% swap two indexes of a list
%% taken from https://stackoverflow.com/a/64024907
%% Splits the list around the two (1-based) positions and reassembles
%% it with the two elements exchanged; swapping an index with itself is
%% a no-op.
swap(Same, Same, List) ->
    List;
swap(IdxA, IdxB, List) ->
    {Lo, Hi} = {min(IdxA, IdxB), max(IdxA, IdxB)},
    {Prefix, [First | Middle0]} = lists:split(Lo - 1, List),
    {Middle, [Second | Suffix]} = lists:split(Hi - Lo - 1, Middle0),
    Prefix ++ [Second] ++ Middle ++ [First] ++ Suffix.
%% build a diagonal matrix from a zero matrix
%% For each value, replace the element at (0-based) column Col of the
%% corresponding zero row, advancing the column by one per row.
diag([], [], _Col, Acc) ->
    lists:reverse(Acc);
diag([Value | Values], [ZeroRow | ZeroRows], Col, Acc) ->
    {Before, [_ | After]} = lists:split(Col, ZeroRow),
    NewRow = Before ++ [Value] ++ After,
    diag(Values, ZeroRows, Col + 1, [NewRow | Acc]).
%% -------- Overview ---------
%%
%% Leveled is based on the LSM-tree similar to leveldb, except that:
%% - Keys, Metadata and Values are not persisted together - the Keys and
%% Metadata are kept in a tree-based ledger, whereas the values are stored
%% only in a sequential Journal.
%% - Different file formats are used for Journal (based on DJ Bernstein
%% constant database), and the ledger (based on sst)
%% - It is not intended to be general purpose, but be primarily suited for
%% use as a Riak backend in specific circumstances (relatively large values,
%% and frequent use of iterators)
%% - The Journal is an extended nursery log in leveldb terms. It is keyed
%% on the sequence number of the write
%% - The ledger is a merge tree, where the key is the actual object key, and
%% the value is the metadata of the object including the sequence number
%%
%%
%% -------- Actors ---------
%%
%% The store is fronted by a Bookie, who takes support from different actors:
%% - An Inker who persists new data into the journal, and returns items from
%% the journal based on sequence number
%% - A Penciller who periodically redraws the ledger, that associates keys with
%% sequence numbers and other metadata, as well as secondary keys (for index
%% queries)
%% - One or more Clerks, who may be used by either the inker or the penciller
%% to fulfill background tasks
%%
%% Both the Inker and the Penciller maintain a manifest of the files which
%% represent the current state of the Journal and the Ledger repsectively.
%% For the Inker the manifest maps ranges of sequence numbers to cdb files.
%% For the Penciller the manifest maps key ranges to files at each level of
%% the Ledger.
%%
-module(leveled_bookie).
-behaviour(gen_server).
-include("include/leveled.hrl").
-export([init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3,
book_start/1,
book_start/4,
book_plainstart/1,
book_put/5,
book_put/6,
book_tempput/7,
book_mput/2,
book_mput/3,
book_delete/4,
book_get/3,
book_get/4,
book_head/3,
book_head/4,
book_sqn/3,
book_sqn/4,
book_headonly/4,
book_snapshot/4,
book_compactjournal/2,
book_islastcompactionpending/1,
book_trimjournal/1,
book_hotbackup/1,
book_close/1,
book_destroy/1,
book_isempty/2,
book_logsettings/1,
book_loglevel/2,
book_addlogs/2,
book_removelogs/2]).
%% folding API
-export([
book_returnfolder/2,
book_indexfold/5,
book_bucketlist/4,
book_keylist/3,
book_keylist/4,
book_keylist/5,
book_keylist/6,
book_objectfold/4,
book_objectfold/5,
book_objectfold/6,
book_headfold/6,
book_headfold/7,
book_headfold/9
]).
-export([empty_ledgercache/0,
snapshot_store/6,
fetch_value/2,
journal_notfound/4]).
-ifdef(TEST).
-export([book_returnactors/1]).
-endif.
-include_lib("eunit/include/eunit.hrl").
-define(LOADING_PAUSE, 1000).
-define(CACHE_SIZE, 2500).
-define(MIN_CACHE_SIZE, 100).
-define(MIN_PCL_CACHE_SIZE, 400).
-define(MAX_PCL_CACHE_SIZE, 28000).
% This is less than actual max - but COIN_SIDECOUNT
-define(CACHE_SIZE_JITTER, 25).
-define(JOURNAL_SIZE_JITTER, 20).
-define(ABSOLUTEMAX_JOURNALSIZE, 4000000000).
-define(LONG_RUNNING, 1000000).
% An individual task taking > 1s gets a specific log
-define(COMPRESSION_METHOD, lz4).
-define(COMPRESSION_POINT, on_receipt).
-define(LOG_LEVEL, info).
-define(TIMING_SAMPLESIZE, 100).
-define(DEFAULT_DBID, 65536).
-define(TIMING_SAMPLECOUNTDOWN, 50000).
-define(DUMMY, dummy). % Dummy key used for mput operations
-define(MAX_KEYCHECK_FREQUENCY, 100).
-define(MIN_KEYCHECK_FREQUENCY, 1).
-define(OPEN_LASTMOD_RANGE, {0, infinity}).
-define(SNAPTIMEOUT_SHORT, 900). % 15 minutes
-define(SNAPTIMEOUT_LONG, 43200). % 12 hours
-define(SST_PAGECACHELEVEL_NOLOOKUP, 1).
-define(SST_PAGECACHELEVEL_LOOKUP, 4).
-define(CACHE_LOGPOINT, 50000).
-define(OPTION_DEFAULTS,
[{root_path, undefined},
{snapshot_bookie, undefined},
{cache_size, ?CACHE_SIZE},
{max_journalsize, 1000000000},
{max_journalobjectcount, 200000},
{max_sstslots, 256},
{sync_strategy, none},
{head_only, false},
{waste_retention_period, undefined},
{max_run_length, undefined},
{singlefile_compactionpercentage, 50.0},
{maxrunlength_compactionpercentage, 70.0},
{reload_strategy, []},
{max_pencillercachesize, ?MAX_PCL_CACHE_SIZE},
{ledger_preloadpagecache_level, ?SST_PAGECACHELEVEL_LOOKUP},
{compression_method, ?COMPRESSION_METHOD},
{compression_point, ?COMPRESSION_POINT},
{log_level, ?LOG_LEVEL},
{forced_logs, []},
{database_id, ?DEFAULT_DBID},
{override_functions, []},
{snapshot_timeout_short, ?SNAPTIMEOUT_SHORT},
{snapshot_timeout_long, ?SNAPTIMEOUT_LONG}]).
-record(ledger_cache, {mem :: ets:tab(),
loader = leveled_tree:empty(?CACHE_TYPE)
:: tuple()|empty_cache,
load_queue = [] :: list(),
index = leveled_pmem:new_index(), % array or empty_index
min_sqn = infinity :: integer()|infinity,
max_sqn = 0 :: integer()}).
-record(state, {inker :: pid() | undefined,
penciller :: pid() | undefined,
cache_size :: integer() | undefined,
ledger_cache = #ledger_cache{} :: ledger_cache(),
is_snapshot :: boolean() | undefined,
slow_offer = false :: boolean(),
head_only = false :: boolean(),
head_lookup = true :: boolean(),
ink_checking = ?MAX_KEYCHECK_FREQUENCY :: integer(),
put_countdown = 0 :: integer(),
get_countdown = 0 :: integer(),
fold_countdown = 0 :: integer(),
head_countdown = 0 :: integer(),
cache_ratio = {0, 0, 0} :: cache_ratio(),
get_timings = no_timing :: get_timings(),
put_timings = no_timing :: put_timings(),
fold_timings = no_timing :: fold_timings(),
head_timings = no_timing :: head_timings()}).
-record(get_timings, {sample_count = 0 :: integer(),
head_time = 0 :: integer(),
body_time = 0 :: integer(),
fetch_count = 0 :: integer()}).
-record(head_timings, {sample_count = 0 :: integer(),
pcl_time = 0 :: integer(),
buildhead_time = 0 :: integer()}).
-record(put_timings, {sample_count = 0 :: integer(),
mem_time = 0 :: integer(),
ink_time = 0 :: integer(),
total_size = 0 :: integer()}).
-record(fold_timings, {sample_count = 0 :: integer(),
setup_time = 0 :: integer()}).
-type book_state() :: #state{}.
-type sync_mode() :: sync|none|riak_sync.
-type ledger_cache() :: #ledger_cache{}.
-type get_timings() :: no_timing|#get_timings{}.
-type put_timings() :: no_timing|#put_timings{}.
-type fold_timings() :: no_timing|#fold_timings{}.
-type head_timings() :: no_timing|#head_timings{}.
-type timing_types() :: head|get|put|fold.
-type cache_ratio() ::
{non_neg_integer(), non_neg_integer(), non_neg_integer()}.
-type open_options() ::
%% For full description of options see ../docs/STARTUP_OPTIONS.md
[{root_path, string()|undefined} |
% Folder to be used as the root path for storing all the database
% information. May be undefined is snapshot_bookie is a pid()
% TODO: Some sort of split root path to allow for mixed classes of
% storage (e.g. like eleveldb tiered storage - only with
% separation between ledger and non-current journal)
{snapshot_bookie, undefined|pid()} |
% Is the bookie being started required to a be a snapshot of an
% existing bookie, rather than a new bookie. The bookie to be
% snapped should have its pid passed as the startup option in this
% case
{cache_size, pos_integer()} |
% The size of the Bookie's memory, the cache of the recent
% additions to the ledger. Defaults to ?CACHE_SIZE, plus some
% randomised jitter (randomised jitter will still be added to
% configured values)
% The minimum value is 100 - any lower value will be ignored
{max_journalsize, pos_integer()} |
% The maximum size of a journal file in bytes. The absolute
% maximum must be 4GB due to 4 byte file pointers being used
{max_journalobjectcount, pos_integer()} |
% The maximum size of the journal by count of the objects. The
% journal must remain within the limit set by both this figures and
% the max_journalsize
{max_sstslots, pos_integer()} |
% The maximum number of slots in a SST file. All testing is done
% at a size of 256 (except for Quickcheck tests}, altering this
% value is not recommended
{sync_strategy, sync_mode()} |
% Should be sync if it is necessary to flush to disk after every
        % write, or none if not (allow the OS to schedule). This has a
% significant impact on performance which can be mitigated
% partially in hardware (e.g through use of FBWC).
        % riak_sync is used for backwards compatibility with OTP16 - and
% will manually call sync() after each write (rather than use the
% O_SYNC option on startup)
{head_only, false|with_lookup|no_lookup} |
% When set to true, there are three fundamental changes as to how
% leveled will work:
        % - Compaction of the journal will be managed by simply removing any
        % journal file that has a highest sequence number persisted to the
% ledger;
% - GETs are not supported, only head requests;
% - PUTs should arrive batched object specs using the book_mput/2
% function.
% head_only mode is disabled with false (default). There are two
% different modes in which head_only can run with_lookup or
        % no_lookup and head_only mode is enabled by passing one of these
% atoms:
% - with_lookup assumes that individual objects may need to be
% fetched;
% - no_lookup prevents individual objects from being fetched, so
% that the store can only be used for folds (without segment list
% acceleration)
{waste_retention_period, undefined|pos_integer()} |
% If a value is not required in the journal (i.e. it has been
% replaced and is now to be removed for compaction) for how long
% should it be retained. For example should it be kept for a
        % period until the operator can be sure a backup has been
% completed?
        % If undefined, will not retain waste, otherwise the period is the
% number of seconds to wait
{max_run_length, undefined|pos_integer()} |
% The maximum number of consecutive files that can be compacted in
% one compaction operation.
% Defaults to leveled_iclerk:?MAX_COMPACTION_RUN (if undefined)
{singlefile_compactionpercentage, float()} |
% What is the percentage of space to be recovered from compacting
% a single file, before that file can be a compaction candidate in
% a compaction run of length 1
{maxrunlength_compactionpercentage, float()} |
% What is the percentage of space to be recovered from compacting
% a run of max_run_length, before that run can be a compaction
% candidate. For runs between 1 and max_run_length, a
% proportionate score is calculated
{reload_strategy, list()} |
% The reload_strategy is exposed as an option as currently no firm
% decision has been made about how recovery from failure should
% work. For instance if we were to trust everything as permanent
% in the Ledger once it is persisted, then there would be no need
% to retain a skinny history of key changes in the Journal after
% compaction. If, as an alternative we assume the Ledger is never
        % permanent, and retain the skinny history - then backups need only
% be made against the Journal. The skinny history of key changes
% is primarily related to the issue of supporting secondary indexes
% in Riak.
%
% These two strategies are referred to as recovr (assume we can
% recover any deltas from a lost ledger and a lost history through
% resilience outside of the store), or retain (retain a history of
% key changes, even when the object value has been compacted).
%
% There is a third strategy, which is recalc, where on reloading
% the Ledger from the Journal, the key changes are recalculated by
% comparing the extracted metadata from the Journal object, with the
% extracted metadata from the current Ledger object it is set to
% replace (should one be present). Implementing the recalc
% strategy requires a override function for
% `leveled_head:diff_indexspecs/3`.
% A function for the ?RIAK_TAG is provided and tested.
%
% reload_strategy options are a list - to map from a tag to the
        % strategy (recovr|retain|recalc). Default strategies are:
% [{?RIAK_TAG, retain}, {?STD_TAG, retain}]
{max_pencillercachesize, pos_integer()|undefined} |
% How many ledger keys should the penciller retain in memory
% between flushing new level zero files.
% Defaults to ?MAX_PCL_CACHE_SIZE when undefined
        % The minimum size 400 - attempt to set this value lower will be
% ignored. As a rule the value should be at least 4 x the Bookie's
% cache size
{ledger_preloadpagecache_level, pos_integer()} |
% To which level of the ledger should the ledger contents be
% pre-loaded into the pagecache (using fadvise on creation and
% startup)
{compression_method, native|lz4} |
% Compression method and point allow Leveled to be switched from
% using bif based compression (zlib) to using nif based compression
% (lz4).
% Defaults to ?COMPRESSION_METHOD
{compression_point, on_compact|on_receipt} |
% The =compression point can be changed between on_receipt (all
% values are compressed as they are received), to on_compact where
% values are originally stored uncompressed (speeding PUT times),
% and are only compressed when they are first subject to compaction
% Defaults to ?COMPRESSION_POINT
{log_level, debug|info|warn|error|critical} |
% Set the log level. The default log_level of info is noisy - the
% current implementation was targeted at environments that have
% facilities to index large proportions of logs and allow for
% dynamic querying of those indexes to output relevant stats.
%
% As an alternative a higher log_level can be used to reduce this
% 'noise', however, there is currently no separate stats facility
% to gather relevant information outside of info level logs. So
% moving to higher log levels will at present make the operator
% blind to sample performance statistics of leveled sub-components
% etc
{forced_logs, list(string())} |
% Forced logs allow for specific info level logs, such as those
% logging stats to be logged even when the default log level has
% been set to a higher log level. Using:
% {forced_logs,
% ["B0015", "B0016", "B0017", "B0018",
% "P0032", "SST12", "CDB19", "SST13", "I0019"]}
% Will log all timing points even when log_level is not set to
% support info
{database_id, non_neg_integer()} |
% Integer database ID to be used in logs
{override_functions, list(leveled_head:appdefinable_function_tuple())} |
% Provide a list of override functions that will be used for
% user-defined tags
{snapshot_timeout_short, pos_integer()} |
% Time in seconds before a snapshot that has not been shutdown is
% assumed to have failed, and so requires to be torn down. The
% short timeout is applied to queries where long_running is set to
% false
{snapshot_timeout_long, pos_integer()}
% Time in seconds before a snapshot that has not been shutdown is
% assumed to have failed, and so requires to be torn down. The
% long timeout is applied to queries where long_running is set to
% true
].
%% Fun applied to each Journal entry when rebuilding the Ledger from the
%% Journal at startup.  Arguments are the journal key, the value, the
%% entry position, the loop accumulator ({MinSQN, MaxSQN, LedgerCache})
%% and an extraction fun; returns loop|stop with the updated accumulator.
-type initial_loadfun() ::
    fun((leveled_codec:journal_key(),
            any(),
            non_neg_integer(),
            {non_neg_integer(), non_neg_integer(), ledger_cache()},
            fun((any()) -> {binary(), non_neg_integer()})) ->
                {loop|stop,
                    {non_neg_integer(), non_neg_integer(), ledger_cache()}}).
-export_type([initial_loadfun/0]).
%%%============================================================================
%%% API
%%%============================================================================
-spec book_start(string(), integer(), integer(), sync_mode()) -> {ok, pid()}.
%% @doc Start a Leveled Key/Value store - limited options support.
%%
%% The most common startup parameters are extracted out from the options to
%% provide this startup method.  This will start a KV store from the previous
%% store at root path - or an empty one if there is no store at the path.
%%
%% Fiddling with the LedgerCacheSize and JournalSize may improve performance,
%% but these are primarily exposed to support special situations (e.g. very
%% low memory installations); there should not be huge variance in outcomes
%% from modifying these numbers.
%%
%% The sync_strategy determines if the store is going to flush writes to disk
%% before returning an ack.  There are three settings currently supported:
%% - sync - sync to disk by passing the sync flag to the file writer (only
%% works in OTP 18)
%% - riak_sync - sync to disk by explicitly calling data_sync after the write
%% - none - leave it to the operating system to control flushing
%%
%% On startup the Bookie must restart both the Inker to load the Journal, and
%% the Penciller to load the Ledger.  Once the Penciller has started, the
%% Bookie should request the highest sequence number in the Ledger, and then
%% try and rebuild any missing information from the Journal.
%%
%% To rebuild the Ledger it requests the Inker to scan over the files from
%% the sequence number and re-generate the Ledger changes - pushing the
%% changes directly back into the Ledger.
book_start(RootPath, LedgerCacheSize, JournalSize, SyncStrategy) ->
    % Collect the explicitly supplied options, fill in the remainder with
    % defaults, then fall through to the full-options start
    SuppliedOpts =
        [{root_path, RootPath},
            {cache_size, LedgerCacheSize},
            {max_journalsize, JournalSize},
            {sync_strategy, SyncStrategy}],
    book_start(set_defaults(SuppliedOpts)).
-spec book_start(list(tuple())) -> {ok, pid()}.
%% @doc Start a Leveled Key/Value store - full options support.
%%
%% For full description of options see ../docs/STARTUP_OPTIONS.md and also
%% comments on the open_options() type
%% Missing options are filled in by set_defaults/1; the started gen_server
%% is linked to the calling process.
book_start(Opts) ->
    gen_server:start_link(?MODULE, [set_defaults(Opts)], []).
-spec book_plainstart(list(tuple())) -> {ok, pid()}.
%% @doc
%% Start used in tests to start without linking
%% As book_start/1 but uses gen_server:start/3, so a crash of the store
%% will not take down the calling (test) process.
book_plainstart(Opts) ->
    gen_server:start(?MODULE, [set_defaults(Opts)], []).
-spec book_tempput(pid(), leveled_codec:key(), leveled_codec:key(), any(),
                    leveled_codec:index_specs(),
                    leveled_codec:tag(), integer()) -> ok|pause.
%% @doc Put an object with an expiry time
%%
%% Put an item in the store but with a Time To Live - the time when the object
%% should expire, in gregorian_seconds (add the required number of seconds to
%% leveled_util:integer_time/1).
%%
%% There exists the possibility of per object expiry times, not just whole
%% store expiry times as has traditionally been the feature in Riak.  Care
%% will need to be taken if implementing per-object times about the choice of
%% reload_strategy.  If expired objects are to be compacted entirely, then the
%% history of KeyChanges will be lost on reload.
%%
%% Delegates to book_put/7 with the TTL in place of the usual 'infinity';
%% the guard ensures an integer expiry time was supplied.
book_tempput(Pid, Bucket, Key, Object, IndexSpecs, Tag, TTL)
        when is_integer(TTL) ->
    book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag, TTL).
%% @doc - Standard PUT
%%
%% A PUT request consists of
%% - A Primary Key and a Value
%% - IndexSpecs - a set of secondary key changes associated with the
%% transaction
%% - A tag indicating the type of object. Behaviour for metadata extraction,
%% and ledger compaction will vary by type. There are three currently
%% implemented types i (Index), o (Standard), o_rkv (Riak). Keys added with
%% Index tags are not fetchable (as they will not be hashed), but are
%% extractable via range query.
%%
%% The Bookie takes the request and passes it first to the Inker to add the
%% request to the journal.
%%
%% The inker will pass the PK/Value/IndexSpecs to the current (append only)
%% CDB journal file to persist the change. The call should return either 'ok'
%% or 'roll'. 'roll' indicates that the CDB file has insufficient capacity for
%% this write, and a new journal file should be created (with appropriate
%% manifest changes to be made).
%%
%% The inker will return the SQN which the change has been made at, as well as
%% the object size on disk within the Journal.
%%
%% Once the object has been persisted to the Journal, the Ledger can be updated.
%% The Ledger is updated by the Bookie applying a function (extract_metadata/4)
%% to the Value to return the Object Metadata, a function to generate a hash
%% of the Value and also taking the Primary Key, the IndexSpecs, the Sequence
%% Number in the Journal and the Object Size (returned from the Inker).
%%
%% A set of Ledger Key changes are then generated and placed in the Bookie's
%% Ledger Key cache.
%%
%% The PUT can now be acknowledged. In the background the Bookie may then
%% choose to push the cache to the Penciller for eventual persistence within
%% the ledger. This push will either be accepted or returned (if the
%% Penciller has a backlog of key changes). The back-pressure should lead to
%% the Bookie entering into a slow-offer status whereby the next PUT will be
%% acknowledged by a PAUSE signal - with the expectation that this will
%% lead to a back-off behaviour.
%% book_put/5: PUT using the default object tag (?STD_TAG) and no expiry.
book_put(Pid, Bucket, Key, Object, IndexSpecs) ->
    book_put(Pid, Bucket, Key, Object, IndexSpecs, ?STD_TAG).
%% book_put/6: PUT with an explicit tag but no expiry (TTL of infinity).
book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag) ->
    book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag, infinity).
-spec book_put(pid(), leveled_codec:key(), leveled_codec:key(), any(),
                leveled_codec:index_specs(),
                leveled_codec:tag(), infinity|integer()) -> ok|pause.
%% book_put/7: the full PUT - a synchronous call to the Bookie which may
%% reply 'pause' when under back-pressure (see the slow-offer notes above).
%% The is_atom(Tag) guard rejects malformed tags before the call is made.
book_put(Pid, Bucket, Key, Object, IndexSpecs, Tag, TTL) when is_atom(Tag) ->
    gen_server:call(Pid,
        {put, Bucket, Key, Object, IndexSpecs, Tag, TTL},
        infinity).
-spec book_mput(pid(), list(leveled_codec:object_spec())) -> ok|pause.
%% @doc
%%
%% When the store is being run in head_only mode, batches of object specs may
%% be inserted in to the store using book_mput/2.  ObjectSpecs should be
%% of the form {ObjectOp, Bucket, Key, SubKey, Value}.  The Value will be
%% stored within the HEAD of the object (in the Ledger), so the full object
%% is retrievable using a HEAD request.  The ObjectOp is either add or remove.
%%
%% The list should be de-duplicated before it is passed to the bookie.
%% Delegates to book_mput/3 with no expiry (TTL of infinity).
book_mput(Pid, ObjectSpecs) ->
    book_mput(Pid, ObjectSpecs, infinity).
-spec book_mput(pid(), list(leveled_codec:object_spec()), infinity|integer())
                                                                -> ok|pause.
%% @doc
%%
%% When the store is being run in head_only mode, batches of object specs may
%% be inserted in to the store using book_mput/2.  ObjectSpecs should be
%% of the form {action, Bucket, Key, SubKey, Value}.  The Value will be
%% stored within the HEAD of the object (in the Ledger), so the full object
%% is retrievable using a HEAD request.
%%
%% The list should be de-duplicated before it is passed to the bookie.
%% Synchronous call; as with book_put/7 the reply may be 'pause' when the
%% store is under back-pressure.
book_mput(Pid, ObjectSpecs, TTL) ->
    gen_server:call(Pid, {mput, ObjectSpecs, TTL}, infinity).
-spec book_delete(pid(),
                    leveled_codec:key(), leveled_codec:key(),
                    leveled_codec:index_specs()) -> ok|pause.
%% @doc
%%
%% A thin wrap around the put of a special tombstone object.  There is no
%% immediate reclaim of space, simply the addition of a more recent tombstone.
%% Note the tombstone is always written under ?STD_TAG with no expiry.
book_delete(Pid, Bucket, Key, IndexSpecs) ->
    book_put(Pid, Bucket, Key, delete, IndexSpecs, ?STD_TAG).
%% Type contracts for the GET/HEAD family of requests defined below.
-spec book_get(pid(),
                leveled_codec:key(), leveled_codec:key(), leveled_codec:tag())
                                                    -> {ok, any()}|not_found.
-spec book_head(pid(),
                leveled_codec:key(), leveled_codec:key(), leveled_codec:tag())
                                                    -> {ok, any()}|not_found.
-spec book_sqn(pid(),
                leveled_codec:key(), leveled_codec:key(), leveled_codec:tag())
                                        -> {ok, non_neg_integer()}|not_found.
-spec book_headonly(pid(),
                leveled_codec:key(), leveled_codec:key(), leveled_codec:key())
                                                    -> {ok, any()}|not_found.
%% @doc - GET and HEAD requests
%%
%% The Bookie supports both GET and HEAD requests, with the HEAD request
%% returning only the metadata and not the actual object value. The HEAD
%% requests can be serviced by reference to the Ledger Cache and the Penciller.
%%
%% GET requests first follow the path of a HEAD request, and if an object is
%% found, then fetch the value from the Journal via the Inker.
%%
%% To perform a head request in head_only mode with_lookup, book_headonly/4
%% should be used. Note: if head_only mode is false or no_lookup, then this
%% request would not be supported
%% book_get/4: fetch the object value for Bucket/Key under Tag.
book_get(Pid, Bucket, Key, Tag) ->
    gen_server:call(Pid, {get, Bucket, Key, Tag}, infinity).
%% book_head/4: fetch object metadata only; the trailing boolean is false
%% as the full head (not just the SQN) is required - compare book_sqn/4.
book_head(Pid, Bucket, Key, Tag) ->
    gen_server:call(Pid, {head, Bucket, Key, Tag, false}, infinity).
%% book_get/3 and book_head/3: as the 4-arity forms with the default
%% ?STD_TAG.
book_get(Pid, Bucket, Key) ->
    book_get(Pid, Bucket, Key, ?STD_TAG).
book_head(Pid, Bucket, Key) ->
    book_head(Pid, Bucket, Key, ?STD_TAG).
%% book_headonly/4: HEAD request for a head_only-mode store; the key is
%% the {Key, SubKey} pair under the ?HEAD_TAG.
book_headonly(Pid, Bucket, Key, SubKey) ->
    gen_server:call(Pid,
        {head, Bucket, {Key, SubKey}, ?HEAD_TAG, false},
        infinity).
%% book_sqn/3,4: as book_head but the trailing boolean is true, so only
%% the sequence number is returned (see the book_sqn spec above).
book_sqn(Pid, Bucket, Key) ->
    book_sqn(Pid, Bucket, Key, ?STD_TAG).
book_sqn(Pid, Bucket, Key, Tag) ->
    gen_server:call(Pid, {head, Bucket, Key, Tag, true}, infinity).
-spec book_returnfolder(pid(), tuple()) -> {async, fun()}.
%% @doc Folds over store - deprecated
%% The tuple() is a query, and book_returnfolder will return an {async, Folder}
%% whereby calling Folder() will run a particular fold over a snapshot of the
%% store, and close the snapshot when complete
%%
%% For any new application requiring a fold - use the API below instead, and
%% one of:
%% - book_indexfold
%% - book_bucketlist
%% - book_keylist
%% - book_headfold
%% - book_objectfold
%% Synchronous request to the Bookie for the runner; the fold itself only
%% executes when the caller runs the returned fun.
book_returnfolder(Pid, RunnerType) ->
    gen_server:call(Pid, {return_runner, RunnerType}, infinity).
%% Different runner types for async queries:
%% - book_indexfold
%% - book_bucketlist
%% - book_keylist
%% - book_headfold
%% - book_objectfold
%%
%% See individual instructions for each one. All folds can be completed early
%% by using a fold_function that throws an exception when some threshold is
%% reached - and a worker that catches that exception.
%%
%% See test/end_to_end/iterator_SUITE:breaking_folds/1
%% @doc Builds and returns an `{async, Runner}' pair for secondary
%% index queries. Calling `Runner' will fold over keys (ledger) tagged
%% with the index `?IDX_TAG' and constrain the fold to a specific
%% `Bucket''s index fields, as specified by the `Constraint'
%% argument. If `Constraint' is a tuple of `{Bucket, Key}' the fold
%% starts at `Key', meaning any keys lower than `Key' and which match
%% the start of the range query, will not be folded over (this is
%% useful for implementing pagination, for example.)
%%
%% Provide a `FoldAccT' tuple of fold fun ( which is 3 arity fun that
%% will be called once per-matching index entry, with the Bucket,
%% Primary Key (or {IndexVal and Primary key} if `ReturnTerms' is
%% true)) and an initial Accumulator, which will be passed as the 3rd
%% argument in the initial call to FoldFun. Subsequent calls to
%% FoldFun will use the previous return of FoldFun as the 3rd
%% argument, and the final return of `Runner' is the final return of
%% `FoldFun', the final Accumulator value. The query can filter inputs
%% based on `Range' and `TermHandling'. `Range' specifies the name of
%% `IndexField' to query, and `Start' and `End' optionally provide the
%% range to query over. `TermHandling' is a 2-tuple, the first
%% element is a `boolean()', `true' meaning return terms, (see fold
%% fun above), `false' meaning just return primary keys. `TermRegex'
%% is either a regular expression of type `re:mp()' (that will be run
%% against each index term value, and only those that match will be
%% accumulated) or `undefined', which means no regular expression
%% filtering of index values. NOTE: Regular Expressions can ONLY be
%% run on indexes that have binary or string values, NOT integer
%% values. In the Riak sense of secondary indexes, there are two types
%% of indexes `_bin' indexes and `_int' indexes. Term regex may only
%% be run against the `_bin' type.
%%
%% Any book_indexfold query will fold over the snapshot under the control
%% of the worker process controlling the function - and that process can
%% be interrupted by a throw, which will be forwarded to the worker (whilst
%% still closing down the snapshot). This may be used, for example, to
%% curtail a fold in the application at max_results
-spec book_indexfold(pid(),
                        Constraint:: {Bucket, StartKey},
                        FoldAccT :: {FoldFun, Acc},
                        Range :: {IndexField, Start, End},
                        TermHandling :: {ReturnTerms, TermRegex}) ->
                            {async, Runner::fun()}
                                when Bucket::term(),
                                    StartKey::term(),
                                    FoldFun::fun((Bucket, Key | {IndexVal, Key}, Acc) -> Acc),
                                    Acc::term(),
                                    IndexField::term(),
                                    IndexVal::term(),
                                    Start::IndexVal,
                                    End::IndexVal,
                                    ReturnTerms::boolean(),
                                    TermRegex :: leveled_codec:regular_expression().
%% When a {Bucket, StartKey} constraint is given, hand the index query
%% straight to the folder framework.
book_indexfold(Pid, Constraint, FoldAccT, Range, TermHandling)
        when is_tuple(Constraint) ->
    book_returnfolder(
        Pid,
        {index_query, Constraint, FoldAccT, Range, TermHandling});
book_indexfold(Pid, Bucket, FoldAccT, Range, TermHandling) ->
    %% Deprecated form with a bare Bucket.  A StartKey should be specified
    %% to avoid confusion when the bucket is a tuple; use an empty StartKey
    %% (e.g. <<>>) if none is required.  A future release may remove this
    %% clause and return `error` instead.  For now null is assumed to sort
    %% lower than any key.
    leveled_log:log("B0019", [Bucket]),
    book_indexfold(Pid, {Bucket, null}, FoldAccT, Range, TermHandling).
%% @doc list buckets. Folds over the ledger only. Given a `Tag' folds
%% over the keyspace calling `FoldFun' from `FoldAccT' for each
%% `Bucket'. `FoldFun' is a 2-arity function that is passed `Bucket'
%% and `Acc'. On first call `Acc' is the initial `Acc' from
%% `FoldAccT', thereafter the result of the previous call to
%% `FoldFun'. `Constraint' can be either atom `all' or `first' meaning
%% return all buckets, or just the first one found. Returns `{async,
%% Runner}' where `Runner' is a fun that returns the final value of
%% `FoldFun', the final `Acc' accumulator.
-spec book_bucketlist(pid(), Tag, FoldAccT, Constraint) ->
                            {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Acc) -> Acc),
    Acc :: term(),
    Constraint :: first | all,
    Bucket :: term(),
    Runner :: fun(() -> Acc).
%% Map the Constraint atom onto the matching runner type, then hand the
%% query to the folder framework.
%% Note: the duplicate `Acc :: term()` constraint previously present in the
%% spec has been removed.
book_bucketlist(Pid, Tag, FoldAccT, Constraint) ->
    RunnerType =
        case Constraint of
            first -> {first_bucket, Tag, FoldAccT};
            all -> {bucket_list, Tag, FoldAccT}
        end,
    book_returnfolder(Pid, RunnerType).
%% @doc fold over the keys (ledger only) for a given `Tag'. Each key
%% will result in a call to `FoldFun' from `FoldAccT'. `FoldFun' is a
%% 3-arity function, called with `Bucket', `Key' and `Acc'. The
%% initial value of `Acc' is the second element of `FoldAccT'. Returns
%% `{async, Runner}' where `Runner' is a function that will run the
%% fold and return the final value of `Acc'
%%
%% Any book_keylist query will fold over the snapshot under the control
%% of the worker process controlling the function - and that process can
%% be interrupted by a throw, which will be forwarded to the worker (whilst
%% still closing down the snapshot). This may be used, for example, to
%% curtail a fold in the application at max_results
-spec book_keylist(pid(), Tag, FoldAccT) -> {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Runner :: fun(() -> Acc).
%% Request a keylist runner over every bucket for the given Tag.
book_keylist(Pid, Tag, FoldAccT) ->
    book_returnfolder(Pid, {keylist, Tag, FoldAccT}).
%% @doc as for book_keylist/3 but constrained to only those keys in
%% `Bucket'
-spec book_keylist(pid(), Tag, Bucket, FoldAccT) -> {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Runner :: fun(() -> Acc).
%% Request a keylist runner constrained to a single Bucket.
book_keylist(Pid, Tag, Bucket, FoldAccT) ->
    book_returnfolder(Pid, {keylist, Tag, Bucket, FoldAccT}).
%% @doc as for book_keylist/4 with additional constraint that only
%% keys in the `KeyRange' tuple will be folded over, where `KeyRange'
%% is `StartKey', the first key in the range and `EndKey' the last,
%% (inclusive.) Or the atom `all', which will return all keys in the
%% `Bucket'.
-spec book_keylist(pid(), Tag, Bucket, KeyRange, FoldAccT) ->
                    {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    KeyRange :: {StartKey, EndKey} | all,
    StartKey :: Key,
    EndKey :: Key,
    Key :: term(),
    Runner :: fun(() -> Acc).
%% As book_keylist/6 with no regular expression filter.  Delegating (rather
%% than building the runner tuple again) keeps the runner construction in
%% one place.
book_keylist(Pid, Tag, Bucket, KeyRange, FoldAccT) ->
    book_keylist(Pid, Tag, Bucket, KeyRange, FoldAccT, undefined).
%% @doc as for book_keylist/5 with additional constraint that a compiled regular
%% expression is passed to be applied against any key that is in the range.
%% This is always applied to the Key and only the Key, not to any SubKey.
-spec book_keylist(pid(), Tag, Bucket, KeyRange, FoldAccT, TermRegex) ->
                    {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    KeyRange :: {StartKey, EndKey} | all,
    StartKey :: Key,
    EndKey :: Key,
    Key :: term(),
    TermRegex :: leveled_codec:regular_expression(),
    Runner :: fun(() -> Acc).
%% Request a keylist runner constrained by Bucket, KeyRange and an
%% optional regular expression applied to the Key only.
book_keylist(Pid, Tag, Bucket, KeyRange, FoldAccT, TermRegex) ->
    book_returnfolder(
        Pid, {keylist, Tag, Bucket, KeyRange, FoldAccT, TermRegex}).
%% @doc fold over all the objects/values in the store in key
%% order. `Tag' is the tagged type of object. `FoldAccT' is a 2-tuple,
%% the first element being a 4-arity fun, that is called once for each
%% key with the arguments `Bucket', `Key', `Value', `Acc'. The 2nd
%% element is the initial accumulator `Acc' which is passed to
%% `FoldFun' on its first call. Thereafter the return value from
%% `FoldFun' is the 4th argument to the next call of
%% `FoldFun'. `SnapPreFold' is a boolean where `true' means take the
%% snapshot at once, and `false' means take the snapshot when the
%% returned `Runner' is executed. Return `{async, Runner}' where
%% `Runner' is a 0-arity function that returns the final accumulator
%% from `FoldFun'
-spec book_objectfold(pid(), Tag, FoldAccT, SnapPreFold) -> {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Value, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Value :: term(),
    SnapPreFold :: boolean(),
    Runner :: fun(() -> Acc).
%% Request a fold over every object of the given Tag, in key order.
book_objectfold(Pid, Tag, FoldAccT, SnapPreFold) ->
    book_returnfolder(
        Pid, {foldobjects_allkeys, Tag, FoldAccT, SnapPreFold}).
%% @doc exactly as book_objectfold/4 with the additional parameter
%% `Order'. `Order' can be `sqn_order' or `key_order'. In
%% book_objectfold/4 and book_objectfold/6 `key_order' is
%% implied. This function called with `Option == key_order' is
%% identical to book_objectfold/4. NOTE: if you must fold over ALL
%% objects, this is quicker than `key_order' due to accessing the
%% journal objects in their on-disk order, not via a fold over the
%% ledger.
-spec book_objectfold(pid(), Tag, FoldAccT, SnapPreFold, Order) -> {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Value, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Value :: term(),
    SnapPreFold :: boolean(),
    Runner :: fun(() -> Acc),
    Order :: key_order | sqn_order.
%% Request a fold over every object of the given Tag, in the requested
%% order (key order via the ledger, or SQN order via the journal).
book_objectfold(Pid, Tag, FoldAccT, SnapPreFold, Order) ->
    book_returnfolder(
        Pid, {foldobjects_allkeys, Tag, FoldAccT, SnapPreFold, Order}).
%% @doc as book_objectfold/4, with the addition of some constraints on
%% the range of objects folded over. The 3rd argument `Bucket' limits
%% the fold to that specific bucket only. The 4th argument `Limiter'
%% further constrains the fold. `Limiter' can be either a `Range' or
%% `Index' query. `Range' is either that atom `all', meaning {min,
%% max}, or, a two tuple of start key and end key, inclusive. Index
%% Query is a 3-tuple of `{IndexField, StartTerm, EndTerm}`, just as
%% in book_indexfold/5
-spec book_objectfold(pid(), Tag, Bucket, Limiter, FoldAccT, SnapPreFold) ->
                            {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Value, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Value :: term(),
    Limiter :: Range | Index,
    Range :: {StartKey, EndKey} | all,
    Index :: {IndexField, Start, End},
    IndexField::term(),
    IndexVal::term(),
    Start::IndexVal,
    End::IndexVal,
    StartKey :: Key,
    EndKey :: Key,
    SnapPreFold :: boolean(),
    Runner :: fun(() -> Acc).
%% Distinguish the Limiter forms by shape: 'all' or a 2-tuple key range
%% select a by-bucket fold, a 3-tuple index query selects a by-index fold.
%% (Uses tuple_size/1 rather than size/1 in the guards, and the previous
%% redundant re-match of the bound IndexQuery variable is removed.)
book_objectfold(Pid, Tag, Bucket, Limiter, FoldAccT, SnapPreFold) ->
    RunnerType =
        case Limiter of
            all ->
                {foldobjects_bybucket, Tag, Bucket, all, FoldAccT, SnapPreFold};
            Range when is_tuple(Range) andalso tuple_size(Range) == 2 ->
                {foldobjects_bybucket, Tag, Bucket, Range, FoldAccT, SnapPreFold};
            IndexQuery when is_tuple(IndexQuery) andalso tuple_size(IndexQuery) == 3 ->
                {foldobjects_byindex, Tag, Bucket, IndexQuery, FoldAccT, SnapPreFold}
        end,
    book_returnfolder(Pid, RunnerType).
%% @doc LevelEd stores not just Keys in the ledger, but also may store
%% object metadata, referred to as heads (after Riak head request for
%% object metadata) Often when folding over objects all that is really
%% required is the object metadata. These "headfolds" are an efficient
%% way to fold over the ledger (possibly wholly in memory) and get
%% object metadata.
%%
%% Fold over the object's head. `Tag' is the tagged type of the
%% objects to fold over. `FoldAccT' is a 2-tuple. The 1st element is a
%% 4-arity fold fun, that takes a Bucket, Key, ProxyObject, and the
%% `Acc'. The ProxyObject is an object that only contains the
%% head/metadata, and no object data from the journal. The `Acc' in
%% the first call is that provided as the second element of `FoldAccT'
%% and thereafter the return of the previous all to the fold fun. If
%% `JournalCheck' is `true' then the journal is checked to see if the
%% object in the ledger is present, which means a snapshot of the
%% whole store is required, if `false', then no such check is
%% performed, and only the ledger need be snapshotted. `SnapPreFold' is a
%% boolean that determines if the snapshot is taken when the folder is
%% requested `true', or when run `false'. `SegmentList' can be
%% `false' meaning, all heads, or a list of integers that designate
%% segments in a TicTac Tree.
-spec book_headfold(pid(), Tag, FoldAccT, JournalCheck, SnapPreFold, SegmentList) ->
                            {async, Runner} when
    Tag :: leveled_codec:tag(),
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Value, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Value :: term(),
    JournalCheck :: boolean(),
    SnapPreFold :: boolean(),
    SegmentList :: false | list(integer()),
    Runner :: fun(() -> Acc).
%% Delegates to book_headfold/9 with no Limiter ('all'), no last-modified
%% range and no maximum object count.
book_headfold(Pid, Tag, FoldAccT, JournalCheck, SnapPreFold, SegmentList) ->
    book_headfold(Pid, Tag, all,
        FoldAccT, JournalCheck, SnapPreFold,
        SegmentList, false, false).
%% @doc as book_headfold/6, but with the addition of a `Limiter' that
%% restricts the set of objects folded over. `Limiter' can either be a
%% bucket list, or a key range of a single bucket. For bucket list,
%% the `Limiter' should be a 2-tuple, the first element the tag
%% `bucket_list' and the second a `list()' of `Bucket'. Only heads
%% from the listed buckets will be folded over. A single bucket key
%% range may also be used as a `Limiter', in which case the argument
%% is a 3-tuple of `{range, Bucket, Range}' where `Bucket' is a
%% bucket, and `Range' is a 2-tuple of start key and end key,
%% inclusive, or the atom `all'. The rest of the arguments are as
%% `book_headfold/6'
-spec book_headfold(pid(), Tag, Limiter, FoldAccT, JournalCheck, SnapPreFold, SegmentList) ->
                            {async, Runner} when
    Tag :: leveled_codec:tag(),
    Limiter :: BucketList | BucketKeyRange,
    BucketList :: {bucket_list, list(Bucket)},
    BucketKeyRange :: {range, Bucket, KeyRange},
    KeyRange :: {StartKey, EndKey} | all,
    StartKey :: Key,
    EndKey :: Key,
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Value, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Value :: term(),
    JournalCheck :: boolean(),
    SnapPreFold :: boolean(),
    SegmentList :: false | list(integer()),
    Runner :: fun(() -> Acc).
%% Delegates to book_headfold/9 passing the Limiter through, with no
%% last-modified range and no maximum object count.
book_headfold(Pid, Tag, Limiter, FoldAccT, JournalCheck, SnapPreFold, SegmentList) ->
    book_headfold(Pid, Tag, Limiter,
        FoldAccT, JournalCheck, SnapPreFold,
        SegmentList, false, false).
%% @doc as book_headfold/7, but with the addition of a Last Modified Date
%% Range and Max Object Count. For version 2 objects this will filter out
%% all objects with a highest Last Modified Date that is outside of the range.
%% All version 1 objects will be included in the result set regardless of Last
%% Modified Date.
%% The Max Object Count will stop the fold once the count has been reached on
%% this store only. The Max Object Count if provided will mean that the runner
%% will return {RemainingCount, Acc} not just Acc
-spec book_headfold(pid(), Tag, Limiter, FoldAccT, JournalCheck, SnapPreFold,
                        SegmentList, LastModRange, MaxObjectCount) ->
                            {async, Runner} when
    Tag :: leveled_codec:tag(),
    Limiter :: BucketList | BucketKeyRange | all,
    BucketList :: {bucket_list, list(Bucket)},
    BucketKeyRange :: {range, Bucket, KeyRange},
    KeyRange :: {StartKey, EndKey} | all,
    StartKey :: Key,
    EndKey :: Key,
    FoldAccT :: {FoldFun, Acc},
    FoldFun :: fun((Bucket, Key, Value, Acc) -> Acc),
    Acc :: term(),
    Bucket :: term(),
    Key :: term(),
    Value :: term(),
    JournalCheck :: boolean(),
    SnapPreFold :: boolean(),
    SegmentList :: false | list(integer()),
    LastModRange :: false | leveled_codec:lastmod_range(),
    MaxObjectCount :: false | pos_integer(),
    Runner :: fun(() -> ResultingAcc),
    ResultingAcc :: Acc | {non_neg_integer(), Acc}.
%% Dispatch on the shape of the Limiter: a bucket list, a single-bucket
%% key range, or 'all' - each mapping to the matching runner type.
book_headfold(Pid, Tag, {bucket_list, Buckets}, FoldAccT, JournalCheck,
        SnapPreFold, SegmentList, LastModRange, MaxObjectCount) ->
    book_returnfolder(
        Pid,
        {foldheads_bybucket, Tag, Buckets, bucket_list, FoldAccT,
            JournalCheck, SnapPreFold,
            SegmentList, LastModRange, MaxObjectCount});
book_headfold(Pid, Tag, {range, Bucket, KeyRange}, FoldAccT, JournalCheck,
        SnapPreFold, SegmentList, LastModRange, MaxObjectCount) ->
    book_returnfolder(
        Pid,
        {foldheads_bybucket, Tag, Bucket, KeyRange, FoldAccT,
            JournalCheck, SnapPreFold,
            SegmentList, LastModRange, MaxObjectCount});
book_headfold(Pid, Tag, all, FoldAccT, JournalCheck,
        SnapPreFold, SegmentList, LastModRange, MaxObjectCount) ->
    book_returnfolder(
        Pid,
        {foldheads_allkeys, Tag, FoldAccT,
            JournalCheck, SnapPreFold,
            SegmentList, LastModRange, MaxObjectCount}).
-spec book_snapshot(pid(),
                    store|ledger,
                    tuple()|undefined,
                    boolean()|undefined) -> {ok, pid(), pid()|null}.
%% @doc create a snapshot of the store
%%
%% Snapshot can be based on a pre-defined query (which will be used to filter
%% caches prior to copying for the snapshot), and can be defined as long
%% running to avoid timeouts (snapshots are generally expected to be required
%% for < 60s)
%% Returns {ok, LedgerSnapshot, JournalSnapshot} where the journal element
%% is null for a ledger-only snapshot (per the spec above).
book_snapshot(Pid, SnapType, Query, LongRunning) ->
    gen_server:call(Pid, {snapshot, SnapType, Query, LongRunning}, infinity).
-spec book_compactjournal(pid(), integer()) -> ok|busy.
-spec book_islastcompactionpending(pid()) -> boolean().
-spec book_trimjournal(pid()) -> ok.
%% @doc Call for compaction of the Journal
%%
%% the scheduling of Journal compaction is called externally, so it is assumed
%% in Riak it will be triggered by a vnode callback.
book_compactjournal(Pid, Timeout) ->
    % Only the ok|busy element of the reply is of interest to callers;
    % the second element of the reply tuple is discarded
    {R, _P} = gen_server:call(Pid, {compact_journal, Timeout}, infinity),
    R.
%% @doc Check on progress of the last compaction
%% Returns true if the last requested journal compaction is still pending.
book_islastcompactionpending(Pid) ->
    gen_server:call(Pid, confirm_compact, infinity).
%% @doc Trim the journal when in head_only mode
%%
%% In head_only mode the journal can be trimmed of entries which are before
%% the persisted SQN.  This is much quicker than compacting the journal
book_trimjournal(Pid) ->
    gen_server:call(Pid, trim, infinity).
-spec book_close(pid()) -> ok.
-spec book_destroy(pid()) -> ok.
%% @doc Clean shutdown
%%
%% A clean shutdown will persist all the information in the Penciller memory
%% before closing, so shutdown is not instantaneous.
book_close(Pid) ->
    gen_server:call(Pid, close, infinity).
%% @doc Close and clean-out files
%% As book_close/1 but the store's files are removed - the data is lost.
book_destroy(Pid) ->
    gen_server:call(Pid, destroy, infinity).
-spec book_hotbackup(pid()) -> {async, fun()}.
%% @doc Backup the Bookie
%% Return a function that will take a backup of a snapshot of the Journal.
%% The function will be 1-arity, and can be passed the absolute folder name
%% to store the backup.
%%
%% Backup files are hard-linked. Does not work in head_only mode, or if
%% index changes are used with a `recovr` compaction/reload strategy
book_hotbackup(Pid) ->
    gen_server:call(Pid, hot_backup, infinity).
-spec book_isempty(pid(), leveled_codec:tag()) -> boolean().
%% @doc
%% Confirm if the store is empty, or if it contains a Key and Value for a
%% given tag.  Implemented as a first-bucket listing: the accumulator
%% starts as true, and the fold fun flips it to false should any bucket
%% exist for the Tag.
book_isempty(Pid, Tag) ->
    FlipOnAnyBucket = fun(_Bucket, _Acc) -> false end,
    {async, Folder} =
        book_bucketlist(Pid, Tag, {FlipOnAnyBucket, true}, first),
    Folder().
-spec book_logsettings(pid()) -> {leveled_log:log_level(), list(string())}.
%% @doc
%% Retrieve the current log settings (the log level and the list of
%% forced logs).
book_logsettings(Pid) ->
    gen_server:call(Pid, log_settings, infinity).
-spec book_loglevel(pid(), leveled_log:log_level()) -> ok.
%% @doc
%% Change the log level of the store (asynchronous cast).
book_loglevel(Pid, LogLevel) ->
    gen_server:cast(Pid, {log_level, LogLevel}).
-spec book_addlogs(pid(), list(string())) -> ok.
%% @doc
%% Add to the list of forced logs, a list of more forced logs
%% (asynchronous cast).
book_addlogs(Pid, ForcedLogs) ->
    gen_server:cast(Pid, {add_logs, ForcedLogs}).
-spec book_removelogs(pid(), list(string())) -> ok.
%% @doc
%% Remove from the list of forced logs, a list of forced logs.  Applied
%% asynchronously (cast)
book_removelogs(Pid, ForcedLogs) ->
    gen_server:cast(Pid, {remove_logs, ForcedLogs}).
-spec book_returnactors(pid()) -> {ok, pid(), pid()}.
%% @doc
%% Return the Inker and Penciller - {ok, Inker, Penciller}.  Used only in tests
%% Note this uses the default gen_server:call timeout, unlike the other API
%% calls in this module which wait indefinitely
book_returnactors(Pid) ->
    gen_server:call(Pid, return_actors).
%%%============================================================================
%%% gen_server callbacks
%%%============================================================================
-spec init([open_options()]) -> {ok, book_state()}.
%% Start the bookie either from file (when a root_path is given) or as a
%% snapshot of an existing bookie (when snapshot_bookie is given)
init([Opts]) ->
    leveled_rand:seed(),
    case {proplists:get_value(snapshot_bookie, Opts),
            proplists:get_value(root_path, Opts)} of
        {undefined, undefined} ->
            % Neither a snapshot source nor a root path - nothing to open
            {stop, no_root_path};
        {undefined, _RP} ->
            % Start from file not snapshot
            % Must set log level first - as log level will be fetched within
            % set_options/1. Also logs can now be added to set_options/1
            LogLevel = proplists:get_value(log_level, Opts),
            leveled_log:set_loglevel(LogLevel),
            ForcedLogs = proplists:get_value(forced_logs, Opts),
            leveled_log:add_forcedlogs(ForcedLogs),
            DatabaseID = proplists:get_value(database_id, Opts),
            leveled_log:set_databaseid(DatabaseID),
            {InkerOpts, PencillerOpts} = set_options(Opts),
            % Apply any test/operator overrides of leveled functions via the
            % application environment
            OverrideFunctions = proplists:get_value(override_functions, Opts),
            SetFun =
                fun({FuncName, Func}) ->
                    application:set_env(leveled, FuncName, Func)
                end,
            lists:foreach(SetFun, OverrideFunctions),
            ConfiguredCacheSize =
                max(proplists:get_value(cache_size, Opts), ?MIN_CACHE_SIZE),
            CacheJitter =
                max(1, ConfiguredCacheSize div (100 div ?CACHE_SIZE_JITTER)),
            % Jitter the cache size per-process (phash2 of self()) so that
            % multiple bookies do not all flush their caches in lock-step
            CacheSize =
                ConfiguredCacheSize + erlang:phash2(self()) rem CacheJitter,
            PCLMaxSize =
                PencillerOpts#penciller_options.max_inmemory_tablesize,
            CacheRatio = PCLMaxSize div ConfiguredCacheSize,
            % It is expected that the maximum size of the penciller
            % in-memory store should not be more than about 10 x the size
            % of the ledger cache. In this case there will be a larger
            % than tested list of ledger_caches in the penciller memory,
            % and performance may be unpredictable
            case CacheRatio > 32 of
                true ->
                    leveled_log:log("B0020", [PCLMaxSize, ConfiguredCacheSize]);
                false ->
                    ok
            end,
            PageCacheLevel = proplists:get_value(ledger_preloadpagecache_level, Opts),
            {HeadOnly, HeadLookup, SSTPageCacheLevel} =
                case proplists:get_value(head_only, Opts) of
                    false ->
                        {false, true, PageCacheLevel};
                    with_lookup ->
                        {true, true, PageCacheLevel};
                    no_lookup ->
                        {true, false, ?SST_PAGECACHELEVEL_NOLOOKUP}
                end,
            % Override the default page cache level - we want to load into the
            % page cache many levels if we intend to support lookups, and only
            % levels 0 and 1 otherwise
            SSTOpts = PencillerOpts#penciller_options.sst_options,
            SSTOpts0 = SSTOpts#sst_options{pagecache_level = SSTPageCacheLevel},
            PencillerOpts0 =
                PencillerOpts#penciller_options{sst_options = SSTOpts0},
            State0 = #state{cache_size=CacheSize,
                            is_snapshot=false,
                            head_only=HeadOnly,
                            head_lookup = HeadLookup},
            % Start the Inker and Penciller, replaying any Journal entries
            % not yet persisted in the Ledger
            {Inker, Penciller} =
                startup(InkerOpts, PencillerOpts0, State0),
            NewETS = ets:new(mem, [ordered_set]),
            leveled_log:log("B0001", [Inker, Penciller]),
            {ok, State0#state{inker=Inker,
                                penciller=Penciller,
                                ledger_cache=#ledger_cache{mem = NewETS}}};
        {Bookie, undefined} ->
            % Start as a snapshot of an existing bookie
            {ok, Penciller, Inker} =
                book_snapshot(Bookie, store, undefined, true),
            leveled_log:log("B0002", [Inker, Penciller]),
            {ok, #state{penciller=Penciller,
                        inker=Inker,
                        is_snapshot=true}}
    end.
%% A standard PUT (not head_only mode): persist the object to the Journal
%% (Inker) first, then add the Ledger changes to the in-memory ledger cache
handle_call({put, Bucket, Key, Object, IndexSpecs, Tag, TTL}, From, State)
        when State#state.head_only == false ->
    LedgerKey = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
    SW0 = os:timestamp(),
    {ok, SQN, ObjSize} = leveled_inker:ink_put(State#state.inker,
                                                LedgerKey,
                                                Object,
                                                {IndexSpecs, TTL}),
    {SW1, Timings1} =
        update_timings(SW0, {put, {inker, ObjSize}}, State#state.put_timings),
    Changes = preparefor_ledgercache(null,
                                        LedgerKey,
                                        SQN,
                                        Object,
                                        ObjSize,
                                        {IndexSpecs, TTL}),
    Cache0 = addto_ledgercache(Changes, State#state.ledger_cache),
    {_SW2, Timings2} = update_timings(SW1, {put, mem}, Timings1),
    {Timings, CountDown} =
        update_statetimings(put, Timings2, State#state.put_countdown),
    % If the previous push to memory was returned then punish this PUT with
    % a delay. If the back-pressure in the Penciller continues, these
    % delays will become more frequent
    % The reply is sent before any push of the cache to the Penciller, so
    % the caller is not blocked on that work
    case State#state.slow_offer of
        true ->
            gen_server:reply(From, pause);
        false ->
            gen_server:reply(From, ok)
    end,
    maybe_longrunning(SW0, overall_put),
    case maybepush_ledgercache(State#state.cache_size,
                                Cache0,
                                State#state.penciller) of
        {ok, NewCache} ->
            {noreply, State#state{ledger_cache = NewCache,
                                    put_timings = Timings,
                                    put_countdown = CountDown,
                                    slow_offer = false}};
        {returned, NewCache} ->
            % Penciller refused the cache - set slow_offer so the next PUT
            % is paused
            {noreply, State#state{ledger_cache = NewCache,
                                    put_timings = Timings,
                                    put_countdown = CountDown,
                                    slow_offer = true}}
    end;
%% A batch PUT of object specs, head_only mode only - the specs are
%% journaled under a single dummy key rather than as individual objects
handle_call({mput, ObjectSpecs, TTL}, From, State)
        when State#state.head_only == true ->
    {ok, SQN} =
        leveled_inker:ink_mput(State#state.inker, dummy, {ObjectSpecs, TTL}),
    Changes =
        preparefor_ledgercache(?INKT_MPUT, ?DUMMY,
                                SQN, null, length(ObjectSpecs),
                                {ObjectSpecs, TTL}),
    Cache0 = addto_ledgercache(Changes, State#state.ledger_cache),
    % Reply (ok, or pause when under Penciller back-pressure) before pushing
    % the cache, as with the standard PUT clause
    case State#state.slow_offer of
        true ->
            gen_server:reply(From, pause);
        false ->
            gen_server:reply(From, ok)
    end,
    case maybepush_ledgercache(State#state.cache_size,
                                Cache0,
                                State#state.penciller) of
        {ok, NewCache} ->
            {noreply, State#state{ledger_cache = NewCache,
                                    slow_offer = false}};
        {returned, NewCache} ->
            {noreply, State#state{ledger_cache = NewCache,
                                    slow_offer = true}}
    end;
%% GET request: fetch the head from the Ledger first, and only if an active
%% (non-tomb, non-expired) entry is found fetch the object from the Journal
handle_call({get, Bucket, Key, Tag}, _From, State)
        when State#state.head_only == false ->
    LedgerKey = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
    SWh = os:timestamp(),
    {H0, UpdCR} =
        fetch_head(LedgerKey,
                    State#state.penciller,
                    State#state.ledger_cache,
                    State#state.cache_ratio),
    HeadResult =
        case H0 of
            not_present ->
                not_found;
            Head ->
                {Seqn, Status, _MH, _MD} =
                    leveled_codec:striphead_to_v1details(Head),
                case Status of
                    tomb ->
                        not_found;
                    {active, TS} ->
                        % An active entry is only a hit if its expiry time
                        % has not yet passed
                        case TS >= leveled_util:integer_now() of
                            false ->
                                not_found;
                            true ->
                                {LedgerKey, Seqn}
                        end
                end
        end,
    {SWb, Timings1} =
        update_timings(SWh, {get, head}, State#state.get_timings),
    {Reply, Timings2} =
        case HeadResult of
            not_found ->
                {not_found, Timings1};
            {LK, SQN} ->
                % Head found - fetch the object body from the Journal
                Object = fetch_value(State#state.inker, {LK, SQN}),
                {_SW, UpdTimingsB} =
                    update_timings(SWb, {get, body}, Timings1),
                case Object of
                    not_present ->
                        {not_found, UpdTimingsB};
                    _ ->
                        {{ok, Object}, UpdTimingsB}
                end
        end,
    {Timings, CountDown} =
        update_statetimings(get, Timings2, State#state.get_countdown),
    {reply,
        Reply,
        State#state{get_timings = Timings,
                    get_countdown = CountDown,
                    cache_ratio =
                        maybelog_cacheratio(UpdCR, State#state.is_snapshot)}};
%% HEAD request: return object metadata (or just the SQN when SQNOnly is
%% true) from the Ledger, optionally cross-checking presence in the Journal
handle_call({head, Bucket, Key, Tag, SQNOnly}, _From, State)
        when State#state.head_lookup == true ->
    SWp = os:timestamp(),
    LK = leveled_codec:to_ledgerkey(Bucket, Key, Tag),
    {Head, UpdCR} =
        fetch_head(LK,
                    State#state.penciller,
                    State#state.ledger_cache,
                    State#state.cache_ratio,
                    State#state.head_only),
    {SWr, UpdTimingsP} =
        update_timings(SWp, {head, pcl}, State#state.head_timings),
    {LedgerMD, SQN, JournalCheckFrequency} =
        case Head of
            not_present ->
                {not_found, null, State#state.ink_checking};
            Head ->
                case leveled_codec:striphead_to_v1details(Head) of
                    {_SeqN, tomb, _MH, _MD} ->
                        {not_found, null, State#state.ink_checking};
                    {SeqN, {active, TS}, _MH, MD} ->
                        case TS >= leveled_util:integer_now() of
                            true ->
                                % In head_only mode there is no Journal
                                % object to check against, so the check
                                % frequency is forced to 0
                                CheckFrequency =
                                    case State#state.head_only of
                                        true ->
                                            0;
                                        false ->
                                            State#state.ink_checking
                                    end,
                                case journal_notfound(CheckFrequency,
                                                        State#state.inker,
                                                        LK,
                                                        SeqN) of
                                    {true, UppedFrequency} ->
                                        {not_found, null, UppedFrequency};
                                    {false, ReducedFrequency} ->
                                        {MD, SeqN, ReducedFrequency}
                                end;
                            false ->
                                % Active entry, but the TTL has passed
                                {not_found, null, State#state.ink_checking}
                        end
                end
        end,
    Reply =
        case {LedgerMD, SQNOnly} of
            {not_found, _} ->
                not_found;
            {_, false} ->
                {ok, leveled_head:build_head(Tag, LedgerMD)};
            {_, true} ->
                {ok, SQN}
        end,
    {_SW, UpdTimingsR} =
        update_timings(SWr, {head, rsp}, UpdTimingsP),
    {UpdTimings, CountDown} =
        update_statetimings(head,
                            UpdTimingsR,
                            State#state.head_countdown),
    {reply,
        Reply,
        State#state{head_timings = UpdTimings,
                    head_countdown = CountDown,
                    ink_checking = JournalCheckFrequency,
                    cache_ratio =
                        maybelog_cacheratio(UpdCR, State#state.is_snapshot)}};
handle_call({snapshot, SnapType, Query, LongRunning}, _From, State) ->
    % Snapshot the store, specifying if the snapshot should be long running
    % (i.e. will the snapshot be queued or be required for an extended period
    % e.g. many minutes)
    Reply = snapshot_store(State, SnapType, Query, LongRunning),
    {reply, Reply, State};
handle_call(log_settings, _From, State) ->
    {reply, leveled_log:return_settings(), State};
handle_call({return_runner, QueryType}, _From, State) ->
    % Produce an {async, Runner} fold for the requested query type, tracking
    % fold setup timings
    SW = os:timestamp(),
    Runner = get_runner(State, QueryType),
    {_SW, Timings1} =
        update_timings(SW, {fold, setup}, State#state.fold_timings),
    {Timings, CountDown} =
        update_statetimings(fold, Timings1, State#state.fold_countdown),
    {reply, Runner, State#state{fold_timings = Timings,
                                fold_countdown = CountDown}};
%% Journal compaction: refused with busy if a compaction is already pending,
%% otherwise a ledger snapshot is taken for the compaction run to check
%% entries against
handle_call({compact_journal, Timeout}, _From, State)
        when State#state.head_only == false ->
    case leveled_inker:ink_compactionpending(State#state.inker) of
        true ->
            {reply, {busy, undefined}, State};
        false ->
            {ok, PclSnap, null} =
                snapshot_store(State, ledger, undefined, true),
            R = leveled_inker:ink_compactjournal(State#state.inker,
                                                    PclSnap,
                                                    Timeout),
            {reply, R, State}
    end;
handle_call(confirm_compact, _From, State)
        when State#state.head_only == false ->
    {reply, leveled_inker:ink_compactionpending(State#state.inker), State};
handle_call(trim, _From, State) when State#state.head_only == true ->
    % Trim the Journal of entries before the Penciller's persisted SQN
    PSQN = leveled_penciller:pcl_persistedsqn(State#state.penciller),
    {reply, leveled_inker:ink_trim(State#state.inker, PSQN), State};
%% Hot backup: roll the active journal file, then return a 1-arity fun that
%% backs up an inker snapshot to a given path and closes the snapshot
handle_call(hot_backup, _From, State) when State#state.head_only == false ->
    ok = leveled_inker:ink_roll(State#state.inker),
    BackupFun =
        fun(InkerSnapshot) ->
            fun(BackupPath) ->
                ok = leveled_inker:ink_backup(InkerSnapshot, BackupPath),
                ok = leveled_inker:ink_close(InkerSnapshot)
            end
        end,
    InkerOpts =
        #inker_options{start_snapshot = true,
                        source_inker = State#state.inker,
                        bookies_pid = self()},
    {ok, Snapshot} = leveled_inker:ink_snapstart(InkerOpts),
    {reply, {async, BackupFun(Snapshot)}, State};
%% Clean shutdown - close the Inker and Penciller before stopping
handle_call(close, _From, State) ->
    leveled_inker:ink_close(State#state.inker),
    leveled_penciller:pcl_close(State#state.penciller),
    {stop, normal, ok, State};
%% Destroy - doom both actors then delete their file paths (never permitted
%% on a snapshot)
handle_call(destroy, _From, State=#state{is_snapshot=Snp}) when Snp == false ->
    leveled_log:log("B0011", []),
    {ok, InkPathList} = leveled_inker:ink_doom(State#state.inker),
    {ok, PCLPathList} = leveled_penciller:pcl_doom(State#state.penciller),
    lists:foreach(fun(DirPath) -> delete_path(DirPath) end, InkPathList),
    lists:foreach(fun(DirPath) -> delete_path(DirPath) end, PCLPathList),
    {stop, normal, ok, State};
handle_call(return_actors, _From, State) ->
    {reply, {ok, State#state.inker, State#state.penciller}, State};
%% Catch-all - reply rather than crash on an unsupported request
handle_call(Msg, _From, State) ->
    {reply, {unsupported_message, element(1, Msg)}, State}.
%% Log-setting casts - each change is propagated to the Penciller and the
%% Inker before being applied to the Bookie's own log configuration
handle_cast({log_level, LogLevel}, State) ->
    ok = leveled_penciller:pcl_loglevel(State#state.penciller, LogLevel),
    ok = leveled_inker:ink_loglevel(State#state.inker, LogLevel),
    ok = leveled_log:set_loglevel(LogLevel),
    {noreply, State};
handle_cast({add_logs, ForcedLogs}, State) ->
    ok = leveled_penciller:pcl_addlogs(State#state.penciller, ForcedLogs),
    ok = leveled_inker:ink_addlogs(State#state.inker, ForcedLogs),
    ok = leveled_log:add_forcedlogs(ForcedLogs),
    {noreply, State};
handle_cast({remove_logs, ForcedLogs}, State) ->
    ok = leveled_penciller:pcl_removelogs(State#state.penciller, ForcedLogs),
    ok = leveled_inker:ink_removelogs(State#state.inker, ForcedLogs),
    ok = leveled_log:remove_forcedlogs(ForcedLogs),
    {noreply, State}.
%% Drain unexpected messages rather than letting the mailbox grow
handle_info(_Info, State) ->
    {noreply, State}.

%% Log the shutdown reason.  Closing of the Inker and Penciller is handled
%% in the close/destroy calls, not here
terminate(Reason, _State) ->
    leveled_log:log("B0003", [Reason]).

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%%============================================================================
%%% External functions
%%%============================================================================
-spec empty_ledgercache() -> ledger_cache().
%% @doc
%% Empty the ledger cache table following a push.  A fresh private ets
%% table is created for the new cache
empty_ledgercache() ->
    #ledger_cache{mem = ets:new(empty, [ordered_set])}.
-spec push_to_penciller(pid(), ledger_cache()) -> ok.
%% @doc
%% The push to penciller must start as a tree to correctly de-duplicate
%% the list by order before becoming a de-duplicated list for loading
push_to_penciller(Penciller, LedgerCache) ->
    push_to_penciller_loop(Penciller, loadqueue_ledgercache(LedgerCache)).

%% Keep offering the cache to the Penciller, pausing between attempts for
%% as long as the Penciller returns it
push_to_penciller_loop(Penciller, LedgerCache) ->
    case push_ledgercache(Penciller, LedgerCache) of
        ok ->
            ok;
        returned ->
            timer:sleep(?LOADING_PAUSE),
            push_to_penciller_loop(Penciller, LedgerCache)
    end.
-spec push_ledgercache(pid(), ledger_cache()) -> ok|returned.
%% @doc
%% Push the ledgercache to the Penciller - which should respond ok or
%% returned.  If the response is ok the cache can be flushed, but if the
%% response is returned the cache should continue to build and it should try
%% to flush at a later date
push_ledgercache(Penciller, Cache) ->
    #ledger_cache{loader = Loader,
                    index = Index,
                    min_sqn = MinSQN,
                    max_sqn = MaxSQN} = Cache,
    leveled_penciller:pcl_pushmem(Penciller, {Loader, Index, MinSQN, MaxSQN}).
-spec loadqueue_ledgercache(ledger_cache()) -> ledger_cache().
%% @doc
%% The ledger cache can be built from a queue, for example when loading the
%% ledger from the head of the journal on startup
%%
%% The queue should be built using [NewKey|Acc] so that the most recent
%% key is kept in the sort
loadqueue_ledgercache(Cache) ->
    % ukeysort keeps only the first entry per key, which (given the queue
    % build order) is the most recent one
    DedupedKVL = lists:ukeysort(1, Cache#ledger_cache.load_queue),
    Loader = leveled_tree:from_orderedlist(DedupedKVL, ?CACHE_TYPE),
    Cache#ledger_cache{load_queue = [], loader = Loader}.
-spec snapshot_store(ledger_cache(),
                        pid(), null|pid(), store|ledger,
                        undefined|tuple(), undefined|boolean()) ->
                            {ok, pid(), pid()|null}.
%% @doc
%% Allow a snapshot to be created from part of the store, preferably
%% passing in a query filter so that all of the LoopState does not need to
%% be copied from the real actor to the clone
%%
%% SnapType can be store (requires journal and ledger) or ledger (requires
%% ledger only)
%%
%% Query can be no_lookup, indicating the snapshot will be used for non-specific
%% range queries and not direct fetch requests.  {StartKey, EndKey} if the
%% snapshot is to be used for one specific query only (this is much quicker to
%% setup, assuming the range is a small subset of the overall key space).  If
%% lookup is required but the range isn't defined then 'undefined' should be
%% passed as the query
snapshot_store(LedgerCache, Penciller, Inker, SnapType, Query, LongRunning) ->
    % Strip the ledger cache down to only what the query needs, and pass it
    % into the penciller snapshot so the clone sees recent (unflushed) keys
    LedgerCacheReady = readycache_forsnapshot(LedgerCache, Query),
    BookiesMem = {LedgerCacheReady#ledger_cache.loader,
                    LedgerCacheReady#ledger_cache.index,
                    LedgerCacheReady#ledger_cache.min_sqn,
                    LedgerCacheReady#ledger_cache.max_sqn},
    PCLopts = #penciller_options{start_snapshot = true,
                                    source_penciller = Penciller,
                                    snapshot_query = Query,
                                    snapshot_longrunning = LongRunning,
                                    bookies_pid = self(),
                                    bookies_mem = BookiesMem},
    {ok, LedgerSnapshot} = leveled_penciller:pcl_snapstart(PCLopts),
    case SnapType of
        store ->
            % A store snapshot also requires a journal (inker) snapshot
            InkerOpts = #inker_options{start_snapshot=true,
                                        bookies_pid = self(),
                                        source_inker=Inker},
            {ok, JournalSnapshot} = leveled_inker:ink_snapstart(InkerOpts),
            {ok, LedgerSnapshot, JournalSnapshot};
        ledger ->
            {ok, LedgerSnapshot, null}
    end.
%% Convenience form - snapshot using the parts held in the gen_server state
snapshot_store(State, SnapType, Query, LongRunning) ->
    #state{ledger_cache = LedgerCache,
            penciller = Penciller,
            inker = Inker} = State,
    snapshot_store(LedgerCache, Penciller, Inker, SnapType, Query, LongRunning).
-spec fetch_value(pid(), leveled_codec:journal_ref()) -> not_present|any().
%% @doc
%% Fetch a value from the Journal, logging if the fetch proves to be long
%% running
fetch_value(Inker, {Key, SQN}) ->
    StartTS = os:timestamp(),
    case leveled_inker:ink_fetch(Inker, Key, SQN) of
        not_present ->
            not_present;
        {ok, Value} ->
            maybe_longrunning(StartTS, inker_fetch),
            Value
    end.
%%%============================================================================
%%% Internal functions
%%%============================================================================
-spec startup(#inker_options{}, #penciller_options{}, book_state())
                -> {pid(), pid()}.
%% @doc
%% Startup the Inker and the Penciller, and prompt the loading of the Penciller
%% from the Inker.  The Penciller may be shutdown without the latest data
%% having been persisted: and so the Inker must be able to update the Penciller
%% on startup with anything that happened but wasn't flushed to disk.
startup(InkerOpts, PencillerOpts, State) ->
    {ok, Inker} = leveled_inker:ink_start(InkerOpts),
    {ok, Penciller} = leveled_penciller:pcl_start(PencillerOpts),
    LedgerSQN = leveled_penciller:pcl_getstartupsequencenumber(Penciller),
    leveled_log:log("B0005", [LedgerSQN]),
    ReloadStrategy = InkerOpts#inker_options.reload_strategy,
    LoadFun = get_loadfun(ReloadStrategy, Penciller, State),
    % Each batch of journal entries is pushed to the Penciller as a
    % ledger cache
    BatchFun =
        fun(BatchAcc, _Acc) ->
            push_to_penciller(Penciller, BatchAcc)
        end,
    InitAccFun =
        fun(FN, CurrentMinSQN) ->
            leveled_log:log("I0014", [FN, CurrentMinSQN]),
            empty_ledgercache()
        end,
    % Replay journal entries from the SQN after the last one the Penciller
    % persisted
    ok = leveled_inker:ink_loadpcl(Inker,
                                    LedgerSQN + 1,
                                    LoadFun,
                                    InitAccFun,
                                    BatchFun),
    ok = leveled_inker:ink_checksqn(Inker, LedgerSQN),
    {Inker, Penciller}.
-spec set_defaults(list()) -> open_options().
%% @doc
%% Set any pre-defined defaults for options if the option is not present in
%% the passed in options.  Passed-in options win on a key clash (ukeymerge
%% prefers entries from its first list)
set_defaults(Opts) ->
    SortedOpts = lists:ukeysort(1, Opts),
    SortedDefaults = lists:ukeysort(1, ?OPTION_DEFAULTS),
    lists:ukeymerge(1, SortedOpts, SortedDefaults).
-spec set_options(open_options()) -> {#inker_options{}, #penciller_options{}}.
%% @doc
%% Take the passed in property list of operations and extract out any relevant
%% options to the Inker or the Penciller
set_options(Opts) ->
    % Journal size is jittered per-process so that multiple stores do not
    % roll their journal files at the same time
    MaxJournalSize0 =
        min(?ABSOLUTEMAX_JOURNALSIZE,
            proplists:get_value(max_journalsize, Opts)),
    JournalSizeJitter = MaxJournalSize0 div (100 div ?JOURNAL_SIZE_JITTER),
    MaxJournalSize =
        min(?ABSOLUTEMAX_JOURNALSIZE,
            MaxJournalSize0 - erlang:phash2(self()) rem JournalSizeJitter),
    MaxJournalCount0 =
        proplists:get_value(max_journalobjectcount, Opts),
    JournalCountJitter = MaxJournalCount0 div (100 div ?JOURNAL_SIZE_JITTER),
    MaxJournalCount =
        MaxJournalCount0 - erlang:phash2(self()) rem JournalCountJitter,
    SyncStrat = proplists:get_value(sync_strategy, Opts),
    WRP = proplists:get_value(waste_retention_period, Opts),
    SnapTimeoutShort = proplists:get_value(snapshot_timeout_short, Opts),
    SnapTimeoutLong = proplists:get_value(snapshot_timeout_long, Opts),
    AltStrategy = proplists:get_value(reload_strategy, Opts),
    ReloadStrategy = leveled_codec:inker_reload_strategy(AltStrategy),
    PCLL0CacheSize =
        max(?MIN_PCL_CACHE_SIZE,
            proplists:get_value(max_pencillercachesize, Opts)),
    RootPath = proplists:get_value(root_path, Opts),
    JournalFP = filename:join(RootPath, ?JOURNAL_FP),
    LedgerFP = filename:join(RootPath, ?LEDGER_FP),
    ok = filelib:ensure_dir(JournalFP),
    ok = filelib:ensure_dir(LedgerFP),
    % Sanity-check the compaction percentages - crash on start (badmatch)
    % rather than run with invalid settings
    SFL_CompPerc =
        proplists:get_value(singlefile_compactionpercentage, Opts),
    MRL_CompPerc =
        proplists:get_value(maxrunlength_compactionpercentage, Opts),
    true = MRL_CompPerc >= SFL_CompPerc,
    true = 100.0 >= MRL_CompPerc,
    true = SFL_CompPerc >= 0.0,
    CompressionMethod = proplists:get_value(compression_method, Opts),
    CompressOnReceipt =
        case proplists:get_value(compression_point, Opts) of
            on_receipt ->
                % Note this will add measurable delay to PUT time
                % https://github.com/martinsumner/leveled/issues/95
                true;
            on_compact ->
                % If using lz4 this is not recommended
                false
        end,
    MaxSSTSlots = proplists:get_value(max_sstslots, Opts),
    {#inker_options{root_path = JournalFP,
                    reload_strategy = ReloadStrategy,
                    max_run_length = proplists:get_value(max_run_length, Opts),
                    singlefile_compactionperc = SFL_CompPerc,
                    maxrunlength_compactionperc = MRL_CompPerc,
                    waste_retention_period = WRP,
                    snaptimeout_long = SnapTimeoutLong,
                    compression_method = CompressionMethod,
                    compress_on_receipt = CompressOnReceipt,
                    cdb_options =
                        #cdb_options{max_size=MaxJournalSize,
                                        max_count=MaxJournalCount,
                                        binary_mode=true,
                                        sync_strategy=SyncStrat,
                                        log_options=leveled_log:get_opts()}},
        #penciller_options{root_path = LedgerFP,
                            max_inmemory_tablesize = PCLL0CacheSize,
                            levelzero_cointoss = true,
                            snaptimeout_short = SnapTimeoutShort,
                            snaptimeout_long = SnapTimeoutLong,
                            sst_options =
                                #sst_options{press_method=CompressionMethod,
                                                log_options=leveled_log:get_opts(),
                                                max_sstslots=MaxSSTSlots}}
    }.
-spec return_snapfun(book_state(), store|ledger,
                        tuple()|no_lookup|undefined,
                        boolean(), boolean()) -> fun().
%% @doc
%% Generates a function from which a snapshot can be created.  The primary
%% factor here is the SnapPreFold boolean.  If this is true then the snapshot
%% will be taken before the Fold function is returned.  If SnapPreFold is
%% false then the snapshot will be taken when the Fold function is called.
%%
%% SnapPrefold is to be used when the intention is to queue the fold, and so
%% calling of the fold may be delayed, but it is still desired that the fold
%% represent the point in time that the query was requested.
return_snapfun(State, SnapType, Query, LongRunning, SnapPreFold) ->
    case SnapPreFold of
        true ->
            % Snapshot now - the returned fun simply hands back the
            % already-created snapshot pids
            {ok, LS, JS} = snapshot_store(State, SnapType, Query, LongRunning),
            fun() -> {ok, LS, JS} end;
        false ->
            Self = self(),
            % Timeout will be ignored, as will Requestor
            %
            % This uses the external snapshot - as the snapshot will need
            % to have consistent state between Bookie and Penciller when
            % it is made.
            fun() -> book_snapshot(Self, SnapType, Query, LongRunning) end
    end.
-spec snaptype_by_presence(boolean()) -> store|ledger.
%% @doc
%% Folds that traverse over object heads, may also either require to return
%% the object, or at least confirm the object is present in the Ledger.  This
%% is achieved by enabling presence - and this will change the type of
%% snapshot to one that covers the whole store (i.e. both ledger and journal),
%% rather than just the ledger.
snaptype_by_presence(PresenceRequired) ->
    case PresenceRequired of
        true ->
            store;
        false ->
            ledger
    end.
-spec get_runner(book_state(), tuple()) -> {async, fun()}.
%% @doc
%% Get an {async, Runner} for a given fold type.  Fold types have different
%% tuple inputs
get_runner(State, {index_query, Constraint, FoldAccT, Range, TermHandling}) ->
    {IdxFld, StartT, EndT} = Range,
    % Constraint may be just a Bucket, or {Bucket, StartObjectKey}
    {Bucket, ObjKey0} =
        case Constraint of
            {B, SK} ->
                {B, SK};
            B ->
                {B, null}
        end,
    StartKey =
        leveled_codec:to_ledgerkey(Bucket, ObjKey0, ?IDX_TAG, IdxFld, StartT),
    EndKey =
        leveled_codec:to_ledgerkey(Bucket, null, ?IDX_TAG, IdxFld, EndT),
    SnapFun = return_snapfun(State, ledger, {StartKey, EndKey}, false, false),
    leveled_runner:index_query(SnapFun,
                                {StartKey, EndKey, TermHandling},
                                FoldAccT);
get_runner(State, {keylist, Tag, FoldAccT}) ->
    SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
    leveled_runner:bucketkey_query(SnapFun, Tag, null, FoldAccT);
get_runner(State, {keylist, Tag, Bucket, FoldAccT}) ->
    SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
    leveled_runner:bucketkey_query(SnapFun, Tag, Bucket, FoldAccT);
get_runner(State, {keylist, Tag, Bucket, KeyRange, FoldAccT, TermRegex}) ->
    SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
    leveled_runner:bucketkey_query(SnapFun,
                                    Tag, Bucket, KeyRange,
                                    FoldAccT, TermRegex);
%% Set of runners for object or metadata folds
get_runner(State,
            {foldheads_allkeys,
                Tag, FoldFun,
                JournalCheck, SnapPreFold, SegmentList,
                LastModRange, MaxObjectCount}) ->
    SnapType = snaptype_by_presence(JournalCheck),
    SnapFun = return_snapfun(State, SnapType, no_lookup, true, SnapPreFold),
    leveled_runner:foldheads_allkeys(SnapFun,
                                        Tag, FoldFun,
                                        JournalCheck, SegmentList,
                                        LastModRange, MaxObjectCount);
get_runner(State,
            {foldobjects_allkeys, Tag, FoldFun, SnapPreFold}) ->
    % Defaults to key_order traversal
    get_runner(State,
                {foldobjects_allkeys, Tag, FoldFun, SnapPreFold, key_order});
get_runner(State,
            {foldobjects_allkeys, Tag, FoldFun, SnapPreFold, key_order}) ->
    SnapFun = return_snapfun(State, store, no_lookup, true, SnapPreFold),
    leveled_runner:foldobjects_allkeys(SnapFun, Tag, FoldFun, key_order);
get_runner(State,
            {foldobjects_allkeys, Tag, FoldFun, SnapPreFold, sqn_order}) ->
    SnapFun = return_snapfun(State, store, undefined, true, SnapPreFold),
    leveled_runner:foldobjects_allkeys(SnapFun, Tag, FoldFun, sqn_order);
get_runner(State,
            {foldheads_bybucket,
                Tag,
                BucketList, bucket_list,
                FoldFun,
                JournalCheck, SnapPreFold,
                SegmentList, LastModRange, MaxObjectCount}) ->
    % Fold over whole-bucket ranges for each bucket in the list
    KeyRangeFun =
        fun(Bucket) ->
            {StartKey, EndKey, _} = return_ledger_keyrange(Tag, Bucket, all),
            {StartKey, EndKey}
        end,
    SnapType = snaptype_by_presence(JournalCheck),
    SnapFun = return_snapfun(State, SnapType, no_lookup, true, SnapPreFold),
    leveled_runner:foldheads_bybucket(SnapFun,
                                        Tag,
                                        lists:map(KeyRangeFun, BucketList),
                                        FoldFun,
                                        JournalCheck,
                                        SegmentList,
                                        LastModRange, MaxObjectCount);
get_runner(State,
            {foldheads_bybucket,
                Tag,
                Bucket, KeyRange,
                FoldFun,
                JournalCheck, SnapPreFold,
                SegmentList, LastModRange, MaxObjectCount}) ->
    {StartKey, EndKey, SnapQ} = return_ledger_keyrange(Tag, Bucket, KeyRange),
    SnapType = snaptype_by_presence(JournalCheck),
    SnapFun = return_snapfun(State, SnapType, SnapQ, true, SnapPreFold),
    leveled_runner:foldheads_bybucket(SnapFun,
                                        Tag,
                                        [{StartKey, EndKey}],
                                        FoldFun,
                                        JournalCheck,
                                        SegmentList,
                                        LastModRange, MaxObjectCount);
get_runner(State,
            {foldobjects_bybucket,
                Tag, Bucket, KeyRange,
                FoldFun,
                SnapPreFold}) ->
    {StartKey, EndKey, SnapQ} = return_ledger_keyrange(Tag, Bucket, KeyRange),
    SnapFun = return_snapfun(State, store, SnapQ, true, SnapPreFold),
    leveled_runner:foldobjects_bybucket(SnapFun,
                                        Tag,
                                        [{StartKey, EndKey}],
                                        FoldFun);
get_runner(State,
            {foldobjects_byindex,
                Tag, Bucket, {Field, FromTerm, ToTerm},
                FoldObjectsFun,
                SnapPreFold}) ->
    SnapFun = return_snapfun(State, store, no_lookup, true, SnapPreFold),
    leveled_runner:foldobjects_byindex(SnapFun,
                                        {Tag, Bucket, Field, FromTerm, ToTerm},
                                        FoldObjectsFun);
get_runner(State, {bucket_list, Tag, FoldAccT}) ->
    {FoldBucketsFun, Acc} = FoldAccT,
    SnapFun = return_snapfun(State, ledger, no_lookup, false, false),
    leveled_runner:bucket_list(SnapFun, Tag, FoldBucketsFun, Acc);
get_runner(State, {first_bucket, Tag, FoldAccT}) ->
    {FoldBucketsFun, Acc} = FoldAccT,
    SnapFun = return_snapfun(State, ledger, no_lookup, false, false),
    leveled_runner:bucket_list(SnapFun, Tag, FoldBucketsFun, Acc, 1);
%% Set of specific runners, primarily used as examples for tests
get_runner(State, DeprecatedQuery) ->
    get_deprecatedrunner(State, DeprecatedQuery).
-spec get_deprecatedrunner(book_state(), tuple()) -> {async, fun()}.
%% @doc
%% Get an {async, Runner} for a given fold type.  Fold types have different
%% tuple inputs.  These folds are currently used in tests, but are deprecated.
%% Most of these folds should be achievable through other available folds.
get_deprecatedrunner(State, {bucket_stats, Bucket}) ->
    SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
    leveled_runner:bucket_sizestats(SnapFun, Bucket, ?STD_TAG);
get_deprecatedrunner(State, {riakbucket_stats, Bucket}) ->
    SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
    leveled_runner:bucket_sizestats(SnapFun, Bucket, ?RIAK_TAG);
get_deprecatedrunner(State, {hashlist_query, Tag, JournalCheck}) ->
    SnapType = snaptype_by_presence(JournalCheck),
    SnapFun = return_snapfun(State, SnapType, no_lookup, true, true),
    leveled_runner:hashlist_query(SnapFun, Tag, JournalCheck);
get_deprecatedrunner(State,
                        {tictactree_obj,
                            {Tag, Bucket, StartK, EndK, JournalCheck},
                            TreeSize,
                            PartitionFilter}) ->
    SnapType = snaptype_by_presence(JournalCheck),
    SnapFun = return_snapfun(State, SnapType, no_lookup, true, true),
    leveled_runner:tictactree(SnapFun,
                                {Tag, Bucket, {StartK, EndK}},
                                JournalCheck,
                                TreeSize,
                                PartitionFilter);
get_deprecatedrunner(State,
                        {tictactree_idx,
                            {Bucket, IdxField, StartK, EndK},
                            TreeSize,
                            PartitionFilter}) ->
    % Index tree queries never check the journal
    SnapFun = return_snapfun(State, ledger, no_lookup, true, true),
    leveled_runner:tictactree(SnapFun,
                                {?IDX_TAG, Bucket, {IdxField, StartK, EndK}},
                                false,
                                TreeSize,
                                PartitionFilter).
-spec return_ledger_keyrange(atom(), any(), tuple()|all) ->
                                {tuple(), tuple(), tuple()|no_lookup}.
%% @doc
%% Convert a range of binary keys into a ledger key range, returning
%% {StartLK, EndLK, Query} where Query is to indicate whether the query
%% range is worth using to minimise the cost of the snapshot.  Open ranges
%% (all, or an end term of <<"$all">>) yield no_lookup
return_ledger_keyrange(Tag, Bucket, all) ->
    {leveled_codec:to_ledgerkey(Bucket, null, Tag),
        leveled_codec:to_ledgerkey(Bucket, null, Tag),
        no_lookup};
return_ledger_keyrange(Tag, Bucket, {StartTerm, <<"$all">>}) ->
    {leveled_codec:to_ledgerkey(Bucket, StartTerm, Tag),
        leveled_codec:to_ledgerkey(Bucket, null, Tag),
        no_lookup};
return_ledger_keyrange(Tag, Bucket, {StartTerm, EndTerm}) ->
    % A bounded range - worth passing as the snapshot query
    StartKey = leveled_codec:to_ledgerkey(Bucket, StartTerm, Tag),
    EndKey = leveled_codec:to_ledgerkey(Bucket, EndTerm, Tag),
    {StartKey, EndKey, {StartKey, EndKey}}.
-spec maybe_longrunning(erlang:timestamp(), atom()) -> ok.
%% @doc
%% Check the length of time an operation (named by Aspect) has taken, and
%% see if it has crossed the long running threshold.  If so log to indicate
%% a long running event has occurred.
maybe_longrunning(SW, Aspect) ->
    TimeTaken = timer:now_diff(os:timestamp(), SW),
    case TimeTaken > ?LONG_RUNNING of
        true ->
            leveled_log:log("B0013", [TimeTaken, Aspect]);
        false ->
            ok
    end.
-spec readycache_forsnapshot(ledger_cache(), tuple()|no_lookup|undefined)
                                -> ledger_cache().
%% @doc
%% Strip the ledger cache back to only the relevant information needed in
%% the query, and to make the cache a snapshot (and so not subject to changes
%% such as additions to the ets table)
readycache_forsnapshot(LedgerCache, {StartKey, EndKey}) ->
    % Range query - only keys within the range need be copied out of the
    % ets table
    {KL, MinSQN, MaxSQN} = scan_table(LedgerCache#ledger_cache.mem,
                                        StartKey,
                                        EndKey),
    case KL of
        [] ->
            #ledger_cache{loader=empty_cache,
                            index=empty_index,
                            min_sqn=MinSQN,
                            max_sqn=MaxSQN};
        _ ->
            #ledger_cache{loader=leveled_tree:from_orderedlist(KL,
                                                                ?CACHE_TYPE),
                            index=empty_index,
                            min_sqn=MinSQN,
                            max_sqn=MaxSQN}
    end;
readycache_forsnapshot(LedgerCache, Query) ->
    % Need to convert the Ledger Cache away from using the ETS table
    Tree = leveled_tree:from_orderedset(LedgerCache#ledger_cache.mem,
                                        ?CACHE_TYPE),
    case leveled_tree:tsize(Tree) of
        0 ->
            #ledger_cache{loader=empty_cache,
                            index=empty_index,
                            min_sqn=LedgerCache#ledger_cache.min_sqn,
                            max_sqn=LedgerCache#ledger_cache.max_sqn};
        _ ->
            % The index is only relevant when the snapshot may be used for
            % direct lookups
            Idx =
                case Query of
                    no_lookup ->
                        empty_index;
                    _ ->
                        LedgerCache#ledger_cache.index
                end,
            #ledger_cache{loader=Tree,
                            index=Idx,
                            min_sqn=LedgerCache#ledger_cache.min_sqn,
                            max_sqn=LedgerCache#ledger_cache.max_sqn}
    end.
-spec scan_table(ets:tab(),
                    leveled_codec:ledger_key(), leveled_codec:ledger_key())
                        -> {list(leveled_codec:ledger_kv()),
                            non_neg_integer()|infinity,
                            non_neg_integer()}.
%% @doc
%% Walk the ets table across a range of keys (start inclusive), returning
%% the key/values found together with the minimum and maximum sequence
%% numbers seen.  The SQN bounds are used as a safety check when loading
%% these results into the penciller snapshot.
scan_table(Table, StartKey, EndKey) ->
    case ets:lookup(Table, StartKey) of
        [{StartKey, StartVal}] ->
            %% Start key is present - seed the accumulator and SQN bounds
            FirstKV = {StartKey, StartVal},
            SQN = leveled_codec:strip_to_seqonly(FirstKV),
            scan_table(Table, StartKey, EndKey, [FirstKV], SQN, SQN);
        [] ->
            scan_table(Table, StartKey, EndKey, [], infinity, 0)
    end.

scan_table(Table, CurrentKey, EndKey, Acc, MinSQN, MaxSQN) ->
    case ets:next(Table, CurrentKey) of
        '$end_of_table' ->
            {lists:reverse(Acc), MinSQN, MaxSQN};
        NextKey ->
            case leveled_codec:endkey_passed(EndKey, NextKey) of
                true ->
                    %% Beyond the end of the range - stop scanning
                    {lists:reverse(Acc), MinSQN, MaxSQN};
                false ->
                    [{NextKey, NextVal}] = ets:lookup(Table, NextKey),
                    NextKV = {NextKey, NextVal},
                    SQN = leveled_codec:strip_to_seqonly(NextKV),
                    scan_table(Table,
                                NextKey,
                                EndKey,
                                [NextKV|Acc],
                                min(MinSQN, SQN),
                                max(MaxSQN, SQN))
            end
    end.
-spec fetch_head(leveled_codec:ledger_key(), pid(), ledger_cache(),
                    cache_ratio()) ->
                        {not_present|leveled_codec:ledger_value(),
                            cache_ratio()}.
%% @doc
%% Fetch only the head of the object from the Ledger (or the bookie's recent
%% ledger cache if it has just been updated). not_present is returned if the
%% Key is not found
fetch_head(Key, Penciller, LedgerCache, CacheRatio) ->
    %% HeadOnly defaults to false - so the penciller's L0 index will be used
    fetch_head(Key, Penciller, LedgerCache, CacheRatio, false).
-spec fetch_head(leveled_codec:ledger_key(), pid(), ledger_cache(),
                    cache_ratio(), boolean())
                        -> {not_present|leveled_codec:ledger_value(),
                            cache_ratio()}.
%% @doc
%% As fetch_head/4 but with explicit control over use of the L0 index.  The
%% L0 index needs to be bypassed when running head_only, as object specs
%% don't get an addition on the L0 index.  The cache_ratio counters track
%% {reads, cache hits, heads found}.
fetch_head(Key, Penciller, LedgerCache, {RC, CC, HC}, HeadOnly) ->
    SW = os:timestamp(),
    CachedKV =
        case LedgerCache#ledger_cache.mem of
            undefined ->
                [];
            Tab ->
                ets:lookup(Tab, Key)
        end,
    case CachedKV of
        [{Key, Head}] ->
            %% Served straight from the bookie's recent-change cache
            {Head, {RC + 1, CC + 1, HC + 1}};
        [] ->
            Hash = leveled_codec:segment_hash(Key),
            %% not HeadOnly - don't use the L0 index in head_only mode
            case leveled_penciller:pcl_fetch(Penciller, Key, Hash,
                                                not HeadOnly) of
                {Key, Head} ->
                    maybe_longrunning(SW, pcl_head),
                    {Head, {RC + 1, CC, HC + 1}};
                not_present ->
                    maybe_longrunning(SW, pcl_head),
                    {not_present, {RC + 1, CC, HC}}
            end
    end.
-spec journal_notfound(integer(), pid(), leveled_codec:ledger_key(), integer())
                                                -> {boolean(), integer()}.
%% @doc Check to see if the item is not_found in the journal.  If it is found
%% return false, and drop the counter that represents the frequency this check
%% should be made.  If it is not_found, this is not expected so up the check
%% frequency to the maximum value
journal_notfound(CheckFrequency, Inker, LK, SQN) ->
    KeyCheckFun =
        fun() -> leveled_inker:ink_keycheck(Inker, LK, SQN) end,
    check_notfound(CheckFrequency, KeyCheckFun).
-spec check_notfound(integer(), fun(() -> probably|missing)) ->
                                            {boolean(), integer()}.
%% @doc Probabilistically run CheckFun - only when a random draw falls at or
%% below CheckFrequency.  Return whether the item was missing, plus the
%% updated check frequency (decremented on probable presence, reset to the
%% maximum on a miss).
check_notfound(CheckFrequency, CheckFun) ->
    case leveled_rand:uniform(?MAX_KEYCHECK_FREQUENCY) =< CheckFrequency of
        true ->
            case CheckFun() of
                probably ->
                    NewFrequency =
                        max(?MIN_KEYCHECK_FREQUENCY, CheckFrequency - 1),
                    {false, NewFrequency};
                missing ->
                    {true, ?MAX_KEYCHECK_FREQUENCY}
            end;
        false ->
            %% Not checking this time - frequency unchanged
            {false, CheckFrequency}
    end.
-spec preparefor_ledgercache(leveled_codec:journal_key_tag()|null,
                                leveled_codec:ledger_key()|?DUMMY,
                                non_neg_integer(), any(), integer(),
                                leveled_codec:journal_keychanges())
                                    -> {leveled_codec:segment_hash(),
                                        non_neg_integer(),
                                        list(leveled_codec:ledger_kv())}.
%% @doc
%% Prepare an object and its related key changes for addition to the Ledger
%% via the Ledger Cache.
preparefor_ledgercache(?INKT_MPUT,
                        ?DUMMY, SQN, _O, _S, {ObjSpecs, TTL}) ->
    %% Batch of object specs (head_only mode) - no direct lookup required
    {no_lookup, SQN, leveled_codec:obj_objectspecs(ObjSpecs, SQN, TTL)};
preparefor_ledgercache(?INKT_KEYD,
                        LedgerKey, SQN, _Obj, _Size, {IdxSpecs, TTL}) ->
    %% Key delta - only the index changes are relevant
    {Bucket, Key} = leveled_codec:from_ledgerkey(LedgerKey),
    IdxChanges =
        leveled_codec:idx_indexspecs(IdxSpecs, Bucket, Key, SQN, TTL),
    {no_lookup, SQN, IdxChanges};
preparefor_ledgercache(_InkTag,
                        LedgerKey, SQN, Obj, Size, {IdxSpecs, TTL}) ->
    %% Standard object - the object's metadata is added alongside any
    %% index changes
    {Bucket, Key, MetaValue, {KeyH, _ObjH}, _LastMods} =
        leveled_codec:generate_ledgerkv(LedgerKey, SQN, Obj, Size, TTL),
    IdxChanges =
        leveled_codec:idx_indexspecs(IdxSpecs, Bucket, Key, SQN, TTL),
    {KeyH, SQN, [{LedgerKey, MetaValue}|IdxChanges]}.
-spec recalcfor_ledgercache(leveled_codec:journal_key_tag()|null,
                                leveled_codec:ledger_key()|?DUMMY,
                                non_neg_integer(), any(), integer(),
                                leveled_codec:journal_keychanges(),
                                ledger_cache(),
                                pid())
                                    -> {leveled_codec:segment_hash(),
                                        non_neg_integer(),
                                        list(leveled_codec:ledger_kv())}.
%% @doc
%% When loading from the journal to the ledger, may hit a key which has the
%% `recalc` strategy. Such a key needs to recalculate the key changes by
%% comparison with the current state of the ledger, assuming it is a full
%% journal entry (i.e. KeyDeltas which may be a result of previously running
%% with a retain strategy should be ignored).
recalcfor_ledgercache(InkTag,
                        _LedgerKey, SQN, _Obj, _Size, {_IdxSpecs, _TTL},
                        _LedgerCache,
                        _Penciller)
                    when InkTag == ?INKT_MPUT; InkTag == ?INKT_KEYD ->
    %% Not a full journal entry - no recalculation is possible, so no
    %% key changes are produced
    {no_lookup, SQN, []};
recalcfor_ledgercache(_InkTag,
                        LK, SQN, Obj, Size, {_IgnoreJournalIdxSpecs, TTL},
                        LedgerCache,
                        Penciller) ->
    {Bucket, Key, MetaValue, {KeyH, _ObjH}, _LastMods} =
        leveled_codec:generate_ledgerkv(LK, SQN, Obj, Size, TTL),
    %% Find the current version of the object - checking first in the
    %% pending load queue of the ledger cache, then falling back to the
    %% penciller
    OldObject =
        case check_in_ledgercache(LK, KeyH, LedgerCache, loader) of
            false ->
                leveled_penciller:pcl_fetch(Penciller, LK, KeyH, true);
            {value, KV} ->
                KV
        end,
    OldMetadata =
        case OldObject of
            not_present ->
                not_present;
            {LK, LV} ->
                leveled_codec:get_metadata(LV)
        end,
    UpdMetadata = leveled_codec:get_metadata(MetaValue),
    %% Index changes are recalculated by diffing the new metadata against
    %% the old, rather than trusting the specs stored in the journal
    IdxSpecs =
        leveled_head:diff_indexspecs(element(1, LK), UpdMetadata, OldMetadata),
    {KeyH,
        SQN,
        [{LK, MetaValue}]
            ++ leveled_codec:idx_indexspecs(IdxSpecs, Bucket, Key, SQN, TTL)}.
-spec addto_ledgercache({leveled_codec:segment_hash(),
                            non_neg_integer(),
                            list(leveled_codec:ledger_kv())},
                        ledger_cache())
                            -> ledger_cache().
%% @doc
%% Add a set of changes associated with a single sequence number (journal
%% update) and key to the ledger cache.  If the changes are not to be looked
%% up directly, then they will not be indexed to accelerate lookup.
addto_ledgercache({H, SQN, KeyChanges}, Cache) ->
    #ledger_cache{mem = Mem,
                    index = Index,
                    min_sqn = MinSQN,
                    max_sqn = MaxSQN} = Cache,
    ets:insert(Mem, KeyChanges),
    Cache#ledger_cache{index = leveled_pmem:prepare_for_index(Index, H),
                        min_sqn = min(SQN, MinSQN),
                        max_sqn = max(SQN, MaxSQN)}.
-spec addto_ledgercache({integer()|no_lookup,
                            integer(),
                            list(leveled_codec:ledger_kv())},
                        ledger_cache(),
                        loader)
                            -> ledger_cache().
%% @doc
%% Add a set of changes associated with a single sequence number (journal
%% update) to the ledger cache.  Used explicitly when loading the ledger
%% from the Journal (i.e. at startup) - in this case the ets insert can be
%% bypassed, as all changes will be flushed to the Penciller before the
%% load is complete.
addto_ledgercache({H, SQN, KeyChanges}, Cache, loader) ->
    #ledger_cache{index = Index,
                    load_queue = LoadQueue,
                    min_sqn = MinSQN,
                    max_sqn = MaxSQN} = Cache,
    Cache#ledger_cache{index = leveled_pmem:prepare_for_index(Index, H),
                        load_queue = KeyChanges ++ LoadQueue,
                        min_sqn = min(SQN, MinSQN),
                        max_sqn = max(SQN, MaxSQN)}.
-spec check_in_ledgercache(leveled_codec:ledger_key(),
                            leveled_codec:segment_hash(),
                            ledger_cache(),
                            loader) ->
                                false | {value, leveled_codec:ledger_kv()}.
%% @doc
%% Check the ledger cache for a Key, when the ledger cache is in loader mode
%% and so is populating a queue not an ets table.  The index is checked
%% first to avoid scanning the queue when the hash is certainly absent.
check_in_ledgercache(PK, Hash, Cache, loader) ->
    case leveled_pmem:check_index(Hash, Cache#ledger_cache.index) of
        [] ->
            false;
        _ ->
            KeyMatchFun = fun({K, _V}) -> K == PK end,
            search(KeyMatchFun,
                    lists:reverse(Cache#ledger_cache.load_queue))
    end.
-spec search(fun((any()) -> boolean()), list()) -> {value, any()}|false.
%% @doc
%% Return {value, Element} for the first element in the list for which
%% Pred(Element) is true, or false when no element satisfies the predicate.
search(Pred, []) when is_function(Pred, 1) ->
    false;
search(Pred, [Head|Rest]) ->
    case Pred(Head) of
        false -> search(Pred, Rest);
        true -> {value, Head}
    end.
-spec maybepush_ledgercache(integer(), ledger_cache(), pid())
                                            -> {ok|returned, ledger_cache()}.
%% @doc
%% Following an update to the ledger cache, check if this is now big enough
%% to be pushed down to the Penciller.  There is some random jittering here,
%% to prevent coordination across leveled instances (e.g. when running in
%% Riak).
%%
%% The penciller may be too busy, as the LSM tree is backed up with merge
%% activity.  In this case the update is not made and 'returned' not ok is
%% set in the reply.  Try again later when it isn't busy (and also
%% potentially implement a slow_offer state to slow down the pace at which
%% PUTs are being received).
maybepush_ledgercache(MaxCacheSize, Cache, Penciller) ->
    Tab = Cache#ledger_cache.mem,
    case maybe_withjitter(ets:info(Tab, size), MaxCacheSize) of
        true ->
            CacheToLoad = {Tab,
                            Cache#ledger_cache.index,
                            Cache#ledger_cache.min_sqn,
                            Cache#ledger_cache.max_sqn},
            case leveled_penciller:pcl_pushmem(Penciller, CacheToLoad) of
                ok ->
                    %% Push accepted - start afresh with an empty cache
                    %% over a brand new ets table
                    true = ets:delete(Tab),
                    FreshCache = #ledger_cache{},
                    {ok,
                        FreshCache#ledger_cache{
                            mem = ets:new(mem, [ordered_set])}};
                returned ->
                    %% Penciller busy - keep the cache and retry later
                    {returned, Cache}
            end;
        false ->
            {ok, Cache}
    end.
-spec maybe_withjitter(integer(), integer()) -> boolean().
%% @doc
%% Push down randomly once the cache exceeds the maximum size - the further
%% the cache size is beyond the maximum, the more likely the push.
maybe_withjitter(CacheSize, MaxCacheSize) when CacheSize > MaxCacheSize ->
    Jitter = leveled_rand:uniform(4 * MaxCacheSize),
    CacheSize - MaxCacheSize > Jitter;
maybe_withjitter(_CacheSize, _MaxCacheSize) ->
    false.
-spec get_loadfun(leveled_codec:compaction_strategy(), pid(), book_state())
                                                    -> initial_loadfun().
%% @doc
%% The LoadFun will be used by the Inker when walking across the Journal to
%% load the Penciller at startup.  The fold accumulator is a triple of
%% {MinSQN, MaxSQN, LedgerCache}; the fun signals loop/stop to control the
%% walk.
get_loadfun(ReloadStrat, Penciller, _State) ->
    fun(KeyInJournal, ValueInJournal, _Pos, Acc0, ExtractFun) ->
        {MinSQN, MaxSQN, LedgerCache} = Acc0,
        {SQN, InkTag, PK} = KeyInJournal,
        case SQN of
            SQN when SQN < MinSQN ->
                %% Below the ledger's starting point - skip this entry
                {loop, Acc0};
            SQN when SQN > MaxSQN ->
                %% Should not walk beyond the expected maximum SQN
                leveled_log:log("B0007", [MaxSQN, SQN]),
                {stop, Acc0};
            _ ->
                {VBin, ValSize} = ExtractFun(ValueInJournal),
                % VBin may already be a term
                {Obj, IdxSpecs} = leveled_codec:split_inkvalue(VBin),
                %% recalc strategy recomputes changes against current
                %% ledger state; otherwise the specs recorded in the
                %% journal are used directly
                Chngs =
                    case leveled_codec:get_tagstrategy(PK, ReloadStrat) of
                        recalc ->
                            recalcfor_ledgercache(InkTag, PK, SQN,
                                                    Obj, ValSize, IdxSpecs,
                                                    LedgerCache,
                                                    Penciller);
                        _ ->
                            preparefor_ledgercache(InkTag, PK, SQN,
                                                    Obj, ValSize, IdxSpecs)
                    end,
                case SQN of
                    MaxSQN ->
                        %% Reached the final expected SQN - stop the fold
                        leveled_log:log("B0006", [SQN]),
                        LC0 = addto_ledgercache(Chngs, LedgerCache, loader),
                        {stop, {MinSQN, MaxSQN, LC0}};
                    _ ->
                        LC0 = addto_ledgercache(Chngs, LedgerCache, loader),
                        {loop, {MinSQN, MaxSQN, LC0}}
                end
        end
    end.
-spec delete_path(file:name_all()) -> ok | {error, atom()}.
%% @doc
%% Best-effort removal of a directory and the files directly within it.
%% Individual file deletes are deliberately unchecked (matching previous
%% behaviour - a failed delete will instead surface when the directory
%% itself cannot be removed), but the result of removing the directory is
%% returned to the caller.
delete_path(DirPath) ->
    ok = filelib:ensure_dir(DirPath),
    {ok, Files} = file:list_dir(DirPath),
    %% lists:foreach makes the side-effect intent explicit, rather than a
    %% list comprehension whose result is discarded
    lists:foreach(
        fun(File) ->
            _ = file:delete(filename:join([DirPath, File]))
        end,
        Files),
    file:del_dir(DirPath).
%%%============================================================================
%%% Timing Functions
%%%============================================================================
-spec update_statetimings(timing_types(),
                    put_timings()|get_timings()|fold_timings()|head_timings(),
                    integer())
                        ->
                            {put_timings()|get_timings()|fold_timings()|head_timings(),
                                integer()}.
%% @doc
%% The timings state is either in countdown to the next set of samples, or
%% we are actively collecting a sample.  Active collection takes place when
%% the countdown is 0.  Once the sample has reached the expected count then
%% there is a log of that sample, and the countdown is restarted.
%%
%% Outside of sample windows the timings object should be set to the atom
%% no_timing.  no_timing is a valid state for each timings type.
update_statetimings(head, no_timing, 0) ->
    {#head_timings{}, 0};
update_statetimings(put, no_timing, 0) ->
    {#put_timings{}, 0};
update_statetimings(get, no_timing, 0) ->
    {#get_timings{}, 0};
update_statetimings(fold, no_timing, 0) ->
    {#fold_timings{}, 0};
update_statetimings(head, Timings, 0) ->
    case Timings#head_timings.sample_count >= ?TIMING_SAMPLESIZE of
        true ->
            log_timings(head, Timings),
            {no_timing, leveled_rand:uniform(10 * ?TIMING_SAMPLECOUNTDOWN)};
        false ->
            {Timings, 0}
    end;
update_statetimings(put, Timings, 0) ->
    case Timings#put_timings.sample_count >= ?TIMING_SAMPLESIZE of
        true ->
            log_timings(put, Timings),
            {no_timing, leveled_rand:uniform(2 * ?TIMING_SAMPLECOUNTDOWN)};
        false ->
            {Timings, 0}
    end;
update_statetimings(get, Timings, 0) ->
    case Timings#get_timings.sample_count >= ?TIMING_SAMPLESIZE of
        true ->
            log_timings(get, Timings),
            {no_timing, leveled_rand:uniform(2 * ?TIMING_SAMPLECOUNTDOWN)};
        false ->
            {Timings, 0}
    end;
update_statetimings(fold, Timings, 0) ->
    %% Folds are sampled (and the countdown restarted) at a tenth of the
    %% scale used for the other operation types
    case Timings#fold_timings.sample_count >= (?TIMING_SAMPLESIZE div 10) of
        true ->
            log_timings(fold, Timings),
            {no_timing,
                leveled_rand:uniform(2 * (?TIMING_SAMPLECOUNTDOWN div 10))};
        false ->
            {Timings, 0}
    end;
update_statetimings(_Type, no_timing, N) ->
    {no_timing, N - 1}.
%% @doc
%% Emit the accumulated timing sample for the given timing type - the
%% sample count plus the accumulated totals relevant to that type.
log_timings(head, Timings) ->
    leveled_log:log("B0018",
                        [Timings#head_timings.sample_count,
                            Timings#head_timings.pcl_time,
                            Timings#head_timings.buildhead_time]);
log_timings(put, Timings) ->
    leveled_log:log("B0015", [Timings#put_timings.sample_count,
                                Timings#put_timings.mem_time,
                                Timings#put_timings.ink_time,
                                Timings#put_timings.total_size]);
log_timings(get, Timings) ->
    leveled_log:log("B0016", [Timings#get_timings.sample_count,
                                Timings#get_timings.head_time,
                                Timings#get_timings.body_time,
                                Timings#get_timings.fetch_count]);
log_timings(fold, Timings) ->
    leveled_log:log("B0017", [Timings#fold_timings.sample_count,
                                Timings#fold_timings.setup_time]).
%% @doc
%% Add the time elapsed since SW (in microseconds) to the accumulator for
%% the given operation stage, incrementing the sample count on the final
%% stage of each operation.  Returns a fresh start-time for the next stage,
%% or no_timing when this was the operation's final stage.  The first
%% clause makes the update a no-op outside of sample windows.
update_timings(_SW, _Stage, no_timing) ->
    {no_timing, no_timing};
update_timings(SW, {head, Stage}, Timings) ->
    Timer = timer:now_diff(os:timestamp(), SW),
    Timings0 =
        case Stage of
            pcl ->
                PCT = Timings#head_timings.pcl_time + Timer,
                Timings#head_timings{pcl_time = PCT};
            rsp ->
                %% rsp is the final stage of a head - count the sample
                BHT = Timings#head_timings.buildhead_time + Timer,
                CNT = Timings#head_timings.sample_count + 1,
                Timings#head_timings{buildhead_time = BHT, sample_count = CNT}
        end,
    {os:timestamp(), Timings0};
update_timings(SW, {put, Stage}, Timings) ->
    Timer = timer:now_diff(os:timestamp(), SW),
    Timings0 =
        case Stage of
            {inker, ObjectSize} ->
                INT = Timings#put_timings.ink_time + Timer,
                TSZ = Timings#put_timings.total_size + ObjectSize,
                Timings#put_timings{ink_time = INT, total_size = TSZ};
            mem ->
                %% mem is the final stage of a put - count the sample
                PCT = Timings#put_timings.mem_time + Timer,
                CNT = Timings#put_timings.sample_count + 1,
                Timings#put_timings{mem_time = PCT, sample_count = CNT}
        end,
    {os:timestamp(), Timings0};
update_timings(SW, {get, head}, Timings) ->
    Timer = timer:now_diff(os:timestamp(), SW),
    GHT = Timings#get_timings.head_time + Timer,
    CNT = Timings#get_timings.sample_count + 1,
    Timings0 = Timings#get_timings{head_time = GHT, sample_count = CNT},
    {os:timestamp(), Timings0};
update_timings(SW, {get, body}, Timings) ->
    %% body is the final stage of a get
    Timer = timer:now_diff(os:timestamp(), SW),
    GBT = Timings#get_timings.body_time + Timer,
    FCNT = Timings#get_timings.fetch_count + 1,
    Timings0 = Timings#get_timings{body_time = GBT, fetch_count = FCNT},
    {no_timing, Timings0};
update_timings(SW, {fold, setup}, Timings) ->
    %% Only fold setup is timed
    Timer = timer:now_diff(os:timestamp(), SW),
    FST = Timings#fold_timings.setup_time + Timer,
    CNT = Timings#fold_timings.sample_count + 1,
    Timings0 = Timings#fold_timings{setup_time = FST, sample_count = CNT},
    {no_timing, Timings0}.
-spec maybelog_cacheratio(cache_ratio(), boolean()) -> cache_ratio().
%% @doc
%% Log and reset the cache-ratio counters once the first counter reaches
%% the log point - but never when this is a snapshot (IsSnap true).
maybelog_cacheratio({?CACHE_LOGPOINT, CC, HC}, false) ->
    leveled_log:log("B0021", [?CACHE_LOGPOINT, CC, HC]),
    {0, 0, 0};
maybelog_cacheratio(CR, _IsSnap) ->
    CR.
%%%============================================================================
%%% Test
%%%============================================================================
-ifdef(TEST).
%% Clean out the journal and ledger test directories so each test starts
%% from an empty store, returning the root path in use
reset_filestructure() ->
    RootPath = "test/test_area",
    JournalPath = RootPath ++ "/" ++ ?JOURNAL_FP,
    LedgerPath = RootPath ++ "/" ++ ?LEDGER_FP,
    leveled_inker:clean_testdir(JournalPath),
    leveled_penciller:clean_testdir(LedgerPath),
    RootPath.
%% Generate a list of Count {Key, Value, IndexSpec} triples for test PUTs,
%% with keys numbered sequentially from KeyNumber (ascending order).
generate_multiple_objects(Count, KeyNumber) ->
    generate_multiple_objects(Count, KeyNumber, []).

generate_multiple_objects(0, _KeyNumber, ObjL) ->
    %% Accumulated in reverse - restore ascending key order
    lists:reverse(ObjL);
generate_multiple_objects(Count, KeyNumber, ObjL) ->
    Key = "Key" ++ integer_to_list(KeyNumber),
    Value = leveled_rand:rand_bytes(256),
    IndexSpec = [{add, "idx1_bin", "f" ++ integer_to_list(KeyNumber rem 10)}],
    %% Prepend (O(1)) rather than append with ++ (O(n) per step, O(n^2)
    %% overall) as previously
    generate_multiple_objects(Count - 1,
                                KeyNumber + 1,
                                [{Key, Value, IndexSpec}|ObjL]).
%% Objects stored with a future TTL should be readable via GET/HEAD and
%% index/bucket folds; objects stored with a past TTL should be invisible
%% to all of these, both before and after a restart.
ttl_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath}]),
    ObjL1 = generate_multiple_objects(100, 1),
    % Put in all the objects with a TTL in the future
    Future = leveled_util:integer_now() + 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Future) end,
                    ObjL1),
    lists:foreach(fun({K, V, _S}) ->
                        {ok, V} = book_get(Bookie1, "Bucket", K, ?STD_TAG)
                        end,
                    ObjL1),
    lists:foreach(fun({K, _V, _S}) ->
                        {ok, _} = book_head(Bookie1, "Bucket", K, ?STD_TAG)
                        end,
                    ObjL1),
    ObjL2 = generate_multiple_objects(100, 101),
    Past = leveled_util:integer_now() - 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Past) end,
                    ObjL2),
    lists:foreach(fun({K, _V, _S}) ->
                        not_found = book_get(Bookie1, "Bucket", K, ?STD_TAG)
                        end,
                    ObjL2),
    lists:foreach(fun({K, _V, _S}) ->
                        not_found = book_head(Bookie1, "Bucket", K, ?STD_TAG)
                        end,
                    ObjL2),
    % Only the 100 unexpired objects should appear in the bucket stats
    {async, BucketFolder} = book_returnfolder(Bookie1,
                                                {bucket_stats, "Bucket"}),
    {_Size, Count} = BucketFolder(),
    ?assertMatch(100, Count),
    FoldKeysFun = fun(_B, Item, FKFAcc) -> FKFAcc ++ [Item] end,
    {async,
        IndexFolder} = book_returnfolder(Bookie1,
                                            {index_query,
                                                "Bucket",
                                                {FoldKeysFun, []},
                                                {"idx1_bin", "f8", "f9"},
                                                {false, undefined}}),
    KeyList = IndexFolder(),
    ?assertMatch(20, length(KeyList)),
    {ok, Regex} = re:compile("f8"),
    {async,
        IndexFolderTR} = book_returnfolder(Bookie1,
                                            {index_query,
                                                "Bucket",
                                                {FoldKeysFun, []},
                                                {"idx1_bin", "f8", "f9"},
                                                {true, Regex}}),
    TermKeyList = IndexFolderTR(),
    ?assertMatch(10, length(TermKeyList)),
    % Restart the store and check expiry is still honoured
    ok = book_close(Bookie1),
    {ok, Bookie2} = book_start([{root_path, RootPath}]),
    {async,
        IndexFolderTR2} = book_returnfolder(Bookie2,
                                            {index_query,
                                                "Bucket",
                                                {FoldKeysFun, []},
                                                {"idx1_bin", "f7", "f9"},
                                                {false, Regex}}),
    KeyList2 = IndexFolderTR2(),
    ?assertMatch(10, length(KeyList2)),
    lists:foreach(fun({K, _V, _S}) ->
                        not_found = book_get(Bookie2, "Bucket", K, ?STD_TAG)
                        end,
                    ObjL2),
    lists:foreach(fun({K, _V, _S}) ->
                        not_found = book_head(Bookie2, "Bucket", K, ?STD_TAG)
                        end,
                    ObjL2),
    ok = book_close(Bookie2),
    reset_filestructure().
hashlist_query_test_() ->
    {timeout, 60, fun hashlist_query_testto/0}.

%% A hashlist query should return {Bucket, Key, Hash} for every unexpired
%% object, and return the same result after a restart with a different
%% journal size.
hashlist_query_testto() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                {max_journalsize, 1000000},
                                {cache_size, 500}]),
    ObjL1 = generate_multiple_objects(1200, 1),
    % Put in all the objects with a TTL in the future
    Future = leveled_util:integer_now() + 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Future) end,
                    ObjL1),
    ObjL2 = generate_multiple_objects(20, 1201),
    % Put in a few objects with a TTL in the past
    Past = leveled_util:integer_now() - 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Past) end,
                    ObjL2),
    % Scan the store for the Bucket, Keys and Hashes
    {async, HTFolder} = book_returnfolder(Bookie1,
                                            {hashlist_query,
                                                ?STD_TAG,
                                                false}),
    KeyHashList = HTFolder(),
    lists:foreach(fun({B, _K, H}) ->
                        ?assertMatch("Bucket", B),
                        ?assertMatch(true, is_integer(H))
                        end,
                    KeyHashList),
    ?assertMatch(1200, length(KeyHashList)),
    ok = book_close(Bookie1),
    % Restart with a different max journal size - the query result should
    % be unchanged
    {ok, Bookie2} = book_start([{root_path, RootPath},
                                    {max_journalsize, 200000},
                                    {cache_size, 500}]),
    {async, HTFolder2} = book_returnfolder(Bookie2,
                                            {hashlist_query,
                                                ?STD_TAG,
                                                false}),
    L0 = length(KeyHashList),
    HTR2 = HTFolder2(),
    ?assertMatch(L0, length(HTR2)),
    ?assertMatch(KeyHashList, HTR2),
    ok = book_close(Bookie2),
    reset_filestructure().
hashlist_query_withjournalcheck_test_() ->
    {timeout, 60, fun hashlist_query_withjournalcheck_testto/0}.

%% A hashlist query with the journal presence check enabled should return
%% the same results as the unchecked query.
hashlist_query_withjournalcheck_testto() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, 500}]),
    ObjL1 = generate_multiple_objects(800, 1),
    % Put in all the objects with a TTL in the future
    Future = leveled_util:integer_now() + 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Future) end,
                    ObjL1),
    {async, HTFolder1} = book_returnfolder(Bookie1,
                                            {hashlist_query,
                                                ?STD_TAG,
                                                false}),
    KeyHashList = HTFolder1(),
    {async, HTFolder2} = book_returnfolder(Bookie1,
                                            {hashlist_query,
                                                ?STD_TAG,
                                                true}),
    ?assertMatch(KeyHashList, HTFolder2()),
    ok = book_close(Bookie1),
    reset_filestructure().
foldobjects_vs_hashtree_test_() ->
    {timeout, 60, fun foldobjects_vs_hashtree_testto/0}.

%% A hashlist query, a fold over all objects, a head fold that fetches the
%% value via the proxy, and a head fold reading the hash from metadata
%% should all report the same key/hash set.
foldobjects_vs_hashtree_testto() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, 500}]),
    ObjL1 = generate_multiple_objects(800, 1),
    % Put in all the objects with a TTL in the future
    Future = leveled_util:integer_now() + 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "Bucket", K, V, S,
                                                        ?STD_TAG,
                                                        Future) end,
                    ObjL1),
    {async, HTFolder1} = book_returnfolder(Bookie1,
                                                {hashlist_query,
                                                    ?STD_TAG,
                                                    false}),
    KeyHashList1 = lists:usort(HTFolder1()),
    FoldObjectsFun = fun(B, K, V, Acc) ->
                            [{B, K, erlang:phash2(term_to_binary(V))}|Acc] end,
    {async, HTFolder2} = book_returnfolder(Bookie1,
                                            {foldobjects_allkeys,
                                                ?STD_TAG,
                                                FoldObjectsFun,
                                                true}),
    KeyHashList2 = HTFolder2(),
    ?assertMatch(KeyHashList1, lists:usort(KeyHashList2)),
    % Head fold - fetch each value through the proxy object's fetch fun
    FoldHeadsFun =
        fun(B, K, ProxyV, Acc) ->
            {proxy_object,
                _MDBin,
                _Size,
                {FetchFun, Clone, JK}} = binary_to_term(ProxyV),
            V = FetchFun(Clone, JK),
            [{B, K, erlang:phash2(term_to_binary(V))}|Acc]
        end,
    {async, HTFolder3} =
        book_returnfolder(Bookie1,
                            {foldheads_allkeys,
                                ?STD_TAG,
                                FoldHeadsFun,
                                true, true, false, false, false}),
    KeyHashList3 = HTFolder3(),
    ?assertMatch(KeyHashList1, lists:usort(KeyHashList3)),
    % Head fold - read the hash straight from the proxy object metadata
    FoldHeadsFun2 =
        fun(B, K, ProxyV, Acc) ->
            {proxy_object,
                MD,
                _Size,
                _Fetcher} = binary_to_term(ProxyV),
            {Hash, _Size, _UserDefinedMD} = MD,
            [{B, K, Hash}|Acc]
        end,
    {async, HTFolder4} =
        book_returnfolder(Bookie1,
                            {foldheads_allkeys,
                                ?STD_TAG,
                                FoldHeadsFun2,
                                false, false, false, false, false}),
    KeyHashList4 = HTFolder4(),
    ?assertMatch(KeyHashList1, lists:usort(KeyHashList4)),
    ok = book_close(Bookie1),
    reset_filestructure().
foldobjects_vs_foldheads_bybucket_test_() ->
    {timeout, 60, fun foldobjects_vs_foldheads_bybucket_testto/0}.

%% Run the object-fold vs head-fold comparison across a spread of ledger
%% cache sizes
foldobjects_vs_foldheads_bybucket_testto() ->
    folder_cache_test(10),
    folder_cache_test(100),
    folder_cache_test(300),
    folder_cache_test(1000).
%% For a given ledger cache size: fold over objects by bucket and fold
%% over heads by bucket (with and without journal check, and across key
%% sub-ranges) - all approaches should agree on the key/hash sets.
folder_cache_test(CacheSize) ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, CacheSize}]),
    _ = book_returnactors(Bookie1),
    ObjL1 = generate_multiple_objects(400, 1),
    ObjL2 = generate_multiple_objects(400, 1),
    % Put in all the objects with a TTL in the future
    Future = leveled_util:integer_now() + 300,
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "BucketA", K, V, S,
                                                        ?STD_TAG,
                                                        Future) end,
                    ObjL1),
    lists:foreach(fun({K, V, S}) -> ok = book_tempput(Bookie1,
                                                        "BucketB", K, V, S,
                                                        ?STD_TAG,
                                                        Future) end,
                    ObjL2),
    FoldObjectsFun = fun(B, K, V, Acc) ->
                            [{B, K, erlang:phash2(term_to_binary(V))}|Acc] end,
    {async, HTFolder1A} =
        book_returnfolder(Bookie1,
                            {foldobjects_bybucket,
                                ?STD_TAG,
                                "BucketA",
                                all,
                                FoldObjectsFun,
                                false}),
    KeyHashList1A = HTFolder1A(),
    {async, HTFolder1B} =
        book_returnfolder(Bookie1,
                            {foldobjects_bybucket,
                                ?STD_TAG,
                                "BucketB",
                                all,
                                FoldObjectsFun,
                                true}),
    KeyHashList1B = HTFolder1B(),
    % Values differ between buckets, so the hash sets must not match
    ?assertMatch(false,
                    lists:usort(KeyHashList1A) == lists:usort(KeyHashList1B)),
    FoldHeadsFun =
        fun(B, K, ProxyV, Acc) ->
            {proxy_object,
                _MDBin,
                _Size,
                {FetchFun, Clone, JK}} = binary_to_term(ProxyV),
            V = FetchFun(Clone, JK),
            [{B, K, erlang:phash2(term_to_binary(V))}|Acc]
        end,
    {async, HTFolder2A} =
        book_returnfolder(Bookie1,
                            {foldheads_bybucket,
                                ?STD_TAG,
                                "BucketA",
                                all,
                                FoldHeadsFun,
                                true, true,
                                false, false, false}),
    KeyHashList2A = HTFolder2A(),
    {async, HTFolder2B} =
        book_returnfolder(Bookie1,
                            {foldheads_bybucket,
                                ?STD_TAG,
                                "BucketB",
                                all,
                                FoldHeadsFun,
                                true, false,
                                false, false, false}),
    KeyHashList2B = HTFolder2B(),
    ?assertMatch(true,
                    lists:usort(KeyHashList1A) == lists:usort(KeyHashList2A)),
    ?assertMatch(true,
                    lists:usort(KeyHashList1B) == lists:usort(KeyHashList2B)),
    % Key-range queries covering the whole bucket should match the
    % all-keys query
    {async, HTFolder2C} =
        book_returnfolder(Bookie1,
                            {foldheads_bybucket,
                                ?STD_TAG,
                                "BucketB",
                                {"Key", <<"$all">>},
                                FoldHeadsFun,
                                true, false,
                                false, false, false}),
    KeyHashList2C = HTFolder2C(),
    {async, HTFolder2D} =
        book_returnfolder(Bookie1,
                            {foldheads_bybucket,
                                ?STD_TAG,
                                "BucketB",
                                {"Key", "Keyzzzzz"},
                                FoldHeadsFun,
                                true, true,
                                false, false, false}),
    KeyHashList2D = HTFolder2D(),
    ?assertMatch(true,
                    lists:usort(KeyHashList2B) == lists:usort(KeyHashList2C)),
    ?assertMatch(true,
                    lists:usort(KeyHashList2B) == lists:usort(KeyHashList2D)),
    % Splitting the range into two halves should produce two non-empty
    % results whose union is the full set
    CheckSplitQueryFun =
        fun(SplitInt) ->
            io:format("Testing SplitInt ~w~n", [SplitInt]),
            SplitIntEnd = "Key" ++ integer_to_list(SplitInt) ++ "|",
            SplitIntStart = "Key" ++ integer_to_list(SplitInt + 1),
            {async, HTFolder2E} =
                book_returnfolder(Bookie1,
                                    {foldheads_bybucket,
                                        ?STD_TAG,
                                        "BucketB",
                                        {"Key", SplitIntEnd},
                                        FoldHeadsFun,
                                        true, false,
                                        false, false, false}),
            KeyHashList2E = HTFolder2E(),
            {async, HTFolder2F} =
                book_returnfolder(Bookie1,
                                    {foldheads_bybucket,
                                        ?STD_TAG,
                                        "BucketB",
                                        {SplitIntStart, "Key|"},
                                        FoldHeadsFun,
                                        true, false,
                                        false, false, false}),
            KeyHashList2F = HTFolder2F(),
            ?assertMatch(true, length(KeyHashList2E) > 0),
            ?assertMatch(true, length(KeyHashList2F) > 0),
            io:format("Length of 2B ~w 2E ~w 2F ~w~n",
                        [length(KeyHashList2B),
                            length(KeyHashList2E),
                            length(KeyHashList2F)]),
            CompareL = lists:usort(KeyHashList2E ++ KeyHashList2F),
            ?assertMatch(true, lists:usort(KeyHashList2B) == CompareL)
        end,
    lists:foreach(CheckSplitQueryFun, [1, 4, 8, 300, 100, 400, 200, 600]),
    ok = book_close(Bookie1),
    reset_filestructure().
%% The bookie should start and stop cleanly even with a pathologically
%% small ledger cache size
small_cachesize_test() ->
    RootPath = reset_filestructure(),
    StartOpts = [{root_path, RootPath},
                    {max_journalsize, 1000000},
                    {cache_size, 1}],
    {ok, Bookie1} = book_start(StartOpts),
    ok = leveled_bookie:book_close(Bookie1).
%% book_isempty/2 should report false only for the tag under which an
%% object has been stored (with a TTL in the future so it remains live)
is_empty_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, 500}]),
    % Put in an object with a TTL in the future
    Future = leveled_util:integer_now() + 300,
    ?assertMatch(true, leveled_bookie:book_isempty(Bookie1, ?STD_TAG)),
    ok = book_tempput(Bookie1,
                        <<"B">>, <<"K">>, {value, <<"V">>}, [],
                        ?STD_TAG, Future),
    ?assertMatch(false, leveled_bookie:book_isempty(Bookie1, ?STD_TAG)),
    % The store is still empty under a tag that has had no PUTs
    ?assertMatch(true, leveled_bookie:book_isempty(Bookie1, ?RIAK_TAG)),
    ok = leveled_bookie:book_close(Bookie1).
%% In head_only mode, book_isempty/2 should flip to false once object
%% specs have been loaded via book_mput
is_empty_headonly_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, 500},
                                    {head_only, no_lookup}]),
    ?assertMatch(true, book_isempty(Bookie1, ?HEAD_TAG)),
    ObjSpecs =
        [{add, <<"B1">>, <<"K1">>, <<1:8/integer>>, 100},
            {remove, <<"B1">>, <<"K1">>, <<0:8/integer>>, null}],
    ok = book_mput(Bookie1, ObjSpecs),
    ?assertMatch(false, book_isempty(Bookie1, ?HEAD_TAG)),
    ok = book_close(Bookie1).
%% Starting the bookie without a root_path option should be refused with
%% a no_root_path error
undefined_rootpath_test() ->
    Opts = [{max_journalsize, 1000000}, {cache_size, 500}],
    StartResult = gen_server:start(?MODULE, [set_defaults(Opts)], []),
    ?assertMatch({error, no_root_path}, StartResult).
foldkeys_headonly_test() ->
    foldkeys_headonly_tester(5000, 25, "BucketStr"),
    foldkeys_headonly_tester(2000, 25, <<"B0">>).

%% In head_only mode, a keylist fold should return the de-duplicated set
%% of {Key, SubKey} pairs loaded via book_mput - both before and after a
%% restart
foldkeys_headonly_tester(ObjectCount, BlockSize, BStr) ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, 500},
                                    {head_only, no_lookup}]),
    GenObjSpecFun =
        fun(I) ->
            Key = I rem 6,
            {add, BStr, <<Key:8/integer>>, integer_to_list(I), I}
        end,
    ObjSpecs = lists:map(GenObjSpecFun, lists:seq(1, ObjectCount)),
    % Load the object specs in blocks of BlockSize
    ObjSpecBlocks =
        lists:map(fun(I) ->
                        lists:sublist(ObjSpecs, I * BlockSize + 1, BlockSize)
                    end,
                    lists:seq(0, ObjectCount div BlockSize - 1)),
    lists:map(fun(Block) -> book_mput(Bookie1, Block) end, ObjSpecBlocks),
    ?assertMatch(false, book_isempty(Bookie1, ?HEAD_TAG)),
    FolderT =
        {keylist,
            ?HEAD_TAG, BStr,
            {fun(_B, {K, SK}, Acc) -> [{K, SK}|Acc] end, []}
            },
    {async, Folder1} = book_returnfolder(Bookie1, FolderT),
    Key_SKL1 = lists:reverse(Folder1()),
    Key_SKL_Compare =
        lists:usort(lists:map(fun({add, _B, K, SK, _V}) -> {K, SK} end, ObjSpecs)),
    ?assertMatch(Key_SKL_Compare, Key_SKL1),
    % The same key list should survive a restart
    ok = book_close(Bookie1),
    {ok, Bookie2} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, 500},
                                    {head_only, no_lookup}]),
    {async, Folder2} = book_returnfolder(Bookie2, FolderT),
    Key_SKL2 = lists:reverse(Folder2()),
    ?assertMatch(Key_SKL_Compare, Key_SKL2),
    ok = book_close(Bookie2).
%% book_isempty/2 should behave correctly when list-string (rather than
%% binary) buckets and keys are used
is_empty_stringkey_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 1000000},
                                    {cache_size, 500}]),
    ?assertMatch(true, book_isempty(Bookie1, ?STD_TAG)),
    Past = leveled_util:integer_now() - 300,
    ?assertMatch(true, leveled_bookie:book_isempty(Bookie1, ?STD_TAG)),
    % One expired tempput and one standard put - the store is non-empty
    % because of the standard put
    ok = book_tempput(Bookie1,
                        "B", "K", {value, <<"V">>}, [],
                        ?STD_TAG, Past),
    ok = book_put(Bookie1,
                    "B", "K0", {value, <<"V">>}, [],
                    ?STD_TAG),
    ?assertMatch(false, book_isempty(Bookie1, ?STD_TAG)),
    ok = book_close(Bookie1).
%% scan_table should return only the index entries within the start/end
%% key range (excluding other buckets and terms beyond the range), and
%% should track the min and max SQN of the entries returned
scan_table_test() ->
    K1 = leveled_codec:to_ledgerkey(<<"B1">>,
                                        <<"K1">>,
                                        ?IDX_TAG,
                                        <<"F1-bin">>,
                                        <<"AA1">>),
    K2 = leveled_codec:to_ledgerkey(<<"B1">>,
                                        <<"K2">>,
                                        ?IDX_TAG,
                                        <<"F1-bin">>,
                                        <<"AA1">>),
    % K3 has a term (AB1) outside of the queried AA0-AA9 range
    K3 = leveled_codec:to_ledgerkey(<<"B1">>,
                                        <<"K3">>,
                                        ?IDX_TAG,
                                        <<"F1-bin">>,
                                        <<"AB1">>),
    K4 = leveled_codec:to_ledgerkey(<<"B1">>,
                                        <<"K4">>,
                                        ?IDX_TAG,
                                        <<"F1-bin">>,
                                        <<"AA2">>),
    % K5 is in a different bucket to the queried range
    K5 = leveled_codec:to_ledgerkey(<<"B2">>,
                                        <<"K5">>,
                                        ?IDX_TAG,
                                        <<"F1-bin">>,
                                        <<"AA2">>),
    Tab0 = ets:new(mem, [ordered_set]),
    SK_A0 = leveled_codec:to_ledgerkey(<<"B1">>,
                                        null,
                                        ?IDX_TAG,
                                        <<"F1-bin">>,
                                        <<"AA0">>),
    EK_A9 = leveled_codec:to_ledgerkey(<<"B1">>,
                                        null,
                                        ?IDX_TAG,
                                        <<"F1-bin">>,
                                        <<"AA9">>),
    Empty = {[], infinity, 0},
    ?assertMatch(Empty,
                    scan_table(Tab0, SK_A0, EK_A9)),
    ets:insert(Tab0, [{K1, {1, active, no_lookup, null}}]),
    ?assertMatch({[{K1, _}], 1, 1},
                    scan_table(Tab0, SK_A0, EK_A9)),
    ets:insert(Tab0, [{K2, {2, active, no_lookup, null}}]),
    ?assertMatch({[{K1, _}, {K2, _}], 1, 2},
                    scan_table(Tab0, SK_A0, EK_A9)),
    ets:insert(Tab0, [{K3, {3, active, no_lookup, null}}]),
    % K3 is out of range - result unchanged
    ?assertMatch({[{K1, _}, {K2, _}], 1, 2},
                    scan_table(Tab0, SK_A0, EK_A9)),
    ets:insert(Tab0, [{K4, {4, active, no_lookup, null}}]),
    ?assertMatch({[{K1, _}, {K2, _}, {K4, _}], 1, 4},
                    scan_table(Tab0, SK_A0, EK_A9)),
    ets:insert(Tab0, [{K5, {5, active, no_lookup, null}}]),
    % K5 is in another bucket - result unchanged
    ?assertMatch({[{K1, _}, {K2, _}, {K4, _}], 1, 4},
                    scan_table(Tab0, SK_A0, EK_A9)).
%% Sleeping beyond the long-running threshold should trigger the
%% long-running log (which returns ok)
longrunning_test() ->
    StartTime = os:timestamp(),
    timer:sleep(?LONG_RUNNING div 1000 + 100),
    ok = maybe_longrunning(StartTime, put).
%% Exercise otherwise-uncovered gen_server callbacks purely for coverage
coverage_cheat_test() ->
    {noreply, _State0} = handle_info(timeout, #state{}),
    {ok, _State1} = code_change(null, #state{}, null).
%% After erasing the journal on disk, a restarted store should no longer
%% find heads for any of the previously stored objects
erase_journal_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                    {max_journalsize, 50000},
                                    {cache_size, 100}]),
    ObjL1 = generate_multiple_objects(500, 1),
    % Put in all the objects with a TTL in the future
    lists:foreach(fun({K, V, S}) -> ok = book_put(Bookie1,
                                                    "Bucket", K, V, S,
                                                    ?STD_TAG) end,
                    ObjL1),
    lists:foreach(fun({K, V, _S}) ->
                        {ok, V} = book_get(Bookie1, "Bucket", K, ?STD_TAG)
                        end,
                    ObjL1),
    % Counts the number of objects whose head cannot be found
    CheckHeadFun =
        fun(Book) ->
            fun({K, _V, _S}, Acc) ->
                case book_head(Book, "Bucket", K, ?STD_TAG) of
                    {ok, _Head} -> Acc;
                    not_found -> Acc + 1
                end
            end
        end,
    HeadsNotFound1 = lists:foldl(CheckHeadFun(Bookie1), 0, ObjL1),
    ?assertMatch(0, HeadsNotFound1),
    ok = book_close(Bookie1),
    io:format("Bookie closed - clearing Journal~n"),
    leveled_inker:clean_testdir(RootPath ++ "/" ++ ?JOURNAL_FP),
    {ok, Bookie2} = book_start([{root_path, RootPath},
                                    {max_journalsize, 5000},
                                    {cache_size, 100}]),
    % With the journal erased, none of the heads should now be found
    HeadsNotFound2 = lists:foldl(CheckHeadFun(Bookie2), 0, ObjL1),
    ?assertMatch(500, HeadsNotFound2),
    ok = book_destroy(Bookie2).
%% An sqn_order object fold created with the boolean flag 'true' must see
%% only objects present at fold creation, while 'false' defers the
%% snapshot so a later put (K3) also appears - the asserts below
%% demonstrate exactly that split.
sqnorder_fold_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                {max_journalsize, 1000000},
                                {cache_size, 500}]),
    ok = book_put(Bookie1,
                  <<"B">>, <<"K1">>, {value, <<"V1">>}, [],
                  ?STD_TAG),
    ok = book_put(Bookie1,
                  <<"B">>, <<"K2">>, {value, <<"V2">>}, [],
                  ?STD_TAG),
    FoldObjectsFun = fun(B, K, V, Acc) -> Acc ++ [{B, K, V}] end,
    % Fold with snapshot taken up-front (true) ...
    {async, ObjFPre} =
        book_objectfold(Bookie1,
                        ?STD_TAG, {FoldObjectsFun, []}, true, sqn_order),
    % ... versus snapshot deferred to fold execution (false)
    {async, ObjFPost} =
        book_objectfold(Bookie1,
                        ?STD_TAG, {FoldObjectsFun, []}, false, sqn_order),
    % K3 is put after both folds were created but before either runs
    ok = book_put(Bookie1,
                  <<"B">>, <<"K3">>, {value, <<"V3">>}, [],
                  ?STD_TAG),
    ObjLPre = ObjFPre(),
    ?assertMatch([{<<"B">>, <<"K1">>, {value, <<"V1">>}},
                  {<<"B">>, <<"K2">>, {value, <<"V2">>}}], ObjLPre),
    ObjLPost = ObjFPost(),
    ?assertMatch([{<<"B">>, <<"K1">>, {value, <<"V1">>}},
                  {<<"B">>, <<"K2">>, {value, <<"V2">>}},
                  {<<"B">>, <<"K3">>, {value, <<"V3">>}}], ObjLPost),
    ok = book_destroy(Bookie1).
%% As sqnorder_fold_test, but repeatedly overwriting a single key: an
%% sqn_order fold must return only the version visible to its snapshot
%% (V2 for the up-front snapshot, V3 for the deferred one).
sqnorder_mutatefold_test() ->
    RootPath = reset_filestructure(),
    {ok, Bookie1} = book_start([{root_path, RootPath},
                                {max_journalsize, 1000000},
                                {cache_size, 500}]),
    ok = book_put(Bookie1,
                  <<"B">>, <<"K1">>, {value, <<"V1">>}, [],
                  ?STD_TAG),
    ok = book_put(Bookie1,
                  <<"B">>, <<"K1">>, {value, <<"V2">>}, [],
                  ?STD_TAG),
    FoldObjectsFun = fun(B, K, V, Acc) -> Acc ++ [{B, K, V}] end,
    % Snapshot taken at creation sees the state after the V2 overwrite
    {async, ObjFPre} =
        book_objectfold(Bookie1,
                        ?STD_TAG, {FoldObjectsFun, []}, true, sqn_order),
    % Deferred snapshot will see the V3 overwrite made below
    {async, ObjFPost} =
        book_objectfold(Bookie1,
                        ?STD_TAG, {FoldObjectsFun, []}, false, sqn_order),
    ok = book_put(Bookie1,
                  <<"B">>, <<"K1">>, {value, <<"V3">>}, [],
                  ?STD_TAG),
    ObjLPre = ObjFPre(),
    ?assertMatch([{<<"B">>, <<"K1">>, {value, <<"V2">>}}], ObjLPre),
    ObjLPost = ObjFPost(),
    ?assertMatch([{<<"B">>, <<"K1">>, {value, <<"V3">>}}], ObjLPost),
    ok = book_destroy(Bookie1).
%% search/2 returns {value, E} for the first element satisfying the
%% predicate, and false when nothing matches.
search_test() ->
    ?assertMatch({value, 5}, search(fun(X) -> X == 5 end, lists:seq(1, 10))),
    ?assertMatch(false, search(fun(X) -> X == 55 end, lists:seq(1, 10))).

%% check_notfound/2 tunes the key-check frequency: repeated 'probably'
%% results drive the frequency down to ?MIN_KEYCHECK_FREQUENCY, a
%% 'missing' result at max frequency reports {true, Max}, and a zero
%% frequency disables the check ({false, 0}).
check_notfound_test() ->
    ProbablyFun = fun() -> probably end,
    MissingFun = fun() -> missing end,
    MinFreq = lists:foldl(fun(_I, Freq) ->
                                  {false, Freq0} =
                                      check_notfound(Freq, ProbablyFun),
                                  Freq0
                          end,
                          100,
                          lists:seq(1, 5000)),
    % 5000 as needs to be a lot as doesn't decrement
    % when random interval is not hit
    ?assertMatch(?MIN_KEYCHECK_FREQUENCY, MinFreq),
    ?assertMatch({true, ?MAX_KEYCHECK_FREQUENCY},
                 check_notfound(?MAX_KEYCHECK_FREQUENCY, MissingFun)),
    ?assertMatch({false, 0}, check_notfound(0, MissingFun)).
-endif.
%% @doc
%% A collector for a set of metrics.
%%
%% Normal users should use {@link prometheus_gauge},
%% {@link prometheus_counter}, {@link prometheus_summary}
%% and {@link prometheus_histogram}.
%%
%% Implementing `:prometheus_collector' behaviour is for advanced uses
%% such as proxying metrics from another monitoring system.
%% It is it the responsibility of the implementer to ensure produced metrics
%% are valid.
%%
%% You will be working with Prometheus
%% data model directly (see {@link prometheus_model_helpers}).
%%
%% Callbacks:
%% - `collect_mf(Registry, Callback)' - called by exporters and formats.
%% Should call `Callback' for each `MetricFamily' of this collector;
%% - `collect_metrics(Name, Data)' - called by `MetricFamily' constructor.
%% Should return Metric list for each MetricFamily identified by `Name'.
%% `Data' is a term associated with MetricFamily by collect_mf.
%% - `deregister_cleanup(Registry)' - called when collector unregistered by
%% `Registry'. If collector is stateful you can put cleanup code here.
%%
%% Example (simplified `prometheus_vm_memory_collector'):
%% <pre lang="erlang">
%% -module(prometheus_vm_memory_collector).
%%
%% -export([deregister_cleanup/1,
%% collect_mf/2,
%% collect_metrics/2]).
%%
%% -behaviour(prometheus_collector).
%%
%% %%====================================================================
%% %% Collector API
%% %%====================================================================
%%
%% deregister_cleanup(_) -> ok.
%%
%% collect_mf(_Registry, Callback) ->
%% Memory = erlang:memory(),
%% Callback(create_gauge(erlang_vm_bytes_total,
%% "The total amount of memory currently allocated. "
%% "This is the same as the sum of the memory size "
%% "for processes and system.",
%% Memory)),
%% ok.
%%
%% collect_metrics(erlang_vm_bytes_total, Memory) ->
%% prometheus_model_helpers:gauge_metrics(
%% [
%% {[{kind, system}], proplists:get_value(system, Memory)},
%% {[{kind, processes}], proplists:get_value(processes, Memory)}
%% ]).
%%
%% %%====================================================================
%% %% Private Parts
%% %%====================================================================
%%
%% create_gauge(Name, Help, Data) ->
%% prometheus_model_helpers:create_mf(Name, Help, gauge, ?MODULE, Data).
%% </pre>
%% @end
-module(prometheus_collector).
-export([enabled_collectors/0,
register/1,
register/2,
deregister/1,
deregister/2,
collect_mf/3]).
-ifdef(TEST).
-export([collect_mf_to_list/1]).
-endif.
-export_type([collector/0,
data/0,
collect_mf_callback/0]).
-compile({no_auto_import, [register/2]}).
-include("prometheus.hrl").
%%====================================================================
%% Types
%%====================================================================
-type collector() :: atom().
-type data() :: any().
-type collect_mf_callback() ::
fun((prometheus_model:'MetricFamily'()) -> any()).
%%====================================================================
%% Callbacks
%%====================================================================
-callback collect_mf(Registry, Callback) -> ok when
Registry :: prometheus_registry:registry(),
Callback :: collect_mf_callback().
-callback collect_metrics(Name, Data) -> Metrics when
Name :: prometheus_metric:name(),
Data :: data(),
Metrics :: prometheus_model:'Metric'() | [prometheus_model:'Metric'()].
-callback deregister_cleanup(Registry) -> ok when
Registry :: prometheus_registry:registry().
%%====================================================================
%% Public API
%%====================================================================
%% @private
-spec enabled_collectors() -> [collector()].
enabled_collectors() ->
    %% An explicit {prometheus, collectors} app-env list wins; otherwise
    %% discover every module implementing the collector behaviour.
    case application:get_env(prometheus, collectors) of
        {ok, Collectors} -> Collectors;
        undefined -> all_known_collectors()
    end.
%% @equiv register(Collector, default)
%% @deprecated Please use {@link prometheus_registry:register_collector/1}
register(Collector) -> register(Collector, default).

-spec register(Collector, Registry) -> ok when
      Collector :: collector(),
      Registry :: prometheus_registry:registry().
%% @deprecated Please use {@link prometheus_registry:register_collector/2}
register(Collector, Registry) ->
    %% Emits a deprecation warning, then delegates to the registry;
    %% the ok-match asserts registration succeeded.
    ?DEPRECATED("prometheus_collector:register/2",
                "prometheus_register:register_collector/2"),
    ok = prometheus_registry:register_collector(Registry, Collector).

%% @equiv deregister(Collector, default)
%% @deprecated Please use {@link prometheus_registry:deregister_collector/1}
deregister(Collector) -> deregister(Collector, default).

-spec deregister(Collector, Registry) -> ok when
      Collector :: collector(),
      Registry :: prometheus_registry:registry().
%% @deprecated Please use {@link prometheus_registry:deregister_collector/2}
deregister(Collector, Registry) ->
    %% Deprecation warning plus delegation, as for register/2 above.
    ?DEPRECATED("prometheus_collector:deregister/2",
                "prometheus_register:deregister_collector/2"),
    prometheus_registry:deregister_collector(Registry, Collector).
%% @doc Calls `Callback' for each MetricFamily of this collector.
-spec collect_mf(Registry, Collector, Callback) -> ok when
      Registry :: prometheus_registry:registry(),
      Collector :: collector(),
      Callback :: collect_mf_callback().
collect_mf(Registry, Collector, Callback) ->
    %% The ok-match enforces the behaviour's contract that collect_mf/2
    %% returns ok.
    ok = Collector:collect_mf(Registry, Callback).
%%====================================================================
%% Test only
%%====================================================================
-ifdef(TEST).
%% @private
%% Run Collector's collect_mf and gather the emitted MetricFamily terms
%% into a list (most recently emitted first). The process dictionary,
%% keyed by the collector module, is used as the accumulator and is
%% always erased in the after clause.
collect_mf_to_list(Collector) ->
    collect_mf_to_list(default, Collector).

collect_mf_to_list(Registry, Collector) ->
    try
        Callback = fun (MF) ->
                           put(Collector, [MF|get_list(Collector)])
                   end,
        prometheus_collector:collect_mf(Registry, Collector, Callback),
        get_list(Collector)
    after
        erase(Collector)
    end.
%% Fetch the list accumulated under Key in the process dictionary,
%% defaulting to the empty list when nothing has been stored yet.
get_list(Key) ->
    Stored = get(Key),
    if
        Stored =:= undefined -> [];
        true -> Stored
    end.
-endif.
%%====================================================================
%% Private Parts
%%====================================================================
%% Discover every loaded module that implements the prometheus_collector
%% behaviour. (Fix: dataset-export residue was fused onto the final line.)
all_known_collectors() ->
    prometheus_misc:behaviour_modules(prometheus_collector).
%% Copyright (c) 2013 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : ttsets.erl
%% Author : <NAME>
%% Purpose : Set as a 2-3 tree.
%% This implementation uses 2-3 trees. The description of the tree
%% restructuring which is used comes from Prof. <NAME>'s notes for
%% CS230 Data Structures at Wellesley College.
-module(ttsets).
%% Standard interface.
-export([new/0,is_set/1,size/1,to_list/1,from_list/1]).
-export([is_element/2,add_element/2,del_element/2]).
-export([union/2,union/1,intersection/2,intersection/1]).
-export([is_disjoint/2,subtract/2,is_subset/2]).
-export([fold/3,filter/2]).
%% Extended interface.
-export([foreach/2]).
-compile({no_auto_import,[size/1]}). %We mean our own size/1
-ifdef(DEBUG).
-export([check_depth/1]).
-endif.
%% Data structure:
%% - {Left,Element,Right}
%% - {Left,Element,Middle,Element,Right}
%% - empty
%%
%% The term order is an arithmetic total order, so we should not
%% test exact equality for the keys. (If we do, then it becomes
%% possible that neither `>', `<', nor `=:=' matches.) Testing '<'
%% and '>' first is statistically better than testing for
%% equality, and also allows us to skip the test completely in the
%% remaining case.
-type ttset() :: empty |
{empty,any(),empty} |
{any(),any(),any()} |
{empty,any(),empty,any(),empty} |
{any(),any(),any(),any(),any()}.
-export_type([ttset/0]).
-spec new() -> Set::ttset().
%% Return a new empty set.
%% (The empty tree is represented by the bare atom 'empty'.)
new() -> empty. %The empty set
-spec is_set(Set::ttset()) -> boolean().
%% Structural check: a term is a set when it is the atom 'empty', or a
%% 2-node/3-node tuple whose subtrees are themselves sets. (Element
%% ordering between nodes is not verified.)
is_set(empty) ->
    true;
is_set({Left, _, Right}) ->
    is_set(Left) andalso is_set(Right);
is_set({Left, _, Mid, _, Right}) ->
    is_set(Left) andalso is_set(Mid) andalso is_set(Right);
is_set(_Other) ->
    false.
-spec size(Set::ttset()) -> non_neg_integer().
%% Return the number of elements in Set. O(n): node sizes are not
%% cached, so the whole tree is traversed. (The local name shadows the
%% auto-imported BIF; see the no_auto_import compile attribute above.)
size(empty) -> 0;
size({A,_,B}) ->
    size(A) + size(B) + 1;
size({A,_,B,_,C}) ->
    size(A) + size(B) + size(C) + 2.
-spec to_list(Set::ttset()) -> [Element::any()].
%% Flatten the tree into an ordered list. The accumulator is threaded
%% right-to-left so the result is built in order with no final reverse.
to_list(Set) -> to_list(Set, []).

to_list(empty, Acc) -> Acc;
to_list({Left, E, Right}, Acc) ->
    to_list(Left, [E | to_list(Right, Acc)]);
to_list({Left, E1, Mid, E2, Right}, Acc) ->
    to_list(Left, [E1 | to_list(Mid, [E2 | to_list(Right, Acc)])]).
-spec from_list([Element::any()]) -> Dict::ttset().
%% Build a set from the elements in list. Elements that compare equal
%% in the term order collapse to a single entry.
from_list(List) ->
    lists:foldl(fun (E, S) -> add_element(E, S) end, new(), List).
-spec is_element(Element::any(), Set::ttset()) -> boolean().
%% Membership test. Comparisons use the arithmetic term order, and
%% equality is taken as "neither less nor greater" (so 1 and 1.0 match).
is_element(_, empty) ->
    false;
is_element(E, {Left, X, _}) when E < X ->
    is_element(E, Left);
is_element(E, {_, X, Right}) when E > X ->
    is_element(E, Right);
is_element(_, {_, _, _}) ->
    %% Neither < nor > the 2-node's element: found.
    true;
is_element(E, {Left, X, _, _, _}) when E < X ->
    is_element(E, Left);
is_element(E, {_, X, Mid, Y, Right}) when E > X ->
    if
        E < Y -> is_element(E, Mid);
        E > Y -> is_element(E, Right);
        true -> true  %matches the second element
    end;
is_element(_, {_, _, _, _, _}) ->
    %% Matches the first element of the 3-node.
    true.
-spec add_element(Element::any(), Set::ttset()) -> Set::ttset().
%% Return Set with Element inserted in it. If an element comparing
%% equal is already present, the set is returned unchanged.
add_element(E, T) ->
    %% Store and check for a returned "Up" node: an insertion that split
    %% the root is absorbed here by building a new 2-node root.
    case add_aux(E, T) of
        {up,Lu,Eu,Ru} -> {Lu,Eu,Ru};
        Node -> Node
    end.

%% add_aux(E, Tree) -> Tree' | {up,Left,Elem,Right}
%% Insertion worker: an {up,...} result signals that the subtree split
%% and the caller must absorb the promoted element into its own node.
add_aux(E, empty) -> {up,empty,E,empty}; %"Up" node
add_aux(E, {empty,X,empty}=N) ->
    %% Special case to avoid creating temporary "up" nodes.
    %% It helps a little bit, but not much.
    if E < X -> {empty,E,empty,X,empty};
       E > X -> {empty,X,empty,E,empty};
       true -> N
    end;
add_aux(E, {A,X,B}=N) ->
    if E < X -> %Down the left
            add_up2_l(add_aux(E, A), X, B);
       E > X -> %Down the right
            add_up2_r(A, X, add_aux(E, B));
       true -> N %Replace current value
    end;
add_aux(E, {A,X,B,Y,C}) when E < X ->
    add_up3_l(add_aux(E, A), X, B, Y, C);
add_aux(E, {A,X,B,Y,C}=N) when E > X ->
    if E < Y -> %Down the middle
            add_up3_m(A, X, add_aux(E, B), Y, C);
       E > Y -> %Down the right
            add_up3_r(A, X, B, Y, add_aux(E, C));
       true -> N
    end;
add_aux(_, {_,_,_,_,_}=N) -> N.

%% add_up2_l/r(L, X, R) -> {L,X,M,X,R} | {L,X,R}.
%% Absorb a split child into a 2-node by growing it into a 3-node.
add_up2_l({up,Lu,X,Ru}, Y, R) ->
    {Lu,X,Ru,Y,R};
add_up2_l(L, X, R) -> {L,X,R}.

add_up2_r(L, X, {up,Lu,Y,Ru}) ->
    {L,X,Lu,Y,Ru};
add_up2_r(L, X, R) -> {L,X,R}.

%% add_up3_l/m/r(L, X, M, Y, R) ->
%%     {up,L,X,R} | {L,X,M,Y,R}.
%% A split child of a 3-node forces this node to split as well,
%% propagating an "up" node to the caller.
add_up3_l({up,Lu,X,Ru}, Y, M, Z, R) ->
    {up,{Lu,X,Ru},Y,{M,Z,R}};
add_up3_l(L, X, M, Y, R) -> {L,X,M,Y,R}.

add_up3_m(L, X, {up,Lu,Y,Ru}, Z, R) ->
    {up,{L,X,Lu},Y,{Ru,Z,R}};
add_up3_m(L, X, M, Y, R) -> {L,X,M,Y,R}.

add_up3_r(L, X, M, Y, {up,Lu,Z,Ru}) ->
    {up,{L,X,M},Y,{Lu,Z,Ru}};
add_up3_r(L, X, M, Y, R) -> {L,X,M,Y,R}.
-spec del_element(Element::any(), Set::ttset()) -> Set::ttset().
%% Return Set but with Element removed. A missing element leaves the
%% set unchanged.
del_element(E, T) ->
    %% An {up,T1} result means the root shrank by one level.
    case del_aux(E, T) of
        {up,T1} -> T1;
        T1 -> T1
    end.

%% del_aux(E, Tree) -> Tree' | {up,Tree'}
%% Deletion worker: {up,...} signals an underflowed subtree that the
%% caller must repair with the del_up* restructuring rules below.
del_aux(_, empty) -> empty; %No element
del_aux(E, {empty,X,empty}=N) ->
    if E < X; E > X -> N; %No element
       true -> {up,empty}
    end;
del_aux(E, {A,X,B}) ->
    if E < X -> %Down the left
            del_up2_l(del_aux(E, A), X, B);
       E > X -> %Down the right
            del_up2_r(A, X, del_aux(E, B));
       true ->
            %% Replace X with its in-order successor (min of B).
            {Bm,B1}= del_min(B),
            del_up2_r(A, Bm, B1)
    end;
del_aux(E, {empty,X,empty,Y,empty}=N) ->
    if E < X -> N; %No element
       E > X ->
            if E < Y -> N; %No element
               E > Y -> N;
               true -> {empty,X,empty}
            end;
       true -> {empty,Y,empty}
    end;
del_aux(E, {A,X,B,Y,C}) when E < X ->
    del_up3_l(del_aux(E, A), X, B, Y, C);
del_aux(E, {A,X,B,Y,C}) when E > X ->
    if E < Y ->
            del_up3_m(A, X, del_aux(E, B), Y, C);
       E > Y ->
            del_up3_r(A, X, B, Y, del_aux(E, C));
       true ->
            %% Y matched: replace it with the minimum of C.
            {Cm,C1} = del_min(C),
            del_up3_r(A, X, B, Cm, C1)
    end;
del_aux(_, {A,_,B,Y,C}) ->
    %% X matched: replace it with the minimum of B.
    {Bm,B1} = del_min(B),
    del_up3_m(A, Bm, B1, Y, C).

%% del_min(Tree) -> {MinElem, Tree' | {up,Tree'}}
%% Remove and return the leftmost (minimum) element.
del_min(T) ->
    %%io:format("em: ~p\n-> ~p\n", [T,T1]),
    del_min1(T).

del_min1({empty,X,empty}) -> {X,{up,empty}};
del_min1({A,X,B}) ->
    {Min,A1} = del_min1(A),
    {Min,del_up2_l(A1, X, B)};
del_min1({empty,X,empty,Y,empty}) ->
    {X,{empty,Y,empty}};
del_min1({A,X,B,Y,C}) ->
    {Min,A1} = del_min1(A),
    {Min,del_up3_l(A1, X, B, Y, C)}.

%% del_up2_l/r(L, X, R) -> Node | {up,Node}.
%% We use the same naming of nodes and keys as in the text. It makes
%% checking the rules easier.
del_up2_l({up,L}, X, {M,Y,R}) -> %1.1
    {up,{L,X,M,Y,R}};
del_up2_l({up,A}, X, {B,Y,C,Z,D}) -> %2.1
    {{A,X,B},Y,{C,Z,D}};
del_up2_l(L, X, R) -> {L,X,R}.

del_up2_r({L,X,M}, Y, {up,R}) -> %1.2
    {up,{L,X,M,Y,R}};
del_up2_r({A,X,B,Y,C}, Z, {up,D}) -> %2.2
    {{A,X,B},Y,{C,Z,D}};
del_up2_r(L, X, R) -> {L,X,R}.

%% del_up2_r(L, X, {up,R}) -> del_up2_r1(L, X, R);
%% del_up2_r(L, X, R) -> {L,K,V,R}.
%% del_up2_r1({L,X,M}, Y, R) -> %1.2
%%     {up,{L,X,M,Y,R}};
%% del_up2_r1({A,X,B,Y,C}, Z, D) -> %2.2
%%     {{A,X,B},Y,{C,Z,D}}.

%% del_up3_l/m/r(L, X, M, Y, R) ->
%%     Node | {up,Node}.
%% We use the same naming of nodes and keys as in the text. It makes
%% checking the rules easier. N.B. there are alternate valid choices
%% for the middle case!
del_up3_l({up,A}, X, {B,Y,C}, Z, D) -> %3a.1
    {{A,X,B,Y,C},Z,D};
del_up3_l({up,A}, W, {B,X,C,Y,D}, Z, E) -> %4a.1
    {{A,W,B},X,{C,Y,D},Z,E};
del_up3_l(A, X, B, Y, C) -> {A,X,B,Y,C}.

del_up3_m({A,X,B}, Y, {up,C}, Z, D) -> %3a.2
    {{A,X,B,Y,C},Z,D};
del_up3_m(A, X, {up,B}, Y, {C,Z,D}) -> %3b.1
    {A,X,{B,Y,C,Z,D}};
del_up3_m({A,W,B,X,C}, Y, {up,D}, Z, E) -> %4a.2
    {{A,W,B},X,{C,Y,D},Z,E};
del_up3_m(A, W, {up,B}, X, {C,Y,D,Z,E}) -> %4b.1
    {A,W,{B,X,C},Y,{D,Z,E}};
del_up3_m(A, X, B, Y, C) -> {A,X,B,Y,C}.

del_up3_r(A, X, {B,Y,C}, Z, {up,D}) -> %3b.2
    {A,X,{B,Y,C,Z,D}};
del_up3_r(A, W, {B,X,C,Y,D}, Z, {up,E}) -> %4b.2
    {A,W,{B,X,C},Y,{D,Z,E}};
del_up3_r(A, X, B, Y, C) -> {A,X,B,Y,C}.
-spec union(Set1::ttset(), Set2::ttset()) -> Set::ttset().
%% Return the union of Set1 and Set2 (folds Set1's elements into Set2).
union(S1, S2) ->
    fold(fun (E, S) -> add_element(E, S) end, S2, S1).

-spec union(Sets::[ttset()]) -> Set::ttset().
%% Return the union of the list of sets.
union([S1,S2|Ss]) ->
    %% Do our own unions here to try and fold over smaller set.
    U0 = union(Ss),
    U1 = fold(fun (E, S) -> add_element(E, S) end, U0, S2),
    fold(fun (E, S) -> add_element(E, S) end, U1, S1);
union([S]) -> S;
union([]) -> empty.
-spec intersection(Set1::ttset(), Set2::ttset()) -> Set::ttset().
%% Return the intersection of Set1 and Set2 (keeps the elements of
%% Set2 that are also in Set1).
intersection(S1, S2) ->
    filter(fun (E) -> is_element(E, S1) end, S2).

-spec intersection(Sets::[ttset()]) -> Set::ttset().
%% Return the intersection of the (non-empty) list of sets.
intersection([S]) -> S;
intersection([S|Ss]) ->
    lists:foldl(fun (S1, I) -> intersection(S1, I) end, S, Ss).

-spec is_disjoint(Set1::ttset(), Set2::ttset()) -> boolean().
%% Check whether Set1 and Set2 are disjoint. Note: the fold always
%% traverses all of Set1 - there is no early exit on the first overlap.
is_disjoint(S1, S2) ->
    fold(fun (E, Dis) -> Dis andalso (not is_element(E, S2)) end, true, S1).

-spec subtract(Set1::ttset(), Set2::ttset()) -> Set::ttset().
%% Return all and only the elements in Set1 which are not elements of Set2.
subtract(S1, S2) ->
    filter(fun (E) -> not is_element(E, S2) end, S1).

-spec is_subset(Set1::ttset(), Set2::ttset()) -> boolean().
%% Return 'true' when every element of Set1 is also an element of
%% Set2, else 'false'. (Traverses all of Set1; no early exit.)
is_subset(S1, S2) ->
    fold(fun (E, Sub) -> Sub andalso is_element(E, S2) end, true, S1).
-spec fold(Fun::fun(), Acc::any(), Set::ttset()) -> any().
%% Fold Fun over the elements of Set, strictly left to right (even if
%% the visiting order is not part of the documented contract).
fold(_Fun, Acc, empty) ->
    Acc;
fold(Fun, Acc, {Left, Elem, Right}) ->
    fold(Fun, Fun(Elem, fold(Fun, Acc, Left)), Right);
fold(Fun, Acc, {Left, E1, Mid, E2, Right}) ->
    fold(Fun, Fun(E2, fold(Fun, Fun(E1, fold(Fun, Acc, Left)), Mid)), Right).
-spec filter(Fun::fun(), Set::ttset()) -> Set::ttset().
%% Return the set of elements for which Fun returns true, visiting
%% elements left to right (even if this is not specified). A fresh set
%% is built up via add_element/2.
filter(F, S) -> filter(F, S, new()).

filter(_, empty, New) -> New;
filter(F, {A,X,B}, New0) ->
    New1 = filter(F, A, New0),
    New2 = case F(X) of
               true -> add_element(X, New1);
               false -> New1
           end,
    filter(F, B, New2);
filter(F, {A,X,B,Y,C}, New0) ->
    New1 = filter(F, A, New0),
    New2 = case F(X) of
               true -> add_element(X, New1);
               false -> New1
           end,
    New3 = filter(F, B, New2),
    New4 = case F(Y) of
               true -> add_element(Y, New3);
               false -> New3
           end,
    filter(F, C, New4).
%% Extended interface.

-spec foreach(Fun::fun(), Set::ttset()) -> ok.
%% Apply Fun to each element in Set, for side effects only, visiting
%% elements left to right (even if this is not specified).
foreach(_, empty) -> ok;
foreach(F, {A,X,B}) ->
    foreach(F, A),
    F(X),
    foreach(F, B);
foreach(F, {A,X,B,Y,C}) ->
    foreach(F, A),
    F(X),
    foreach(F, B),
    F(Y),
    foreach(F, C).
-ifdef(DEBUG).
%% Check the depth of all the leaves, should all be the same.
%% Returns an orddict counting leaves per depth plus counters for the
%% number of 2-nodes ('two') and 3-nodes ('three') encountered.
check_depth(T) -> check_depth(T, 1, orddict:new()).

check_depth(empty, D, Dd) ->
    orddict:update_counter(D, 1, Dd);
check_depth({L,_,R}, D, Dd0) ->
    Dd1 = orddict:update_counter(two, 1, Dd0),
    Dd2 = check_depth(L, D+1, Dd1),
    check_depth(R, D+1, Dd2);
check_depth({L,_,M,_,R}, D, Dd0) ->
    Dd1 = orddict:update_counter(three, 1, Dd0),
    Dd2 = check_depth(L, D+1, Dd1),
    Dd3 = check_depth(M, D+1, Dd2),
    check_depth(R, D+1, Dd3).
-endif.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_stats_httpd).
-include_lib("couch/include/couch_db.hrl").
-export([handle_stats_req/1]).
%% exported for use by chttpd_misc
-export([transform_stats/1, nest/1, to_ejson/1, extract_path/2]).
%% GET handler for the stats endpoint: optionally flushes the aggregator
%% (?flush=true), normalises and nests the fetched stats into EJSON, and
%% narrows the response to the sub-object addressed by the remaining
%% path segments before sending it as JSON.
handle_stats_req(#httpd{method='GET', path_parts=[_ | Path]}=Req) ->
    flush(Req),
    Stats0 = couch_stats:fetch(),
    Stats = transform_stats(Stats0),
    Nested = nest(Stats),
    EJSON0 = to_ejson(Nested),
    EJSON1 = extract_path(Path, EJSON0),
    couch_httpd:send_json(Req, EJSON1).
%% Normalise every stat's properties according to its mandatory 'type'
%% property. NOTE: the accumulator prepends, so the result is in reverse
%% order relative to the input.
transform_stats(Stats) ->
    transform_stats(Stats, []).

transform_stats([], Acc) ->
    Acc;
transform_stats([{Key, Props} | Rest], Acc) ->
    %% A stat without a 'type' property crashes here via badmatch.
    {_, Type} = proplists:lookup(type, Props),
    transform_stats(Rest, [{Key, transform_stat(Type, Props)} | Acc]).
%% Counters and gauges pass through untouched; for histograms, the
%% 'percentile' and 'histogram' entries inside the 'value' property have
%% their tuples converted to lists (so they can be JSON-encoded).
transform_stat(counter, Props) ->
    Props;
transform_stat(gauge, Props) ->
    Props;
transform_stat(histogram, Props) ->
    lists:map(fun
        ({value, Value}) ->
            {value, lists:map(fun
                ({Key, List}) when Key == percentile; Key == histogram ->
                    {Key, [tuple_to_list(Item) || Item <- List]};
                (Else) ->
                    Else
            end, Value)};
        (Else) ->
            Else
    end, Props).
%% Convert a flat proplist keyed by key-paths (lists of keys) into a
%% nested proplist, merging entries that share a common path prefix.
nest(Flat) ->
    nest(Flat, []).

nest([], Nested) ->
    Nested;
nest([{[K | SubPath], V} | More], Nested) ->
    Updated =
        case proplists:lookup(K, Nested) of
            {K, Existing} ->
                %% Merge into the subtree already present under K.
                [{K, nest([{SubPath, V}], Existing)} | proplists:delete(K, Nested)];
            none ->
                %% Build a fresh chain of singleton proplists down to V.
                Chain = lists:foldr(fun(Step, Inner) -> [{Step, Inner}] end, V, SubPath),
                [{K, Chain} | Nested]
        end,
    nest(More, Updated).
%% Recursively convert a nested proplist into EJSON object tuples,
%% coercing each key to a binary. Anything that is not a non-empty
%% proplist (including the empty list) is returned unchanged.
to_ejson([{_, _}|_]=Proplist) ->
    EJSONProps = lists:map(
        fun({Key, Value}) -> {maybe_format_key(Key), to_ejson(Value)} end,
        Proplist
    ),
    {EJSONProps};
to_ejson(NotAProplist) ->
    NotAProplist.
%% Walk a key path into a nested {Proplist} EJSON structure. Returns the
%% addressed value, or null when the path runs through a missing key or
%% a non-object value.
extract_path([], Node) ->
    Node;
extract_path([Segment | Remaining], {Fields}) ->
    case proplists:lookup(Segment, Fields) of
        {Segment, Child} ->
            extract_path(Remaining, Child);
        none ->
            null
    end;
extract_path([_ | _], _NotAnObject) ->
    null.
%% Coerce a stat key (string, atom, integer or binary) to a binary so it
%% can serve as an EJSON object key. The guards are mutually exclusive,
%% so clause order is immaterial.
maybe_format_key(K) when is_binary(K) ->
    K;
maybe_format_key(K) when is_atom(K) ->
    list_to_binary(atom_to_list(K));
maybe_format_key(K) when is_integer(K) ->
    list_to_binary(integer_to_list(K));
maybe_format_key(K) when is_list(K) ->
    list_to_binary(K).
%% Trigger a synchronous flush of the stats aggregator when the request
%% carries ?flush=true; any other (or missing) value is ignored.
%% (Fix: dataset-export residue was fused onto the final line.)
flush(Req) ->
    case couch_util:get_value("flush", chttpd:qs(Req)) of
        "true" ->
            couch_stats_aggregator:flush();
        _Else ->
            ok
    end.
%% =====================================================================
%% This library is free software; you can redistribute it and/or modify
%% it under the terms of the GNU Lesser General Public License as
%% published by the Free Software Foundation; either version 2 of the
%% License, or (at your option) any later version.
%%
%% This library is distributed in the hope that it will be useful, but
%% WITHOUT ANY WARRANTY; without even the implied warranty of
%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
%% Lesser General Public License for more details.
%%
%% You should have received a copy of the GNU Lesser General Public
%% License along with this library; if not, write to the Free Software
%% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
%% USA
%%
%% =====================================================================
%% @copyright 1997-2006 <NAME>
%% @author <NAME> <<EMAIL>>
%% @end
%% =====================================================================
%% @doc Functions for reading comment lines from Erlang source code.
-module(erl_comment_scan).
-export([file/1, join_lines/1, scan_lines/1, string/1]).
-export_type([comment/0]).
%% =====================================================================
-type comment() :: {integer(), integer(), integer(), [string()]}.
-type commentLine() :: {integer(), integer(), integer(), string()}.
%% =====================================================================
%% @spec file(FileName::file:filename()) -> [Comment]
%%
%% Comment = {Line, Column, Indentation, Text}
%% Line = integer()
%% Column = integer()
%% Indentation = integer()
%% Text = [string()]
%%
%% @doc Extracts comments from an Erlang source code file. Returns a
%% list of entries representing <em>multi-line</em> comments, listed in
%% order of increasing line-numbers. For each entry, `Text'
%% is a list of strings representing the consecutive comment lines in
%% top-down order; the strings contain <em>all</em> characters following
%% (but not including) the first comment-introducing `%'
%% character on the line, up to (but not including) the line-terminating
%% newline.
%%
%% Furthermore, `Line' is the line number and
%% `Column' the left column of the comment (i.e., the column
%% of the comment-introducing `%' character).
%% `Indent' is the indentation (or padding), measured in
%% character positions between the last non-whitespace character before
%% the comment (or the left margin), and the left column of the comment.
%% `Line' and `Column' are always positive
%% integers, and `Indentation' is a nonnegative integer.
%%
%% Evaluation exits with reason `{read, Reason}' if a read
%% error occurred, where `Reason' is an atom corresponding to
%% a Posix error code; see the module {@link //kernel/file} for details.
-spec file(file:filename()) -> [comment()].

file(Name) ->
    Name1 = filename(Name),
    %% The read is wrapped in `catch {ok, _}` so that exits/throws from
    %% file:read_file/1 can be told apart from its normal return values.
    case catch {ok, file:read_file(Name1)} of
        {ok, V} ->
            case V of
                {ok, B} ->
                    string(binary_to_list(B));
                {error, E} ->
                    %% Posix-style read error: report, then exit {read, E}.
                    error_read_file(Name1),
                    exit(E(read, E))
            end;
        {'EXIT', E} ->
            error_read_file(Name1),
            exit(E);
        R ->
            %% A throw from the read is reported and re-thrown.
            error_read_file(Name1),
            throw(R)
    end.
%% =====================================================================
%% @spec string(string()) -> [Comment]
%%
%% Comment = {Line, Column, Indentation, Text}
%% Line = integer()
%% Column = integer()
%% Indentation = integer()
%% Text = [string()]
%%
%% @doc Extracts comments from a string containing Erlang source code.
%% Except for reading directly from a string, the behaviour is the same
%% as for {@link file/1}.
%%
%% @see file/1
-spec string(string()) -> [comment()].

string(Text) ->
    %% scan_lines/1 and join_lines/1 both produce results in decreasing
    %% line order, so a single reverse yields increasing line order.
    lists:reverse(join_lines(scan_lines(Text))).
%% =====================================================================
%% @spec scan_lines(string()) -> [CommentLine]
%%
%% CommentLine = {Line, Column, Indent, Text}
%% Line = integer()
%% Column = integer()
%% Indent = integer()
%% Text = string()
%%
%% @doc Extracts individual comment lines from a source code string.
%% Returns a list of comment lines found in the text, listed in order of
%% <em>decreasing</em> line-numbers, i.e., the last comment line in the
%% input is first in the resulting list. `Text' is a single
%% string, containing all characters following (but not including) the
%% first comment-introducing `%' character on the line, up
%% to (but not including) the line-terminating newline. For details on
%% `Line', `Column' and `Indent', see {@link file/1}.
-spec scan_lines(string()) -> [commentLine()].

%% Scanner state: L = current line number, Col = current 0-based column,
%% M = column just past the last non-whitespace character seen on this
%% line, Ack = comment lines found so far (most recent first).
scan_lines(Text) ->
    scan_lines(Text, 1, 0, 0, []).

scan_lines([$\040 | Cs], L, Col, M, Ack) ->
    scan_lines(Cs, L, Col + 1, M, Ack);
scan_lines([$\t | Cs], L, Col, M, Ack) ->
    scan_lines(Cs, L, tab(Col), M, Ack);
scan_lines([$\n | Cs], L, _Col, _M, Ack) ->
    scan_lines(Cs, L + 1, 0, 0, Ack);
scan_lines([$\r, $\n | Cs], L, _Col, _M, Ack) ->
    scan_lines(Cs, L + 1, 0, 0, Ack);
scan_lines([$\r | Cs], L, _Col, _M, Ack) ->
    scan_lines(Cs, L + 1, 0, 0, Ack);
scan_lines([$% | Cs], L, Col, M, Ack) ->
    %% Start of a comment: collect its text until end of line.
    scan_comment(Cs, "", L, Col, M, Ack);
scan_lines([$$ | Cs], L, Col, _M, Ack) ->
    %% Character literal - its character must not be read as '%' etc.
    scan_char(Cs, L, Col + 1, Ack);
scan_lines([$" | Cs], L, Col, _M, Ack) ->
    scan_string(Cs, $", L, Col + 1, Ack);
scan_lines([$' | Cs], L, Col, _M, Ack) ->
    scan_string(Cs, $', L, Col + 1, Ack);
scan_lines([_C | Cs], L, Col, _M, Ack) ->
    %% Any other character is non-whitespace: advance M along with Col.
    N = Col + 1,
    scan_lines(Cs, L, N, N, Ack);
scan_lines([], _L, _Col, _M, Ack) ->
    Ack.
%% Advance a 0-based column to the next tab stop (multiples of 8).
%% Equivalent to Col - (Col rem 8) + 8 for Erlang's truncating div/rem.
tab(Col) ->
    ((Col div 8) + 1) * 8.
%% Collect one comment's text (built reversed in Cs1) until end of line
%% or end of input, then hand off to seen_comment/6.
scan_comment([$\n | Cs], Cs1, L, Col, M, Ack) ->
    seen_comment(Cs, Cs1, L, Col, M, Ack);
scan_comment([$\r, $\n | Cs], Cs1, L, Col, M, Ack) ->
    seen_comment(Cs, Cs1, L, Col, M, Ack);
scan_comment([$\r | Cs], Cs1, L, Col, M, Ack) ->
    seen_comment(Cs, Cs1, L, Col, M, Ack);
scan_comment([C | Cs], Cs1, L, Col, M, Ack) ->
    scan_comment(Cs, [C | Cs1], L, Col, M, Ack);
scan_comment([], Cs1, L, Col, M, Ack) ->
    seen_comment([], Cs1, L, Col, M, Ack).

%% Add a comment line to the accumulator and return to normal
%% scanning. Note that we compute column positions starting at 0
%% internally, but the column values in the comment descriptors
%% should start at 1.
seen_comment(Cs, Cs1, L, Col, M, Ack) ->
    %% Compute indentation and strip trailing spaces (Cs1 is reversed,
    %% so stripping on the left removes trailing whitespace).
    N = Col - M,
    Text = lists:reverse(string:strip(Cs1, left)),
    Ack1 = [{L, Col + 1, N, Text} | Ack],
    scan_lines(Cs, L + 1, 0, 0, Ack1).
%% Skip over a string or quoted atom delimited by Quote, tracking line
%% and column, so that '%' inside quotes is never taken for a comment.
scan_string([Quote | Cs], Quote, L, Col, Ack) ->
    N = Col + 1,
    scan_lines(Cs, L, N, N, Ack);
scan_string([$\t | Cs], Quote, L, Col, Ack) ->
    scan_string(Cs, Quote, L, tab(Col), Ack);
scan_string([$\n | Cs], Quote, L, _Col, Ack) ->
    %% Newlines should really not occur in strings/atoms, but we
    %% want to be well behaved even if the input is not.
    scan_string(Cs, Quote, L + 1, 0, Ack);
scan_string([$\r, $\n | Cs], Quote, L, _Col, Ack) ->
    scan_string(Cs, Quote, L + 1, 0, Ack);
scan_string([$\r | Cs], Quote, L, _Col, Ack) ->
    scan_string(Cs, Quote, L + 1, 0, Ack);
scan_string([$\\, _C | Cs], Quote, L, Col, Ack) ->
    scan_string(Cs, Quote, L, Col + 2, Ack); % ignore character C
scan_string([_C | Cs], Quote, L, Col, Ack) ->
    scan_string(Cs, Quote, L, Col + 1, Ack);
scan_string([], _Quote, _L, _Col, Ack) ->
    %% Finish quietly.
    Ack.
%% Skip the single (possibly escaped) character after a '$' literal, so
%% e.g. $% or $" cannot be mistaken for comment or string delimiters.
scan_char([$\t | Cs], L, Col, Ack) ->
    N = tab(Col),
    scan_lines(Cs, L, N, N, Ack); % this is not just any whitespace
scan_char([$\n | Cs], L, _Col, Ack) ->
    scan_lines(Cs, L + 1, 0, 0, Ack); % handle this, just in case
scan_char([$\r, $\n | Cs], L, _Col, Ack) ->
    scan_lines(Cs, L + 1, 0, 0, Ack);
scan_char([$\r | Cs], L, _Col, Ack) ->
    scan_lines(Cs, L + 1, 0, 0, Ack);
scan_char([$\\, _C | Cs], L, Col, Ack) ->
    N = Col + 2, % character C must be ignored
    scan_lines(Cs, L, N, N, Ack);
scan_char([_C | Cs], L, Col, Ack) ->
    N = Col + 1, % character C must be ignored
    scan_lines(Cs, L, N, N, Ack);
scan_char([], _L, _Col, Ack) ->
    %% Finish quietly.
    Ack.
%% =====================================================================
%% @spec join_lines([CommentLine]) -> [Comment]
%%
%% CommentLine = {Line, Column, Indent, string()}
%% Line = integer()
%% Column = integer()
%% Indent = integer()
%% Comment = {Line, Column, Indent, Text}
%% Text = [string()]
%%
%% @doc Joins individual comment lines into multi-line comments. The
%% input is a list of entries representing individual comment lines,
%% <em>in order of decreasing line-numbers</em>; see
%% {@link scan_lines/1} for details. The result is a list of
%% entries representing <em>multi-line</em> comments, <em>still listed
%% in order of decreasing line-numbers</em>, but where for each entry,
%% `Text' is a list of consecutive comment lines in order of
%% <em>increasing</em> line-numbers (i.e., top-down).
%%
%% @see scan_lines/1
-spec join_lines([commentLine()]) -> [comment()].
%% Join individual comment lines (given in decreasing line-number order)
%% into multi-line comments; within each result entry the text lines are
%% in increasing (top-down) order.
join_lines([{Line, Col, Ind, Text} | Rest]) ->
    join_lines(Rest, [Text], Line, Col, Ind);
join_lines([]) ->
    [].

%% Accumulator clause: the current `Text' is never empty, and the input
%% list remains in reverse line-number order.
join_lines([{PrevLine, PrevCol, PrevInd, PrevText} | Rest], Text, Line, Col, Ind) ->
    %% A previous line is merged only if it immediately precedes the
    %% current one, starts at the same column, and was alone on its line
    %% (indentation + 1 equals the column). Otherwise the comment built
    %% so far is emitted and a new one is started.
    case PrevLine =:= Line - 1 andalso PrevCol =:= Col andalso PrevInd + 1 =:= Col of
        true ->
            join_lines(Rest, [PrevText | Text], PrevLine, PrevCol, PrevInd);
        false ->
            [{Line, Col, Ind, Text}
             | join_lines(Rest, [PrevText], PrevLine, PrevCol, PrevInd)]
    end;
join_lines([], Text, Line, Col, Ind) ->
    [{Line, Col, Ind, Text}].
%% =====================================================================
%% Utility functions for internal use
%% Validate that a file name is a flat list of Latin-1 characters,
%% returning a copy; report an error and abort on anything else.
filename([Char | Rest]) when is_integer(Char), Char > 0, Char =< 255 ->
    [Char | filename(Rest)];
filename([]) ->
    [];
filename(Bad) ->
    report_error("bad filename: `~P'.", [Bad, 25]),
    exit(error).
%% Report a failure to read the named file.
error_read_file(Name) ->
    report_error("error reading file `~s'.", [Name]).

%% Emit an error report via error_logger, tagged with this module's name.
report_error(Format, Args) ->
    Tagged = lists:concat([?MODULE, ": ", Format, "\n"]),
    error_logger:error_msg(Tagged, Args).
%% ===================================================================== | dependencies/otp/r15b03-1/lib/syntax_tools/src/erl_comment_scan.erl | 0.530723 | 0.457318 | erl_comment_scan.erl | starcoder |
%%% @doc This is a loose translation of the following link from ACM:
%%% http://queue.acm.org/appendices/codel.html
%%% http://pollere.net/CoDelnotes.html
%%% http://pollere.net/CoDel.html
%%%
%%% The document you want to read is
%%% "Controlling queue Delay" <NAME>, <NAME>, http://queue.acm.org/detail.cfm?id=2209336
%%% But also note that some of the other papers are interesting. Especially Kathie Nichols notes are of
%%% interest.
%%%
%%% @end
-module(sv_codel).
%% Public API
-export([new/0, new/2, in/3, out/2, len/1, remove/3]).
-export([init/2, enqueue/3, dequeue/2, delete/1]).
%% Scrutiny
-export([qstate/1]).
-type task() :: term().
-define(Q, sv_queue_ets).
%% Internal state
-record(state, {
%% The underlying queue to use. For now, since we are mainly in a test phase, we just use a standard
%% functional queue. But later the plan is to use a module here and then call the right kind of queue
%% functions for that module.
queue = ?Q:new(),
%% The `dropping' field tracks if the CoDel system is in a dropping state or not.
dropping = false,
%% If we are dropping, this value tracks the point in time where the next packet should
%% be dropped from the queue.
drop_next = 0,
%% First above time tracks when we first began seeing too much delay imposed by the queue.
%% This value may be 0 in which case it means we have not seen such a delay.
first_above_time = 0,
%% This variable tracks how many packets/jobs were recently dropped from the queue.
%% The value decays over time if no packets are dropped and is used to manipulate the control
%% law of the queue.
count = 0,
%% The `interval' and `target' are configurable parameters, described in @see init/2.
interval = 100, % ms
target = 5 %ms
}).
%% @doc Render the CoDel queue state as a proplist, for scrutiny in
%% tests and debugging.
%% @end
-spec qstate(#state{}) -> [{atom(), term()}].
qstate(#state{queue = Queue,
              dropping = Dropping,
              drop_next = DropNext,
              interval = Interval,
              target = Target,
              first_above_time = FirstAbove,
              count = Count}) ->
    [{queue, Queue},
     {dropping, Dropping},
     {drop_next, DropNext},
     {interval, Interval},
     {target, Target},
     {first_above_time, FirstAbove},
     {count, Count}].
%% Queue API
%% -----------------------------

%% Construct a CoDel queue with the default 5 ms target / 100 ms interval.
new() -> new(5, 100).

%% Construct a CoDel queue. Arguments are in milliseconds; they are
%% converted to microseconds for the internal representation.
new(Target, Interval) ->
    init(Target*1000, Interval*1000).

%% Number of jobs currently held in the underlying queue.
len(#state { queue = Q }) -> ?Q:len(Q).

%% in/out/remove are thin aliases for enqueue/dequeue/queue_remove,
%% matching the queue callback interface used by the sv framework.
in(Item, Ts, CoDelState) ->
    enqueue(Item, Ts, CoDelState).

out(Ts, CoDelState) ->
    dequeue(Ts, CoDelState).

remove(Item, TS, CoDelState) ->
    queue_remove(Item, TS, CoDelState).
%% @doc Initialize the CoDel state
%% <p>The value `Target' defines the delay target in ms. If the queue has a sojourn-time through the queue
%% which is above this value, then the queue begins to consider dropping packets.</p>
%% <p>The value `Interval' is the window we have to be above `Target' before we consider that there may be
%% problems. As such, it provides a hysteresis on the queue as well and small increases in latency does
%% not affect the queue.</p>
%% <p>Note that the interval makes sure we can use the queue as "good queue". If we get a sudden small
%% spike in jobs, then the queue will make sure they get smoothed out and processed with no loss of jobs.
%% But it also protects against "bad queue" where a standing queue won't dissipate due to consistent
%% overload of the system</p>
%% @end
-spec init(pos_integer(), pos_integer()) -> #state{}.
%% A target larger than the interval could never trigger dropping, so
%% such a configuration is rejected outright.
init(Target, Interval) when Target > Interval -> exit(misconfiguration);
init(Target, Interval) -> #state{ target = Target, interval = Interval }.

%% Release the resources held by the underlying queue (?Q:delete/1).
delete(#state { queue = Q }) -> ?Q:delete(Q).
%% @doc Enqueue a packet
%% <p>Enqueue packet `Pkt' at time `TS' into the queue.</p>
%% @end
-spec enqueue(task(), term(), #state{}) -> #state{}.
enqueue(Pkt, TS, #state { queue = Q } = State) ->
    %% The enqueue timestamp is stored alongside the packet so its
    %% sojourn time can be computed at dequeue.
    State#state { queue = ?Q:in({Pkt, TS}, TS, Q) }.

%% @doc queue_remove/3 removes a packet from the queue
%% @end
queue_remove(Item, TS, #state { queue = Q } = State) ->
    State#state { queue = ?Q:remove(Item, TS, Q) }.
%% @doc Dequeue a packet from the CoDel system
%% Given a point in time, `Now' and a CoDel `State', extract the next task from it.
%% @end
-spec dequeue(Now, InState) ->
    {empty, [Pkt], OutState} | {drop, [Pkt], OutState} | {Pkt, [Pkt], OutState}
      when
        Now :: term(),
        Pkt :: task(),
        InState :: #state{},
        OutState :: #state{}.
dequeue(Now, State) ->
    %% dodequeue/2 classifies the head packet as drop/nodrop; dequeue_/2
    %% then drives the CoDel dropping state machine on that verdict.
    dequeue_(Now, dodequeue(Now, State)).
%% Internal functions
%% ---------------------------------------------------------
%% The control law determines the next drop time: given a reference time
%% T, the interval I, and C drops so far in this round, the next drop is
%% scheduled at T + I/sqrt(C) -- drops become more frequent the longer
%% the queue stays congested.
control_law(Time, Interval, Count) ->
    Time + Interval / math:sqrt(Count).
%% This is a helper function. It dequeues from the underlying queue and then analyzes the Sojourn
%% time together with the next function, dodequeue_.
dodequeue({TS, _Uniq} = Now, #state { queue = Q } = State) ->
    case ?Q:out(Now, Q) of
        {empty, [], NQ} ->
            %% Empty queue: any standing-delay measurement is stale, so
            %% first_above_time is reset.
            sv:report(TS div 1000, {dodequeue, 0, 0}),
            {nodrop, empty, State#state { first_above_time = 0, queue = NQ }};
        {{Pkt, {InT, _}}, [], NQ} ->
            %% Sojourn is how long the packet sat in the queue; the
            %% `div 1000' in the reports suggests timestamps are in
            %% microseconds (reported in ms).
            Sojourn = TS - InT,
            sv:report(TS div 1000, {dodequeue, ?Q:len(NQ), Sojourn div 1000}),
            dodequeue_(Now, Pkt, Sojourn, State#state { queue = NQ })
    end.
%% Case split:
%% The sojourn time through the queue is less than our target value.
%% Thus, we should not drop, and we reset when we were first above.
dodequeue_(_Now, Pkt, Sojourn, #state { target = T } = State)
  when Sojourn < T ->
    {nodrop, Pkt, State#state { first_above_time = 0 }};
%% We are above target, but this is the first time we are above target.
%% We set up the point in time when we went above the target to start
%% tracking this.
%% NOTE: despite its name, first_above_time stores TS + interval, i.e.
%% the deadline at which dropping may begin.
dodequeue_({TS, _}, Pkt, _Sojourn, #state { first_above_time = FAT, interval = I } = State)
  when FAT == 0 ->
    {nodrop, Pkt, State#state { first_above_time = TS + I }};
%% We have been above target for more than one interval. This is when we need to start dropping.
dodequeue_({TS, _}, Pkt, _Sojourn, #state { first_above_time = FAT } = State)
  when TS >= FAT ->
    {drop, Pkt, State};
%% We are above target, but we have not yet been above target for a complete interval. Wait and see
%% what happens, but don't begin dropping packets just yet.
dodequeue_(_Now, Pkt, _Sojourn, State) ->
    {nodrop, Pkt, State}.
%% Dequeue worker. This drives the meat of the dequeue steps.
%% Returns {Pkt | empty, DroppedPkts, NewState}.
%% Case split:
%% We are in the dropping state, but are transitioning to not dropping.
dequeue_(Now, {nodrop, Pkt, #state { dropping = true } = State}) ->
    dequeue_drop_next(Now, Pkt, State#state { dropping = false }, []);
%% We are in the dropping state and are to continue dropping.
dequeue_(Now, {drop, Pkt, #state { dropping = true } = State}) ->
    dequeue_drop_next(Now, Pkt, State, []);
%% We are not in the dropping state, but should start dropping.
dequeue_(Now, {drop, Pkt, #state { dropping = false } = State}) ->
    dequeue_start_drop(Now, Pkt, State);
%% Default case for normal operation.
dequeue_(_Now, {nodrop, Pkt, #state { dropping = false } = State}) ->
    {Pkt, [], State}.
%% Consider dropping the next packet from the queue.
%% This function drives a loop until the next timepoint
%% where we should drop is in the future. The helper
%% dequeue_drop_next_/3 carries out the book-keeping
%% The guard also requires dropping =:= true, so a state that left the
%% dropping mode falls through to the second clause and returns.
dequeue_drop_next({TS, _} = Now, Pkt, #state { drop_next = DN, dropping = true } = State, Dropped)
  when TS >= DN ->
    dequeue_drop_next_(Now, dodequeue(Now, State), [Pkt | Dropped]);
dequeue_drop_next(_Now, Pkt, State, Dropped) ->
    {Pkt, Dropped, State}.
%% If the Sojourn time improves, we leave the dropping state.
dequeue_drop_next_(Now, {nodrop, Pkt, State}, Dropped) ->
    dequeue_drop_next(Now, Pkt, State#state { dropping = false }, Dropped);
%% We are still to drop packets, so update the count and the
%% control law for the next loop round.
dequeue_drop_next_(
    Now,
    {drop, Pkt,
     #state {
         count = C,
         interval = I,
         drop_next = DN } = State},
    Dropped) ->
    %% The drop_next deadline advances from the previous deadline (not
    %% from Now), with the incremented count feeding the control law.
    NewState = State#state { count = C + 1, drop_next = control_law(DN, I, C+1) },
    dequeue_drop_next(Now, Pkt, NewState, Dropped).
%% Function for setting up the dropping state. When we start dropping, we evaluate a bit on
%% how long ago we last dropped. If we did this recently, we do not start off from the bottom of
%% the control law, but rather pick a point a bit up the function. On the other hand, if it is a long time
%% ago, we just pick the usual starting point of 1.
dequeue_start_drop({TS, _}, Pkt, #state { drop_next = DN, interval = Interval, count = Count } = State)
  when TS - DN < Interval, Count > 2 ->
    %% Recent drop round: resume the control law near where it left off.
    {drop, [Pkt], State#state {
        dropping = true,
        count = Count - 2,
        drop_next = control_law(TS, Interval, Count - 2) }};
dequeue_start_drop({TS, _}, Pkt, #state { interval = I } = State) ->
    %% Fresh drop round: start the control law from count 1.
    {drop, [Pkt], State#state {
        dropping = true,
        count = 1,
        drop_next = control_law(TS, I, 1) }}.
%% -------------------------------------------------------------------
%%
%% Copyright <2013-2018> <
%% Technische Universität Kaiserslautern, Germany
%% Université Pierre et Marie Curie / Sorbonne-Université, France
%% Universidade NOVA de Lisboa, Portugal
%% Université catholique de Louvain (UCL), Belgique
%% INESC TEC, Portugal
%% >
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either expressed or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% List of the contributors to the development of Antidote: see AUTHORS file.
%% Description and complete License: see LICENSE file.
%% -------------------------------------------------------------------
%% antidote_crdt_counter_fat: A convergent, replicated, operation based Fat Counter
%% The state of this fat counter is list of pairs where each pair is an integer
%% and a related token.
%% Basically when the counter recieves {increment, N} or {decrement, N} it generates
%% a pair {N, NewToken}.
%% On update, all seen tokens are removed and the new pair is then added to the state.
%% This token keeps growing ("Fat" Counter) but it useful as it allows the reset
%% functionaility, On reset(), all seen tokens are removed.
%% link to paper: http://haslab.uminho.pt/cbm/files/a3-younes.pdf
-module(antidote_crdt_counter_fat).
-behaviour(antidote_crdt).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([
new/0,
value/1,
downstream/2,
update/2,
equal/2,
to_binary/1,
from_binary/1,
is_operation/1,
is_bottom/1,
require_state_downstream/1
]).
-type uniqueToken() :: term().
-type state() :: orddict:orddict(uniqueToken(), integer()).
-type op() ::
{increment, integer()}
| {decrement, integer()}
| {reset, {}}.
-type effect() ::
{uniqueToken(), integer()}
| [uniqueToken()].
%% @doc Create a new, empty fat counter
-spec new() -> state().
new() ->
orddict:new().
%% @doc The value of this counter is equal to the sum of all the values
%% having tokens.
-spec value(state()) -> integer().
value(FatCounter) ->
lists:sum([V || {_, V} <- FatCounter]).
-spec downstream(op(), state()) -> {ok, effect()}.
downstream(Op, FatCtr) ->
Token = unique(),
case Op of
{increment, Value} when is_integer(Value) ->
{ok, {Token, Value}};
{decrement, Value} when is_integer(Value) ->
{ok, {Token, -Value}};
{reset, {}} ->
{ok, orddict:fetch_keys(FatCtr)}
end.
-spec unique() -> uniqueToken().
unique() ->
crypto:strong_rand_bytes(20).
-spec update(effect(), state()) -> {ok, state()}.
update({Token, Value}, FatCtr) ->
% insert new value
{ok, orddict:store(Token, Value, FatCtr)};
update(Overridden, FatCtr) ->
{ok, apply_downstreams(Overridden, FatCtr)}.
%% @private apply a list of downstream ops to a given orset
apply_downstreams([], FatCtr) ->
FatCtr;
apply_downstreams(_Tokens, []) ->
[];
apply_downstreams([Token1 | TokensRest] = Tokens, [{Token2, Value2} | FatCtrRest] = FatCtr) ->
if
Token1 == Token2 ->
apply_downstreams(TokensRest, FatCtrRest);
Token1 > Token2 ->
[{Token2, Value2} | apply_downstreams(Tokens, FatCtrRest)];
true ->
apply_downstreams(TokensRest, FatCtr)
end.
-spec equal(state(), state()) -> boolean().
equal(FatCtr1, FatCtr2) ->
FatCtr1 == FatCtr2.
-define(TAG, 85).
-define(V1_VERS, 1).
-spec to_binary(state()) -> binary().
to_binary(FatCtr) ->
<<?TAG:8/integer, ?V1_VERS:8/integer, (term_to_binary(FatCtr))/binary>>.
%% @doc Decode binary
-spec from_binary(binary()) -> {ok, state()} | {error, term()}.
from_binary(<<?TAG:8/integer, ?V1_VERS:8/integer, Bin/binary>>) ->
{ok, antidote_crdt:from_binary(Bin)}.
is_bottom(FatCtr) ->
FatCtr == new().
%% @doc The following operation verifies
%% that Operation is supported by this particular CRDT.
-spec is_operation(term()) -> boolean().
is_operation({increment, Value}) when is_integer(Value) -> true;
is_operation({decrement, Value}) when is_integer(Value) -> true;
is_operation({reset, {}}) -> true;
is_operation(_) -> false.
require_state_downstream(Op) ->
Op == {reset, {}}.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
new_test() ->
?assertEqual(0, value(new())).
update_increment_test() ->
FatCnt0 = new(),
{ok, Increment1} = downstream({increment, 5}, FatCnt0),
{ok, FatCnt1} = update(Increment1, FatCnt0),
{ok, Decrement1} = downstream({decrement, 2}, FatCnt1),
{ok, FatCnt2} = update(Decrement1, FatCnt1),
{ok, Increment2} = downstream({increment, 1}, FatCnt2),
{ok, FatCnt3} = update(Increment2, FatCnt2),
{ok, Reset1} = downstream({reset, {}}, FatCnt3),
{ok, FatCnt4} = update(Reset1, FatCnt3),
{ok, Decrement2} = downstream({decrement, 2}, FatCnt4),
{ok, FatCnt5} = update(Decrement2, FatCnt4),
io:format("FatCnt0 = ~p~n", [FatCnt0]),
io:format("Increment1 = ~p~n", [Increment1]),
io:format("FatCnt1 = ~p~n", [FatCnt1]),
io:format("Decrement1 = ~p~n", [Decrement1]),
io:format("FatCnt2 = ~p~n", [FatCnt2]),
?assertEqual(0, value(FatCnt0)),
?assertEqual(5, value(FatCnt1)),
?assertEqual(3, value(FatCnt2)),
?assertEqual(4, value(FatCnt3)),
?assertEqual(0, value(FatCnt4)),
?assertEqual(-2, value(FatCnt5)).
-endif. | apps/antidote_crdt/src/antidote_crdt_counter_fat.erl | 0.530723 | 0.448789 | antidote_crdt_counter_fat.erl | starcoder |
-module(h3).
-export([num_hexagons/1,
edge_length_meters/1,
edge_length_kilometers/1,
hex_area_m2/1,
hex_area_km2/1,
from_geo/2,
to_geo/1,
to_geo_boundary/1,
to_string/1,
from_string/1,
get_resolution/1,
get_base_cell/1,
is_valid/1,
is_class3/1,
is_pentagon/1,
parent/2,
children/2,
k_ring/2,
k_ring_distances/2,
max_k_ring_size/1,
compact/1,
uncompact/2,
indices_are_neighbors/2,
get_unidirectional_edge/2,
grid_distance/2
]).
-on_load(init/0).
-define(APPNAME, h3).
-define(LIBNAME, 'h3').
-type coord() :: {float(), float()}.
-type h3index() :: non_neg_integer().
-type resolution() :: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15.
%% @doc Number of unique H3 indexes at the given resolution.
%% NIF stub: replaced by the native implementation when init/0 loads the
%% shared library; calling it unloaded raises a not_loaded error.
-spec num_hexagons(resolution()) -> pos_integer().
num_hexagons(_) ->
    not_loaded(?LINE).

%% @doc Hexagon edge length in meters at the given resolution, per the
%% underlying H3 C library (NIF stub).
-spec edge_length_meters(resolution()) -> float().
edge_length_meters(_) ->
    not_loaded(?LINE).

%% @doc Hexagon edge length in kilometers at the given resolution, per
%% the underlying H3 C library (NIF stub).
-spec edge_length_kilometers(resolution()) -> float().
edge_length_kilometers(_) ->
    not_loaded(?LINE).

%% @doc Hexagon area in square meters at the given resolution (NIF stub).
-spec hex_area_m2(non_neg_integer()) -> float().
hex_area_m2(_) ->
    not_loaded(?LINE).

%% @doc Hexagon area in square kilometers at the given resolution
%% (NIF stub).
-spec hex_area_km2(non_neg_integer()) -> float().
hex_area_km2(_) ->
    not_loaded(?LINE).
%% @doc Indexes the location at the specified resolution. Takes the
%% coordinate in degrees. (NIF stub; see init/0.)
from_geo(_, _) ->
    not_loaded(?LINE).

%% @doc Finds the centroid of the index. Returns the coordinate in
%% degrees.
-spec to_geo(h3index()) -> coord().
to_geo(_) ->
    not_loaded(?LINE).

%% @doc Finds the geo boundary of the given index. The returned list
%% of coordinates is in degrees.
-spec to_geo_boundary(h3index()) -> [coord()].
to_geo_boundary(_) ->
    not_loaded(?LINE).

%% @doc Converts the given index to its string representation.
-spec to_string(h3index()) -> string().
to_string(_) ->
    not_loaded(?LINE).

%% @doc Converts the string representation to an index.
-spec from_string(string()) -> h3index().
from_string(_) ->
    not_loaded(?LINE).
%% @doc Returns the resolution of the index.
-spec get_resolution(h3index()) -> resolution().
get_resolution(_) ->
    not_loaded(?LINE).

%% @doc Returns the base cell number of the index.
-spec get_base_cell(h3index()) -> h3index().
get_base_cell(_) ->
    not_loaded(?LINE).

%% @doc Returns whether the given index is valid.
-spec is_valid(h3index()) -> boolean().
is_valid(_) ->
    not_loaded(?LINE).

%% @doc Returns whether the given index has a resolution with Class III orientation.
-spec is_class3(h3index()) -> boolean().
is_class3(_) ->
    not_loaded(?LINE).

%% @doc Returns whether the given index represents a pentagonal cell.
-spec is_pentagon(h3index()) -> boolean().
is_pentagon(_) ->
    not_loaded(?LINE).

%% @doc Returns the parent (coarser) index containing the given Index.
%% Requests for higher resolutions than the resolution of `Index'
%% are an error.
-spec parent(Index::h3index(), ParentRes::resolution()) -> h3index().
parent(_, _) ->
    not_loaded(?LINE).

%% @doc Returns the children contained by the given Index at the given child resolution ChildRes.
-spec children(Index::h3index(), ChildRes::resolution()) -> [h3index(),...].
children(_, _) ->
    not_loaded(?LINE).
%% @doc Produces indices within the given distance K from the given
%% origin Index. k-ring 0 is defined as the origin index, k-ring 1 is
%% defined as k-ring 0 and all neighboring indices, and so on. Output
%% is returned in no particular order.
-spec k_ring(Index::h3index(), K::non_neg_integer()) -> [h3index(),...].
k_ring(_, _) ->
    not_loaded(?LINE).

%% @doc Produces indices and the associated distance within the given
%% distance K from the given origin Index. k-ring 0 is defined as the
%% origin index, k-ring 1 is defined as k-ring 0 and all neighboring
%% indices, and so on. Output is returned in no particular order.
-spec k_ring_distances(Index::h3index(), K::non_neg_integer()) ->
    [{h3index(), non_neg_integer()},...].
k_ring_distances(_, _) ->
    not_loaded(?LINE).

%% @doc Returns the maximum number of indices that result from the
%% k-ring algorithm with the given K.
-spec max_k_ring_size(K::non_neg_integer()) -> non_neg_integer().
max_k_ring_size(_) ->
    not_loaded(?LINE).

%% @doc Compacts the given list of indexes as best as
%% possible. Returns a list of at most the same length as the input
%% list.
-spec compact([h3index(),...]) -> [h3index(),...].
compact(_) ->
    not_loaded(?LINE).

%% @doc Uncompacts given list of indexes to the given resolution.
-spec uncompact([h3index(),...], resolution()) -> [h3index(),...].
uncompact(_,_) ->
    not_loaded(?LINE).

%% @doc Returns whether the two indexes are neighboring cells (NIF stub).
-spec indices_are_neighbors(h3index(), h3index()) -> boolean().
indices_are_neighbors(_, _) ->
    not_loaded(?LINE).

%% @doc Returns the distance in grid cells between the two
%% indexes. Throws a `badarg' if the distance can not be
%% found. Finding the distance can fail because the two indexes are
%% not comparable (different resolutions), too far apart, or are
%% separated by pentagonal distortion.
-spec grid_distance(h3index(), h3index()) -> integer().
grid_distance(_, _) ->
    not_loaded(?LINE).

%% @doc Returns a unidirectional edge based on the given origin
%% and destination.
-spec get_unidirectional_edge(Origin::h3index(), Destination::h3index()) -> Edge::h3index().
get_unidirectional_edge(_, _) ->
    not_loaded(?LINE).
%% Locate and load the native (NIF) shared library. The library is
%% looked up in the application's priv directory, falling back to a
%% relative path when running from an unpacked build tree.
init() ->
    Path =
        case code:priv_dir(?APPNAME) of
            {error, bad_name} ->
                case filelib:is_dir(filename:join(["..", priv])) of
                    true -> filename:join(["..", priv, ?LIBNAME]);
                    false -> filename:join([priv, ?LIBNAME])
                end;
            PrivDir ->
                filename:join(PrivDir, ?LIBNAME)
        end,
    erlang:load_nif(Path, 0).
%% Fallback body shared by every NIF stub: raises a descriptive error
%% when the native library has not been loaded.
not_loaded(Line) ->
    Info = [{module, ?MODULE}, {line, Line}],
    erlang:nif_error({not_loaded, Info}).
%% Licensed under the Apache License, Version 2.0 (the "License"); you may
%% not use this file except in compliance with the License. You may obtain
%% a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% Alternatively, you may use this file under the terms of the GNU Lesser
%% General Public License (the "LGPL") as published by the Free Software
%% Foundation; either version 2.1, or (at your option) any later version.
%% If you wish to allow use of your version of this file only under the
%% terms of the LGPL, you should delete the provisions above and replace
%% them with the notice and other provisions required by the LGPL; see
%% <http://www.gnu.org/licenses/>. If you do not delete the provisions
%% above, a recipient may use your version of this file under the terms of
%% either the Apache License or the LGPL.
%%
%% @author <NAME> <<EMAIL>>
%% @copyright 2006 <NAME>
%% @private
%% @see eunit
%% @doc Event serializing and multiplexing process, to be used as the
%% main "supervisor" process for en EUnit test runner. See eunit_proc
%% for details about the events that will be sent to the listeners
%% (provided to this process at startup). This process guarantees that
%% listeners will receive events in order, even if tests execute in
%% parallel. For every received 'begin' event, there will be exactly one
%% 'end' or 'cancel' event. For a cancelling event with identifier Id,
%% no further events will arrive whose identifiers have Id as prefix.
-module(eunit_serial).
-include("eunit.hrl").
-include("eunit_internal.hrl").
-export([start/1]).
%% Notes:
%% * Due to concurrency, there are no guarantees that we will receive
%% all status messages for the items within a group before we receive
%% the 'end' message of the group itself.
%%
%% * A cancelling event may arrive at any time, and may concern items we
%% are not yet expecting (if tests are executed in parallel), or may
%% concern not only the current item but possibly a group ancestor of
%% the current item (as in the case of a group timeout).
%%
%% * It is not possible to use selective receive to extract only those
%% cancelling messages that affect the current item and its parents;
%% basically, because we cannot have a dynamically computed prefix as a
%% pattern in a receive. Hence, we must extract each cancelling event as
%% it arrives and keep track of them separately.
%%
%% * Before we wait for a new item, we must check whether it (and thus
%% also all its subitems, if any) is already cancelled.
%%
%% * When a new cancelling event arrives, we must either store it for
%% future use, and/or cancel the current item and possibly one or more
%% of its parent groups.
-record(state, {listeners :: sets:set(),
cancelled = eunit_lib:trie_new() :: gb_trees:tree(),
messages = dict:new() :: dict:dict()}).
%% Start the serializer process; Pids are the listener processes that
%% will receive the ordered event stream.
start(Pids) ->
    spawn(serializer_fun(Pids)).

%% Build the serializer's main closure: initialize the state, run the
%% top-level expect loop with the root id [], then exit normally.
%% NOTE(review): `cancelled' and `messages' are also set by the record
%% defaults above, so the explicit initializers here look redundant --
%% confirm before removing.
serializer_fun(Pids) ->
    fun () ->
            St = #state{listeners = sets:from_list(Pids),
                        cancelled = eunit_lib:trie_new(),
                        messages = dict:new()},
            expect([], undefined, 0, St),
            exit(normal)
    end.
%% collect beginning and end of an expected item; return {Done, NewSt}
%% where Done is true if there are no more items of this group
expect(Id, ParentId, GroupMinSize, St0) ->
    case wait(Id, 'begin', ParentId, GroupMinSize, St0) of
        {done, St1} ->
            %% The parent group ended before this item began, so the
            %% item does not exist and the group is complete.
            {true, St1};
        {cancel, prefix, _Msg, St1} ->
            %% if a parent caused the cancel, signal done with group and
            %% cast no cancel event (since the item might not exist)
            {true, St1};
        {cancel, exact, Msg, St1} ->
            cast_cancel(Id, Msg, St1),
            {false, St1};
        {ok, Msg, St1} ->
            %%?debugVal({got_begin, Id, Msg}),
            cast(Msg, St1),
            %% If the item is a group, serialize all of its children
            %% before waiting for the matching 'end' event.
            St2 = case Msg of
                      {status, _, {progress, 'begin', {group, _Info}}} ->
                          group(Id, 0, St1);
                      _ ->
                          St1
                  end,
            case wait(Id, 'end', ParentId, GroupMinSize, St2) of
                {cancel, Why, Msg1, St3} ->
                    %% we know the item exists, so always cast a cancel
                    %% event, and signal done with the group if a parent
                    %% caused the cancel
                    cast_cancel(Id, Msg1, St3),
                    {(Why =:= prefix), St3};
                {ok, Msg1, St3} ->
                    %%?debugVal({got_end, Id, Msg1}),
                    cast(Msg1, St3),
                    {false, St3}
            end
    end.
%% Collect the items of a group in order, one index at a time, until
%% expect/4 reports that the group has no more items.
group(ParentId, GroupMinSize, St) ->
    Index = GroupMinSize + 1,
    case expect(ParentId ++ [Index], ParentId, GroupMinSize, St) of
        {true, St1} ->
            St1;
        {false, St1} ->
            group(ParentId, Index, St1)
    end.
%% Broadcast a cancel event for Id; when no message was stored for the
%% item (it was cancelled implicitly), synthesize a reasonable one.
cast_cancel(Id, undefined, St) ->
    cast({status, Id, {cancel, undefined}}, St);
cast_cancel(_Id, Msg, St) ->
    cast(Msg, St).

%% Send Msg to every registered listener.
cast(Msg, #state{listeners = Listeners}) ->
    lists:foreach(fun (L) -> L ! Msg end, sets:to_list(Listeners)),
    ok.
%% wait for a particular begin or end event, that might have arrived or
%% been cancelled already, or might become cancelled later, or might not
%% even exist (for the last+1 element of a group)
wait(Id, Type, ParentId, GroupMinSize, St) ->
    %%?debugVal({wait, Id, Type}),
    case check_cancelled(Id, St) of
        no ->
            %% Not cancelled: deliver a previously buffered message for
            %% Id if one exists, otherwise block in wait_1/5.
            case recall(Id, St) of
                undefined ->
                    wait_1(Id, Type, ParentId, GroupMinSize, St);
                Msg ->
                    {ok, Msg, forget(Id, St)}
            end;
        Why ->
            %% Why is 'exact' or 'prefix'; see check_cancelled/2.
            %%?debugVal({cancelled, Why, Id, ParentId}),
            {cancel, Why, recall(Id, St), forget(Id, St)}
    end.
%% the event has not yet arrived or been cancelled - wait for more info
wait_1(Id, Type, ParentId, GroupMinSize, St) ->
    receive
        {status, Id, {progress, Type, _}}=Msg ->
            %%?debugVal({Type, ParentId, Id}),
            {ok, Msg, St};
        {status, ParentId, {progress, 'end', {GroupMinSize, _}}}=Msg ->
            %% the parent group ended (the final status of a group is
            %% the count of its subitems), and we have seen all of its
            %% subtests, so the currently expected event does not exist
            %%?debugVal({end_group, ParentId, Id, GroupMinSize}),
            {done, remember(ParentId, Msg, St)};
        {status, SomeId, {cancel, _Cause}}=Msg ->
            %%?debugVal({got_cancel, SomeId, _Cause}),
            %% A cancel may concern any item, not just the current one;
            %% record it and loop back to re-check whether it affects Id.
            St1 = set_cancelled(SomeId, Msg, St),
            wait(Id, Type, ParentId, GroupMinSize, St1)
    end.
%% Mark the subtree rooted at Id as cancelled, remembering the cancel
%% message so it can be cast later.
set_cancelled(Id, Msg, St0) ->
    St1 = remember(Id, Msg, St0),
    Cancelled = eunit_lib:trie_store(Id, St0#state.cancelled),
    St1#state{cancelled = Cancelled}.

%% Tell whether Id is cancelled: 'no', 'exact' (Id itself), or 'prefix'
%% (an ancestor group of Id was cancelled).
check_cancelled(Id, St) ->
    eunit_lib:trie_match(Id, St#state.cancelled).

%% Stash a message under Id for later retrieval.
remember(Id, Msg, St) ->
    St#state{messages = dict:store(Id, Msg, St#state.messages)}.

%% Overwrite the stored message for Id so it can be garbage collected.
forget(Id, St) ->
    St#state{messages = dict:store(Id, undefined, St#state.messages)}.

%% Fetch the message stored under Id, or 'undefined' if there is none.
recall(Id, St) ->
    case dict:find(Id, St#state.messages) of
        error -> undefined;
        {ok, Msg} -> Msg
    end.
%% Stolen/borrowed from
%% https://github.com/Andrew-William-Smith/advent-of-code/blob/2021-erlang/src/aoc2021_day22.erl
-module(aoc2021_day22).
-behavior(aoc_puzzle).
-export([parse/1, solve1/1, solve2/1, info/0]).
-include("aoc_puzzle.hrl").
-include_lib("eunit/include/eunit.hrl").
%% @doc Puzzle metadata consumed by the aoc_puzzle framework; `expected'
%% holds the known-correct answers for parts 1 and 2.
-spec info() -> aoc_puzzle().
info() ->
    #aoc_puzzle{module = ?MODULE,
                year = 2021,
                day = 22,
                name = "Reactor Reboot",
                expected = {601104, 1262883317822267},
                has_input_file = true}.
-type input_type() :: any().
-type result_type() :: integer().
%% @doc Parses the raw puzzle input into a list of reboot steps.
%% Each line has the shape "on x=A..B,y=C..D,z=E..F"; the inclusive
%% puzzle bounds are converted to half-open intervals {Lo, Hi + 1}
%% on every axis.
-spec parse(Binary :: binary()) -> input_type().
parse(Binary) ->
    Lines = binary:split(Binary, <<"\n">>, [trim_all, global]),
    [parse_step(Line) || Line <- Lines].

%% One "on/off x=..,y=..,z=.." line ->
%% {on | off, {{XLo, XHi}, {YLo, YHi}, {ZLo, ZHi}}}.
parse_step(Line) ->
    [OnOff, XMin, XMax, YMin, YMax, ZMin, ZMax] =
        string:tokens(binary_to_list(Line), " =..,xyz"),
    Bound = fun(Lo, Hi) ->
                    {list_to_integer(Lo), list_to_integer(Hi) + 1}
            end,
    {list_to_atom(OnOff),
     {Bound(XMin, XMax), Bound(YMin, YMax), Bound(ZMin, ZMax)}}.
-spec solve1(Input :: input_type()) -> result_type().
%% Part 1: keep only the steps whose cuboid overlaps the +/-50
%% initialization region (cuboid_difference returns [C] exactly when
%% the two cuboids are disjoint), run them, and sum the lit volume.
solve1(Input) ->
    BoundCuboid = {{-50, 50}, {-50, 50}, {-50, 50}},
    InBounds = [Step || {_, C} = Step <- Input, cuboid_difference(C, BoundCuboid) /= [C]],
    lists:sum(
        lists:map(fun cuboid_volume/1, reboot_reactor([], InBounds))).

-spec solve2(Input :: input_type()) -> result_type().
%% Part 2: run every reboot step and sum the resulting lit volume.
solve2(Input) ->
    lists:sum(
        lists:map(fun cuboid_volume/1, reboot_reactor([], Input))).
%% cuboid_difference(A, B) -> [Cuboid]
%% Subtracts cuboid B from cuboid A, returning disjoint cuboids that
%% together cover A \ B. Intervals come from parse/1 as {Min, Max + 1}.
%% Disjoint case: no overlap on some axis, so A survives intact.
cuboid_difference({{XL1, XH1}, {YL1, YH1}, {ZL1, ZH1}} = A,
                  {{XL2, XH2}, {YL2, YH2}, {ZL2, ZH2}})
    when XL1 > XH2
         orelse XL2 > XH1
         orelse YL1 > YH2
         orelse YL2 > YH1
         orelse ZL1 > ZH2
         orelse ZL2 > ZH1 ->
    [A];
%% Overlapping case: peel off up to six slabs of A lying outside B
%% (top/bottom on Y, left/right on X, near/far on Z); each later slab
%% is clamped to the already-consumed bounds so the pieces stay disjoint.
cuboid_difference({{XL1, XH1} = X1, {YL1, YH1}, {ZL1, ZH1} = Z1},
                  {{XL2, XH2}, {YL2, YH2}, {ZL2, ZH2}}) ->
    Top = ?_if(YL2 > YL1, [{X1, {YL1, YL2}, Z1}], []),
    Bottom = ?_if(YH1 > YH2, [{X1, {YH2, YH1}, Z1}], []),
    YBounds = {max(YL1, YL2), min(YH1, YH2)},
    Left = ?_if(XL2 > XL1, [{{XL1, XL2}, YBounds, Z1}], []),
    Right = ?_if(XH1 > XH2, [{{XH2, XH1}, YBounds, Z1}], []),
    XBounds = {max(XL1, XL2), min(XH1, XH2)},
    Near = ?_if(ZL2 > ZL1, [{XBounds, YBounds, {ZL1, ZL2}}], []),
    Far = ?_if(ZH1 > ZH2, [{XBounds, YBounds, {ZH2, ZH1}}], []),
    Top ++ Bottom ++ Left ++ Right ++ Near ++ Far.
%% reboot_reactor(LitCuboids, Steps) -> LitCuboids'
%% Folds the reboot steps over an accumulator of disjoint lit cuboids.
%% Each new cuboid is first carved out of every existing one; an 'on'
%% step then adds the new cuboid, an 'off' step leaves the hole.
reboot_reactor(Cuboids, []) ->
    Cuboids;
reboot_reactor(Cuboids, [{Instruction, NewCuboid} | RestStep]) ->
    Difference = lists:flatten([cuboid_difference(C, NewCuboid) || C <- Cuboids]),
    NextCuboids =
        case Instruction of
            on ->
                [NewCuboid | Difference];
            off ->
                Difference
        end,
    reboot_reactor(NextCuboids, RestStep).
%% Volume of a half-open cuboid {{XLo,XHi},{YLo,YHi},{ZLo,ZHi}}.
cuboid_volume({{XLo, XHi}, {YLo, YHi}, {ZLo, ZHi}}) ->
    (XHi - XLo) * (YHi - YLo) * (ZHi - ZLo). | src/2021/aoc2021_day22.erl | 0.642881 | 0.642012 | aoc2021_day22.erl | starcoder
-module(gleam_stdlib).
-include_lib("eunit/include/eunit.hrl").
-export([expect_equal/2, expect_not_equal/2, expect_true/1, expect_false/1,
expect_is_ok/1, expect_is_error/1, atom_from_string/1,
atom_create_from_string/1, atom_to_string/1, map_get/2,
iodata_append/2, iodata_prepend/2, identity/1, decode_int/1,
decode_string/1, decode_bool/1, decode_float/1, decode_thunk/1, decode_atom/1,
decode_pair/1, decode_list/1, decode_field/2, parse_int/1, parse_float/1, compare_strings/2]).
%% Thin wrappers that expose EUnit's assertion macros to Gleam code;
%% note the argument order is swapped to (Actual, Expected).
expect_equal(Actual, Expected) -> ?assertEqual(Expected, Actual).
expect_not_equal(Actual, Expected) -> ?assertNotEqual(Expected, Actual).
expect_true(A) -> ?assert(A).
expect_false(A) -> ?assertNot(A).
expect_is_ok(A) -> ?assertMatch({ok, _}, A).
expect_is_error(A) -> ?assertMatch({error, _}, A).
%% Looks up Key in Map, converting the maps API result into the
%% convention used by the Gleam runtime: {ok, Value} on a hit and
%% {error, nil} when the key is absent.
map_get(Map, Key) ->
    case maps:find(Key, Map) of
        {ok, Value} ->
            {ok, Value};
        error ->
            {error, nil}
    end.
%% Creates (or returns) the atom named by the given UTF-8 binary.
%% Creates new atoms unconditionally; atoms are never garbage collected.
atom_create_from_string(S) ->
    binary_to_atom(S, utf8).

%% Returns the name of an atom as a UTF-8 binary.
atom_to_string(S) ->
    atom_to_binary(S, utf8).

%% Looks up an already-existing atom by name; returns
%% {error, atom_not_loaded} if no such atom has been created yet.
atom_from_string(S) ->
    try {ok, binary_to_existing_atom(S, utf8)} catch
        error:badarg -> {error, atom_not_loaded}
    end.

%% Iodata concatenation in O(1): simply nest the pieces.
iodata_append(Iodata, String) -> [Iodata, String].
iodata_prepend(Iodata, String) -> [String, Iodata].

%% Returns the argument unchanged.
identity(X) -> X.
%% Builds a uniform decode failure: {error, <<"Expected ..., got `...`">>}.
decode_error_msg(Type, Data) ->
    {error, iolist_to_binary(io_lib:format("Expected ~s, got `~p`", [Type, Data]))}.

%% Dynamic decoders: each checks the runtime type of Data and returns
%% {ok, Data} on success or a descriptive {error, Binary} otherwise.
decode_atom(Data) when is_atom(Data) -> {ok, Data};
decode_atom(Data) -> decode_error_msg("an Atom", Data).

decode_string(Data) when is_binary(Data) -> {ok, Data};
decode_string(Data) -> decode_error_msg("a String", Data).

decode_int(Data) when is_integer(Data) -> {ok, Data};
decode_int(Data) -> decode_error_msg("an Int", Data).

decode_float(Data) when is_float(Data) -> {ok, Data};
decode_float(Data) -> decode_error_msg("a Float", Data).

decode_bool(Data) when is_boolean(Data) -> {ok, Data};
decode_bool(Data) -> decode_error_msg("a Bool", Data).

decode_thunk(Data) when is_function(Data, 0) -> {ok, Data};
decode_thunk(Data) -> decode_error_msg("a zero arity function", Data).

decode_pair(Data = {_, _}) -> {ok, Data};
decode_pair(Data) -> decode_error_msg("a 2 element tuple", Data).

decode_list(Data) when is_list(Data) -> {ok, Data};
decode_list(Data) -> decode_error_msg("a List", Data).
%% Extracts Key from a map; a non-map value or a map missing Key both
%% fall through to the catch-all clause and yield a decode error.
decode_field(Data, Key) ->
    case Data of
        #{Key := Value} ->
            {ok, Value};
        _ ->
            decode_error_msg(io_lib:format("a map with key `~p`", [Key]), Data)
    end.
%% Parses a binary as a base-10 integer. Succeeds with {ok, Int} only
%% when the entire binary is consumed; any trailing characters or a
%% non-numeric prefix yield {error, nil}.
parse_int(Bin) ->
    Chars = binary:bin_to_list(Bin),
    case string:to_integer(Chars) of
        {Int, []} ->
            {ok, Int};
        _Other ->
            {error, nil}
    end.
%% Parses a binary as a float. Succeeds with {ok, Float} only when the
%% entire binary is consumed; note that string:to_float/1 rejects plain
%% integers such as <<"42">>, which therefore return {error, nil}.
parse_float(Bin) ->
    Chars = binary:bin_to_list(Bin),
    case string:to_float(Chars) of
        {Float, []} ->
            {ok, Float};
        _Other ->
            {error, nil}
    end.
%% Three-way comparison of the two binaries using Erlang term order:
%% returns eq, lt or gt.
compare_strings(Lhs, Rhs) ->
    if
        Lhs == Rhs ->
            eq;
        Lhs < Rhs ->
            lt;
        true ->
            gt
    end. | src/gleam_stdlib.erl | 0.52975 | 0.661445 | gleam_stdlib.erl | starcoder
%% vim: set ai et sw=4 sts=4:
%% See LICENSE for licensing information.
-module(solarized_assert).
-export([ output_equal_to_file/5
, output_equal_to_file/7
]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%=======================================================================
-spec output_equal_to_file(App, File, Test, TestName, Props) -> ok
    when
        App :: atom(),
        File :: file:name_all(),
        Test :: fun(() -> TestResult),
        TestName :: string(),
        TestResult :: term(),
        Props :: list({module, atom()} | {line, integer()}).

%% Convenience wrapper: captures Test's output with a default 80x25
%% terminal geometry and compares it against the stored expectation
%% file (see output_equal_to_file/7).
output_equal_to_file(App, File, Test, TestName, Props0)
  when is_atom(App) andalso
       (is_atom(File) orelse is_list(File)) andalso
       is_function(Test, 0) andalso
       is_list(TestName) andalso
       is_list(Props0) ->
    output_equal_to_file(App, File, Test, TestName, 80, 25, Props0).
%=======================================================================

-spec output_equal_to_file(App, File, Test, TestName, Columns, Rows, Props)
    -> ok
    when
        App :: atom(),
        File :: file:name_all(),
        Test :: fun(() -> TestResult),
        TestName :: string(),
        TestResult :: term(),
        Columns :: solarized_capture:geometry(),
        Rows :: solarized_capture:geometry(),
        Props :: list({module, atom()} | {line, integer()}).

%% Runs Test under solarized_capture with the given terminal geometry
%% and checks everything it printed against the expectation file
%% <app test dir>/<File>.expect. The Props entries are carried into
%% the error report when the comparison fails.
output_equal_to_file(App, File, Test, TestName, Columns, Rows, Props0)
  when is_atom(App) andalso
       (is_atom(File) orelse is_list(File)) andalso
       is_function(Test, 0) andalso
       is_list(TestName) andalso
       is_integer(Columns) andalso
       is_integer(Rows) andalso
       is_list(Props0) ->
    Output = solarized_capture:output(Test, Columns, Rows),
    %% normalize an atom file name to a string for path building
    FileName = case is_atom(File) of
                   true ->
                       atom_to_list(File);
                   false ->
                       File
               end,
    Props =
        [ {app, atom_to_list(App)}
        , {file, FileName}
        , {expression, TestName}
        , {columns, integer_to_list(Columns)}
        , {rows, integer_to_list(Rows)}
        | Props0
        ],
    check_equal_to_file(App, File, Output, Props).
%=======================================================================

%% Resolves the .expect/.output file paths under the app's test
%% directory (created on demand) and delegates the comparison.
check_equal_to_file(App, File, Output, Props0) ->
    BaseDir = code:lib_dir(App, test),
    ok = ensure_test_dir(BaseDir),
    BaseFile = filename:join(BaseDir, File),
    ExpectFile = [BaseFile, ".expect"],
    OutputFile = [BaseFile, ".output"],
    Props =
        [ {expect_file, ExpectFile}
        , {output_file, OutputFile}
        | Props0
        ],
    equal_to_file(Output, OutputFile, ExpectFile, Props).
%=======================================================================

%% Makes sure the test output directory exists.
%% Returns ok when the directory was created or already existed.
%% Any other failure (e.g. enoent for a missing parent, eacces) used to
%% crash with an opaque case_clause; it now raises a descriptive error
%% that names the directory and the file:make_dir/1 reason.
ensure_test_dir(TestDir) ->
    case file:make_dir(TestDir) of
        ok ->
            ok;
        {error, eexist} ->
            ok;
        {error, Reason} ->
            erlang:error({create_test_dir_failed, TestDir, Reason})
    end.
%-----------------------------------------------------------------------

%% Compares the captured Output with the stored expectation file.
%% On a match any stale .output file is removed; on a mismatch, or when
%% the .expect file does not exist yet, the output is saved and the
%% test is failed via not_equal_to_file/4.
equal_to_file(Output, OutputFile, ExpectFile, Props) ->
    case file:read_file(ExpectFile) of
        {ok, Expect} when Expect =:= Output ->
            ok = delete_output_file(OutputFile),
            ok;
        {ok, _} ->
            not_equal_to_file(Output, OutputFile, ExpectFile, Props);
        {error, enoent} ->
            not_equal_to_file(Output, OutputFile, ExpectFile, Props)
    end.
%-----------------------------------------------------------------------

-spec not_equal_to_file(binary(), string(), string(), list()) -> no_return().

%% Persists the actual output next to the expectation file so it can be
%% inspected (or promoted to the new expectation), then fails the test
%% with both paths and the caller-supplied props in the error term.
not_equal_to_file(Output, OutputFile, ExpectFile, Props) ->
    ok = file:write_file(OutputFile, Output, [binary]),
    erlang:error(
      { outputEqualToFile
      , [ {expect_file, ExpectFile}
        , {output_file, OutputFile}
        | Props
        ]}).
%-----------------------------------------------------------------------

%% Removes a stale .output file; a file that never existed is fine too.
delete_output_file(OutputFile) ->
    case file:delete(OutputFile) of
        ok ->
            ok;
        {error, enoent} ->
            ok
    end. | src/solarized_assert.erl | 0.537041 | 0.50293 | solarized_assert.erl | starcoder
%% Copyright (c) 2019 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : luerl_lint.erl
%% Author : <NAME>
%% Purpose : A basic LUA 5.3 error checker for Luerl.
%% There isn't much checking which can be done here as most is allowed
%% and generates run-time errors or strangeness. So far all we can
%% check is the legal use of varargs ... and warn if assignments have
%% different number of variables and expressions.
-module(luerl_comp_lint).
-include("luerl.hrl").
-include("luerl_comp.hrl").
-export([chunk/2,format_error/1]).
-record(lint, {opts=[], %Options
pars=[], %Local function parameters
errors=[], %Errors
warnings=[] %Warnings
}).
%% format_error(Error) -> String.
%%  Turns a lint error/warning term into a human readable message.
format_error(illegal_varargs) ->
    "cannot use '...' outside a vararg function";
format_error(assign_mismatch) ->
    "assign mismatch variables and expressions".
%% chunk(Code, Opts) -> {ok,Warnings} | {error,Errors,Warnings}.
%%  Entry point: lints a compiled chunk. The chunk body is represented
%%  as one top-level function definition, so we start at functiondef/2.
chunk(#code{code=Code}, Opts) ->
    St0 = #lint{opts=Opts},
    St1 = functiondef(Code, St0),
    return_status(St1).

%% Collapses the final lint state into the public return value.
return_status(#lint{errors=[]}=St) ->
    {ok,St#lint.warnings};
return_status(#lint{errors=Es,warnings=Ws}) ->
    {error,Es,Ws}.
%% stmts(Stmts, State) -> State.
%%  Lints each statement in order, threading the lint state through.
stmts(Stmts, St) ->
    lists:foldl(fun stmt/2, St, Stmts).
%% stmt(Stmt, State) -> State.
%%  Dispatches on the statement record type to the matching checker;
%%  break statements carry nothing to check.
stmt(#assign_stmt{}=A, St) -> assign_stmt(A, St);
stmt(#call_stmt{}=C, St) -> call_stmt(C, St);
stmt(#return_stmt{}=R, St) -> return_stmt(R, St);
stmt(#break_stmt{}, St) -> St;
stmt(#block_stmt{}=B, St) -> block_stmt(B, St);
stmt(#while_stmt{}=W, St) -> while_stmt(W, St);
stmt(#repeat_stmt{}=R, St) -> repeat_stmt(R, St);
stmt(#if_stmt{}=If, St) -> if_stmt(If, St);
stmt(#nfor_stmt{}=For, St) -> numfor_stmt(For, St);
stmt(#gfor_stmt{}=For, St) -> genfor_stmt(For, St);
stmt(#local_assign_stmt{}=For, St) ->
    local_assign_stmt(For, St);
stmt(#local_fdef_stmt{}=For, St) ->
    local_fdef_stmt(For, St);
stmt(#expr_stmt{}=E, St) ->
    expr_stmt(E, St).
%% assign_stmt(Assign, State) -> State.
%% call_stmt(Call, State) -> State.
%% return_stmt(Return, State) -> State.
%% block_stmt(Block, State) -> State.
%% while_stmt(While, State) -> State.
%% repeat_stmt(Repeat, State) -> State.
%% if_stmt(If, State) -> State.
%% numfor_stmt(Numfor, State) -> State.
%% genfor_stmt(Genfor, State) -> State.
%% local_assign_stmt(Assign, State) -> State.
%% local_fdef_stmt(Fdef, State) -> State.
%% expr_stmt(Expr, State) -> State.

%% Checks every assignment target, then the right-hand side expressions.
assign_stmt(#assign_stmt{l=Anno,vs=Vs,es=Es}, St0) ->
    %% Must work more on this to get it right.
    %% St1 = ?IF(length(Vs) =/= length(Es),
    %%           assign_mismatch_warning(Anno, St0), St0),
    St1 = St0,
    St2 = lists:foldl(fun (V, S) -> assign_var(V, S) end, St1, Vs),
    explist(Es, St2).

%% Dotted targets (a.b.c = ...) are checked piecewise left-to-right;
%% binding '...' directly is always an error.
assign_var(#dot{e=Exp,r=Rest}, St0) ->
    St1 = prefixexp_first(Exp, St0),
    assign_var_rest(Rest, St1);
assign_var(#var{l=Anno,n='...'}, St) ->
    %% Not allowed to bind ... .
    illegal_varargs_error(Anno, St);
assign_var(_Var, St) -> St.

assign_var_rest(#dot{e=Exp,r=Rest}, St0) ->
    St1 = prefixexp_element(Exp, St0),
    assign_var_rest(Rest, St1);
assign_var_rest(Exp, St) -> assign_var_last(Exp, St).

%% The final element of a dotted target is a key expression.
assign_var_last(#key{k=Exp}, St) ->
    exp(Exp, St).
%% Per-statement checkers: each walks the subexpressions and bodies of
%% its statement form, threading the lint state through.
call_stmt(#call_stmt{call=Exp}, St) ->
    exp(Exp, St).

return_stmt(#return_stmt{es=Es}, St) ->
    explist(Es, St).

block_stmt(#block_stmt{ss=Ss}, St) ->
    stmts(Ss, St).

while_stmt(#while_stmt{e=Exp,b=Ss}, St0) ->
    St1 = exp(Exp, St0),
    block(Ss, St1).

repeat_stmt(#repeat_stmt{b=Ss}, St) ->
    block(Ss, St).

%% Each test is a {Condition, Body} pair; the else body comes last.
if_stmt(#if_stmt{tests=Ts,else=Else}, St0) ->
    Fun = fun ({E,B}, S0) ->
                  S1 = exp(E, S0),
                  block(B, S1)
          end,
    St1 = lists:foldl(Fun, St0, Ts),
    block(Else, St1).

%% Numeric for: init, limit and step expressions, then the body.
numfor_stmt(#nfor_stmt{init=I,limit=L,step=S,b=B}, St0) ->
    St1 = explist([I,L,S], St0),
    block(B, St1).

%% Generic for: generator expressions, then the body.
genfor_stmt(#gfor_stmt{gens=Gs,b=B}, St0) ->
    St1 = explist(Gs, St0),
    block(B, St1).

local_assign_stmt(#local_assign_stmt{l=Anno,vs=Vs,es=Es}, St0) ->
    %% Must work more on this to get it right.
    %% St1 = ?IF(length(Vs) =/= length(Es),
    %%           assign_mismatch_warning(Anno, St0), St0),
    St1 = St0,
    explist(Es, St1).

local_fdef_stmt(#local_fdef_stmt{f=F}, St) ->
    functiondef(F, St).

expr_stmt(#expr_stmt{exp=Exp}, St) ->
    exp(Exp, St).
%% block(Block, State) -> State.
%%  Lints the statement list of a block.
block(#block{ss=Ss}, St) ->
    stmts(Ss, St).
%% explist(Exprs, State) -> State.
%% exp(Expr, State) -> State.
%% prefixexp(Expr, State) -> State.

explist(Es, St) ->
    lists:foldl(fun (E, S) -> exp(E, S) end, St, Es).

%% Expression walker: plain literals pass unchecked here, function
%% definitions recurse with their own parameter list, everything else
%% is a prefix expression.
exp(#lit{}, St) -> St;
exp(#fdef{}=F, St) -> functiondef(F, St);
exp(#op{as=Es}, St) ->
    explist(Es, St);
exp(#tc{fs=Fs}, St) ->
    tableconstructor(Fs, St);
exp(E, St) ->
    prefixexp(E, St).

%% Prefix expressions (a.b.c, f(x), obj:m(x)) are walked left-to-right:
%% the first element, then the chain of dot/key/call elements.
prefixexp(#dot{e=Exp,r=Rest}, St0) ->
    St1 = prefixexp_first(Exp, St0),
    prefixexp_rest(Rest, St1);
prefixexp(Exp, St) -> prefixexp_first(Exp, St).

prefixexp_first(#single{e=Exp}, St) ->
    exp(Exp, St);
prefixexp_first(#var{}=V, St) ->
    var(V, St).

prefixexp_rest(#dot{e=Exp,r=Rest}, St0) ->
    St1 = prefixexp_element(Exp, St0),
    prefixexp_rest(Rest, St1);
prefixexp_rest(Exp, St) -> prefixexp_element(Exp, St).

%% Chain elements: table key, function call args, method call name+args.
prefixexp_element(#key{k=Exp}, St) ->
    exp(Exp, St);
prefixexp_element(#fcall{as=Es}, St) ->
    explist(Es, St);
prefixexp_element(#mcall{m=Lit,as=Es}, St0) ->
    St1 = lit(Lit, St0),
    explist(Es, St1).
%% functiondef(FuncDef, State) -> State.
%%  Lints a function body with the function's own parameter list in
%%  scope (needed to decide whether '...' is legal inside it), then
%%  restores the enclosing function's parameters.
functiondef(#fdef{ps=Ps,ss=Ss}, #lint{pars=Pars}=St0) ->
    St1 = St0#lint{pars=Ps},                    %Use current parameters
    St2 = stmts(Ss, St1),
    St2#lint{pars=Pars}.                        %Reset previous parameters

%% tableconstructor(Fields, State) -> State.
%%  Checks both positional fields and key/value fields of a table.
tableconstructor(Fs, St) ->
    Fun = fun (#efield{v=Exp}, S) -> exp(Exp, S);
              (#kfield{k=Key,v=Val}, S0) ->
                  S1 = exp(Key, S0),
                  exp(Val, S1)
          end,
    lists:foldl(Fun, St, Fs).
%% var(Var, State) -> State.
%%  '...' may only be referenced when the current function declares it
%%  as a parameter; any other variable is always fine.
var(#var{l=Anno,n='...'}, St) ->
    case lists:keymember('...', #var.n, St#lint.pars) of
        true -> St;
        false ->
            illegal_varargs_error(Anno, St)
    end;
var(_Var, St) -> St.

%% lit(Lit, State) -> State.
%%  The same varargs check for '...' appearing as a literal
%%  (reached from method calls, see prefixexp_element/2).
lit(#lit{l=Anno,v='...'}, St) ->
    case lists:keymember('...', #var.n, St#lint.pars) of
        true -> St;
        false ->
            illegal_varargs_error(Anno, St)
    end;
lit(_Lit, St) -> St.
%% add_error(Annotation, Error, State) -> State.
%% add_warning(Annotation, Warning, State) -> State.
%%  Add errors/warnings to the state.

%% Appends a {Line, Module, Error} tuple; appending (rather than
%% prepending) keeps errors in source order.
add_error(Anno, E, #lint{errors=Errs}=St) ->
    L = luerl_anno:line(Anno),
    St#lint{errors=Errs ++ [{L,?MODULE,E}]}.

%% add_warning(Anno, W, #lint{warnings=Warns}=St) ->
%%     L = luerl_anno:line(Anno),
%%     St#lint{warnings=Warns ++ [{L,?MODULE,W}]}.

illegal_varargs_error(Anno, St) ->
    add_error(Anno, illegal_varargs, St).

%% assign_mismatch_warning(Anno, St) ->
%%     add_warning(Anno, assign_mismatch, St). | src/luerl_comp_lint.erl | 0.551332 | 0.411525 | luerl_comp_lint.erl | starcoder
% @copyright 2011-2014 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc API functions for the Map-Reduce system
%%
%%% The Map-Reduce system will snapshot the database and apply the
%% supplied job to that data. Since snapshots work on the overlay level
%% only the hashed keys are available.
%% Since hashed keys are not useful in this context, the MR system will
%% only considers values of the following structure as inputs:
%% ```
%% {Key::string(), Value::term()}
%% or
%% {Tag::atom(), Key:string(), Value::term()}
%% '''
%% Either all 2-tuples are considered to be input or, if
%% `{tag, some_tag}' is found in the options list, all 3-tuples with `some_tag'
%% as the first element are.
%%
%% A job description is a 2-tuple where the first element is a list of
%% phase specifications and the second element a list of options.
%% A simple example job could look like this:
%% ```
%% Map = fun({_Key, Line}) ->
%% Tokens = string:tokens(Line, " \n,.;:?!()\"'-_"),
%% [{string:to_lower(X),1} || X <- Tokens]
%% end,
%% Reduce = fun(KVList) ->
%% lists:map(fun({K, V}) ->
%% {K, lists:sum(V)}
%% end, KVList)
%% end,
%% api_mr:start_job({[{map, erlanon, Map},
%% {reduce, erlanon, Reduce}],
%% []}).
%% '''
%% It considers all `{string(), string()}' as input and returns the word count of all
%% values.
%%
%% @end
%% @version $Id$
-module(api_mr).
-author('<EMAIL>').
-vsn('$Id$').
%% -define(TRACE(X, Y), io:format(X, Y)).
-define(TRACE(X, Y), ok).
-export([start_job/1]).
-include("scalaris.hrl").
%% @doc Synchronous call to start a map-reduce job; blocks until the
%% job has finished and returns its results.
-spec start_job(mr_state:job_description()) -> [any()].
start_job(Job) ->
    %% the random string names the job; its hash picks the node that
    %% runs the mr_master for it
    Id = randoms:getRandomString(),
    api_dht_raw:unreliable_lookup(api_dht:hash_key(Id), {mr_master, init, comm:this(), Id, Job}),
    wait_for_results([], intervals:empty(), Id).
-spec wait_for_results([any()], intervals:interval(), mr_state:jobid()) -> [any()].
%% Accumulates {mr_results, ...} messages until the received partial
%% intervals cover the whole key space, then returns the collected
%% partial results appended together. An {error, Reason} part replaces
%% the data and claims the full interval, terminating the wait early.
wait_for_results(Data, Interval, Id) ->
    {NewData, NewInterval} =
        begin
            trace_mpath:thread_yield(),
            receive
                ?SCALARIS_RECV({mr_results, PartData, PartInterval, Id},
                               case PartData of
                                   {error, Reason} ->
                                       {[{error, Reason}], intervals:all()};
                                   PartData ->
                                       {[PartData | Data], intervals:union(PartInterval, Interval)}
                               end)
            end
        end,
    ?TRACE("mr_api: received data for job ~p: ~p~n", [Id, hd(NewData)]),
    case intervals:is_all(NewInterval) of
        true ->
            lists:append(NewData);
        _ ->
            wait_for_results(NewData, NewInterval, Id)
    end. | src/api_mr.erl | 0.735642 | 0.685637 | api_mr.erl | starcoder
-module(counters_buckets).
-export([new/0,
new/1,
position/2,
bound/2]).
-export_type([bucket_bound/0,
buckets/0]).
-ifdef(TEST).
-export([default/0,
exponential/3,
linear/3]).
-endif.
%%====================================================================
%% Types
%%====================================================================
-type bucket_bound() :: number() | infinity.
-type buckets() :: [bucket_bound(), ...].
%%====================================================================
%% Public API
%%====================================================================
%% Default bucket list, closed off with an 'infinity' catch-all bound.
new() ->
    default() ++ [infinity].

%% @doc
%% Buckets constructor
%% @end
%% Accepts 'default', a {linear, Start, Step, Count} or
%% {exponential, Start, Factor, Count} spec, or an explicit list of
%% bounds. Always guarantees a trailing 'infinity' bound; rejects
%% empty, undefined, unsorted or non-numeric input with an error.
new([]) ->
    erlang:error({no_buckets, []});
new(undefined) ->
    erlang:error({no_buckets, undefined});
new(default) ->
    default() ++ [infinity];
new({linear, Start, Step, Count}) ->
    linear(Start, Step, Count) ++ [infinity];
new({exponential, Start, Factor, Count}) ->
    exponential(Start, Factor, Count) ++ [infinity];
new(RawBuckets) when is_list(RawBuckets) ->
    Buckets = lists:map(fun validate_bound/1, RawBuckets),
    %% a sorted list compares equal to itself after lists:sort/1
    case lists:sort(Buckets) of
        Buckets ->
            case lists:last(Buckets) of
                infinity -> Buckets;
                _ -> Buckets ++ [infinity]
            end;
        _ ->
            erlang:error({invalid_buckets, Buckets, "buckets not sorted"})
    end;
new(Buckets) ->
    erlang:error({invalid_buckets, Buckets, "not a list"}).
%% Accepts any number or the atom 'infinity' as a bucket bound;
%% anything else raises {invalid_bound, Bound}.
validate_bound(infinity) ->
    infinity;
validate_bound(Bound) when is_number(Bound) ->
    Bound;
validate_bound(Other) ->
    erlang:error({invalid_bound, Other}).
%% @doc
%% Default buckets.
%% <pre lang="erlang">
%% 1> counters_buckets:default().
%% [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
%% </pre>
%% @end
%% NOTE(review): these look like the Prometheus client default
%% histogram buckets (in seconds) — confirm the intended unit.
-spec default() -> buckets().
default() -> [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10].
%% @doc
%% Creates `Count' buckets, where the lowest bucket has an
%% upper bound of `Start' and each following bucket's upper bound is `Factor'
%% times the previous bucket's upper bound.
%% <pre lang="erlang">
%% 3> counters_buckets:exponential(100, 1.2, 3).
%% [100, 120, 144]
%% </pre>
%% The function raises `{invalid_value, Value, Message}' error if `Count'
%% isn't positive, if `Start' isn't positive, or if `Factor' is less than or
%% equals to 1.
%% @end
-spec exponential(number(), number(), pos_integer()) -> buckets().
exponential(_Start, _Factor, Count) when Count < 1 ->
    erlang:error({invalid_value, Count, "Buckets count should be positive"});
exponential(Start, _Factor, _Count) when Start =< 0 ->
    erlang:error({invalid_value, Start, "Buckets start should be positive"});
exponential(_Start, Factor, _Count) when Factor =< 1 ->
    erlang:error({invalid_value, Factor,
                  "Buckets factor should be greater than 1"});
exponential(Start, Factor, Count) ->
    %% Start * Factor^I for I in 0..Count-1, snapped back to an integer
    %% whenever the float result is exact
    [try_to_maintain_integer_bounds(Start*math:pow(Factor, I)) ||
        I <- lists:seq(0, Count-1)].
%% @doc
%% Creates `Count' buckets, each `Width' wide, where the lowest
%% bucket has an upper bound of `Start'.
%% <pre lang="erlang">
%% 2> counters_buckets:linear(10, 5, 6).
%% [10, 15, 20, 25, 30, 35]
%% </pre>
%% The function raises `{invalid_value, Value, Message}' error if `Count'
%% is zero or negative.
%% @end
-spec linear(number(), number(), pos_integer()) -> buckets().
linear(_Start, _Step, Count) when Count < 1 ->
    erlang:error({invalid_value, Count, "Buckets count should be positive"});
linear(Start, Step, Count) ->
    %% delegate to the accumulating worker linear/4
    linear(Start, Step, Count, []).
%% Zero-based index of the first bucket bound that is >= Value.
%% NOTE(review): when Value exceeds every bound, position/3 falls back
%% to 0, which is indistinguishable from "first bucket". With buckets
%% built by new/0,1 this cannot happen since the last bound is always
%% 'infinity', but raw lists should be treated with care.
position(Buckets, Value) ->
    position(Buckets, fun(Bound) ->
                              Value =< Bound
                      end, 0).
%% Returns the first bucket bound that Value falls under
%% (i.e. Value =< Bound), or 'undefined' when Value exceeds every
%% bound in the list. Relies on Erlang term order, so a trailing
%% 'infinity' atom catches all numeric values.
bound(Bounds, Value) ->
    case lists:dropwhile(fun(B) -> Value > B end, Bounds) of
        [Bound | _] ->
            Bound;
        [] ->
            undefined
    end.
%%====================================================================
%% Private Parts
%%====================================================================

%% Accumulating worker for linear/3: emits Count bounds starting at
%% Current, keeping each bound integral when the arithmetic allows.
linear(_Current, _Step, 0, Acc) ->
    lists:reverse(Acc);
linear(Current, Step, Count, Acc) ->
    linear(try_to_maintain_integer_bounds(Current + Step),
           Step,
           Count - 1,
           [Current|Acc]).
-spec try_to_maintain_integer_bounds(integer()) -> integer();
                                    (float()) -> integer() | float().
%% Collapses a float with no fractional part back into an integer so
%% that generated bucket bounds stay integers whenever possible;
%% integers and "true" floats pass through unchanged.
try_to_maintain_integer_bounds(Bound) when is_integer(Bound) ->
    Bound;
try_to_maintain_integer_bounds(Bound) when is_float(Bound) ->
    Truncated = trunc(Bound),
    case Truncated == Bound of
        true -> Truncated;
        false -> Bound
    end.
%% Walks the bounds until Pred succeeds, counting positions upward from
%% the initial Pos; falls back to 0 when no bound satisfies Pred.
position([], _Pred, _Pos) ->
    0;
position([H|L], Pred, Pos) ->
    case Pred(H) of
        true ->
            Pos;
        false ->
            position(L, Pred, Pos + 1)
    end. | src/counters_buckets.erl | 0.566139 | 0.442938 | counters_buckets.erl | starcoder
%%%-------------------------------------------------------------------
%%% @doc
%%% This module implements local or single-node Sharding.
%%%
%%% <b>Shards</b> is compatible with ETS API, most of the functions
%%% preserves the same contract and semantic, but there are some
%%% exceptions (check functions documentation).
%%%
%%% Shards provides a logical view of a single ETS table, but
%%% internally, that logical table is partitioned in multiple
%%% physical ETS tables or <b>Shards</b>, where `Shards = [0 .. N-1]',
%%% and `N' is the number of shards you want to partition the table.
%%%
%%% The K/V pairs are distributed across these shards, therefore,
%%% some of the functions don't follow the same semantics as the
%%% original ones (analogous ETS functions).
%%%
%%% A good example of those are the query-based functions, which
%%% return multiple results; in case of `ordered_set', with a
%%% particular order. For example: `select/1,2,3`,
%%% `select_reverse/1,2,3', `match/1,2,3', `match_object/1,2,3', etc.
%%%
%%% For those cases, the order which results are returned is not
%%% guaranteed to be the same as the original ETS functions.
%%%
%%% In addition to the ETS functions, the `shards_local' module allows
%%% to pass an optional argument, the `State'. When `shards' is
%%% called without the `State', it must fetch the `state' first,
%%% and it is recovered doing an extra call to an ETS control table
%%% owned by `shards_owner_sup'. If any microsecond matters, you can
%%% skip that call by calling `shards_local' directly and passing
%%% the `State'. E.g.:
%%%
%%% ```
%%% % create a table
%%% tab_name = shards:new(tab_name, [{n_shards, 4}]).
%%%
%%% % you can get the state at any time by calling:
%%% State = shards_state:get(tab_name).
%%%
%%% % normal way
%%% shards:lookup(table, key1).
%%%
%%% % calling shards_local directly
%%% shards_local:lookup(table, key1, State).
%%%
%%% % if you have created the table with default options,
%%% % you can skip the sate
%%% shards_local:lookup(table, key1).
%%% '''
%%%
%%% Pools of shards can be added/removed dynamically. For example,
%%% using `shards:new/2' you can add more pools, and `shards:delete/1'
%%% to remove the pool you wish.
%%% @end
%%%-------------------------------------------------------------------
-module(shards_local).
%% ETS API
-export([
all/0,
delete/1, delete/2, delete/3,
delete_all_objects/1, delete_all_objects/2,
delete_object/2, delete_object/3,
file2tab/1, file2tab/2,
first/1, first/2,
foldl/3, foldl/4,
foldr/3, foldr/4,
give_away/3, give_away/4,
i/0,
info/1, info/2, info/3,
info_shard/1, info_shard/2,
insert/2, insert/3,
insert_new/2, insert_new/3,
is_compiled_ms/1,
last/1, last/2,
lookup/2, lookup/3,
lookup_element/3, lookup_element/4,
match/2, match/3, match/4, match/1,
match_delete/2, match_delete/3,
match_object/2, match_object/3, match_object/4, match_object/1,
match_spec_compile/1,
match_spec_run/2,
member/2, member/3,
new/2,
next/2, next/3,
prev/2, prev/3,
rename/2, rename/3,
safe_fixtable/2, safe_fixtable/3,
select/2, select/3, select/4, select/1,
select_count/2, select_count/3,
select_delete/2, select_delete/3,
select_replace/2, select_replace/3,
select_reverse/2, select_reverse/3, select_reverse/4, select_reverse/1,
setopts/2, setopts/3,
tab2file/2, tab2file/3, tab2file/4,
tab2list/1, tab2list/2,
tabfile_info/1,
table/1, table/2, table/3,
test_ms/2,
take/2, take/3,
update_counter/3, update_counter/4, update_counter/5,
update_element/3, update_element/4
]).
%%%===================================================================
%%% Types & Macros
%%%===================================================================
%% ETS Types
-type access() :: public | protected | private.
-type tab() :: atom().
-type type() :: set | ordered_set | bag | duplicate_bag.
-type cont() ::
'$end_of_table'
| {tab(), integer(), integer(), ets:comp_match_spec(), list(), integer()}
| {tab(), _, _, integer(), ets:comp_match_spec(), list(), integer(), integer()}.
%% @type tweaks() =
%% {write_concurrency, boolean()}
%% | {read_concurrency, boolean()}
%% | compressed.
%%
%% ETS tweaks option
-type tweaks() ::
{write_concurrency, boolean()}
| {read_concurrency, boolean()}
| compressed.
%% @type shards_opt() =
%% {scope, l | g}
%% | {n_shards, pos_integer()}
%% | {pick_shard_fun, shards_state:pick_fun()}
%% | {pick_node_fun, shards_state:pick_fun()}
%% | {restart_strategy, one_for_one | one_for_all}.
%%
%% Shards extended options.
-type shards_opt() ::
{scope, l | g}
| {n_shards, pos_integer()}
| {pick_shard_fun, shards_state:pick_fun()}
| {pick_node_fun, shards_state:pick_fun()}
| {restart_strategy, one_for_one | one_for_all}
| {sup_name, module()}.
%% @type option() =
%% type() | access() | named_table
%% | {keypos, pos_integer()}
%% | {heir, pid(), HeirData :: term()}
%% | {heir, none} | tweaks()
%% | shards_opt().
%%
%% Create table options – used by `new/2'.
-type option() ::
type() | access() | named_table
| {keypos, pos_integer()}
| {heir, pid(), HeirData :: term()}
| {heir, none} | tweaks()
| shards_opt().
%% ETS Info Tuple
-type info_tuple() ::
{compressed, boolean()}
| {heir, pid() | none}
| {keypos, pos_integer()}
| {memory, non_neg_integer()}
| {name, atom()}
| {named_table, boolean()}
| {node, node()}
| {owner, pid()}
| {protection, access()}
| {size, non_neg_integer()}
| {type, type()}
| {write_concurrency, boolean()}
| {read_concurrency, boolean()}
| {shards, [atom()]}.
%% ETS Info Item
-type info_item() ::
compressed | fixed | heir | keypos | memory
| name | named_table | node | owner | protection
| safe_fixed | size | stats | type
| write_concurrency | read_concurrency
| shards.
%% ETS TabInfo Item
-type tabinfo_item() ::
{name, atom()}
| {type, type()}
| {protection, access()}
| {named_table, boolean()}
| {keypos, non_neg_integer()}
| {size, non_neg_integer()}
| {extended_info, [md5sum | object_count]}
| {version, {Major :: non_neg_integer(), Minor :: non_neg_integer()}}
| {shards, [atom()]}.
%% @type continuation() = {
%% Tab :: atom(),
%% MatchSpec :: ets:match_spec(),
%% Limit :: pos_integer(),
%% Shard :: non_neg_integer(),
%% Continuation :: cont()
%% }.
%%
%% Defines the convention to `ets:select/1,3' continuation:
%% <ul>
%% <li>`Tab': Table name.</li>
%% <li>`MatchSpec': The `ets:match_spec()'.</li>
%% <li>`Limit': Results limit.</li>
%% <li>`Shard': Shards number.</li>
%% <li>`Continuation': The `ets:continuation()'.</li>
%% </ul>
-type continuation() :: {
Tab :: atom(),
MatchSpec :: ets:match_spec(),
Limit :: pos_integer(),
Shard :: non_neg_integer(),
Continuation :: cont()
}.
%% @type filename() = string() | binary() | atom().
-type filename() :: string() | binary() | atom().
% Exported Types
-export_type([
option/0,
info_tuple/0,
info_item/0,
tabinfo_item/0,
continuation/0,
filename/0
]).
%% Macro to check if the given Filename has the right type
-define(is_filename(FN_), is_list(FN_); is_binary(FN_); is_atom(FN_)).
%%%===================================================================
%%% ETS API
%%%===================================================================
%% @equiv ets:all()
all() ->
    ets:all().

%% @doc
%% This operation behaves like `ets:delete/1'.
%%
%% @see ets:delete/1.
%% @end
-spec delete(Tab :: atom()) -> true.
delete(Tab) ->
    %% terminating the table's supervisor child tears down the control
    %% table and every shard owned by it
    SupName = shards_state:sup_name(Tab),
    ok = shards_sup:terminate_child(SupName, Tab),
    true.
%% @equiv delete(Tab, Key, shards_state:new())
delete(Tab, Key) ->
    delete(Tab, Key, shards_state:new()).

%% @doc
%% This operation behaves like `ets:delete/2'.
%%
%% @see ets:delete/2.
%% @end
-spec delete(
        Tab :: atom(),
        Key :: term(),
        State :: shards_state:state()
       ) -> true.
delete(Tab, Key, State) ->
    %% routed through mapred/6 with the 'd' op so only the shard(s)
    %% responsible for Key perform the delete; the result is ignored
    _ = mapred(Tab, Key, {fun ets:delete/2, [Key]}, nil, State, d),
    true.
%% @equiv delete_all_objects(Tab, shards_state:new())
delete_all_objects(Tab) ->
    delete_all_objects(Tab, shards_state:new()).

%% @doc
%% This operation behaves like `ets:delete_all_objects/1'.
%%
%% @see ets:delete_all_objects/1.
%% @end
-spec delete_all_objects(
        Tab :: atom(),
        State :: shards_state:state()
       ) -> true.
delete_all_objects(Tab, State) ->
    %% broadcast to every shard; individual results are ignored
    _ = mapred(Tab, fun ets:delete_all_objects/1, State),
    true.
%% @equiv delete_object(Tab, Object, shards_state:new())
delete_object(Tab, Object) ->
    delete_object(Tab, Object, shards_state:new()).

%% @doc
%% This operation behaves like `ets:delete_object/2'.
%%
%% @see ets:delete_object/2.
%% @end
-spec delete_object(
        Tab :: atom(),
        Object :: tuple(),
        State :: shards_state:state()
       ) -> true.
delete_object(Tab, Object, State) when is_tuple(Object) ->
    %% the object's first element is its key, which selects the shard(s)
    Key = element(1, Object),
    _ = mapred(Tab, Key, {fun ets:delete_object/2, [Object]}, nil, State, d),
    true.
%% @equiv file2tab(Filename, [])
file2tab(Filename) ->
    file2tab(Filename, []).

%% @doc
%% Similar to `shards:file2tab/2'. Moreover, it restores the
%% supervision tree for the `shards' corresponding to the given
%% file, such as if they had been created using `shards:new/2,3'.
%%
%% @see ets:file2tab/2.
%% @end
-spec file2tab(
        Filename :: filename(),
        Options :: [{verify, boolean()}]
       ) -> {ok, Tab :: atom()} | {error, Reason :: term()}.
file2tab(Filename, Options) when ?is_filename(Filename) ->
    try
        StrFilename = shards_lib:to_string(Filename),
        %% the metadata file stores the logical table name, the saved
        %% shards state and the per-shard dump file list
        Metadata = shards_lib:read_tabfile(StrFilename),
        {name, Tab} = lists:keyfind(name, 1, Metadata),
        {state, State} = lists:keyfind(state, 1, Metadata),
        {shards, ShardTabs} = lists:keyfind(shards, 1, Metadata),
        %% recreate the table, restoring every shard from its dump
        Tab = new(Tab, [{restore, ShardTabs, Options} | state_to_tab_opts(State)]),
        {ok, Tab}
    catch
        throw:Error ->
            Error;
        error:{error, _} = Error ->
            Error;
        error:{badarg, Arg} ->
            %% raised by the file reader when the file is missing
            {error, {read_error, {file_error, Arg, enoent}}}
    end.
%% @private
%% Converts a shards state into the option list accepted by new/2:
%% drops the internal `module' entry and re-injects the scope.
state_to_tab_opts(State) ->
  StateMap = shards_state:to_map(State),
  Opts = maps:remove(module, StateMap),
  maps:to_list(Opts#{scope => shards_state:scope(State)}).
%% @equiv first(Tab, shards_state:new())
first(Tab) ->
  first(Tab, shards_state:new()).

%% @doc
%% This operation behaves similar to `ets:first/1'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:first/1.
%% @end
-spec first(Tab :: atom(), State :: shards_state:state()) ->
  Key :: term() | '$end_of_table'.
first(Tab, State) ->
  %% Walk the shards from the highest index down to 0 until one of
  %% them yields a key.
  LastShard = shards_state:n_shards(State) - 1,
  first(Tab, ets:first(shards_lib:shard_name(Tab, LastShard)), LastShard).

%% @private
%% Skip empty shards; give up once shard 0 is exhausted too.
first(Tab, '$end_of_table', Shard) when Shard > 0 ->
  PrevShard = Shard - 1,
  first(Tab, ets:first(shards_lib:shard_name(Tab, PrevShard)), PrevShard);
first(_Tab, '$end_of_table', _Shard) ->
  '$end_of_table';
first(_Tab, Key, _Shard) ->
  Key.
%% @equiv foldl(Function, Acc0, Tab, shards_state:new())
foldl(Fun, Acc0, Tab) ->
  foldl(Fun, Acc0, Tab, shards_state:new()).

%% @doc
%% This operation behaves like `ets:foldl/3', threading the
%% accumulator through every shard in turn.
%%
%% @see ets:foldl/3.
%% @end
-spec foldl(
  Fun :: fun((Element :: term(), Acc) -> Acc),
  Acc :: term(),
  Tab :: atom(),
  State :: shards_state:state()
) -> Acc
when Acc :: term().
foldl(Fun, Acc, Tab, State) ->
  fold(Tab, shards_state:n_shards(State), foldl, [Fun, Acc]).
%% @equiv foldr(Function, Acc0, Tab, shards_state:new())
foldr(Fun, Acc0, Tab) ->
  foldr(Fun, Acc0, Tab, shards_state:new()).

%% @doc
%% This operation behaves like `ets:foldr/3', threading the
%% accumulator through every shard in turn.
%%
%% @see ets:foldr/3.
%% @end
-spec foldr(
  Fun :: fun((Element :: term(), Acc) -> Acc),
  Acc :: term(),
  Tab :: atom(),
  State :: shards_state:state()
) -> Acc
when Acc :: term().
foldr(Fun, Acc, Tab, State) ->
  fold(Tab, shards_state:n_shards(State), foldr, [Fun, Acc]).
%% @equiv give_away(Tab, Pid, GiftData, shards_state:new())
give_away(Tab, Pid, GiftData) ->
  give_away(Tab, Pid, GiftData, shards_state:new()).

%% @doc
%% Equivalent to `ets:give_away/3' for each shard table. It returns
%% a `boolean()' instead that just `true'. Returns `true' if the
%% function was applied successfully on each shard, otherwise
%% `false' is returned.
%%
%% <p><font color="red"><b>WARNING: It is not recommended execute
%% this function, since it might cause an unexpected behavior.
%% Once this function is executed, `shards' doesn't control/manage
%% the ETS shards anymore. So from this point, you should use
%% ETS API instead. Also it is recommended to run `shards:delete/1'
%% after run this function.
%% </b></font></p>
%%
%% @see ets:give_away/3.
%% @end
-spec give_away(
  Tab :: atom(),
  Pid :: pid(),
  GiftData :: term(),
  State :: shards_state:state()
) -> true.
give_away(Tab, Pid, GiftData, State) ->
  %% Ownership transfer must go through each shard owner process.
  %% NOTE(review): the reduce step ignores per-shard results, so this
  %% always yields `true' (a failing shard would crash instead) — the
  %% @doc claim that `false' can be returned looks unreachable; confirm.
  MapFun = {fun shards_owner:apply_ets_fun/3, [give_away, [Pid, GiftData]]},
  mapred(Tab, MapFun, {fun(_Res, Bool) -> Bool end, true}, State).
%% @equiv ets:i()
%% Prints a summary of all ETS tables to standard output; straight
%% delegation to `ets:i/0'.
i() ->
  ets:i().
%% @equiv info(Tab, shards_state:new())
info(Tab) ->
  info(Tab, shards_state:new()).

%% @doc
%% If 2nd argument is `info_item()' this function behaves like
%% `ets:info/2', but if it is the `shards_state:state()',
%% it behaves like `ets:info/1'.
%%
%% This function also adds info about the shards `{shards, [atom()]}'.
%%
%% @see ets:info/1.
%% @see ets:info/2.
%% @end
-spec info(
  Tab :: atom(),
  StateOrItem :: shards_state:state() | info_item()
) -> Value | [info_tuple()] | undefined
when Value :: any().
info(Tab, Item) when is_atom(Item) ->
  info(Tab, Item, shards_state:new());
info(Tab, State) ->
  %% The logical table exists only while its root process is
  %% registered under `Tab' (see do_new/3).
  case whereis(Tab) of
    undefined ->
      undefined;
    _Pid ->
      shards_info(Tab, mapred(Tab, fun ets:info/1, State), [memory])
  end.
%% @doc
%% This operation is analogous to `ets:info/2'.
%%
%% @see ets:info/2.
%% @end
-spec info(
  Tab :: atom(),
  Item :: info_item(),
  State :: shards_state:state()
) -> any() | undefined.
info(Tab, Item, State) ->
  %% Build the aggregated info list first, then pick the item out.
  TabInfo = info(Tab, State),
  case TabInfo of
    undefined -> undefined;
    _ -> shards_lib:keyfind(Item, TabInfo)
  end.
%% @equiv ets:info(ShardTab)
%% Raw `ets:info/1' on one physical shard table (no aggregation).
info_shard(ShardTab) ->
  ets:info(ShardTab).
%% @equiv ets:info(ShardTab, Item)
%% Raw `ets:info/2' on one physical shard table (no aggregation).
info_shard(ShardTab, Item) ->
  ets:info(ShardTab, Item).
%% @equiv insert(Tab, ObjOrObjs, shards_state:new())
insert(Tab, ObjOrObjs) ->
  insert(Tab, ObjOrObjs, shards_state:new()).
%% @doc
%% This operation behaves similar to `ets:insert/2', BUT with a big
%% difference, <b>IT IS NOT ATOMIC</b>, which means, if it fails inserting
%% some object, previous inserted objects are not rolled back, in that
%% case an error is raised.
%%
%% @see ets:insert/2.
%% @end
-spec insert(
  Tab :: atom(),
  ObjOrObjs :: tuple() | [tuple()],
  State :: shards_state:state()
) -> true | no_return().
%% `Acc' is always `true', so `Acc = ets:insert(...)' is a match
%% assertion: it crashes with badmatch if a per-shard insert does not
%% return `true'.
insert(Tab, ObjOrObjs, State) when is_list(ObjOrObjs) ->
  maps:fold(fun(Shard, Group, Acc) ->
    Acc = ets:insert(Shard, Group)
  end, true, group_keys_by_shard(Tab, ObjOrObjs, State));
insert(Tab, ObjOrObjs, State) when is_tuple(ObjOrObjs) ->
  ets:insert(get_shard(Tab, ObjOrObjs, State), ObjOrObjs).
%% @equiv insert_new(Tab, ObjOrObjs, shards_state:new())
insert_new(Tab, ObjOrObjs) ->
  insert_new(Tab, ObjOrObjs, shards_state:new()).
%% @doc
%% This operation behaves similar to `ets:insert_new/2', BUT with a big
%% difference, <b>IT IS NOT ATOMIC</b>, which means, if it fails inserting
%% some object, previous inserted objects are not rolled back, in that case
%% only that object is affected, the rest may be successfully inserted.
%%
%% This function returns `true' if all entries were successfully inserted.
%% If one of the given entries within `ObjOrObjs' fails, the tuple
%% `{false, FailedObjs}' is returned, where `FailedObjs' contains the list
%% of the failed objects. If only one entry/object is passed to this function
%% and it fails, only `false' is returned.
%%
%% <b>Example:</b>
%%
%% ```
%% > shards:insert_new(mytab, {k1, 1}).
%% true
%%
%% > shards:insert_new(mytab, {k1, 1}).
%% false
%%
%% > shards:insert_new(mytab, [{k1, 1}, {k2, 2}]).
%% {false,[{k1,1}]}
%% '''
%%
%% @see ets:insert_new/2.
%% @end
-spec insert_new(Tab :: atom(), ObjOrObjs, State :: shards_state:state()) ->
  boolean() | {false, ObjOrObjs}
  when ObjOrObjs :: tuple() | [tuple()].
%% Objects are grouped by destination shard and inserted group by
%% group; every group whose insert_new failed is accumulated and
%% reported back to the caller.
insert_new(Tab, ObjOrObjs, State) when is_list(ObjOrObjs) ->
  Result =
    maps:fold(fun(Shard, Group, Acc) ->
      case do_insert_new(Tab, Shard, Group, State) of
        true -> Acc;
        false -> Group ++ Acc
      end
    end, [], group_keys_by_shard(Tab, ObjOrObjs, State)),
  case Result of
    [] -> true;
    _ -> {false, Result}
  end;
insert_new(Tab, ObjOrObjs, State) when is_tuple(ObjOrObjs) ->
  do_insert_new(Tab, get_shard(Tab, ObjOrObjs, State), ObjOrObjs, State).
%% @private
%% Inserts `Objs' into `Shard' only if the key is not already present
%% in the logical table.
do_insert_new(Tab, Shard, Objs, State) ->
  %% Derive the key honoring the table's `keypos'.
  Key = shards_lib:key_from_object(shards_state:keypos(State), Objs),
  case shards_state:eval_pick_shard(Key, r, State) of
    any ->
      %% Reads are not pinned to a single shard, so the key could live
      %% in any of them: probe all shards before inserting.
      Map = {fun ets:lookup/2, [Key]},
      Reduce = fun erlang:'++'/2,
      case mapred(Tab, Map, Reduce, State) of
        [] -> ets:insert_new(Shard, Objs);
        _ -> false
      end;
    _ ->
      %% Reads and writes agree on one shard; the per-shard
      %% `ets:insert_new/2' already gives the needed uniqueness check.
      ets:insert_new(Shard, Objs)
  end.
%% @equiv ets:is_compiled_ms(Term)
%% Straight delegation; no shard involvement.
is_compiled_ms(Term) ->
  ets:is_compiled_ms(Term).
%% @equiv last(Tab, shards_state:new())
last(Tab) ->
  last(Tab, shards_state:new()).

%% @doc
%% This operation behaves similar to `ets:last/1'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:last/1.
%% @end
-spec last(Tab :: atom(), State :: shards_state:state()) ->
  Key :: term() | '$end_of_table'.
last(Tab, State) ->
  %% The table type is probed on shard 0; only `ordered_set' has a
  %% meaningful last key, any other type falls back to first/2 (the
  %% same equivalence ETS defines for unordered tables).
  Shard0 = shards_lib:shard_name(Tab, 0),
  case ets:info(Shard0, type) of
    ordered_set -> ets:last(Shard0);
    _ -> first(Tab, State)
  end.
%% @equiv lookup(Tab, Key, shards_state:new())
lookup(Tab, Key) ->
  lookup(Tab, Key, shards_state:new()).

%% @doc
%% This operation behaves like `ets:lookup/2'.
%%
%% @see ets:lookup/2.
%% @end
-spec lookup(
  Tab :: atom(),
  Key :: term(),
  State :: shards_state:state()
) -> [Object :: tuple()].
lookup(Tab, Key, State) ->
  %% Route by key; when the pick fun returns `any' the per-shard
  %% result lists are concatenated.
  mapred(Tab, Key, {fun ets:lookup/2, [Key]}, fun erlang:'++'/2, State, r).
%% @equiv lookup_element(Tab, Key, Pos, shards_state:new())
lookup_element(Tab, Key, Pos) ->
  lookup_element(Tab, Key, Pos, shards_state:new()).

%% @doc
%% This operation behaves like `ets:lookup_element/3'.
%%
%% @see ets:lookup_element/3.
%% @end
-spec lookup_element(
  Tab :: atom(),
  Key :: term(),
  Pos :: pos_integer(),
  State :: shards_state:state()
) -> Element :: term() | [term()].
lookup_element(Tab, Key, Pos, State) ->
  lookup_element(Tab, shards_state:eval_pick_shard(Key, r, State), Key, Pos, State).

%% @private
%% When the pick fun cannot resolve a single shard (`any'), probe
%% every shard, turning the per-shard `badarg' miss into a sentinel,
%% and raise `badarg' only if no shard held the key.
lookup_element(Tab, any, Key, Pos, State) ->
  Probe = fun(ShardTab, K, P) ->
    try
      ets:lookup_element(ShardTab, K, P)
    catch
      error:badarg -> {error, notfound}
    end
  end,
  Hits = [R || R <- mapred(Tab, {Probe, [Key, Pos]}, State), R =/= {error, notfound}],
  case Hits of
    [] -> error(badarg);
    _ -> lists:append(Hits)
  end;
lookup_element(Tab, Shard, Key, Pos, _State) ->
  ets:lookup_element(shards_lib:shard_name(Tab, Shard), Key, Pos).
%% @equiv match(Tab, Pattern, shards_state:new())
match(Tab, Pattern) ->
  match(Tab, Pattern, shards_state:new()).

%% @doc
%% If 3rd argument is `pos_integer()' this function behaves
%% like `ets:match/3', but if it is the `shards_state:state()',
%% it behaves like `ets:match/2'.
%%
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:match/2.
%% @see ets:match/3.
%% @end
-spec match(
  Tab :: atom(),
  Pattern :: ets:match_pattern(),
  StateOrLimit :: shards_state:state() | pos_integer()
) -> [Match] | {[Match], continuation()} | '$end_of_table'
when Match :: [term()].
match(Tab, Pattern, Limit) when is_integer(Limit), Limit > 0 ->
  match(Tab, Pattern, Limit, shards_state:new());
match(Tab, Pattern, State) ->
  %% Full-table match: run on every shard, concatenate the results.
  mapred(Tab, {fun ets:match/2, [Pattern]}, fun erlang:'++'/2, State).
%% @doc
%% This operation behaves similar to `ets:match/3'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:match/3.
%% @end
-spec match(
  Tab :: atom(),
  Pattern :: ets:match_pattern(),
  Limit :: pos_integer(),
  State :: shards_state:state()
) -> {[Match :: term()], continuation()} | '$end_of_table'.
match(Tab, Pattern, Limit, State) ->
  %% Limited traversal starts at the highest shard index and works
  %% down via the generic query engine (q/8).
  LastShard = shards_state:n_shards(State) - 1,
  q(match, Tab, Pattern, Limit, q_fun(), Limit, LastShard, {[], nil}).
%% @doc
%% This operation behaves similar to `ets:match/1'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:match/1.
%% @end
-spec match(continuation()) ->
  {[Match :: term()], continuation()} | '$end_of_table'.
%% The 3rd element of the continuation tuple is the per-step limit
%% recorded by match/4.
match({_, _, Limit, _, _} = Continuation) ->
  q(match, Continuation, q_fun(), Limit, []).
%% @equiv match_delete(Tab, Pattern, shards_state:new())
match_delete(Tab, Pattern) ->
  match_delete(Tab, Pattern, shards_state:new()).

%% @doc
%% This operation behaves like `ets:match_delete/2'.
%%
%% @see ets:match_delete/2.
%% @end
-spec match_delete(
  Tab :: atom(),
  Pattern :: ets:match_pattern(),
  State :: shards_state:state()
) -> true.
match_delete(Tab, Pattern, State) ->
  %% Delete on every shard; `and' all per-shard results together.
  mapred(Tab, {fun ets:match_delete/2, [Pattern]}, {fun erlang:'and'/2, true}, State).
%% @equiv match_object(Tab, Pattern, shards_state:new())
match_object(Tab, Pattern) ->
  match_object(Tab, Pattern, shards_state:new()).

%% @doc
%% If 3rd argument is `pos_integer()' this function behaves like
%% `ets:match_object/3', but if it is the `shards_state:state()',
%% it behaves like `ets:match_object/2'.
%%
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:match_object/2.
%% @see ets:match_object/3.
%% @end
-spec match_object(
  Tab :: atom(),
  Pattern :: ets:match_pattern(),
  StateOrLimit :: shards_state:state() | pos_integer()
) -> [Object :: tuple()] | {[term()], continuation()} | '$end_of_table'.
match_object(Tab, Pattern, Limit) when is_integer(Limit), Limit > 0 ->
  match_object(Tab, Pattern, Limit, shards_state:new());
match_object(Tab, Pattern, State) ->
  %% Full-table match: run on every shard, concatenate the results.
  mapred(Tab, {fun ets:match_object/2, [Pattern]}, fun erlang:'++'/2, State).
%% @doc
%% This operation behaves similar to `ets:match_object/3'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:match_object/3.
%% @end
-spec match_object(
  Tab :: atom(),
  Pattern :: ets:match_pattern(),
  Limit :: pos_integer(),
  State :: shards_state:state()
) -> {[Match :: term()], continuation()} | '$end_of_table'.
match_object(Tab, Pattern, Limit, State) ->
  %% Limited traversal starting from the highest shard index.
  LastShard = shards_state:n_shards(State) - 1,
  q(match_object, Tab, Pattern, Limit, q_fun(), Limit, LastShard, {[], nil}).
%% @doc
%% This operation behaves similar to `ets:match_object/1'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:match_object/1.
%% @end
-spec match_object(continuation()) ->
  {[Match :: term()], continuation()} | '$end_of_table'.
%% The 3rd element of the continuation tuple is the per-step limit
%% recorded by match_object/4.
match_object({_, _, Limit, _, _} = Continuation) ->
  q(match_object, Continuation, q_fun(), Limit, []).
%% @equiv ets:match_spec_compile(MatchSpec)
%% Straight delegation; no shard involvement.
match_spec_compile(MatchSpec) ->
  ets:match_spec_compile(MatchSpec).
%% @equiv ets:match_spec_run(List, CompiledMatchSpec)
%% Straight delegation; no shard involvement.
match_spec_run(List, CompiledMatchSpec) ->
  ets:match_spec_run(List, CompiledMatchSpec).
%% @equiv member(Tab, Key, shards_state:new())
member(Tab, Key) ->
  member(Tab, Key, shards_state:new()).

%% @doc
%% This operation behaves like `ets:member/2'.
%%
%% @see ets:member/2.
%% @end
-spec member(
  Tab :: atom(),
  Key :: term(),
  State :: shards_state:state()
) -> boolean().
member(Tab, Key, State) ->
  %% A resolvable key yields a single boolean; an `any' pick yields
  %% one boolean per shard — the key exists if any shard says so.
  case mapred(Tab, Key, {fun ets:member/2, [Key]}, nil, State, r) of
    PerShard when is_list(PerShard) ->
      lists:member(true, PerShard);
    Bool ->
      Bool
  end.
%% @doc
%% This operation is analogous to `ets:new/2', BUT it behaves totally
%% different. When this function is called, instead of create a single
%% table, a new supervision tree is created and added to `shards_sup'.
%%
%% This supervision tree has a main supervisor `shards_sup' which
%% creates a control ETS table and also creates `N' number of
%% `shards_owner' (being `N' the number of shards). Each `shards_owner'
%% creates an ETS table to represent each shard, so this `gen_server'
%% acts as the table owner.
%%
%% Finally, when you create a table, internally `N' physical tables
%% are created (one per shard), but `shards' encapsulates all this
%% and you see only one logical table (similar to how a distributed
%% storage works).
%%
%% <b>IMPORTANT: By default, `NumShards = number of schedulers'.</b>
%%
%% @see ets:new/2.
%% @end
-spec new(Name, Options :: [option()]) -> Name when Name :: atom().
new(Name, Options) ->
  %% `sup_name' may override the supervisor the tree hangs from.
  do_new(shards_lib:keyfind(sup_name, Options, shards_sup), Name, Options).
%% @private
%% Starts the table's supervision subtree and registers its root pid
%% under the table name — `whereis(Name)' then doubles as the
%% existence check used by info/2.
do_new(SupName, Name, Options) ->
  case shards_sup:start_child(SupName, Name, Options) of
    {ok, Pid} ->
      true = register(Name, Pid),
      Name;
    {error, {shutdown, {_, _, {restore_error, Error}}}} ->
      %% A failed `{restore, ...}' (file2tab) re-raises the original
      %% restore error.
      error(Error);
    {error, Reason} ->
      error({badarg, Reason})
  end.
%% @equiv next(Tab, Key1, shards_state:new())
next(Tab, Key1) ->
  next(Tab, Key1, shards_state:new()).

%% @doc
%% This operation behaves similar to `ets:next/2'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning. It raises a `bad_pick_fun_ret'
%% exception in case of pick fun returns `any'.
%%
%% @see ets:next/2.
%% @end
-spec next(Tab :: atom(), Key, State :: shards_state:state()) ->
  Key | '$end_of_table'
  when Key :: term().
next(Tab, Key1, State) ->
  case shards_state:eval_pick_shard(Key1, r, State) of
    any ->
      %% Without a deterministic shard for Key1 there is no way to
      %% resume the traversal, so this is an error by contract.
      error(bad_pick_fun_ret);
    Shard ->
      next_(Tab, ets:next(shards_lib:shard_name(Tab, Shard), Key1), Shard)
  end.

%% @private
%% When the current shard is exhausted, continue from the start of
%% the next lower-indexed shard (mirrors first/3's traversal order).
next_(Tab, '$end_of_table', Shard) when Shard > 0 ->
  PrevShard = Shard - 1,
  next_(Tab, ets:first(shards_lib:shard_name(Tab, PrevShard)), PrevShard);
next_(_Tab, '$end_of_table', _Shard) ->
  '$end_of_table';
next_(_Tab, Key2, _Shard) ->
  Key2.
%% @equiv prev(Tab, Key1, shards_state:new())
prev(Tab, Key1) ->
  prev(Tab, Key1, shards_state:new()).

%% @doc
%% This operation behaves similar to `ets:prev/2'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:prev/2.
%% @end
-spec prev(Tab :: atom(), Key, State :: shards_state:state()) ->
  Key | '$end_of_table'
  when Key :: term().
prev(Tab, Key1, State) ->
  %% The table type is probed on shard 0; only `ordered_set' has a
  %% meaningful reverse order, any other type falls back to next/3
  %% (the same equivalence ETS defines for unordered tables).
  Shard0 = shards_lib:shard_name(Tab, 0),
  case ets:info(Shard0, type) of
    ordered_set -> ets:prev(Shard0, Key1);
    _ -> next(Tab, Key1, State)
  end.
%% @equiv rename(Tab, Name, shards_state:new())
rename(Tab, Name) ->
  rename(Tab, Name, shards_state:new()).

%% @doc
%% Equivalent to `ets:rename/2'.
%%
%% Renames the table name and all its associated shard tables.
%% If something unexpected occurs during the process, an exception
%% will be thrown.
%%
%% @see ets:rename/2.
%% @end
-spec rename(Tab :: atom(), Name, State :: shards_state:state()) ->
  Name | no_return()
  when Name :: atom().
rename(Tab, Name, State) ->
  %% Rename every shard table first; matching on the returned name
  %% crashes at the first failure.
  RenameShard = fun(Shard) ->
    OldShard = shards_lib:shard_name(Tab, Shard),
    NewShard = shards_lib:shard_name(Name, Shard),
    NewShard = do_rename(OldShard, NewShard)
  end,
  ok = lists:foreach(RenameShard, shards_lib:iterator(State)),
  %% Finally rename the logical table itself.
  do_rename(Tab, Name).
%% @private
%% Renames one ETS table and moves its registered process name: the
%% owner pid keeps running, only the name registration is swapped.
do_rename(OldName, NewName) ->
  NewName = ets:rename(OldName, NewName),
  Pid = shards_lib:get_pid(OldName),
  true = unregister(OldName),
  true = register(NewName, Pid),
  NewName.
%% @equiv safe_fixtable(Tab, Fix, shards_state:new())
safe_fixtable(Tab, Fix) ->
  safe_fixtable(Tab, Fix, shards_state:new()).

%% @doc
%% Equivalent to `ets:safe_fixtable/2' for each shard table.
%% It returns a `boolean()' instead that just `true'.
%% Returns `true' if the function was applied successfully
%% on each shard, otherwise `false' is returned.
%%
%% @see ets:safe_fixtable/2.
%% @end
-spec safe_fixtable(
  Tab :: atom(),
  Fix :: boolean(),
  State :: shards_state:state()
) -> boolean().
safe_fixtable(Tab, Fix, State) ->
  %% Fix/unfix every shard; `and' all per-shard results together.
  mapred(Tab, {fun ets:safe_fixtable/2, [Fix]}, {fun erlang:'and'/2, true}, State).
%% @equiv select(Tab, MatchSpec, shards_state:new())
select(Tab, MatchSpec) ->
  select(Tab, MatchSpec, shards_state:new()).

%% @doc
%% If 3rd argument is `pos_integer()' this function behaves like
%% `ets:select/3', but if it is the `shards_state:state()',
%% it behaves like `ets:select/2'.
%%
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:select/2.
%% @see ets:select/3.
%% @end
-spec select(
  Tab :: atom(),
  MatchSpec :: ets:match_spec(),
  StateOrLimit :: shards_state:state() | pos_integer()
) -> [Match] | {[Match], continuation()} | '$end_of_table'
when Match :: term().
select(Tab, MatchSpec, Limit) when is_integer(Limit), Limit > 0 ->
  select(Tab, MatchSpec, Limit, shards_state:new());
select(Tab, MatchSpec, State) ->
  %% Full-table select: run on every shard, concatenate the results.
  mapred(Tab, {fun ets:select/2, [MatchSpec]}, fun erlang:'++'/2, State).
%% @doc
%% This operation behaves similar to `ets:select/3'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:select/3.
%% @end
-spec select(
  Tab :: atom(),
  MatchSpec :: ets:match_spec(),
  Limit :: pos_integer(),
  State :: shards_state:state()
) -> {[Match :: term()], continuation()} | '$end_of_table'.
select(Tab, MatchSpec, Limit, State) ->
  %% Limited traversal starting from the highest shard index.
  LastShard = shards_state:n_shards(State) - 1,
  q(select, Tab, MatchSpec, Limit, q_fun(), Limit, LastShard, {[], nil}).
%% @doc
%% This operation behaves similar to `ets:select/1'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:select/1.
%% @end
-spec select(continuation()) ->
  {[Match :: term()], continuation()} | '$end_of_table'.
%% The 3rd element of the continuation tuple is the per-step limit
%% recorded by select/4.
select({_, _, Limit, _, _} = Continuation) ->
  q(select, Continuation, q_fun(), Limit, []).
%% @equiv select_count(Tab, MatchSpec, shards_state:new())
select_count(Tab, MatchSpec) ->
  select_count(Tab, MatchSpec, shards_state:new()).

%% @doc
%% This operation behaves like `ets:select_count/2'.
%%
%% @see ets:select_count/2.
%% @end
-spec select_count(
  Tab :: atom(),
  MatchSpec :: ets:match_spec(),
  State :: shards_state:state()
) -> non_neg_integer().
select_count(Tab, MatchSpec, State) ->
  %% Count on every shard and sum the per-shard counts.
  mapred(Tab, {fun ets:select_count/2, [MatchSpec]}, {fun erlang:'+'/2, 0}, State).
%% @equiv select_delete(Tab, MatchSpec, shards_state:new())
select_delete(Tab, MatchSpec) ->
  select_delete(Tab, MatchSpec, shards_state:new()).

%% @doc
%% This operation behaves like `ets:select_delete/2'.
%%
%% @see ets:select_delete/2.
%% @end
-spec select_delete(
  Tab :: atom(),
  MatchSpec :: ets:match_spec(),
  State :: shards_state:state()
) -> non_neg_integer().
select_delete(Tab, MatchSpec, State) ->
  %% Delete on every shard and sum the per-shard deletion counts.
  mapred(Tab, {fun ets:select_delete/2, [MatchSpec]}, {fun erlang:'+'/2, 0}, State).
%% @equiv select_replace(Tab, MatchSpec, shards_state:new())
select_replace(Tab, MatchSpec) ->
  select_replace(Tab, MatchSpec, shards_state:new()).

%% @doc
%% This operation behaves like `ets:select_replace/2'.
%%
%% @see ets:select_replace/2.
%% @end
-spec select_replace(
  Tab :: atom(),
  MatchSpec :: ets:match_spec(),
  State :: shards_state:state()
) -> non_neg_integer().
select_replace(Tab, MatchSpec, State) ->
  %% Replace on every shard and sum the per-shard replacement counts.
  mapred(Tab, {fun ets:select_replace/2, [MatchSpec]}, {fun erlang:'+'/2, 0}, State).
%% @equiv select_reverse(Tab, MatchSpec, shards_state:new())
select_reverse(Tab, MatchSpec) ->
  select_reverse(Tab, MatchSpec, shards_state:new()).

%% @doc
%% If 3rd argument is `pos_integer()' this function behaves like
%% `ets:select_reverse/3', but if it is the `shards_state:state()',
%% it behaves like `ets:select_reverse/2'.
%%
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:select_reverse/2.
%% @see ets:select_reverse/3.
%% @end
-spec select_reverse(
  Tab :: atom(),
  MatchSpec :: ets:match_spec(),
  StateOrLimit :: shards_state:state() | pos_integer()
) -> [Match] | {[Match], continuation()} | '$end_of_table'
when Match :: term().
select_reverse(Tab, MatchSpec, Limit) when is_integer(Limit), Limit > 0 ->
  select_reverse(Tab, MatchSpec, Limit, shards_state:new());
select_reverse(Tab, MatchSpec, State) ->
  %% Full-table reverse select: run on every shard, concatenate.
  mapred(Tab, {fun ets:select_reverse/2, [MatchSpec]}, fun erlang:'++'/2, State).
%% @doc
%% This operation behaves similar to `ets:select_reverse/3'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:select_reverse/3.
%% @end
-spec select_reverse(
  Tab :: atom(),
  MatchSpec :: ets:match_spec(),
  Limit :: pos_integer(),
  State :: shards_state:state()
) -> {[Match :: term()], continuation()} | '$end_of_table'.
select_reverse(Tab, MatchSpec, Limit, State) ->
  %% Limited traversal starting from the highest shard index.
  LastShard = shards_state:n_shards(State) - 1,
  q(select_reverse, Tab, MatchSpec, Limit, q_fun(), Limit, LastShard, {[], nil}).
%% @doc
%% This operation behaves similar to `ets:select_reverse/1'.
%% The order in which results are returned, might be not the same
%% as the original ETS function. Remember shards architecture
%% described at the beginning.
%%
%% @see ets:select_reverse/1.
%% @end
-spec select_reverse(continuation()) ->
  {[Match :: term()], continuation()} | '$end_of_table'.
%% The 3rd element of the continuation tuple is the per-step limit
%% recorded by select_reverse/4.
select_reverse({_, _, Limit, _, _} = Continuation) ->
  q(select_reverse, Continuation, q_fun(), Limit, []).
%% @equiv setopts(Tab, Opts, shards_state:new())
setopts(Tab, Opts) ->
  setopts(Tab, Opts, shards_state:new()).

%% @doc
%% Equivalent to `ets:setopts/2' for each shard table. It returns
%% a `boolean()' instead that just `true'. Returns `true' if the
%% function was applied successfully on each shard, otherwise
%% `false' is returned.
%%
%% @see ets:setopts/2.
%% @end
-spec setopts(Tab :: atom(), Opt | [Opt], State :: shards_state:state()) ->
  boolean()
  when Opt :: {heir, pid(), HeirData :: term()} | {heir, none}.
setopts(Tab, Opts, State) ->
  %% Option changes must go through each shard owner process; the
  %% per-shard results are `and'ed together.
  MapFun = {fun shards_owner:apply_ets_fun/3, [setopts, [Opts]]},
  mapred(Tab, MapFun, {fun erlang:'and'/2, true}, State).
%% @equiv tab2file(Tab, Filename, shards_state:new())
tab2file(Tab, Filename) ->
  tab2file(Tab, Filename, shards_state:new()).
%% @equiv tab2file/4
%% A list third argument is treated as `Options'; anything else is
%% treated as the state.
tab2file(Tab, Filename, Options) when is_list(Options) ->
  tab2file(Tab, Filename, Options, shards_state:new());
tab2file(Tab, Filename, State) ->
  tab2file(Tab, Filename, [], State).
%% @doc
%% Similar to `ets:tab2file/3', but it behaves different.
%% This function generates one file per shard using `ets:tab2file/3',
%% and also generate a master file with the given `Filename' that
%% holds the information about the other shards files in order to
%% be able to recover them using `ets:file2tab/1,2'.
%%
%% @see ets:tab2file/3.
%% @end
-spec tab2file(
  Tab :: atom(),
  Filename :: filename(),
  Options :: [Option],
  State :: shards_state:state()
) -> ok | {error, Reason :: term()}
when Option ::
  {extended_info, [md5sum | object_count]}
  | {sync, boolean()}
  | {nodes, [node()]}.
tab2file(Tab, Filename, Options, State) when ?is_filename(Filename) ->
  StrFilename = shards_lib:to_string(Filename),
  %% `nodes' is metadata for the master file only and must not be
  %% forwarded to ets:tab2file/3, so it is split out here.
  {Nodes, NewOpts} =
    case lists:keytake(nodes, 1, Options) of
      {value, {nodes, Val}, Opts1} ->
        {Val, Opts1};
      _ ->
        {[node()], Options}
    end,
  %% Dump each shard to "<Filename>.<ShardIndex>", halting at the
  %% first per-shard failure.
  ShardFilenamePairs =
    shards_lib:reduce_while(fun(Shard, Acc) ->
      ShardName = shards_lib:shard_name(Tab, Shard),
      ShardFilename = StrFilename ++ "." ++ integer_to_list(Shard),
      case ets:tab2file(ShardName, ShardFilename, NewOpts) of
        ok ->
          {cont, [{ShardName, ShardFilename} | Acc]};
        {error, _} = Error ->
          {halt, Error}
      end
    end, [], shards_lib:iterator(State)),
  case ShardFilenamePairs of
    {error, _} = Error ->
      Error;
    _ ->
      %% Master file: everything file2tab/2 needs to rebuild the
      %% supervision tree and reload each shard dump.
      FileContent = [
        {name, Tab},
        {state, State},
        {shards, ShardFilenamePairs},
        {nodes, Nodes}
      ],
      shards_lib:write_tabfile(StrFilename, FileContent)
  end.
%% @equiv tab2list(Tab, shards_state:new())
tab2list(Tab) ->
  tab2list(Tab, shards_state:new()).
%% @doc
%% This operation behaves like `ets:tab2list/1'.
%%
%% The result is the concatenation of every shard's contents, so the
%% relative order across shards is unspecified.
%%
%% @see ets:tab2list/1.
%% @end
-spec tab2list(Tab :: atom(), State :: shards_state:state()) ->
  [Object :: tuple()].
tab2list(Tab, State) ->
  mapred(Tab, fun ets:tab2list/1, fun erlang:'++'/2, State).
%% @doc
%% This operation is analogous to `ets:tabfile_info/1', but it
%% adds info about the shards `{shards, [atom()]}'.
%%
%% @see ets:tabfile_info/1.
%% @end
-spec tabfile_info(filename()) ->
  {ok, [tabinfo_item()]} | {error, Reason :: term()}.
tabfile_info(Filename) when ?is_filename(Filename) ->
  try
    StrFilename = shards_lib:to_string(Filename),
    %% Master file written by tab2file/4: holds the table name and
    %% the per-shard dump filenames.
    Metadata = shards_lib:read_tabfile(StrFilename),
    {name, Tab} = lists:keyfind(name, 1, Metadata),
    {shards, ShardTabs} = lists:keyfind(shards, 1, Metadata),
    %% Collect ets:tabfile_info/1 for every shard dump; the first
    %% failure aborts via throw and is returned as-is. The fold
    %% prepends, so the collected list is in reverse shard order.
    ShardsTabInfo =
      lists:foldl(fun({_, ShardFN}, Acc) ->
        case ets:tabfile_info(ShardFN) of
          {ok, TabInfo} -> [TabInfo | Acc];
          Error -> throw(Error)
        end
      end, [], ShardTabs),
    {ok, shards_info(Tab, ShardsTabInfo)}
  catch
    throw:Error -> Error
  end.
%% @equiv table(Tab, shards_state:new())
table(Tab) ->
  table(Tab, shards_state:new()).
%% @equiv table/3
%% A list second argument is treated as `Options'; anything else is
%% treated as the state.
table(Tab, Options) when is_list(Options) ->
  table(Tab, Options, shards_state:new());
table(Tab, State) ->
  table(Tab, [], State).
%% @doc
%% Similar to `ets:table/2', but it returns a list of `ets:table/2'
%% responses, one for each shard table.
%%
%% @see ets:table/2.
%% @end
-spec table(
  Tab :: atom(),
  Options :: [Option] | Option,
  State :: shards_state:state()
) -> [qlc:query_handle()]
when NObjs :: default | pos_integer(),
  MS :: ets:match_spec(),
  Option ::
    {n_objects, NObjs}
    | {traverse, first_next | last_prev | select | {select, MS}}.
%% One QLC handle per shard; callers compose them as needed.
table(Tab, Options, State) ->
  mapred(Tab, {fun ets:table/2, [Options]}, State).
%% @equiv ets:test_ms(Tuple, MatchSpec)
%% Straight delegation; no shard involvement.
test_ms(Tuple, MatchSpec) ->
  ets:test_ms(Tuple, MatchSpec).
%% @equiv take(Tab, Key, shards_state:new())
take(Tab, Key) ->
  take(Tab, Key, shards_state:new()).

%% @doc
%% This operation behaves like `ets:take/2'.
%%
%% @see ets:take/2.
%% @end
-spec take(Tab :: atom(), Key :: term(), State :: shards_state:state()) ->
  [Object :: tuple()].
take(Tab, Key, State) ->
  %% Route by key; when the pick fun returns `any' the per-shard
  %% result lists are concatenated.
  mapred(Tab, Key, {fun ets:take/2, [Key]}, fun erlang:'++'/2, State, r).
%% @equiv update_counter(Tab, Key, UpdateOp, shards_state:new())
update_counter(Tab, Key, UpdateOp) ->
  update_counter(Tab, Key, UpdateOp, shards_state:new()).

%% @doc
%% This operation behaves like `ets:update_counter/3'.
%%
%% @see ets:update_counter/3.
%% @end
-spec update_counter(
  Tab :: atom(),
  Key :: term(),
  UpdateOp :: term(),
  State :: shards_state:state()
) -> Result :: integer() | [integer()].
update_counter(Tab, Key, UpdateOp, State) ->
  mapred(Tab, Key, {fun ets:update_counter/3, [Key, UpdateOp]}, nil, State, w).

%% @doc
%% This operation behaves like `ets:update_counter/4'.
%%
%% @see ets:update_counter/4.
%% @end
-spec update_counter(
  Tab :: atom(),
  Key :: term(),
  UpdateOp :: term(),
  Default :: tuple(),
  State :: shards_state:state()
) -> Result :: integer() | [integer()].
update_counter(Tab, Key, UpdateOp, Default, State) ->
  mapred(Tab, Key, {fun ets:update_counter/4, [Key, UpdateOp, Default]}, nil, State, w).
%% @equiv update_element(Tab, Key, ElementSpec, shards_state:new())
update_element(Tab, Key, ElementSpec) ->
  update_element(Tab, Key, ElementSpec, shards_state:new()).

%% @doc
%% This operation behaves like `ets:update_element/3'.
%%
%% @see ets:update_element/3.
%% @end
-spec update_element(
  Tab :: atom(),
  Key :: term(),
  ElementSpec :: {Pos, Value} | [{Pos, Value}],
  State :: shards_state:state()
) -> boolean()
when Pos :: pos_integer(), Value :: term().
update_element(Tab, Key, ElementSpec, State) ->
  %% Route the write by key.
  mapred(Tab, Key, {fun ets:update_element/3, [Key, ElementSpec]}, nil, State, w).
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @private
%% mapred/3,4,6 implement the fan-out behind almost every API call:
%% apply a map fun either to the single shard a key resolves to, or
%% to every shard, and combine the per-shard results with a reduce
%% fun.
mapred(Tab, Map, State) ->
  mapred(Tab, Map, nil, State).
%% @private
mapred(Tab, Map, Reduce, State) ->
  mapred(Tab, nil, Map, Reduce, State, r).
%% @private
%% Clause order matters:
%% 1) `nil' reduce: default to collecting per-shard results in a list.
%% 2) `nil' key: no routing possible — run on every shard, in
%%    parallel when there is more than one.
%% 3) otherwise: ask the pick fun; `any' means the key cannot be
%%    pinned (sequential run over all shards), a concrete shard gets
%%    a direct single-table call whose bare result is returned.
mapred(Tab, Key, Map, nil, State, Op) ->
  mapred(Tab, Key, Map, fun(E, Acc) -> [E | Acc] end, State, Op);
mapred(Tab, nil, Map, Reduce, State, _) ->
  case shards_state:n_shards(State) of
    N when N =< 1 ->
      s_mapred(Tab, N, Map, Reduce);
    N ->
      p_mapred(Tab, N, Map, Reduce)
  end;
mapred(Tab, Key, {MapFun, Args} = Map, Reduce, State, Op) ->
  N = shards_state:n_shards(State),
  PickShardFun = shards_state:pick_shard_fun(State),
  case PickShardFun(Key, N, Op) of
    any ->
      s_mapred(Tab, N, Map, Reduce);
    Shard ->
      apply(MapFun, [shards_lib:shard_name(Tab, Shard) | Args])
  end.
%% @private
%% Sequential map-reduce: apply the map fun on every shard in turn,
%% folding the per-shard results with the reduce fun.
s_mapred(Tab, NumShards, {MapFun, Args}, {ReduceFun, AccIn}) ->
  lists:foldl(fun(Shard, Acc) ->
    MapRes = apply(MapFun, [shards_lib:shard_name(Tab, Shard) | Args]),
    ReduceFun(MapRes, Acc)
  end, AccIn, shards_lib:iterator(NumShards));
s_mapred(Tab, NumShards, MapFun, ReduceFun) ->
  %% normalize bare funs into the {Fun, Args}/{Fun, AccIn} tuple forms
  {Map, Reduce} = mapred_funs(MapFun, ReduceFun),
  s_mapred(Tab, NumShards, Map, Reduce).
%% @private
%% Parallel map-reduce: spawn one async task per shard, then await and
%% reduce the results. Tasks are reduced in reverse spawn order, which
%% is exactly the order produced by the accumulator-built task list in
%% the sequential fold this replaces.
p_mapred(Tab, NumShards, {MapFun, Args}, {ReduceFun, AccIn}) ->
  Tasks =
    [shards_task:async(fun() ->
       apply(MapFun, [shards_lib:shard_name(Tab, Shard) | Args])
     end) || Shard <- shards_lib:iterator(NumShards)],
  lists:foldl(fun(Task, Acc) ->
    ReduceFun(shards_task:await(Task), Acc)
  end, AccIn, lists:reverse(Tasks));
p_mapred(Tab, NumShards, MapFun, ReduceFun) ->
  %% normalize bare funs, then re-enter the main clause
  {Map, Reduce} = mapred_funs(MapFun, ReduceFun),
  p_mapred(Tab, NumShards, Map, Reduce).
%% @private
%% Normalize a map fun and a reduce fun into the internal
%% {Fun, ExtraArgs} / {Fun, AccIn} tuple forms. A bare map fun gets an
%% empty extra-argument list; an already-wrapped {Fun, Args} value is
%% passed through untouched. The reduce fun always starts with [] acc.
mapred_funs(MapFun, ReduceFun) when is_function(MapFun) ->
  {{MapFun, []}, {ReduceFun, []}};
mapred_funs(MapFun, ReduceFun) ->
  {MapFun, {ReduceFun, []}}.
%% @private
%% Resolve the shard table name owning Object, in write ('w') mode.
get_shard(Tab, Object, State) ->
  get_shard(Tab, Object, w, State).

%% @private
%% The shard is picked from the object's first element, mirroring
%% ETS's default keypos of 1.
get_shard(Tab, Object, Op, State) ->
  Key = element(1, Object),
  shards_lib:shard_name(Tab, shards_state:eval_pick_shard(Key, Op, State)).

%% @private
%% Partition Objects into a map of ShardName => [Object]; foldr plus
%% prepend preserves the original relative order within each shard.
group_keys_by_shard(Tab, Objects, State) ->
  lists:foldr(fun(Object, Acc) ->
    Shard = get_shard(Tab, Object, State),
    Acc#{Shard => [Object | maps:get(Shard, Acc, [])]}
  end, #{}, Objects).
%% @private
%% Run ets:Fold (e.g. foldl/foldr) sequentially over every shard table,
%% threading a single accumulator through all of them.
fold(Tab, NumShards, Fold, [Fun, Acc]) ->
  lists:foldl(fun(Shard, FoldAcc) ->
    ShardName = shards_lib:shard_name(Tab, Shard),
    apply(ets, Fold, [Fun, FoldAcc, ShardName])
  end, Acc, shards_lib:iterator(NumShards)).
%% @private
shards_info(Tab, InfoLists) ->
  shards_info(Tab, InfoLists, []).

%% @private
%% Merge per-shard ets:info/1 result lists into one table-level info
%% list: the logical table name replaces the first shard's name, the
%% shard table names are collected under the 'shards' key, and numeric
%% attributes ('size' plus any ExtraAttrs) are summed across shards.
shards_info(Tab, [FirstInfo | RestInfoLists], ExtraAttrs) ->
  {name, InitShard} = lists:keyfind(name, 1, FirstInfo),
  FirstInfo1 = lists:keystore(name, 1, FirstInfo, {name, Tab}),
  lists:foldl(fun(InfoList, InfoListAcc) ->
    shards_lib:keyupdate(fun
      (shards, Shards) ->
        %% collect this shard's table name
        {name, ShardName} = lists:keyfind(name, 1, InfoList),
        [ShardName | Shards];
      (K, V) ->
        %% numeric attribute: add this shard's value to the running sum
        {K, V1} = lists:keyfind(K, 1, InfoList),
        V + V1
    end, [size, shards] ++ ExtraAttrs, InfoListAcc)
  end, [{shards, [InitShard]} | FirstInfo1], RestInfoLists).
%% @private
%% Paged-query driver for select/match style operations. F is the ets
%% function name to call (dynamically), I is the number of results still
%% wanted for the current page, and Shard counts down towards 0.
%%
%% Requested page size reached: return the page plus a continuation.
q(_, Tab, MatchSpec, Limit, _, 0, Shard, {Acc, Continuation}) ->
  {Acc, {Tab, MatchSpec, Limit, Shard, Continuation}};
%% All shards exhausted with nothing accumulated: end of table.
q(_, _, _, _, _, _, Shard, {[], _}) when Shard < 0 ->
  '$end_of_table';
%% All shards exhausted with a partial page: return it with a
%% continuation that yields '$end_of_table' on the next call.
q(_, Tab, MatchSpec, Limit, _, _, Shard, {Acc, _}) when Shard < 0 ->
  {Acc, {Tab, MatchSpec, Limit, Shard, '$end_of_table'}};
%% Current shard exhausted: move on to the next (lower-numbered) shard.
q(F, Tab, MatchSpec, Limit, QFun, I, Shard, {Acc, '$end_of_table'}) ->
  q(F, Tab, MatchSpec, Limit, QFun, I, Shard - 1, {Acc, nil});
%% Pull up to I results from the current shard via ets:F/3.
q(F, Tab, MatchSpec, Limit, QFun, I, Shard, {Acc, _}) ->
  case ets:F(shards_lib:shard_name(Tab, Shard), MatchSpec, I) of
    {L, Cont} ->
      NewAcc = {QFun(L, Acc), Cont},
      q(F, Tab, MatchSpec, Limit, QFun, I - length(L), Shard, NewAcc);
    '$end_of_table' ->
      q(F, Tab, MatchSpec, Limit, QFun, I, Shard, {Acc, '$end_of_table'})
  end.

%% @private
%% Continuation variant: resume a {Tab, MatchSpec, Limit, Shard, Cont}
%% tuple previously returned by q/8, using ets:F/1 on the raw ets
%% continuation until the page is filled or the shard is drained.
q(_, {Tab, MatchSpec, Limit, Shard, Continuation}, _, 0, Acc) ->
  {Acc, {Tab, MatchSpec, Limit, Shard, Continuation}};
q(F, {Tab, MatchSpec, Limit, Shard, '$end_of_table'}, QFun, I, Acc) ->
  q(F, Tab, MatchSpec, Limit, QFun, I, Shard - 1, {Acc, nil});
q(F, {Tab, MatchSpec, Limit, Shard, Continuation}, QFun, I, Acc) ->
  case ets:F(Continuation) of
    {L, Cont} ->
      NewAcc = QFun(L, Acc),
      q(F, {Tab, MatchSpec, Limit, Shard, Cont}, QFun, I - length(L), NewAcc);
    '$end_of_table' ->
      q(F, {Tab, MatchSpec, Limit, Shard, '$end_of_table'}, QFun, I, Acc)
  end.
%% @private
%% Default page-accumulation fun for q/8: prepends each new result
%% chunk onto the accumulated page.
%% (Fix: removed dataset-dump residue that was appended after the
%% terminating period, which made the line invalid Erlang.)
q_fun() ->
  fun(L1, L0) -> L1 ++ L0 end.
%%% Module Description:
%%% Compiles Erlang Source code to Core Erlang
-module(coregen).
-author(["<NAME>", "<NAME>"]).
-vsn(1.0).
-export([to_core_erlang/1,
to_core_erlang/2,
to_core_erlang_ast/1,
to_core_erlang_ast/2]).
%% Compiles a given Erlang source file to a CoreErlang file, and writes the resultant
%% CoreErlang output to a file in the same directory as the given module.
%% NOTE(review): filepath is a project helper module (not stdlib `filename');
%% presumably filepath:path/1 returns the module's directory -- confirm.
to_core_erlang(Module) ->
  to_core_erlang(Module, filepath:path(Module)).
%% Compiles a given Erlang source file to Core Erlang.
%%
%% to_core_erlang(Module, return) returns the compiler result (including
%% the Core Erlang binary) directly to the caller instead of writing a file.
%%
%% to_core_erlang(Module, OutputDirectory) writes the .core file into
%% OutputDirectory, which must end in "/". Returns {ok, core_compiled}
%% on success or {error, Reason} otherwise.
to_core_erlang(Module, return) ->
  compile:file(Module, [to_core, binary]);
to_core_erlang(Module, OutputDirectory) ->
  case re:run(OutputDirectory, ".*/$") of
    nomatch ->
      {error, output_directory_not_valid};
    _ ->
      %% Fix: check the compiler result instead of ignoring it. The
      %% previous code reported {ok, core_compiled} even when compilation
      %% failed and no .core file was ever produced.
      case compile:file(Module, to_core) of
        error ->
          {error, core_compilation_failed};
        {error, Errors, Warnings} ->
          {error, {core_compilation_failed, Errors, Warnings}};
        _Ok ->
          %% Compiling always generates output in the working directory,
          %% so move it into the requested output directory.
          FileName = filepath:name(Module),
          OldLocation = FileName ++ ".core",
          NewLocation = OutputDirectory ++ OldLocation,
          filepath:move(OldLocation, NewLocation),
          {ok, core_compiled}
      end
  end.
%% Compiles a given Erlang source file to a CoreErlang AST file, and writes the
%% resultant CoreErlang AST output to a file in the same directory as the module
%% (directory resolved via the project's filepath helper).
to_core_erlang_ast(Module) ->
  to_core_erlang_ast(Module, filepath:path(Module)).
%% Compiles a given Erlang source file to a Core Erlang AST.
%%
%% to_core_erlang_ast(Module, return) returns {ok, AST} | {error, Reason}
%% directly to the caller; the temporary .core file is always deleted.
%%
%% to_core_erlang_ast(Module, OutputDirectory) writes both the .core file
%% and a pretty-printed .ast file into OutputDirectory.
%%
%% (Fixes: removed dataset-dump residue appended to the final line;
%% restructured so the outcome is bound in a single expression instead of
%% relying on implicit export of `Result' from nested case branches.)
to_core_erlang_ast(Module, return) ->
  ModuleName = filepath:name(Module),
  %% Write the Core Erlang source for the module so we can scan/parse it.
  to_core_erlang(Module, "./"),
  CoreFile = ModuleName ++ ".core",
  Result =
    case file:read_file(CoreFile) of
      {ok, Bin} ->
        case core_scan:string(binary_to_list(Bin)) of
          {ok, Toks, _} ->
            core_parse:parse(Toks);
          {error, E, _} ->
            {error, {scan, E}}
        end;
      {error, E} ->
        {error, {read, E}}
    end,
  %% Always clean up the temporary .core file before returning.
  file:delete(CoreFile),
  Result;
to_core_erlang_ast(Module, OutputDirectory) ->
  ModuleName = filepath:name(Module),
  case to_core_erlang_ast(Module, return) of
    {ok, AST} ->
      %% Write the .core file alongside the AST dump.
      to_core_erlang(Module, OutputDirectory),
      file:write_file(OutputDirectory ++ ModuleName ++ ".ast",
                      lists:flatten(io_lib:format("~p", [AST]))),
      {ok, ast_compiled};
    {error, E} ->
      {error, E}
  end.
%%%-------------------------------------------------------------------
%% wrapper implementations for the APIs & RPCs for the gateway service
%% basically this module handles various RPC and function calls from grpcbox_stream
%% and routes it to the required application specific handler module
%% due to issues with the rust grpc client, we have amalgamated what were
%% previously distinct grpc services ( such as state channels and routing )
%% which had module specific implementations
%% into a single service as defined in the gateway proto
%% rather than combining the server side implementations into one
%% single module, this top level module was added instead and simply
%% routes incoming RPCs to their service specific module
%% this was we can maintain functional seperation of concerns
%%%-------------------------------------------------------------------
-module(helium_gateway_service).
-behavior(helium_gateway_bhvr).
-include("../grpc/autogen/server/gateway_pb.hrl").
%% common APIs
-export([
init/2,
handle_info/3
]).
%% config APIs
-export([
config/2,
config_update/2
]).
%% validators APIs
-export([
validators/2
]).
%% routing APIs
-export([
routing/2
]).
%% state channel related APIs
-export([
is_active_sc/2,
is_overpaid_sc/2,
close_sc/2,
follow_sc/2
]).
%%%-------------------------------------------------------------------
%% common API implementations
%%%-------------------------------------------------------------------
%% its really only stream RPCs which need to handle the init
%% as its those which are likely to manage their own state
%% unary APIs only need to return the same passed in StreamState
-spec init(atom(), grpcbox_stream:t()) -> grpcbox_stream:t().
%%
%% validators unary APIs: no per-stream state needed, pass StreamState through
%%
init(_RPC = validators, StreamState) ->
  StreamState;
%%
%% config unary APIs: no per-stream state needed
%%
init(_RPC = config, StreamState) ->
  StreamState;
%%
%% config streaming APIs: delegate so the impl module can set up its own state
%%
init(RPC = config_update, StreamState) ->
  helium_config_impl:init(RPC, StreamState);
%%
%% routing streaming APIs
%%
init(RPC = routing, StreamState) ->
  helium_routing_impl:init(RPC, StreamState);
%%
%% state channel streaming APIs
%%
init(RPC = follow_sc, StreamState) ->
  helium_state_channels_impl:init(RPC, StreamState);
%%
%% state channel unary APIs: no per-stream state needed
%%
init(_RPC = is_active_sc, StreamState) ->
  StreamState;
init(_RPC = is_overpaid_sc, StreamState) ->
  StreamState;
init(_RPC = close_sc, StreamState) ->
  StreamState.
%%
%% Any API can potentially receive an out-of-band info msg; route it to
%% the impl module that owns the RPC, or log and ignore if unknown.
%%
-spec handle_info(atom(), any(), grpcbox_stream:t()) -> grpcbox_stream:t().
handle_info(_RPC = validators, Msg, StreamState) ->
  helium_validators_impl:handle_info(Msg, StreamState);
handle_info(_RPC = config, Msg, StreamState) ->
  helium_config_impl:handle_info(Msg, StreamState);
handle_info(_RPC = config_update, Msg, StreamState) ->
  helium_config_impl:handle_info(Msg, StreamState);
handle_info(_RPC = routing, Msg, StreamState) ->
  helium_routing_impl:handle_info(Msg, StreamState);
handle_info(_RPC = is_active_sc, Msg, StreamState) ->
  helium_state_channels_impl:handle_info(Msg, StreamState);
handle_info(_RPC = is_overpaid_sc, Msg, StreamState) ->
  helium_state_channels_impl:handle_info(Msg, StreamState);
handle_info(_RPC = close_sc, Msg, StreamState) ->
  helium_state_channels_impl:handle_info(Msg, StreamState);
handle_info(_RPC = follow_sc, Msg, StreamState) ->
  helium_state_channels_impl:handle_info(Msg, StreamState);
handle_info(_RPC, _Msg, StreamState) ->
  %% catch-all: never crash the stream on an unexpected info message
  lager:warning("got unhandled info msg, RPC ~p, Msg, ~p", [_RPC, _Msg]),
  StreamState.
%%%-------------------------------------------------------------------
%% Config / validators RPC implementations
%% (thin delegates: all logic lives in the corresponding *_impl modules)
%%%-------------------------------------------------------------------
-spec validators(
  ctx:ctx(),
  gateway_pb:gateway_validators_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()} | grpcbox_stream:grpc_error_response().
validators(Ctx, Message) -> helium_validators_impl:validators(Ctx, Message).

-spec config(
  ctx:ctx(),
  gateway_pb:gateway_config_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()} | grpcbox_stream:grpc_error_response().
config(Ctx, Message) -> helium_config_impl:config(Ctx, Message).

%% streaming RPC: the impl module manages the stream state
-spec config_update(
  gateway_pb:gateway_config_update_req_v1_pb(),
  grpcbox_stream:t()
) -> {ok, grpcbox_stream:t()} | grpcbox_stream:grpc_error_response().
config_update(Msg, StreamState) -> helium_config_impl:config_update(Msg, StreamState).
%%%-------------------------------------------------------------------
%% Routing RPC implementations
%%%-------------------------------------------------------------------
%% streaming RPC: delegated to the routing impl module
-spec routing(gateway_pb:gateway_routing_req_v1_pb(), grpcbox_stream:t()) ->
  {ok, grpcbox_stream:t()} | grpcbox_stream:grpc_error_response().
routing(Msg, StreamState) -> helium_routing_impl:routing(Msg, StreamState).
%%%-------------------------------------------------------------------
%% State channel RPC implementations
%% (thin delegates to helium_state_channels_impl)
%% Fix: removed dataset-dump residue appended after the final line.
%%%-------------------------------------------------------------------
-spec is_active_sc(
  ctx:ctx(),
  gateway_pb:gateway_sc_is_active_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()} | grpcbox_stream:grpc_error_response().
is_active_sc(Ctx, Message) -> helium_state_channels_impl:is_active_sc(Ctx, Message).

-spec is_overpaid_sc(
  ctx:ctx(),
  gateway_pb:gateway_sc_is_overpaid_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()} | grpcbox_stream:grpc_error_response().
is_overpaid_sc(Ctx, Message) -> helium_state_channels_impl:is_overpaid_sc(Ctx, Message).

-spec close_sc(
  ctx:ctx(),
  gateway_pb:gateway_sc_close_req_v1_pb()
) -> {ok, gateway_pb:gateway_resp_v1_pb(), ctx:ctx()}.
close_sc(Ctx, Message) -> helium_state_channels_impl:close_sc(Ctx, Message).

%% NOTE(review): the request type below is written gateway_sc_follow_req_v1
%% (no _pb suffix) unlike its siblings -- confirm against gateway_pb.hrl.
-spec follow_sc(
  gateway_pb:gateway_sc_follow_req_v1(),
  grpcbox_stream:t()
) -> {ok, grpcbox_stream:t()} | grpcbox_stream:grpc_error_response().
follow_sc(Msg, StreamState) -> helium_state_channels_impl:follow_sc(Msg, StreamState).
%%%=============================================================================
%% Copyright 2012- Klarna AB
%% Copyright 2015- AUTHORS
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc Json schema validation module.
%%
%% This module is the core of jesse, it implements the validation functionality
%% according to the standard.
%% @end
%%%=============================================================================
-module(jesse_schema_validator).
%% API
-export([ validate/3
, validate_definition/4
, validate_with_state/3
]).
%% Includes
-include("jesse_schema_validator.hrl").
%% Behaviour definition
-callback check_value(Value, Attr, State) ->
State | no_return()
when
Value :: any(),
Attr :: {binary(), jesse:json_term()},
State :: jesse_state:state().
-callback init_state(Opts :: jesse_state:validator_opts()) ->
validator_state().
-type validator_state() :: any().
-export_type([ validator_state/0
]).
%%% API
%% @doc Validates json `Data' against `JsonSchema' with `Options'.
%% If the given json is valid, then it is returned to the caller as is,
%% otherwise an exception will be thrown.
-spec validate( JsonSchema :: jesse:json_term()
              , Data :: jesse:json_term()
              , Options :: [{Key :: atom(), Data :: any()}]
              ) -> {ok, jesse:json_term()}
                 | no_return().
validate(JsonSchema, Value, Options) ->
  %% build a fresh validation state, run the whole schema, then either
  %% return the value unchanged or throw the accumulated error list
  %% (result/1 throws when the state carries errors)
  State = jesse_state:new(JsonSchema, Options),
  NewState = validate_with_state(JsonSchema, Value, State),
  {result(NewState), Value}.
%% @doc Validates json `Data' against `Definition' in `JsonSchema' with `Options'.
%% If the given json is valid, then it is returned to the caller as is,
%% otherwise an exception will be thrown.
-spec validate_definition( Definition :: string()
                         , JsonSchema :: jesse:json_term()
                         , Data :: jesse:json_term()
                         , Options :: [{Key :: atom(), Data :: any()}]
                         ) -> {ok, jesse:json_term()}
                            | no_return().
%% Validate Value against one named definition of JsonSchema by
%% synthesizing a {"$ref": "#/definitions/<Definition>"} schema and
%% running it through the regular validation pipeline.
validate_definition(Definition, JsonSchema, Value, Options) ->
  State = jesse_state:new(JsonSchema, Options),
  RefSchema = make_definition_ref(Definition),
  {result(validate_with_state(RefSchema, Value, State)), Value}.
%% @doc Validates json `Data' against `JsonSchema' with `State'.
%% If the given json is valid, then the latest state is returned to the caller,
%% otherwise an exception will be thrown.
-spec validate_with_state( JsonSchema :: jesse:json_term()
                         , Data :: jesse:json_term()
                         , State :: jesse_state:state()
                         ) -> jesse_state:state()
                           | no_return().
validate_with_state(JsonSchema0, Value, State) ->
  %% pick the validator module (explicit in state, or by $schema version),
  %% unwrap the schema container, then check Value against each attribute
  Validator = select_validator(JsonSchema0, State),
  JsonSchema = jesse_json_path:unwrap_value(JsonSchema0),
  run_validator(Validator, Value, JsonSchema, State).
%%% Internal functions
%% @doc Gets validator from the state or else
%% selects an appropriate one by schema version.
%% @private
select_validator(JsonSchema, State) ->
  case jesse_state:get_validator(State) of
    undefined ->
      %% no validator configured: choose by the schema's declared version
      select_validator_by_schema(get_schema_ver(JsonSchema, State), State);
    Validator ->
      Validator
  end.

%% @private
%% Map a schema-version URI to its validator module; unsupported
%% versions are reported via the state's schema-error handler.
select_validator_by_schema(?json_schema_draft3, _) ->
  jesse_validator_draft3;
select_validator_by_schema(?json_schema_draft4, _) ->
  jesse_validator_draft4;
select_validator_by_schema(SchemaURI, State) ->
  jesse_error:handle_schema_invalid({?schema_unsupported, SchemaURI}, State).

%% @doc Returns "$schema" property from `JsonSchema' if it is present,
%% otherwise the default schema version from `State' is returned.
%% @private
get_schema_ver(JsonSchema, State) ->
  case jesse_json_path:value(?SCHEMA, JsonSchema, ?not_found) of
    ?not_found -> jesse_state:get_default_schema_ver(State);
    SchemaVer -> SchemaVer
  end.
%% @doc Returns a result depending on `State'.
%% @private
result(State) ->
  %% ok when validation produced no errors; otherwise throw the error list
  case jesse_state:get_error_list(State) of
    [] -> ok;
    Errors -> throw(Errors)
  end.
%% @doc Goes through attributes of the given `JsonSchema' and
%% validates the `Value' against them calling `Validator'.
%% @private
%% Thread the validation state through Validator:check_value/3 for every
%% attribute of the (already unwrapped) schema; an empty schema simply
%% returns the state unchanged.
run_validator(Validator, Value, JsonSchema, State) ->
  lists:foldl(
    fun(Attr, StateAcc) ->
      Validator:check_value(Value, Attr, StateAcc)
    end, State, JsonSchema).
%% @doc Makes a $ref schema object pointing to the given `Definition'
%% in the schema's definitions section.
%% (Fixes: removed dataset-dump residue appended after the final line;
%% corrected "defintions" typo in this comment.)
%% @private
make_definition_ref(Definition) ->
  DefinitionBin = list_to_binary(Definition),
  [{<<"$ref">>, <<"#/definitions/", DefinitionBin/binary>>}].
%% common_test suite for test
-module(integrity_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").
-include("cqerl_protocol.hrl").
-compile(export_all).
-import(test_helper, [maybe_get_client/1, get_client/1]).
%%--------------------------------------------------------------------
%% Function: suite() -> Info
%%
%% Info = [tuple()]
%% List of key/value pairs.
%%
%% Description: Returns list of tuples to set default properties
%% for the suite.
%%
%% Note: The suite/0 function is only meant to be used to return
%% default data values, not perform any other operations.
%%--------------------------------------------------------------------
%% Per-testcase timetrap of 20 seconds, plus the environment
%% requirements supplied by the shared test helper.
suite() ->
  [{timetrap, {seconds, 20}} | test_helper:requirements()].
%%--------------------------------------------------------------------
%% Function: groups() -> [Group]
%%
%% Group = {GroupName,Properties,GroupsAndTestCases}
%% GroupName = atom()
%% The name of the group.
%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
%% Group properties that may be combined.
%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
%% TestCase = atom()
%% The name of a test case.
%% Shuffle = shuffle | {shuffle,Seed}
%% To get cases executed in random order.
%% Seed = {integer(),integer(),integer()}
%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
%% repeat_until_any_ok | repeat_until_any_fail
%% To get execution of cases repeated.
%% N = integer() | forever
%%
%% Description: Returns a list of test case group definitions.
%%--------------------------------------------------------------------
%% Test cases exercising client connection behaviour.
connection_tests() ->
  [
    random_selection,
    failed_connection
  ].

%% Test cases exercising the database itself; 'initial' runs first (in
%% sequence) because it connects and creates the keyspace the rest use.
database_tests() ->
  [
    {initial, [sequence], [connect, create_keyspace]},
    create_table,
    simple_insertion_roundtrip,
    async_insertion_roundtrip,
    emptiness,
    missing_prepared_query,
    missing_prepared_batch,
    options,
    cache_cleanup,
    {transactions, [parallel],
      [
        {types, [parallel],
          [
            all_datatypes,
            % custom_encoders,
            collection_types,
            counter_type,
            varint_type,
            decimal_type
          ]},
        batches_and_pages
      ]}
  ].

groups() ->
  [
    {hash,
      [
        %% random_selection is excluded from the hash group
        {connection_hash, [sequence], connection_tests() -- [random_selection]},
        {database_hash, [sequence], database_tests()}
      ]}
  ].
%%--------------------------------------------------------------------
%% Function: all() -> GroupsAndTestCases
%%
%% GroupsAndTestCases = [{group,GroupName} | TestCase]
%% GroupName = atom()
%% Name of a test case group.
%% TestCase = atom()
%% Name of a test case.
%%
%% Description: Returns the list of groups and test cases that
%% are to be executed.
%%
%% NB: By default, we export all 1-arity user defined functions
%%--------------------------------------------------------------------
%% Run the standalone EUnit wrapper cases first, then the hash group.
all() ->
  [datatypes_test,
   protocol_test,
   {group, hash}
  ].
%%--------------------------------------------------------------------
%% Function: init_per_suite(Config0) ->
%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
%%
%% Config0 = Config1 = [tuple()]
%% A list of key/value pairs, holding the test case configuration.
%% Reason = term()
%% The reason for skipping the suite.
%%
%% Description: Initialization before the suite.
%%
%% Note: This function is free to add any key/value pairs to the Config
%% variable, but should NOT alter/remove any existing entries.
%%--------------------------------------------------------------------
init_per_suite(Config) ->
  %% shared setup: prepares the "test_keyspace_2" environment via the helper
  test_helper:standard_setup("test_keyspace_2", Config).
%%--------------------------------------------------------------------
%% Function: end_per_suite(Config0) -> void() | {save_config,Config1}
%%
%% Config0 = Config1 = [tuple()]
%% A list of key/value pairs, holding the test case configuration.
%%
%% Description: Cleanup after the suite.
%%--------------------------------------------------------------------
end_per_suite(_Config) ->
  %% no suite-level cleanup required
  ok.
%%--------------------------------------------------------------------
%% Function: init_per_group(GroupName, Config0) ->
%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
%%
%% GroupName = atom()
%% Name of the test case group that is about to run.
%% Config0 = Config1 = [tuple()]
%% A list of key/value pairs, holding configuration data for the group.
%% Reason = term()
%% The reason for skipping all test cases and subgroups in the group.
%%
%% Description: Initialization before each test case group.
%%--------------------------------------------------------------------
%% Groups that run before the keyspace exists get it scrubbed from Config.
init_per_group(NoKeyspace, Config) when NoKeyspace == connection_pooler;
                                        NoKeyspace == initial ->
  %% Here we remove the keyspace configuration, since we're going to drop it
  %% Otherwise, subsequent requests would sometimes fail saying that no keyspace was specified
  [{keyspace, undefined} | proplists:delete(keyspace, Config)];
init_per_group(_Group, Config) ->
  Config.
%%--------------------------------------------------------------------
%% Function: end_per_group(GroupName, Config0) ->
%% void() | {save_config,Config1}
%%
%% GroupName = atom()
%% Name of the test case group that is finished.
%% Config0 = Config1 = [tuple()]
%% A list of key/value pairs, holding configuration data for the group.
%%
%% Description: Cleanup after each test case group.
%%--------------------------------------------------------------------
%% No per-group cleanup required.
end_per_group(_, _Config) ->
  ok.
%%--------------------------------------------------------------------
%% Function: init_per_testcase(TestCase, Config0) ->
%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
%%
%% TestCase = atom()
%% Name of the test case that is about to run.
%% Config0 = Config1 = [tuple()]
%% A list of key/value pairs, holding the test case configuration.
%% Reason = term()
%% The reason for skipping the test case.
%%
%% Description: Initialization before each test case.
%%
%% Note: This function is free to add any key/value pairs to the Config
%% variable, but should NOT alter/remove any existing entries.
%%--------------------------------------------------------------------
%% No per-testcase setup beyond the inherited Config.
init_per_testcase(_TestCase, Config) ->
  Config.
%%--------------------------------------------------------------------
%% Function: end_per_testcase(TestCase, Config0) ->
%% void() | {save_config,Config1} | {fail,Reason}
%%
%% TestCase = atom()
%% Name of the test case that is finished.
%% Config0 = Config1 = [tuple()]
%% A list of key/value pairs, holding the test case configuration.
%% Reason = term()
%% The reason for failing the test case.
%%
%% Description: Cleanup after each test case.
%%--------------------------------------------------------------------
%% No per-testcase cleanup; pass Config through unchanged.
end_per_testcase(_TestCase, Config) ->
  Config.
%% Delegate to the EUnit suite for the datatypes module.
datatypes_test(_Config) ->
  ok = eunit:test(cqerl_datatypes).

%% Delegate to the EUnit suite for the protocol module.
protocol_test(_Config) ->
  ok = eunit:test(cqerl_protocol).
%% Acquire N pool clients, prepending each onto Acc.
get_multiple_clients(_Config, 0, Acc) -> Acc;
get_multiple_clients(Config, N, Acc) ->
  get_multiple_clients(Config, N - 1, [get_client(Config) | Acc]).
%% Acquiring many clients should touch every pool member: the number of
%% distinct client pids must equal the configured pool_min_size.
random_selection(Config) ->
  Clients = get_multiple_clients(Config, 200, []),
  DistinctPids = lists:foldl(fun({Pid, _Ref}, Pids) ->
    case lists:member(Pid, Pids) of
      true -> Pids;
      false -> [Pid | Pids]
    end
  end, [], Clients),
  MaxSize = proplists:get_value(pool_min_size, Config),
  %% assertive match: fails with badmatch if the counts differ
  MaxSize = length(DistinctPids).
%% Connecting with a nonexistent keyspace must return an error, and must
%% keep doing so on repeated attempts with the same bad keyspace.
failed_connection(Config) ->
  {error, _} = maybe_get_client([{keyspace, <<"not_a_real_keyspace">>} | Config]),
  {error, _} = maybe_get_client([{keyspace, <<"another_fake_keyspace">>} | Config]),
  % A previous bug would cause timeouts on subsequent calls with an already
  % used invalid keyspace. Test that case here.
  {error, _} = maybe_get_client([{keyspace, <<"not_a_real_keyspace">>} | Config]).
%% Smoke test: a client handle is a {Pid, Ref} pair.
connect(Config) ->
  {Pid, Ref} = get_client(Config),
  true = is_pid(Pid),
  true = is_reference(Ref),
  ok.

%% Create the keyspace used by the rest of the suite (via the helper).
create_keyspace(Config) ->
  test_helper:create_keyspace(<<"test_keyspace_2">>, Config).
%% Creating the test table must yield a schema-changed result naming it.
create_table(Config) ->
  Client = get_client(Config),
  ct:log("Got client ~w~n", [Client]),
  Q = "CREATE TABLE entries1(id varchar, age int, email varchar, PRIMARY KEY(id));",
  {ok, #cql_schema_changed{change_type=created, keyspace = <<"test_keyspace_2">>, name = <<"entries1">>}} =
    cqerl:run_query(Client, Q).
%% Insert one row synchronously and read it back field by field.
simple_insertion_roundtrip(Config) ->
  Client = get_client(Config),
  Q = <<"INSERT INTO entries1(id, age, email) VALUES (?, ?, ?)">>,
  {ok, void} = cqerl:run_query(Client, #cql_query{statement=Q, values=[
    {id, "hello"},
    {age, 18},
    {email, <<"<EMAIL>">>}
  ]}),
  {ok, Result=#cql_result{}} = cqerl:run_query(Client, #cql_query{statement = <<"SELECT * FROM entries1;">>}),
  Row = cqerl:head(Result),
  <<"hello">> = proplists:get_value(id, Row),
  18 = proplists:get_value(age, Row),
  <<"<EMAIL>">> = proplists:get_value(email, Row),
  Result.
%% A column set to null must read back as the atom 'null', both for an
%% inline statement and for a bound null parameter.
emptiness(Config) ->
  Client = get_client(Config),
  {ok, void} = cqerl:run_query(Client, "update entries1 set email = null where id = 'hello';"),
  {ok, Result} = cqerl:run_query(Client, "select * from entries1 where id = 'hello';"),
  Row = cqerl:head(Result),
  null = proplists:get_value(email, Row),
  {ok, void} = cqerl:run_query(Client, #cql_query{statement="update entries1 set age = ? where id = 'hello';",
                                                  values=[{age, null}]}),
  {ok, Result2} = cqerl:run_query(Client, "select * from entries1 where id = 'hello';"),
  Row2 = cqerl:head(Result2),
  null = proplists:get_value(age, Row2).
%% Prepared statements must be transparently re-prepared after a schema
%% change invalidates them server-side, even under heavy concurrency.
missing_prepared_query(Config) ->
  Q = <<"INSERT INTO entries1(id, age, email) VALUES (?, ?, ?)">>,
  InsertQuery = #cql_query{statement = Q, values =
    [{id, "abc"}, {age, 22}, {email, "<EMAIL>"}]},
  with_client(Config, fun(Client) -> {ok, _Result} = cqerl:run_query(Client, InsertQuery) end),
  %% SELECT * is particularly good at exposing stale cached metadata bugs
  Q2 = <<"SELECT * FROM entries1 WHERE id = ?">>,
  SelectQuery = #cql_query{statement = Q2, values = [{id, "abc"}]},
  %% Spawn a bunch of clients in order to utilize the full pool's worth of connections
  Pid = self(),
  SelectFun = fun(Client) ->
    try
      Pid ! cqerl:run_query(Client, SelectQuery)
    catch
      C:R ->
        Pid ! {error, {C, R}}
    end
  end,
  SpawnSelectFun = fun() -> with_client(Config, SelectFun) end,
  NumToSpawn = 200,
  [erlang:spawn(SpawnSelectFun) || _ <- lists:seq(1, NumToSpawn)],
  ok = expect_ok_results(NumToSpawn),
  %% This query causes prepared queries on the table to be invalidated:
  with_client(Config, fun(Client) ->
    {ok, _} = cqerl:run_query(Client, "ALTER TABLE entries1 ADD newcol int")
  end),
  %% repeat the concurrent selects against the invalidated cache
  [erlang:spawn(SpawnSelectFun) || _ <- lists:seq(1, NumToSpawn)],
  ok = expect_ok_results(NumToSpawn).
%% Run Fun with a freshly acquired pool client.
with_client(Config, Fun) ->
  Client = get_client(Config),
  Fun(Client).

%% Collect Num result messages from spawned workers: returns ok once Num
%% {ok, _} messages have arrived, or the first non-{ok, _} message seen.
%% Fix: added an explicit receive timeout so a stuck worker produces a
%% clear {error, timeout_waiting_for_results} instead of relying solely
%% on the 20s suite timetrap to kill the test.
expect_ok_results(0) ->
  ok;
expect_ok_results(Num) ->
  receive
    {ok, _Result} ->
      expect_ok_results(Num - 1);
    Error ->
      Error
  after 5000 ->
    {error, timeout_waiting_for_results}
  end.
%% Batches of prepared statements must also survive schema-change
%% invalidation of the prepared-query cache (re-prepared on retry).
missing_prepared_batch(Config) ->
  Client = get_client(Config),
  S1 = <<"INSERT INTO entries1(id, age, email) VALUES (?, ?, ?)">>,
  V1 = [{id, "abc"}, {age, 22}, {email, "<EMAIL>"}],
  Q1 = #cql_query{statement = S1, values = V1},
  S2 = "INSERT INTO entries1(id, age) VALUES (?, ?)",
  V2 = [ {id, "fff"}, {age, 66} ],
  Q2 = #cql_query{statement = S2, values = V2},
  {ok, _Result} = cqerl:run_query(Client, #cql_query_batch{queries = [Q1, Q2]}),
  %% This query causes prepared queries on the table to be invalidated:
  {ok, _} = cqerl:run_query(Client, "ALTER TABLE entries1 ADD newcol2 int"),
  %% This query will attempt to use the prepared queries and fail, falling back to re-preparing them:
  {ok, _Result} = cqerl:run_query(Client, #cql_query_batch{queries = [Q1, Q2]}).
%% Insert and read back via the asynchronous send_query/2 API, matching
%% replies on the request reference returned by each call.
async_insertion_roundtrip(Config) ->
  Client = get_client(Config),
  Ref = cqerl:send_query(Client, #cql_query{
    statement = <<"INSERT INTO entries1(id, age, email) VALUES (?, ?, ?)">>,
    values = [
      {id, "1234123"},
      {age, 45},
      {email, <<"<EMAIL>">>}
    ]
  }),
  %% block until the insert is acknowledged (suite timetrap bounds this)
  receive {result, Ref, void} -> ok end,
  Ref2 = cqerl:send_query(Client, #cql_query{statement = <<"SELECT * FROM entries1 WHERE id='1234123'">>}),
  receive
    {result, Ref2, Result=#cql_result{}} ->
      Res=[{id, <<"1234123">>}, {age, 45}, {email, <<"<EMAIL>">>}]
        = cqerl:head(Result);
    Other ->
      Res = undefined,
      ct:fail("Received: ~p~n", [Other])
  end,
  Res.
%% When a client process dies, its cached prepared queries must be
%% evicted: a subsequent cache lookup falls back to 'queued'.
cache_cleanup(Config) ->
  Client = get_client(Config),
  {ClientPID, _} = Client,
  Q = <<"INSERT INTO entries1(id, age, email) VALUES (?, ?, ?)">>,
  CQLQuery = #cql_query{statement = Q, values = [{id, "abc"}, {age, 22}, {email, "<EMAIL>"}]},
  {ok, _Result} = cqerl:run_query(Client, CQLQuery),
  % Query should now be cached:
  ?assertMatch(#cqerl_cached_query{}, cqerl_cache:lookup(ClientPID, CQLQuery)),
  exit(ClientPID, crash),
  %% give the cache owner time to observe the exit before re-checking
  timer:sleep(250),
  ?assertEqual(queued, cqerl_cache:lookup(ClientPID, CQLQuery)).
%% Build a binary of column definitions ("col1 t1, col2 t2, ...") from a
%% list of CQL type names, numbering the columns from 1.
datatypes_columns(Cols) ->
  datatypes_columns(1, Cols, <<>>).

datatypes_columns(_Index, [], Acc) ->
  Acc;
datatypes_columns(Index, [Type | Types], Acc) ->
  Col = iolist_to_binary(io_lib:format("col~B ~s, ", [Index, Type])),
  datatypes_columns(Index + 1, Types, <<Acc/binary, Col/binary>>).
%% @doc Round-trips one row per supported CQL datatype through the
%% `entries2' table and verifies every column decodes back to what was
%% inserted. Protocol v3 covers 14 column types; later protocol versions
%% add tinyint, smallint, date and time (col15-col18).
all_datatypes(Config) ->
    Client = get_client(Config),
    Time = {23, 4, 123},
    Date = {1970, 1, 1},
    %% NOTE(review): presumably nanoseconds-since-midnight for 12:04 plus
    %% 123 ns, yet Time above is {23, 4, 123}; math:pow/2 also yields a
    %% float -- confirm both are intended.
    AbsTime = (12 * 3600 + 4 * 60 + 123) * math:pow(10, 9),
    %% Select the column set, the insert statement and two reference rows
    %% according to the negotiated native-protocol version.
    {Cols, InsQ, RRow1, RRow2} = case proplists:get_value(protocol_version, Config) of
        3 ->
            {
                datatypes_columns([ascii, bigint, blob, boolean, decimal, double,
                                   float, int, timestamp, uuid, varchar,
                                   timeuuid, inet, varint]),
                #cql_query{statement = <<"INSERT INTO entries2(col1, col2, col3,
                    col4, col5, col6, col7, col8, col9, col10,
                    col11, col12, col13, col14
                    ) VALUES (?, ?, ?, ?, ?, ?, ?,
                    ?, ?, ?, ?, ?, ?, ?)">>},
                [
                    {col1, "hello"},
                    {col2, 9223372036854775807},
                    {col3, <<1,2,3,4,5,6,7,8,9,10>>},
                    {col4, true},
                    {col5, {1234, 5}},
                    {col6, 5.1235131241221e-6},
                    {col7, 5.12351e-6},
                    {col8, 2147483647},
                    %% `now' asks the driver to fill in the current
                    %% timestamp / a freshly generated timeuuid.
                    {col9, now},
                    {col10, new},
                    {col11, <<"Юникод"/utf8>>},
                    {col12, now},
                    {col13, {127, 0, 0, 1}},
                    {col14, 666}
                ], [
                    {col1, <<"foobar">>},
                    {col2, -9223372036854775806},
                    {col3, <<1,2,3,4,5,6,7,8,9,10>>},
                    {col4, false},
                    {col5, {1234, -5}},
                    {col6, -5.1235131241220e-6},
                    {col7, -5.12351e-6},
                    {col8, -2147483646},
                    {col9, 1984336643},
                    {col10, <<22,6,195,126,110,122,64,242,135,15,38,179,46,108,22,64>>},
                    {col11, <<"åäö"/utf8>>},
                    {col12, <<250,10,224,94,87,197,17,227,156,99,146,79,0,0,0,195>>},
                    {col13, {0,0,0,0,0,0,0,0}},
                    {col14, 666}
                ]
            };
        _ ->
            {
                datatypes_columns([ascii, bigint, blob, boolean, decimal, double,
                                   float, int, timestamp, uuid, varchar, timeuuid, inet, varint,
                                   tinyint, smallint, date, time]),
                #cql_query{statement = <<"INSERT INTO entries2(col1, col2, col3,
                    col4, col5, col6, col7, col8, col9, col10,
                    col11, col12, col13, col14, col15, col16,
                    col17, col18) VALUES (?, ?, ?, ?, ?, ?, ?,
                    ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)">>},
                [
                    {col1, "hello"},
                    {col2, 9223372036854775807},
                    {col3, <<1,2,3,4,5,6,7,8,9,10>>},
                    {col4, true},
                    {col5, {1234, 5}},
                    {col6, 5.1235131241221e-6},
                    {col7, 5.12351e-6},
                    {col8, 2147483647},
                    {col9, now},
                    {col10, new},
                    {col11, <<"Юникод"/utf8>>},
                    {col12, now},
                    {col13, {127, 0, 0, 1}},
                    {col14, 666},
                    {col15, 120},
                    {col16, 1200},
                    {col17, Date},
                    {col18, Time}
                ],
                [
                    {col1, <<"foobar">>},
                    {col2, -9223372036854775806},
                    {col3, <<1,2,3,4,5,6,7,8,9,10>>},
                    {col4, false},
                    {col5, {1234, -5}},
                    {col6, -5.1235131241220e-6},
                    {col7, -5.12351e-6},
                    {col8, -2147483646},
                    {col9, 1984336643},
                    {col10, <<22,6,195,126,110,122,64,242,135,15,38,179,46,108,22,64>>},
                    {col11, <<"åäö"/utf8>>},
                    {col12, <<250,10,224,94,87,197,17,227,156,99,146,79,0,0,0,195>>},
                    {col13, {0,0,0,0,0,0,0,0}},
                    {col14, 666},
                    {col15, -120},
                    {col16, -1200},
                    {col17, Date},
                    {col18, AbsTime}
                ]
            }
    end,
    CreationQ = <<"CREATE TABLE entries2(", Cols/binary, " PRIMARY KEY(col1));">>,
    ct:log("Executing : ~s~n", [CreationQ]),
    {ok, #cql_schema_changed{change_type=created, keyspace = <<"test_keyspace_2">>, name = <<"entries2">>}} =
        cqerl:run_query(Client, CreationQ),
    {ok, void} = cqerl:run_query(Client, InsQ#cql_query{values=RRow2}),
    {ok, void} = cqerl:run_query(Client, InsQ#cql_query{values=RRow1}),
    %% Third row exercises atoms as text values (col1/col11).
    {ok, void} = cqerl:run_query(Client, InsQ#cql_query{
        statement="INSERT INTO entries2(col1, col11) values (?, ?);",
        values=RRow3=[ {col1, foobaz}, {col11, 'åäö'} ]
    }),
    {ok, Result=#cql_result{}} = cqerl:run_query(Client, #cql_query{statement = <<"SELECT * FROM entries2;">>}),
    {Row1, Result1} = cqerl:next(Result),
    {Row2, Result2} = cqerl:next(Result1),
    {Row3, _Result3} = cqerl:next(Result2),
    %% Match every fetched row against its reference row, with special
    %% handling for driver-generated or lossy-precision columns.
    lists:foreach(fun
        (Row) ->
            ReferenceRow = case proplists:get_value(col1, Row) of
                <<"hello">> -> RRow1;
                <<"foobar">> -> RRow2;
                <<"foobaz">> -> RRow3
            end,
            lists:foreach(fun
                ({col12, _}) -> true = uuid:is_v1(proplists:get_value(col12, Row));
                ({col10, _}) -> true = uuid:is_v4(proplists:get_value(col10, Row));
                ({col9, _}) -> ok; %% Yeah, I know...
                %% NOTE(review): col16 is a smallint holding integers
                %% (1200 / -1200), so this tuple pattern can never match
                %% and always falls through to the generic clause below;
                %% col17 is the date column -- confirm intent.
                ({col16, {Y, M, D}}) ->
                    {Y, M, D} = proplists:get_value(col16, Row);
                %% NOTE(review): the comparison result is discarded, so
                %% this clause never fails -- consider a match instead.
                ({col18, _}) -> proplists:get_value(col18, Row) == AbsTime;
                ({col1, Key}) when is_list(Key) ->
                    Val = list_to_binary(Key),
                    Val = proplists:get_value(col1, Row);
                ({Col, Key}) when is_atom(Key), Col == col1 orelse Col == col11 ->
                    Val = atom_to_binary(Key, utf8),
                    Val = proplists:get_value(Col, Row);
                %% Single-precision float: compare after rounding away
                %% the precision lost in the round trip.
                ({col7, Val0}) ->
                    Val = round(Val0 * 1.0e11),
                    Val = round(proplists:get_value(col7, Row) * 1.0e11);
                ({Key, Val}) ->
                    Val = proplists:get_value(Key, Row, null)
            end, ReferenceRow)
    end, [Row1, Row2, Row3]),
    [Row1, Row2, Row3].
%% @doc Exercises #cql_query.value_encode_handler: inserts two rows and
%% selects them back via an IN over the (col2,col3) tuple, encoding the
%% tuple values through a hand-rolled encoder for Cassandra's TupleType.
custom_encoders(Config) ->
    Client = get_client(Config),
    Cols = datatypes_columns([varchar, varchar, varchar]),
    CreationQ = <<"CREATE TABLE entries2_1(", Cols/binary, " PRIMARY KEY(col1, col2, col3));">>,
    ct:log("Executing : ~s~n", [CreationQ]),
    {ok, #cql_schema_changed{change_type=created, keyspace = <<"test_keyspace_2">>, name = <<"entries2_1">>}} =
        cqerl:run_query(Client, CreationQ),
    InsQ = #cql_query{statement = <<"INSERT INTO entries2_1(col1, col2, col3) VALUES (?, ?, ?)">>},
    {ok, void} = cqerl:run_query(Client, InsQ#cql_query{values=RRow1=[
        {col1, <<"test">>},
        {col2, <<"hello">>},
        {col3, <<"testing tuples">>}
    ]}),
    {ok, void} = cqerl:run_query(Client, InsQ#cql_query{values=RRow2=[
        {col1, <<"test">>},
        {col2, <<"nice to have">>},
        {col3, <<"custom encoder">>}
    ]}),
    {ok, Result=#cql_result{}} = cqerl:run_query(Client, #cql_query{
        statement = <<"SELECT * FROM entries2_1 WHERE col1 = ? AND (col2,col3) IN ?;">>,
        values = [
            {col1, <<"test">>},
            {'in(col2,col3)', [
                {<<"hello">>,<<"testing tuples">>},
                {<<"nice to have">>,<<"custom encoder">>}
            ]}
        ],
        % provide custom encoder for TupleType
        value_encode_handler = fun({{custom, <<"org.apache.cassandra.db.marshal.TupleType", _Rest/binary>>}, Tuple}, Query) ->
            %% Each tuple element is serialised as a 4-byte big-endian
            %% signed length followed by the text-encoded value.
            GetElementBinary = fun(Value) ->
                Bin = cqerl_datatypes:encode_data({text, Value}, Query),
                Size = size(Bin),
                <<Size:32/big-signed-integer, Bin/binary>>
            end,
            << << (GetElementBinary(Value))/binary >> || Value <- tuple_to_list(Tuple) >>
        end
    }),
    %% Both inserted rows must come back.
    [RRow1, RRow2] = cqerl:all_rows(Result),
    ok.
%% @doc Verifies the `maps' and `text_uuids' application options: result
%% rows come back as maps, and uuid/timeuuid columns as canonical text
%% rather than 16-byte binaries. Both options are unset afterwards.
options(Config) ->
    application:set_env(cqerl, maps, true),
    application:set_env(cqerl, text_uuids, true),
    Client = get_client(Config),
    Cols = datatypes_columns([timeuuid, uuid]),
    CreationQ = <<"CREATE TABLE entries2_2(", Cols/binary, " PRIMARY KEY(col1));">>,
    ct:log("Executing : ~s~n", [CreationQ]),
    {ok, #cql_schema_changed{change_type=created, keyspace = <<"test_keyspace_2">>, name = <<"entries2_2">>}} =
        cqerl:run_query(Client, CreationQ),
    UUIDState = uuid:new(self()),
    {TimeUUID, _} = uuid:get_v1(UUIDState),
    UUID = uuid:get_v4(),
    InsQ = #cql_query{statement = <<"INSERT INTO entries2_2(col1, col2) VALUES (?, ?)">>},
    {ok, void} = cqerl:run_query(Client, InsQ#cql_query{values=[
        {col1, TimeUUID},
        {col2, UUID}
    ]}),
    {ok, Result=#cql_result{}} = cqerl:run_query(Client, #cql_query{statement = <<"SELECT * FROM entries2_2;">>}),
    TextTimeUUID = uuid:uuid_to_string(TimeUUID, binary_standard),
    TextUUID = uuid:uuid_to_string(UUID, binary_standard),
    %% With maps=true the single row arrives as a map keyed by column.
    [#{col1 := TextTimeUUID,
       col2 := TextUUID}] = cqerl:all_rows(Result),
    %% NOTE(review): the env options are not restored if a match above
    %% fails, so a crash here leaks maps/text_uuids into later cases.
    application:unset_env(cqerl, maps),
    application:unset_env(cqerl, text_uuids).
%% @doc Covers the list<int>, set<varchar> and map<varchar,varchar>
%% collection types: insert, a set-append UPDATE, then read-back checks.
collection_types(Config) ->
    Client = get_client(Config),
    CreationQ = <<"CREATE TABLE entries3(key varchar, numbers list<int>, names set<varchar>, phones map<varchar, varchar>, PRIMARY KEY(key));">>,
    ct:log("Executing : ~s~n", [CreationQ]),
    {ok, #cql_schema_changed{change_type=created, keyspace = <<"test_keyspace_2">>, name = <<"entries3">>}} =
        cqerl:run_query(Client, CreationQ),
    {ok, void} = cqerl:run_query(Client, #cql_query{
        statement = <<"INSERT INTO entries3(key, numbers, names, phones) values (?, ?, ?, ?);">>,
        values = [
            {key, "First collection"},
            {numbers, [1,2,3,4,5]},
            %% Duplicate "matt" collapses inside the set.
            {names, ["matt", "matt", "yvon"]},
            {phones, [{<<"home">>, <<"418-123-4545">>}, {"work", "555-555-5555"}]}
        ]
    }),
    %% Append one element to the set in place.
    {ok, void} = cqerl:run_query(Client, #cql_query{
        statement = "UPDATE entries3 SET names = names + {'martin'} WHERE key = ?",
        values = [{key, "First collection"}]
    }),
    {ok, Result=#cql_result{}} = cqerl:run_query(Client, #cql_query{statement = "SELECT * FROM entries3;"}),
    Row = cqerl:head(Result),
    <<"First collection">> = proplists:get_value(key, Row),
    [1,2,3,4,5] = proplists:get_value(numbers, Row),
    Names = proplists:get_value(names, Row),
    %% matt + yvon + appended martin; the duplicate matt was dropped.
    3 = length(Names),
    true = lists:member(<<"matt">>, Names),
    true = lists:member(<<"yvon">>, Names),
    true = lists:member(<<"martin">>, Names),
    lists:foreach(fun
        ({<<"home">>, <<"418-123-4545">>}) -> ok;
        ({<<"work">>, <<"555-555-5555">>}) -> ok;
        (_) -> throw(unexpected_value)
    end, proplists:get_value(phones, Row)),
    Row.
%% @doc Covers the counter type: one parameterised increment (+18) and
%% one literal increment (+10) must leave the counter at 28.
counter_type(Config) ->
    Client = get_client(Config),
    CreationQ = <<"CREATE TABLE entries4(key varchar, count counter, PRIMARY KEY(key));">>,
    ct:log("Executing : ~s~n", [CreationQ]),
    {ok, #cql_schema_changed{change_type=created, keyspace = <<"test_keyspace_2">>, name = <<"entries4">>}} =
        cqerl:run_query(Client, CreationQ),
    %% Counters can only be UPDATEd, never INSERTed.
    {ok, void} = cqerl:run_query(Client, #cql_query{
        statement = <<"UPDATE entries4 SET count = count + ? WHERE key = ?;">>,
        values = [
            {key, "First counter"},
            {count, 18}
        ]
    }),
    {ok, void} = cqerl:run_query(Client, #cql_query{
        statement = "UPDATE entries4 SET count = count + 10 WHERE key = ?;",
        values = [{key, "First counter"}]
    }),
    {ok, Result=#cql_result{}} = cqerl:run_query(Client, #cql_query{statement = "SELECT * FROM entries4;"}),
    Row = cqerl:head(Result),
    <<"First counter">> = proplists:get_value(key, Row),
    28 = proplists:get_value(count, Row),
    Row.
%% @doc Inserts every varint_test_ranges/0 endpoint keyed by itself
%% (with its text form in sval) and reads them all back, checking the
%% varint codec round-trips values from small ints to well past 2^64.
varint_type(Config) ->
    Client = get_client(Config),
    CreationQ = "CREATE TABLE varint_test (key varint PRIMARY KEY, sval text)",
    ct:log("Executing : ~s~n", [CreationQ]),
    {ok, #cql_schema_changed{change_type=created,
                             keyspace = <<"test_keyspace_2">>,
                             name = <<"varint_test">>}} =
        cqerl:run_query(Client, CreationQ),
    Statement = "INSERT INTO varint_test(key, sval) VALUES (?, ?)",
    TestVals = varint_test_ranges(),
    lists:foreach(fun(K) ->
                      {ok, void} =
                          cqerl:run_query(Client,
                                          #cql_query{statement = Statement,
                                                     values = [{key, K},
                                                               {sval,
                                                                integer_to_list(K)}
                                                              ]})
                  end,
                  TestVals),
    Statement2 = "SELECT * FROM varint_test",
    %% Page size larger than the value count so one page holds all rows.
    {ok, Result} = cqerl:run_query(Client, #cql_query{statement =
                                                          Statement2,
                                                      page_size = 2000}),
    Rows = cqerl:all_rows(Result),
    Vals = lists:sort(check_extract_varints(Rows)),
    ct:log("Vals ~p ~p: ~p", [length(Rows), length(Vals), Vals]),
    %% Exactly the inserted set must come back.
    Vals = lists:sort(TestVals).
%% @doc Endpoints of integer ranges chosen to exercise every varint
%% width: small ints, signed/unsigned 16-bit boundaries, huge ints well
%% past 2^64, and their negative counterparts. Returns the low and high
%% endpoint of each range as one flat list.
varint_test_ranges() ->
    Endpoints =
        [{0, 300},
         {32700, 32800},
         {65530, 65540},
         {100000000000000000000000, 100000000000000000000099},
         {100000000000000000000000000000000000000000000000,
          100000000000000000000000000000000000000000000099},
         {-5, -1},
         {-133, -125},
         {-32780, -32760},
         {-65544, -65531},
         {-100000000000000000000099, -100000000000000000000000},
         {-100000000000000000000000000000000000000000000111,
          -100000000000000000000000000000000000000000000005}],
    lists:append([[Low, High] || {Low, High} <- Endpoints]).
%% @doc Pulls the varint `key' out of every row and cross-checks it
%% against the integer stored as text in `sval'; crashes (badmatch) on
%% any mismatch and otherwise returns the keys in row order.
check_extract_varints(Rows) ->
    Keys = [proplists:get_value(key, Row) || Row <- Rows],
    Parsed = [binary_to_integer(proplists:get_value(sval, Row))
              || Row <- Rows],
    Keys = Parsed,
    Keys.
%% @doc Inserts {Unscaled, Scale} decimals keyed by themselves (with the
%% two parts also stored in separate columns) and checks the decimal
%% codec round-trips every combination from decimal_test_ranges/0.
decimal_type(Config) ->
    Client = get_client(Config),
    CreationQ = "CREATE TABLE decimal_test (key decimal PRIMARY KEY,
                 scale int, unscaled varint)",
    ct:log("Executing : ~s~n", [CreationQ]),
    {ok, #cql_schema_changed{change_type=created,
                             keyspace = <<"test_keyspace_2">>,
                             name = <<"decimal_test">>}} =
        cqerl:run_query(Client, CreationQ),
    Statement = "INSERT INTO decimal_test(key, scale, unscaled)
                 VALUES (?, ?, ?)",
    TestVals = decimal_test_ranges(),
    ct:log("Inserting : ~p~n", [TestVals]),
    lists:foreach(fun({U, S}) ->
                      {ok, void} =
                          cqerl:run_query(Client,
                                          #cql_query{statement = Statement,
                                                     values = [{key, {U, S}},
                                                               {unscaled, U},
                                                               {scale, S}
                                                              ]})
                  end,
                  TestVals),
    Statement2 = "SELECT * FROM decimal_test",
    %% Page size larger than the combination count: one page, all rows.
    {ok, Result} = cqerl:run_query(Client, #cql_query{statement =
                                                          Statement2,
                                                      page_size = 20000}),
    Rows = cqerl:all_rows(Result),
    Vals = lists:sort(check_extract_decimals(Rows)),
    ct:log("Vals ~p ~p: ~p", [length(TestVals), length(Vals), Vals]),
    Vals = lists:sort(TestVals).
%% @doc Cartesian product of the varint endpoints with a spread of
%% scales (-5..5 plus the 32-bit signed extremes), as {Unscaled, Scale}
%% pairs.
decimal_test_ranges() ->
    Scales = lists:seq(-5, 5) ++ [2147483647, -2147483648],
    [{Unscaled, Scale} || Unscaled <- varint_test_ranges(),
                          Scale <- Scales].
%% @doc For each row, checks that the decimal `key' decoded to
%% {Unscaled, Scale} matches the separately stored `unscaled' varint and
%% `scale' int columns. Returns the decoded decimals in row order;
%% crashes (badmatch) on any mismatch.
check_extract_decimals(Rows) ->
    Decimals = [proplists:get_value(key, Row) || Row <- Rows],
    CheckUnScales = [proplists:get_value(unscaled, Row) || Row <- Rows],
    CheckScales = [proplists:get_value(scale, Row) || Row <- Rows],
    {Unscales, Scales} = lists:unzip(Decimals),
    ct:log("Unscales: ~p~n", [Unscales]),
    Unscales = CheckUnScales,
    ct:log("Scales: ~p~n", [Scales]),
    Scales = CheckScales,
    Decimals.
%% @doc Builds the N insert queries used by batches_and_pages/1: ids are
%% "1".."N" with age 10+N and email "test<ID>@gmail.com". Because N is
%% prepended first and the accumulator is reversed at the end, the
%% resulting queries are ordered from id N down to id 1.
inserted_rows(0, _Q, Acc) ->
    lists:reverse(Acc);
inserted_rows(N, Q, Acc) ->
    ID = list_to_binary(io_lib:format("~B", [N])),
    inserted_rows(N-1, Q, [ Q#cql_query{values=[
        {id, ID},
        {age, 10+N},
        {email, list_to_binary(["test", ID, "@gmail.com"])}
    ]} | Acc ]).
%% @doc Inserts 90 rows in one batch, then pages through them with a
%% page size of 25 using both synchronous (fetch_more/1) and
%% asynchronous (fetch_more_async/1) continuation, collecting all ids
%% and asserting nothing was lost or duplicated.
batches_and_pages(Config) ->
    Client = get_client(Config),
    T1 = os:timestamp(),
    N = 90,
    Bsz = 25,
    {ok, void} = cqerl:run_query(Client, "TRUNCATE entries1;"),
    Q = #cql_query{statement = <<"INSERT INTO entries1(id, age, email) VALUES (?, ?, ?)">>},
    Batch = #cql_query_batch{queries=inserted_rows(N, Q, [])},
    ct:log("Batch : ~w~n", [Batch]),
    {ok, void} = cqerl:run_query(Client, Batch),
    %% Folds a page's rows into the id set, cross-checking each row's
    %% age (id + 10) and email ("test" ++ id ++ domain) along the way.
    AddIDs = fun (Result, IDs0) ->
        lists:foldr(fun (Row, IDs) ->
            ID = proplists:get_value(id, Row),
            {IDint, _} = string:to_integer(binary_to_list(ID)),
            IDint = proplists:get_value(age, Row) - 10,
            IDsize = size(ID),
            << _:4/binary, ID:IDsize/binary, _Rest/binary >> = proplists:get_value(email, Row),
            gb_sets:add(ID, IDs)
        end,
        IDs0, cqerl:all_rows(Result))
    end,
    {ok, Result} = cqerl:run_query(Client, #cql_query{page_size=Bsz, statement="SELECT * FROM entries1;"}),
    IDs1 = AddIDs(Result, gb_sets:new()),
    %% Page 2 synchronously, pages 3 and 4 via async continuations.
    {ok, Result2} = cqerl:fetch_more(Result),
    Ref1 = cqerl:fetch_more_async(Result2),
    IDs2 = AddIDs(Result2, IDs1),
    %% Single-clause receives export their bindings (IDs3 is used below).
    FetchMoreRef = receive
        {result, Ref1, Result3} ->
            Ref2 = cqerl:fetch_more_async(Result3),
            IDs3 = AddIDs(Result3, IDs2),
            Ref2
    end,
    receive
        {result, FetchMoreRef, Result4} ->
            IDs4 = AddIDs(Result4, IDs3),
            %% All N distinct ids must have been seen exactly once.
            N = gb_sets:size(IDs4)
    end,
    ct:log("Time elapsed inserting ~B entries and fetching in batches of ~B: ~B ms",
           [N, Bsz, round(timer:now_diff(os:timestamp(), T1)/1000)]).
-module(riak_core_aae_vnode).
-export([maybe_create_hashtrees/4,
update_hashtree/4]).
-export([request_hashtree_pid/2,
hashtree_pid/2,
rehash/4]).
-define(DEFAULT_HASHTREE_TOKENS, 90).
-type preflist() :: [{Index::integer(), Node :: term()}].
%%%===================================================================
%%% Behaviour callbacks
%%%===================================================================
%% @doc aae_repair is called when the AAE system detects a difference.
%% The simplest way to handle this is to trigger a read-repair, if the
%% system supports it, but the actual implementation is left to the
%% vnode to handle however is best.
-callback aae_repair(Bucket::binary(), Key::term()) -> term().
%% @doc hash_object is called by the AAE subsystem to hash an object when the
%% tree first gets generated, when an object needs to be hashed, or on insert.
%% The AAE system does not care about the details as long as the result is a
%% binary and is deterministic. (see {@link riak_core_index_hashtree})
-callback hash_object({Bucket::binary(), Key::term()}, Obj::term()) -> binary().
%% @doc Returns the vnode master for this vnode type, that is the same
%% used when registering the vnode.
%% This function is required by the {@link riak_core_index_hashtree} to
%% send rehash requests to a vnode.
-callback master() -> Master::atom().
%%%===================================================================
%%% AAE Calls
%%%===================================================================
%% @doc This is an asynchronous command that needs to send a term in the form
%% `{ok, Hashtree::pid()}` or `{error, wrong_node}` to the process it was called
%% from.
%% It is required by the {@link riak_core_entropy_manager} to determine which
%% hashtree serves a partition on a given Erlang node.
%% @doc Asynchronously ask the vnode owning Partition on this node for
%% its hashtree pid; the reply is delivered as a raw message to the
%% caller, correlated by the {hashtree_pid, Partition} request id.
-spec request_hashtree_pid(_Master::atom(), Partition::non_neg_integer()) -> ok.
request_hashtree_pid(Master, Partition) ->
    Sender = {raw, {hashtree_pid, Partition}, self()},
    riak_core_vnode_master:command({Partition, node()},
                                   {hashtree_pid, node()},
                                   Sender,
                                   Master).
%% @doc Synchronously fetch the hashtree pid serving Partition on the
%% local node; blocks without a timeout until the vnode answers.
-spec hashtree_pid(_Master::atom(), Partition::non_neg_integer()) ->
    {error, wrong_node} |
    {ok, HashTree::pid()}.
hashtree_pid(Master, Partition) ->
    Request = {hashtree_pid, node()},
    riak_core_vnode_master:sync_command({Partition, node()},
                                        Request,
                                        Master,
                                        infinity).
%% Used by {@link riak_core_exchange_fsm} to force a vnode to update the hashtree
%% for repaired keys. Typically, repairing keys will trigger read repair that
%% will update the AAE hash in the write path. However, if the AAE tree is
%% divergent from the KV data, it is possible that AAE will try to repair keys
%% that do not have divergent KV replicas. In that case, read repair is never
%% triggered. Always rehashing keys after any attempt at repair ensures that
%% AAE does not try to repair the same non-divergent keys over and over.
%% @doc Fire-and-forget request asking each vnode in Preflist to rehash
%% the AAE entry for {Bucket, Key}; no reply is expected (sender is
%% `ignore').
-spec rehash(_Master::atom(), _Preflist::preflist(),
             _Bucket::binary(), _Key::binary()) -> ok.
rehash(Master, Preflist, Bucket, Key) ->
    riak_core_vnode_master:command(Preflist,
                                   {rehash, {Bucket, Key}},
                                   ignore,
                                   Master).
%%%===================================================================
%%% Utility functions
%%%===================================================================
%% @doc This function is a working example of how to implement hashtree
%% creation for a VNode. Using it is recommended; it needs to be
%% called during the init process.
%% It also requires the calling vnode to implement a handle_info match on
%% `retry_create_hashtree`, which should either call this function
%% again or do nothing if a valid hashtree already exists.
%% In addition, the calling VNode will be set up to monitor the
%% created hashtree, so it should listen for
%% `{'DOWN', _, _, Pid, _}` (where Pid is the pid of the created hashtree)
%% and recreate the tree if it dies.
%% @doc Entry point: consult the entropy manager and only build trees
%% when AAE is enabled; otherwise the previous tree pid is kept.
-spec maybe_create_hashtrees(atom(), integer(), atom(), pid()|undefined) ->
    pid()|undefined.
maybe_create_hashtrees(Service, Index, VNode, Last) ->
    Enabled = riak_core_entropy_manager:enabled(),
    maybe_create_hashtrees(Enabled, Service, Index, VNode, Last).
-spec maybe_create_hashtrees(boolean(), atom(), integer(), atom(), pid()|undefined) ->
    pid()|undefined.
%% AAE disabled: keep whatever tree pid (possibly undefined) we had.
maybe_create_hashtrees(false, _Service, _Index, _VNode, Last) ->
    lager:debug("Hashtree not enabled."),
    Last;
maybe_create_hashtrees(true, Service, Index, VNode, Last) ->
    %% Only maintain a hashtree if a primary vnode
    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
    case riak_core_ring:vnode_type(Ring, Index) of
        primary ->
            %%{ok, ModCaps} = Mod:capabilities(ModState),
            %% Empty = case is_empty(State) of
            %%             {true, _}     -> true;
            %%             {false, _, _} -> false
            %%         end,
            %%Opts = [vnode_empty || Empty],
            Opts = [],
            case riak_core_index_hashtree:start(Service, Index, self(),
                                                VNode, Opts) of
                {ok, Trees} ->
                    %% Caller is expected to watch for 'DOWN' and
                    %% recreate the tree if it dies.
                    monitor(process, Trees),
                    Trees;
                Error ->
                    lager:info("~p/~p: unable to start index_hashtree: ~p",
                               [Service, Index, Error]),
                    %% Retry via the vnode's handle_info(retry_create_hashtree, ...).
                    erlang:send_after(1000, self(), retry_create_hashtree),
                    Last
            end;
        _ ->
            %% Fallback/secondary vnodes carry no tree.
            Last
    end.
%% @doc A utility function that implements partially asynchronous updates
%% to the hashtree. It allows up to `riak_core.anti_entropy_max_async`
%% asynchronous hashtree updates before requiring a synchronous update.
%% If unset, `riak_core.anti_entropy_max_async` defaults to 90.
%% @doc Insert {Bucket, Key} -> RObj into the index hashtree, going
%% async while tokens remain and falling back to a synchronous insert
%% (which also refills the token allowance) once they are exhausted.
-spec update_hashtree(binary(), term(), binary(), pid()) -> ok.
update_hashtree(Bucket, Key, RObj, Trees) ->
    Items = [{object, {Bucket, Key}, RObj}],
    case get_hashtree_token() of
        false ->
            %% Out of tokens: apply backpressure with a blocking insert,
            %% then start a fresh token allowance.
            riak_core_index_hashtree:insert(Items, [], Trees),
            reset_hashtree_token(),
            ok;
        true ->
            riak_core_index_hashtree:async_insert(Items, [], Trees),
            ok
    end.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @doc Maximum number of asynchronous hashtree inserts allowed between
%% synchronous ones; read from riak_core's anti_entropy_max_async
%% setting, defaulting to ?DEFAULT_HASHTREE_TOKENS (90).
-spec max_hashtree_tokens() -> pos_integer().
max_hashtree_tokens() ->
    app_helper:get_env(riak_core,
                       anti_entropy_max_async,
                       ?DEFAULT_HASHTREE_TOKENS).
%% @doc Take one token from the per-process allowance kept in the
%% process dictionary: returns true while tokens remain (decrementing
%% the count), false once they are spent. First use seeds the counter
%% from max_hashtree_tokens/0.
get_hashtree_token() ->
    case get(hashtree_tokens) of
        undefined ->
            put(hashtree_tokens, max_hashtree_tokens() - 1),
            true;
        Remaining when Remaining > 0 ->
            put(hashtree_tokens, Remaining - 1),
            true;
        _Spent ->
            false
    end.
%% Refill the process-dictionary token allowance to its configured maximum.
reset_hashtree_token() ->
    put(hashtree_tokens, max_hashtree_tokens()).
%%%===================================================================
%%% Placeholders for callback functions (to give you an idea how they look)
%%%===================================================================
%% @doc aae_repair is called when the AAE system detects a difference.
%% The simplest way to handle this is to trigger a read-repair, if the
%% system supports it, but the actual implementation is left to the
%% vnode to handle however is best.
%% -spec aae_repair(Bucket::binary(), Key::binary()) -> term().
%%aae_repair(_Bucket, _Key) ->
%% aae_repair.
%% @doc hash_object is called by the AAE subsystem to hash an object when the
%% tree first gets generated, when an object needs to be hashed, or on insert.
%% The AAE system does not care about the details as long as the result is a
%% binary and is deterministic. (see {@link riak_core_index_hashtree})
%% -spec hash_object({Bucket::binary(), Key::binary()}, Obj::term()) -> binary().
%% hash_object(_BKey, _Obj) ->
%% <<>>.
%% @doc Returns the vnode master for this vnode type, that is the same
%% used when registering the vnode.
%% This function is required by the {@link riak_core_index_hashtree} to
%% send rehash requests to a vnode.
%% -spec master() -> Master::atom().
%% master() ->
%% some_other_strange_atom_form_master. | src/riak_core_aae_vnode.erl | 0.64131 | 0.41739 | riak_core_aae_vnode.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% riak_core_coverage_plan: Create a plan to cover a minimal set of VNodes.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc A module to calculate a plan to cover a minimal set of VNodes.
%% There is also an option to specify a number of primary VNodes
%% from each preference list to use in the plan.
-module(riak_core_coverage_plan).
%% API
-export([create_plan/5]).
-type index() :: chash:index_as_int().
-type req_id() :: non_neg_integer().
-type coverage_vnodes() :: [{index(), node()}].
-type vnode_filters() :: [{node(), [{index(), [index()]}]}].
-type coverage_plan() :: {coverage_vnodes(), vnode_filters()}.
%% ===================================================================
%% Public API
%% ===================================================================
%% @doc Create a coverage plan to distribute work to a set
%% covering VNodes around the ring.
-spec create_plan(all | allup, pos_integer(), pos_integer(),
                  req_id(), atom()) ->
    {error, term()} | coverage_plan().
%% @doc Builds a minimal covering set of vnodes for Service. NVal is the
%% replication factor, PVC the number of primary vnodes wanted per
%% preference list, and VNodeSelector decides whether a partial plan is
%% acceptable (`allup') or an error (`all') when vnodes are unavailable.
create_plan(VNodeSelector, NVal, PVC, ReqId, Service) ->
    {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
    PartitionCount = chashbin:num_partitions(CHBin),
    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
    %% Create a coverage plan with the requested primary
    %% preference list VNode coverage.
    %% Get a list of the VNodes owned by any unavailable nodes
    Members = riak_core_ring:all_members(Ring),
    %% Nodes flagged participate_in_coverage=false are excluded as well
    %% as nodes that are actually down.
    NonCoverageNodes = [Node || Node <- Members,
                                riak_core_ring:get_member_meta(Ring, Node, participate_in_coverage) == false],
    DownVNodes = [Index ||
                     {Index, _Node}
                         <- riak_core_apl:offline_owners(Service, CHBin, NonCoverageNodes)],
    %% Calculate an offset based on the request id to offer
    %% the possibility of different sets of VNodes being
    %% used even when all nodes are available.
    Offset = ReqId rem NVal,
    RingIndexInc = chash:ring_increment(PartitionCount),
    %% Keyspaces are numbered 0..PartitionCount-1; actual ring indexes
    %% are keyspace * RingIndexInc.
    AllKeySpaces = lists:seq(0, PartitionCount - 1),
    UnavailableKeySpaces = [(DownVNode div RingIndexInc) || DownVNode <- DownVNodes],
    %% Create function to map coverage keyspaces to
    %% actual VNode indexes and determine which VNode
    %% indexes should be filtered.
    CoverageVNodeFun =
        fun({Position, KeySpaces}, Acc) ->
            %% Calculate the VNode index using the
            %% ring position and the increment of
            %% ring index values.
            VNodeIndex = (Position rem PartitionCount) * RingIndexInc,
            Node = chashbin:index_owner(VNodeIndex, CHBin),
            CoverageVNode = {VNodeIndex, Node},
            case length(KeySpaces) < NVal of
                true ->
                    %% This vnode is only needed for part of its
                    %% keyspaces, so emit a result filter.
                    %% Get the VNode index of each keyspace to
                    %% use to filter results from this VNode.
                    KeySpaceIndexes = [(((KeySpaceIndex+1) rem
                                         PartitionCount) * RingIndexInc) ||
                                          KeySpaceIndex <- KeySpaces],
                    {CoverageVNode, [{VNodeIndex, KeySpaceIndexes} | Acc]};
                false ->
                    {CoverageVNode, Acc}
            end
        end,
    %% The offset value serves as a tiebreaker in the
    %% compare_next_vnode function and is used to distribute
    %% work to different sets of VNodes.
    CoverageResult = find_coverage(AllKeySpaces,
                                   Offset,
                                   NVal,
                                   PartitionCount,
                                   UnavailableKeySpaces,
                                   lists:min([PVC, NVal]),
                                   []),
    case CoverageResult of
        {ok, CoveragePlan} ->
            %% Assemble the data structures required for
            %% executing the coverage operation.
            lists:mapfoldl(CoverageVNodeFun, [], CoveragePlan);
        {insufficient_vnodes_available, _KeySpace, PartialCoverage} ->
            case VNodeSelector of
                allup ->
                    %% The allup indicator means generate a coverage plan
                    %% for any available VNodes.
                    lists:mapfoldl(CoverageVNodeFun, [], PartialCoverage);
                all ->
                    {error, insufficient_vnodes_available}
            end
    end.
%% ====================================================================
%% Internal functions
%% ====================================================================
%% @private
%% @doc Iteratively builds PVC rounds of keyspace coverage. The first
%% clause handles the initial round (empty accumulator); the second
%% clause runs subsequent rounds, subtracting keyspaces already covered
%% per vnode so each round picks fresh primaries. Returns
%% {ok, [{VNode, CoveredKeySpaces}]} or an
%% insufficient_vnodes_available error from find_coverage_vnodes/3.
find_coverage(AllKeySpaces, Offset, NVal, PartitionCount, UnavailableKeySpaces, PVC, []) ->
    %% Calculate the available keyspaces.
    %% Each entry is {TieBreaker, VNode, KeySpacesItCovers}.
    AvailableKeySpaces = [{((VNode+Offset) rem PartitionCount),
                           VNode,
                           n_keyspaces(VNode, NVal, PartitionCount)}
                          || VNode <- (AllKeySpaces -- UnavailableKeySpaces)],
    case find_coverage_vnodes(
           ordsets:from_list(AllKeySpaces),
           AvailableKeySpaces,
           []) of
        {ok, CoverageResults} ->
            case PVC of
                1 ->
                    {ok, CoverageResults};
                _ ->
                    %% More primary coverage wanted: recurse into the
                    %% accumulator clause below.
                    find_coverage(AllKeySpaces,
                                  Offset,
                                  NVal,
                                  PartitionCount,
                                  UnavailableKeySpaces,
                                  PVC-1,
                                  CoverageResults)
            end;
        Error ->
            Error
    end;
find_coverage(AllKeySpaces,
              Offset,
              NVal,
              PartitionCount,
              UnavailableKeySpaces,
              PVC,
              ResultsAcc) ->
    %% Calculate the available keyspaces. The list of
    %% keyspaces for each vnode that have already been
    %% covered by the plan are subtracted from the complete
    %% list of keyspaces so that coverage plans that
    %% want to cover more one preflist vnode work out
    %% correctly.
    AvailableKeySpaces = [{((VNode+Offset) rem PartitionCount),
                           VNode,
                           n_keyspaces(VNode, NVal, PartitionCount) --
                               proplists:get_value(VNode, ResultsAcc, [])}
                          || VNode <- (AllKeySpaces -- UnavailableKeySpaces)],
    case find_coverage_vnodes(ordsets:from_list(AllKeySpaces),
                              AvailableKeySpaces,
                              ResultsAcc) of
        {ok, CoverageResults} ->
            %% Merge this round's results into the accumulator,
            %% deduplicating keyspaces per vnode.
            UpdateResultsFun =
                fun({Key, NewValues}, Results) ->
                    case proplists:get_value(Key, Results) of
                        undefined ->
                            [{Key, NewValues} | Results];
                        Values ->
                            UniqueValues = lists:usort(Values ++ NewValues),
                            [{Key, UniqueValues} |
                             proplists:delete(Key, Results)]
                    end
                end,
            UpdatedResults =
                lists:foldl(UpdateResultsFun, ResultsAcc, CoverageResults),
            case PVC of
                1 ->
                    {ok, UpdatedResults};
                _ ->
                    find_coverage(AllKeySpaces,
                                  Offset,
                                  NVal,
                                  PartitionCount,
                                  UnavailableKeySpaces,
                                  PVC-1,
                                  UpdatedResults)
            end;
        Error ->
            Error
    end.
%% @private
%% @doc Ordered set of the N keyspaces preceding VNode on the ring
%% (i.e. the partitions this vnode replicates), wrapping around modulo
%% PartitionCount.
n_keyspaces(VNode, N, PartitionCount) ->
    Offsets = lists:seq(VNode - N, VNode - 1),
    ordsets:from_list([(Off + PartitionCount) rem PartitionCount
                       || Off <- Offsets]).
%% @private
%% @doc Greedy set cover: repeatedly take the vnode covering the most
%% of the remaining keyspace until nothing is left (-> {ok, Coverage})
%% or no candidate covers anything / none remain
%% (-> insufficient_vnodes_available with the partial coverage).
find_coverage_vnodes([], _Available, Coverage) ->
    {ok, lists:sort(Coverage)};
find_coverage_vnodes(KeySpace, [], Coverage) ->
    {insufficient_vnodes_available, KeySpace, lists:sort(Coverage)};
find_coverage_vnodes(KeySpace, Available, Coverage) ->
    case next_vnode(KeySpace, Available) of
        {0, _, _} ->
            %% Best candidate covers nothing: effectively out of vnodes.
            find_coverage_vnodes(KeySpace, [], Coverage);
        {_NumCovered, VNode, _TB} ->
            {value, {_, VNode, Covers}, RestAvailable} =
                lists:keytake(VNode, 2, Available),
            Covered = ordsets:intersection(KeySpace, Covers),
            find_coverage_vnodes(ordsets:subtract(KeySpace, Covers),
                                 RestAvailable,
                                 [{VNode, Covered} | Coverage])
    end.
%% @private
%% @doc Rank the available vnodes by how much of the remaining keyspace
%% each would cover and return the best {CoverCount, VNode, TieBreaker}
%% triple, ties broken by the offset-derived tiebreaker.
next_vnode(KeySpace, Available) ->
    Ranked = [{covers(KeySpace, CoversKeys), VNode, TieBreaker}
              || {TieBreaker, VNode, CoversKeys} <- Available],
    [Best | _] = lists:sort(fun compare_next_vnode/2, Ranked),
    Best.
%% @private
%% @doc Ordering function for next_vnode/2: sort descending by coverage
%% count, breaking ties by preferring the lower tiebreaker value.
%%
%% Potential optimization (once the partition claim logic places
%% physical nodes at regular intervals around the ring): when the
%% partition count is not evenly divisible by the n_val and the
%% coverage counts are equal, choosing the *upper* node for the final
%% covering vnode would avoid pulling an extra physical node into the
%% plan.
compare_next_vnode({CoverA, _VNodeA, TieA}, {CoverB, _VNodeB, TieB}) ->
    CoverA > CoverB orelse (CoverA =:= CoverB andalso TieA < TieB).
%% @private
%% @doc Number of elements of CoversKeys that also appear in KeySpace;
%% both arguments are ordsets.
covers(KeySpace, CoversKeys) ->
    length(ordsets:intersection(KeySpace, CoversKeys)).
-module(ddb_bmp).
%% for CLI
-export([show/1, compare/1, repair/1]).
-ignore_xref([show/1, compare/1, repair/1]).
%% @doc CLI entry point: `show [--width W] Time Bucket Metric...'.
%% Renders the AAE bitmap for the metric, defaulting to 100 columns.
show(["--width", WidthArg, TimeArg, BucketArg | MetricParts]) ->
    show_bitmap(TimeArg, BucketArg, MetricParts, list_to_integer(WidthArg));
show([TimeArg, BucketArg | MetricParts]) ->
    show_bitmap(TimeArg, BucketArg, MetricParts, 100).
%% @doc CLI entry point: `compare [--width W] Time Bucket Metric...'.
%% Diffs the metric's bitmap across replica nodes, defaulting to 100
%% columns.
compare(["--width", WidthArg, TimeArg, BucketArg | MetricParts]) ->
    compare_nodes(TimeArg, BucketArg, MetricParts, list_to_integer(WidthArg));
compare([TimeArg, BucketArg | MetricParts]) ->
    compare_nodes(TimeArg, BucketArg, MetricParts, 100).
%% @doc CLI entry point: `repair Time Bucket Metric...'. Forces a full
%% read of the metric's PPF-aligned window; presumably the read path
%% repairs divergent replicas as a side effect -- confirm against
%% metric:get/7's behaviour.
repair([TimeS, BucketS | MetricS]) ->
    Bucket = list_to_binary(BucketS),
    Time = list_to_integer(TimeS),
    MetricL = [list_to_binary(M) || M <- MetricS],
    Metric = dproto:metric_from_list(MetricL),
    PPF = dalmatiner_opt:ppf(Bucket),
    %% Align the read window to the points-per-file boundary.
    Base = Time div PPF,
    {ok, M} = metric:get(Bucket, Metric, PPF, Base*PPF, PPF, undefined, []),
    io:format("Read ~p datapoints.~n", [mmath_bin:length(M)]).
%%====================================================================
%% Internal functions
%%====================================================================
%% @doc Resolves the preference list for {Bucket, Metric, Time} and
%% collects each replica's bitmap as {Node, Partition, Bitmap|not_found}.
get_bmps(Bucket, Metric, Time) ->
    Nodes = get_nodes(Bucket, Metric, Time),
    get_nodes(Nodes, Bucket, Metric, Time, []).
%% @doc Fetches the bitmap for {Bucket, Metric, Time} from every replica
%% and reports divergence: the union of all bitmaps versus their
%% intersection, followed by each node's individual difference.
compare_nodes(TimeS, BucketS, MetricS, Width) ->
    Bucket = list_to_binary(BucketS),
    Time = list_to_integer(TimeS),
    MetricL = [list_to_binary(M) || M <- MetricS],
    Metric = dproto:metric_from_list(MetricL),
    Results = [{_, _, B0} | Rest] = get_bmps(Bucket, Metric, Time),
    Union = calc_f(fun bitmap:union/2, B0, Rest),
    Intersection = calc_f(fun bitmap:intersection/2, B0, Rest),
    case {Union, Intersection} of
        {_R, _R} ->
            %% Union equals intersection: all replicas hold the same points.
            io:format("No difference~n");
        _ ->
            io:format("Total difference:~n"),
            bitmap:display_diff(Union, Intersection, Width),
            show_diff(Results, Union, Width),
            io:format("~n")
    end.
%% @doc Prints, for each replica, how its bitmap differs from the union
%% of all replicas: no data at all, complete (matches the union), or a
%% rendered diff of the missing points.
show_diff([], _Union, _Width) ->
    ok;
show_diff([{Node, P, not_found} | R], Union, Width) ->
    io:format("~n=== ~s (~p)~n This node had no data~n", [Node, P]),
    show_diff(R, Union, Width);
%% Bitmap equal to the union binds via the repeated `Union' variable.
show_diff([{Node, P, Union} | R], Union, Width) ->
    io:format("~n=== ~s (~p)~n"
              "* No missing points~n", [Node, P]),
    show_diff(R, Union, Width);
show_diff([{Node, P, Bitmap} | R], Union, Width) ->
    io:format("~n=== ~s (~p)~n", [Node, P]),
    bitmap:display_diff(Union, Bitmap, Width),
    show_diff(R, Union, Width).
%% @doc Fold the binary operation F over the bitmaps in the node list,
%% starting from Acc. A not_found entry (or accumulator) is substituted
%% with an empty bitmap sized to match its peer; deriving that size
%% requires at least one real bitmap, so an all-not_found input folds to
%% not_found.
calc_f(_F, Acc, []) ->
    Acc;
calc_f(F, not_found, [{_Node, _Partition, not_found} | Rest]) ->
    calc_f(F, not_found, Rest);
calc_f(F, not_found, [{_Node, _Partition, Bitmap} | Rest]) ->
    {ok, Empty} = bitmap:new([{size, bitmap:size(Bitmap)}]),
    calc_f(F, F(Bitmap, Empty), Rest);
calc_f(F, Acc, [{_Node, _Partition, not_found} | Rest]) ->
    {ok, Empty} = bitmap:new([{size, bitmap:size(Acc)}]),
    calc_f(F, F(Acc, Empty), Rest);
calc_f(F, Acc, [{_Node, _Partition, Bitmap} | Rest]) ->
    calc_f(F, F(Acc, Bitmap), Rest).
%% @doc Asks each {Partition, Node} pair for its bitmap, tagging
%% unreadable or absent results as not_found. Note the accumulator
%% reverses the preference-list order.
get_nodes([], _Bucket, _Metric, _Time, Acc) ->
    Acc;
get_nodes([{P, N} = PN | R], Bucket, Metric, Time, Acc) ->
    case metric_vnode:get_bitmap(PN, Bucket, Metric, Time) of
        {ok, BMP} ->
            get_nodes(R, Bucket, Metric, Time, [{N, P, BMP} | Acc]);
        _O ->
            get_nodes(R, Bucket, Metric, Time, [{N, P, not_found} | Acc])
    end.
%% @doc Renders the bitmap(s) for {Bucket, Metric, Time} held by the
%% *local* node; if the local node holds no replica, prints the nodes
%% that do and returns `error'.
show_bitmap(TimeS, BucketS, MetricS, Width) ->
    Bucket = list_to_binary(BucketS),
    Time = list_to_integer(TimeS),
    MetricL = [list_to_binary(M) || M <- MetricS],
    Metric = dproto:metric_from_list(MetricL),
    Nodes = get_bmps(Bucket, Metric, Time),
    case [{P, BMP} || {Node, P, BMP} <- Nodes, Node =:= node()] of
        [] ->
            io:format("No valid node found, try on: ~p~n",
                      [[Node || {Node, _, _} <- Nodes]]),
            error;
        L ->
            lists:map(fun({P, not_found}) ->
                          io:format("=== ~p~n", [P]),
                          io:format("* No data for this time range~n");
                         ({P, BMP}) ->
                          io:format("=== ~p~n", [P]),
                          bitmap:display(BMP, Width),
                          io:format("~n")
                      end, L)
    end.
%% Computes the riak_core preference list (the {Partition, Node} pairs)
%% responsible for this bucket/metric in the period containing Time.
%% (Also strips a dataset extraction artifact that was fused onto the
%% final line of the original.)
get_nodes(Bucket, Metric, Time) ->
    % Points-per-file determines which storage period (Base) Time falls in.
    PPF = dalmatiner_opt:ppf(Bucket),
    {ok, N} = application:get_env(dalmatiner_db, n),
    Base = Time div PPF,
    DocIdx = riak_core_util:chash_key({Bucket, {Metric, Base}}),
    riak_core_apl:get_apl(DocIdx, N, metric).
%% @doc This module provides utility functions for formatting, reading
%% measurements from file and acquiring measurements using the
%% sensor-specific Python script.
-module(util).
-export([get_measurement/1, get_average_measurement/3, read_history/1,
format_time/1, format_csv/1, format_csv_line/1,
format_json/1, format_json_item/1]).
%% @spec get_measurement(Pin::string()) -> weather:measurement()
%% @doc Uses the Python script to read temperature and humidity on the given
%% GPIO pin and returns them as a tuple with the local time. On a failed
%% read the script prints "failure" and a failure tuple is returned.
get_measurement(Pin) ->
    % Shell out to the sensor-specific Python helper.
    Output = os:cmd("python am2302.py " ++ Pin),
    % Trim surrounding whitespace (including the trailing newline).
    Trimmed = re:replace(Output, "(^\\s+)|(\\s+$)", "", [global, {return, list}]),
    case Trimmed of
        "failure" ->
            % The script reported an unsuccessful read.
            {erlang:system_time(seconds), failure};
        _ ->
            % Output is "Temperature,Humidity"; split and convert to floats.
            [TempStr, HumStr] = string:tokens(Trimmed, ","),
            {erlang:system_time(seconds),
             {list_to_float(TempStr), list_to_float(HumStr)}}
    end.
%% @spec get_average_measurement(Pin::string(), Number::integer(),
%%                               Interval::integer()) -> weather:measurement()
%% @doc Takes the given number of measurements, sleeping for the interval (ms)
%% in between. Returns the average of all measurements with the average of all
%% timestamps as the time.
get_average_measurement(Pin, Number, Interval) ->
    % Start the accumulating helper with an empty measurement list.
    get_average_measurement(Pin, Number, Interval, []).
%% @spec get_average_measurement(Pin::string(), Number::integer(),
%%                               Interval::integer(),
%%                               Measurements::[weather:measurement()]) ->
%%                                   weather:measurement()
%% @doc Takes the given number of measurements, sleeping for the interval (ms)
%% in between. Returns the average of all measurements with the average of all
%% timestamps as the time. This is the function doing the actual work of
%% collecting the measurements with an accumulator.
get_average_measurement(_, 0, _, Measurements) ->
    compute_average(Measurements);
get_average_measurement(Pin, Number, Interval, Measurements) ->
    M = get_measurement(Pin),
    % Only sleep *between* measurements. The original also slept after the
    % final measurement, delaying the result by one interval for no benefit.
    case Number > 1 of
        true -> timer:sleep(Interval);
        false -> ok
    end,
    get_average_measurement(Pin, Number - 1, Interval, [M | Measurements]).
%% @spec compute_average(Measurements::[weather:measurement()]) ->
%%           weather:measurement()
%% @doc Calculates and returns the average of the given measurements.
%% Failed measurements are excluded; if none succeeded, a failure tuple
%% with the current time is returned.
compute_average(Measurements) ->
    % Remove failed measurements
    M = lists:filter(fun({_, Res}) -> Res /= failure end, Measurements),
    N = length(M),
    case N > 0 of
        true ->
            % Collect the sum of the individual tuple values
            {STime, {STemp, SHum}} = lists:foldl(
                fun({Time, {Temp, Hum}}, {STi, {STe, SHu}}) ->
                    {STi + Time, {STe + Temp, SHu + Hum}} end,
                {0, {0, 0}},
                M),
            % Calculate and truncate measurement averages
            AvTime = trunc(STime / N),
            % Round temperature/humidity to one decimal by formatting with
            % "~.1f" and parsing the result back into a float.
            [AvTempStr] = io_lib:format("~.1f", [STemp / N]),
            [AvHumStr] = io_lib:format("~.1f", [SHum / N]),
            % Return the average measurement
            {AvTime, {list_to_float(AvTempStr), list_to_float(AvHumStr)}};
        false ->
            % No valid measurements, return a failure
            {erlang:system_time(seconds), failure}
    end.
%% @spec read_history(Filename::string()) -> [weather:measurement()]
%% @doc Reads the measurements that have previously been written to file and
%% returns them in reverse chronological order. A missing file yields the
%% empty list (see convert_lines/1).
read_history(Filename) ->
    Lines = read_lines(Filename),
    convert_lines(Lines).
%% @spec convert_lines(Lines::[string()] | no_such_file) ->
%%           [weather:measurement()]
%% @doc Deals with the result of reading the lines from CSV, converting it
%% to a reversed list of measurements.
convert_lines(no_such_file) ->
    % Return no measurements in case file doesn't exist
    [];
convert_lines(Lines) ->
    % Otherwise, start conversion with empty accumulator
    convert_lines(Lines, []).
%% @spec convert_lines(Lines::[string()], Acc::[weather:measurement()]) ->
%%           [weather:measurement()]
%% @doc Converts the CSV lines to a list of measurements, reversing the order
%% in the process by prepending onto the accumulator.
convert_lines(Lines, Acc) ->
    lists:foldl(
        fun(Line, Measurements) ->
            % Each line has the shape "SecondsUTC,Temperature,Humidity".
            [SecondsStr, TempStr, HumStr] = string:tokens(Line, ","),
            [{list_to_integer(SecondsStr),
              {list_to_float(TempStr), list_to_float(HumStr)}} | Measurements]
        end, Acc, Lines).
%% @spec read_lines(Filename::string()) -> [string()] | no_such_file
%% @doc Returns the list of lines from the given file, or no_such_file when
%% the file does not exist.
%% The lines are trimmed, so leading and trailing whitespace, including
%% line breaks, is removed.
%%
%% The original dispatched by re-calling itself with the result of
%% file:open/2 — an argument-overloading pattern that obscures control
%% flow; an explicit case is clearer and equivalent for the handled cases.
read_lines(Filename) ->
    case file:open(Filename, [read]) of
        {error, enoent} ->
            % File could not be found, return error.
            no_such_file;
        {ok, Fd} ->
            % File was opened, read it, close it and return the lines.
            Lines = read(Fd),
            file:close(Fd),
            Lines
    end.
%% @spec read(InputDevice::io_device()) -> [string()]
%% @doc Using the given input device, returns all lines as a list of strings
%% with trimmed lines. Uses body recursion to preserve line order; the
%% whole file is read until eof.
read(Fd) ->
    case file:read_line(Fd) of
        {ok, Data} ->
            % Strip leading/trailing whitespace (including the newline).
            [re:replace(Data, "(^\\s+)|(\\s+$)", "", [global, {return, list}])
             | read(Fd)];
        eof -> []
    end.
%% @spec format_time(Seconds::integer()) -> string()
%% @doc Takes seconds since 1970 (UTC) and returns the corresponding local
%% time formatted as "YYYY/MM/DD hh:mm:ss".
format_time(Seconds) ->
    % Gregorian seconds corresponding to the Unix epoch.
    EpochOffset = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
    UtcDatetime = calendar:gregorian_seconds_to_datetime(EpochOffset + Seconds),
    % Shift into the local time zone before formatting.
    {{Y, Mo, D}, {H, Mi, S}} = calendar:universal_time_to_local_time(UtcDatetime),
    % Zero-pad each field to its fixed width.
    Formatted = io_lib:format("~4..0w/~2..0w/~2..0w ~2..0w:~2..0w:~2..0w",
                              [Y, Mo, D, H, Mi, S]),
    lists:flatten(Formatted).
%% @spec format_csv(Measurements::[weather:measurement()]) -> list()
%% @doc Accepts a list of measurements in reverse chronological order and
%% returns its data in chronological order formatted as CSV.
format_csv(Measurements) ->
    % Delegate to the accumulating helper, starting from an empty result.
    format_csv(Measurements, []).
%% @spec format_csv(Measurements::[weather:measurement()],
%%                  Acc::string()) -> list()
%% @doc Accepts a list of measurements in reverse chronological order and
%% returns its data in chronological order formatted as CSV. Prepending each
%% line onto the accumulator reverses the order while converting.
format_csv(Measurements, Acc) ->
    lists:foldl(
        fun(Measurement, Lines) ->
            [format_csv_line(Measurement) | Lines]
        end, Acc, Measurements).
%% @spec format_csv_line(Measurement::weather:measurement()) -> list()
%% @doc Returns a CSV formatted version of this measurement, terminated by
%% a newline: "SecondsUTC,Temperature,Humidity\n".
format_csv_line({SecondsUTC, {Temp, Hum}}) ->
    Row = io_lib:format("~p,~p,~p~n", [SecondsUTC, Temp, Hum]),
    lists:flatten(Row).
%% @spec format_json(Measurements::[weather:measurement()]) -> list()
%% @doc Accepts a list of measurements in reverse chronological order and
%% returns its data in chronological order formatted as JSON (an array of
%% three-element arrays).
format_json(Measurements) ->
    % Start accumulator with last character, the closing bracket
    format_json(Measurements, "]").
%% @spec format_json(Measurements::[weather:measurement()],
%%                   Acc::string()) -> list()
%% @doc Accepts a list of measurements in reverse chronological order and
%% returns its data in chronological order formatted as JSON (as an iolist).
%% Acc carries the closing bracket, matching how format_json/1 calls this.
%%
%% Fixes a crash in the original: for an empty measurement list the base
%% clause applied lists:delete/2 to the single character of "]" (a plain
%% integer, not a list), raising function_clause. An empty input now
%% produces a valid empty JSON array.
format_json(Measurements, Acc) ->
    % Convert each measurement; prepending reverses into chronological order.
    Items = lists:foldl(
        fun(Measurement, Converted) ->
            [format_json_item(Measurement) | Converted]
        end, [], Measurements),
    % Join the items with commas and wrap in brackets.
    ["[", lists:join(",", Items), Acc].
%% @spec format_json_item(Measurement::weather:measurement()) -> list()
%% @doc Returns a JSON formatted version of this measurement: a flat string
%% of the form "[seconds,temperature,humidity]".
format_json_item({SecondsUTC, {Temp, Hum}}) ->
    Item = io_lib:format("[~p,~p,~p]", [SecondsUTC, Temp, Hum]),
    lists:flatten(Item).
%% ==========================================================================================================
%% Ram - An in-memory distributed KV store for Erlang and Elixir.
%%
%% The MIT License (MIT)
%%
%% Copyright (c) 2021 <NAME> <<EMAIL>>.
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
%% THE SOFTWARE.
%% ==========================================================================================================
%% ===================================================================
%% @doc Exposes all of the Key Value store APIs.
%%
%% Ram doesn't have to run on every node of the Erlang cluster. You may start Ram only on those nodes where you
%% need it. When started, Ram creates a logical overlay network running on top of the Erlang distribution cluster.
%%
%% Nodes where Ram runs form a subcluster: they will synchronize data between themselves, and themselves only.
%% All of the data is replicated on every node of the subcluster.
%%
%% <h2>Quickstart</h2>
%% <h3>Elixir</h3>
%% ```
%% iex(1)> :ram.get("key")
%% :undefined
%% iex(2)> :ram.put("key", "value")
%% :ok
%% iex(3)> :ram.get("key")
%% "value"
%% '''
%% <h3>Erlang</h3>
%% ```
%% 1> ram:get("key").
%% undefined
%% 2> ram:put("key", "value").
%% ok
%% 3> ram:get("key").
%% "value"
%% '''
%%
%% <h2>Internals</h2>
%% Ram operations are <strong>A</strong>tomic (take effect on all nodes involved, or on none of the nodes),
%% <strong>C</strong>onsistent (the data is the same across all nodes)
%% and <strong>I</strong>solated (operations on different nodes in a network do not interfere with each other).
%% They are not <strong>D</strong>urable since Ram is an in-memory only database.
%%
%% To do so, every operation creates a global lock with {@link global:trans/4} in the caller process, which then
%% runs a transaction with a 2-phase commit protocol. If transactions fail, they raise
%% `error({commit_timeout, {bad_nodes, BadNodes}})' where `BadNodes' is the list of subcluster nodes where the
%% transaction could not complete.
%%
%% <h2>Conflict Resolution</h2>
%% In case of net splits or bad network conditions, a specific Key might get put simultaneously on two different nodes.
%% When this happens, the cluster experiences a Key conflict.
%%
%% Ram will resolve this conflict by choosing a single Value. By default, Ram keeps track of the time
%% when a put takes place with {@link erlang:system_time/0}, compares the timestamps of the conflicting
%% puts and keeps the one with the higher value (the Value that was put more recently). This is a very simple mechanism
%% that can be imprecise, as system clocks are not perfectly aligned in a cluster.
%% @end
%% ===================================================================
-module(ram).
%% API
-export([start/0, stop/0]).
-export([subcluster_nodes/0]).
-export([get/1, get/2, fetch/1]).
-export([put/2]).
-export([update/3]).
-export([delete/1]).
%% ===================================================================
%% API
%% ===================================================================
%% @doc Starts Ram manually.
%%
%% In most cases Ram will be started as one of your application's dependencies,
%% however you may use this helper method to start it manually.
-spec start() -> ok.
start() ->
    % Crashes with a badmatch if the application cannot be started.
    {ok, _} = application:ensure_all_started(ram),
    ok.
%% @doc Stops Ram manually.
%%
%% Delegates to application:stop/1; see its documentation for the possible
%% error reasons.
-spec stop() -> ok | {error, Reason :: term()}.
stop() ->
    application:stop(ram).
%% @doc Returns the nodes of Ram's subcluster.
%%
%% Per the spec this may also return the atom not_running — presumably when
%% Ram is not started on this node; confirm against ram_kv.
-spec subcluster_nodes() -> [node()] | not_running.
subcluster_nodes() ->
    ram_kv:subcluster_nodes().
%% @equiv get(Key, undefined)
%% @end
-spec get(Key :: term()) -> Value :: term().
get(Key) ->
    % Convenience wrapper: defaults to the atom undefined on a miss.
    get(Key, undefined).
%% @doc Returns the Key's Value or Default if the Key is not found.
%%
%% <h2>Examples</h2>
%% <h3>Elixir</h3>
%% ```
%% iex(1)> :ram.get("key")
%% :undefined
%% iex(2)> :ram.get("key", "default")
%% "default"
%% iex(3)> :ram.put("key", "value")
%% :ok
%% iex(4)> :ram.get("key")
%% "value"
%% '''
%% <h3>Erlang</h3>
%% ```
%% 1> ram:get("key").
%% undefined
%% 2> ram:get("key", "default").
%% "default"
%% 3> ram:put("key", "value").
%% ok
%% 4> ram:get("key").
%% "value"
%% '''
-spec get(Key :: term(), Default :: term()) -> Value :: term().
get(Key, Default) ->
    % Delegates to ram_kv, which implements the actual store.
    ram_kv:get(Key, Default).
%% @doc Looks up a Key.
%%
%% Returns `error' if the Key is not found. Unlike get/2, the tagged return
%% distinguishes a stored `undefined' value from a missing Key.
-spec fetch(Key :: term()) -> {ok, Value :: term()} | error.
fetch(Key) ->
    ram_kv:fetch(Key).
%% @doc Puts a Value for a Key.
%%
%% Delegates to ram_kv, which implements the actual store.
-spec put(Key :: term(), Value :: term()) -> ok.
put(Key, Value) ->
    ram_kv:put(Key, Value).
%% @doc Atomically updates a Key with the given function.
%%
%% If Key is found then the existing Value is passed to the fun and its result is used as the updated Value of Key.
%% If Key is not found, Default is put as the Value of Key. The Default value will not be passed through the update function.
%%
%% <h2>Examples</h2>
%% <h3>Elixir</h3>
%% ```
%% iex(1)> update_fun = fn existing_value -> existing_value * 2 end
%% #Function<44.65746770/1 in :erl_eval.expr/5>
%% iex(2)> :ram.update("key", 10, update_fun)
%% ok
%% iex(3)> :ram.get("key")
%% 10
%% iex(4)> :ram.update("key", 10, update_fun)
%% ok
%% iex(5)> :ram.get("key")
%% 20
%% '''
%% <h3>Erlang</h3>
%% ```
%% 1> UpdateFun = fun(ExistingValue) -> ExistingValue * 2 end.
%% #Fun<erl_eval.44.65746770>
%% 2> ram:update("key", 10, UpdateFun).
%% ok
%% 3> ram:get("key").
%% 10
%% 4> ram:update("key", 10, UpdateFun).
%% ok
%% 5> ram:get("key").
%% 20
%% '''
-spec update(Key :: term(), Default :: term(), function()) -> ok.
update(Key, Default, Fun) ->
    % Delegates to ram_kv, which implements the actual store.
    ram_kv:update(Key, Default, Fun).
%% @doc Deletes a Key.
%%
%% Delegates to ram_kv, which implements the actual store. (This edit also
%% strips a dataset extraction artifact fused onto the final line.)
-spec delete(Key :: term()) -> ok.
delete(Key) ->
    ram_kv:delete(Key).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(knit_release).
-export([
rel_file/0,
rel_file/1,
paths/0,
paths/1,
tmp_location/1,
extract/1,
load/0,
load/1,
package/0
]).
-include_lib("sasl/src/systools.hrl").
-include("knit.hrl").
%% Absolute path (without the ".rel" extension) of the rel file for the
%% current build. Aborts if the file cannot be found on disk.
rel_file() ->
    {BootRelName, BootRelVsn} = knit_vsn:boot_rel(),
    RelPath = filename:join([
        knit_cfg:get(build_dir),
        "releases",
        BootRelVsn,
        BootRelName
    ]),
    Filename = filename:absname(RelPath),
    case filelib:is_regular(Filename ++ ".rel") of
        false ->
            ?ABORT("Unable to locate the current release file.", []);
        true ->
            Filename
    end.
%% Absolute path (without the ".rel" extension) of the rel file belonging
%% to a previously extracted release Version. Aborts via ?BAD_CONFIG if it
%% does not exist.
rel_file(Version) ->
    {BootRelName, _} = knit_vsn:boot_rel(),
    Filename = filename:absname(filename:join([
        tmp_location(Version),
        BootRelName,
        "releases",
        Version,
        BootRelName
    ])),
    case filelib:is_regular(Filename ++ ".rel") of
        true ->
            Filename;
        false ->
            Fmt = "Release ~s is missing rel file: ~s",
            ?BAD_CONFIG(Fmt, [Version, Filename])
    end.
%% Lists the existing ebin directories of all applications in the
%% current build directory.
paths() ->
    Patterns = [
        filename:join([knit_cfg:get(build_dir), "lib", "*", "ebin"])
    ],
    paths0(Patterns).
%% Lists the existing ebin directories of all applications in the
%% extracted release identified by Version.
paths(Version) ->
    {BootRelName, _} = knit_vsn:boot_rel(),
    Patterns = [
        filename:join([tmp_location(Version), BootRelName, "lib", "*", "ebin"])
    ],
    paths0(Patterns).
%% Expands each wildcard pattern and keeps only matches that are
%% directories. Expansion order mirrors the original foldl (later
%% patterns' matches come first).
paths0(Patterns) ->
    Expanded = lists:foldl(
        fun(Pattern, Acc) -> filelib:wildcard(Pattern) ++ Acc end,
        [], Patterns),
    [Dir || Dir <- Expanded, filelib:is_dir(Dir)].
%% Absolute path of the temporary working directory used for the given
%% release Version.
tmp_location(Version) ->
    TmpDir = knit_cfg:get(tmp_dir),
    VsnDir = filename:join(TmpDir, Version),
    filename:absname(VsnDir).
%% Extracts the compressed release tarball for Version into its temporary
%% location, creating the target directory first.
extract(Version) ->
    TarballName = knit_vsn:release_tarball(Version),
    knit_log:debug("Extracting: ~s", [TarballName]),
    RelDir = tmp_location(Version),
    knit_file:ensure_dir_exists(RelDir),
    % Extraction is relative, so run it from within the target directory.
    knit_file:do_in_dir(RelDir, fun() ->
        ok = erl_tar:extract(TarballName, [compressed])
    end).
%% Loads the current (in-progress) release from the build directory.
load() ->
    load(current, rel_file(), paths()).
%% Loads a previously extracted release identified by Version.
load(Version) ->
    load(Version, rel_file(Version), paths(Version)).
%% Parses the rel file against the given code paths using systools,
%% reporting any warnings and aborting via ?BAD_CONFIG on errors.
load(Version, RelFile, Paths) ->
    ?TRACE("Loading ~s from:~n~p", [Version, Paths]),
    case systools_make:get_release(RelFile, Paths) of
        {ok, Rel, Apps, Warnings} ->
            knit_util:report_warnings(systools_make, Warnings),
            {RelFile, Rel, Apps};
        {error, Error} ->
            Reason = systools_make:format_error(Error),
            ?BAD_CONFIG("Error loading release ~s: ~s", [Version, Reason])
    end.
%% Builds the compressed release tarball from the build directory. An
%% existing tarball is only replaced when the `force` option is set;
%% otherwise packaging aborts with an I/O error.
package() ->
    TarballName = knit_vsn:release_tarball(),
    knit_log:info("Creating release tarball: ~s", [TarballName]),
    Force = knit_cfg:get(force),
    Exists = filelib:is_file(TarballName),
    if
        Exists andalso Force ->
            knit_log:info("Overwriting: ~s", [TarballName]),
            knit_file:rm_rf(TarballName);
        Exists ->
            ?IO_ERROR("Refusing to overwrite: ~s", [TarballName]);
        true ->
            ok
    end,
    BuildDir = knit_cfg:get(build_dir),
    % Archive the build directory by name, relative to its parent.
    knit_file:do_in_dir(filename:dirname(BuildDir), fun() ->
        Handle = case erl_tar:open(TarballName, [write, compressed]) of
            {ok, Tar} ->
                Tar;
            {error, Reason} ->
                Fmt = "Error creating tar file ~s: ~s",
                ?IO_ERROR(Fmt, [TarballName, erl_tar:format_error(Reason)])
        end,
        ok = erl_tar:add(Handle, filename:basename(BuildDir), []),
        ok = erl_tar:close(Handle)
    end).
% @doc GRiSP I²C API.
%
% <a href="https://en.wikipedia.org/wiki/I²C">Inter-Integrated Circuit
% (I²C)</a> is a synchronous addressable serial communication bus. Two types of
% nodes can exist, controllers and targets. Multiple controllers and targets
% can exist on the bus simultaneously. Controllers generate the clock signal
% and initiate communication with targets. Targets follow the clock and respond
% when addressed. A GRiSP board acts as a controller to any connected target
% peripherals on the I²C buses.
%
% In GRiSP there are two main I²C buses, one "internal" bus (`i2c0') and
% one "external" bus (`i2c1'). The internal bus talks to targets on the SoM and
% the board itself, such as EEPROM memory or the 1-Wire controller. The
% external bus talks to any connected target peripherals, either on the PMOD
% connector or the raised I²C pins.
%
% Communication consists of either read or write messages. A read or write must
% be addressed to a specific target (chip). A target is addressed by a 7-bit
% address. The registry to use is specified in a target specific way. E.g. to
% read a register, some targets expect a write message with the register address
% as a value, followed by a read message.
%
% <em>Please refer to the specification for a specific target chip for
% instructions of what messages to send and receive, and how the registers are
% structured.</em>
-module(grisp_i2c).
-include("grisp_nif.hrl").
% API
-export([buses/0]).
-export([open/1]).
-export([detect/1]).
-export([read/4]).
-export([write/4]).
-export([transfer/2]).
% Callbacks
-export([on_load/0]).
-on_load(on_load/0).
%--- Types ---------------------------------------------------------------------
-type bus_name() :: atom().
-type bus_path() :: iodata().
-opaque bus() :: reference().
-type reg_addr() :: byte().
-type target_addr() :: 1..127.
-type length() :: non_neg_integer().
-type message() :: {
Type :: read | write,
Target :: target_addr(),
Flags :: non_neg_integer(),
Payload :: iodata() | length()
}.
-type error() :: {error, any()}.
-export_type([bus_name/0]).
-export_type([bus/0]).
-export_type([reg_addr/0]).
-export_type([target_addr/0]).
-export_type([message/0]).
%--- API -----------------------------------------------------------------------
% @doc Lists I²C buses.
%
% === Examples ===
% ```
% 1> grisp_i2c:buses()
% #{i2c0 => #{name => i2c0,path => <<"/dev/i2c-0">>},
% i2c1 => #{name => i2c1,path => <<"/dev/i2c-1">>}}
% '''
-spec buses() -> #{bus_name() := #{name := bus_name(), path := bus_path()}}.
buses() ->
    % Static table of the two known bus devices, keyed by bus name.
    Defs = [{i2c0, <<"/dev/i2c-0">>}, {i2c1, <<"/dev/i2c-1">>}],
    maps:from_list([{Name, #{name => Name, path => Path}} || {Name, Path} <- Defs]).
% @doc Opens an I²C bus device by name.
%
% === Examples ===
% ```
% 2> I2C0 = grisp_i2c:open(i2c1).
% #Ref<0.4157010815.3886678017.238942>
% '''
% @see buses/0
-spec open(bus_name()) -> bus().
open(Name) ->
    % Crashes (badkey) if Name is not one of the known buses.
    #{path := Path} = maps:get(Name, buses()),
    % null/1 appends a NUL byte so the NIF receives a C-style string.
    i2c_open_nif(null(Path)).
% @doc Detects I²C devices on a bus.
%
% Probes every 7-bit address (1..127) and returns the address of each
% device that acknowledged.
%
% === Examples ===
% ```
% 3> [io_lib:format("0x~.16B", [Target]) || Target <- grisp_i2c:detect(I2C0)].
% ["0x18","0x36","0x37","0x52","0x57","0x5A","0x5F"]
% '''
-spec detect(bus()) -> [target_addr()].
detect(Bus) ->
    lists:filter(fun(Target) -> present(Bus, Target) end, lists:seq(1, 127)).
% @doc Performs a simplified read from a register on an I²C chip.
%
% This function sends a write message with the single byte register address as
% value, then a read message of the specified length.
%
% <em><b>Note!</b> Only some chips support this simple operation. Please consult
% the target chip specification.</em>
%
% === Examples ===
% ```
% 4> grisp_i2c:read(I2C1, TargetAddr, RegAddr, 1).
% <<255>>
% '''
-spec read(bus(), target_addr(), reg_addr(), length()) -> binary().
read(Bus, Target, Register, Length) ->
    % Both messages must succeed; the read payload is returned as a binary.
    [ok, Resp] = transfer(Bus, [
        {write, Target, 0, <<Register>>},
        {read, Target, 1, Length}
    ]),
    Resp.
% @doc Performs a simplified write to a register on an I²C chip.
%
% This function sends a write message with the single byte register address as
% the first byte and then the data as the following bytes.
%
% <em><b>Note!</b> Only some chips support this simple operation. Please consult
% the target chip specification.</em>
%
% === Examples ===
% ```
% 5> grisp_i2c:write(I2C1, TargetAddr, RegAddr, <<Value:8>>).
% ok
% '''
-spec write(bus(), target_addr(), reg_addr(), binary()) -> ok.
write(Bus, Target, Register, Data) ->
    % A single write message carrying the register address and payload.
    [ok] = transfer(Bus, [{write, Target, 0, <<Register, Data/binary>>}]),
    ok.
% @doc Transfers I²C messages on a bus.
%
% === Examples ===
% ```
% 6> grisp_i2c:transfer(I2C1, [{read, TargetAddr, RegAddr, 1}])
% [<<255>>]
% 7> grisp_i2c:transfer(I2C1, [{write, TargetAddr, RegAddr, <<Value:8>>}])
% ok
% '''
% Thin wrapper around the NIF; per the spec, returns one result per message
% or an error tuple.
-spec transfer(bus(), [message()]) -> [ok | binary()] | error().
transfer(Bus, Messages) -> i2c_transfer_nif(Bus, Messages).
%--- Callbacks -----------------------------------------------------------------
% @private
% Loads the NIF shared library (named after this module) when the module
% itself is loaded; crashes the load if the library is missing.
on_load() -> ok = erlang:load_nif(atom_to_list(?MODULE), 0).
%--- Internal ------------------------------------------------------------------

% NIF stubs: these bodies are replaced by the native implementations once
% on_load/0 has loaded the shared library.
i2c_open_nif(_Bus) -> ?NIF_STUB.
i2c_transfer_nif(_Bus, _Messages) -> ?NIF_STUB.

% Appends a NUL byte to the path iodata (C strings are NUL-terminated).
null(Bin) -> [Bin, 0].
%% Probes a single target address with an empty write; a device is present
%% when the transfer succeeds, absent when the NIF reports an error.
present(Bus, Target) ->
    Probe = [{write, Target, 0, <<>>}],
    case grisp_i2c:transfer(Bus, Probe) of
        [ok] -> true;
        {error, _Reason} -> false
    end.
-module(day19).
-export([main/0]).
-include_lib("eunit/include/eunit.hrl").
% Entry point: loads the rules and puzzle input from disk, then prints the
% answers for part one and part two.
main() ->
    Rules = parse(load("rules.txt")),
    Input = load("input.txt"),
    io:format("~p~n", [part_one(Rules, Input)]),
    io:format("~p~n", [part_two(Rules, Input)]),
    ok.
% Part one: count the number of strings in the input that match the given set
% of rules.
part_one(Rules, Input) ->
    count_matching(Rules, Input).
% Part two: count the number of strings in the input that match the updated set
% of rules, where rules 8 and 11 become recursive. The loop and pair rule
% forms encode that recursion without infinite regress (see matches/4).
part_two(Rules, Input) ->
    NewRules = Rules#{
        8 => {loop, 42},
        11 => {pair, 42, 31}
    },
    count_matching(NewRules, Input).
% Helper that counts the number of input lines matching the given set of rules.
count_matching(Rules, Input) ->
    length([Line || Line <- Input, matches(Rules, Line)]).
% Determines whether the given matcher matches the given line, applying rule
% zero with a continuation that checks if the entire input has been matched.
% Rules are one of:
%
%   {literal, "literal"}     - matches a literal sequence of characters
%   {sequence, [1, 2, 3]}    - applies a sequence of other rules (by ID) in order
%   {choice, [{...}, {...}]} - a choice between multiple (inline) rules
%   {loop, 42}               - a safe form of ID: 42 | 42 ID
%   {pair, 24, 42}           - a safe form of ID: 24 42 | 24 ID 42
matches(Rules, Line) ->
    matches(Rules, maps:get(0, Rules), Line, fun nothing_left/1).

% Final continuation: a match only counts if no input remains.
nothing_left("") -> true;
nothing_left(_) -> false.
% matches/4 is a continuation-passing-style matcher: each clause consumes a
% prefix of Line and invokes Continuation with the unconsumed suffix, so
% backtracking falls out of returning false and trying the next alternative.
%
% Literal rule: matches if the line begins with the provided literal. If so,
% strips off the prefix and continues matching the remainder of the input.
matches(_, {literal, Literal}, Line, Continuation) ->
    case string:prefix(Line, Literal) of
        nomatch -> false;
        Suffix -> Continuation(Suffix)
    end;
% Choice rule: matches if the line matches at least one of the child rules.
% Tries the first one, and if it fails recursively calls itself to check the
% next. If none of them succeed, returns false.
matches(Rules, {choice, [Rule | Rest]}, Line, Continuation) ->
    case matches(Rules, Rule, Line, Continuation) of
        true -> true;
        false -> matches(Rules, {choice, Rest}, Line, Continuation)
    end;
matches(_, {choice, []}, _, _) -> false;
% Loop rule: matches any number of consecutive iterations of rule N. Conceptually,
% it's `ID: N | N ID`, except doing that naively with a choice rule will lead to an
% infinite loop. We short circuit if we fail to match `N` instead of trying `N ID`
% (which will certainly fail as well).
matches(Rules, {loop, N}, Line, Continuation) ->
    matches(Rules, maps:get(N, Rules), Line, fun (Suffix) ->
        case Continuation(Suffix) of
            true -> true;
            false -> matches(Rules, {loop, N}, Suffix, Continuation)
        end
    end);
% Pair rule: matches any number of consecutive iterations of rule N, followed by
% the SAME number of iterations of rule O. Conceptually, it's `ID: N O | N ID O`.
% As with the loop rule we short circuit if we fail to match `N` instead of trying
% to match `N ID O` (which will likewise fail). Unlike the loop rule, we need to
% wrap the continuation with an `O` matcher each time we successfully match an `N`.
matches(Rules, {pair, N, O}, Line, Continuation) ->
    % Each successful match of N stacks one pending match of O via Wrapper.
    Wrapper = fun (Suffix) ->
        matches(Rules, maps:get(O, Rules), Suffix, Continuation)
    end,
    matches(Rules, maps:get(N, Rules), Line, fun (Suffix) ->
        case Wrapper(Suffix) of
            true -> true;
            false -> matches(Rules, {pair, N, O}, Suffix, Wrapper)
        end
    end);
% Sequence rule: matches if the line matches each of the child rules in
% sequence. Evaluates the first, pushing a continuation onto the stack that
% evaluates the rest (and finally running the originally-passed-in continuation
% if each rule in the sequence returns true).
matches(Rules, {sequence, [N | Rest]}, Line, Continuation) ->
    matches(Rules, maps:get(N, Rules), Line, fun (Suffix) ->
        matches(Rules, {sequence, Rest}, Suffix, Continuation)
    end);
matches(_, {sequence, []}, Line, Continuation) -> Continuation(Line).
-ifdef(TEST).
% End-to-end check: parse a small rule set and verify matches/2 accepts
% exactly the strings derivable from rule 0.
matches_test() ->
    Input = [
        "0: 4 1 5",
        "1: 2 3 | 3 2",
        "2: 4 4 | 5 5",
        "3: 4 5 | 5 4",
        "4: \"a\"",
        "5: \"b\""
    ],
    Rules = parse(Input),
    ?assertEqual(true, matches(Rules, "ababbb")),
    ?assertEqual(true, matches(Rules, "abbbab")),
    ?assertEqual(false, matches(Rules, "bababa")),
    ?assertEqual(false, matches(Rules, "aaabbb")),
    ?assertEqual(false, matches(Rules, "aaaabbb")).
-endif.
-ifdef(TEST).
% {loop, 10} accepts one or more repetitions of rule 10 ("a"); the
% enclosing sequence then requires a trailing literal "b".
loop_test() ->
    Rules = #{
        0 => {sequence, [1, 11]},  % one or more "a", then "b"
        1 => {loop, 10},           % one or more "a"
        10 => {literal, "a"},
        11 => {literal, "b"}
    },
    ?assertEqual(false, matches(Rules, "a")),
    ?assertEqual(false, matches(Rules, "aaaaa")),
    ?assertEqual(false, matches(Rules, "b")),
    ?assertEqual(true, matches(Rules, "ab")),
    ?assertEqual(true, matches(Rules, "aaaaab")).
-endif.
-ifdef(TEST).
% {pair, 10, 11} accepts N repetitions of rule 10 ("ab") followed by the
% SAME number of repetitions of rule 11 ("ba"); rule 0 appends one more "ab".
pair_test() ->
    Rules = #{
        0 => {sequence, [1, 10]},   % ab[...]baab
        1 => {pair, 10, 11},        % ab[...]ba
        10 => {sequence, [12, 13]}, % ab
        11 => {sequence, [13, 12]}, % ba
        12 => {literal, "a"},
        13 => {literal, "b"}
    },
    ?assertEqual(false, matches(Rules, "ab")),
    ?assertEqual(false, matches(Rules, "ababab")),
    ?assertEqual(false, matches(Rules, "ababbaab")),
    ?assertEqual(false, matches(Rules, "abababbababa")),
    ?assertEqual(true, matches(Rules, "abbaab")),
    ?assertEqual(true, matches(Rules, "abababbababaab")).
-endif.
% Parses a set of rules, returning a map from rule ID to rule definition.
parse(Lines) -> maps:from_list(lists:map(fun parse_rule/1, Lines)).

% Parses a single "ID: body" line into an {ID, Rule} pair. A body with no
% "|" separator is a single clause; otherwise it becomes a choice.
% (Replaces the original `case length(Parts) of 1` singleton check with a
% direct pattern match — length/1 is O(n) and the match is clearer.)
parse_rule(Line) ->
    [ID, Rest] = string:split(Line, ": "),
    Rule = case string:split(Rest, " | ", all) of
        [Single] -> parse_clause(Single);
        Multiple -> {choice, lists:map(fun parse_clause/1, Multiple)}
    end,
    {list_to_integer(ID), Rule}.

% Parses one alternative: either a quoted literal or a sequence of rule IDs.
parse_clause(Line) when hd(Line) == $" ->
    {literal, string:trim(Line, both, "\"")};
parse_clause(Line) ->
    {sequence, lists:map(fun list_to_integer/1, string:split(Line, " ", all))}.
-ifdef(TEST).
% Checks literal, sequence and choice parsing on a four-rule grammar.
parse_test() ->
    Rules = parse(["0: 1 2", "1: \"a\"", "2: 1 3 | 3 1", "3: \"b\""]),
    ?assertEqual(4, map_size(Rules)),
    ?assertEqual({sequence, [1, 2]}, maps:get(0, Rules)),
    ?assertEqual({literal, "a"}, maps:get(1, Rules)),
    ?assertEqual({choice, [{sequence, [1, 3]}, {sequence, [3, 1]}]}, maps:get(2, Rules)).
-endif.
% Loads the given input file and returns its lines.
% Fixes two defects in the original: the file descriptor was never closed,
% and file:read/2 with a fixed 1 MiB buffer silently truncated larger
% inputs. file:read_file/1 reads the whole file and closes it.
load(Filename) ->
    {ok, Bin} = file:read_file(Filename),
    % Convert to a character list so callers keep receiving string lines.
    string:split(unicode:characters_to_list(Bin), "\n", all).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_emsort).
% This is an implementation of an external N-way merge sort. It's primary
% purpose is to be used during database compaction as an optimization for
% managing the docid btree.
%
% Trunk currently writes the docid btree as its compacting the database but
% this is quite inefficient as it is written out of order in the general case
% as writes are ordered by update_seq.
%
% The general design of this module is a very standard merge sort with one
% caveat due to append only files. This is described in more detail in the
% sorting phase.
%
% The basic algorithm is in two halves. The first half stores KV pairs to disk
% which is then followed by the actual sorting phase that streams KV's back
% to the client using a fold-like function. After some basic definitions we'll
% describe both phases.
%
% Key/Value pairs (aka, KV pairs, or KVs) are simply lists of two-tuples with
% a key as the first element and an arbitrary value as the second. The key of
% this pair is what is used to determine the sort order based on native Erlang
% term comparison.
%
% Internally, KVs are stored as lists with a max size defined by
% #ems.chain_chunk. These lists are then chained together on disk using disk
% offsets as a poor man's linked list. The basic format of a list looks like
% {KVs, DiskOffset} where DiskOffset is either the atom nil which means "end
% of the list" or an integer that is a file position offset that is the
% location of another {KVs, DiskOffset} term. The head of each list is
% referred to with a single DiskOffset. The set of terms that extend from
% this initial DiskOffset to the last {KVs, nil} term is referred to in the
% code as a chain. Two important facts are that one call to couch_emsort:add/2
% creates a single chain, and that a chain is always sorted on disk (though its
% possible to be sorted in descending order which will be discussed later).
%
% The second major internal structure is the back bone. This is a list of
% chains that has a quite similar structure to chains but contains different
% data types and has no guarantee on ordering. The back bone is merely the
% list of all head DiskOffsets. The structure has the similar structure of
% {DiskOffsets, DiskOffset} that we use for chains, except that DiskOffsets is
% a list of integers that refer to the heads of chains. The maximum size of
% DiskOffsets is defined by #ems.bb_chunk. It is important to note that the
% backbone has no defined ordering. The other thing of note is that the RAM
% bounds are loosely defined as:
%
% #ems.bb_chunk * #ems.chain_chunk * avg_size(KV).
%
% Build Phase
% -----------
%
% As mentioned, each call to couch_emsort:add/2 creates a chain from the
% list of KVs that are passed in. This list is first sorted and then the
% chain is created by foldr-ing (note: r) across the list to build the
% chain on disk. It is important to note that the final chain is then
% sorted in ascending order on disk.
%
%
% Sort Phase
% ----------
%
% The sort phase is where the merge sort kicks in. This is generally your
% average merge sort with a caveat for append only storage. First the
% general outline.
%
% The general outline for this sort is that it iteratively merges chains
% in the backbone until less than #ems.bb_chunk chains exist. At this
% point it switches to the last merge sort phase where it just streams
% the sorted KVs back to the client using a fold function.
%
% The general chain merging is a pretty standard merge sort. You load up
% the initial KVs from each phase, pick the next one in sort order and
% then when you run out of KVs you're left with a single DiskOffset for
% the head of a single chain that represents the merge. These new
% DiskOffsets are used to build the new back bone.
%
% The one caveat here is that we're using append only storage. This is
% important because once we make a pass we've effectively reversed the
% sort order of each chain. Ie, the first merge results in chains that
% are ordered in descending order. Since, one pass reverses the list
% the trick is that each phase does two passes. The first phase picks
% the smallest KV to write next and the second phase picks the largest.
% In this manner each time we do a back bone merge we end up with chains
% that are always sorted in an ascending order.
%
% The one downfall is that in the interest of simplicity the sorting is
% restricted to Erlang's native term sorting. A possible extension would
% be to allow two comparison functions to be used, but this module is
% currently only used for docid sorting which is hardcoded to be raw
% Erlang ordering.
%
% Diagram
% -------
%
% If it helps, this is a general diagram of the internal structures. A
% couple points to note since this is ASCII art. The BB pointers across
% the top are lists of chains going down. Each BBN item is one of the
% {DiskOffsets, DiskOffset} structures discussed earlier. Going down,
% the CMN nodes are actually representing #ems.bb_chunk chains in parallel
% going off the back bone. It is important and not represented in this
% diagram that within these groups the chains don't have to be the same
% length. That's just a limitation of my ASCII artistic abilities.
%
% The BBN* node is marked with a * to denote that it is the only state
% that we store when writing headers to disk as it has pointers that
% lead us to all data in the tree.
%
% BB1 <- BB2 <- BB3 <- BBN*
% | | | |
% v v v v
% CA1 CB1 CC1 CD1
% | | |
% v v v
% CA2 CC2 CD2
% | |
% v v
% CA3 CD3
%
-export([open/1, open/2, get_fd/1, get_state/1]).
-export([add/2, merge/1, sort/1, iter/1, next/1]).
-record(ems, {
fd,
root,
bb_chunk = 10,
chain_chunk = 100
}).
%% Create a new merge-sort handle over the given couch_file descriptor.
%% open/1 uses the default chunk sizes; open/2 applies an option proplist.
open(Fd) ->
    open(Fd, []).

open(Fd, Options) ->
    {ok, set_options(#ems{fd = Fd}, Options)}.
%% Fold the option proplist into the #ems{} record. Recognised options:
%% {root, Root}, {chain_chunk, N}, {back_bone_chunk, N}. Any other option
%% crashes with function_clause (assertive by design).
set_options(Ems, []) ->
    Ems;
set_options(Ems, [{root, Root} | Rest]) ->
    set_options(Ems#ems{root=Root}, Rest);
set_options(Ems, [{chain_chunk, Count} | Rest]) when is_integer(Count) ->
    set_options(Ems#ems{chain_chunk=Count}, Rest);
set_options(Ems, [{back_bone_chunk, Count} | Rest]) when is_integer(Count) ->
    set_options(Ems#ems{bb_chunk=Count}, Rest).
%% Accessor: the file descriptor this sorter writes through.
get_fd(#ems{fd=Fd}) ->
    Fd.

%% Accessor: the backbone root — the only state that needs to be stored in
%% a header to re-open the structure later.
get_state(#ems{root=Root}) ->
    Root.

%% Add a list of KVs as one new (internally sorted) chain.
%% A no-op for the empty list.
add(Ems, []) ->
    {ok, Ems};
add(Ems, KVs) ->
    Pos = write_kvs(Ems, KVs),
    {ok, add_bb_pos(Ems, Pos)}.
%% Convenience: fully merge, then return an iterator over the sorted KVs.
sort(#ems{}=Ems) ->
    {ok, Ems1} = merge(Ems),
    iter(Ems1).

%% Run the decimation (merge) phase until the backbone fits in one node.
merge(#ems{root=undefined}=Ems) ->
    {ok, Ems};
merge(#ems{}=Ems) ->
    {ok, decimate(Ems)}.

%% Create an iterator over the sorted data. Only valid once the backbone
%% consists of a single node (i.e. after merge/1); otherwise returns
%% {error, not_merged}.
iter(#ems{root=undefined}=Ems) ->
    {ok, {Ems, []}};
iter(#ems{root={BB, nil}}=Ems) ->
    Chains = init_chains(Ems, small, BB),
    {ok, {Ems, Chains}};
iter(#ems{root={_, _}}) ->
    {error, not_merged}.

%% Pull the next (smallest) KV from the iterator, or 'finished' when the
%% chains are exhausted.
next({_Ems, []}) ->
    finished;
next({Ems, Chains}) ->
    {KV, RestChains} = choose_kv(small, Ems, Chains),
    {ok, KV, {Ems, RestChains}}.
%% Record a new chain-head offset Pos in the backbone, spilling a full
%% backbone node to disk (via append_item) once it holds bb_chunk entries.
add_bb_pos(#ems{root=undefined}=Ems, Pos) ->
    Ems#ems{root={[Pos], nil}};
add_bb_pos(#ems{root={BB, Prev}}=Ems, Pos) ->
    {NewBB, NewPrev} = append_item(Ems, {BB, Prev}, Pos, Ems#ems.bb_chunk),
    Ems#ems{root={NewBB, NewPrev}}.
%% Persist KVs as a single chain and return the disk offset of its head.
%% The foldr (note: r) over the sorted list makes the smallest KV the
%% chain head, so the chain reads back in ascending order.
write_kvs(Ems, KVs) ->
    % Write the list of KV's to disk in sorted order in chunks
    % of 100. Also make sure that the order is so that they
    % can be streamed in ascending order.
    {LastKVs, LastPos} =
        lists:foldr(fun(KV, Acc) ->
            append_item(Ems, Acc, KV, Ems#ems.chain_chunk)
        end, {[], nil}, lists:sort(KVs)),
    {ok, Final, _} = couch_file:append_term(Ems#ems.fd, {LastKVs, LastPos}),
    Final.
decimate(#ems{root={_BB, nil}}=Ems) ->
    % We have less than bb_chunk backbone pointers so we're
    % good to start streaming KV's back to the client.
    Ems;
decimate(#ems{root={BB, NextBB}}=Ems) ->
    % To make sure we have a bounded amount of data in RAM
    % at any given point we first need to decimate the data
    % by performing the first couple iterations of a merge
    % sort writing the intermediate results back to disk.

    % The first pass gives us a sort with pointers linked from
    % largest to smallest.
    {RevBB, RevNextBB} = merge_back_bone(Ems, small, BB, NextBB),

    % We have to run a second pass so that links are pointed
    % back from smallest to largest.
    {FwdBB, FwdNextBB} = merge_back_bone(Ems, big, RevBB, RevNextBB),

    % Continue decimating until we have an acceptable bound on
    % the number of keys to use.
    decimate(Ems#ems{root={FwdBB, FwdNextBB}}).
%% Merge every backbone node's group of chains (in Choose order) into a
%% single chain each, producing a new, one-level-smaller backbone.
merge_back_bone(Ems, Choose, BB, NextBB) ->
    BBPos = merge_chains(Ems, Choose, BB),
    merge_rest_back_bone(Ems, Choose, NextBB, {[BBPos], nil}).

%% Walk the remaining backbone nodes on disk, merging each node's chains
%% and accumulating the resulting chain heads into a fresh backbone.
merge_rest_back_bone(_Ems, _Choose, nil, Acc) ->
    Acc;
merge_rest_back_bone(Ems, Choose, BBPos, Acc) ->
    {ok, {BB, NextBB}} = couch_file:pread_term(Ems#ems.fd, BBPos),
    NewPos = merge_chains(Ems, Choose, BB),
    {NewBB, NewPrev} = append_item(Ems, Acc, NewPos, Ems#ems.bb_chunk),
    merge_rest_back_bone(Ems, Choose, NextBB, {NewBB, NewPrev}).
%% N-way merge the chains referenced by one backbone node into a single
%% on-disk chain; returns the new chain-head offset. Because storage is
%% append-only, the merged chain comes out in the *reverse* of the
%% Choose order (see the "Sort Phase" notes above).
merge_chains(Ems, Choose, BB) ->
    Chains = init_chains(Ems, Choose, BB),
    merge_chains(Ems, Choose, Chains, {[], nil}).

merge_chains(Ems, _Choose, [], ChainAcc) ->
    {ok, CPos, _} = couch_file:append_term(Ems#ems.fd, ChainAcc),
    CPos;
merge_chains(#ems{chain_chunk=CC}=Ems, Choose, Chains, Acc) ->
    {KV, RestChains} = choose_kv(Choose, Ems, Chains),
    {NewKVs, NewPrev} = append_item(Ems, Acc, KV, CC),
    merge_chains(Ems, Choose, RestChains, {NewKVs, NewPrev}).
%% Load the head node of every chain referenced by BB, then order the
%% chain list so the next KV to emit is always at the front.
init_chains(Ems, Choose, BB) ->
    Chains = lists:map(fun(CPos) ->
        {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos),
        {KVs, NextKVs}
    end, BB),
    order_chains(Choose, Chains).

%% Ascending order when picking smallest KVs, descending when picking largest.
order_chains(small, Chains) -> lists:sort(Chains);
order_chains(big, Chains) -> lists:reverse(lists:sort(Chains)).
%% Pop the next KV from the front chain. When the front chain's in-memory
%% node is exhausted, refill it from disk (if it has a continuation) and
%% re-insert the chain at its sorted position among the remaining chains.
choose_kv(_Choose, _Ems, [{[KV], nil} | Rest]) ->
    % Last KV of the last node of this chain: the chain is done.
    {KV, Rest};
choose_kv(Choose, Ems, [{[KV], Pos} | RestChains]) ->
    % Last KV of this node: load the next node from disk.
    {ok, Chain} = couch_file:pread_term(Ems#ems.fd, Pos),
    case Choose of
        small -> {KV, ins_small_chain(RestChains, Chain, [])};
        big -> {KV, ins_big_chain(RestChains, Chain, [])}
    end;
choose_kv(Choose, _Ems, [{[KV | RestKVs], Prev} | RestChains]) ->
    case Choose of
        small -> {KV, ins_small_chain(RestChains, {RestKVs, Prev}, [])};
        big -> {KV, ins_big_chain(RestChains, {RestKVs, Prev}, [])}
    end.
%% Insert Chain into the chain list (kept ascending by head key), skipping
%% past every chain whose head key is strictly smaller than Chain's.
ins_small_chain([{[{HeadKey, _} | _], _} = Ahead | Tail],
                {[{NewKey, _} | _], _} = Chain, Passed) when HeadKey < NewKey ->
    ins_small_chain(Tail, Chain, [Ahead | Passed]);
ins_small_chain(Tail, Chain, Passed) ->
    lists:reverse(Passed, [Chain | Tail]).

%% Mirror of ins_small_chain/3 for descending order: skip past chains
%% whose head key is strictly larger than Chain's.
ins_big_chain([{[{HeadKey, _} | _], _} = Ahead | Tail],
              {[{NewKey, _} | _], _} = Chain, Passed) when HeadKey > NewKey ->
    ins_big_chain(Tail, Chain, [Ahead | Passed]);
ins_big_chain(Tail, Chain, Passed) ->
    lists:reverse(Passed, [Chain | Tail]).
%% Prepend Pos to the accumulator list of a {List, PrevOffset} node,
%% spilling the full node to disk (via couch_file) once it holds Size
%% elements; the offset of the spilled node becomes the new tail pointer.
%% (This revision also removes extraction residue that had been appended
%% after the final clause and broke compilation.)
append_item(Ems, {List, Prev}, Pos, Size) when length(List) >= Size ->
    {ok, PrevList, _} = couch_file:append_term(Ems#ems.fd, {List, Prev}),
    {[Pos], PrevList};
append_item(_Ems, {List, Prev}, Pos, _Size) ->
    {[Pos | List], Prev}.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(kai_config).
-behaviour(gen_server).
-export([start_link/1, stop/0]).
-export([get/1, node_info/0]).
-export([
init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3
]).
-include("kai.hrl").
-define(SERVER, ?MODULE).
%% @doc Starts the config server registered locally as ?SERVER.
start_link(Args) ->
    gen_server:start_link({local, ?SERVER}, ?MODULE, Args, _Opts = []).

%% gen_server callback: copies all Args key/value pairs into a private
%% named ets table, then derives and stores the local node identity
%% ({IPAddress, RpcPort}) and snaps number_of_buckets to a power of two.
init(Args) ->
    ets:new(config, [set, private, named_table]),
    lists:foreach(
        fun({Key, Value}) -> ets:insert(config, {Key, Value}) end,
        Args
    ),
    % Fall back to the OS hostname when none was configured.
    Hostname =
        case proplists:get_value(hostname, Args) of
            undefined -> {ok, H} = inet:gethostname(), H;
            H -> H
        end,
    {ok, Address} = inet:getaddr(Hostname, inet),
    Port = proplists:get_value(rpc_port, Args),
    ets:insert(config, {node, {Address, Port}}),
    % Buckets must be a power of two; round to the nearest exponent.
    NumberOfBuckets = proplists:get_value(number_of_buckets, Args),
    Exponent = round( math:log(NumberOfBuckets) / math:log(2) ),
    ets:insert(config, {number_of_buckets, trunc( math:pow(2, Exponent) )}),
    {ok, []}.
%% gen_server callback: drop the ets config table on shutdown.
terminate(_Reason, _State) ->
    ets:delete(config),
    ok.
%% Fetch a single key from the `config' ets table; returns `undefined'
%% when the key is absent (or the stored object has an unexpected shape).
do_get(Key) ->
    case ets:lookup(config, Key) of
        [{Key, Value} | _] -> Value;
        _ -> undefined
    end.

%% Fetch a list of keys, preserving their order. ListOfValues carries
%% already-resolved values in reverse order (as in the original API).
do_get(Keys, ListOfValues) ->
    lists:reverse(ListOfValues, [do_get(Key) || Key <- Keys]).
%% Build the gen_server reply for a get request: a list argument is a
%% batch of keys, anything else a single key.
get(ListOfKeys, State) when is_list(ListOfKeys)->
    {reply, do_get(ListOfKeys, []), State};
get(Key, State) ->
    {reply, do_get(Key), State}.

%% Build the node_info reply: local {IP, Port} plus selected settings.
node_info(State) ->
    [LocalNode, NumberOfVirtualNode] =
        do_get([node, number_of_virtual_nodes], []),
    Info = [{number_of_virtual_nodes, NumberOfVirtualNode}],
    {reply, {node_info, LocalNode, Info}, State}.
%% gen_server callbacks. Unknown calls crash with function_clause
%% (assertive); casts and stray info messages are ignored.
handle_call(stop, _From, State) ->
    {stop, normal, stopped, State};
handle_call({get, Key}, _From, State) ->
    get(Key, State);
handle_call(node_info, _From, State) ->
    node_info(State).

handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(_Info, State) ->
    {noreply, State}.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% Public API: synchronous calls into the registered config server.
%% (This revision also removes extraction residue appended after the
%% last clause that broke compilation.)

%% @doc Stops the config server.
stop() ->
    gen_server:call(?SERVER, stop).

%% @doc Looks up Key — or, when Key is a list, a batch of keys — in the
%% config table; missing keys yield `undefined'.
get(Key) ->
    gen_server:call(?SERVER, {get, Key}).

%% @doc Returns {node_info, LocalNode, Info} for this node.
node_info() ->
    gen_server:call(?SERVER, node_info).
% @copyright 2011-2014 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Basic Histogram.
%% <NAME> and <NAME>, "A streaming parallel
%% decision tree algorithm", J. Machine Learning Research 11
%% (2010), pp. 849--872.
%% @end
%% @version $Id$
-module(histogram).
-author('<EMAIL>').
-vsn('$Id$').
% external API
-export([create/1, add/2, add/3, get_data/1, get_size/1,
get_num_elements/1, get_num_inserts/1, merge/2]).
-export([merge_weighted/3, normalize_count/2]).
% private API for unit tests:
-export([find_smallest_interval/1, merge_interval/2,
tester_create_histogram/2, tester_is_valid_histogram/1]).
-export([foldl_until/2, foldr_until/2]).
-include("scalaris.hrl").
-include("record_helpers.hrl").
-ifdef(with_export_type_support).
-export_type([histogram/0, value/0]).
-endif.
-type value() :: number().
-type data_item() :: {value(), pos_integer()}.
-type data_list() :: list(data_item()).
-record(histogram, {size = ?required(histogram, size):: non_neg_integer(),
data = [] :: data_list(),
data_size = 0 :: non_neg_integer(),
inserts = 0 :: non_neg_integer()
}).
-opaque histogram() :: #histogram{}.
%% @doc Creates an empty Size sized histogram
-spec create(Size::non_neg_integer()) -> histogram().
create(Size) ->
    #histogram{size = Size}.

%% Add a single occurrence of Value.
-spec add(Value::value(), Histogram::histogram()) -> histogram().
add(Value, Histogram) ->
    add(Value, 1, Histogram).

%% Add Count occurrences of Value. A zero-capacity histogram silently
%% ignores all input; otherwise the new bucket is inserted in sorted
%% order and resize/1 re-establishes the size bound by merging buckets.
-spec add(Value::value(), Count::pos_integer(), Histogram::histogram()) -> histogram().
add(_Value, _Count, Histogram = #histogram{size = 0}) ->
    Histogram;
add(Value, Count, Histogram = #histogram{data = OldData, data_size = DataSize, inserts = Inserts}) ->
    DataNew = insert({Value, Count}, OldData),
    resize(Histogram#histogram{data = DataNew, data_size = DataSize + 1,
                               inserts = Inserts + Count}).
%% The raw bucket list, ascending by value.
-spec get_data(Histogram::histogram()) -> data_list().
get_data(Histogram) ->
    Histogram#histogram.data.

%% The configured maximum number of buckets.
-spec get_size(Histogram::histogram()) -> non_neg_integer().
get_size(Histogram) ->
    Histogram#histogram.size.

%% The number of buckets currently in use.
-spec get_num_elements(Histogram::histogram()) -> non_neg_integer().
get_num_elements(Histogram) ->
    Histogram#histogram.data_size.

%% The total count of values ever added (sum of all insert counts).
-spec get_num_inserts(Histogram::histogram()) -> non_neg_integer().
get_num_inserts(Histogram) ->
    Histogram#histogram.inserts.
%% @doc Merges the given two histograms by adding every data point of Hist2
%% to Hist1. A zero-capacity Hist1 stays empty.
-spec merge(Hist1::histogram(), Hist2::histogram()) -> histogram().
merge(Hist1 = #histogram{size = 0}, _Hist2) -> Hist1;
merge(Hist1 = #histogram{data = Hist1Data}, #histogram{data = Hist2Data}) ->
    NewData = lists:foldl(fun insert/2, Hist1Data, Hist2Data),
    resize(Hist1#histogram{data = NewData, data_size = length(NewData)}).

%% @doc Merges Hist2 into Hist1 and applies a weight to the Count of Hist2
-spec merge_weighted(Hist1::histogram(), Hist2::histogram(), Weight::pos_integer()) -> histogram().
merge_weighted(Hist1, #histogram{data = Hist2Data} = Hist2, Weight) ->
    WeightedData = lists:keymap(fun(Count) -> Count * Weight end, 2, Hist2Data),
    WeightedHist2 = Hist2#histogram{data = WeightedData},
    merge(Hist1, WeightedHist2).

%% @doc Normalizes the Count by a normalization constant N.
%% Integer division: buckets whose count rounds down to 0 are dropped.
-spec normalize_count(N::pos_integer(), Histogram::histogram()) -> histogram().
normalize_count(N, Histogram) ->
    Data = histogram:get_data(Histogram),
    DataNew = lists:keymap(fun(Count) -> Count div N end, 2, Data),
    DataNew2 = lists:filter(fun({_Value, Count}) ->
                                Count > 0
                            end, DataNew),
    resize(Histogram#histogram{data = DataNew2, data_size = length(DataNew2)}).
%% @doc Traverses the histogram from the left until TargetCount entries
%% have been counted and returns the value of the bucket at which the
%% running sum reached (or first exceeded) TargetCount.
%% TODO change this to expect non empty histogram
-spec foldl_until(TargetCount::non_neg_integer(), histogram())
        -> {fail, Value::value() | nil, SumSoFar::non_neg_integer()} |
           {ok, Value::value() | nil, Sum::non_neg_integer()}.
foldl_until(TargetCount, Histogram) ->
    HistData = get_data(Histogram),
    foldl_until_helper(TargetCount, HistData, _SumSoFar = 0, _BestValue = nil).

%% @doc Like foldl_until but traverses the list from the right
-spec foldr_until(TargetCount::non_neg_integer(), histogram())
        -> {fail, Value::value() | nil, SumSoFar::non_neg_integer()} |
           {ok, Value::value() | nil, Sum::non_neg_integer()}.
foldr_until(TargetCount, Histogram) ->
    HistData = get_data(Histogram),
    foldl_until_helper(TargetCount, lists:reverse(HistData), _SumSoFar = 0, _BestValue = nil).
%% @doc Private method, only exported for histogram_rt. Shared traversal
%% for foldl_until/foldr_until: walk the (already oriented) bucket list,
%% accumulating counts until the running sum reaches TargetCount, and
%% remember the value of the last bucket consumed. Returns
%% {ok, Value, Sum} on success or {fail, BestValue, SumSoFar} when the
%% list is exhausted first.
-spec foldl_until_helper(non_neg_integer(), [{number(), pos_integer()}],
                         non_neg_integer(), nil | non_neg_integer())
        -> {fail, number() | nil, non_neg_integer()} |
           {ok, number() | nil, non_neg_integer()}.
foldl_until_helper(Target, _Data, Acc, Best) when Acc >= Target ->
    {ok, Best, Acc};
foldl_until_helper(_Target, [], Acc, Best) ->
    {fail, Best, Acc};
foldl_until_helper(Target, [{Value, Count} | Rest], Acc, _Best) ->
    foldl_until_helper(Target, Rest, Acc + Count, Value).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% private
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Resizes the given histogram to fit its maximum size (reduces the data)
%% by repeatedly merging the two closest neighbouring buckets until
%% data_size =< size.
%% PRE: histogram maximum size > 0 (from create/1)
-spec resize(Histogram::histogram()) -> histogram().
resize(Histogram = #histogram{data = Data, size = Size, data_size = DataSize})
  when DataSize > Size andalso DataSize > 1 ->
    ?DBG_ASSERT(Size > 0),
    %% we need at least two items to do the following:
    MinFirstValue = find_smallest_interval(Data),
    NewHistogram = Histogram#histogram{data = merge_interval(MinFirstValue, Data),
                                       data_size = DataSize - 1},
    resize(NewHistogram);
resize(#histogram{} = Histogram) ->
    Histogram.
%% Sorted insert of a {Value, Count} bucket, ascending by Value. A new
%% bucket whose value ties an existing one is placed before it.
-spec insert({number(), pos_integer()}, [{number(), pos_integer()}]) ->
          [{number(), pos_integer()}].
insert(Item, []) ->
    [Item];
insert({Key, _} = Item, [{HeadKey, _} | _] = Sorted) when Key =< HeadKey ->
    [Item | Sorted];
insert(Item, [Head | Tail]) ->
    [Head | insert(Item, Tail)].
%% @doc Finds the smallest interval between two consecutive values and
%% returns the first (leftmost) value of that interval. On ties the
%% earliest interval wins, which keeps duplicate-value merges
%% deterministic.
%% PRE: length(Data) >= 2
-spec find_smallest_interval([{number(), pos_integer()}]) -> number().
find_smallest_interval([{First, _}, {Second, _} | Rest]) ->
    find_smallest_interval_loop(Second - First, First, Second, Rest).

-spec find_smallest_interval_loop(number(), number(), number(),
                                  [{number(), pos_integer()}]) -> number().
find_smallest_interval_loop(BestGap, BestStart, Prev, [{Value, _} | Rest]) ->
    Gap = Value - Prev,
    {NewGap, NewStart} =
        case BestGap =< Gap of
            true -> {BestGap, BestStart};
            false -> {Gap, Prev}
        end,
    find_smallest_interval_loop(NewGap, NewStart, Value, Rest);
find_smallest_interval_loop(_BestGap, BestStart, _Prev, []) ->
    BestStart.
%% @doc Merges the first pair of consecutive entries whose left value
%% equals MinFirstValue into one count-weighted-average bucket; stops
%% after the first match. Float values use `/', pure integers use `div'.
%% PRE: length(Data) >= 2 and MinFirstValue occurs as a pair's left value.
-spec merge_interval(number(), [{number(), pos_integer()}]) ->
          [{number(), pos_integer()}].
merge_interval(Value, [{Value, C1}, {V2, C2} | Rest])
        when is_float(Value) orelse is_float(V2) ->
    [{(Value * C1 + V2 * C2) / (C1 + C2), C1 + C2} | Rest];
merge_interval(Value, [{Value, C1}, {V2, C2} | Rest]) ->
    [{(Value * C1 + V2 * C2) div (C1 + C2), C1 + C2} | Rest];
merge_interval(Target, [Entry | Rest]) ->
    [Entry | merge_interval(Target, Rest)].
%% Builds a histogram term for the `tester' framework. Widens the
%% capacity to the actual data length when Data already exceeds the
%% requested Size, so the data_size =< size invariant holds.
-spec tester_create_histogram(Size::non_neg_integer(), Data::data_list()) -> histogram().
tester_create_histogram(Size = 0, _Data) ->
    #histogram{size = Size, data = [], data_size = 0};
tester_create_histogram(Size, Data) ->
    DataSize = length(Data),
    if DataSize > Size ->
           #histogram{size = DataSize, data = Data, data_size = DataSize};
       true ->
           #histogram{size = Size, data = Data, data_size = DataSize}
    end.
%% Type check used by the `tester' framework: a term is a valid histogram
%% iff it is a #histogram{} record whose data_size matches the actual
%% data length and does not exceed the configured maximum size, with the
%% empty zero-size histogram as a special case. Anything else is invalid.
%% (This revision also removes extraction residue that had been appended
%% after the final clause and broke compilation.)
-spec tester_is_valid_histogram(X::term()) -> boolean().
tester_is_valid_histogram(#histogram{size = 0, data = [], data_size = 0}) ->
    true;
tester_is_valid_histogram(#histogram{size = Size, data = Data, data_size = DataSize})
        when Size > 0 ->
    Size >= DataSize andalso length(Data) =:= DataSize;
tester_is_valid_histogram(_) ->
    false.
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(hocon_schema_doc).
-export([gen/2]).
-include("hoconsc.hrl").
-include("hocon_private.hrl").
%% Generate a markdown document for Schema. The second argument is either
%% a title (string/binary), a #{title, body} map, or `undefined' for the
%% default title. Root fields are rendered at heading level 2, all other
%% structs at level 3.
gen(Schema, undefined) ->
    gen(Schema, "# HOCON Document");
gen(Schema, Title) when is_list(Title) orelse is_binary(Title) ->
    gen(Schema, #{title => Title, body => <<>>});
gen(Schema, #{title := Title, body := Body}) ->
    {RootNs, RootFields, Structs} = hocon_schema:find_structs(Schema),
    [Title,
     "\n",
     Body,
     "\n",
     fmt_structs(2, RootNs, [{RootNs, "Root Config Keys", #{fields => RootFields}}]),
     fmt_structs(3, RootNs, Structs)].
%% Render each struct as its own section at the given heading depth.
fmt_structs(_HeadWeight, _RootNs, []) -> [];
fmt_structs(HeadWeight, RootNs, [{Ns, Name, Fields} | Rest]) ->
    [fmt_struct(HeadWeight, RootNs, Ns, Name, Fields), "\n" |
     fmt_structs(HeadWeight, RootNs, Rest)].

%% One struct section: heading, config paths, optional struct description,
%% then the field list. The namespace is suppressed (set to `undefined')
%% when it equals the root namespace.
fmt_struct(HeadWeight, RootNs, Ns0, Name, #{fields := Fields} = Meta) ->
    Ns = case RootNs =:= Ns0 of
             true -> undefined;
             false -> Ns0
         end,
    Paths = case Meta of
                #{paths := Ps} -> lists:sort(maps:keys(Ps));
                _ -> []
            end,
    FullNameDisplay = ref(Ns, Name),
    [ hocon_md:h(HeadWeight, FullNameDisplay)
    , fmt_paths(Paths)
    , case Meta of
          #{desc := StructDoc} -> StructDoc;
          _ -> []
      end
    , "\n**Fields**\n\n"
    , fmt_fields(Ns, Fields)
    ].
%% List the dotted config paths plus their environment-variable overrides
%% (dots become double underscores, uppercased, prefixed via hocon_util).
fmt_paths([]) -> [];
fmt_paths(Paths) ->
    Envs = lists:map(fun(Path0) ->
                         Path = string:tokens(Path0, "."),
                         Env = string:uppercase(string:join(Path, "__")),
                         hocon_util:env_prefix("EMQX_") ++ Env
                     end, Paths),
    ["\n**Config paths**\n\n",
     simple_list(Paths),
     "\n"
     "\n**Env overrides**\n\n",
     simple_list(Envs),
     "\n"
    ].
%% Render each item as a markdown bullet with the item in code style.
simple_list(Items) ->
    lists:map(fun(Item) -> [" - ", hocon_md:code(Item), "\n"] end, Items).
%% Render all visible fields; fields whose schema flags them `hidden'
%% are skipped.
fmt_fields(_Ns, []) -> [];
fmt_fields(Ns, [{Name, FieldSchema} | Fields]) ->
    case hocon_schema:field_schema(FieldSchema, hidden) of
        true -> fmt_fields(Ns, Fields);
        _ -> [bin(fmt_field(Ns, Name, FieldSchema)) | fmt_fields(Ns, Fields)]
    end.

%% One bullet per field: "name: type", followed by optional indented
%% Default and description blocks (omitted when undefined).
fmt_field(Ns, Name, FieldSchema) ->
    Type = fmt_type(Ns, hocon_schema:field_schema(FieldSchema, type)),
    Default = fmt_default(hocon_schema:field_schema(FieldSchema, default)),
    Desc = hocon_schema:field_schema(FieldSchema, desc),
    [ ["- ", bin(Name), ": ", Type, "\n"]
    , case Default =/= undefined of
          true -> ["\n", hocon_md:indent(2, [["Default = ", Default]]), "\n"];
          false -> []
      end
    , case Desc =/= undefined of
          true -> ["\n", hocon_md:indent(2, [Desc]), "\n"];
          false -> []
      end
    , "\n"
    ].
%% Pretty-print a default value: inline code for one-liners, a fenced
%% block otherwise. `undefined' means "no default".
fmt_default(undefined) -> undefined;
fmt_default(Value) ->
    case hocon_pp:do(Value, #{newline => "", embedded => true}) of
        [OneLine] -> ["`", OneLine, "`"];
        Lines -> ["\n```\n", [[L, "\n"] || L <- Lines], "```"]
    end.

fmt_type(Ns, T) -> hocon_md:code(do_type(Ns, T)).

%% Render a schema type term as markdown; struct references become local
%% links, composite types (arrays, unions, enums, maps) recurse.
do_type(_Ns, A) when is_atom(A) -> bin(A); % singleton
do_type(Ns, Ref) when is_list(Ref) -> do_type(Ns, ?REF(Ref));
do_type(Ns, ?REF(Ref)) -> hocon_md:local_link(ref(Ns, Ref), ref(Ns, Ref));
do_type(_Ns, ?R_REF(Module, Ref)) -> do_type(hocon_schema:namespace(Module), ?REF(Ref));
do_type(Ns, ?ARRAY(T)) -> io_lib:format("[~s]", [do_type(Ns, T)]);
do_type(Ns, ?UNION(Ts)) -> lists:join(" | ", [do_type(Ns, T) || T <- Ts]);
do_type(_Ns, ?ENUM(Symbols)) -> lists:join(" | ", [bin(S) || S <- Symbols]);
do_type(Ns, ?LAZY(T)) -> do_type(Ns, T);
do_type(Ns, ?MAP(Name, T)) -> ["{$", bin(Name), " -> ", do_type(Ns, T), "}"];
do_type(_Ns, {'$type_refl', #{name := Type}}) -> lists:flatten(Type).
%% Build the display/anchor name for a struct reference.
%% (This revision also removes extraction residue appended after the
%% last clause of bin/1 that broke compilation.)
ref(undefined, Name) -> Name;
ref(Ns, Name) ->
    %% when namespace is the same as reference name
    %% we do not prepend the reference link with namespace
    %% because the root name is already unique enough
    case bin(Ns) =:= bin(Name) of
        true -> bin(Ns);
        false -> [bin(Ns), ":", bin(Name)]
    end.

%% Coerce a string, atom or binary to a UTF-8 binary.
bin(S) when is_list(S) -> unicode:characters_to_binary(S, utf8);
bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
bin(B) when is_binary(B) -> B.
-module(gm_options).
-export([
opt/1
]).
%% Options for interacting with GraphicsMagick function
-spec opt(atom() | tuple()) -> tuple().
%% Each clause maps a high-level option term onto the GraphicsMagick
%% command-line form used by the command builder. Results are either
%%   {Flag}                     - a bare switch or literal argument, or
%%   {Flag, Template, Bindings} - a switch whose argument is built by
%%                                substituting ":name" placeholders in
%%                                Template with values from Bindings.
%% Unknown options crash with function_clause (assertive).
opt('+adjoin') ->
    {"+adjoin"};
opt(adjoin) ->
    {"-adjoin"};
opt(auto_orient) ->
    {"-auto-orient"};
opt({background, Color}) ->
    {"-background", ":color", [{color, Color}]};
opt({blur, Radius, Sigma}) ->
    {"-blur", ":radiusx:sigma", [
        {radius, Radius},
        {sigma, Sigma}
    ]};
opt({compose, Operator}) ->
    {"-compose", ":operator", [{operator, Operator}]};
opt(create_directories) ->
    {"-create-directories"};
opt({crop, Width, Height}) ->
    {"-crop", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
opt({crop, Width, Height, XOffset, YOffset}) ->
    {"-crop", ":widthx:height+:x_offset+:y_offset", [
        {width, Width},
        {height, Height},
        {x_offset, XOffset},
        {y_offset, YOffset}
    ]};
opt({define, Key}) ->
    {"-define", ":key", [{key, Key}]};
opt({define, Key, Value}) ->
    {"-define", ":key=:value", [
        {key, Key},
        {value, Value}
    ]};
opt({dissolve, Percent}) ->
    {"-dissolve", ":percent", [{percent, Percent}]};
opt({draw, Primitive, XInset, YInset}) ->
    {"-draw", ":primitive :xinset,:yinset", [
        {primitive, Primitive},
        {xinset, XInset},
        {yinset, YInset}
    ]};
opt({draw, Primitive, XInset, YInset, XOffset, YOffset}) ->
    {"-draw", ":primitive :x_inset,:y_inset :x_offset,:y_offset", [
        {primitive, Primitive},
        {x_inset, XInset},
        {y_inset, YInset},
        {x_offset, XOffset},
        {y_offset, YOffset}
    ]};
opt({edge, Radius}) ->
    {"-edge", ":radius", [{radius, Radius}]};
opt({extent, Width, Height}) ->
    {"-extent", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
opt(flatten) ->
    {"-flatten"};
opt({fill, Color}) ->
    {"-fill", ":color", [{color, Color}]};
opt(flip) ->
    {"-flip"};
opt({font, Font}) ->
    {"-font", ":font", [{font, Font}]};
opt({format, Format}) ->
    {"-format", ":format", [{format, Format}]};
opt({geometry, Width, Height}) ->
    {"-geometry", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
%% Geometry offsets carry an explicit sign: negative numbers already
%% render with "-", non-negative ones get a "+" prefix as GM expects.
opt({geometry, Width, Height, XOffset, YOffset}) ->
    opt({geometry, Width, Height, XOffset, YOffset, ''});
opt({geometry, Width, Height, XOffset, YOffset, ResizeOption}) ->
    XOffsetOption = case XOffset < 0 of
        true -> erlang:integer_to_list(XOffset);
        false -> "+" ++ erlang:integer_to_list(XOffset)
    end,
    YOffsetOption = case YOffset < 0 of
        true -> erlang:integer_to_list(YOffset);
        false -> "+" ++ erlang:integer_to_list(YOffset)
    end,
    {"-geometry", ":widthx:height:x_offset:y_offset:resize_option", [
        {width, Width},
        {height, Height},
        {x_offset, XOffsetOption},
        {y_offset, YOffsetOption},
        {resize_option, ResizeOption}
    ]};
opt({gravity, Gravity}) ->
    {"-gravity", ":gravity", [{gravity, Gravity}]};
opt({implode, Factor}) ->
    {"-implode", ":factor", [{factor, Factor}]};
opt({interlace, Interlace}) ->
    {"-interlace", ":interlace", [{interlace, Interlace}]};
%% label: and PATTERN: are positional input specifiers, not "-" switches.
opt({label, Text}) ->
    {"label:\"" ++ Text ++ "\""};
opt(magnify) ->
    {"-magnify"};
opt('+matte') ->
    {"+matte"};
opt(matte) ->
    {"-matte"};
opt({median, Radius}) ->
    {"-median", ":radius", [{radius, Radius}]};
opt(negate) ->
    {"-negate"};
opt({opaque, Color}) ->
    {"-opaque", ":color", [{color, Color}]};
opt({output_directory, Dir}) ->
    {"-output-directory", ":output_directory", [{output_directory, Dir}]};
opt({pattern, Pattern}) ->
    {"PATTERN:" ++ Pattern};
opt({pointsize, Value}) ->
    {"-pointsize", ":value", [{value, Value}]};
opt({'+profile', Profile}) ->
    {"+profile", ":profile", [
        {profile, Profile}
    ]};
opt({quality, Quality}) ->
    {"-quality", ":quality", [{quality, Quality}]};
opt({'+raise', Width, Height}) ->
    {"+raise", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
opt({raise , Width, Height}) ->
    {"-raise", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
opt({resize, Width, Height}) ->
    {"-resize", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
opt({rotate, Degrees}) ->
    {"-rotate", ":degrees", [{degrees, Degrees}]};
opt({sharpen, Radius}) ->
    {"-sharpen", ":radius", [{radius, Radius}]};
opt({sharpen, Radius, Sigma}) ->
    {"-sharpen", ":radiusx:sigma", [
        {radius, Radius},
        {sigma, Sigma}
    ]};
opt({size, Width, Height}) ->
    {"-size", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
opt({spread, Amount}) ->
    {"-spread", ":amount", [{amount, Amount}]};
opt(strip) ->
    {"-strip"};
opt({swirl, Degrees}) ->
    {"-swirl", ":degrees", [{degrees, Degrees}]};
opt({thumbnail, Width, Height}) ->
    {"-thumbnail", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
opt({transparent, Color}) ->
    {"-transparent", ":color", [{color, Color}]};
opt({type, Type}) ->
    {"-type", ":type", [{type, Type}]};
opt({watermark, Width, Height}) ->
    {"-watermark", ":widthx:height", [
        {width, Width},
        {height, Height}
    ]};
%% Final clause: sine-wave distortion.
%% (This revision also removes extraction residue appended after the
%% closing period that broke compilation.)
opt({wave, Amplitude, WaveLength}) ->
    {"-wave", ":amplitudex:wave_length", [
        {amplitude, Amplitude},
        {wave_length, WaveLength}
    ]}.
%% Given an integer consisting of 4 digits, we need to maximize it in
%% 24 hour format. For example, 4372 should return a String of the
%% form 23:47, which is the maximum 24 hour value that can be obtained
%% from the given integer. Assume the given integer always contains
%% exactly 4 digits.
-module(hackerlrank_clock).
-include_lib("eunit/include/eunit.hrl").
%% API
-export([max/1]).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Build the largest valid 24-hour time ("HH:MM") from the four
%% digits in Digits. split_hours/1 picks the best hour pair and
%% suggests the two leftover digits for the minutes; if those cannot
%% form a value below 60 the whole input is rejected.
%% Returns the formatted string or {error, unsplittable}.
max(Digits) ->
    case split_hours(Digits) of
        {ok, {Hours, SuggestedMin}} ->
            case minutes(SuggestedMin) of
                {ok, Minutes} ->
                    %% Hours and Minutes are two-element digit lists,
                    %% so the four ~p placeholders consume all four.
                    lists:flatten(io_lib:format("~p~p:~p~p", Hours ++ Minutes));
                {error, unsplittable} = Error ->
                    Error
            end;
        {error, _Reason} = Error ->
            Error
    end.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% Pick the hour digits. Each pattern is {TensDigit, MaxOnesDigit}:
%% hours starting with 2 go up to 23, those starting with 1 or 0 up
%% to 19/09. The most valuable tens digit is tried first, so the
%% first success yields the maximum possible hour.
split_hours(Digits) ->
    Patterns = [{2, 3}, {1, 9}, {0, 9}],
    Sorted = lists:sort(Digits),
    split_hours(Sorted, Patterns).

%% Try each {HourDec, HourMax} pattern against the ascending digits.
%% Returns {ok, {[Tens, Ones], RemainingTwoDigits}} on success, or
%% {error, unsplittable} when no pattern can be satisfied.
split_hours(_Digits, [] = _Patterns) ->
    {error, unsplittable};
split_hours(Digits, [{HourDec, HourMax} | Patterns]) ->
    case split_val(Digits, HourDec) of
        {_, []} ->
            %% No digit >= HourDec is available; try the next pattern.
            split_hours(Digits, Patterns);
        {HDigits, [HourDec | Tail]} ->
            %% The exact tens digit is present. Arrange the remaining
            %% digits in descending order and take the largest one
            %% that still fits as the ones digit (=< HourMax).
            RevDigits = lists:reverse(Tail) ++ HDigits,
            case split_max(RevDigits, HourMax) of
                {_, []} ->
                    split_hours(Digits, Patterns);
                {MinDigits, [Max | MinTail]} ->
                    {ok, {[HourDec, Max], MinDigits ++ MinTail}}
            end;
        _ ->
            %% A digit >= HourDec exists but not HourDec itself.
            split_hours(Digits, Patterns)
    end.
%% Split an ascending digit list at the first element >= Val.
%% Returns {SmallerDigitsReversed, RestStartingAtFirstMatch}.
split_val(Digits, Val) ->
    {Smaller, Rest} = lists:splitwith(fun(D) -> D < Val end, Digits),
    {lists:reverse(Smaller), Rest}.
%% Split a descending digit list at the first element =< Max.
%% Returns {SkippedTooBigDigitsReversed, RestStartingAtFirstFit}.
split_max(Digits, Max) ->
    {TooBig, Rest} = lists:splitwith(fun(D) -> D > Max end, Digits),
    {lists:reverse(TooBig), Rest}.
%% Choose the largest arrangement of the two leftover digits that is
%% a valid minute value (< 60). Both orderings are tried, largest
%% first; {error, unsplittable} when neither is below 60.
minutes([A, B]) ->
    Descending = lists:reverse(lists:sort([{A, B}, {B, A}])),
    pick_valid_minutes(Descending).

pick_valid_minutes([{Tens, Ones} | Rest]) ->
    case Tens * 10 + Ones < 60 of
        true -> {ok, [Tens, Ones]};
        false -> pick_valid_minutes(Rest)
    end;
pick_valid_minutes([]) ->
    {error, unsplittable}.
%% Unit tests for split_val/2: the first element >= Val starts the
%% second list; the reversed smaller prefix is the first list.
split_val_test_() ->
    [
        ?_assertEqual({[1], [2, 3, 4]}, split_val([1, 2, 3, 4], 2)),
        ?_assertEqual({[], [1, 2, 3, 4]}, split_val([1, 2, 3, 4], 1)),
        ?_assertEqual({[3, 2, 1], [4]}, split_val([1, 2, 3, 4], 4)),
        ?_assertEqual({[4, 3, 2, 1], []}, split_val([1, 2, 3, 4], 5))
    ].

%% Unit tests for split_max/2: leading digits > Max are skipped and
%% returned reversed; the rest starts at the first digit =< Max.
split_max_test_() ->
    [
        ?_assertEqual({[], [4, 3, 2, 1]}, split_max([4, 3, 2, 1], 4)),
        ?_assertEqual({[], [4, 3, 2, 1]}, split_max([4, 3, 2, 1], 5)),
        ?_assertEqual({[4], [3, 2, 1]}, split_max([4, 3, 2, 1], 3)),
        ?_assertEqual({[2, 3, 4], [1]}, split_max([4, 3, 2, 1], 1)),
        ?_assertEqual({[1, 2, 3, 4], []}, split_max([4, 3, 2, 1], 0))
    ].

%% Unit tests for split_hours/1: expects
%% {ok, {[HourTens, HourOnes], MinuteDigits}} or {error, unsplittable}
%% when no valid hour can be formed from the four digits.
split_hours_test_() ->
    [
        ?_assertEqual({ok, {[2, 3], [4, 1]}}, split_hours([1, 2, 3, 4])),
        ?_assertEqual({ok, {[2, 3], [4, 7]}}, split_hours([4, 3, 7, 2])),
        ?_assertEqual({ok, {[1, 9], [9, 5]}}, split_hours([9, 9, 5, 1])),
        ?_assertEqual({ok, {[0, 0], [0, 0]}}, split_hours([0, 0, 0, 0])),
        ?_assertEqual({ok, {[1, 0], [0, 0]}}, split_hours([0, 0, 0, 1])),
        ?_assertEqual({ok, {[2, 0], [0, 0]}}, split_hours([0, 0, 0, 2])),
        ?_assertEqual({ok, {[2, 0], [4, 0]}}, split_hours([0, 2, 0, 4])),
        ?_assertEqual({ok, {[1, 9], [0, 0]}}, split_hours([0, 1, 0, 9])),
        ?_assertEqual({error, unsplittable}, split_hours([9, 9, 5, 2])),
        ?_assertEqual({error, unsplittable}, split_hours([7, 6, 4, 2]))
    ].

%% Unit tests for minutes/1: picks the largest two-digit arrangement
%% below 60.
minutes_test_() ->
    [
        ?_assertEqual({ok, [5, 9]}, minutes([5, 9])),
        ?_assertEqual({ok, [5, 9]}, minutes([9, 5])),
        ?_assertEqual({ok, [0, 6]}, minutes([6, 0])),
        ?_assertEqual({error, unsplittable}, minutes([6, 7]))
    ].
%% End-to-end tests for max/1: formatted maximum time on success,
%% {error, unsplittable} when no valid 24-hour time exists.
%% (Removed stray non-Erlang residue that followed the final '.'
%% and broke compilation.)
max_test_() ->
    [
        ?_assertEqual("23:47", max([4, 3, 7, 2])),
        ?_assertEqual("19:59", max([9, 9, 5, 1])),
        ?_assertEqual({error, unsplittable}, max([7, 6, 4, 2])),
        ?_assertEqual({error, unsplittable}, max([7, 6, 3, 2]))
    ].
-module(week1).
% Shapes
-export([area/1, perimeter/1, enclose/1]).
% Bits
-export([bits/1, bits2/1]).
% Tests
-export([test/0]).
% Shapes
-type point() :: {X :: number(), Y :: number()}.
-type circle() :: {'circle', Center :: point(), Readius :: number()}.
-type rectangle() ::
{'rectangle', Center :: point(), Height :: number(), Width :: number()}.
-type triangle() ::
{'triangle', A :: point(), B :: point(), C :: point()}.
-type shape() :: circle() | rectangle() | triangle().
-export_type([shape/0]).
%% @doc Area of a shape. Triangles use Heron's formula:
%% sqrt(S(S-a)(S-b)(S-c)) where S is the semi-perimeter.
-spec area(Shape :: shape()) -> number().
area({circle, {_X, _Y}, R}) ->
    math:pi() * R * R;
area({rectangle, {_X, _Y}, H, W}) ->
    H * W;
area({triangle, A, B, C}) ->
    LA = length(A, B),
    LB = length(B, C),
    LC = length(A, C),
    %% Semi-perimeter for Heron's formula.
    S = (LA + LB + LC)/2,
    math:sqrt(S * (S - LA) * (S - LB) * (S - LC)).

%% @doc Perimeter of a shape (circumference for circles).
-spec perimeter(Shape :: shape()) -> number().
perimeter({circle, {_X, _Y}, R}) ->
    2 * math:pi() * R;
perimeter({rectangle, {_X, _Y}, H, W}) ->
    2 * (H + W);
perimeter({triangle, A, B, C}) ->
    length(A, B) + length(B, C) + length(A, C).

%% @doc Smallest axis-aligned rectangle that encloses the shape.
%% A rectangle encloses itself; a circle's box is a square of side
%% 2R centred on the circle.
-spec enclose(Shape :: shape()) -> rectangle().
enclose({circle, {_X, _Y} = C, R}) ->
    {rectangle, C, 2 * R, 2 * R};
enclose({rectangle, {_X, _Y}, _H, _W} = R) ->
    R;
enclose({triangle, {X1, Y1}, {X2, Y2}, {X3, Y3}}) ->
    %% The bounding box spans the extreme coordinates; its centre is
    %% the midpoint of the extremes.
    Xmin = min(X1, X2, X3),
    Xmax = max(X1, X2, X3),
    Ymin = min(Y1, Y2, Y3),
    Ymax = max(Y1, Y2, Y3),
    {rectangle,
     {avg(Xmin, Xmax), avg(Ymin, Ymax)}, Ymax - Ymin, Xmax - Xmin}.
% Bits
%% @doc Population count: the number of 1-bits in the binary
%% representation of a non-negative integer (tail-recursive).
-spec bits(X :: non_neg_integer()) -> non_neg_integer().
bits(X) when is_integer(X), X >= 0 ->
    bits(X, 0).

%% @doc Body-recursive population count, equivalent to bits/1 but
%% using stack space proportional to the bit width of the input.
-spec bits2(X :: non_neg_integer()) -> non_neg_integer().
bits2(0) ->
    0;
bits2(X) when is_integer(X), X > 0 ->
    (X band 1) + bits2(X bsr 1).

%% Internal functions

%% Tail-recursive loop: N band (N - 1) clears the lowest set bit, so
%% the loop runs exactly once per 1-bit.
bits(0, Count) ->
    Count;
bits(N, Count) ->
    bits(N band (N - 1), Count + 1).
%% Euclidean distance between two points in the plane.
length({Ax, Ay}, {Bx, By}) ->
    Dx = Bx - Ax,
    Dy = By - Ay,
    math:sqrt(Dx * Dx + Dy * Dy).

%% Smallest of three terms (standard term ordering).
min(A, B, C) ->
    lists:min([A, B, C]).

%% Largest of three terms (standard term ordering).
max(A, B, C) ->
    lists:max([A, B, C]).

%% Arithmetic mean of two numbers; always a float.
avg(A, B) ->
    (A + B) / 2.
%% Tests

%% @doc Smoke test: checks area/perimeter/enclose against precomputed
%% values for one shape of each kind, plus both popcount variants.
%% Crashes with badmatch on any regression; returns ok on success.
%% (Removed stray non-Erlang residue that followed 'ok.' and broke
%% compilation.)
test() ->
    C = {circle, {1.0, 2.0}, 2.5},
    R = {rectangle, {2.0, 1.0}, 2.0, 3.0},
    T = {triangle, {1.0, 1.0}, {3.0, 0.0}, {2.0, 2.0}},
    [[19.634954084936208,6.0,1.4999999999999998],
     [15.707963267948966,10.0,5.8863495173726745],
     [{rectangle,{1.0,2.0},5.0,5.0},
      {rectangle,{2.0,1.0},2.0,3.0},
      {rectangle,{2.0,1.0},2.0,2.0}]] =
        [ [ ?MODULE:F(X) || X <- [C, R, T] ]
          || F <- [area, perimeter, enclose] ],
    1 = bits(8),
    3 = bits(7),
    1 = bits2(8),
    3 = bits2(7),
    ok.
%% @doc Quaternion module - simplifies working with and perfoming math on quaternions.
%%
%% @copyright 2012 <NAME>
%% Licensed under the MIT license; see the LICENSE file for details.
-module(quaternion).
% ---------------------------------------------------------------------------------------------------------------------
% external api
-export([quat_to_list/1, to_quat/1, add/2, subtract/2, multiply/2, divide/2, reorient/2]).
-export([scale_rotation/2, norm/1, length/1, unit/1, conjugate/1, inverse/1, reciprocal/1]).
-export([compose/2, relative_to/2, rotate/2, from_axis_angle/2, from_axis_angle/3]).
-export([from_body_rates/1, from_body_rates/2, from_euler/1, from_euler/2, rad2deg/1, deg2rad/1, is_zero/1]).
-export([init/0]).
-export_type([quat/0]).
% A quaternion (w + xi + yj + zk)
-type quat() :: {
W :: float(),
X :: float(),
Y :: float(),
Z :: float()
}.
% ---------------------------------------------------------------------------------------------------------------------
-define(NORMALIZED_TOLERANCE, 0.0000001).
-define(IDENTITY, {1, 0, 0, 0}).
%% --------------------------------------------------------------------------------------------------------------------
%% NIF module
%% --------------------------------------------------------------------------------------------------------------------
% Don't enable this unless testing, or until _all_ functions are implemented in C++ too.
% (loading the NIF module actually replaces this module, so we lose the Erlang implementations if we load the C++ ones)
%-on_load(init/0).
%% @doc Load the NIF implementation of this module. The shared object
%% is looked up in the current directory first, then under ./ebin.
%% Returns ok on success; otherwise the result of the second load
%% attempt (possibly an error tuple) is returned.
%% NOTE(review): only {error, {load_failed, _}} triggers the fallback;
%% any other error tuple from the first attempt raises case_clause —
%% confirm that is acceptable.
init() ->
    case erlang:load_nif("./quaternion", 0) of
        {error, {load_failed, _}} ->
            erlang:load_nif("./ebin/quaternion", 0);
        ok -> ok
    end.
%% --------------------------------------------------------------------------------------------------------------------
%% External API
%% --------------------------------------------------------------------------------------------------------------------
%% @doc Flatten a quaternion tuple into a [W, X, Y, Z] list.
quat_to_list({W, X, Y, Z}) ->
    [W, X, Y, Z].

%% @doc Accept a quaternion in either tuple or list form and return
%% the canonical 4-tuple representation.
to_quat({_, _, _, _} = Quat) ->
    Quat;
to_quat([W, X, Y, Z]) ->
    {W, X, Y, Z}.
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Component-wise sum of two quaternions.
add({Aw, Ax, Ay, Az}, {Bw, Bx, By, Bz}) ->
    {Aw + Bw, Ax + Bx, Ay + By, Az + Bz}.

%% @doc Component-wise difference: first minus second.
subtract({Aw, Ax, Ay, Az}, {Bw, Bx, By, Bz}) ->
    {Aw - Bw, Ax - Bx, Ay - By, Az - Bz}.
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Quaternion multiplication (Hamilton product). When either
%% argument is a scalar the quaternion is scaled component-wise
%% instead. The Hamilton product is not commutative.
multiply(Factor, {W, X, Y, Z}) when is_number(Factor) ->
    {Factor * W, Factor * X, Factor * Y, Factor * Z};
multiply({_W, _X, _Y, _Z} = Quat, Factor) when is_number(Factor) ->
    multiply(Factor, Quat);
multiply({W1, X1, Y1, Z1}, {W2, X2, Y2, Z2}) ->
    %% (Q1 * Q2).w = (w1w2 - x1x2 - y1y2 - z1z2)
    %% (Q1 * Q2).x = (w1x2 + x1w2 + y1z2 - z1y2)
    %% (Q1 * Q2).y = (w1y2 - x1z2 + y1w2 + z1x2)
    %% (Q1 * Q2).z = (w1z2 + x1y2 - y1x2 + z1w2)
    {
        W1 * W2 - X1 * X2 - Y1 * Y2 - Z1 * Z2,
        W1 * X2 + X1 * W2 + Y1 * Z2 - Z1 * Y2,
        W1 * Y2 - X1 * Z2 + Y1 * W2 + Z1 * X2,
        W1 * Z2 + X1 * Y2 - Y1 * X2 + Z1 * W2
    }.

%% @doc Divide every component by Factor. Returns
%% {error, division_by_zero} when Factor is zero.
%% Fix: the original guard matched only the integer literal 0, so a
%% float 0.0 slipped through and raised badarith; `Factor == 0`
%% catches both numeric zeros.
divide({_, _, _, _}, Factor) when Factor == 0 ->
    {error, division_by_zero};
divide({W, X, Y, Z}, Factor) when is_number(Factor) ->
    {W / Factor, X / Factor, Y / Factor, Z / Factor}.
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Reorient q1's axis of rotation by rotating it by q2, but leave q1's angle of rotation intact.
%% NOTE(review): 2 * acos(W) recovers the angle only for a unit
%% quaternion (|W| =< 1) — confirm callers normalize first.
reorient({W, X, Y, Z}, {_, _, _, _}=Q2) ->
    %% Recover the rotation angle from the scalar part, rotate the
    %% normalized axis by Q2, then rebuild the quaternion.
    OriginalRotation = 2 * math:acos(W),
    Axis = rotate(vector:unit({X, Y, Z}), Q2),
    from_axis_angle(Axis, OriginalRotation).

%% @doc Scale the rotation of the quaternion by the given factor. Note: This is not the same as multiplying.
scale_rotation(Factor, {W, X, Y, Z}) when is_integer(Factor); is_float(Factor) ->
    %% Same decomposition as reorient/2, but the axis stays put and
    %% the angle is multiplied by Factor.
    OriginalRotation = 2 * math:acos(W),
    Unit = vector:unit({X, Y, Z}),
    from_axis_angle(Unit, OriginalRotation * Factor).
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Sum of the squared components. Useful in optimization cases
%% because it avoids the sqrt of norm/1. Always returns a float.
squared_norm({W, X, Y, Z}) ->
    lists:sum([math:pow(Component, 2) || Component <- [W, X, Y, Z]]).

%% @doc Magnitude (Euclidean length) of the quaternion.
norm({_, _, _, _} = Quat) ->
    math:sqrt(squared_norm(Quat)).

%% @doc Alias for norm/1.
length(Quat) ->
    norm(Quat).
% ---------------------------------------------------------------------------------------------------------------------
% @doc Returns a unit quaternion in the same direction as Quat.
unit({_, _, _, _} = Quat) ->
    QLS = squared_norm(Quat),
    unit(QLS, Quat).

%% @doc hidden
%% NOTE(review): squared_norm/1 always returns a float, so this
%% integer-0 clause can never match (0.0 does not match the pattern
%% 0). A zero quaternion therefore falls through to the clause below
%% and divides by sqrt(0.0) — confirm whether that is intended.
unit(0, {_, _, _, _} = Quat) ->
    Quat;
%% @doc hidden
unit(QLS, {_, _, _, _} = Quat) ->
    %% If the quaternion is already normalized (within tolerance),
    %% skip the sqrt and division and return it unchanged.
    Norm = abs(QLS - 1.0),
    case Norm < ?NORMALIZED_TOLERANCE of
        true ->
            Quat;
        _ ->
            divide(Quat, math:sqrt(QLS))
    end.
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Quaternion conjugate: the vector part is negated while the
%% scalar part is left intact.
conjugate({S, I, J, K}) ->
    {S, -I, -J, -K}.
%% @doc Conjugate scaled by 1/norm.
%% NOTE(review): the mathematical inverse is conjugate/norm^2, which
%% is what reciprocal/1 below computes — confirm the plain norm
%% divisor here is intentional (they coincide for unit quaternions).
inverse({_, _, _, _} = Quat) ->
    divide(conjugate(Quat), norm(Quat)).

% ---------------------------------------------------------------------------------------------------------------------

%% @doc Multiplicative reciprocal: conjugate divided by the squared
%% norm.
reciprocal({_, _, _, _} = Quat) ->
    divide(conjugate(Quat), squared_norm(Quat)).

%% @doc Get the quaternion which results from composing the rotations
%% represented by `First' and `Second'.
compose({_, _, _, _} = First, {_, _, _, _} = Second) ->
    multiply(First, Second).
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Get the quaternion representing the orientation of `Target'
%% relative to `Reference'.
relative_to({_, _, _, _} = Target, {_, _, _, _} = Reference) ->
    %% Conjugation: Reference * Target * conjugate(Reference).
    multiply(multiply(Reference, Target), conjugate(Reference)).

% ---------------------------------------------------------------------------------------------------------------------

%% @doc Rotates the vector by Rotation.
rotate({X, Y, Z}, {_, _, _, _} = Rotation) ->
    %% Embed the vector as a pure quaternion (zero scalar part),
    %% conjugate it by Rotation, then drop the scalar part again.
    {_, X1, Y1, Z1} = relative_to({0, X, Y, Z}, Rotation),
    {X1, Y1, Z1}.
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Converts from an axis and angle (radians) to a quaternion.
from_axis_angle({_, _, _} = Axis, Angle) when is_number(Angle) ->
    from_axis_angle(radians, Axis, Angle).

%% @doc Converts from an axis and angle (radians) to a quaternion.
%% Half-angle construction: W = cos(Angle/2) and the vector part is
%% sin(Angle/2) * Axis. The axis is assumed (not checked) to be a
%% unit vector.
from_axis_angle(radians, {_, _, _} = Axis, Angle) when is_number(Angle) ->
    ComplexFactor = math:sin(Angle / 2),
    {X, Y, Z} = vector:multiply(ComplexFactor, Axis),
    {math:cos(Angle / 2), X, Y, Z};
%% @doc Converts from an axis and angle (degrees), to a quaternion.
from_axis_angle(degrees, Axis, Angle) when is_number(Angle) ->
    DegAngle = deg2rad(Angle),
    %% The `{_, _, _} = Axis' match below merely asserts that Axis is
    %% a 3-tuple before delegating to the radians clause.
    from_axis_angle(radians, {_, _, _} = Axis, DegAngle).
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Converts from body rates (radians) to a quaternion.
from_body_rates({_, _, _} = Vec) ->
    from_body_rates(radians, Vec).

%% @doc Converts from body rates (radians) to a quaternion.
from_body_rates(radians, {X, Y, Z} = Vec) ->
    case vector:is_zero(Vec) of
        true ->
            %% No rotation at all: the identity quaternion.
            ?IDENTITY;
        _ ->
            %FIXME: THIS IS WRONG!
            %% NOTE(review): the axes are permuted to {Y, Z, X} here;
            %% per the FIXME above this mapping is known to be
            %% incorrect and still needs to be revisited.
            Vec1 = {Y, Z, X},
            Speed = vector:norm(Vec1),
            Axis = vector:divide(Vec1, Speed),
            from_axis_angle(Axis, Speed)
    end;
%% @doc Converts from body rates (degrees) to a quaternion.
from_body_rates(degrees, {X, Y, Z}) ->
    from_body_rates(radians, {deg2rad(X), deg2rad(Y), deg2rad(Z)}).
% ---------------------------------------------------------------------------------------------------------------------
%% @doc Converts from a vector of euler angles (radians) to a quaternion.
from_euler({_, _, _} = Vec) ->
    from_euler(radians, Vec).

%% @doc Converts from a vector of euler angles (radians) to a quaternion.
%%
%% Based on: http://www.euclideanspace.com/maths/geometry/rotations/conversions/eulerToQuaternion/index.htm
%% The result is assembled from products of the half-angle sines and
%% cosines of heading, pitch and roll.
from_euler(radians, {Heading, Pitch, Roll}) ->
    HalfHeading = Heading / 2,
    HalfPitch = Pitch / 2,
    HalfRoll = Roll / 2,
    CosHalfHeading = math:cos(HalfHeading),
    CosHalfPitch = math:cos(HalfPitch),
    CosHalfRoll = math:cos(HalfRoll),
    SinHalfHeading = math:sin(HalfHeading),
    SinHalfPitch = math:sin(HalfPitch),
    SinHalfRoll = math:sin(HalfRoll),
    {
        CosHalfHeading * CosHalfPitch * CosHalfRoll - SinHalfHeading * SinHalfPitch * SinHalfRoll,
        SinHalfHeading * SinHalfPitch * CosHalfRoll + CosHalfHeading * CosHalfPitch * SinHalfRoll,
        SinHalfHeading * CosHalfPitch * CosHalfRoll + CosHalfHeading * SinHalfPitch * SinHalfRoll,
        CosHalfHeading * SinHalfPitch * CosHalfRoll - SinHalfHeading * CosHalfPitch * SinHalfRoll
    };
%% @doc Converts from a vector of euler angles (degrees) to a quaternion.
from_euler(degrees, {Heading, Pitch, Roll}) ->
    from_euler(radians, {deg2rad(Heading), deg2rad(Pitch), deg2rad(Roll)}).
% ---------------------------------------------------------------------------------------------------------------------
%% @doc True only for the all-integer-zero quaternion; any other
%% 4-tuple (including one holding float 0.0) yields false, matching
%% the exact-equality semantics of the original pattern match.
is_zero({W, X, Y, Z}) ->
    W =:= 0 andalso X =:= 0 andalso Y =:= 0 andalso Z =:= 0.
%% --------------------------------------------------------------------------------------------------------------------
%% Internal API
%% --------------------------------------------------------------------------------------------------------------------
%%% @doc Convert radians to degrees.
rad2deg(Radians) ->
    Radians * (180 / math:pi()).

%%% @doc Convert degrees to radians.
%%% (Fixed copy-pasted comment that claimed the opposite conversion,
%%% and removed stray non-Erlang residue after the final '.'.)
deg2rad(Degrees) ->
    Degrees * (math:pi() / 180).
-module(problem2015_13).
-export([solve1/1, solve2/1]).
-type neighbour() :: string().
-type neighbours() :: [ neighbour() ].
-type mod() :: integer().
-type neighbour_mod() :: { neighbour(), mod(), neighbour() }.
-type neighbour_mods_map() :: #{ { neighbour(), neighbour() } := mod() }.
%%% COMMON
%% @doc Parse one description line such as
%% "Alice would gain 54 happiness units by sitting next to Bob."
%% into a {Name, SignedPoints, Neighbour} triple; "lose" negates the
%% point value.
-spec parse_neighbour_mod( string() ) -> neighbour_mod().
parse_neighbour_mod(Line) ->
    [Name, _, Direction, Points, _, _, _, _, _, _, Neighbour] =
        string:tokens(Line, " ."),
    Delta = erlang:list_to_integer(Points),
    Signed =
        case Direction of
            "gain" -> Delta;
            "lose" -> -Delta
        end,
    {Name, Signed, Neighbour}.
%% @doc Parse the whole puzzle input (one relation per line) into a
%% map from {Name, Neighbour} to the happiness modifier.
-spec parse_neighbour_mods( string() ) -> neighbour_mods_map().
parse_neighbour_mods( Input ) ->
    Lines = string:tokens( Input, "\n" ),
    lists:foldl( fun( Line, Map ) ->
                     { Name, Mod, Neighbour } = parse_neighbour_mod( Line ),
                     Map#{ { Name, Neighbour } => Mod }
                 end,
                 #{},
                 Lines ).

%% @doc Happiness modifier that Neighbour2 causes for Neighbour1.
%% Crashes ({badkey, _}) when the pair is not in the map.
-spec get_mod( neighbour(), neighbour(), neighbour_mods_map() ) -> mod().
get_mod( Neighbour1, Neighbour2, ModsMap ) ->
    maps:get( { Neighbour1, Neighbour2 }, ModsMap ).
%% @doc Distinct, sorted list of every guest appearing as the first
%% element of a {Name, Neighbour} key.
-spec get_neighbours( neighbour_mods_map() ) -> neighbours().
get_neighbours(NeighbourModsMap) ->
    lists:usort([Name || {Name, _} <- maps:keys(NeighbourModsMap)]).
%% @doc Walk the seating zipper once, summing for every person the
%% modifiers caused by the left and right neighbour.
%% NOTE(review): this relies on zipper:prev/next wrapping around at
%% the ends so the table behaves as a ring — confirm the zipper
%% module is circular.
-spec calc_happinness_impl( zipper:zipper( neighbour() ), neighbour_mods_map() ) -> mod().
calc_happinness_impl( NeighboursZipper, NeighbourModsMap ) ->
    Current = zipper:get( NeighboursZipper ),
    Left = zipper:get( zipper:prev( NeighboursZipper ) ),
    Right = zipper:get( zipper:next( NeighboursZipper ) ),
    LeftMod = get_mod( Current, Left, NeighbourModsMap ),
    RightMod = get_mod( Current, Right, NeighbourModsMap ),
    Mod = LeftMod + RightMod,
    case zipper:is_last( NeighboursZipper ) of
        true -> Mod;
        false -> Mod + calc_happinness_impl( zipper:next( NeighboursZipper ), NeighbourModsMap )
    end.

%% @doc Total happiness of one particular seating order.
-spec calc_happinness( neighbours(), neighbour_mods_map() ) -> mod().
calc_happinness( Neighbours, NeighbourModsMap ) ->
    NeighboursZipper = zipper:from_list( Neighbours ),
    calc_happinness_impl( NeighboursZipper, NeighbourModsMap ).

%% Brute force: evaluate every permutation of the guests and keep the
%% best total happiness.
solve( NeighbourModsMap ) ->
    Neighbours = get_neighbours( NeighbourModsMap ),
    NeigbhourPermutations = listz:permutations( Neighbours ),
    AllPossibleHappinnessVariants = lists:map( fun( Permutation ) -> calc_happinness( Permutation, NeighbourModsMap ) end, NeigbhourPermutations ),
    lists:max( AllPossibleHappinnessVariants ).
%%% PART 1
%% @doc Part 1: maximum total happiness over all seating orders.
-spec solve1( string() ) -> integer().
solve1( Input ) ->
    NeighbourModsMap = parse_neighbour_mods( Input ),
    solve( NeighbourModsMap ).

%%% PART 2

%% @doc Seat MyName at the table with neutral (0) modifiers in both
%% directions towards every existing guest.
-spec add_me_to_mods_map( neighbour(), neighbour_mods_map() ) -> neighbour_mods_map().
add_me_to_mods_map( MyName, NeighbourModsMap ) ->
    Neighbours = get_neighbours( NeighbourModsMap ),
    lists:foldl(
        fun( Neighbour, AccMap ) ->
            AccMap#{ { Neighbour, MyName } => 0, { MyName, Neighbour } => 0 }
        end,
        NeighbourModsMap,
        Neighbours ).

%% @doc Part 2: same as part 1 but with an apathetic "me" seated too.
-spec solve2( string() ) -> integer().
solve2( Input ) ->
    NeighbourModsMap = parse_neighbour_mods( Input ),
    NeighbourModsMapWithMe = add_me_to_mods_map( "me", NeighbourModsMap ),
    solve( NeighbourModsMapWithMe ).
%%% TESTS
-include_lib("eunit/include/eunit.hrl").
%% Example input from the puzzle description.
test_input() -> "
Alice would gain 54 happiness units by sitting next to Bob.
Alice would lose 79 happiness units by sitting next to Carol.
Alice would lose 2 happiness units by sitting next to David.
Bob would gain 83 happiness units by sitting next to Alice.
Bob would lose 7 happiness units by sitting next to Carol.
Bob would lose 63 happiness units by sitting next to David.
Carol would lose 62 happiness units by sitting next to Alice.
Carol would gain 60 happiness units by sitting next to Bob.
Carol would gain 55 happiness units by sitting next to David.
David would gain 46 happiness units by sitting next to Alice.
David would lose 7 happiness units by sitting next to Bob.
David would gain 41 happiness units by sitting next to Carol.".

%% The example's optimal arrangement yields 330 happiness units.
%% (Removed stray non-Erlang residue that followed the final '.'.)
solve1_test_() ->
    [ ?_assertEqual( 330, solve1( test_input() ) ) ].
-module(regex2post).
-include_lib("eunit/include/eunit.hrl").
%% API
-export([convert/1]).
%% @doc
%% Converts given regexp to postfix notation later used for building regular expression graph.
%% Alternation '+' stays binary, concatenation is made explicit as
%% '.', and the Kleene star '*' remains a postfix unary operator.
convert([]) -> [];
convert(Input) -> convert(Input, 0, [], []).

%% Accumulator-based conversion. WrittenCount counts operands emitted
%% since the last binary operator was resolved, OperatorStack holds
%% pending binary operators, and Output is the postfix result built
%% in reverse.
convert([], WrittenCount, OperatorStack, Output) ->
    %% Flush the remaining operators. If two operands were written
    %% with no operator left to join them, add an explicit
    %% concatenation (prepended here because the list is reversed).
    PostfixNotation = lists:append(lists:reverse(OperatorStack), Output),
    case (length(OperatorStack) =:= 0) and (WrittenCount > 1) of
        true -> Result = concatenation(PostfixNotation);
        false -> Result = PostfixNotation
    end,
    lists:reverse(Result);
convert([Char | Input], WrittenCount, OperatorStack, Output) ->
    case is_unary_operator(Char) of
        true -> convert(Input, WrittenCount, OperatorStack, [Char | Output]);
        false ->
            case WrittenCount =:= 2 of
                true ->
                    %% Two operands are pending: resolve one operator
                    %% (or emit a concatenation) before reading on.
                    {LeftOperators, NewOutput} = append_operator(OperatorStack, Output),
                    convert([Char | Input], 1, LeftOperators, NewOutput);
                false ->
                    {LeftInput, NewWrittenCount, NewOperatorStack, NewOutput} =
                        consume_next_char([Char | Input], WrittenCount, OperatorStack, Output),
                    convert(LeftInput, NewWrittenCount, NewOperatorStack, NewOutput)
            end
    end.
%% @doc
%% Appends operator from stack to outputted postfix notation.
%% With an empty stack the two pending operands are joined by an
%% explicit concatenation operator instead.
append_operator(OperatorStack, Output) ->
    case length(OperatorStack) > 0 of
        true ->
            [Operator | LeftOperators] = OperatorStack,
            {LeftOperators, [Operator | Output]};
        false ->
            {OperatorStack, concatenation(Output)}
    end.

%% @doc
%% Reads and processes next char from regular expression.
%% Returns the updated {RemainingInput, WrittenCount, OperatorStack,
%% Output} accumulator state.
consume_next_char([Char | Input], WrittenCount, OperatorStack, Output) ->
    case Char of
        $( ->
            %% Recursively convert the parenthesized group and splice
            %% its postfix form into the output as a single operand.
            [Inside, Outside] = extract_from_parentheses(Input),
            PostfixOfInside = lists:reverse(convert(Inside)),
            NewOutput = lists:append(PostfixOfInside, Output),
            {Outside, WrittenCount + 1, OperatorStack, NewOutput};
        BinaryOperator when BinaryOperator =:= $+ ->
            %% Alternation is deferred on the operator stack.
            {Input, WrittenCount, [BinaryOperator | OperatorStack], Output};
        UnaryOperator when UnaryOperator =:= $* ->
            %% The star is already postfix; emit it directly.
            {Input, WrittenCount, OperatorStack, [UnaryOperator | Output]};
        CharLiteral ->
            {Input, WrittenCount + 1, OperatorStack, [CharLiteral | Output]}
    end.
%% @doc
%% Split Input at the ')' matching an already-consumed '(' and return
%% [Inside, Remainder]. Nested parentheses are tracked with a depth
%% counter.
extract_from_parentheses(Input) ->
    extract_from_parentheses(Input, [], 0).

extract_from_parentheses([$( | Rest], Seen, Depth) ->
    extract_from_parentheses(Rest, [$( | Seen], Depth + 1);
extract_from_parentheses([$) | Rest], Seen, 0) ->
    [lists:reverse(Seen), Rest];
extract_from_parentheses([$) | Rest], Seen, Depth) ->
    extract_from_parentheses(Rest, [$) | Seen], Depth - 1);
extract_from_parentheses([Ch | Rest], Seen, Depth) ->
    extract_from_parentheses(Rest, [Ch | Seen], Depth).
%% @doc
%% A unary postfix operator — currently only the Kleene star.
is_unary_operator(Char) ->
    Char =:= $*.

%% @doc
%% Prepend the explicit concatenation operator '.'.
concatenation(Output) ->
    "." ++ Output.
%% TEST

%% End-to-end checks for convert/1: literals, grouping, alternation
%% ('+') and the Kleene star, with '.' as explicit concatenation.
%% (Removed stray non-Erlang residue that followed the final '.'
%% and broke compilation.)
re2post_test() ->
    [
        ?assert(convert("") =:= []),
        ?assert(convert("a") =:= "a"),
        ?assert(convert("(a)") =:= "a"),
        ?assert(convert("((a))") =:= "a"),
        ?assert(convert("aaa") =:= "aa.a."),
        ?assert(convert("a(bb)") =:= "abb.."),
        ?assert(convert("a+c") =:= "ac+"),
        ?assert(convert("b+c+d") =:= "bc+d+"),
        ?assert(convert("(aa)+(bb)") =:= "aa.bb.+"),
        ?assert(convert("a(b+(cc)*)d(aa)*") =:= "abcc.*+.d.aa.*.")
    ].
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2007-2020. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
-module(beam_trim).
-export([module/2]).
-import(lists, [any/2,member/2,reverse/1,reverse/2,splitwith/2,sort/1]).
-record(st,
{safe :: cerl_sets:set(beam_asm:label()) %Safe labels.
}).
-spec module(beam_utils:module_code(), [compile:option()]) ->
                    {'ok',beam_utils:module_code()}.

%% Compiler-pass entry point: apply the stack-trimming optimization
%% to every function of the module.
module({Mod,Exp,Attr,Fs0,Lc}, _Opts) ->
    Fs = [function(F) || F <- Fs0],
    {ok,{Mod,Exp,Attr,Fs,Lc}}.

%% Trim one function. The set of safe labels is computed once per
%% function; on any crash the function name/arity is printed before
%% re-raising, to make compiler failures easier to diagnose.
function({function,Name,Arity,CLabel,Is0}) ->
    try
        St = #st{safe=safe_labels(Is0, [])},
        Is = trim(Is0, St, []),
        {function,Name,Arity,CLabel,Is}
    catch
        Class:Error:Stack ->
            io:fwrite("Function: ~w/~w\n", [Name,Arity]),
            erlang:raise(Class, Error, Stack)
    end.
%% Optimize a sequence that begins with consecutive kill instructions:
%% try to replace the kills with a trim of the stack frame (plus any
%% moves needed to rescue live slots), falling back to the original
%% kills when no recipe is applicable.
trim([{kill,_}|_]=Is0, St, Acc) ->
    {Kills0,Is1} = splitwith(fun({kill,_}) -> true;
                                (_) -> false
                             end, Is0),
    Kills = sort(Kills0),
    try
        %% Find out the size and layout of the stack frame.
        %% Example of a layout:
        %%
        %%    [{kill,{y,0}},{dead,{y,1}},{live,{y,2}},{kill,{y,3}}]
        %%
        %% That means that y0 and y3 are to be killed, that y1
        %% has been killed previously, and that y2 is live.
        {FrameSize,Layout} = frame_layout(Is1, Kills, St),

        %% Calculate all recipes that are not worse in terms
        %% of estimated execution time. The recipes are ordered
        %% in descending order from how much they trim.
        IsNotRecursive = is_not_recursive(Is1),
        Recipes = trim_recipes(Layout, IsNotRecursive),

        %% Try the recipes in order. A recipe may not work out because
        %% a register that was previously killed may be
        %% resurrected. If that happens, the next recipe, which trims
        %% less, will be tried.
        try_remap(Recipes, Is1, FrameSize)
    of
        {Is,TrimInstr} ->
            %% One of the recipes was applied.
            trim(Is, St, reverse(TrimInstr)++Acc)
    catch
        not_possible ->
            %% No recipe worked out. Use the original kill
            %% instructions.
            trim(Is1, St, reverse(Kills, Acc))
    end;
trim([I|Is], St, Acc) ->
    trim(Is, St, [I|Acc]);
trim([], _, Acc) ->
    reverse(Acc).
%% is_not_recursive([Instruction]) -> true|false.
%%  Test whether the next call or apply instruction may
%%  do a recursive call. Return `true` if the call is
%%  definitely not recursive, and `false` otherwise.
is_not_recursive([{call_ext,_,Ext}|_]) ->
    case Ext of
        {extfunc,M,F,A} ->
            %% A pure BIF can never call back into this module.
            erl_bifs:is_pure(M, F, A);
        _ ->
            false
    end;
%% Skip over blocks and line annotations to find the call.
is_not_recursive([{block,_}|Is]) -> is_not_recursive(Is);
is_not_recursive([{line,_}|Is]) -> is_not_recursive(Is);
is_not_recursive(_) -> false.

%% trim_recipes([{kill,R}|{live,R}|{dead,R}]) -> [Recipe].
%%  Recipe = {Kills,NumberToTrim,Moves}
%%  Kills = [{kill,Y}]
%%  Moves = [{move,SrcY,DstY}]
%%
%%  Calculate how to best trim the stack and kill the correct
%%  Y registers. Return a list of possible recipes. The best
%%  recipe (the one that trims the most) is first in the list.
%%  Recipes whose estimated cost exceeds the original kill
%%  sequence are filtered out.
trim_recipes(Layout, IsNotRecursive) ->
    Recipes = construct_recipes(Layout, 0, [], []),
    NumOrigKills = length([I || {kill,_}=I <- Layout]),
    IsTooExpensive = is_too_expensive_fun(IsNotRecursive),
    [R || R <- Recipes,
          not is_too_expensive(R, NumOrigKills, IsTooExpensive)].
%% Walk the frame layout from slot y0 upwards, emitting one recipe
%% per slot that can be trimmed away. Note the repeated Trim0 in the
%% clause heads: a clause only matches while the entry's slot number
%% equals the running trim count, i.e. while the trimmable slots are
%% contiguous from the bottom of the frame.
construct_recipes([{kill,{y,Trim0}}|Ks], Trim0, Moves, Acc) ->
    Trim = Trim0 + 1,
    Recipe = {Ks,Trim,Moves},
    construct_recipes(Ks, Trim, Moves, [Recipe|Acc]);
construct_recipes([{dead,{y,Trim0}}|Ks], Trim0, Moves, Acc) ->
    Trim = Trim0 + 1,
    Recipe = {Ks,Trim,Moves},
    construct_recipes(Ks, Trim, Moves, [Recipe|Acc]);
construct_recipes([{live,{y,Trim0}=Src}|Ks0], Trim0, Moves0, Acc) ->
    %% A live slot can still be trimmed if its value can be rescued
    %% by moving it into the last dead slot further up the frame.
    case take_last_dead(Ks0) of
        none ->
            %% No more recipes are possible.
            Acc;
        {Dst,Ks} ->
            Trim = Trim0 + 1,
            Moves = [{move,Src,Dst}|Moves0],
            Recipe = {Ks,Trim,Moves},
            construct_recipes(Ks, Trim, Moves, [Recipe|Acc])
    end;
construct_recipes([], _, _, Acc) -> Acc.
%% Find the last {kill,_} or {dead,_} entry of the layout. Returns
%% {Reg, LayoutWithoutThatEntry}, or `none' when the layout is empty
%% or ends with a live slot.
take_last_dead(Layout) ->
    take_last_dead_1(lists:reverse(Layout)).

take_last_dead_1([{Tag,Reg}|Rev]) when Tag =:= kill; Tag =:= dead ->
    {Reg,lists:reverse(Rev)};
take_last_dead_1(_) ->
    none.
%% Is trimming too expensive?

%% Compare the cost of a recipe (its remaining kills plus its moves)
%% against the original kill sequence using the supplied cost policy.
is_too_expensive({Ks,_,Moves}, NumOrigKills, IsTooExpensive) ->
    NumKills = length([K || {kill,_}=K <- Ks]),
    NumMoves = length(Moves),
    IsTooExpensive(NumKills, NumMoves, NumOrigKills).

%% Build the cost policy fun used by is_too_expensive/3.
is_too_expensive_fun(true) ->
    %% The call is definitely not recursive (a pure BIF). Avoid
    %% trimming when the replacement sequence is likely to cost more
    %% than the original: the leading 1 accounts for the trim
    %% instruction itself, and any move adds a penalty of 1 so two
    %% kills are not traded for a move plus a trim.
    fun(NumKills, NumMoves, NumOrigKills) ->
            MovePenalty =
                case NumMoves of
                    0 -> 0;
                    _ -> 1
                end,
            1 + MovePenalty + NumKills + NumMoves > NumOrigKills
    end;
is_too_expensive_fun(false) ->
    %% The call may be recursive. In a recursive function that builds
    %% a deep stack, unused stack slots are very expensive, so bias
    %% towards trimming by not counting the trim instruction itself.
    fun(NumKills, NumMoves, NumOrigKills) ->
            NumKills + NumMoves > NumOrigKills
    end.
%% try_remap([Recipe], [Instruction], FrameSize) ->
%%       {[Instruction],[TrimInstruction]}.
%%  Try to renumber Y registers in the instruction stream. The
%%  first recipe that works will be used.
%%
%%  This function will issue a `not_possible` exception if none
%%  of the recipes were possible to apply.
try_remap([R|Rs], Is, FrameSize) ->
    {TrimInstr,Map} = expand_recipe(R, FrameSize),
    try
        {remap(Is, Map, []),TrimInstr}
    catch
        throw:not_possible ->
            %% The remapping touched a trimmed-away slot; fall back
            %% to the next (less aggressive) recipe.
            try_remap(Rs, Is, FrameSize)
    end;
try_remap([], _, _) -> throw(not_possible).

%% Turn a recipe into the instructions to emit before the remapped
%% code (remaining kills, rescue moves, then the trim) together with
%% the operand-mapping fun used for the renumbering.
expand_recipe({Layout,Trim,Moves}, FrameSize) ->
    Kills = [Kill || {kill,_}=Kill <- Layout],
    {Kills++reverse(Moves, [{trim,Trim,FrameSize-Trim}]),create_map(Trim, Moves)}.
%% Build the operand-mapping fun for a recipe. Y registers above the
%% trimmed area are renumbered down by Trim; {frame_size,N} shrinks
%% by Trim; any other operand passes through unchanged. A reference
%% to a trimmed-away slot throws `not_possible' unless a rescue move
%% relocated that slot's value.
create_map(Trim, []) ->
    fun({y,Y}) when Y < Trim -> throw(not_possible);
       ({y,Y}) -> {y,Y-Trim};
       ({frame_size,N}) -> N - Trim;
       (Any) -> Any
    end;
create_map(Trim, Moves) ->
    Map0 = [{Src,Dst-Trim} || {move,{y,Src},{y,Dst}} <- Moves],
    Map = maps:from_list(Map0),
    %% Move destinations now hold relocated values, so their old
    %% (pre-trim) numbers must not be referenced any more.
    IllegalTargets = cerl_sets:from_list([Dst || {move,_,{y,Dst}} <- Moves]),
    fun({y,Y0}) when Y0 < Trim ->
            case Map of
                #{Y0:=Y} -> {y,Y};
                #{} -> throw(not_possible)
            end;
       ({y,Y}) ->
            case cerl_sets:is_element(Y, IllegalTargets) of
                true -> throw(not_possible);
                false -> {y,Y-Trim}
            end;
       ({frame_size,N}) -> N - Trim;
       (Any) -> Any
    end.
%% Apply the operand-mapping fun to every instruction up to (and
%% including the tail after) the return instruction. The fun throws
%% `not_possible' when an instruction refers to a trimmed-away slot.
%% Instructions reproduced unchanged are left untouched by the
%% mapping on purpose.
remap([{'%',_}=I|Is], Map, Acc) ->
    remap(Is, Map, [I|Acc]);
remap([{block,Bl0}|Is], Map, Acc) ->
    Bl = remap_block(Bl0, Map, []),
    remap(Is, Map, [{block,Bl}|Acc]);
remap([{bs_get_tail,Src,Dst,Live}|Is], Map, Acc) ->
    I = {bs_get_tail,Map(Src),Map(Dst),Live},
    remap(Is, Map, [I|Acc]);
remap([{bs_start_match4,Fail,Live,Src,Dst}|Is], Map, Acc) ->
    I = {bs_start_match4,Fail,Live,Map(Src),Map(Dst)},
    remap(Is, Map, [I|Acc]);
remap([{bs_set_position,Src1,Src2}|Is], Map, Acc) ->
    I = {bs_set_position,Map(Src1),Map(Src2)},
    remap(Is, Map, [I|Acc]);
remap([{call_fun,_}=I|Is], Map, Acc) ->
    remap(Is, Map, [I|Acc]);
remap([{call,_,_}=I|Is], Map, Acc) ->
    remap(Is, Map, [I|Acc]);
remap([{call_ext,_,_}=I|Is], Map, Acc) ->
    remap(Is, Map, [I|Acc]);
remap([{apply,_}=I|Is], Map, Acc) ->
    remap(Is, Map, [I|Acc]);
remap([{bif,Name,Fail,Ss,D}|Is], Map, Acc) ->
    I = {bif,Name,Fail,[Map(S) || S <- Ss],Map(D)},
    remap(Is, Map, [I|Acc]);
remap([{gc_bif,Name,Fail,Live,Ss,D}|Is], Map, Acc) ->
    I = {gc_bif,Name,Fail,Live,[Map(S) || S <- Ss],Map(D)},
    remap(Is, Map, [I|Acc]);
remap([{get_map_elements,Fail,M,{list,L0}}|Is], Map, Acc) ->
    L = [Map(E) || E <- L0],
    I = {get_map_elements,Fail,Map(M),{list,L}},
    remap(Is, Map, [I|Acc]);
remap([{bs_init,Fail,Info,Live,Ss0,Dst0}|Is], Map, Acc) ->
    Ss = [Map(Src) || Src <- Ss0],
    Dst = Map(Dst0),
    I = {bs_init,Fail,Info,Live,Ss,Dst},
    remap(Is, Map, [I|Acc]);
remap([{bs_put=Op,Fail,Info,Ss}|Is], Map, Acc) ->
    I = {Op,Fail,Info,[Map(S) || S <- Ss]},
    remap(Is, Map, [I|Acc]);
remap([{kill,Y}|T], Map, Acc) ->
    remap(T, Map, [{kill,Map(Y)}|Acc]);
remap([{make_fun2,_,_,_,_}=I|T], Map, Acc) ->
    remap(T, Map, [I|Acc]);
remap([{deallocate,N}|Is], Map, Acc) ->
    %% The frame shrank, so the deallocate count must shrink too.
    I = {deallocate,Map({frame_size,N})},
    remap(Is, Map, [I|Acc]);
remap([{swap,Reg1,Reg2}|Is], Map, Acc) ->
    I = {swap,Map(Reg1),Map(Reg2)},
    remap(Is, Map, [I|Acc]);
remap([{test,Name,Fail,Ss}|Is], Map, Acc) ->
    I = {test,Name,Fail,[Map(S) || S <- Ss]},
    remap(Is, Map, [I|Acc]);
remap([{test,Name,Fail,Live,Ss,Dst}|Is], Map, Acc) ->
    I = {test,Name,Fail,Live,[Map(S) || S <- Ss],Map(Dst)},
    remap(Is, Map, [I|Acc]);
remap([return|_]=Is, _, Acc) ->
    %% Stop at the return instruction; the tail is kept unchanged.
    reverse(Acc, Is);
remap([{line,_}=I|Is], Map, Acc) ->
    remap(Is, Map, [I|Acc]).
remap_block([{set,Ds0,Ss0,Info}|Is], Map, Acc) ->
Ds = [Map(D) || D <- Ds0],
Ss = [Map(S) || S <- Ss0],
remap_block(Is, Map, [{set,Ds,Ss,Info}|Acc]);
remap_block([], _, Acc) -> reverse(Acc).
%% safe_labels([Instruction], Accumulator) -> gb_set()
%% Build a gb_set of safe labels. The code at a safe
%% label does not depend on the values in a specific
%% Y register, only that all Y registers are initialized
%% so that it safe to scan the stack when an exception
%% is generated.
%%
%% In other words, code at a safe label will continue
%% to work if Y registers have been renumbered and
%% the size of the stack frame has changed.
%% Scan the instruction stream and collect every label whose following
%% code is "safe" (see the comment above), returned as a cerl_sets set.
safe_labels([{label,L}|Is], Acc) ->
case is_safe_label(Is) of
true -> safe_labels(Is, [L|Acc]);
false -> safe_labels(Is, Acc)
end;
safe_labels([_|Is], Acc) ->
safe_labels(Is, Acc);
safe_labels([], Acc) -> cerl_sets:from_list(Acc).
%% Decide whether the code following a label is safe.  Comments and
%% line annotations are transparent; error generators are safe unless
%% their operand is a Y register; if_end has no operand and is always
%% safe; a block is safe when it touches no Y register; a call to a
%% known exit BIF is safe.  Anything else is conservatively unsafe.
is_safe_label([{'%',_}|Is]) ->
is_safe_label(Is);
is_safe_label([{line,_}|Is]) ->
is_safe_label(Is);
is_safe_label([{badmatch,{Tag,_}}|_]) ->
Tag =/= y;
is_safe_label([{case_end,{Tag,_}}|_]) ->
Tag =/= y;
is_safe_label([{try_case_end,{Tag,_}}|_]) ->
Tag =/= y;
is_safe_label([if_end|_]) ->
true;
is_safe_label([{block,Bl}|Is]) ->
is_safe_label_block(Bl) andalso is_safe_label(Is);
is_safe_label([{call_ext,_,{extfunc,M,F,A}}|_]) ->
erl_bifs:is_exit_bif(M, F, A);
is_safe_label(_) -> false.
%% A block is safe when none of its {set,Ds,Ss,_} instructions reads
%% or writes a Y register.
is_safe_label_block([{set,Ds,Ss,_}|Is]) ->
IsYreg = fun({y,_}) -> true;
(_) -> false
end,
%% This instruction is safe if the instruction
%% neither reads or writes Y registers.
not (any(IsYreg, Ss) orelse any(IsYreg, Ds)) andalso
is_safe_label_block(Is);
is_safe_label_block([]) -> true.
%% frame_layout([Instruction], [{kill,_}], St) ->
%% [{kill,Reg} | {live,Reg} | {dead,Reg}]
%% Figure out the layout of the stack frame.
frame_layout(Is, Kills, #st{safe=Safe}) ->
N = frame_size(Is, Safe),
%% A slot is "dead" when its value is not used by the code that
%% follows (see is_not_used/2 below).
IsKilled = fun(R) -> is_not_used(R, Is) end,
{N,frame_layout_1(Kills, 0, N, IsKilled, [])}.
%% Walk the frame slots 0..N-1 in order.  Slots named by the {kill,_}
%% instructions are recorded as-is; every other slot is classified as
%% {live,_} or {dead,_} via the IsKilled callback.
frame_layout_1([{kill,{y,Y}}=I|Ks], Y, N, IsKilled, Acc) ->
frame_layout_1(Ks, Y+1, N, IsKilled, [I|Acc]);
frame_layout_1(Ks, Y, N, IsKilled, Acc) when Y < N ->
R = {y,Y},
I = case IsKilled(R) of
false -> {live,R};
true -> {dead,R}
end,
frame_layout_1(Ks, Y+1, N, IsKilled, [I|Acc]);
frame_layout_1([], Y, Y, _, Acc) ->
frame_layout_2(Acc).
%% Acc arrives in reverse slot order (highest Y first), so this drops
%% the highest-numbered live slots from the layout before reversing.
frame_layout_2([{live,_}|Is]) -> frame_layout_2(Is);
frame_layout_2(Is) -> reverse(Is).
%% frame_size([Instruction], SafeLabels) -> FrameSize
%% Find out the frame size by looking at the code that follows.
%%
%% Implicitly, also check that the instructions are a straight
%% sequence of code that ends in a return. Any branches are
%% to safe labels (i.e., the code at those labels don't depend
%% on the contents of any Y register).
frame_size([{'%',_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{block,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{call_fun,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{call,_,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{call_ext,_,_}=I|Is], Safe) ->
%% An exit instruction ends the sequence before any deallocate is
%% seen, so the frame size cannot be determined.
case beam_jump:is_exit_instruction(I) of
true -> throw(not_possible);
false -> frame_size(Is, Safe)
end;
frame_size([{apply,_}|Is], Safe) ->
frame_size(Is, Safe);
%% Instructions with a fail label are only acceptable when the label
%% is safe; frame_size_branch/3 performs that check.
frame_size([{bif,_,{f,L},_,_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
frame_size([{gc_bif,_,{f,L},_,_,_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
frame_size([{test,_,{f,L},_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
frame_size([{test,_,{f,L},_,_,_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
frame_size([{bs_init,{f,L},_,_,_,_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
frame_size([{bs_put,{f,L},_,_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
frame_size([{kill,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{make_fun2,_,_,_,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{get_map_elements,{f,L},_,_}|Is], Safe) ->
frame_size_branch(L, Is, Safe);
frame_size([{deallocate,N}|_], _) ->
%% The deallocate instruction names the actual frame size.
N;
frame_size([{line,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{bs_start_match4,Fail,_,_,_}|Is], Safe) ->
case Fail of
{f,L} -> frame_size_branch(L, Is, Safe);
_ -> frame_size(Is, Safe)
end;
frame_size([{bs_set_position,_,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{bs_get_tail,_,_,_}|Is], Safe) ->
frame_size(Is, Safe);
frame_size([{swap,_,_}|Is], Safe) ->
frame_size(Is, Safe);
%% Any other instruction: give up on trimming.
frame_size(_, _) -> throw(not_possible).
%% A branch is acceptable when it has no real target (label 0) or when
%% its target is one of the safe labels.
frame_size_branch(0, Is, Safe) ->
frame_size(Is, Safe);
frame_size_branch(L, Is, Safe) ->
case cerl_sets:is_element(L, Safe) of
false -> throw(not_possible);
true -> frame_size(Is, Safe)
end.
%% is_not_used(Y, [Instruction]) -> true|false.
%% Test whether the value of Y is unused in the instruction sequence.
%% Return true if the value of Y is not used, and false if it is used.
%%
%% This function handles the same instructions as frame_size/2. It
%% assumes that any labels in the instructions are safe labels.
is_not_used(Y, [{'%',_}|Is]) ->
is_not_used(Y, Is);
is_not_used(Y, [{apply,_}|Is]) ->
is_not_used(Y, Is);
is_not_used(Y, [{bif,_,{f,_},Ss,Dst}|Is]) ->
is_not_used_ss_dst(Y, Ss, Dst, Is);
is_not_used(Y, [{block,Bl}|Is]) ->
%% A block gives a definite answer when it reads (used) or
%% overwrites (killed) Y; otherwise keep scanning.
case is_not_used_block(Y, Bl) of
used -> false;
killed -> true;
transparent -> is_not_used(Y, Is)
end;
is_not_used(Y, [{bs_get_tail,Src,Dst,_}|Is]) ->
is_not_used_ss_dst(Y, [Src], Dst, Is);
is_not_used(Y, [{bs_init,_,_,_,Ss,Dst}|Is]) ->
is_not_used_ss_dst(Y, Ss, Dst, Is);
is_not_used(Y, [{bs_put,{f,_},_,Ss}|Is]) ->
not member(Y, Ss) andalso is_not_used(Y, Is);
is_not_used(Y, [{bs_start_match4,_Fail,_Live,Src,Dst}|Is]) ->
Y =/= Src andalso Y =/= Dst andalso
is_not_used(Y, Is);
is_not_used(Y, [{bs_set_position,Src1,Src2}|Is]) ->
Y =/= Src1 andalso Y =/= Src2 andalso
is_not_used(Y, Is);
is_not_used(Y, [{call,_,_}|Is]) ->
is_not_used(Y, Is);
is_not_used(Y, [{call_ext,_,_}=I|Is]) ->
%% Nothing runs after an exit instruction, so Y is trivially unused.
beam_jump:is_exit_instruction(I) orelse is_not_used(Y, Is);
is_not_used(Y, [{call_fun,_}|Is]) ->
is_not_used(Y, Is);
is_not_used(_Y, [{deallocate,_}|_]) ->
%% The stack frame is gone after deallocate; no Y register can be
%% used beyond this point.
true;
is_not_used(Y, [{gc_bif,_,{f,_},_Live,Ss,Dst}|Is]) ->
is_not_used_ss_dst(Y, Ss, Dst, Is);
is_not_used(Y, [{get_map_elements,{f,_},S,{list,List}}|Is]) ->
%% List interleaves sources and destinations.  Reading Y means it
%% is used; writing Y means its old value is dead.
{Ss,Ds} = beam_utils:split_even(List),
case member(Y, [S|Ss]) of
true ->
false;
false ->
member(Y, Ds) orelse is_not_used(Y, Is)
end;
is_not_used(Y, [{kill,Yreg}|Is]) ->
Y =:= Yreg orelse is_not_used(Y, Is);
is_not_used(Y, [{line,_}|Is]) ->
is_not_used(Y, Is);
is_not_used(Y, [{make_fun2,_,_,_,_}|Is]) ->
is_not_used(Y, Is);
is_not_used(Y, [{swap,Reg1,Reg2}|Is]) ->
Y =/= Reg1 andalso Y =/= Reg2 andalso is_not_used(Y, Is);
is_not_used(Y, [{test,_,_,Ss}|Is]) ->
not member(Y, Ss) andalso is_not_used(Y, Is);
is_not_used(Y, [{test,_Op,{f,_},_Live,Ss,Dst}|Is]) ->
is_not_used_ss_dst(Y, Ss, Dst, Is).
%% Classify the fate of register Y inside a basic block:
%%   used        -> Y is read by some instruction;
%%   killed      -> Y is overwritten before any read;
%%   transparent -> the block neither reads nor writes Y.
is_not_used_block(Y, [{set,Ds,Ss,_}|Is]) ->
case member(Y, Ss) of
true ->
used;
false ->
case member(Y, Ds) of
true ->
killed;
false ->
is_not_used_block(Y, Is)
end
end;
is_not_used_block(_Y, []) -> transparent.
%% Y is unused when it is not among the sources and is either the
%% destination itself (its old value is overwritten) or unused in the
%% rest of the sequence.
is_not_used_ss_dst(Y, Ss, Dst, Is) ->
not member(Y, Ss) andalso (Y =:= Dst orelse is_not_used(Y, Is)). | lib/compiler/src/beam_trim.erl | 0.575111 | 0.437403 | beam_trim.erl | starcoder
-module(spud).
-compile({no_auto_import,[min/2]}).
-compile({no_auto_import,[max/2]}).
-export([sample/1, slices/2, uniq/1]).
-export([array_update/3, tuple_update/3]).
-export([combinations/2]).
-export([mapreduce/4, parallel_mapreduce/4, parallel_mapreduce/5]).
-export([min/2, max/2, min_by/2, max_by/2]).
-export([parallel_min/2, parallel_max/2, parallel_min/3, parallel_max/3]).
-export([parallel_min_by/2, parallel_max_by/2]).
-export([parallel_min_by/3, parallel_max_by/3]).
-export([array_min_by/2, tuple_min_by/2]).
-export([map_find/2, product/2, sort_by/2]).
-export([format/2, debug/1, debug/2]).
%% Some handy utility functions.
%% Returns a random element of the List (or of the Tuple), picking the
%% index with rnd:uniform/1.
%%
sample(List) when is_list(List) ->
    Index = rnd:uniform(length(List)),
    lists:nth(Index, List);
sample(Tuple) when is_tuple(Tuple) ->
    Index = rnd:uniform(tuple_size(Tuple)),
    element(Index, Tuple).
%% Returns a list of slices, each holding N consecutive elements of the
%% original list.  A final slice with fewer than N elements is returned
%% as-is, so lists whose length is not a multiple of N no longer crash
%% inside lists:split/2.  slices([], N) is [].
%%
slices([], _N) ->
    [];
slices(List, N) when is_integer(N), N > 0 ->
    {Slice, Rest} = take_at_most(N, List, []),
    [Slice | slices(Rest, N)].

%% Take up to N elements from the front of a list.
%% Returns {Taken, Remainder}; stops early when the list runs out.
take_at_most(0, Rest, Acc) ->
    {lists:reverse(Acc), Rest};
take_at_most(_N, [], Acc) ->
    {lists:reverse(Acc), []};
take_at_most(N, [H | T], Acc) ->
    take_at_most(N - 1, T, [H | Acc]).
%% Returns the list with duplicate elements removed, keeping the first
%% occurrence of each element and preserving the original order.  (The
%% previous sets-based version returned the elements in an unspecified
%% order.)
%%
uniq(List) when is_list(List) ->
    uniq_1(List, #{}).

%% Track already-seen elements as map keys (exact term matching).
uniq_1([H | T], Seen) ->
    case maps:is_key(H, Seen) of
        true -> uniq_1(T, Seen);
        false -> [H | uniq_1(T, Seen#{H => true})]
    end;
uniq_1([], _Seen) ->
    [].
%% Returns a new Array with the value at Index (0-based) replaced by
%% Func applied to the current value.  The array module offers no
%% single-call read-modify-write, so the structure is descended twice:
%% once for array:get/2 and once for array:set/3.
%%
array_update(Index, Func, Array) ->
    Current = array:get(Index, Array),
    array:set(Index, Func(Current), Array).
%% Returns a new Tuple with the value at Index (0-based) replaced by
%% Func applied to the current value.  element/2 and setelement/3 use
%% 1-based positions, hence the Index + 1.
%%
tuple_update(Index, Func, Tuple) ->
    Pos = Index + 1,
    setelement(Pos, Tuple, Func(element(Pos, Tuple))).
%% Returns all length-N combinations (with repetition) of the elements
%% of List: each element may appear zero to N times in a combination.
%% combinations(0, List) yields [[]], the single empty combination.
%%
combinations(N, List) when is_integer(N), is_list(List) ->
    Built = extend(N, [[]], List),
    %% Combinations are built head-first; reverse each so they read in
    %% selection order.
    [lists:reverse(Combo) || Combo <- Built].

%% Extend every partial combination by one element, N times over.
extend(0, Partial, _List) ->
    Partial;
extend(N, Partial, List) ->
    Longer = [[Elem | Combo] || Combo <- Partial, Elem <- List],
    extend(N - 1, Longer, List).
%% Performs a mapreduce over the List entirely inside the calling
%% process: each element is mapped with Map and folded into the
%% accumulator with Reduce, in list order.
%%
mapreduce(List, Map, Reduce, Accum0) ->
    Step = fun(Element, Accum) -> Reduce(Map(Element), Accum) end,
    lists:foldl(Step, Accum0, List).
%% Performs a mapreduce over the List where every Map(Element) runs in
%% its own spawned process; the Reduce step stays in the calling
%% process.
%%
parallel_mapreduce(List, Map, Reduce, Accum) ->
    Spawner = fun(Work) -> spawn(Work) end,
    limited_mapreduce(Spawner, List, Map, Reduce, Accum).
%% Performs a mapreduce over the List where each Map(Element) is handed
%% to the given Limiter, which decides whether it may run in a spawned
%% process; otherwise the map runs in-process.
%%
parallel_mapreduce(Limiter, List, Map, Reduce, Accum) ->
    RunViaLimiter = fun(Work) -> limiter:run(Limiter, Work) end,
    limited_mapreduce(RunViaLimiter, List, Map, Reduce, Accum).
%% Start (via Run) one map job per element; each job sends {Ref, Result}
%% back to this process, where collect_mapreduce/4 folds the results.
%% The unique Ref keeps the replies from being confused with other mail.
limited_mapreduce(Run, List, Map, Reduce, Accum) ->
    Parent = self(),
    Ref = make_ref(),
    StartJob = fun(Element) ->
                       Run(fun() -> Parent ! {Ref, Map(Element)} end)
               end,
    lists:foreach(StartJob, List),
    collect_mapreduce(Ref, Reduce, length(List), Accum).

%% Receive the expected number of {Ref, Value} replies and reduce them
%% in arrival order.  Blocks until every reply has arrived.
collect_mapreduce(_Ref, _Reduce, 0, Accum) ->
    Accum;
collect_mapreduce(Ref, Reduce, Remaining, Accum) ->
    receive
        {Ref, Value} ->
            collect_mapreduce(Ref, Reduce, Remaining - 1, Reduce(Value, Accum))
    end.
%% Applies Func to each value in List and returns the smallest result,
%% or 'undefined' for an empty list.
%%
min(List, Func) ->
    mapreduce(List, Func, fun min_reduce/2, undefined).

%% Applies Func to each value in List and returns the largest result,
%% or 'undefined' for an empty list.
%%
max(List, Func) ->
    mapreduce(List, Func, fun max_reduce/2, undefined).

%% Reducer keeping the smaller of Value and the accumulator; the atom
%% 'undefined' means "no value seen yet".
min_reduce(Value, undefined) -> Value;
min_reduce(Value, Accum) when Value < Accum -> Value;
min_reduce(_Value, Accum) -> Accum.

%% Reducer keeping the larger of Value and the accumulator.
max_reduce(Value, undefined) -> Value;
max_reduce(Value, Accum) when Value > Accum -> Value;
max_reduce(_Value, Accum) -> Accum.
%% Returns the element of List whose Func value is smallest.
%%
min_by(List, Func) ->
    {Element, _Min} = min_by_with_min(List, Func),
    Element.

%% Like min_by/2, but returns {Element, MinValue}.  Each element is
%% paired with its key so that min/2 orders primarily by the key
%% (ties fall back to comparing the elements themselves).
%%
min_by_with_min(List, Func) ->
    {Min, Element} = min(List, fun(E) -> {Func(E), E} end),
    {Element, Min}.

%% Returns the element of List whose Func value is largest.
%%
max_by(List, Func) ->
    {_Max, Element} = max(List, fun(E) -> {Func(E), E} end),
    Element.
%% Applies Func to each value in the List and returns the minimum. Spawns
%% a new process for each value to compute Func(Value).
%% Returns 'undefined' for an empty list.
parallel_min(List, Func) ->
parallel_mapreduce(List, Func, fun min_reduce/2, undefined).
%% Applies Func to each value in the List and returns the minimum. A
%% new process is spawned for each value to compute Func(Value) if
%% the limiter allows it, otherwise Func(Value) is computed
%% in-process.
%%
parallel_min(Limiter, List, Func) ->
parallel_mapreduce(Limiter, List, Func, fun min_reduce/2, undefined).
%% Applies Func to each value in the List and returns the maximum. Spawns
%% a new process for each value to compute Func(Value).
%%
parallel_max(List, Func) ->
parallel_mapreduce(List, Func, fun max_reduce/2, undefined).
%% Applies Func to each value in the List and returns the maximum. A
%% new process is spawned for each value to compute Func(Value) if
%% the limiter allows it, otherwise Func(Value) is computed
%% in-process.
%%
parallel_max(Limiter, List, Func) ->
parallel_mapreduce(Limiter, List, Func, fun max_reduce/2, undefined).
%% Returns the element of List with the minimum value computed by
%% Func. Spawns a new process for each value to compute Func(Value).
%%
parallel_min_by(List, Func) ->
{_, Result} = parallel_min(
List,
fun (Element) ->
{Func(Element), Element}
end),
Result.
%% Returns the element of List with the minimum value computed by
%% Func. A new process is spawned for each value to compute
%% Func(Value) if the limiter allows it, otherwise Func(Value) is
%% computed in-process.
%%
parallel_min_by(Limiter, List, Func) ->
{_, Result} = parallel_min(
Limiter,
List,
fun (Element) ->
{Func(Element), Element}
end),
Result.
%% Returns the element of List with the maximum value computed by
%% Func. Spawns a new process for each value to compute Func(Value).
%%
parallel_max_by(List, Func) ->
{_, Result} = parallel_max(
List,
fun (Element) ->
{Func(Element), Element}
end),
Result.
%% Returns the element of List with the maximum value computed by
%% Func. A new process is spawned for each value to compute
%% Func(Value) if the limiter allows it, otherwise Func(Value) is
%% computed in-process.
%%
parallel_max_by(Limiter, List, Func) ->
{_, Result} = parallel_max(
Limiter,
List,
fun (Element) ->
{Func(Element), Element}
end),
Result.
%% Returns the element of Array whose Func value is smallest, or
%% 'undefined' for an empty array.  Later elements replace the running
%% minimum only when strictly smaller, so ties keep the element at the
%% lowest index (array:foldl/3 visits indices in ascending order).
%%
array_min_by(Array, Func) when is_function(Func) ->
    Pick = fun(_Index, Elem, {BestN, _BestElem} = Best) ->
                   N = Func(Elem),
                   if
                       BestN == undefined; N < BestN -> {N, Elem};
                       true -> Best
                   end
           end,
    {_BestN, Winner} = array:foldl(Pick, {undefined, undefined}, Array),
    Winner.
%% Returns the element of Tuple whose Func value is smallest, or
%% 'undefined' for the empty tuple.  tuple_foldl/3 visits positions
%% from the last down to the first, and later-visited elements replace
%% the running minimum only when strictly smaller, so on ties the
%% element at the highest position is kept.
%%
tuple_min_by(Tuple, Func) when is_function(Func) ->
    Pick = fun(Elem, {BestN, _BestElem} = Best) ->
                   N = Func(Elem),
                   if
                       BestN == undefined; N < BestN -> {N, Elem};
                       true -> Best
                   end
           end,
    {_BestN, Winner} = tuple_foldl(Pick, {undefined, undefined}, Tuple),
    Winner.

%% Fold Func over the tuple elements, visiting positions
%% tuple_size(Tuple) .. 1 (right to left).
tuple_foldl(Func, Accum, Tuple) ->
    tuple_foldl(Func, Accum, Tuple, tuple_size(Tuple)).

tuple_foldl(_Func, Accum, _Tuple, N) when N =< 0 ->
    Accum;
tuple_foldl(Func, Accum, Tuple, N) ->
    tuple_foldl(Func, Func(element(N, Tuple), Accum), Tuple, N - 1).
%% Calls Func on each element in turn and returns the first result that
%% is not 'false'; returns 'false' when every result is false (or the
%% list is empty).  Evaluation stops at the first hit.
%%
map_find([], _Func) ->
    false;
map_find([Head | Tail], Func) ->
    case Func(Head) of
        false -> map_find(Tail, Func);
        Found -> Found
    end.
%% Returns the cartesian product of As and Bs: every {A, B} pair, with
%% A varying slowest (As-major order).
%%
product(As, Bs) ->
    lists:append([[{A, B} || B <- Bs] || A <- As]).
%% Returns List sorted by the key Func(E) computed for each element E.
%% Classic decorate-sort-undecorate: elements with equal keys end up
%% ordered by comparing the elements themselves, because lists:sort/1
%% compares the whole {Key, Element} pairs.
%%
sort_by(List, Func) ->
    Decorated = [{Func(Element), Element} || Element <- List],
    [Element || {_Key, Element} <- lists:sort(Decorated)].
%% Returns Data rendered with the io_lib Format string, flattened from
%% the deep io_lib result into a plain string.
%%
format(Format, Data) ->
    Rendered = io_lib:format(Format, Data),
    lists:flatten(Rendered).
%% Write some information for debugging.
%% Formats Data according to Format (via format/2) and passes the
%% resulting flat string on to debug/1.
debug(Format, Data) ->
debug(format(Format, Data)).
%% Write a string for debugging.
%% NOTE(review): String is passed to io:format/1 as the format string,
%% so any "~" in it is interpreted as a control sequence (a stray "~"
%% can crash); consider io:format("~ts", [String]) if that is unwanted.
debug(String) ->
% String.
io:format(String). | src/spud.erl | 0.516839 | 0.550124 | spud.erl | starcoder
%%% Concurrent Programming In Erlang -- The University of Kent / FutureLearn
%%% Exercise : https://www.futurelearn.com/courses/concurrent-programming-erlang/3/steps/488342
%%% v3 - += Showing size of mailbox, clearing mailbox at client & server,
%%% imposing server load by sleeping+
%%%
%%% Last Modified Time-stamp: <2020-07-10 15:22:45, updated by <NAME>>
%% -----------------------------------------------------------------------------
%% What's New
%% ----------
%% - v3.1: - Added a clear in the server's loop, before the timer:sleep call so I can
%% get several messages to accumulate.
%% - v3: - Added show_mailbox() public functions to show number of messages
%% accumulating in the server and also to see the ones accumulating in the client.
%% - Removed other debug prints I introduced in v2.1.
%% - Removed the catch-all Msg reception in loop/0 I used for debugging v2.
%% - v2.1: - Fixed a bug in the loop pattern for set_wait: A *new* variable must be
%% used for the time: ``NewWaitTime`` otherwise it patterns match
%% only if the wait time value does *not* change!
%% - Placed clear() code close to where it's used.
%% - Added several io:format to see the clear and delay activities.
%% - v2: instrument for simulating server loading:
%% - client can now timeout after CLIENT_RX_TIMEOUT (set to 3 seconds via a macro)
%% - Data structure change: FreqDb has a TestData field.
%% For now it holds a tuple of 1 tagged value: {sleep_period, integer}
%% identifying the time the server should sleep before each receive
%% to let message accumulate in its mailbox.
%% - Added new debug command/message: set_server_load/1 which identifies
%% how long the server should sleep.
%% - Added clear/0 which clears a mailbox, printing each message removed
%% and returning the number of cleared message.
%% It is called by the client before the client sends a new request,
%% to flush previous un-processed replies.
%% - v1: Providing a functional interface to the requests:
%% - allocate()
%% - deallocate(Freq)
%% - dump()
%% - stop()
%%
%% Supported Transactions
%% ----------------------
%%
%% Here's the representation of the supported transactions:
%%
%% @startuml
%%
%% actor Client
%% boundary API
%% database Frequency
%%
%% == Operation: start the server ==
%% Client ->o API : start()
%% API o-->o Frequency : register(spawn())
%% Client <-o API : ok | {error, Error}
%%
%% == Operation: successful allocation ==
%%
%% Client ->o API : allocate()
%% API --> Frequency : {request, Pid, allocate}
%% API <-- Frequency : {reply, {ok, Freq}}
%% Client <-o API : {ok, Freq}
%%
%% == Operation: successful de-allocation ==
%%
%% Client ->o API: deallocate(Freq)
%% API --> Frequency : {request, Pid, {deallocate, Freq}}
%% API <-- Frequency : {reply, ok}
%% Client <-o API : ok
%%
%%
%% == Timeout: *for any command*: timeout waiting for server reply ==
%%
%% Client -> API : allocate() | deallocate(Freq) | dump() | set_server_load(WaitTime)
%% API -->x Frequency : {request, Pid, Msg}
%% Client <- API : {error, timeout}
%%
%% == Error: failed allocation (no available frequency) ==
%%
%% Client ->o API : allocate()
%% API --> Frequency : {request, Pid, allocate}
%% API <-- Frequency : {reply, {error, no_frequency}}
%% Client <-o API : {error, no_frequency}
%%
%% == Error: failed allocation (client already owns one) ==
%%
%% Client ->o API : allocate()
%% API --> Frequency : {request, Pid, allocate}
%% API <-- Frequency : {reply, {error, client_already_owns, Freq}}
%% Client <-o API : {error, client_already_owns, Freq}
%%
%% == Error: failed de-allocation (frequency not allocated by client) ==
%%
%% Client ->o API : deallocate(Freq)
%% API --> Frequency : {request, Pid, {deallocate, Freq}}
%% API <-- Frequency : {reply, {error, client_does_not_own, Freq}}
%% Client <-o API : {error, client_does_not_own, Freq}
%%
%% == Development help: dump DB ==
%%
%% Client ->o API : dump()
%% API --> Frequency : {request, Pid, dump}
%% API <-- Frequency : {reply, FreqDb}
%% Client <-o API : FreqDb
%%
%% == Development help: set server load ==
%%
%% Client ->o API : set_server_load(WaitTime)
%% API --> Frequency : {request, Pid, {set_wait, WaitTime}}
%% API <-- Frequency : {reply, {ok, OldWaitTime}}
%% Client <-o API : {ok, OldWaitTime}
%%
%% == Shutdown ==
%%
%% Client ->o API: stop()
%% API --> Frequency : {request, Pid, stop}
%% API <-- Frequency : {reply, stopped}
%% Client <-o API : stopped
%%
%% @enduml
%% Server Functional State / Data Model
%% ------------------------------------
%% The server functional state is:
%% - a pair of lists {Free, Allocated}
%% - Free := a list of frequency integers
%% - Allocated: a list of {Freq, UserPid}
%%
%% Db access functions:
%% - allocate/2 : Allocate any frequency for Client
%% - deallocate/3 : de-allocate client owned frequency
%% - is_owner/2 : predicate: return {true, Freq} if Client owns a frequency,
%% False otherwise.
%% - owns/3 : predicate: return true if Client owns a specific frequency.
-module(frequency).
-export([ start/0
, init/0
, allocate/0
, deallocate/1
, dump/0
, set_server_load/1
, show_mailbox/0
, show_mailbox/1
, stop/0]).
%% Data Model:
%% FreqDb := { free : [integer],
%% allocated: [{integer, pid}]
%% test : sleep_period := integer
%% }
%%% Public API
-define(CLIENT_RX_TIMEOUT, 3000). % Timeout for client waiting for server reply.
%% start/0 : spawn and register the frequency server
%% return : ok | {error, already_registered}
%% register/2 never returns false -- it raises badarg when the name is
%% already taken -- so the previous 'Error ->' case clause was
%% unreachable and a second start() crashed instead of returning the
%% documented error tuple.
start() ->
    Pid = spawn(frequency, init, []),
    try register(frequency, Pid) of
        true -> ok
    catch
        error:badarg ->
            %% Don't leak the freshly spawned process on failure.
            exit(Pid, kill),
            {error, already_registered}
    end.
%% allocate/0 : allocate a frequency for the caller's process
%% return : {ok, Freq} | {error, no_frequency}
%%        | {error, client_already_owns, Freq} | {error, timeout}
allocate() ->
    %% Drop stale replies left over from earlier timed-out requests.
    Cleared = clear(),
    %% Fixed log label: this previously said "set_server_load()".
    io:format("allocate(): cleared: ~w~n", [Cleared]),
    frequency ! {request, self(), allocate},
    receive
        {reply, Reply} -> Reply
    after ?CLIENT_RX_TIMEOUT ->
        {error, timeout}
    end.
%% deallocate/1 : deallocate a specified frequency that should already
%% have been allocated by the caller's process.
%% return : ok | {error, client_does_not_own, Freq} | {error, timeout}
deallocate(Freq) ->
    %% Drop stale replies left over from earlier timed-out requests.
    Cleared = clear(),
    %% Fixed log label: this previously said "set_server_load()".
    io:format("deallocate(): cleared: ~w~n", [Cleared]),
    frequency ! {request, self(), {deallocate, Freq}},
    receive
        {reply, Reply} -> Reply
    after ?CLIENT_RX_TIMEOUT ->
        {error, timeout}
    end.
%% dump/0 : return the server's internal database (debug helper).
%% return : FreqDb | {error, timeout}
dump() ->
    %% Drop stale replies left over from earlier timed-out requests.
    Cleared = clear(),
    %% Fixed log label: this previously said "set_server_load()".
    io:format("dump(): cleared: ~w~n", [Cleared]),
    frequency ! {request, self(), dump},
    receive
        {reply, FreqDb} -> FreqDb
    after ?CLIENT_RX_TIMEOUT ->
        {error, timeout}
    end.
%% set_server_load/1 : ask the server to sleep WaitTime milliseconds on
%% every pass of its main loop (simulates a loaded server).
%% Return: {ok, OldWaitTime} | {error, timeout}
set_server_load(WaitTime) ->
    io:format("set_server_load()~n"),
    Cleared = clear(),
    io:format("set_server_load(): cleared: ~w~n", [Cleared]),
    frequency ! {request, self(), {set_wait, WaitTime}},
    receive
        {reply, Response} -> Response
    after ?CLIENT_RX_TIMEOUT ->
        {error, timeout}
    end.
% stop/0 : stop the frequency server.
% Returns 'stopped' on success or {error, timeout} when the server does
% not answer in time.
stop() ->
    clear(),
    frequency ! {request, self(), stop},
    receive
        {reply, Response} -> Response
    after ?CLIENT_RX_TIMEOUT ->
        {error, timeout}
    end.
%%% Client API utility function
%% clear/0: drain the calling process's mailbox, printing each removed
%% message on stdout.
%% return: {ok, NumberOfClearedMessages} (a tagged tuple, not a bare
%% count).
clear() -> clear(0).

%% clear/1: tail-recursive worker; the 'after 0' clause fires as soon
%% as the mailbox is empty, so this never blocks.
clear(Count) ->
    receive
        Message ->
            io:format("Cleared Message: ~w~n", [Message]),
            clear(Count + 1)
    after 0 ->
        {ok, Count}
    end.
%% -----------------------------------------------------------------------------
%%% Server - Internal process logic
%% Server entry point: start with every frequency free, nothing
%% allocated, and no artificial sleep before each receive.
init() ->
    loop({get_frequencies(), [], {sleep_period, 0}}).
loop(FreqDb) ->
    %% The state is {Free, Allocated, {sleep_period, WaitTime}}; only
    %% the wait time is needed here.  (The first two positions were
    %% previously misnamed _Allocated/_Free, in the wrong order.)
    {_Free, _Allocated, {sleep_period, WaitTime}} = FreqDb,
    %% Deliberately drop whatever accumulated while we were busy; this
    %% exists so the exercise can demonstrate messages piling up and
    %% being discarded (see the v3.1 note in the file header).
    Cleared = clear(),
    io:format("frequency loop(): cleared: ~w~n", [Cleared]),
    %% Simulate a loaded server.
    timer:sleep(WaitTime),
    %% Normal request processing.
    receive
        {request, Pid, allocate} ->
            {NewFreqDb, Result} = allocate(FreqDb, Pid),
            Pid ! {reply, Result},
            loop(NewFreqDb);
        {request, Pid, {deallocate, Freq}} ->
            {NewFreqDb, Result} = deallocate(FreqDb, Freq, Pid),
            Pid ! {reply, Result},
            loop(NewFreqDb);
        {request, Pid, dump} ->
            Pid ! {reply, FreqDb},
            loop(FreqDb);
        {request, Pid, {set_wait, NewWaitTime}} ->
            {NewFreqDb, Result} = set_wait(FreqDb, NewWaitTime),
            Pid ! {reply, Result},
            loop(NewFreqDb);
        {request, Pid, stop} ->
            %% No recursive call: the server terminates here.
            Pid ! {reply, stopped}
    end.
%% Frequency 'Database' management functions.
%% allocate/2 : FreqDb, ClientPid
%% Hand out a frequency to ClientPid, allowing at most one per client.
%% Return: {NewFreqDb, {ok, Freq}}
%%       | {NewFreqDb, {error, no_frequency}}
%%       | {NewFreqDb, {error, client_already_owns, Freq}}
allocate({[], Allocated, TestData}, _Pid) ->
    %% Nothing left to hand out.
    {{[], Allocated, TestData}, {error, no_frequency}};
allocate({[Freq | Rest] = Free, Allocated, TestData}, Pid) ->
    case is_owner(Allocated, Pid) of
        {true, OwnedFreq} ->
            %% One frequency per client: refuse, keep the db untouched.
            {{Free, Allocated, TestData},
             {error, client_already_owns, OwnedFreq}};
        false ->
            {{Rest, [{Freq, Pid} | Allocated], TestData}, {ok, Freq}}
    end.
%% deallocate/3 : FreqDb, Freq, Pid
%% Release Freq if Pid is the client holding it; otherwise leave the
%% database untouched and report the ownership error.
%% Return: {NewFreqDb, ok | {error, client_does_not_own, Freq}}
deallocate({Free, Allocated, TestData} = FreqDb, Freq, Pid) ->
    case owns(Allocated, Freq, Pid) of
        true ->
            Remaining = lists:keydelete(Freq, 1, Allocated),
            {{[Freq | Free], Remaining, TestData}, ok};
        false ->
            {FreqDb, {error, client_does_not_own, Freq}}
    end.
%% set_wait/2: FreqDb, WaitTime
%% Store a new server sleep period in the state.
%% Return: {NewFreqDb, {ok, PreviousWaitTime}}
set_wait({Free, Allocated, {sleep_period, Previous}}, WaitTime) ->
    NewDb = {Free, Allocated, {sleep_period, WaitTime}},
    {NewDb, {ok, Previous}}.
%% show_mailbox/0 : print and return the calling process's mailbox size.
show_mailbox() ->
    show_mailbox(self()).

%% show_mailbox/1 : print and return the mailbox size of Pid.
%% Fixed: the printout previously reported self() even when another
%% process's mailbox was being inspected.
show_mailbox(Pid) ->
    {message_queue_len, MsgCount} = process_info(Pid, message_queue_len),
    io:format("Size of ~w mailbox: ~w~n", [Pid, MsgCount]),
    MsgCount.
%%% Database verification
%% is_owner/2 : Allocated, ClientPid
%% Scan the allocation list ({Freq, Pid} pairs) for an entry belonging
%% to ClientPid.  Return {true, Freq} for the first frequency held by
%% the client, or false when the client holds none.
is_owner(Allocated, ClientPid) ->
    case lists:keyfind(ClientPid, 2, Allocated) of
        {Freq, ClientPid} -> {true, Freq};
        false -> false
    end.
%% owns/3 : Allocated, Freq, ClientPid
%% true iff the allocation list records exactly {Freq, ClientPid}.
owns(Allocated, Freq, ClientPid) ->
    lists:member({Freq, ClientPid}, Allocated).
%%% Database initialization
%% Initial pool of frequencies managed by the server.
get_frequencies() ->
    lists:seq(10, 15).
%% ----------------------------------------------------------------------------- | exercises/e4/v3/frequency.erl | 0.535584 | 0.504578 | frequency.erl | starcoder |
%% @doc Model for fetching and updating the Git clone of Zotonic.
%% The Git checkout is located in "priv/data/zotonic-git".
%%
%% This file is a model. You will see functions like 'm_get' that
%% implement the model behaviour. Models are always located in the
%% directory 'models' and their filename always start with 'm_'.
%%
%% @author <NAME> <<EMAIL>>
%% @copyright 2020 <NAME>
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% Note that the filename starts with m_, followed by the module name.
%% This ensures that there are no unexpected name clashes like with the
%% 'm_search' model in the Zotonic core.
-module(m_zotonicwww2_search).
-behaviour(zotonic_model).
-export([
m_get/3,
exact_match/2
]).
%% Include the central definitions of Zotonic. Useful for macro
%% definitions like ?DEBUG.
-include_lib("zotonic_core/include/zotonic.hrl").
%% @doc Handle GET requests for this model. Can be called from the
%% templates (m.zotonicwww2_search), the API (/api/model/zotonicwww2_search/get/...)
%% or via MQTT (topic model/zotonicwww2_search/get).
%%
%% The first argument is the split path of the request (after 'get').
%%
%% The second argument is the MQTT messages, if any. For template calls
%% this could be 'undefined'. API calls also construct a MQTT message, as
%% the API routes calls via the MQTT tree and the zotonic_model.erl (in
%% zotonic_core/src/support).
%%
%% The m_get function consumes as much of the Path as is needed, it must
%% return its result together with the unconsumed part of the path. The
%% zotonic_model functions will do a further lookup of the Path remainder
%% in the return value.
%%
%% On an error an error tuple should be returned. For the template routines
%% this maps to 'undefined' and is ignored. The API and MQTT will return
%% a payload with the error to the caller.
-spec m_get( Path :: list(), zotonic_model:opt_msg(), z:context() ) -> zotonic_model:return().
%% Dispatch 'get' lookups for this model: consume the leading path
%% components and return the result together with the unconsumed rest
%% of the path, as the zotonic_model behaviour expects.
m_get([ <<"exact_match">>, Term | Rest ], _Msg, Context) ->
    {ok, {exact_match(Term, Context), Rest}};
m_get([ <<"title_match">>, Term | Rest ], _Msg, Context) ->
    {ok, {title_match(Term, Context), Rest}};
m_get(_Path, _Msg, _Context) ->
    %% Unrecognised paths previously crashed with function_clause;
    %% reply with the conventional Zotonic model error tuple instead.
    {error, unknown_path}.
%% Ids of resources whose pivot_title equals Term exactly, after the
%% term has been lower-cased and trimmed (presumably matching how the
%% pivot titles are stored -- verify against the pivoting code).
-spec exact_match( binary(), z:context() ) -> list( m_rsc:resource_id() ).
exact_match(Term, Context) when is_binary(Term) ->
    Normalized = z_string:trim(z_string:to_lower(Term)),
    Rows = z_db:q("
select id
from rsc
where pivot_title = $1", [Normalized], Context),
    [Id || {Id} <- Rows].
%% Fuzzy title search over the 'reference' and 'cookbook' category
%% ranges.  Three queries of decreasing precision are combined --
%% prefix match, word match (after a space or an underscore) and plain
%% substring match -- deduplicated in that precedence order, exact
%% matches are excluded, and at most 50 ids are returned.
-spec title_match( binary(), z:context() ) -> list( m_rsc:resource_id() ).
title_match(Term, Context) when is_binary(Term) ->
Exact = exact_match(Term, Context),
Lower = z_string:trim( z_string:to_lower(Term) ),
%% Category ranges restrict the search to reference/cookbook pages.
{RefFrom, RefTo} = m_category:get_range_by_name(reference, Context),
{CookFrom, CookTo} = m_category:get_range_by_name(cookbook, Context),
%% Titles starting with the term.
StartIds = z_db:q("
select id
from rsc
where pivot_title like $1 || '%'
and (
(pivot_category_nr >= $2 and pivot_category_nr <= $3)
or (pivot_category_nr >= $4 and pivot_category_nr <= $5)
)
",
[ Lower, RefFrom, RefTo, CookFrom, CookTo ],
Context),
%% Titles containing the term as a "word", delimited by a space or an
%% underscore (the '\\_' escapes the LIKE wildcard '_').
WordIds = z_db:q("
select id
from rsc
where ( pivot_title like '%' || $1 || '\\_%'
or pivot_title like '%' || $1 || ' %'
or pivot_title like '%\\_' || $1 || '%'
or pivot_title like '% ' || $1 || '%'
)
and (
(pivot_category_nr >= $2 and pivot_category_nr <= $3)
or (pivot_category_nr >= $4 and pivot_category_nr <= $5)
)
",
[ Lower, RefFrom, RefTo, CookFrom, CookTo ],
Context),
%% Titles containing the term anywhere.
AllIds = z_db:q("
select id
from rsc
where pivot_title like '%' || $1 || '%'
and (
(pivot_category_nr >= $2 and pivot_category_nr <= $3)
or (pivot_category_nr >= $4 and pivot_category_nr <= $5)
)
",
[ Lower, RefFrom, RefTo, CookFrom, CookTo ],
Context),
%% Merge with precedence prefix > word > substring, without duplicates.
Ids = StartIds ++ (WordIds -- StartIds) ++ (AllIds -- StartIds -- WordIds),
Ids1 = [ Id || {Id} <- Ids ],
%% Exclude exact matches and cap the result at 50 ids.
lists:sublist(Ids1 -- Exact, 50). | src/models/m_zotonicwww2_search.erl | 0.58261 | 0.558869 | m_zotonicwww2_search.erl | starcoder
% @copyright 2011 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Bloom Filter implementation
%% @end
%% @reference <NAME>, <NAME>
%% <em>Network Applications of Bloom Filters: A Survey</em>
%% 2004 Internet Mathematics 1(4)
%% @version $Id$
-module(bloom).
-include("record_helpers.hrl").
-behaviour(bloom_beh).
-include("scalaris.hrl").
%% Types
-record(bloom, {
size = 0 :: integer(), %bit-length of the bloom filter - requirement: size rem 8 = 0
filter = <<>> :: binary(), %length = size div 8
expItems = ?required(bloom, expItems) :: integer(), %extected number of items
targetFPR = ?required(bloom, targetFPR) :: float(), %target false-positive-rate
hfs = ?required(bloom, hfs) :: ?REP_HFS:hfs(),%HashFunctionSet
addedItems = 0 :: integer() %number of inserted items
}).
-type bloom_filter_t() :: #bloom{}.
-include("bloom_beh.hrl").
%% API
%% @doc Creates a new bloom filter sized for the expected item count N and
%% the requested false-positive rate FPR, using the hash function set Hfs.
new_(N, FPR, Hfs) ->
    %% Round the bit length up to a whole number of bytes (size rem 8 =:= 0).
    BitSize = resize(calc_least_size(N, FPR), 8),
    #bloom{
        size = BitSize,
        filter = <<0:BitSize>>,
        expItems = N,
        %% Store the FPR actually achievable with the rounded size,
        %% not the rate that was asked for.
        targetFPR = calc_FPR(BitSize, N, calc_HF_num(BitSize, N)),
        hfs = Hfs,
        addedItems = 0
    }.
% @doc adds a range of items to bloom filter
%% For every item, every configured hash function yields one bit position
%% (wrapped into the filter range); all of those bits are set.
add_list_(Bloom, Items) ->
    #bloom{size = BFSize, hfs = Hfs, addedItems = AddedSoFar, filter = Filter} = Bloom,
    HfsMod = element(1, Hfs),
    Positions = [Pos rem BFSize || Item <- Items, Pos <- HfsMod:apply_val(Hfs, Item)],
    Bloom#bloom{
        filter = set_Bits(Filter, Positions),
        addedItems = AddedSoFar + length(Items)
    }.
% @doc returns true if the bloom filter contains item
%% True iff every hash-derived bit position for Item is set; may report
%% false positives but never false negatives.
is_element_(Bloom, Item) ->
    #bloom{size = BFSize, hfs = Hfs, filter = Filter} = Bloom,
    HfsMod = element(1, Hfs),
    Positions = [Pos rem BFSize || Pos <- HfsMod:apply_val(Hfs, Item)],
    check_Bits(Filter, Positions).
%% @doc joins two bloom filter, returned bloom filter represents their union
%% Implemented as a bitwise OR of the two bit arrays. The merged filter keeps
%% the larger size, the larger expected-item count, the smaller (stricter)
%% target FPR and the hash-function set of the FIRST argument.
%% NOTE(review): when Size1 =/= Size2 the 'bor' aligns the shorter filter
%% with the low-order bits of the longer one, and the two inputs were hashed
%% modulo different sizes -- confirm join is only used on filters of
%% identical size and hfs.
join_(#bloom{size = Size1, expItems = ExpItem1, addedItems = Items1, targetFPR = Fpr1,
             filter = F1, hfs = Hfs},
      #bloom{size = Size2, expItems = ExpItem2, addedItems = Items2, targetFPR = Fpr2,
             filter = F2}) ->
    NewSize = erlang:max(Size1, Size2),
    %% Reinterpret each bit array as one big unsigned integer, then OR them.
    <<F1Val : Size1>> = F1,
    <<F2Val : Size2>> = F2,
    NewFVal = F1Val bor F2Val,
    #bloom{
        size = NewSize,
        filter = <<NewFVal:NewSize>>,
        expItems = erlang:max(ExpItem1, ExpItem2),
        targetFPR = erlang:min(Fpr1, Fpr2),
        hfs = Hfs,
        addedItems = Items1 + Items2 %approximation
    }.
%% @doc checks equality of two bloom filters
%% Equal when bit length, inserted-item count and bit content all agree;
%% expItems/targetFPR/hfs are deliberately not compared.
equals_(#bloom{size = SizeA, addedItems = CountA, filter = FilterA},
        #bloom{size = SizeB, addedItems = CountB, filter = FilterB}) ->
    SizeA =:= SizeB
        andalso CountA =:= CountB
        andalso FilterA =:= FilterB.
% @doc bloom filter debug information
%% Returns a proplist of metrics: configured sizes/rates plus the actual
%% false-positive rate recomputed from the current number of inserted items.
print_(Bloom) ->
    #bloom{
        expItems = MaxItems,
        targetFPR = TargetFPR,
        size = Size,
        hfs = Hfs,
        addedItems = NumItems
    } = Bloom,
    %% Number of hash functions, queried from the hash-function-set module
    %% (stored as the first element of the Hfs tuple).
    HCount = apply(element(1, Hfs), hfs_size, [Hfs]),
    [{filter_bit_size, Size},
     {struct_byte_size, byte_size(term_to_binary(Bloom))},
     {hash_fun_num, HCount},
     {max_items, MaxItems},
     {dest_fpr, TargetFPR},
     {items_inserted, NumItems},
     {act_fpr, calc_FPR(Size, NumItems, HCount)}].
%% bit operations
% @doc Sets all filter-bits at given positions to 1
%% Positions are absolute bit indexes; bit (Pos rem 8) counts from the least
%% significant bit of byte (Pos div 8).
-spec set_Bits(binary(), [integer()]) -> binary().
set_Bits(Filter, Positions) ->
    lists:foldl(
        fun(Pos, Bin) ->
            ByteIndex = Pos div 8,
            <<Head:ByteIndex/binary, Byte:8, Tail/binary>> = Bin,
            <<Head/binary, (Byte bor (1 bsl (Pos rem 8))):8, Tail/binary>>
        end,
        Filter, Positions).
% @doc Checks if all bits are set on a given position list
%% Short-circuits to false on the first clear bit; true for the empty list.
-spec check_Bits(binary(), [integer()]) -> boolean().
check_Bits(Filter, Positions) ->
    lists:all(
        fun(Pos) ->
            SkipBytes = Pos div 8,
            <<_:SkipBytes/binary, Byte:8, _/binary>> = Filter,
            Byte band (1 bsl (Pos rem 8)) =/= 0
        end,
        Positions).
%% helper functions
% @doc Natural logarithm (base e) of X.
-spec ln(X::number()) -> float().
ln(X) ->
    %% math:log/1 IS the natural logarithm; no need to route through
    %% util:log(X, Base), which recomputes e and divides two logarithms.
    math:log(X).
% @doc Increases Val until Val rem Div == 0.
%% Computed directly in O(1) instead of incrementing one step at a time
%% (the old loop was O(Div)); the negative-remainder clause reproduces the
%% old loop's behavior for negative Val. (Also fixed: dataset/export residue
%% appended after the final clause that broke compilation.)
-spec resize(integer(), integer()) -> integer().
resize(Val, Div) ->
    case Val rem Div of
        0 -> Val;
        Rem when Rem > 0 -> Val + Div - Rem;
        Rem -> Val - Rem
    end.
%% @doc This module implements storage and lookup of data in ETS tables.
%% Including a minimal gen_server to stay owner of the ets tables.
-module(watts_ets).
%%
%% Copyright 2016 SCC/KIT
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0 (see also the LICENSE file)
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
-author("<NAME>, Bas.Wegh<at>kit.edu").
-include("watts.hrl").
-behaviour(gen_server).
-export([init/0,
destroy/0
]).
-export([
sessions_get_list/0,
sessions_create_new/1,
sessions_get_pid/1,
sessions_update_pid/2,
sessions_delete/1
]).
-export([
service_add/2,
service_update/2,
service_get/1,
service_get_list/0
]).
-export([
start_link/0,
stop/0,
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3]).
-define(WATTS_SESSIONS, watts_sessions).
-define(WATTS_OIDCP, watts_oidcp).
-define(WATTS_SERVICE, watts_service).
%% the list of tables to automatically create/delete ... etc
-define(WATTS_TABLES, [
?WATTS_SESSIONS
, ?WATTS_OIDCP
, ?WATTS_SERVICE
]).
%% @doc create the ets tables
%% Synchronous: the gen_server process executes the creation, so it (not the
%% caller) becomes the owner of the tables.
-spec init() -> ok.
init() ->
    gen_server:call(?MODULE, create_tables).
%% @doc delete all ets tables
-spec destroy() -> ok.
destroy() ->
    gen_server:call(?MODULE, delete_tables).
% functions for session management
%% @doc Return every known session as a map #{id, pid}.
-spec sessions_get_list() -> [map()].
sessions_get_list() ->
    [#{id => Id, pid => Pid} || {Id, Pid} <- get_all_entries(?WATTS_SESSIONS)].
%% @doc Register a fresh session token; error if the token already exists.
%% The pid slot is filled with the placeholder 'none_yet'.
-spec sessions_create_new(Token :: binary()) -> ok | {error, Reason :: atom()}.
sessions_create_new(Token) ->
    return_ok_or_error(insert_new(?WATTS_SESSIONS, {Token, none_yet})).
%% @doc Resolve a token to its session pid; {error, none_yet} when no pid
%% has been attached yet, {error, not_found} for unknown tokens.
-spec sessions_get_pid(Token :: binary()) -> {ok, Pid :: pid()} |
                                             {error, Reason :: atom()}.
sessions_get_pid(Token) ->
    case return_value(lookup(?WATTS_SESSIONS, Token)) of
        {ok, none_yet} -> {error, none_yet};
        Result -> Result
    end.
%% @doc Attach (or replace) the pid associated with a session token.
-spec sessions_update_pid(Token :: binary(), Pid :: pid()) -> ok.
sessions_update_pid(Token, Pid) ->
    _ = insert(?WATTS_SESSIONS, {Token, Pid}),
    ok.
%% @doc Forget the session stored under Token.
-spec sessions_delete(Token :: binary()) -> ok.
sessions_delete(Token) ->
    delete(?WATTS_SESSIONS, Token).
% functions for service management
%% @doc Register a new service config under Identifier; error if it exists.
-spec service_add(Identifier::binary(), Info :: map()) ->
    ok | {error, Reason :: atom()}.
service_add(Identifier, Info) ->
    return_ok_or_error(insert_new(?WATTS_SERVICE, {Identifier, Info})).
%% @doc Overwrite the config stored for Identifier (plain insert, so this
%% always succeeds and never returns the error branch).
-spec service_update(Identifier::binary(), Info :: map()) ->
    ok | {error, Reason :: atom()}.
service_update(Identifier, Info) ->
    return_ok_or_error(insert(?WATTS_SERVICE, {Identifier, Info})).
%% @doc Fetch the {Id, Info} entry stored for a service id.
-spec service_get(Identifier::binary()) ->
    {ok, tuple()} | {error, Reason :: atom()}.
service_get(Id) ->
    lookup(?WATTS_SERVICE, Id).
%% @doc List the configs of all registered services.
-spec service_get_list() -> {ok, [map()]}.
service_get_list() ->
    {ok, [Info || {_Id, Info} <- get_all_entries(?WATTS_SERVICE)]}.
%% internal functions
%% @doc Map ets:insert_new/2-style booleans onto ok / {error, already_exists}.
-spec return_ok_or_error(boolean()) -> ok | {error, already_exists}.
return_ok_or_error(true) -> ok;
return_ok_or_error(false) -> {error, already_exists}.
%% @doc Strip the key from a successful lookup; pass errors through untouched.
-spec return_value({ok, {Key :: any(), Value::any()}} |
                   {error, any()}
                  )
    -> {ok, Value :: any()} | {error, any()}.
return_value({ok, {_Key, Val}}) -> {ok, Val};
return_value({error, _} = Err) -> Err.
%% @doc create the ets tables
%% Creates every table in ?WATTS_TABLES and blocks until all are visible
%% via ets:info/1.
-spec create_tables() -> ok.
create_tables() ->
    %% lists:foreach, not lists:map: only the side effect matters here and
    %% the old code silently discarded the mapped result list.
    ok = lists:foreach(fun create_table/1, ?WATTS_TABLES),
    ok = wait_for_tables(?WATTS_TABLES),
    ok.
%% @doc create a single ets table with name TableName
%% The table is public, named and keyed on the first tuple element. If the
%% top-level supervisor (watts_sup) is running it is registered as heir, so
%% the table is handed over instead of destroyed if this process dies.
-spec create_table(Name :: atom()) -> ets:tid() | atom().
create_table(TableName) ->
    Heir = case erlang:whereis(watts_sup) of
               undefined -> {heir, none};
               Pid -> {heir, Pid, none}
           end,
    ets:new(TableName, [set, public, named_table, {keypos, 1}, Heir]).
%% @doc wait until the tables are ready
%% Polls every 100 ms until each table in List is visible via ets:info/1.
-spec wait_for_tables([atom()]) -> ok.
wait_for_tables(List) ->
    AllExist = lists:all(fun(Table) -> ets:info(Table) =/= undefined end, List),
    case AllExist of
        true ->
            ok;
        false ->
            timer:sleep(100),
            wait_for_tables(List)
    end.
%% @doc delete all tables
%% Drops every table in ?WATTS_TABLES; tables that do not exist are ignored
%% (see delete_table/1).
-spec delete_tables() -> ok.
delete_tables() ->
    %% lists:foreach, not lists:map: only the side effect matters here and
    %% the old code silently discarded the mapped result list.
    lists:foreach(fun delete_table/1, ?WATTS_TABLES).
%% @doc delete a single table
%% A no-op when the table does not exist; always returns ok.
-spec delete_table(Name :: atom()) -> ok.
delete_table(Name) ->
    _ = ets:info(Name) =:= undefined orelse ets:delete(Name),
    ok.
%% @doc get all entries of a table
%% Returns every entry in ets:foldl/3 traversal order (reversed back so the
%% result follows the fold order).
-spec get_all_entries(Table :: atom()) -> [any()].
get_all_entries(Table) ->
    Reversed = ets:foldl(fun(Entry, Acc) -> [Entry | Acc] end, [], Table),
    lists:reverse(Reversed).
%% @doc Remove the entry stored under Key (succeeds even if Key is absent).
-spec delete(Table :: atom(), Key :: any()) -> ok.
delete(Table, Key) ->
    true = ets:delete(Table, Key),
    ok.
%% @doc Insert Entry, overwriting any existing entry with the same key.
-spec insert(Table :: atom(), Entry :: any()) -> true.
insert(Table, Entry) ->
    true = ets:insert(Table, Entry).
%% @doc Insert Entry only if its key is not yet present; false otherwise.
-spec insert_new(Table :: atom(), Entry :: any()) -> boolean().
insert_new(Table, Entry) ->
    ets:insert_new(Table, Entry).
%% @doc Find the full entry stored under Key.
-spec lookup(Table :: atom(), Key :: any()) ->
    {ok, Element::any()} | {error, not_found}.
lookup(Table, Key) ->
    create_lookup_result(ets:lookup(Table, Key)).
%% @doc ets:lookup/2 yields a (possibly empty) list for set tables; turn it
%% into an ok/error tuple.
-spec create_lookup_result([any()]) -> {ok, any()} |
                                       {error, not_found}.
create_lookup_result([Entry]) -> {ok, Entry};
create_lookup_result([]) -> {error, not_found}.
%% @doc start the minimal gen_server linked
%% Registered locally under the module name; the server exists solely to own
%% the ets tables (see init/0 and the heir handling in create_table/1).
-spec start_link() -> {ok, pid()}.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, noparams, []).
%% @doc trigger a stop of the gen_server
%% Asynchronous; the 'stop' cast is handled in handle_cast/2.
-spec stop() -> ok.
stop() ->
    gen_server:cast(?MODULE, stop).
%% @doc no initialization needed
%% The server carries no state, so the state term is the atom 'nothing'.
-spec init(noparams) -> {ok, nothing}.
init(noparams) ->
    {ok, nothing}.
%% @doc create or delete tables
%% Table management runs inside this process so the gen_server (the creating
%% process) becomes the ets owner. Unknown requests reply 'ignored'.
-spec handle_call(any(), any(), nothing)
        -> {reply, ok, nothing} |
           {reply, ignored, nothing}.
handle_call(create_tables, _From, State) ->
    ok = create_tables(),
    {reply, ok, State};
handle_call(delete_tables, _From, State) ->
    ok = delete_tables(),
    {reply, ok, State};
handle_call(_, _, State) ->
    {reply, ignored, State}.
%% @doc handles stop and does ignore everything else
%% On 'stop' the server terminates normally; every other cast is dropped.
%% (Fixed: the spec's stop return said 'noting' instead of the state atom
%% 'nothing', which made the spec unsatisfiable for Dialyzer.)
-spec handle_cast(any(), nothing) ->
    {stop, normal, nothing} |
    {noreply, nothing}.
handle_cast(stop, State) ->
    {stop, normal, State};
handle_cast(_, State) ->
    {noreply, State}.
%% @doc Ignore all raw messages sent to this process.
-spec handle_info(any(), nothing) -> {noreply, nothing}.
handle_info(_, State) ->
    {noreply, State}.
%% @doc Log the crash reason on abnormal termination; silent on normal stop.
-spec terminate(any(), nothing) -> ok.
terminate(normal, _) ->
    ok;
terminate(Reason, _) ->
    lager:error("ETS-db crashed: ~p", [Reason]),
    ok.
%% @doc No state migration needed on code upgrades; return the state as-is.
%% (Fixed: dataset/export residue that had been appended after the final
%% clause and broke compilation.)
-spec code_change(any(), nothing, any()) -> {ok, nothing}.
code_change(_, State, _) ->
    {ok, State}.
%%%-------------------------------------------------------------------
%%% @author mb299v
%%% @copyright (C) 3129, <COMPANY>
%%% @doc
%%%
%%% @end
%%% Created : 31. Aug 3129 21:29 AM
%%%-------------------------------------------------------------------
-module(vector_math).
-author("mb299v").
%% API
-export([rotation_matrix/2,
multiply_vector_by/2,
rotate_around_vector/3,
point_plus_vector/2,
multiply_vector_by_scalar/2]).
%% makes a 3x3 rotation matrix from the given angle and returns the rotation matrix
%% Row-major, returned as a flat 9-element list, built from Rodrigues'
%% rotation formula for a rotation of Angle radians about the unit vector
%% Axis = [X, Y, Z]:
%%   R = cos(a)*I + sin(a)*[u]x + (1 - cos(a))*u*u^T
%% Axis is assumed to be normalized -- TODO confirm callers normalize it.
rotation_matrix([X, Y, Z | _], Angle) ->
    Cos = math:cos(Angle),
    Sin = math:sin(Angle),
    %% BUGFIX: this factor is (1 - cos), not (2 - cos); with the old value
    %% rotation_matrix(Axis, 0.0) was not the identity matrix.
    C = 1 - Cos,
    [
        Cos + X * X * C, X * Y * C - Z * Sin, X * Z * C + Y * Sin,
        Y * X * C + Z * Sin, Cos + Y * Y * C, Y * Z * C - X * Sin,
        Z * X * C - Y * Sin, Z * Y * C + X * Sin, Cos + Z * Z * C
    ].
%% multiplies a vector Vector by a Matrix and returns the resulting vector
%% Matrix is a flat, row-major 9-element list; result_i = dot(row_i, Vector).
%% Destructuring in the head replaces the nine O(n) lists:nth/2 calls of the
%% old implementation and makes the row layout explicit.
multiply_vector_by([V1, V2, V3 | _],
                   [M11, M12, M13, M21, M22, M23, M31, M32, M33 | _]) ->
    [
        V1 * M11 + V2 * M12 + V3 * M13,
        V1 * M21 + V2 * M22 + V3 * M23,
        V1 * M31 + V2 * M32 + V3 * M33
    ].
%% rotate a vector Vector1 around the axis Vector2 by Angle and return the resulting Vector
%% Convenience wrapper: builds the rotation matrix for (Vector2, Angle) and
%% applies it to Vector1. Vector2 is presumably a unit vector -- TODO confirm
%% callers normalize it before calling.
rotate_around_vector(Vector1, Vector2, Angle) ->
    multiply_vector_by(Vector1, rotation_matrix(Vector2, Angle)).
%% @doc Translate a point {Px, Py, Pz} by a vector {Vi, Vj, Vk}.
point_plus_vector({Px, Py, Pz}, {Vi, Vj, Vk}) ->
    {Px + Vi, Py + Vj, Pz + Vk}.
%% @doc Scale each component of a vector by Scalar.
%% (Fixed: dataset/export residue that had been appended after the final
%% clause and broke compilation.)
multiply_vector_by_scalar({Vi, Vj, Vk}, Scalar) ->
    {Vi * Scalar, Vj * Scalar, Vk * Scalar}.
-module(parser).
-export([parse/1, parse_and_scan/1, format_error/1]).
-file("src/parser.yrl", 268).
-compile([{hipe, [{regalloc, linear_scan}]}]).
% LANGUAGE
%% NOTE(review): this section is the code block copied from src/parser.yrl by
%% yecc -- edit the .yrl, not this generated file.
%% Replace the language slot inside a feature node's meta, leaving everything
%% else untouched; used once the (optional) language token is known.
set_language({'feature', {'meta', {Location, TokenLabel, _}, Type}, Constituents}, Language) ->
    {'feature', {'meta', {Location, TokenLabel, language(Language)}, Type}, Constituents}.
% FEATURE
%% Assemble the AST node for a whole feature: tags, title, description,
%% optional background, scenarios and rules, in that fixed order.
feature_block(Feature, Contents, BackgroundBlock, ScenarioBlocks, RuleBlocks) ->
    Constituents = [
        tags_component([]),   %% empty placeholder; swapped via set_tags_block/2
        title_component(Feature),
        description_block(Contents),
        background_block(BackgroundBlock),
        scenario_blocks(ScenarioBlocks),
        rule_blocks(RuleBlocks)
    ],
    component(token(Feature), meta(Feature), 'default_language'), Constituents).
% BACKGROUND
%% A missing background is represented by an empty 'background' node rather
%% than by omitting the constituent, so consumers always find the slot.
background_block([]) ->
    Meta = {meta, {none, none}, {type, multiples}},
    normalize('background', Meta, constituents([]));
background_block(B) -> B.
%% Build a background node from its title token, description lines and steps.
background_block(Background, Contents, Steps) ->
    Constituents = [
        title_component(Background),
        description_block(Contents),
        step_block(Steps)
    ],
    component(token(Background), meta(Background), Constituents).
% RULE
%% Wrap a list of rule nodes in a 'rule_blocks' container.
rule_blocks(RuleBlocks) -> multiple_component('rule_blocks', RuleBlocks).
%% A rule owns its own optional background plus its scenarios.
rule_block(Rule, Contents, BackgroundBlock, ScenarioBlocks) ->
    Constituents = [
        title_component(Rule),
        description_block(Contents),
        background_block(BackgroundBlock),
        scenario_blocks(ScenarioBlocks)
    ],
    component(token(Rule), meta(Rule), Constituents).
% SCENARIO
%% Wrap a list of scenario nodes in a 'scenario_blocks' container.
scenario_blocks(ScenarioBlocks) -> multiple_component('scenario_blocks', ScenarioBlocks).
%% A scenario: tags placeholder, title, description, steps and examples.
scenario_block(Scenario, Contents, Steps, ExamplesBlocks) ->
    Constituents = [
        tags_component([]),
        title_component(Scenario),
        description_block(Contents),
        step_block(Steps),
        examples_blocks(ExamplesBlocks)
    ],
    component(token(Scenario), meta(Scenario), Constituents).
% STEP
%% Wrap the list of step nodes in a 'steps' container.
step_block(Steps) -> multiple_component(steps, Steps).
%% Step without an argument: the arg slot is filled with 'none'.
step_component(FullToken) ->
    Constituents = [
        text_component(FullToken),
        step_arg_component()
    ],
    component(token(FullToken), meta(FullToken), Constituents).
%% Step with an attached argument (doc string or data table node).
step_component(FullToken, StepArg) ->
    Constituents = [
        text_component(FullToken),
        step_arg_component(StepArg)
    ],
    component(token(FullToken), meta(FullToken), Constituents).
step_arg_component(StepArg) -> singular_component('arg', StepArg).
step_arg_component() -> singular_component('arg', 'none').
% EXAMPLES
examples_blocks(ExamplesBlocks) -> multiple_component('examples_blocks', ExamplesBlocks).
%% An examples table: tags placeholder, title, description and the data rows.
examples_block(Examples, Contents, TableData) ->
    Constituents = [
        tags_component([]),
        title_component(Examples),
        description_block(Contents),
        data_table_block(TableData)
    ],
    component('examples', meta(Examples), Constituents).
% DATATABLE
data_table_block(DataTableBlock) -> multiple_component(data_table, DataTableBlock).
%% One table-row token carries [{Column, Value}] pairs; each pair becomes a
%% cell node whose meta reuses the row meta with the cell's column number.
data_table_component(Token = {_, _, _, Row}) ->
    Meta = meta(Token),
    Cells = lists:map(fun({Column, Value}) ->
        component('data_table_cell', set_column(Meta, Column), Value)
    end, Row),
    component('data_table_row', Meta, Cells).
% TAG
%% Swap the placeholder tags constituent of an already-built node for the
%% real tags collected by the grammar (see replace_tags_component/2).
set_tags_block(FullToken, Tags) -> replace_tags_component(FullToken, tags_component(Tags)).
%% A single tag node; marked singular because its constituent is the bare name.
tag_component(Token = {_, _, _, TagName}) ->
    Meta = erlang:append_element(meta(Token), {type, singular}),
    normalize(token(Token), Meta, TagName).
%% Flatten per-line tag tokens ({Tag, Meta, [{Column, Name}]}) into one entry
%% per tag, each carrying its own column position in the meta.
tags_component(Tags) ->
    IndividualizedTags = lists:foldl(fun({Tag, Meta, TagsOnLine}, Acc) ->
        Acc ++ lists:map(fun({ColumnNumber, TagName}) -> {Tag, set_column(Meta, ColumnNumber), TagName} end, TagsOnLine)
    end, [], Tags),
    multiple_component('tags', IndividualizedTags).
% DOCSTRING
%% A doc string node: the delimiter kind plus the content lines.
doc_string_component(FullToken, Contents) ->
    Constituents = [
        delim_component(FullToken),
        doc_string_content_component(Contents)
    ],
    component(token(FullToken), meta(FullToken), Constituents).
doc_string_content_component(Descriptions) -> to_block_component('contents', Descriptions, [], fun doc_string_content_line_component/1).
doc_string_content_line_component(Content = {_, _, _, _}) ->
    component(token(Content), meta(Content), value(Content)).
delim_component({_, _, _, {_, Delim}}) -> singular_component('delim', Delim).
% GENERAL COMPONENTS
%% Tail-recursively map ComponentFunc over Contents and wrap the results in a
%% container node named Label.
to_block_component(Label, Contents, Acc, ComponentFunc) when is_list(Contents) and is_list(Acc) ->
    case Contents of
        [] -> multiple_component(Label, lists:reverse(Acc));
        [Hd | Tl] -> to_block_component(Label, Tl, [ComponentFunc(Hd) | Acc], ComponentFunc)
    end.
description_block(Descriptions) -> description_block(Descriptions, []).
description_block(Descriptions, Acc) -> to_block_component('description_block', Descriptions, Acc, fun description_component/1).
description_component(Content = {_, _, _, _}) -> component('description', meta(Content), value(Content)).
title_component({_, _, _, Title}) -> singular_component('title', Title).
text_component({_, _, _, ActualContent}) -> singular_component('text', ActualContent).
% COMPONENT
%% Core node constructors: every AST node is {Label, Meta, Constituents},
%% where Meta carries location/token info plus singular-vs-multiples typing.
constituents(Constituents) -> {'constituents', Constituents}.
component(Label, {meta, Meta}, Constituents) when is_list(Constituents) ->
    UpdatedMeta = {meta, Meta, {type, multiples}},
    normalize(Label, UpdatedMeta, constituents(Constituents));
component(Label, {meta, Meta}, Constituent) ->
    UpdatedMeta = {meta, Meta, {type, singular}},
    normalize(Label, UpdatedMeta, Constituent);
component(Label, Meta, Constituents) -> normalize(Label, Meta, Constituents).
normalize(Label, Meta, Constituents) -> {Label, Meta, Constituents}.
singular_component(Label, Constituent) -> component(Label, singular_meta(), Constituent).
multiple_component(Label, Constituents) -> component(Label, multiple_meta(), constituents(Constituents)).
%% Meta builders: with or without an explicit language slot.
meta(FullToken = {_, _, _, _}) -> {'meta', {location(FullToken), token_label(FullToken)}}.
meta(FullToken = {_, _, _, _}, Language) -> {'meta', {location(FullToken), token_label(FullToken), language(Language)}}.
singular_meta() -> {'meta', {none, none}, {type, singular}}.
multiple_meta() -> {'meta', {none, none}, {type, multiples}}.
% HELPERS
%% Accessors over the 4-tuple lexer token {Token, TokenLabel, Location, Value}.
token({Token, _, _, _}) -> Token.
token_label({_, TokenLabel, _, _}) -> {'token_label', TokenLabel}.
line({_, _, {location, Line, _}, _}) -> {'line', Line}.
column({_, _, {location, _, Column}, _}) -> {'column', Column}.
set_column({'meta', {{location, {Line, _}}, TokenLabel}, Typing}, ColumnNumber) -> {'meta', {{location, {Line, {'column', ColumnNumber}}}, TokenLabel}, Typing};
set_column({'meta', {{location, {Line, _}}, TokenLabel}}, ColumnNumber) -> {'meta', {{location, {Line, {'column', ColumnNumber}}}, TokenLabel}}.
location(FullToken = {_, _, {location, _, _}, _}) -> {location, {line(FullToken), column(FullToken)}}.
%% Either wrap an explicit language token or fall back to English ('en').
language(Language) ->
    case Language of
        FullToken = {_, _, _, LanguageSymbol} -> component(token(FullToken), meta(FullToken), LanguageSymbol);
        'default_language' -> singular_component('language', 'en')
    end.
value({_, _, _, ActualContent}) -> ActualContent.
%% Drop the placeholder tags constituent (always first) and prepend the real
%% tags node; handles both wrapped and raw constituent lists.
replace_tags_component({Label, Meta, {'constituents', [_tags_component_to_replace | Constituents]}}, Constituent) ->
    component(Label, Meta, constituents([Constituent | Constituents]));
replace_tags_component({Label, Meta, [_tags_component_to_replace | Constituents]}, Constituent) ->
    component(Label, Meta, constituents([Constituent | Constituents])).
-file("/Users/kevinjohnson/.asdf/installs/erlang/22.3.3/lib/parsetools-2.1.8/include/yeccpre.hrl", 0).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The parser generator will insert appropriate declarations before this line.%
%% NOTE(review): everything from here on is vendored verbatim from OTP's
%% parsetools yeccpre.hrl (see the -file attribute above). Do not edit by
%% hand -- regenerate from src/parser.yrl instead.
-type yecc_ret() :: {'error', _} | {'ok', _}.
-spec parse(Tokens :: list()) -> yecc_ret().
parse(Tokens) ->
    yeccpars0(Tokens, {no_func, no_line}, 0, [], []).
-spec parse_and_scan({function() | {atom(), atom()}, [_]}
                     | {atom(), atom(), [_]}) -> yecc_ret().
parse_and_scan({F, A}) ->
    yeccpars0([], {{F, A}, no_line}, 0, [], []);
parse_and_scan({M, F, A}) ->
    Arity = length(A),
    yeccpars0([], {{fun M:F/Arity, A}, no_line}, 0, [], []).
-spec format_error(any()) -> [char() | list()].
format_error(Message) ->
    case io_lib:deep_char_list(Message) of
        true ->
            Message;
        _ ->
            io_lib:write(Message)
    end.
%% To be used in grammar files to throw an error message to the parser
%% toplevel. Doesn't have to be exported!
-compile({nowarn_unused_function, return_error/2}).
-spec return_error(integer(), any()) -> no_return().
return_error(Line, Message) ->
    throw({error, {Line, ?MODULE, Message}}).
-define(CODE_VERSION, "1.4").
%% NOTE(review): vendored yecc parser driver (yeccpre.hrl). Generated code --
%% edit src/parser.yrl, not this file.
yeccpars0(Tokens, Tzr, State, States, Vstack) ->
    try yeccpars1(Tokens, Tzr, State, States, Vstack)
    catch
        error: Error: Stacktrace ->
            try yecc_error_type(Error, Stacktrace) of
                Desc ->
                    erlang:raise(error, {yecc_bug, ?CODE_VERSION, Desc},
                                 Stacktrace)
            catch _:_ -> erlang:raise(error, Error, Stacktrace)
            end;
        %% Probably thrown from return_error/2:
        throw: {error, {_Line, ?MODULE, _M}} = Error ->
            Error
    end.
yecc_error_type(function_clause, [{?MODULE,F,ArityOrArgs,_} | _]) ->
    case atom_to_list(F) of
        "yeccgoto_" ++ SymbolL ->
            {ok,[{atom,_,Symbol}],_} = erl_scan:string(SymbolL),
            State = case ArityOrArgs of
                        [S,_,_,_,_,_,_] -> S;
                        _ -> state_is_unknown
                    end,
            {Symbol, State, missing_in_goto_table}
    end.
yeccpars1([Token | Tokens], Tzr, State, States, Vstack) ->
    yeccpars2(State, element(1, Token), States, Vstack, Token, Tokens, Tzr);
yeccpars1([], {{F, A},_Line}, State, States, Vstack) ->
    case apply(F, A) of
        {ok, Tokens, Endline} ->
            yeccpars1(Tokens, {{F, A}, Endline}, State, States, Vstack);
        {eof, Endline} ->
            yeccpars1([], {no_func, Endline}, State, States, Vstack);
        {error, Descriptor, _Endline} ->
            {error, Descriptor}
    end;
yeccpars1([], {no_func, no_line}, State, States, Vstack) ->
    Line = 999999,
    yeccpars2(State, '$end', States, Vstack, yecc_end(Line), [],
              {no_func, Line});
yeccpars1([], {no_func, Endline}, State, States, Vstack) ->
    yeccpars2(State, '$end', States, Vstack, yecc_end(Endline), [],
              {no_func, Endline}).
%% yeccpars1/7 is called from generated code.
%%
%% When using the {includefile, Includefile} option, make sure that
%% yeccpars1/7 can be found by parsing the file without following
%% include directives. yecc will otherwise assume that an old
%% yeccpre.hrl is included (one which defines yeccpars1/5).
yeccpars1(State1, State, States, Vstack, Token0, [Token | Tokens], Tzr) ->
    yeccpars2(State, element(1, Token), [State1 | States],
              [Token0 | Vstack], Token, Tokens, Tzr);
yeccpars1(State1, State, States, Vstack, Token0, [], {{_F,_A}, _Line}=Tzr) ->
    yeccpars1([], Tzr, State, [State1 | States], [Token0 | Vstack]);
yeccpars1(State1, State, States, Vstack, Token0, [], {no_func, no_line}) ->
    Line = yecctoken_end_location(Token0),
    yeccpars2(State, '$end', [State1 | States], [Token0 | Vstack],
              yecc_end(Line), [], {no_func, Line});
yeccpars1(State1, State, States, Vstack, Token0, [], {no_func, Line}) ->
    yeccpars2(State, '$end', [State1 | States], [Token0 | Vstack],
              yecc_end(Line), [], {no_func, Line}).
%% For internal use only.
%% NOTE(review): vendored yeccpre.hrl token/location helpers. Generated
%% code -- edit src/parser.yrl, not this file.
yecc_end({Line,_Column}) ->
    {'$end', Line};
yecc_end(Line) ->
    {'$end', Line}.
yecctoken_end_location(Token) ->
    try erl_anno:end_location(element(2, Token)) of
        undefined -> yecctoken_location(Token);
        Loc -> Loc
    catch _:_ -> yecctoken_location(Token)
    end.
-compile({nowarn_unused_function, yeccerror/1}).
yeccerror(Token) ->
    Text = yecctoken_to_string(Token),
    Location = yecctoken_location(Token),
    {error, {Location, ?MODULE, ["syntax error before: ", Text]}}.
-compile({nowarn_unused_function, yecctoken_to_string/1}).
yecctoken_to_string(Token) ->
    try erl_scan:text(Token) of
        undefined -> yecctoken2string(Token);
        Txt -> Txt
    catch _:_ -> yecctoken2string(Token)
    end.
yecctoken_location(Token) ->
    try erl_scan:location(Token)
    catch _:_ -> element(2, Token)
    end.
-compile({nowarn_unused_function, yecctoken2string/1}).
yecctoken2string({atom, _, A}) -> io_lib:write_atom(A);
yecctoken2string({integer,_,N}) -> io_lib:write(N);
yecctoken2string({float,_,F}) -> io_lib:write(F);
yecctoken2string({char,_,C}) -> io_lib:write_char(C);
yecctoken2string({var,_,V}) -> io_lib:format("~s", [V]);
yecctoken2string({string,_,S}) -> io_lib:write_string(S);
yecctoken2string({reserved_symbol, _, A}) -> io_lib:write(A);
yecctoken2string({_Cat, _, Val}) -> io_lib:format("~tp", [Val]);
yecctoken2string({dot, _}) -> "'.'";
yecctoken2string({'$end', _}) -> [];
yecctoken2string({Other, _}) when is_atom(Other) ->
    io_lib:write_atom(Other);
yecctoken2string(Other) ->
    io_lib:format("~tp", [Other]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-file("src/parser.erl", 346).
-dialyzer({nowarn_function, yeccpars2/7}).
yeccpars2(0=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_0(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(1=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_1(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(2=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_2(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(3=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_3(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(4=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_4(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(5=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_5(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(6=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_6(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(7=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_7(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(8=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_8(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(9=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_9(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(10=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_10(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(11=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_11(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(12=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_12(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(13=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_13(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(14=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_14(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(15=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_15(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(16=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_16(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(17=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_17(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(18=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_18(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(19=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_19(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(20=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_20(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(21=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_21(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(22=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_22(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(23=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_23(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(24=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_24(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(25=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_25(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(26=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_26(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(27=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_27(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(28=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_28(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(29=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_29(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(30=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_30(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(31=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_31(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(32=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_32(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(33=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_33(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(34=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_34(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(35=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_35(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(36=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_36(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(37=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_37(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(38=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_38(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(39=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_39(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(40=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_40(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(41=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_41(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(42=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_42(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(43=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_43(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(44=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_44(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(45=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_45(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(46=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_46(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(47=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_47(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(48=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_48(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(49=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_49(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(50=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_50(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(51=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_51(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(52=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_52(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(53=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_53(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(54=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_54(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(55=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_55(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(56=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_56(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(57=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_57(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(58=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_58(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(59=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_59(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(60=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_60(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(61=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_61(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(62=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_62(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(63=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_63(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(64=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_64(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(65=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_65(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(66=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_66(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(67=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_67(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(68=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_68(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(69=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_69(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(70=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_70(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(71=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_71(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(72=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_72(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(73=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_73(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(74=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_74(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(75=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_75(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(76=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_76(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(77=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_77(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(78=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_78(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(79=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_79(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(80=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_80(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(81=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_81(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(82=S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_82(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(83=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_83(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(84=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_84(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(85=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_85(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(86=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_86(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(87=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_87(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(88=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_88(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(89=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_89(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(90=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_90(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(91=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_91(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(92=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_92(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(93=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_93(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(94=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_94(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(95=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_95(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(96=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_96(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(97=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_97(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(98=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_98(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(99=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_99(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(100=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_100(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(101=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_101(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(102=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_102(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(103=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_103(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(104=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_104(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(105=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_105(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(106=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_106(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(107=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_107(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(108=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_108(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(109=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_109(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(110=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_110(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(111=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_111(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(112=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_112(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(113=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_113(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(114=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_114(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(115=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_115(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(116=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_116(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(117=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_117(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(118=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_118(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(119=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_119(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(120=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_120(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(121=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_121(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(122=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_122(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(123=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_123(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(124=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_124(S, Cat, Ss, Stack, T, Ts, Tzr);
%% yeccpars2(125=S, Cat, Ss, Stack, T, Ts, Tzr) ->
%% yeccpars2_125(S, Cat, Ss, Stack, T, Ts, Tzr);
yeccpars2(Other, _, _, _, _, _, _) ->
erlang:error({yecc_bug,"1.4",{missing_state_in_action_table, Other}}).
-dialyzer({nowarn_function, yeccpars2_0/7}).
%% yecc-generated LALR parser state functions (machine-written; regenerate
%% from the grammar rather than editing by hand). In each state function,
%% a matching token clause shifts: yeccpars1/7 pushes the current state and
%% continues in the numbered state. The catch-all clause either reduces
%% (yeccpars2_N_/1 rebuilds the value stack, then yeccgoto_*/7 follows the
%% goto table) or reports a syntax error via yeccerror/1.
%%
%% State 0 (start): shift on background_tag, feature, feature_tag, language
%% or rule_tag; any other lookahead is a syntax error.
yeccpars2_0(S, background_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 9, Ss, Stack, T, Ts, Tzr);
yeccpars2_0(S, feature, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 10, Ss, Stack, T, Ts, Tzr);
yeccpars2_0(S, feature_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 11, Ss, Stack, T, Ts, Tzr);
yeccpars2_0(S, language, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 12, Ss, Stack, T, Ts, Tzr);
yeccpars2_0(S, rule_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 13, Ss, Stack, T, Ts, Tzr);
yeccpars2_0(_, _, _, _, T, _, _) ->
yeccerror(T).
%% State 1: reduce, goto 'grammar'.
yeccpars2_1(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_grammar(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 2: reduce, goto 'invalids'.
yeccpars2_2(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_2_(Stack),
yeccgoto_invalids(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 3: reduce, goto 'grammar'.
yeccpars2_3(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_3_(Stack),
yeccgoto_grammar(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 4: shift on feature or feature_tag; otherwise reduce, goto 'invalids'.
yeccpars2_4(S, feature, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 10, Ss, Stack, T, Ts, Tzr);
yeccpars2_4(S, feature_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 11, Ss, Stack, T, Ts, Tzr);
yeccpars2_4(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_4_(Stack),
yeccgoto_invalids(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccpars2_5/7}).
%% State 5 (accept): on '$end' the parse is complete and the result is the
%% top of the value stack; any other token is a syntax error.
yeccpars2_5(_S, '$end', _Ss, Stack, _T, _Ts, _Tzr) ->
{ok, hd(Stack)};
yeccpars2_5(_, _, _, _, T, _, _) ->
yeccerror(T).
-dialyzer({nowarn_function, yeccpars2_6/7}).
%% State 6 (yecc-generated): shift only on 'feature'; anything else is a
%% syntax error.
yeccpars2_6(S, feature, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 10, Ss, Stack, T, Ts, Tzr);
yeccpars2_6(_, _, _, _, T, _, _) ->
yeccerror(T).
%% State 7: reduce, goto 'tagged_feature_block'.
yeccpars2_7(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_tagged_feature_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 8: reduce, goto 'invalids'.
yeccpars2_8(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_8_(Stack),
yeccgoto_invalids(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 9: shift on background_tag; otherwise reduce, goto 'background_tags'.
yeccpars2_9(S, background_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 9, Ss, Stack, T, Ts, Tzr);
yeccpars2_9(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_9_(Stack),
yeccgoto_background_tags(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 10: shift on background/content/rule/scenario/scenario_outline/
%% scenario_outline_tag/scenario_tag; otherwise reduce, goto 'feature_block'.
yeccpars2_10(S, background, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 28, Ss, Stack, T, Ts, Tzr);
yeccpars2_10(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_10(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_10(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_10(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_10(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_10(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_10(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_10_(Stack),
yeccgoto_feature_block(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 11: shift on feature_tag; otherwise reduce, goto 'feature_tags'.
yeccpars2_11(S, feature_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 11, Ss, Stack, T, Ts, Tzr);
yeccpars2_11(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_11_(Stack),
yeccgoto_feature_tags(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 12: shift on language; otherwise goto 'i18n'.
yeccpars2_12(S, language, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 16, Ss, Stack, T, Ts, Tzr);
yeccpars2_12(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_i18n(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 13: shift on rule_tag; otherwise reduce, goto 'rule_tags'.
yeccpars2_13(S, rule_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 13, Ss, Stack, T, Ts, Tzr);
yeccpars2_13(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_13_(Stack),
yeccgoto_rule_tags(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 14 (yecc-generated): pop one state, reduce, goto 'rule_tags'.
yeccpars2_14(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_14_(Stack),
yeccgoto_rule_tags(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 15: pop one state, reduce, goto 'invalids'.
yeccpars2_15(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_15_(Stack),
yeccgoto_invalids(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 16: reduce, goto 'i18n'.
yeccpars2_16(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_i18n(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 17: pop one state, reduce, goto 'feature_tags'.
yeccpars2_17(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_17_(Stack),
yeccgoto_feature_tags(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 18: shift on scenario/scenario_outline/scenario_outline_tag/
%% scenario_tag; otherwise reduce, goto 'scenario_blocks'.
yeccpars2_18(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_18(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_18(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_18(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_18(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_18_(Stack),
yeccgoto_scenario_blocks(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccpars2_19/7}).
%% State 19: shift only on 'scenario'; anything else is a syntax error.
yeccpars2_19(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_19(_, _, _, _, T, _, _) ->
yeccerror(T).
-dialyzer({nowarn_function, yeccpars2_20/7}).
%% State 20: shift only on 'scenario_outline'; anything else is a syntax error.
yeccpars2_20(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_20(_, _, _, _, T, _, _) ->
yeccerror(T).
%% State 21: reduce, goto 'tagged_scenario_block'.
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_tagged_scenario_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 22: shift on 'rule'; otherwise pop one state, reduce, goto
%% 'feature_block'.
yeccpars2_22(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_22(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_22_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 23: reduce, goto 'tagged_scenario_block'.
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_tagged_scenario_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 24: pop one state, reduce, goto 'feature_block'.
yeccpars2_24(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_24_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 25 (yecc-generated): shift on 'rule'; otherwise reduce, goto
%% 'rule_blocks'.
yeccpars2_25(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_25(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_25_(Stack),
yeccgoto_rule_blocks(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 26: shift on background/rule/scenario/scenario_outline/
%% scenario_outline_tag/scenario_tag; otherwise pop one state, reduce,
%% goto 'feature_block'.
yeccpars2_26(S, background, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 28, Ss, Stack, T, Ts, Tzr);
yeccpars2_26(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_26(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_26(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_26(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_26(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_26(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_26_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 27: shift on rule/scenario/scenario_outline/scenario_outline_tag/
%% scenario_tag; otherwise pop one state, reduce, goto 'feature_block'.
yeccpars2_27(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_27(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_27(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_27(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_27(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_27(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_27_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 28: shift on 'and'/but/content/given/then/'when' (step keywords);
%% otherwise reduce, goto 'background_block'.
yeccpars2_28(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_28(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_28(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_28(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_28(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_28(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_28(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_28_(Stack),
yeccgoto_background_block(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 29: shift on content (self-loop collects content lines); otherwise
%% reduce, goto 'contents'.
yeccpars2_29(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_29(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_29_(Stack),
yeccgoto_contents(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 30 (yecc-generated): shift on background/content/scenario/
%% scenario_outline/scenario_outline_tag/scenario_tag; otherwise reduce,
%% goto 'rule_block'.
yeccpars2_30(S, background, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 28, Ss, Stack, T, Ts, Tzr);
yeccpars2_30(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_30(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_30(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_30(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_30(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_30(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_30_(Stack),
yeccgoto_rule_block(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 31: shift on step keywords ('and'/but/content/given/then/'when')
%% or scenarios/scenarios_tag; otherwise reduce, goto 'scenario_block'.
yeccpars2_31(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_31(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_31_(Stack),
yeccgoto_scenario_block(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 32: same shift set as state 31; otherwise reduce, goto
%% 'scenario_outline_block'.
yeccpars2_32(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_32(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_32_(Stack),
yeccgoto_scenario_outline_block(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 33: shift on scenario_outline_tag (self-loop); otherwise reduce,
%% goto 'scenario_outline_tags'.
yeccpars2_33(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_33(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_33_(Stack),
yeccgoto_scenario_outline_tags(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 34: shift on scenario_tag (self-loop); otherwise reduce, goto
%% 'scenario_tags'.
yeccpars2_34(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_34(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_34_(Stack),
yeccgoto_scenario_tags(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 35 (yecc-generated): pop one state, reduce, goto 'scenario_tags'.
yeccpars2_35(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_35_(Stack),
yeccgoto_scenario_tags(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 36: pop one state, reduce, goto 'scenario_outline_tags'.
yeccpars2_36(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_36_(Stack),
yeccgoto_scenario_outline_tags(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 37: reduce, goto 'step_block'.
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_step_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 38: reduce, goto 'step_block'.
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_step_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 39: pop one state, reduce, goto 'scenario_outline_block'.
yeccpars2_39(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_39_(Stack),
yeccgoto_scenario_outline_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 40: shift on scenarios/scenarios_tag; otherwise reduce, goto
%% 'tagged_examples_blocks'.
yeccpars2_40(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_40(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_40(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_40_(Stack),
yeccgoto_tagged_examples_blocks(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 41: shift on scenarios/scenarios_tag; otherwise pop one state,
%% reduce, goto 'scenario_outline_block'.
yeccpars2_41(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_41(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_41(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_41_(Stack),
yeccgoto_scenario_outline_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 42: shift on step keywords ('and'/but/given/then/'when');
%% otherwise goto 'step_blocks'.
yeccpars2_42(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_42(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_42(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_42(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_42(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_42(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_step_blocks(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccpars2_43/7}).
%% State 43: shift only on 'scenarios'; anything else is a syntax error.
yeccpars2_43(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_43(_, _, _, _, T, _, _) ->
yeccerror(T).
%% State 44: reduce, goto 'step_block'.
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_step_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 45: reduce, goto 'tagged_examples_block'.
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_tagged_examples_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 46: shift on step keywords or scenarios/scenarios_tag; otherwise
%% pop one state, reduce, goto 'scenario_outline_block'.
yeccpars2_46(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_46(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_46(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_46(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_46(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_46(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_46(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_46(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_46_(Stack),
yeccgoto_scenario_outline_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 47: reduce, goto 'step_block'.
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_step_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 48: reduce, goto 'step_block'.
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_step_block(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 49 (yecc-generated, 'and' step): shift on 'and'/content/data_table/
%% doc_string; otherwise reduce, goto 'ands'.
yeccpars2_49(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_49(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 82, Ss, Stack, T, Ts, Tzr);
yeccpars2_49(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_49(S, doc_string, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 62, Ss, Stack, T, Ts, Tzr);
yeccpars2_49(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_49_(Stack),
yeccgoto_ands(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 50 ('but' step): shift on but/content/data_table/doc_string;
%% otherwise reduce, goto 'buts'.
yeccpars2_50(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_50(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 79, Ss, Stack, T, Ts, Tzr);
yeccpars2_50(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_50(S, doc_string, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 62, Ss, Stack, T, Ts, Tzr);
yeccpars2_50(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_50_(Stack),
yeccgoto_buts(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 51 ('given' step): shift on content/data_table/doc_string/given;
%% otherwise reduce, goto 'givens'.
yeccpars2_51(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 76, Ss, Stack, T, Ts, Tzr);
yeccpars2_51(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_51(S, doc_string, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 62, Ss, Stack, T, Ts, Tzr);
yeccpars2_51(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_51(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_51_(Stack),
yeccgoto_givens(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 52 (examples): shift on content/data_table; otherwise reduce,
%% goto 'examples_block'.
yeccpars2_52(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_52(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_52(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_52_(Stack),
yeccgoto_examples_block(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 53: shift on scenarios_tag (self-loop); otherwise reduce, goto
%% 'scenarios_tags'.
yeccpars2_53(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_53(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_53_(Stack),
yeccgoto_scenarios_tags(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 54 ('then' step): shift on content/data_table/doc_string/then;
%% otherwise reduce, goto 'thens'.
yeccpars2_54(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 69, Ss, Stack, T, Ts, Tzr);
yeccpars2_54(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_54(S, doc_string, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 62, Ss, Stack, T, Ts, Tzr);
yeccpars2_54(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_54(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_54_(Stack),
yeccgoto_thens(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 55 ('when' step): shift on content/data_table/doc_string/'when';
%% otherwise reduce, goto 'whens'.
yeccpars2_55(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 60, Ss, Stack, T, Ts, Tzr);
yeccpars2_55(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_55(S, doc_string, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 62, Ss, Stack, T, Ts, Tzr);
yeccpars2_55(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_55(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_55_(Stack),
yeccgoto_whens(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 56 (yecc-generated): pop one state, reduce, goto 'whens'.
yeccpars2_56(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_56_(Stack),
yeccgoto_whens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 57: pop one state, reduce, goto 'whens'.
yeccpars2_57(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_57_(Stack),
yeccgoto_whens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 58: reduce, goto 'step_arg'.
yeccpars2_58(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccgoto_step_arg(hd(Ss), Cat, Ss, Stack, T, Ts, Tzr).
%% State 59: reduce, goto 'step_arg'.
yeccpars2_59(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_59_(Stack),
yeccgoto_step_arg(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
%% State 60: pop one state, reduce, goto 'whens'.
yeccpars2_60(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_60_(Stack),
yeccgoto_whens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 61: shift on data_table (self-loop collects rows); otherwise
%% reduce, goto 'datatable_rows'.
yeccpars2_61(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_61(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
NewStack = yeccpars2_61_(Stack),
yeccgoto_datatable_rows(hd(Ss), Cat, Ss, NewStack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccpars2_62/7}).
%% State 62 (doc string opened): shift on content or the closing doc_string;
%% anything else is a syntax error.
yeccpars2_62(S, content, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 29, Ss, Stack, T, Ts, Tzr);
yeccpars2_62(S, doc_string, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 64, Ss, Stack, T, Ts, Tzr);
yeccpars2_62(_, _, _, _, T, _, _) ->
yeccerror(T).
-dialyzer({nowarn_function, yeccpars2_63/7}).
%% State 63: shift only on the closing doc_string; anything else is a
%% syntax error.
yeccpars2_63(S, doc_string, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 65, Ss, Stack, T, Ts, Tzr);
yeccpars2_63(_, _, _, _, T, _, _) ->
yeccerror(T).
%% State 64: pop one state, reduce, goto 'docstring'.
yeccpars2_64(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_64_(Stack),
yeccgoto_docstring(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 65: pop two states, reduce, goto 'docstring'.
yeccpars2_65(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_65_(Stack),
yeccgoto_docstring(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 66: pop one state, reduce, goto 'datatable_rows'.
yeccpars2_66(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_66_(Stack),
yeccgoto_datatable_rows(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 67: pop one state, reduce, goto 'thens'.
yeccpars2_67(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_67_(Stack),
yeccgoto_thens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 68: pop one state, reduce, goto 'thens'.
yeccpars2_68(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_68_(Stack),
yeccgoto_thens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 69: pop one state, reduce, goto 'thens'.
yeccpars2_69(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_69_(Stack),
yeccgoto_thens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 70 (yecc-generated): pop one state, reduce, goto 'scenarios_tags'.
yeccpars2_70(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_70_(Stack),
yeccgoto_scenarios_tags(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 71: pop one state, reduce, goto 'examples_block'.
yeccpars2_71(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_71_(Stack),
yeccgoto_examples_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 72: shift on data_table; otherwise pop one state, reduce, goto
%% 'examples_block'.
yeccpars2_72(S, data_table, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 61, Ss, Stack, T, Ts, Tzr);
yeccpars2_72(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_72_(Stack),
yeccgoto_examples_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 73: pop two states, reduce, goto 'examples_block'.
yeccpars2_73(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_73_(Stack),
yeccgoto_examples_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 74: pop one state, reduce, goto 'givens'.
yeccpars2_74(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_74_(Stack),
yeccgoto_givens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 75: pop one state, reduce, goto 'givens'.
yeccpars2_75(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_75_(Stack),
yeccgoto_givens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 76: pop one state, reduce, goto 'givens'.
yeccpars2_76(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_76_(Stack),
yeccgoto_givens(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 77: pop one state, reduce, goto 'buts'.
yeccpars2_77(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_77_(Stack),
yeccgoto_buts(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 78: pop one state, reduce, goto 'buts'.
yeccpars2_78(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_78_(Stack),
yeccgoto_buts(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 79: pop one state, reduce, goto 'buts'.
yeccpars2_79(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_79_(Stack),
yeccgoto_buts(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 80: pop one state, reduce, goto 'ands'.
yeccpars2_80(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_80_(Stack),
yeccgoto_ands(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 81: pop one state, reduce, goto 'ands'.
yeccpars2_81(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_81_(Stack),
yeccgoto_ands(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 82: pop one state, reduce, goto 'ands'.
yeccpars2_82(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_82_(Stack),
yeccgoto_ands(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 83: pop two states, reduce, goto 'scenario_outline_block'.
yeccpars2_83(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_83_(Stack),
yeccgoto_scenario_outline_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 84: shift on scenarios/scenarios_tag; otherwise pop two states,
%% reduce, goto 'scenario_outline_block'.
yeccpars2_84(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_84(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_84(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_84_(Stack),
yeccgoto_scenario_outline_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 85 (yecc-generated): pop three states, reduce, goto
%% 'scenario_outline_block'.
yeccpars2_85(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_|Nss] = Ss,
NewStack = yeccpars2_85_(Stack),
yeccgoto_scenario_outline_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 86: pop one state, reduce, goto 'tagged_examples_block'.
yeccpars2_86(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_86_(Stack),
yeccgoto_tagged_examples_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 87: pop one state, reduce, goto 'step_blocks'.
yeccpars2_87(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_87_(Stack),
yeccgoto_step_blocks(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 88: pop two states, reduce, goto 'scenario_outline_block'.
yeccpars2_88(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_88_(Stack),
yeccgoto_scenario_outline_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 89: pop one state, reduce, goto 'tagged_examples_blocks'.
yeccpars2_89(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_89_(Stack),
yeccgoto_tagged_examples_blocks(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 90: pop one state, reduce, goto 'scenario_block'.
yeccpars2_90(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_90_(Stack),
yeccgoto_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 91: shift on scenarios/scenarios_tag; otherwise pop one state,
%% reduce, goto 'scenario_block'.
yeccpars2_91(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_91(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_91(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_91_(Stack),
yeccgoto_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 92: shift on step keywords ('and'/but/given/then/'when') or
%% scenarios/scenarios_tag; otherwise pop one state, reduce, goto
%% 'scenario_block'.
yeccpars2_92(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_92(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_92(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_92(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_92(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_92(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_92(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_92(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_92_(Stack),
yeccgoto_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% State 93: pop two states, reduce, goto 'scenario_block'.
yeccpars2_93(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_93_(Stack),
yeccgoto_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_94(S, scenarios, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 52, Ss, Stack, T, Ts, Tzr);
yeccpars2_94(S, scenarios_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 53, Ss, Stack, T, Ts, Tzr);
yeccpars2_94(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_94_(Stack),
yeccgoto_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_95(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_|Nss] = Ss,
NewStack = yeccpars2_95_(Stack),
yeccgoto_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_96(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_96_(Stack),
yeccgoto_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_97(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_97_(Stack),
yeccgoto_rule_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_98(S, background, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 28, Ss, Stack, T, Ts, Tzr);
yeccpars2_98(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_98(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_98(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_98(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_98(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_98_(Stack),
yeccgoto_rule_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_99(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_99(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_99(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_99(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_99(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_99_(Stack),
yeccgoto_rule_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_100(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_100_(Stack),
yeccgoto_rule_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_101(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_101_(Stack),
yeccgoto_rule_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_102(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_102(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_102(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_102(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_102(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_102_(Stack),
yeccgoto_rule_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_103(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_|Nss] = Ss,
NewStack = yeccpars2_103_(Stack),
yeccgoto_rule_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_104(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_104_(Stack),
yeccgoto_contents(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_105(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_105_(Stack),
yeccgoto_background_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_106(S, 'and', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 49, Ss, Stack, T, Ts, Tzr);
yeccpars2_106(S, but, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 50, Ss, Stack, T, Ts, Tzr);
yeccpars2_106(S, given, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 51, Ss, Stack, T, Ts, Tzr);
yeccpars2_106(S, then, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 54, Ss, Stack, T, Ts, Tzr);
yeccpars2_106(S, 'when', Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 55, Ss, Stack, T, Ts, Tzr);
yeccpars2_106(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_106_(Stack),
yeccgoto_background_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_107(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_107_(Stack),
yeccgoto_background_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_108(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_108(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_108_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_109(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_109_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_110(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_|Nss] = Ss,
NewStack = yeccpars2_110_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_111(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_111(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_111_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_112(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_112_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_113(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_113(S, scenario, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 31, Ss, Stack, T, Ts, Tzr);
yeccpars2_113(S, scenario_outline, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 32, Ss, Stack, T, Ts, Tzr);
yeccpars2_113(S, scenario_outline_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 33, Ss, Stack, T, Ts, Tzr);
yeccpars2_113(S, scenario_tag, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 34, Ss, Stack, T, Ts, Tzr);
yeccpars2_113(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_113_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_114(S, rule, Ss, Stack, T, Ts, Tzr) ->
yeccpars1(S, 30, Ss, Stack, T, Ts, Tzr);
yeccpars2_114(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_|Nss] = Ss,
NewStack = yeccpars2_114_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_115(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_|Nss] = Ss,
NewStack = yeccpars2_115_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_116(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_,_|Nss] = Ss,
NewStack = yeccpars2_116_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_117(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_,_|Nss] = Ss,
NewStack = yeccpars2_117_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_118(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_118_(Stack),
yeccgoto_rule_blocks(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_119(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_,_|Nss] = Ss,
NewStack = yeccpars2_119_(Stack),
yeccgoto_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_120(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_120_(Stack),
yeccgoto_tagged_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_121(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_121_(Stack),
yeccgoto_tagged_scenario_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_122(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_122_(Stack),
yeccgoto_scenario_blocks(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_123(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_123_(Stack),
yeccgoto_background_tags(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_124(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_124_(Stack),
yeccgoto_tagged_feature_block(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
yeccpars2_125(_S, Cat, Ss, Stack, T, Ts, Tzr) ->
[_|Nss] = Ss,
NewStack = yeccpars2_125_(Stack),
yeccgoto_grammar(hd(Nss), Cat, Nss, NewStack, T, Ts, Tzr).
%% ------------------------------------------------------------------
%% yecc-generated GOTO tables (part 1: ands .. rule_block).
%% NOTE(review): machine-generated by yecc — do not edit by hand.
%%
%% yeccgoto_<NonTerminal>(State, ...) selects the parser's next state
%% after a reduction to <NonTerminal> completed while State was on top
%% of the stack. Clauses written as `N=_S` tail-call a further reduce
%% action directly (chained reduction); plain-`N` clauses enter a new
%% state, passing it as the explicit first argument. The dialyzer
%% nowarn attributes suppress warnings about generated clause shapes.
%% ------------------------------------------------------------------
-dialyzer({nowarn_function, yeccgoto_ands/7}).
yeccgoto_ands(28=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_ands(31=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_ands(32=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_ands(42=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_ands(46=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_ands(49=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_81(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_ands(92=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_ands(106=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_48(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_background_block/7}).
yeccgoto_background_block(10, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_27(27, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_background_block(26, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_113(113, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_background_block(30, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_99(99, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_background_block(98, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_102(102, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_background_tags/7}).
yeccgoto_background_tags(0=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_8(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_background_tags(9=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_123(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_buts/7}).
yeccgoto_buts(28=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_buts(31=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_buts(32=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_buts(42=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_buts(46=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_buts(50=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_78(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_buts(92=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_buts(106=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_47(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_contents/7}).
yeccgoto_contents(10, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_26(26, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_contents(28, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_106(106, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_contents(29=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_104(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_contents(30, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_98(98, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_contents(31, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_92(92, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_contents(32, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_46(46, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_contents(52, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_72(72, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_contents(62, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_63(63, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_datatable_rows/7}).
yeccgoto_datatable_rows(49=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_59(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_datatable_rows(50=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_59(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_datatable_rows(51=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_59(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_datatable_rows(52=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_71(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_datatable_rows(54=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_59(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_datatable_rows(55=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_59(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_datatable_rows(61=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_66(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_datatable_rows(72=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_73(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_docstring/7}).
yeccgoto_docstring(49=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_58(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_docstring(50=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_58(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_docstring(51=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_58(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_docstring(54=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_58(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_docstring(55=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_58(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_examples_block/7}).
yeccgoto_examples_block(31=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(32=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(40=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(41=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(43=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_86(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(46=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(84=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(91=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(92=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_examples_block(94=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_45(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_feature_block/7}).
yeccgoto_feature_block(0=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_7(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_feature_block(4=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_7(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_feature_block(6=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_124(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_feature_tags/7}).
yeccgoto_feature_tags(0, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_6(6, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_feature_tags(4, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_6(6, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_feature_tags(11=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_17(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_givens/7}).
yeccgoto_givens(28=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_givens(31=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_givens(32=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_givens(42=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_givens(46=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_givens(51=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_75(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_givens(92=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_givens(106=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_44(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_grammar/7}).
yeccgoto_grammar(0, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_5(5, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_i18n/7}).
yeccgoto_i18n(0, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_4(4, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_i18n(12=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_15(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_invalids/7}).
yeccgoto_invalids(0=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_3(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_rule_block/7}).
yeccgoto_rule_block(10, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(22, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(25, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(26, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(27, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(108, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(111, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(113, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_block(114, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_25(25, Cat, Ss, Stack, T, Ts, Tzr).
%% ------------------------------------------------------------------
%% yecc-generated GOTO tables (part 2: rule_blocks .. step_arg).
%% NOTE(review): machine-generated by yecc — do not edit by hand.
%%
%% yeccgoto_<NonTerminal>(State, ...) selects the parser's next state
%% after a reduction to <NonTerminal> while State was on top of the
%% stack: `N=_S` clauses chain directly into another reduce action;
%% plain-`N` clauses enter a new state (passed as first argument).
%% ------------------------------------------------------------------
-dialyzer({nowarn_function, yeccgoto_rule_blocks/7}).
yeccgoto_rule_blocks(10=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_24(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(22=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_119(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(25=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_118(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(26=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_112(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(27=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_109(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(108=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_110(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(111=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_117(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(113=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_115(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_blocks(114=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_116(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_rule_tags/7}).
yeccgoto_rule_tags(0=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_2(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_rule_tags(13=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_14(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_scenario_block/7}).
yeccgoto_scenario_block(10=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(18=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(19=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_121(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(26=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(27=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(30=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(98=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(99=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(102=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_block(113=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_23(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_scenario_blocks/7}).
yeccgoto_scenario_blocks(10, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_22(22, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(18=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_122(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(26, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_111(111, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(27, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_108(108, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(30=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_97(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(98=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_101(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(99=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_100(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(102=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_103(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_blocks(113, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_114(114, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_scenario_outline_block/7}).
yeccgoto_scenario_outline_block(10=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(18=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(20=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_120(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(26=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(27=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(30=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(98=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(99=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(102=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_block(113=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_21(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_scenario_outline_tags/7}).
yeccgoto_scenario_outline_tags(10, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(18, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(26, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(27, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(30, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(33=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_36(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(98, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(99, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(102, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_outline_tags(113, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_20(20, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_scenario_tags/7}).
yeccgoto_scenario_tags(10, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(18, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(26, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(27, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(30, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(34=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_35(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(98, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(99, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(102, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenario_tags(113, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_19(19, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_scenarios_tags/7}).
yeccgoto_scenarios_tags(31, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(32, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(40, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(41, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(46, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(53=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_70(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(84, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(91, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(92, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_scenarios_tags(94, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_43(43, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_step_arg/7}).
yeccgoto_step_arg(49=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_80(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_arg(50=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_77(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_arg(51=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_74(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_arg(54=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_68(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_arg(55=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_57(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_step_block/7}).
yeccgoto_step_block(28, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_42(42, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_block(31, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_42(42, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_block(32, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_42(42, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_block(42, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_42(42, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_block(46, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_42(42, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_block(92, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_42(42, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_block(106, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_42(42, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_step_blocks/7}).
yeccgoto_step_blocks(28=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_105(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_blocks(31, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_91(91, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_blocks(32, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_41(41, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_blocks(42=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_87(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_blocks(46, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_84(84, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_blocks(92, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_94(94, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_step_blocks(106=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_107(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_tagged_examples_block/7}).
yeccgoto_tagged_examples_block(31, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(32, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(40, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(41, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(46, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(84, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(91, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(92, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_block(94, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_40(40, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_tagged_examples_blocks/7}).
yeccgoto_tagged_examples_blocks(31=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_90(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(32=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_39(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(40=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_89(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(41=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_88(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(46=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_83(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(84=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_85(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(91=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_96(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(92=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_93(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_examples_blocks(94=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_95(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_tagged_feature_block/7}).
yeccgoto_tagged_feature_block(0=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_1(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_feature_block(4=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_125(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_tagged_scenario_block/7}).
%% Machine-generated by yecc -- do not edit by hand.
%% Goto-table entry for the `tagged_scenario_block' nonterminal.
%% Every source state transfers to parser state 18 after this
%% nonterminal is reduced.
yeccgoto_tagged_scenario_block(10, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(18, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(26, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(27, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(30, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(98, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(99, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(102, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_tagged_scenario_block(113, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_18(18, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_thens/7}).
%% Machine-generated by yecc -- do not edit by hand.
%% Goto-table entry for the `thens' nonterminal: most source states
%% continue in state 38; state 54 continues in state 67.
yeccgoto_thens(28=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_thens(31=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_thens(32=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_thens(42=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_thens(46=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_thens(54=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_67(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_thens(92=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_thens(106=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_38(_S, Cat, Ss, Stack, T, Ts, Tzr).
-dialyzer({nowarn_function, yeccgoto_whens/7}).
%% Machine-generated by yecc -- do not edit by hand.
%% Goto-table entry for the `whens' nonterminal: most source states
%% continue in state 37; state 55 continues in state 56.
yeccgoto_whens(28=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_whens(31=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_whens(32=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_whens(42=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_whens(46=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_whens(55=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_56(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_whens(92=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr);
yeccgoto_whens(106=_S, Cat, Ss, Stack, T, Ts, Tzr) ->
yeccpars2_37(_S, Cat, Ss, Stack, T, Ts, Tzr).
-compile({inline,yeccpars2_2_/1}).
-file("src/parser.yrl", 90).
yeccpars2_2_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
return_error ( __1 , tagged_rule )
end | __Stack].
-compile({inline,yeccpars2_3_/1}).
-file("src/parser.yrl", 85).
yeccpars2_3_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ ]
end | __Stack].
-compile({inline,yeccpars2_4_/1}).
-file("src/parser.yrl", 91).
yeccpars2_4_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
return_error ( __1 , missing_feature_token )
end | __Stack].
-compile({inline,yeccpars2_8_/1}).
-file("src/parser.yrl", 89).
yeccpars2_8_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
return_error ( __1 , tagged_background )
end | __Stack].
-compile({inline,yeccpars2_9_/1}).
-file("src/parser.yrl", 254).
yeccpars2_9_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_10_/1}).
-file("src/parser.yrl", 135).
yeccpars2_10_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , [ ] , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_11_/1}).
-file("src/parser.yrl", 252).
yeccpars2_11_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_13_/1}).
-file("src/parser.yrl", 258).
yeccpars2_13_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_14_/1}).
-file("src/parser.yrl", 257).
yeccpars2_14_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_15_/1}).
-file("src/parser.yrl", 88).
yeccpars2_15_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
return_error ( __2 , multiple_language_tokens )
end | __Stack].
-compile({inline,yeccpars2_17_/1}).
-file("src/parser.yrl", 251).
yeccpars2_17_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_18_/1}).
-file("src/parser.yrl", 162).
yeccpars2_18_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ __1 ]
end | __Stack].
-compile({inline,yeccpars2_22_/1}).
-file("src/parser.yrl", 131).
yeccpars2_22_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , [ ] , __2 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_24_/1}).
-file("src/parser.yrl", 132).
yeccpars2_24_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , [ ] , [ ] , __2 )
end | __Stack].
-compile({inline,yeccpars2_25_/1}).
-file("src/parser.yrl", 142).
yeccpars2_25_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ __1 ]
end | __Stack].
-compile({inline,yeccpars2_26_/1}).
-file("src/parser.yrl", 129).
yeccpars2_26_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , [ ] , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_27_/1}).
-file("src/parser.yrl", 130).
yeccpars2_27_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , __2 , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_28_/1}).
-file("src/parser.yrl", 158).
yeccpars2_28_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
background_block ( __1 , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_29_/1}).
-file("src/parser.yrl", 248).
yeccpars2_29_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ __1 ]
end | __Stack].
-compile({inline,yeccpars2_30_/1}).
-file("src/parser.yrl", 147).
yeccpars2_30_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , [ ] , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_31_/1}).
-file("src/parser.yrl", 176).
yeccpars2_31_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_32_/1}).
-file("src/parser.yrl", 185).
yeccpars2_32_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_33_/1}).
-file("src/parser.yrl", 256).
yeccpars2_33_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_34_/1}).
-file("src/parser.yrl", 260).
yeccpars2_34_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_35_/1}).
-file("src/parser.yrl", 259).
yeccpars2_35_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_36_/1}).
-file("src/parser.yrl", 255).
yeccpars2_36_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_39_/1}).
-file("src/parser.yrl", 184).
yeccpars2_39_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , [ ] , __2 )
end | __Stack].
-compile({inline,yeccpars2_40_/1}).
-file("src/parser.yrl", 189).
yeccpars2_40_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ __1 ]
end | __Stack].
-compile({inline,yeccpars2_41_/1}).
-file("src/parser.yrl", 183).
yeccpars2_41_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , __2 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_46_/1}).
-file("src/parser.yrl", 182).
yeccpars2_46_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_49_/1}).
-file("src/parser.yrl", 228).
yeccpars2_49_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_50_/1}).
-file("src/parser.yrl", 232).
yeccpars2_50_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_51_/1}).
-file("src/parser.yrl", 216).
yeccpars2_51_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_52_/1}).
-file("src/parser.yrl", 197).
yeccpars2_52_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
examples_block ( __1 , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_53_/1}).
-file("src/parser.yrl", 262).
yeccpars2_53_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_54_/1}).
-file("src/parser.yrl", 224).
yeccpars2_54_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_55_/1}).
-file("src/parser.yrl", 220).
yeccpars2_55_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_56_/1}).
-file("src/parser.yrl", 218).
yeccpars2_56_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_57_/1}).
-file("src/parser.yrl", 219).
yeccpars2_57_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 , __2 ) ]
end | __Stack].
-compile({inline,yeccpars2_59_/1}).
-file("src/parser.yrl", 235).
yeccpars2_59_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
data_table_block ( __1 )
end | __Stack].
-compile({inline,yeccpars2_60_/1}).
-file("src/parser.yrl", 209).
yeccpars2_60_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
return_error ( __2 , plain_content_within_step_block )
end | __Stack].
-compile({inline,yeccpars2_61_/1}).
-file("src/parser.yrl", 244).
yeccpars2_61_(__Stack0) ->
[__1 | __Stack] = __Stack0,
[begin
[ data_table_component ( __1 ) ]
end | __Stack].
-compile({inline,yeccpars2_64_/1}).
-file("src/parser.yrl", 238).
yeccpars2_64_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
doc_string_component ( __1 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_65_/1}).
-file("src/parser.yrl", 239).
yeccpars2_65_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
doc_string_component ( __1 , __2 )
end | __Stack].
-compile({inline,yeccpars2_66_/1}).
-file("src/parser.yrl", 243).
yeccpars2_66_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ data_table_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_67_/1}).
-file("src/parser.yrl", 222).
yeccpars2_67_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_68_/1}).
-file("src/parser.yrl", 223).
yeccpars2_68_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 , __2 ) ]
end | __Stack].
-compile({inline,yeccpars2_69_/1}).
-file("src/parser.yrl", 210).
yeccpars2_69_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
return_error ( __2 , plain_content_within_step_block )
end | __Stack].
-compile({inline,yeccpars2_70_/1}).
-file("src/parser.yrl", 261).
yeccpars2_70_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_71_/1}).
-file("src/parser.yrl", 196).
yeccpars2_71_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
examples_block ( __1 , [ ] , __2 )
end | __Stack].
-compile({inline,yeccpars2_72_/1}).
-file("src/parser.yrl", 195).
yeccpars2_72_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
examples_block ( __1 , __2 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_73_/1}).
-file("src/parser.yrl", 194).
yeccpars2_73_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
examples_block ( __1 , __2 , __3 )
end | __Stack].
-compile({inline,yeccpars2_74_/1}).
-file("src/parser.yrl", 215).
yeccpars2_74_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 , __2 ) ]
end | __Stack].
-compile({inline,yeccpars2_75_/1}).
-file("src/parser.yrl", 214).
yeccpars2_75_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_76_/1}).
-file("src/parser.yrl", 208).
yeccpars2_76_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
return_error ( __2 , plain_content_within_step_block )
end | __Stack].
-compile({inline,yeccpars2_77_/1}).
-file("src/parser.yrl", 231).
yeccpars2_77_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 , __2 ) ]
end | __Stack].
-compile({inline,yeccpars2_78_/1}).
-file("src/parser.yrl", 230).
yeccpars2_78_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_79_/1}).
-file("src/parser.yrl", 212).
yeccpars2_79_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
return_error ( __2 , plain_content_within_step_block )
end | __Stack].
-compile({inline,yeccpars2_80_/1}).
-file("src/parser.yrl", 227).
yeccpars2_80_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 , __2 ) ]
end | __Stack].
-compile({inline,yeccpars2_81_/1}).
-file("src/parser.yrl", 226).
yeccpars2_81_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ step_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_82_/1}).
-file("src/parser.yrl", 211).
yeccpars2_82_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
return_error ( __2 , plain_content_within_step_block )
end | __Stack].
-compile({inline,yeccpars2_83_/1}).
-file("src/parser.yrl", 180).
yeccpars2_83_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , [ ] , __3 )
end | __Stack].
-compile({inline,yeccpars2_84_/1}).
-file("src/parser.yrl", 179).
yeccpars2_84_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , __3 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_85_/1}).
-file("src/parser.yrl", 178).
yeccpars2_85_(__Stack0) ->
[__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , __3 , __4 )
end | __Stack].
-compile({inline,yeccpars2_86_/1}).
-file("src/parser.yrl", 191).
yeccpars2_86_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
set_tags_block ( __2 , __1 )
end | __Stack].
-compile({inline,yeccpars2_87_/1}).
-file("src/parser.yrl", 200).
yeccpars2_87_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
__1 ++ __2
end | __Stack].
-compile({inline,yeccpars2_88_/1}).
-file("src/parser.yrl", 181).
yeccpars2_88_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , __2 , __3 )
end | __Stack].
-compile({inline,yeccpars2_89_/1}).
-file("src/parser.yrl", 188).
yeccpars2_89_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ __1 | __2 ]
end | __Stack].
-compile({inline,yeccpars2_90_/1}).
-file("src/parser.yrl", 175).
yeccpars2_90_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , [ ] , __2 )
end | __Stack].
-compile({inline,yeccpars2_91_/1}).
-file("src/parser.yrl", 174).
yeccpars2_91_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , __2 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_92_/1}).
-file("src/parser.yrl", 173).
yeccpars2_92_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_93_/1}).
-file("src/parser.yrl", 171).
yeccpars2_93_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , [ ] , __3 )
end | __Stack].
-compile({inline,yeccpars2_94_/1}).
-file("src/parser.yrl", 170).
yeccpars2_94_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , __3 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_95_/1}).
-file("src/parser.yrl", 169).
yeccpars2_95_(__Stack0) ->
[__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , __2 , __3 , __4 )
end | __Stack].
-compile({inline,yeccpars2_96_/1}).
-file("src/parser.yrl", 172).
yeccpars2_96_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
scenario_block ( __1 , [ ] , __2 , __3 )
end | __Stack].
-compile({inline,yeccpars2_97_/1}).
-file("src/parser.yrl", 151).
yeccpars2_97_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , [ ] , [ ] , __2 )
end | __Stack].
-compile({inline,yeccpars2_98_/1}).
-file("src/parser.yrl", 146).
yeccpars2_98_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , __2 , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_99_/1}).
-file("src/parser.yrl", 152).
yeccpars2_99_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , [ ] , __2 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_100_/1}).
-file("src/parser.yrl", 149).
yeccpars2_100_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , [ ] , __2 , __3 )
end | __Stack].
-compile({inline,yeccpars2_101_/1}).
-file("src/parser.yrl", 150).
yeccpars2_101_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , __2 , [ ] , __3 )
end | __Stack].
-compile({inline,yeccpars2_102_/1}).
-file("src/parser.yrl", 145).
yeccpars2_102_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , __2 , __3 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_103_/1}).
-file("src/parser.yrl", 144).
yeccpars2_103_(__Stack0) ->
[__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
rule_block ( __1 , __2 , __3 , __4 )
end | __Stack].
-compile({inline,yeccpars2_104_/1}).
-file("src/parser.yrl", 247).
yeccpars2_104_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ __1 | __2 ]
end | __Stack].
-compile({inline,yeccpars2_105_/1}).
-file("src/parser.yrl", 156).
yeccpars2_105_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
background_block ( __1 , [ ] , __2 )
end | __Stack].
-compile({inline,yeccpars2_106_/1}).
-file("src/parser.yrl", 157).
yeccpars2_106_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
background_block ( __1 , __2 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_107_/1}).
-file("src/parser.yrl", 155).
yeccpars2_107_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
background_block ( __1 , __2 , __3 )
end | __Stack].
-compile({inline,yeccpars2_108_/1}).
-file("src/parser.yrl", 118).
yeccpars2_108_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , __2 , __3 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_109_/1}).
-file("src/parser.yrl", 122).
yeccpars2_109_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , __2 , [ ] , __3 )
end | __Stack].
-compile({inline,yeccpars2_110_/1}).
-file("src/parser.yrl", 113).
yeccpars2_110_(__Stack0) ->
[__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , __2 , __3 , __4 )
end | __Stack].
-compile({inline,yeccpars2_111_/1}).
-file("src/parser.yrl", 117).
yeccpars2_111_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , [ ] , __3 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_112_/1}).
-file("src/parser.yrl", 123).
yeccpars2_112_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , [ ] , [ ] , __3 )
end | __Stack].
-compile({inline,yeccpars2_113_/1}).
-file("src/parser.yrl", 119).
yeccpars2_113_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , __3 , [ ] , [ ] )
end | __Stack].
-compile({inline,yeccpars2_114_/1}).
-file("src/parser.yrl", 110).
yeccpars2_114_(__Stack0) ->
[__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , __3 , __4 , [ ] )
end | __Stack].
-compile({inline,yeccpars2_115_/1}).
-file("src/parser.yrl", 111).
yeccpars2_115_(__Stack0) ->
[__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , __3 , [ ] , __4 )
end | __Stack].
-compile({inline,yeccpars2_116_/1}).
-file("src/parser.yrl", 107).
yeccpars2_116_(__Stack0) ->
[__5,__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , __3 , __4 , __5 )
end | __Stack].
-compile({inline,yeccpars2_117_/1}).
-file("src/parser.yrl", 112).
yeccpars2_117_(__Stack0) ->
[__4,__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , __2 , [ ] , __3 , __4 )
end | __Stack].
-compile({inline,yeccpars2_118_/1}).
-file("src/parser.yrl", 141).
yeccpars2_118_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ __1 | __2 ]
end | __Stack].
-compile({inline,yeccpars2_119_/1}).
-file("src/parser.yrl", 126).
yeccpars2_119_(__Stack0) ->
[__3,__2,__1 | __Stack] = __Stack0,
[begin
feature_block ( __1 , [ ] , [ ] , __2 , __3 )
end | __Stack].
-compile({inline,yeccpars2_120_/1}).
-file("src/parser.yrl", 166).
yeccpars2_120_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
set_tags_block ( __2 , __1 )
end | __Stack].
-compile({inline,yeccpars2_121_/1}).
-file("src/parser.yrl", 164).
yeccpars2_121_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
set_tags_block ( __2 , __1 )
end | __Stack].
-compile({inline,yeccpars2_122_/1}).
-file("src/parser.yrl", 161).
yeccpars2_122_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ __1 | __2 ]
end | __Stack].
-compile({inline,yeccpars2_123_/1}).
-file("src/parser.yrl", 253).
yeccpars2_123_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
[ tag_component ( __1 ) | __2 ]
end | __Stack].
-compile({inline,yeccpars2_124_/1}).
-file("src/parser.yrl", 99).
yeccpars2_124_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
set_tags_block ( __2 , __1 )
end | __Stack].
-compile({inline,yeccpars2_125_/1}).
-file("src/parser.yrl", 96).
yeccpars2_125_(__Stack0) ->
[__2,__1 | __Stack] = __Stack0,
[begin
set_language ( __2 , __1 )
end | __Stack].
-file("src/parser.yrl", 438). | src/parser.erl | 0.517571 | 0.446314 | parser.erl | starcoder |
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil; fill-column: 92-*-
%% ex: ts=4 sw=4 et
%% Copyright 2013 Opscode, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%% @doc Utility functions for performing work in parallel.
-module(chef_parallel).
-export([
parallelize_all_with_timeout/5
]).
%% @doc Parallelizes the map of `Fun' across `Items', with each
%% invocation subject to a `Timeout' (specified in ms).
%%
%% This function is intended for use with high-latency, IO-bound
%% operations (such as HTTP requests). `Fanout' processes are
%% started, each of which will process a single element of `Items'.
%% If the call takes longer than `Timeout' ms to return,
%% `TimeoutHandler' is invoked with the offending item; the return
%% value will be the value present in the final list of results.
%% There will always be `Fanout' processes running at all points,
%% providing for more even throughput.
%%
%% `Fun' should be able to handle any errors or exceptions that are
%% thrown internally. Note that every item in the list will be
%% processed; there is no short-circuiting if one invocation of `Fun'
%% fails or times out.
-spec parallelize_all_with_timeout(Items :: list(),
                                   Fun :: fun((any()) -> any()),
                                   Fanout :: pos_integer(),
                                   Timeout :: pos_integer(),
                                   TimeoutHandler :: fun((any()) -> any())) -> list().
parallelize_all_with_timeout(Items, Fun, Fanout, Timeout, TimeoutHandler) ->
    %% Each list item is handled by spawning one extra process to run
    %% `Fun' so that we can enforce a *per-item* timeout ourselves;
    %% erlware_commons only exposes a timeout over the whole list
    %% operation via the "malt" configuration.
    MapFun =
        fun(Item) ->
                Caller = self(),
                %% Unique token so the receives below cannot match any
                %% other message this (reused) plist worker process may
                %% receive from elsewhere in the system.
                Token = erlang:make_ref(),
                Worker = proc_lib:spawn_link(
                           fun() ->
                                   Result = Fun(Item),
                                   Caller ! {Token, Result, self()}
                           end),
                receive
                    {Token, Response, Worker} ->
                        Response
                after Timeout ->
                        %% Unlink first so killing the worker does not
                        %% take this process down with it.
                        erlang:unlink(Worker),
                        %% Monitor before killing so we can wait for the
                        %% worker to actually be gone...
                        MRef = erlang:monitor(process, Worker),
                        erlang:exit(Worker, kill),
                        receive
                            {'DOWN', MRef, process, Worker, _} -> ok
                        end,
                        %% ...and then drain any result the worker
                        %% managed to send just before dying.  Signal
                        %% ordering between two processes guarantees
                        %% that, once the DOWN arrives, any result the
                        %% worker sent is already in our mailbox.
                        %% Without this flush a late result would sit in
                        %% the (reused) plist worker's mailbox forever,
                        %% slowing every subsequent selective receive.
                        receive
                            {Token, _, Worker} -> ok
                        after 0 -> ok
                        end,
                        TimeoutHandler(Item)
                end
        end,
    %% `Fanout' processes working on the list, each handling one list
    %% item at a time.  See the erlware_commons ec_plists docs for the
    %% "malt" format.
    ParallelConfig = [1, {processes, Fanout}],
    Results = ec_plists:ftmap(MapFun, Items, ParallelConfig),
    %% Strip the `{value, Term}' wrapper that ec_plists:ftmap/3 adds.
    [Value || {value, Value} <- Results].
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2011 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc A pipe fitting that applies a function to a Riak object, and
%% sends its results downstream.
%%
%% This module is intended as the second half of the emulation layer
%% for running Riak KV MapReduce on top of Riak Pipe. An upstream
%% fitting should read the object out of Riak KV, and then send it to
%% this fitting as a 3-tuple of the form `{ok, RiakObject, KeyData}'.
%% If there was an error reading the object, that can be sent to this
%% fitting as a 3-tuple of the form `{{error, Reason}, {Bucket, Key},
%% KeyData}'. (The {@link riak_kv_pipe_get} fitting conforms to this
%% interface.)
%%
%% This module expects a 2-tuple, `{PhaseSpec, PhaseArg}' as argument.
%% Both elements come directly from the phase definition in the
%% MapReduce query: `{map, PhaseSpec, PhaseArg, Keep}'.
%%
%% When an `ok' input arrives, the function defined by `PhaseSpec' is
%% evaluated as, in pseudocode: `PhaseSpec(RiakObject, KeyData,
%% PhaseArg)'. The result of the function is expected to be a list.
%% Each element of the result list is sent downstream as an output.
%%
%% When an `error' input arrives, the behavior of this fitting depends
%% on whether `PhaseSpec' specifies an Erlang function or a Javascript
%% function. In the case of an Erlang function, `PhaseSpec' will be
%% evaluated as `PhaseSpec({error, Reason}, KeyData, PhaseArg)'. In
%% the case of a Javascript function, if the `Reason' is `not_found',
%% then the output `{not_found, {Bucket, Key}, KeyData}' is sent
%% downstream as output. Other error reasons cause Javascript
%% evaluation to fail.
-module(riak_kv_mrc_map).
-behaviour(riak_pipe_vnode_worker).
-export([init/2,
process/3,
done/1,
link_phase/3,
validate_arg/1]).
-include_lib("riak_kv_js_pools.hrl").
-include_lib("riak_pipe/include/riak_pipe.hrl").
-include_lib("riak_pipe/include/riak_pipe_log.hrl").
-record(state, {p :: riak_pipe_vnode:partition(),
fd :: riak_pipe_fitting:details(),
phase :: riak_kv_mrc_pipe:map_query_fun(),
arg :: term()}).
-opaque state() :: #state{}.
-export_type([state/0]).
-define(DEFAULT_JS_RESERVE_ATTEMPTS, 10).
%% @doc Verify/initialize the phase spec, then stash it -- along with
%% `Partition' and the rest of `FittingDetails' -- in this worker's
%% state for later use while processing.
-spec init(riak_pipe_vnode:partition(), riak_pipe_fitting:details()) ->
         {ok, state()}.
init(Partition, #fitting_details{arg={PhaseSpec, PhaseArg}}=Details) ->
    %% Assertive match on purpose: if init_phase/1 returns an error
    %% tuple, the badmatch raised here is caught inside
    %% riak_pipe_vnode_worker:init/1, making the pipe worker return
    %% `{stop, {init_failed, ...}}'.  A longer-term fix might be to
    %% widen the init callback spec to allow an error tuple.
    {ok, ReadyPhase} = init_phase(PhaseSpec),
    {ok, #state{p=Partition,
                fd=Details,
                phase=ReadyPhase,
                arg=PhaseArg}}.
%% @doc One-time initialization of the phase spec, where possible or
%% needed.  Stored functions (`{jsanon, {Bucket, Key}}' and
%% `{strfun, {Bucket, Key}}') are fetched from Riak KV here, at
%% fitting-worker startup, and `strfun' sources are compiled:
%%
%% <ul>
%% <li>`{jsanon, {Bucket, Key}}' becomes `{jsanon, Source}'</li>
%%
%% <li>`{strfun, {Bucket, Key}}' and `{strfun, Source}' both become
%% `{qfun, Fun}' after compiling (only permitted when the
%% `allow_strfun' app env flag is set)</li>
%% </ul>
%%
%% All other spec forms pass through untouched.
-spec init_phase(PhaseSpec :: term()) ->
         {ok, PhaseSpec :: term()} | {error, Reason :: term()}.
init_phase({Anon, {Bucket, Key}}) when Anon =:= jsanon; Anon =:= strfun ->
    %% look up source for stored functions only once, at worker startup
    {ok, Client} = riak:local_client(),
    case Client:get(Bucket, Key, 1) of
        {ok, Object} ->
            stored_phase(Anon, riak_object:get_value(Object));
        {error, notfound} ->
            {error, {Anon, {notfound, {Bucket, Key}}}}
    end;
init_phase({strfun, Source}) ->
    case app_helper:get_env(riak_kv, allow_strfun, false) of
        true ->
            compile_strfun(Source);
        _ ->
            {error, {strfun, not_allowed}}
    end;
init_phase(Other) ->
    %% other phase types need no initialization
    {ok, Other}.

%% Interpret a function body fetched from Riak KV: jsanon must be a
%% binary; strfun may be a binary or a string and is then compiled via
%% the {strfun, Source} clause above.  Anything else is invalid.
stored_phase(jsanon, Source) when is_binary(Source) ->
    {ok, {jsanon, Source}};
stored_phase(strfun, Source) when is_binary(Source); is_list(Source) ->
    init_phase({strfun, Source});
stored_phase(Anon, Value) ->
    {error, {Anon, {invalid, Value}}}.

%% Compile strfun source to an arity-3 Erlang fun, or report why not.
compile_strfun(Source) ->
    case riak_kv_mrc_pipe:compile_string(Source) of
        {ok, Fun} when is_function(Fun, 3) ->
            {ok, {qfun, Fun}};
        Error ->
            {error, {strfun, {compile_error, Error}}}
    end.
%% @doc Process evaluates the fitting's argument function, and sends
%% output downstream.
%%
%% The map result must be a list; each element is emitted downstream
%% as a separate output.  A non-list result is traced as an error and
%% produces no output.  A `{forward_preflist, _}' result asks the
%% vnode to retry this input elsewhere on the preflist (presumably
%% when map_js/3 cannot get a Javascript VM -- its dispatch handling
%% is defined later in this module; confirm there).  Other errors are
%% traced and the input is skipped.
-spec process(term(), boolean(), state())
-> {ok | forward_preflist, state()}.
process(Input, _Last,
#state{fd=_FittingDetails, phase=Phase, arg=Arg}=State) ->
%% ?T/3 (from riak_pipe_log.hrl) emits a trace event; effectively a
%% no-op unless tracing was requested for this pipe.
?T(_FittingDetails, [map], {mapping, Input}),
case map(Phase, Arg, Input) of
{ok, Results} when is_list(Results) ->
?T(_FittingDetails, [map], {produced, Results}),
%% emit each element downstream (send_results/2 is defined later)
send_results(Results, State);
{ok, _NonListResults} ->
%% map functions are required to return lists; drop anything else
?T(_FittingDetails, [map, error],
{error, {non_list_result, Input}}),
{ok, State};
{forward_preflist, Reason} ->
%% let the vnode retry this input on another preflist member
?T(_FittingDetails, [map], {forward_preflist, Reason}),
{forward_preflist, State};
{error, Error} ->
%% evaluation failed: trace it and continue with the next input
?T(_FittingDetails, [map, error], {error, {Error, Input}}),
{ok, State}
end.
%% @doc Evaluate the map function against a single input. Erlang-side
%% phases are invoked directly; Javascript phases are delegated to
%% map_js/3.
-spec map(riak_kv_mrc_pipe:map_query_fun(), term(), term())
         -> {ok, term()}
          | {forward_preflist, Reason :: term()}
          | {error, Reason :: term()}.
map({modfun, Mod, Fun}, Arg, Raw) ->
    {ok, Mod:Fun(erlang_input(Raw), erlang_keydata(Raw), Arg)};
map({qfun, Fun}, Arg, Raw) ->
    {ok, Fun(erlang_input(Raw), erlang_keydata(Raw), Arg)};
%% {strfun, Source}, {strfun, {Bucket, Key}} and {jsanon, {Bucket, Key}}
%% were all rewritten by init_phase/1, so only the JS forms remain here.
map({jsfun, _Name} = JS, Arg, Input) ->
    map_js(JS, Arg, Input);
map({jsanon, _Source} = JS, Arg, Input) ->
    map_js(JS, Arg, Input).
%% Select which part of the 3-tuple input is handed to the map function:
%% the fetched object on success, or the whole error tuple on failure.
erlang_input(Tuple) ->
    case Tuple of
        {ok, Object, _KeyData} -> Object;
        {{error, _} = Err, _Key, _KeyData} -> Err
    end.
%% Extract the keydata element; works for both ok and error inputs.
erlang_keydata(Input) ->
    {_OkOrError, _Object, KeyData} = Input,
    KeyData.
%% @doc Evaluate Javascript map functions ... if the input is ok.
%% A notfound fetch short-circuits to a single
%% {not_found, {Bucket, Key}, KeyData} result without invoking JS.
map_js(_JS, _Arg, {{error, notfound}, {Bucket, Key}, KeyData}) ->
    {ok, [{not_found,
           {Bucket, Key},
           KeyData}]};
map_js(JS, Arg, {ok, Input, KeyData}) ->
    %% the object is JSON-encoded before being handed to the JS VM
    JSArgs = [riak_object_json:encode(Input), KeyData, Arg],
    JSCall = {JS, JSArgs},
    case riak_kv_js_manager:blocking_dispatch(
           ?JSPOOL_MAP, JSCall, ?DEFAULT_JS_RESERVE_ATTEMPTS) of
        {ok, Results} -> {ok, Results};
        %% no JS VM available here: ask the pipe to retry the input on
        %% another vnode in the preflist
        {error, no_vms} -> {forward_preflist, no_js_vms};
        {error, Error} -> {error, Error}
    end.
%% @doc Function to do link extraction via this module. The function
%% will extract all links matching Bucket and Tag from an input
%% object, and send them as fitting output.
%%
%% Note: This function was added in Riak 1.0.2, but is not used there.
%% It is intended to smooth the transition from 1.0.2 to 1.1.0.
-spec link_phase(Object::term(), KeyData::term(),
                 {Bucket::riak_kv_mrc_pipe:link_match(),
                  Tag::riak_kv_mrc_pipe:link_match()})
        -> [riak_kv_mrc_pipe:key_input()].
link_phase({error, notfound}, _KeyData, _Match) ->
    %% missing objects contribute no links
    [];
link_phase(Object, _KeyData, {_Bucket, _Tag} = Match) ->
    %% the bucket's configured linkfun performs the actual extraction
    LinkFun = bucket_linkfun(riak_object:bucket(Object)),
    LinkFun(Object, none, Match).
%% @doc Find the link-extraction function configured for the bucket
%% (the `linkfun' bucket property, stored as {modfun, Module, Function}).
-spec bucket_linkfun(binary()) ->
         fun( (Object::term(), KeyData::term(),
               {Bucket::riak_kv_mrc_pipe:link_match(),
                Tag::riak_kv_mrc_pipe:link_match()})
             -> [riak_kv_mrc_pipe:key_input()] ).
bucket_linkfun(Bucket) ->
    Props = riak_core_bucket:get_bucket(Bucket),
    {linkfun, {modfun, Mod, Fun}} = lists:keyfind(linkfun, 1, Props),
    fun Mod:Fun/3.
%% @doc Send each result to the next fitting, stopping at the first
%% failure from the pipe worker.
-spec send_results([term()], state()) -> {ok | {error, term()}, state()}.
send_results([], State) ->
    {ok, State};
send_results([Result | Rest], #state{p=Partition, fd=Details} = State) ->
    case riak_pipe_vnode_worker:send_output(Result, Partition, Details) of
        ok ->
            send_results(Rest, State);
        Error ->
            %% propagate the send failure together with the state
            {Error, State}
    end.
%% @doc Unused pipe-fitting callback; nothing to flush, always succeeds.
-spec done(state()) -> ok.
done(_) ->
    ok.
%% @doc Check that the argument is a 2-tuple, with the first element
%% being a valid map phase specification. For `modfun' and
%% `qfun' phases, also check that the specified function exists,
%% and is arity-3 (see {@link riak_pipe_v:validate_function/3}).
-spec validate_arg(term()) -> ok | {error, iolist()}.
validate_arg({Phase, _Arg}) ->
    case Phase of
        {modfun, Module, Function} ->
            riak_pipe_v:validate_function(
              "PhaseSpec", 3, erlang:make_fun(Module, Function, 3));
        {qfun, Fun} ->
            riak_pipe_v:validate_function("PhaseSpec", 3, Fun);
        {Anon, {Bucket, Key}} when Anon =:= jsanon; Anon =:= strfun->
            %% stored functions: only the {binary,binary} address is
            %% validated here; the source itself is fetched later by
            %% init_phase/1
            if is_binary(Bucket), is_binary(Key) -> ok;
               true ->
                   {error, io_lib:format(
                             "~p requires that the {Bucket,Key} of a ~p"
                             " request be a {binary,binary}, not {~p,~p}",
                             [?MODULE, Anon,
                              riak_pipe_v:type_of(Bucket),
                              riak_pipe_v:type_of(Key)])}
            end;
        {jsfun, Name} ->
            if is_binary(Name) -> ok; %% TODO: validate name somehow?
               true ->
                   {error, io_lib:format(
                             "~p requires that the Name of a jsfun"
                             " request be a binary, not a ~p",
                             [?MODULE, riak_pipe_v:type_of(Name)])}
            end;
        {jsanon, Source} ->
            if is_binary(Source) -> ok; %% TODO: validate JS code somehow?
               true ->
                   {error, io_lib:format(
                             "~p requires that the Source of a jsanon"
                             " request be a binary, not a ~p",
                             [?MODULE, riak_pipe_v:type_of(Source)])}
            end;
        {strfun, Source} ->
            if is_binary(Source); is_list(Source) ->
                   ok;
               true ->
                   {error, io_lib:format(
                             "~p requires that the Source of a strfun"
                             " request be a binary or list, not a ~p",
                             [?MODULE, riak_pipe_v:type_of(Source)])}
            end;
        _ ->
            %% unrecognized phase spec: list every accepted form
            {error, io_lib:format(
                      "The PhaseSpec part of the argument for ~p"
                      " must be of one of the following forms:~n"
                      "   {modfun, Module :: atom(), Function :: atom()}~n"
                      "   {qfun, Function :: function()}~n"
                      "   {jsanon, {Bucket :: binary(), Key :: binary()}}~n"
                      "   {jsanon, Source :: binary()}~n"
                      "   {jsfun, Name :: binary()}~n"
                      "   {strfun, Source :: string()}~n"
                      "   {strfun, {Bucket :: binary(), Key :: binary()}}~n",
                      [?MODULE])}
    end;
validate_arg(Other) ->
    {error, io_lib:format("~p requires a 2-tuple of {PhaseSpec, StaticArg}"
                          " as argument, not a ~p",
                          [?MODULE, riak_pipe_v:type_of(Other)])}. | deps/riak_kv/src/riak_kv_mrc_map.erl | 0.774711 | 0.520862 | riak_kv_mrc_map.erl | starcoder |
-module(discovery).
-behavior(gen_server).
-export([
start_link/3,
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
code_change/3,
terminate/2
]).
%% Value advertised as the `c#' (current configuration number) TXT key
%% in init/1.
configuration_number() ->
    1.
% based on https://github.com/homebridge/HAP-NodeJS/blob/81652da554137d818d049f6f245e88efec43b1c5/src/lib/Accessory.ts#L85-L125
% Numeric accessory-category code; anything unrecognized falls back to
% the `other' category.
category(other) -> 1;
category(switch) -> 8;
category(_Unknown) -> category(other).
% based on https://github.com/homebridge/HAP-NodeJS/blob/81652da554137d818d049f6f245e88efec43b1c5/src/lib/Advertiser.ts#L27-L34
% Bit value for the `ff' (pairing feature flags) TXT key.
pairing_feature_flag(supports_hardware_authentication) -> 16#01;
pairing_feature_flag(supports_software_authentication) -> 16#02.
% based on https://github.com/homebridge/HAP-NodeJS/blob/81652da554137d818d049f6f245e88efec43b1c5/src/lib/Advertiser.ts#L17-L25
% Bit value for the `sf' (status flag) TXT key.
status_flag(not_paired) -> 16#01;
status_flag(not_joined_wifi) -> 16#02;
status_flag(problem_detected) -> 16#04.
% based on https://github.com/homebridge/HAP-NodeJS/blob/81652da554137d818d049f6f245e88efec43b1c5/src/lib/Advertiser.ts#L160-L164
% First four bytes of SHA-512 over DeviceId ++ SetupId, base64-encoded,
% as advertised in the `sh' TXT key.
setup_hash(DeviceId, SetupId) ->
    Digest = crypto:hash(sha512, [DeviceId, SetupId]),
    base64:encode_to_string(binary:part(Digest, 0, 4)).
% Render integers with ~p (as a string); pass every other value through
% untouched.
format_txt_value(Value) ->
    case is_integer(Value) of
        true -> io_lib:format("~p", [Value]);
        false -> Value
    end.
% Stringify every value of a TXT-record proplist, keeping keys intact.
format_txt(Txt) ->
    lists:map(fun({Key, Value}) -> {Key, format_txt_value(Value)} end, Txt).
%% Print a "discovery: "-prefixed line to stdout; Args are io:format
%% arguments for Msg.
log(Msg, Args) ->
    io:format("discovery: " ++ Msg ++ "~n", Args).

log(Msg) ->
    log(Msg, []).

%% Start the discovery server (unregistered) for the given DNS-SD
%% service Name/Type/Port triple.
start_link(Name, Type, Port) ->
    gen_server:start_link(?MODULE, {Name, Type, Port}, []).
%% Register the service over DNS-SD with the HomeKit (HAP) TXT record
%% set; the server state is {Ref}, the dnssd registration reference.
%% NOTE(review): DeviceId is hard-coded -- confirm this is intentional
%% rather than something that should be configured per accessory.
init({Name, Type, Port}) ->
    log("registering"),
    DeviceId = "C3:5D:3A:AE:5E:FB",
    Txt = [
        % based on https://github.com/homebridge/HAP-NodeJS/blob/master/src/lib/Advertiser.ts#L147-L157
        % current configuration number
        {'c#', configuration_number()},
        % pairing feature flags
        {ff, pairing_feature_flag(supports_software_authentication)},
        % device id
        {id, DeviceId},
        % model name
        {md, "whistlee@0.1.0"},
        % protocol version
        {pv, "1.1"},
        % current state number (must be 1)
        {'s#', "1"},
        % status flag
        {sf, status_flag(not_paired)},
        % accessory category
        {ci, category(switch)},
        % setup hash
        {sh, setup_hash(DeviceId, "")}
    ],
    FormattedTxt = format_txt(Txt),
    log("registering~p", [FormattedTxt]),
    {ok, Ref} = dnssd:register(Name, Type, Port, FormattedTxt),
    {ok, {Ref}}.
%% Unknown synchronous calls are logged and left unanswered.
%% NOTE(review): {noreply, State} with no later gen_server:reply/2
%% leaves the caller blocked until its call timeout -- confirm intended.
handle_call(_E, _From, State) ->
    log("handle_call"),
    {noreply, State}.
%% This server expects no casts; any received cast is logged and
%% ignored.
handle_cast(_Msg, State) ->
    %% was log("handle_call") -- copy-paste error made cast traffic
    %% indistinguishable from call traffic in the logs
    log("handle_cast"),
    {noreply, State}.
%% dnssd confirms the registration asynchronously with a
%% {dnssd, Ref, {register, add, ...}} message.
handle_info({dnssd, _Ref, {register, add, {Name, _, _}}}, State) ->
    log("registered ~p", [Name]),
    {noreply, State};
%% drain anything unexpected so the mailbox cannot accumulate
handle_info(UnknownMsg, State) ->
    log("unknown: ~p", [UnknownMsg]),
    {noreply, State}.

%% Hot-upgrade hook; state layout is unchanged across versions.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% Tear down the dnssd registration on shutdown; abnormal reasons are
%% logged before stopping.
%% NOTE(review): _Reason is underscore-prefixed yet used -- rename to
%% Reason to follow the usual convention.
terminate(normal, {Ref}) ->
    log("shutting down"),
    dnssd:stop(Ref);
terminate(_Reason, {Ref}) ->
    log("terminate reason: ~p", [_Reason]),
    dnssd:stop(Ref). | apps/whistlee/src/discovery.erl | 0.580828 | 0.406744 | discovery.erl | starcoder |
-module(bloom).
-export([
new/2,
new_optimal/2,
new_forgetful/4,
new_forgetful_optimal/4,
set/2,
check/2,
check_and_set/2,
clear/1,
type/1,
serialize/1,
deserialize/1
]).
%% All constructors below delegate to bloom_nif:new/1 with a parameter
%% map; only the map contents differ per variant.
%% ----------------------------------------------------------------------------
%% @doc
%% Create a new bloom filter structure.
%% `BitmapSize' is the size in bytes (not bits) that will be allocated in memory
%% `ItemsCount' is an estimation of the maximum number of items to store.
%% @end
%% ----------------------------------------------------------------------------
-spec new(BitmapSize :: pos_integer(), ItemsCount :: pos_integer()) -> {ok, Bloom :: bloom_nif:bloom()}.
new(BitmapSize, ItemsCount) ->
    bloom_nif:new(#{
        filter_type => bloom,
        bitmap_size => BitmapSize,
        items_count => ItemsCount
    }).

%% ----------------------------------------------------------------------------
%% @doc
%% Create a new bloom filter structure.
%% `ItemsCount' is an estimation of the maximum number of items to store.
%% `FalsePositiveRate' is the wanted rate of false positives, in [0.0, 1.0]
%% (enforced by the guard).
%% @end
%% ----------------------------------------------------------------------------
-spec new_optimal(ItemsCount :: pos_integer(), FalsePositiveRate :: float()) -> {ok, Bloom :: bloom_nif:bloom()}.
new_optimal(ItemsCount, FalsePositiveRate) when FalsePositiveRate >= 0.0 andalso FalsePositiveRate =< 1.0 ->
    bloom_nif:new(#{
        filter_type => bloom,
        items_count => ItemsCount,
        fp_rate => FalsePositiveRate
    }).

%% ----------------------------------------------------------------------------
%% @doc
%% Create a new forgetful bloom filter structure.
%% `BitmapSize' is the size in bytes (not bits) that will be allocated in memory
%% `ItemsCount' is an estimation of the maximum number of items to store,
%% `NumFilters' is the number of filters to maintain (minimum of 3, enforced
%% by the guard) and
%% `RotateAfter' is how many insertions to do into a filter before rotating a blank filter into the `future' position.
%% @end
%% ----------------------------------------------------------------------------
-spec new_forgetful(BitmapSize :: pos_integer(), ItemsCount :: pos_integer(), NumFilters :: pos_integer(), RotateAfter :: pos_integer())
        -> {ok, Bloom :: bloom_nif:bloom()}.
new_forgetful(BitmapSize, ItemsCount, NumFilters, RotateAfter) when NumFilters > 2 ->
    bloom_nif:new(#{
        filter_type => fbf,
        bitmap_size => BitmapSize,
        items_count => ItemsCount,
        capacity => NumFilters,
        rotate_at => RotateAfter
    }).

%% ----------------------------------------------------------------------------
%% @doc
%% Create a new forgetful bloom filter structure.
%% `BitmapSize' is the size in bytes (not bits) that will be allocated in memory
%% `ItemsCount' is an estimation of the maximum number of items to store,
%% `NumFilters' is the number of filters to maintain (minimum of 3) and
%% `RotateAfter' is how many insertions to do into a filter before rotating a blank filter into the `future' position.
%% `FalsePositiveRate' is the wanted rate of false positives, in [0.0, 1.0].
%% @end
%% ----------------------------------------------------------------------------
-spec new_forgetful_optimal(ItemsCount :: pos_integer(), NumFilters :: pos_integer(), RotateAfter :: pos_integer(), FalsePositiveRate :: float())
        -> {ok, Bloom :: bloom_nif:bloom()}.
new_forgetful_optimal(ItemsCount, NumFilters, RotateAfter, FalsePositiveRate) when NumFilters > 2 andalso FalsePositiveRate >= 0.0 andalso FalsePositiveRate =< 1.0 ->
    bloom_nif:new(#{
        filter_type => fbf,
        items_count => ItemsCount,
        capacity => NumFilters,
        rotate_at => RotateAfter,
        fp_rate => FalsePositiveRate
    }).
%% ----------------------------------------------------------------------------
%% @doc
%% Record the presence of `Key' in `Bloom' and `ForgetfulBloom'
%% For `ForgetfulBloom' a boolean is returned to indicate if the value was already present (like `check_and_set/2').
%% @end
%% ----------------------------------------------------------------------------
-spec set(Bloom :: bloom_nif:bloom(), Key :: term()) -> ok | boolean().
set(Bloom, Key) ->
    bloom_nif:set(Bloom, Key).

%% ----------------------------------------------------------------------------
%% @doc
%% Check for the presence of `Key' in `Bloom'.
%% Serialized and binary encoded bloom filters can be used with this
%% function when you wish to check for the key and do not need to use set
%% (eg. a static bloom filter stored in a database).
%% @end
%% ----------------------------------------------------------------------------
-spec check(Bloom :: bloom_nif:bloom() | bloom_nif:serialized_bloom(), Key :: term()) -> boolean().
check(Bloom, Key) ->
    bloom_nif:check(Bloom, Key).

%% ----------------------------------------------------------------------------
%% @doc
%% Record the presence of `Key' in `Bloom' or `ForgetfulBloom'
%% and return whether it was present before.
%% @end
%% ----------------------------------------------------------------------------
-spec check_and_set(Bloom :: bloom_nif:bloom(), Key :: term()) -> boolean().
check_and_set(Bloom, Key) ->
    bloom_nif:check_and_set(Bloom, Key).

%% ----------------------------------------------------------------------------
%% @doc
%% Clear all of the bits in the filter, removing all keys from the set.
%% @end
%% ----------------------------------------------------------------------------
-spec clear(Bloom :: bloom_nif:bloom()) -> ok.
clear(Bloom) ->
    bloom_nif:clear(Bloom).
%% ----------------------------------------------------------------------------
%% @doc
%% Get type of filter (a numeric code from the NIF).
%% @end
%% ----------------------------------------------------------------------------
-spec type(Bloom :: bloom_nif:bloom()) -> number() | {error, Reason :: binary()}.
type(Bloom) ->
    bloom_nif:ftype(Bloom).

%% ----------------------------------------------------------------------------
%% @doc
%% Serialize a bloom filter to binary.
%% `check/2' can be used against this serialized form efficiently.
%% @end
%% ----------------------------------------------------------------------------
-spec serialize(Bloom :: bloom_nif:bloom()) -> {ok, bloom_nif:serialized_bloom()}.
serialize(Bloom) ->
    bloom_nif:serialize(Bloom).

%% ----------------------------------------------------------------------------
%% @doc
%% Deserialize a previously serialized bloom filter back
%% into a bloom filter reference.
%% @end
%% ----------------------------------------------------------------------------
-spec deserialize(SerializedBloom :: bloom_nif:serialized_bloom()) -> {ok, bloom_nif:bloom()}.
deserialize(SerializedBloom) ->
    bloom_nif:deserialize(SerializedBloom). | src/bloom.erl | 0.579995 | 0.569912 | bloom.erl | starcoder |
%%% Logplex has the presence of reference-counted binaries (refc binaries) that
%%% are leaking (see
%%% http://www.erlang.org/doc/efficiency_guide/binaryhandling.html#id65722).
%%% The problem is that all drain processes end up doing little more than
%%% routing binaries that may or may not be reference-counted. After enough
%%% work done by a drain -- or a larger mailbox, or anything else -- it may get
%%% more space allocated for its stack and heap. When that space is garbage-
%%% collected or compacted following hibernation (if any), future refc binaries
%%% will feel the process space one by one, each being one single pointer in a
%%% list of references.
%%%
%%% Garbage collection may take far longer to trigger for 100,000 refc binaries
%%% than for far fewer non-counted binaries, or may just as well never happen.
%%% In this case, the memory is never reclaimed and we have a leak.
%%%
%%% There exist decent work-arounds for this -- fiddling with hibernation,
%%% different GC strategies (tracking refc binary space and doing it manually),
%%% doing it on a per-process basis, and so on.
%%%
%%% However, because production nodes might be suffering right now, this server
%%% acts as a quick fix where a max memory threshold may be given, and within 5
%%% minutes, the VM will go through a full GC for the entire VM. This is not
%%% optimal, and the effect can be bad given potential adverse effect on the
%%% generational garbage collector, but it's better than what we have right now
%%% -- nothing and crash dumps.
%%%
%%% We believe the leak to be real following usage of a function such as
%%% https://gist.github.com/ferd/6028931 that revealed gigabytes of data would
%%% be freed, and hundreds of thousands of refc binary references being behind
%%% the savings.
-module(logplex_leak).
-behaviour(gen_server).
-include("logplex_logging.hrl").
-define(SLEEP, timer:minutes(5)).
-define(THRESHOLD, 10000000000). % arbitrary value!
-record(state, {tref}).
-export([start_link/0, force/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
code_change/3, terminate/2]).
%%%%%%%%%%%%%%%%%%%%%%%
%%% Public Interface %%%
%%%%%%%%%%%%%%%%%%%%%%%

%% Start the GC watchdog as a locally-registered singleton.
start_link() ->
    gen_server:start_link({local,?MODULE}, ?MODULE, [], []).

%% Force an immediate full-VM garbage collection sweep (10s call timeout).
force() ->
    gen_server:call(?MODULE, force, timer:seconds(10)).
%%%%%%%%%%%%%%%%%%
%%% gen_server %%%
%%%%%%%%%%%%%%%%%%

%% Schedule the first GC check immediately; the timer ref is kept in
%% state so force/0 can cancel and reschedule it.
init([]) ->
    Ref = erlang:start_timer(0, self(), gc),
    {ok, #state{tref=Ref}}.
%% `force': cancel the pending timer, garbage-collect every process in
%% the VM unconditionally, log before/after totals, and reschedule.
handle_call(force, _From, S=#state{tref=Ref}) ->
    erlang:cancel_timer(Ref),
    Before = erlang:memory(total),
    [erlang:garbage_collect(Pid) || Pid <- processes()],
    NewRef = erlang:start_timer(?SLEEP, self(), gc),
    After = erlang:memory(total),
    ?INFO("at=gc mem_pre=~p mem_post=~p type=forced", [Before,After]),
    {reply, ok, S#state{tref=NewRef}};
%% NOTE(review): unknown calls get {noreply, _} with no later reply,
%% so such callers block until their call timeout.
handle_call(_, _From, State=#state{}) ->
    {noreply, State}.

%% No casts are expected; ignore silently.
handle_cast(_Unknown, State=#state{}) ->
    {noreply, State}.
%% Periodic check: when total VM memory meets or exceeds the configured
%% threshold (force_gc_memory app env, defaulting to ?THRESHOLD),
%% garbage-collect every process; in either case reschedule the timer.
handle_info({timeout, Ref, gc}, S=#state{tref=Ref}) ->
    Mem = erlang:memory(total),
    case Mem >= logplex_app:config(force_gc_memory, ?THRESHOLD) of
        true ->
            [erlang:garbage_collect(Pid) || Pid <- processes()],
            NewRef = erlang:start_timer(?SLEEP, self(), gc),
            After = erlang:memory(total),
            ?INFO("at=gc mem_pre=~p mem_post=~p type=timeout", [Mem,After]),
            {noreply, S#state{tref=NewRef}};
        false ->
            NewRef = erlang:start_timer(?SLEEP, self(), gc),
            {noreply, S#state{tref=NewRef}}
    end;
%% drain stray messages (e.g. timeouts from cancelled timers)
handle_info(_WhoCares, State=#state{}) ->
    {noreply, State}.

code_change(_OldVsn, State, _Extra) ->
    {ok,State}.

terminate(_,_) -> ok. | src/logplex_leak.erl | 0.61832 | 0.467696 | logplex_leak.erl | starcoder |
-module(efrisby_constraint).
%% Callbacks
-export([
evaluate/2
]).
%% @doc Check one expectation -- or a list of expectations -- against a
%% response. Returns `ok' when everything matches; throws
%% `efrisby_expectation_failed' (via fail/3) on any mismatch or on a
%% transport-level error. The first three clauses unwrap
%% `{ok, Response}' / `{error, _}' request results; the remaining
%% expectation clauses receive the bare response term.
%% Fix: the original spec declared the first argument as `tuple()' only,
%% but lists of expectations are explicitly supported.
-spec evaluate(tuple() | list(), tuple()) -> 'ok' | none().
evaluate([], {ok, _}) ->
    ok;
evaluate(_Expectations, {error, _} = Error) ->
    %% the request itself failed: report regardless of expectations
    fail(ok, Error, {request});
evaluate(Expectations, {ok, Response}) when erlang:is_list(Expectations) ->
    lists:foreach(fun(Expec) -> evaluate(Expec, Response) end, Expectations);
evaluate({content_type, ExpectedContentType}, Response) ->
    assert_equal(ExpectedContentType, efrisby_resp:header("content-type", Response), {content_type});
evaluate({json, ExpectedJson}, Response) ->
    evaluate({json, ".", ExpectedJson}, Response);
%% NOTE(review): the nested-expectation clauses below only expand the
%% head of the list; the tail (_List) appears to be dropped -- confirm
%% this is handled elsewhere or intended.
evaluate({json, RootPath, [{SubPath, ExpectedJson}|_List]}, Response) ->
    evaluate({json, path_concat(RootPath, SubPath), ExpectedJson}, Response);
evaluate({json, Path, ExpectedJson}, Response) ->
    assert_equal(ExpectedJson, efrisby_resp:json(Path, Response), {json, Path});
evaluate({headers, ExpectedHeaders}, Response) ->
    evaluate({headers, ".", ExpectedHeaders}, Response);
evaluate({headers, RootPath, [{SubPath, ExpectedHeaders}|_List]}, Response) ->
    evaluate({headers, path_concat(RootPath, SubPath), ExpectedHeaders}, Response);
evaluate({headers, Name, ExpectedHeaders}, Response) ->
    assert_equal(ExpectedHeaders, efrisby_resp:header(Name, Response), {headers, efrisby_resp:headers(Response)});
evaluate({json_types, ExpectedTypes}, Response) ->
    evaluate({json_types, ".", ExpectedTypes}, Response);
evaluate({json_types, RootPath, [{SubPath, ExpectedType}|_List]}, Response) ->
    ActualPath = path_concat(RootPath, SubPath),
    ActualValue = efrisby_resp:json(ActualPath, Response),
    %% a missing value stays `undefined' rather than being typed
    ActualType = case (ActualValue) of
        undefined -> undefined;
        _ -> efrisby_data:type_of(ActualValue)
    end,
    assert_equal(ExpectedType, ActualType, {json_types, ActualPath});
evaluate({body_contains, ExpectedBody}, Response) when erlang:is_binary(ExpectedBody) ->
    evaluate({body_contains, erlang:binary_to_list(ExpectedBody)}, Response);
evaluate({body_contains, ExpectedBody}, Response) ->
    assert_contains(ExpectedBody, erlang:binary_to_list(efrisby_resp:body(Response)), {body_contains});
evaluate({status, ExpectedStatus}, Response) ->
    assert_equal(ExpectedStatus, efrisby_resp:status(Response), {status}).
%% Join two JSON-path segments with a dot. "." acts as the identity
%% element on either side, and binary segments are converted to strings
%% before joining.
path_concat(Root, Sub) when is_binary(Root) ->
    path_concat(binary_to_list(Root), Sub);
path_concat(Root, Sub) when is_binary(Sub) ->
    path_concat(Root, binary_to_list(Sub));
path_concat(".", Sub) ->
    Sub;
path_concat(Root, ".") ->
    Root;
path_concat(Root, Sub) ->
    lists:concat([Root, '.', Sub]).
%% Succeed when Actual is exactly (=:=) the expected value; otherwise
%% raise an expectation failure via fail/3.
assert_equal(Expect, Actual, Context) ->
    case Actual =:= Expect of
        true -> ok;
        false -> fail(Expect, Actual, Context)
    end.

%% Succeed when Expect occurs as a substring of Actual
%% (string:str/2 returns 0 when absent).
assert_contains(Expect, Actual, Context) ->
    case string:str(Actual, Expect) of
        0 -> fail(Expect, Actual, Context);
        _Pos -> ok
    end.
%% Raise a structured expectation failure carrying the context,
%% expected and actual values; callers (e.g. test harnesses) are
%% expected to catch the `efrisby_expectation_failed' throw.
fail(Expect, Actual, Context) ->
    erlang:throw({efrisby_expectation_failed, [
        {context, Context},
        {expected, Expect},
        {actual, Actual}
    ]}). | src/efrisby_constraint.erl | 0.582729 | 0.544499 | efrisby_constraint.erl | starcoder |
%%==============================================================================
%% Copyright 2010 Erlang Solutions Ltd.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
-module(escalus_assert).
-export([is_chat_message/2,
has_no_stanzas/1,
is_iq/2,
is_presence_stanza/1,
is_presence_type/2,
is_presence_with_show/2,
is_presence_with_status/2,
is_presence_with_priority/2,
is_stanza_from/2,
is_roster_set/1,
is_roster_result/1,
is_result/1,
count_roster_items/2,
roster_contains/2,
is_privacy_query_result/1,
is_privacy_query_result_with_active/1,
is_privacy_query_result_with_active/2,
is_privacy_query_result_with_default/1,
is_privacy_query_result_with_default/2,
is_privacy_list_nonexistent_error/1,
is_error/3]).
%%===================================================================
%% Special cases
%%===================================================================

% note argument order change (backwards compatibility hack):
% the new escalus:assert API takes the stanza last
is_error(Stanza, Type, Condition) ->
    escalus:assert(is_error, [Type, Condition], Stanza).

% Assertion about client, not Stanza: fails the common_test case if the
% client's mailbox still holds any stanzas.
has_no_stanzas(Client) ->
    case escalus_client:peek_stanzas(Client) of
        [] ->
            ok;
        Stanzas ->
            escalus_utils:log_stanzas("following stanzas shouldn't be there", Stanzas),
            ct:fail({has_stanzas_but_shouldnt, Client, Stanzas})
    end.
%%===================================================================
%% Forward to new API
%%===================================================================

%% Each legacy 1- or 2-argument assertion below is generated by a macro
%% that simply delegates to escalus:assert/2,3 with the same name.
-define(USE_NEW_API_1(F),
        F(Stanza) ->
               escalus:assert(F, Stanza)).

-define(USE_NEW_API_2(F),
        F(Param, Stanza) ->
               escalus:assert(F, [Param], Stanza)).

?USE_NEW_API_2(is_chat_message).
?USE_NEW_API_2(is_iq).
?USE_NEW_API_1(is_presence_stanza).
?USE_NEW_API_2(is_presence_type).
?USE_NEW_API_2(is_presence_with_show).
?USE_NEW_API_2(is_presence_with_status).
?USE_NEW_API_2(is_presence_with_priority).
?USE_NEW_API_2(is_stanza_from).
?USE_NEW_API_1(is_roster_set).
?USE_NEW_API_1(is_roster_result).
?USE_NEW_API_1(is_result).
?USE_NEW_API_2(count_roster_items).
?USE_NEW_API_2(roster_contains).
?USE_NEW_API_1(is_privacy_query_result).
?USE_NEW_API_1(is_privacy_query_result_with_active).
?USE_NEW_API_2(is_privacy_query_result_with_active).
?USE_NEW_API_1(is_privacy_query_result_with_default).
?USE_NEW_API_2(is_privacy_query_result_with_default).
?USE_NEW_API_1(is_privacy_list_nonexistent_error). | src/escalus_assert.erl | 0.633864 | 0.427994 | escalus_assert.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1999-2009. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% %CopyrightEnd%
%%
-module(digraph_utils).
%%% Operations on directed (and undirected) graphs.
%%%
%%% Implementation based on <NAME>: Graph Algorithms with a
%%% Functional Flavour, in Jeuring, Johan, and <NAME> (Eds.):
%%% Advanced Functional Programming, Lecture Notes in Computer
%%% Science 925, Springer Verlag, 1995.
-export([components/1, strong_components/1, cyclic_strong_components/1,
reachable/2, reachable_neighbours/2,
reaching/2, reaching_neighbours/2,
topsort/1, is_acyclic/1,
arborescence_root/1, is_arborescence/1, is_tree/1,
loop_vertices/1,
subgraph/2, subgraph/3, condensation/1,
preorder/1, postorder/1]).
%%
%% A convenient type alias
%%
-type vertices() :: [digraph:vertex()].
%%
%% Exported functions
%%
%% @doc Connectivity components of G, treating edges as undirected.
%% NOTE(review): this and the two functions below actually return a list
%% of components (a list of vertex lists); the vertices() spec
%% understates that.
-spec components(digraph()) -> vertices().
components(G) ->
    forest(G, fun inout/3).

%% @doc Strongly connected components (Kosaraju-style: DFS over
%% in-neighbours, rooted in reverse postorder).
-spec strong_components(digraph()) -> vertices().
strong_components(G) ->
    forest(G, fun in/3, revpostorder(G)).

%% @doc Strong components containing at least one cycle (multi-vertex
%% components, plus singletons with a self-loop).
-spec cyclic_strong_components(digraph()) -> vertices().
cyclic_strong_components(G) ->
    remove_singletons(strong_components(G), G, []).

%% @doc Vertices reachable from Vs, including Vs themselves.
-spec reachable(vertices(), digraph()) -> vertices().
reachable(Vs, G) when is_list(Vs) ->
    lists:append(forest(G, fun out/3, Vs, first)).

%% @doc Vertices reachable from Vs via at least one edge.
-spec reachable_neighbours(vertices(), digraph()) -> vertices().
reachable_neighbours(Vs, G) when is_list(Vs) ->
    lists:append(forest(G, fun out/3, Vs, not_first)).

%% @doc Vertices from which some vertex in Vs is reachable (incl. Vs).
-spec reaching(vertices(), digraph()) -> vertices().
reaching(Vs, G) when is_list(Vs) ->
    lists:append(forest(G, fun in/3, Vs, first)).

%% @doc As reaching/2, but requiring at least one edge.
-spec reaching_neighbours(vertices(), digraph()) -> vertices().
reaching_neighbours(Vs, G) when is_list(Vs) ->
    lists:append(forest(G, fun in/3, Vs, not_first)).

%% @doc Topological ordering of G, or `false' if G has a cycle
%% (detected when the in-forest does not cover every vertex).
-spec topsort(digraph()) -> vertices() | 'false'.
topsort(G) ->
    L = revpostorder(G),
    case length(forest(G, fun in/3, L)) =:= length(digraph:vertices(G)) of
        true -> L;
        false -> false
    end.
%% @doc True iff G has neither self-loops nor longer cycles.
-spec is_acyclic(digraph()) -> boolean().
is_acyclic(G) ->
    loop_vertices(G) =:= [] andalso topsort(G) =/= false.

%% @doc If G is an arborescence (directed rooted tree), return
%% {yes, Root}; otherwise `no'.
-spec arborescence_root(digraph()) -> 'no' | {'yes', digraph:vertex()}.
arborescence_root(G) ->
    case digraph:no_edges(G) =:= digraph:no_vertices(G) - 1 of
        true ->
            %% The fold crashes (function_clause / badmatch) as soon as
            %% a vertex violates the in-degree constraints or a second
            %% root is found; the catch turns that into `no'.
            try
                F = fun(V, Z) ->
                            case digraph:in_degree(G, V) of
                                1 -> Z;
                                0 when Z =:= [] -> [V]
                            end
                    end,
                [Root] = lists:foldl(F, [], digraph:vertices(G)),
                {yes, Root}
            catch _:_ ->
                    no
            end;
        false ->
            no
    end.

-spec is_arborescence(digraph()) -> boolean().
is_arborescence(G) ->
    arborescence_root(G) =/= no.

%% @doc Tree check ignoring edge direction: |E| = |V| - 1 and a single
%% connectivity component.
-spec is_tree(digraph()) -> boolean().
is_tree(G) ->
    (digraph:no_edges(G) =:= digraph:no_vertices(G) - 1)
        andalso (length(components(G)) =:= 1).

%% @doc Vertices that have an edge to themselves.
-spec loop_vertices(digraph()) -> vertices().
loop_vertices(G) ->
    [V || V <- digraph:vertices(G), is_reflexive_vertex(V, G)].
%% @doc Subgraph of G induced by the vertices Vs (default options).
%% Internal `throw(badarg)' is converted to `error(badarg)' at the API.
-spec subgraph(digraph(), vertices()) -> digraph().
subgraph(G, Vs) ->
    try
        subgraph_opts(G, Vs, [])
    catch
        throw:badarg ->
            erlang:error(badarg)
    end.

-type option() :: {'type', 'inherit' | [digraph:d_type()]}
                | {'keep_labels', boolean()}.

-spec subgraph(digraph(), vertices(), [option()]) -> digraph().
subgraph(G, Vs, Opts) ->
    try
        subgraph_opts(G, Vs, Opts)
    catch
        throw:badarg ->
            erlang:error(badarg)
    end.

%% @doc Condensation of G: one vertex per strong component, with an
%% edge between components whose members are connected in G.
-spec condensation(digraph()) -> digraph().
condensation(G) ->
    SCs = strong_components(G),
    %% Each component is assigned a number.
    %% V2I: from vertex to number.
    %% I2C: from number to component.
    V2I = ets:new(condensation, []),
    I2C = ets:new(condensation, []),
    CFun = fun(SC, N) -> lists:foreach(fun(V) ->
                                               true = ets:insert(V2I, {V,N})
                                       end,
                                       SC),
                         true = ets:insert(I2C, {N, SC}),
                         N + 1
           end,
    lists:foldl(CFun, 1, SCs),
    %% empty graph with G's type/protection inherited
    SCG = subgraph_opts(G, [], []),
    lists:foreach(fun(SC) -> condense(SC, G, SCG, V2I, I2C) end, SCs),
    ets:delete(V2I),
    ets:delete(I2C),
    SCG.

-spec preorder(digraph()) -> vertices().
preorder(G) ->
    lists:reverse(revpreorder(G)).

-spec postorder(digraph()) -> vertices().
postorder(G) ->
    lists:reverse(revpostorder(G)).
%%
%% Local functions
%%

%% Depth-first forest over the vertices of G; SF selects which
%% neighbours to follow (in/3, out/3 or inout/3).
forest(G, SF) ->
    forest(G, SF, digraph:vertices(G)).

forest(G, SF, Vs) ->
    forest(G, SF, Vs, first).

%% HandleFirst = first | not_first: whether each root vertex itself
%% starts a tree, or only its (unvisited) neighbours do.
forest(G, SF, Vs, HandleFirst) ->
    T = ets:new(forest, [set]),     % visited set
    F = fun(V, LL) -> pretraverse(HandleFirst, V, SF, G, T, LL) end,
    LL = lists:foldl(F, [], Vs),
    ets:delete(T),
    LL.

pretraverse(first, V, SF, G, T, LL) ->
    ptraverse([V], SF, G, T, [], LL);
pretraverse(not_first, V, SF, G, T, LL) ->
    case ets:member(T, V) of
        false -> ptraverse(SF(G, V, []), SF, G, T, [], LL);
        true -> LL
    end.

%% DFS worker: Rs accumulates the current tree, LL the finished trees.
ptraverse([V | Vs], SF, G, T, Rs, LL) ->
    case ets:member(T, V) of
        false ->
            ets:insert(T, {V}),
            ptraverse(SF(G, V, Vs), SF, G, T, [V | Rs], LL);
        true ->
            ptraverse(Vs, SF, G, T, Rs, LL)
    end;
ptraverse([], _SF, _G, _T, [], LL) ->
    LL;
ptraverse([], _SF, _G, _T, Rs, LL) ->
    [Rs | LL].

revpreorder(G) ->
    lists:append(forest(G, fun out/3)).

%% Reverse postorder over out-edges; used by strong_components/1 and
%% topsort/1.
revpostorder(G) ->
    T = ets:new(forest, [set]),
    L = posttraverse(digraph:vertices(G), G, T, []),
    ets:delete(T),
    L.

posttraverse([V | Vs], G, T, L) ->
    L1 = case ets:member(T, V) of
             false ->
                 ets:insert(T, {V}),
                 %% V is prepended after its descendants finish, so the
                 %% head of the result is the last-finished vertex
                 [V | posttraverse(out(G, V, []), G, T, L)];
             true ->
                 L
         end,
    posttraverse(Vs, G, T, L1);
posttraverse([], _G, _T, L) ->
    L.

%% Neighbour selectors handed to forest/2,3,4 as SF.
in(G, V, Vs) ->
    digraph:in_neighbours(G, V) ++ Vs.

out(G, V, Vs) ->
    digraph:out_neighbours(G, V) ++ Vs.

inout(G, V, Vs) ->
    in(G, V, out(G, V, Vs)).

%% Drop single-vertex components unless the vertex has a self-loop.
remove_singletons([C=[V] | Cs], G, L) ->
    case is_reflexive_vertex(V, G) of
        true -> remove_singletons(Cs, G, [C | L]);
        false -> remove_singletons(Cs, G, L)
    end;
remove_singletons([C | Cs], G, L) ->
    remove_singletons(Cs, G, [C | L]);
remove_singletons([], _G, L) ->
    L.

is_reflexive_vertex(V, G) ->
    lists:member(V, digraph:out_neighbours(G, V)).
%% Parse subgraph options; defaults are {type, inherit} and
%% {keep_labels, true}. Invalid options throw badarg (converted to an
%% error at the exported API).
subgraph_opts(G, Vs, Opts) ->
    subgraph_opts(Opts, inherit, true, G, Vs).

subgraph_opts([{type, Type} | Opts], _Type0, Keep, G, Vs)
  when Type =:= inherit; is_list(Type) ->
    subgraph_opts(Opts, Type, Keep, G, Vs);
subgraph_opts([{keep_labels, Keep} | Opts], Type, _Keep0, G, Vs)
  when is_boolean(Keep) ->
    subgraph_opts(Opts, Type, Keep, G, Vs);
subgraph_opts([], inherit, Keep, G, Vs) ->
    %% inherit cyclicity/protection from the source graph
    Info = digraph:info(G),
    {_, {_, Cyclicity}} = lists:keysearch(cyclicity, 1, Info),
    {_, {_, Protection}} = lists:keysearch(protection, 1, Info),
    subgraph(G, Vs, [Cyclicity, Protection], Keep);
subgraph_opts([], Type, Keep, G, Vs) ->
    subgraph(G, Vs, Type, Keep);
subgraph_opts(_, _Type, _Keep, _G, _Vs) ->
    throw(badarg).

%% Build the induced subgraph: copy requested vertices first, then the
%% out-edges whose endpoints both made it into SG.
subgraph(G, Vs, Type, Keep) ->
    try digraph:new(Type) of
        SG ->
            lists:foreach(fun(V) -> subgraph_vertex(V, G, SG, Keep) end, Vs),
            EFun = fun(V) -> lists:foreach(fun(E) ->
                                               subgraph_edge(E, G, SG, Keep)
                                           end,
                                           digraph:out_edges(G, V))
                   end,
            lists:foreach(EFun, digraph:vertices(SG)),
            SG
    catch
        error:badarg ->
            throw(badarg)
    end.

%% Copy V into SG if it exists in G, with or without its label.
subgraph_vertex(V, G, SG, Keep) ->
    case digraph:vertex(G, V) of
        false -> ok;
        _ when not Keep -> digraph:add_vertex(SG, V);
        {_V, Label} when Keep -> digraph:add_vertex(SG, V, Label)
    end.

%% Copy edge E into SG when its target vertex is present in SG.
subgraph_edge(E, G, SG, Keep) ->
    {_E, V1, V2, Label} = digraph:edge(G, E),
    case digraph:vertex(SG, V2) of
        false -> ok;
        _ when not Keep -> digraph:add_edge(SG, E, V1, V2, []);
        _ when Keep -> digraph:add_edge(SG, E, V1, V2, Label)
    end.

%% Add the component SC to the condensation graph SCG along with edges
%% to every component reachable from SC in one step. T collects the
%% distinct neighbour-component numbers.
condense(SC, G, SCG, V2I, I2C) ->
    T = ets:new(condense, []),
    NFun = fun(Neighbour) ->
                   [{_V,I}] = ets:lookup(V2I, Neighbour),
                   ets:insert(T, {I})
           end,
    VFun = fun(V) -> lists:foreach(NFun, digraph:out_neighbours(G, V)) end,
    lists:foreach(VFun, SC),
    digraph:add_vertex(SCG, SC),
    condense(ets:first(T), T, SC, G, SCG, I2C),
    ets:delete(T).

condense('$end_of_table', _T, _SC, _G, _SCG, _I2C) ->
    ok;
condense(I, T, SC, G, SCG, I2C) ->
    [{_,C}] = ets:lookup(I2C, I),
    digraph:add_vertex(SCG, C),
    digraph:add_edge(SCG, SC, C),
    condense(ets:next(T, I), T, SC, G, SCG, I2C). | data/erlang/5cc905ab3f463731a8927cd873ec6ad3_digraph_utils.erl | 0.548432 | 0.530723 | 5cc905ab3f463731a8927cd873ec6ad3_digraph_utils.erl | starcoder |
-module(bits).
-export([bits/1]).
-include_lib("eunit/include/eunit.hrl").
% <NAME>
% 2017-03-12
%
% Sums the number of 'on' bits in a given positive integer.
%
% Tail-recursive vs direct
% ------------------------
% Tail-recursive implementations have the advantages:
% - internally can be converted to a loop, instead of nested function calls,
% which means we wont exhaust the stack (unlike direct) in tail-call
% optimized programming languages.
% - being a loop, may execute faster due to not having to create new stack
% frames for each cycle.
% - often use less memory.
%
% Whereas direct-recursive implementations are:
% - often more intuitive / more clearly illustrate the work being done.
%
% Because direct-recursive implementations will eventually exhaust the stack
% and crash if they execute too many cycles, tail-recursive implementations
% are generally preferred. For these reasons, I prefer the bits2/1
% implementation below.
% (direct-recursive)
% Kernighan's trick: N band (N-1) clears the lowest set bit, so the
% recursion depth equals the number of set bits.
bits(N) when N == 0 ->
    0;
bits(N) ->
    1 + bits(N band (N - 1)).
% --- PREFERRED ---
% (tail-recursive)
% Same bit-clearing trick as bits/1, but with an accumulator so every
% recursive call is a tail call (constant stack space).
bits2(N) ->
    bits2(N,0).

bits2(0,Acc) ->
    Acc;
bits2(N,Acc) ->
    bits2(N band (N-1), Acc + 1).
% (tail-recursive, alternative)
% Probe each bit with a sliding mask (1, 2, 4, ...) until the mask
% exceeds N, summing the hits.
bits3(N) ->
    bits3(N, 1, 0).

bits3(N, Mask, Count) when Mask > N ->
    Count;
bits3(N, Mask, Count) ->
    Hit = case N band Mask of
              0 -> 0;
              _ -> 1
          end,
    bits3(N, Mask bsl 1, Count + Hit).
% --- unit tests ------------------------
% EUnit discovers *_test/0 functions automatically (eunit.hrl include).
bits_test() ->
    ?assert(bits(7) == 3),
    ?assert(bits(8) == 1),
    ?assert(bits(27) == 4),
    ?assert(bits(98) == 3),
    ?assert(bits(219) == 6).
%% Exercise the tail-recursive implementation with the same cases as
%% bits_test/0.
bits2_test() ->
    ?assert(bits2(7) == 3),
    ?assert(bits2(8) == 1),
    ?assert(bits2(27) == 4),
    ?assert(bits2(98) == 3),
    %% was `bits(219)` -- copy-paste from bits_test left bits2 untested
    %% for this case
    ?assert(bits2(219) == 6).
%% Exercise the mask-based implementation with the same cases.
bits3_test() ->
    ?assert(bits3(7) == 3),
    ?assert(bits3(8) == 1),
    ?assert(bits3(27) == 4),
    ?assert(bits3(98) == 3),
    %% was `bits(219)` -- should exercise bits3
    ?assert(bits3(219) == 6). | 1_24/bits.erl | 0.587233 | 0.609306 | bits.erl | starcoder |
%%% Advent of Code solution for 2019 day 14.
%%% Created: 2019-12-14T07:03:21+00:00
-module(aoc2019_day14).
-behavior(aoc_puzzle).
-export([parse/1, solve1/1, solve2/1, info/0]).
-include("aoc_puzzle.hrl").
-spec info() -> aoc_puzzle().
%% Static puzzle metadata consumed by the aoc_puzzle runner framework;
%% `expected' pins the known answers for regression checking.
info() ->
    #aoc_puzzle{module = ?MODULE,
                year = 2019,
                day = 14,
                name = "Space Stoichiometry",
                expected = {741927, 2371699},
                has_input_file = true}.
-type input_type() :: map().
-type result_type() :: integer().
%% Parse reaction lines of the form "N CHEM, M CHEM => K CHEM" into a
%% map keyed by the output chemical, with values
%% {Inputs, produces, {Quantity, Chemical}}.
%% The right-hand side is now parsed once and reused (it was previously
%% parsed twice per line).
-spec parse(Binary :: binary()) -> input_type().
parse(Binary) ->
    Lines = string:tokens(binary_to_list(Binary), "\n"),
    maps:from_list(
      lists:map(
        fun(Line) ->
                %% string:tokens/2 splits on the individual characters
                %% '=' and '>', which works because they only ever occur
                %% together as the "=>" arrow.
                [Left, Right] = string:tokens(Line, "=>"),
                {_Quantity, Chemical} = Output = parse_chemical(Right),
                Inputs = lists:map(fun parse_chemical/1,
                                   string:tokens(Left, ",")),
                {Chemical, {Inputs, produces, Output}}
        end,
        Lines)).
%% Part 1: total ORE required to produce exactly one unit of FUEL.
-spec solve1(Input :: input_type()) -> result_type().
solve1(Input) ->
    produce(1, 'FUEL', Input).
%% Part 2: the largest amount of FUEL producible from one trillion ORE,
%% located by binary search over the fuel quantity.
-spec solve2(Input :: input_type()) -> result_type().
solve2(Input) ->
    MaxOre = 1000000000000,
    binary_search(1, inf, MaxOre, Input).
%% Binary search exploring the upper limit by starting at 1 and
%% doubling until we overshoot.
binary_search(Lower, inf = Upper, MaxOre, Rules) ->
    case produce(Lower, 'FUEL', Rules) of
        TotalOre when TotalOre < MaxOre ->
            binary_search(Lower * 2, Upper, MaxOre, Rules);
        _ ->
            %% Overshot: the answer lies between Lower div 2 and Lower.
            binary_search(floor(Lower / 2), Lower, MaxOre, Rules)
    end;
binary_search(Lower, Upper, MaxOre, Rules) when Lower < Upper ->
    case floor(Lower + (Upper - Lower) / 2) of
        %% If middle == lower, it means that upper exceed the limit, but
        %% middle does not.
        Middle when Middle == Lower ->
            Middle;
        Middle ->
            TotalOre = produce(Middle, 'FUEL', Rules),
            if TotalOre > MaxOre ->
                    binary_search(Lower, Middle, MaxOre, Rules);
               true ->
                    binary_search(Middle, Upper, MaxOre, Rules)
            end
    end.
%% Parse one "<Quantity> <Chemical>" fragment (surrounding whitespace
%% tolerated) into {Quantity, ChemicalAtom}.
parse_chemical(Str) ->
    [QtyTok, ChemTok] = string:lexemes(Str, " "),
    {list_to_integer(QtyTok), list_to_atom(ChemTok)}.
%% Produce a given amount of chemical. Returns the total amount of ORE
%% used, as accumulated under the total_ore key of the inventory that
%% do_produce/3 builds up.
produce(Qnty, Chem, Rules) ->
    Inv0 = do_produce({Qnty, Chem}, Rules, #{}),
    maps:get(total_ore, Inv0).
%% Recursive entry point. Produce (at least) the given amount of
%% chemical given an inventory.
%% ORE is primitive: add it to the inventory and bump the running
%% total_ore counter used for the final answer.
do_produce({Qnty, 'ORE'}, _, Inv) ->
    Inv0 = maps:update_with('ORE', fun(Old) -> Old + Qnty end, Qnty, Inv),
    Inv1 = maps:update_with(total_ore, fun(Old) -> Old + Qnty end, Qnty, Inv0),
    Inv1;
%% Any other chemical: if current stock already covers the request we
%% are done; otherwise apply its production rule enough times (ceil of
%% the shortfall divided by the rule's batch size Q) to cover it.
do_produce({Qnty, Chem}, Rules, Inv) ->
    case maps:get(Chem, Inv, 0) of
        Available when Available >= Qnty ->
            Inv;
        Available ->
            {_, produces, {Q, _}} = Rule = maps:get(Chem, Rules),
            Needed = Qnty - Available,
            Repeats = ceil(Needed / Q),
            apply_rule(Rule, Repeats, Rules, Inv)
    end.
%% Apply `Rule' scaled by `Multiplier' to the inventory `Inv'.
%% If every (scaled) input is already in stock, consume the inputs and
%% credit the output chemical. Otherwise produce the missing inputs
%% first (recursing via do_produce/3) and then retry the rule.
%% @param Rule   The rule to apply
%% @param Multiplier  How many copies of the rule to apply
%% @param Rules  All rules, needed to recurse when producing inputs
%% @param Inv    Inventory
apply_rule({Inputs, produces, {OutQ, OutC}} = Rule, Multiplier, Rules, Inv) ->
    Scale = fun(Qty) -> Qty * Multiplier end,
    HaveAll = lists:all(fun({Qty, Chem}) ->
                                maps:get(Chem, Inv, 0) >= Scale(Qty)
                        end, Inputs),
    case HaveAll of
        true ->
            %% Consume the scaled inputs...
            Consumed =
                lists:foldl(fun({Qty, Chem}, Acc) ->
                                    maps:update_with(Chem,
                                                     fun(V) -> V - Scale(Qty) end,
                                                     Acc)
                            end, Inv, Inputs),
            %% ...and credit the produced chemical.
            maps:update_with(OutC, fun(V) -> V + Scale(OutQ) end,
                             Scale(OutQ), Consumed);
        false ->
            %% Produce each input first, then try the rule again.
            Stocked =
                lists:foldl(fun({Qty, Chem}, Acc) ->
                                    do_produce({Scale(Qty), Chem}, Rules, Acc)
                            end, Inv, Inputs),
            apply_rule(Rule, Multiplier, Rules, Stocked)
    end.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2022. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Purpose: Calculate tight bounds for integer operations.
%%
%% Reference:
%%
%% <NAME>, <NAME> (2 ed). <NAME> -
%% Pearson Education, Inc. Chapter 4. Arithmetic Bounds.
%%
%%
-module(beam_bounds).
-export(['+'/2, '-'/2, '*'/2, 'div'/2, 'rem'/2,
'band'/2, 'bor'/2, 'bxor'/2, 'bsr'/2, 'bsl'/2,
relop/3]).
-type range() :: {integer(), integer()} | 'any'.
-type range_result() :: range() | 'any'.
-type relop() :: '<' | '=<' | '>' | '>='.
-type bool_result() :: 'true' | 'false' | 'maybe'.
%% Exact range of X + Y for X in {A,B}, Y in {C,D}. Only attempted when
%% every endpoint fits in 256 bits (abs(V) bsr 256 =:= 0); larger
%% operands fall through to 'any', presumably to keep bignum arithmetic
%% in the compiler cheap.
-spec '+'(range(), range()) -> range_result().
'+'({A,B}, {C,D}) when abs(A) bsr 256 =:= 0, abs(B) bsr 256 =:= 0,
                       abs(C) bsr 256 =:= 0, abs(D) bsr 256 =:= 0 ->
    verify_range({A+C,B+D});
'+'(_, _) ->
    any.
%% Exact range of X - Y: minimum is A - D, maximum is B - C. Same
%% 256-bit endpoint guard as '+'/2; anything larger yields 'any'.
-spec '-'(range(), range()) -> range_result().
'-'({A,B}, {C,D}) when abs(A) bsr 256 =:= 0, abs(B) bsr 256 =:= 0,
                       abs(C) bsr 256 =:= 0, abs(D) bsr 256 =:= 0 ->
    verify_range({A-D,B-C});
'-'(_, _) ->
    any.
%% Range of X * Y. As in standard interval arithmetic, the extremes of
%% the product occur at the four endpoint combinations, so min/max over
%% those bound the result.
-spec '*'(range(), range()) -> range_result().
'*'({A,B}, {C,D}) when abs(A) bsr 256 =:= 0, abs(B) bsr 256 =:= 0,
                       abs(C) bsr 256 =:= 0, abs(D) bsr 256 =:= 0 ->
    All = [X * Y || X <- [A,B], Y <- [C,D]],
    Min = lists:min(All),
    Max = lists:max(All),
    verify_range({Min,Max});
'*'(_, _) ->
    any.
%% Range of X div Y. Besides the denominator endpoints, the quotient's
%% extremes can occur at the denominator values closest to zero, so
%% when the denominator range crosses (or touches) zero we also probe
%% -1 and/or 1; division by zero itself is skipped.
-spec 'div'(range(), range()) -> range_result().
'div'({A,B}, {C,D}) ->
    Denominators = [min(C, D),max(C, D)|
                    %% Handle zero crossing for the denominator.
                    if
                        C < 0, 0 < D -> [-1, 1];
                        C =:= 0 -> [1];
                        D =:= 0 -> [-1];
                        true -> []
                    end],
    All = [X div Y || X <- [A,B],
                      Y <- Denominators,
                      Y =/= 0],
    Min = lists:min(All),
    Max = lists:max(All),
    verify_range({Min,Max});
'div'(_, _) ->
    any.
%% Range of X rem Y. Erlang's rem takes the sign of the dividend. With
%% a strictly positive divisor range the magnitude is at most D - 1,
%% and the result is non-negative when the dividend is. Otherwise, as
%% long as the divisor range is not the single point 0, the result is
%% bounded symmetrically by the largest divisor magnitude minus one.
-spec 'rem'(range(), range()) -> range_result().
'rem'({A,_}, {C,D}) when C > 0 ->
    Max = D - 1,
    Min = if
              A >= 0 -> 0;
              true -> -Max
          end,
    verify_range({Min,Max});
'rem'(_, {C,D}) when C =/= 0; D =/= 0 ->
    Max = max(abs(C), abs(D)) - 1,
    Min = -Max,
    verify_range({Min,Max});
'rem'(_, _) ->
    any.
%% Range of X band Y. For non-negative ranges whose lower endpoints fit
%% in 256 bits, tight bounds are computed bit-by-bit (min_band/max_band,
%% cf. the Chapter 4 reference in the module header). If only one side
%% is known non-negative, the result still lies in [0, UpperOfThatSide].
-spec 'band'(range(), range()) -> range_result().
'band'({A,B}, {C,D}) when A >= 0, A bsr 256 =:= 0, C >= 0, C bsr 256 =:= 0 ->
    Min = min_band(A, B, C, D),
    Max = max_band(A, B, C, D),
    {Min,Max};
'band'(_, {C,D}) when C >= 0 ->
    {0,D};
'band'({A,B}, _) when A >= 0 ->
    {0,B};
'band'(_, _) ->
    any.
%% Range of X bor Y for non-negative ranges: bit-level bounds via
%% min_bor/max_bor (cf. the Chapter 4 reference in the module header);
%% other inputs yield 'any'.
-spec 'bor'(range(), range()) -> range_result().
'bor'({A,B}, {C,D}) when A >= 0, A bsr 256 =:= 0, C >= 0, C bsr 256 =:= 0 ->
    Min = min_bor(A, B, C, D),
    Max = max_bor(A, B, C, D),
    {Min,Max};
'bor'(_, _) ->
    any.
%% Range of X bxor Y for non-negative ranges: the result is always
%% non-negative, so 0 is a safe lower bound; the upper bound comes from
%% max_bxor.
-spec 'bxor'(range(), range()) -> range_result().
'bxor'({A,B}, {C,D}) when A >= 0, A bsr 256 =:= 0, C >= 0, C bsr 256 =:= 0 ->
    Max = max_bxor(A, B, C, D),
    {0,Max};
'bxor'(_, _) ->
    any.
%% Range of X bsr Y for a non-negative shift range: evaluate each value
%% bound at both shift extremes and take the min/max of the results.
-spec 'bsr'(range(), range()) -> range_result().
'bsr'({A,B}, {C,D}) when C >= 0 ->
    Min = min(A bsr C, A bsr D),
    Max = max(B bsr C, B bsr D),
    {Min,Max};
'bsr'(_, _) ->
    any.
%% Range of X bsl Y. Restricted to shift amounts in [0, 128) and a
%% value magnitude fitting in 128 bits, so results stay bounded.
-spec 'bsl'(range(), range()) -> range_result().
'bsl'({A,B}, {C,D}) when abs(B) bsr 128 =:= 0, C >= 0, D < 128 ->
    Min = min(A bsl C, A bsl D),
    Max = max(B bsl C, B bsl D),
    {Min,Max};
'bsl'(_, _) ->
    any.
%% Evaluate a relational operator over two ranges. Comparing B with C
%% and A with D covers the extreme pairs of values: if both comparisons
%% agree, the relation has the same truth value for every pair drawn
%% from the ranges; if they differ, the outcome depends on the actual
%% values, hence 'maybe'.
-spec relop(relop(), range(), range()) -> bool_result().
relop(Op, {A,B}, {C,D}) ->
    case {erlang:Op(B, C),erlang:Op(A, D)} of
        {Bool,Bool} -> Bool;
        {_,_} -> 'maybe'
    end;
relop(_, _, _) ->
    'maybe'.
%%%
%%% Internal functions.
%%%
verify_range({Min,Max}=T) when Min =< Max -> T.
%% Lower bound of X band Y over A =< X =< B, C =< Y =< D (all
%% non-negative). Scans bits from the top (cf. the Chapter 4 reference
%% in the module header): wherever both A and C have a 0 bit, one of
%% them may be rounded up to the next multiple of that bit, which can
%% only raise the eventual A band C; the scan stops as soon as such a
%% rounding stays within its range.
min_band(A, B, C, D) ->
    M = 1 bsl (upper_bit(A bor C) + 1),
    min_band(A, B, C, D, M).

%% M is the single bit currently examined; M =:= 0 ends the scan.
min_band(A, _B, C, _D, 0) ->
    A band C;
min_band(A, B, C, D, M) ->
    if
        (bnot A) band (bnot C) band M =/= 0 ->
            %% Both lower bounds have a 0 at this bit position: try
            %% setting it in A (clearing the bits below), else in C.
            case (A bor M) band -M of
                NewA when NewA =< B ->
                    min_band(NewA, B, C, D, 0);
                _ ->
                    case (C bor M) band -M of
                        NewC when NewC =< D ->
                            min_band(A, B, NewC, D, 0);
                        _ ->
                            min_band(A, B, C, D, M bsr 1)
                    end
            end;
        true ->
            min_band(A, B, C, D, M bsr 1)
    end.
%% Upper bound of X band Y over the given non-negative ranges (cf. the
%% Chapter 4 reference in the module header). Starting at the highest
%% bit where B and D differ, a bit set in only one of them can be
%% cleared with the bits below saturated, provided the lowered value
%% stays inside its range; this can only increase B band D.
max_band(A, B, C, D) ->
    M = 1 bsl upper_bit(B bxor D),
    max_band(A, B, C, D, M).

%% M is the single bit currently examined; M =:= 0 ends the scan.
max_band(_A, B, _C, D, 0) ->
    B band D;
max_band(A, B, C, D, M) ->
    if
        B band (bnot D) band M =/= 0 ->
            case (B band (bnot M)) bor (M - 1) of
                NewB when NewB >= A ->
                    max_band(A, NewB, C, D, 0);
                _ ->
                    max_band(A, B, C, D, M bsr 1)
            end;
        (bnot B) band D band M =/= 0 ->
            case (D band (bnot M)) bor (M - 1) of
                NewD when NewD >= C ->
                    max_band(A, B, C, NewD, 0);
                _ ->
                    max_band(A, B, C, D, M bsr 1)
            end;
        true ->
            max_band(A, B, C, D, M bsr 1)
    end.
%% Lower bound of X bor Y over the given non-negative ranges (cf. the
%% Chapter 4 reference in the module header). Starting at the highest
%% bit where A and C differ, a bit set in one lower bound but not the
%% other may allow the other to be rounded up (clearing the bits
%% below), lowering the eventual A bor C, as long as the rounded value
%% stays within its range.
min_bor(A, B, C, D) ->
    M = 1 bsl upper_bit(A bxor C),
    min_bor(A, B, C, D, M).

%% M is the single bit currently examined; M =:= 0 ends the scan.
min_bor(A, _B, C, _D, 0) ->
    A bor C;
min_bor(A, B, C, D, M) ->
    if
        (bnot A) band C band M =/= 0 ->
            case (A bor M) band -M of
                NewA when NewA =< B ->
                    min_bor(NewA, B, C, D, 0);
                _ ->
                    min_bor(A, B, C, D, M bsr 1)
            end;
        A band (bnot C) band M =/= 0 ->
            case (C bor M) band -M of
                NewC when NewC =< D ->
                    min_bor(A, B, NewC, D, 0);
                _ ->
                    min_bor(A, B, C, D, M bsr 1)
            end;
        true ->
            min_bor(A, B, C, D, M bsr 1)
    end.
%% Upper bound of X bor Y over the given non-negative ranges (cf. the
%% Chapter 4 reference in the module header). At the highest bit set in
%% both B and D, one of them can drop that bit and saturate the lower
%% bits, which can only increase B bor D, provided the lowered value
%% stays within its range.
max_bor(A, B, C, D) ->
    Intersection = B band D,
    M = 1 bsl upper_bit(Intersection),
    max_bor(Intersection, A, B, C, D, M).

%% M is the single bit currently examined; M =:= 0 ends the scan.
max_bor(_Intersection, _A, B, _C, D, 0) ->
    B bor D;
max_bor(Intersection, A, B, C, D, M) ->
    if
        Intersection band M =/= 0 ->
            case (B - M) bor (M - 1) of
                NewB when NewB >= A ->
                    max_bor(Intersection, A, NewB, C, D, 0);
                _ ->
                    case (D - M) bor (M - 1) of
                        NewD when NewD >= C ->
                            max_bor(Intersection, A, B, C, NewD, 0);
                        _ ->
                            max_bor(Intersection, A, B, C, D, M bsr 1)
                    end
            end;
        true ->
            max_bor(Intersection, A, B, C, D, M bsr 1)
    end.
%% Upper bound of X bxor Y over the given non-negative ranges (cf. the
%% Chapter 4 reference in the module header). At each bit set in both B
%% and D (where bxor would cancel), try lowering one of them past that
%% bit with the lower bits saturated; unlike the bor case the scan
%% continues through less significant bits after each adjustment.
max_bxor(A, B, C, D) ->
    M = 1 bsl upper_bit(B band D),
    max_bxor(A, B, C, D, M).

%% M is the single bit currently examined; M =:= 0 ends the scan.
max_bxor(_A, B, _C, D, 0) ->
    B bxor D;
max_bxor(A, B, C, D, M) ->
    if
        B band D band M =/= 0 ->
            case (B - M) bor (M - 1) of
                NewB when NewB >= A ->
                    max_bxor(A, NewB, C, D, M bsr 1);
                _ ->
                    case (D - M) bor (M - 1) of
                        NewD when NewD >= C ->
                            max_bxor(A, B, C, NewD, M bsr 1);
                        _ ->
                            max_bxor(A, B, C, D, M bsr 1)
                    end
            end;
        true ->
            max_bxor(A, B, C, D, M bsr 1)
    end.
%% 0-based position of the highest set bit of Val (0 when Val =:= 0).
upper_bit(Val) ->
    upper_bit_1(Val, 0).
%% Helper: shift right until only one bit remains, counting the shifts.
upper_bit_1(Val, Acc) when Val bsr 1 =:= 0 ->
    Acc;
upper_bit_1(Val, Acc) ->
    upper_bit_1(Val bsr 1, Acc + 1).
%%%
%%% Copyright 2017 RBKmoney
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
%%%
%%% Module for operating mg from the console, so long commands do not
%%% have to be typed by hand. Anything needed in the hard life of a
%%% devops engineer can (and should) be added here.
%%%
-module(mwc).
%% API
-export_type([scalar/0]).
-export([get_statuses_distrib /1]).
-export([simple_repair /2]).
-export([simple_repair /3]).
-export([resume_interrupted_one/2]).
-export([kill /2]).
-export([get_failed_machines /1]).
-export([get_machine /2]).
-export([get_events_machine /2]).
-export([get_events_machine /3]).
-export([m_opts /1]).
-export([em_opts/1]).
%%
%% API
%%
-type scalar() :: string() | atom() | binary() | number().
% Distribution of the namespace's machines over all known status
% queries; every production is counted via a separate search.
-spec get_statuses_distrib(scalar()) ->
    [{atom(), non_neg_integer()}].
get_statuses_distrib(Namespace) ->
    [
        {StatusQuery, status_count(Namespace, StatusQuery)}
        || StatusQuery <- mg_machine:all_statuses()
    ].
%% Number of machines in `Namespace' matching `StatusQuery'.
-spec status_count(scalar(), mg_machine:search_query()) ->
    non_neg_integer().
status_count(Namespace, StatusQuery) ->
    erlang:length(mg_machine:search(m_opts(Namespace), StatusQuery)).
% Repair (recover) a machine using the default deadline.
-spec simple_repair(scalar(), scalar()) ->
    woody_context:ctx() | no_return().
simple_repair(Namespace, ID) ->
    simple_repair(Namespace, ID, mg_utils:default_deadline()).
%% Repair (recover) a machine with an explicit deadline. Returns the
%% woody context that was used, so the call can be correlated later.
-spec simple_repair(scalar(), scalar(), mg_utils:deadline()) ->
    woody_context:ctx() | no_return().
simple_repair(Namespace, ID, Deadline) ->
    WoodyCtx = woody_context:new(),
    ok = mg_machine:simple_repair(
        m_opts(Namespace),
        id(ID),
        mg_woody_api_utils:woody_context_to_opaque(WoodyCtx),
        Deadline
    ),
    WoodyCtx.
%% Resume a single machine that is in the interrupted state.
-spec resume_interrupted_one(scalar(), scalar()) ->
    ok | no_return().
resume_interrupted_one(Namespace, ID) ->
    ok = mg_machine:resume_interrupted_one(m_opts(Namespace), id(ID)).
% Kill a machine: brutally terminate its worker process.
-spec kill(scalar(), scalar()) ->
    ok.
kill(Namespace, ID) ->
    ok = mg_workers_manager:brutal_kill(mg_machine:manager_options(m_opts(Namespace)), id(ID)).
%% List the machines currently in 'failed' status together with the
%% failure reason extracted from their status.
-spec get_failed_machines(mg:ns()) ->
    [{mg:id(), Reason::term()}].
get_failed_machines(Namespace) ->
    Options = m_opts(Namespace),
    [
        {ID, Reason}
        ||
        {ID, {error, Reason, _}} <-
            [{ID, mg_machine:get_status(Options, ID)} || ID <- mg_machine:search(Options, failed)]
    ].
% Inspect a machine's raw state.
-spec get_machine(scalar(), scalar()) ->
    mg_machine:machine_state().
get_machine(Namespace, ID) ->
    mg_machine:get(m_opts(Namespace), id(ID)).
%% Fetch an events machine with an unbounded, forward history range.
-spec get_events_machine(scalar(), mg_events_machine:ref()) ->
    mg_events_machine:machine().
get_events_machine(Namespace, Ref) ->
    get_events_machine(Namespace, Ref, {undefined, undefined, forward}).
%% Fetch an events machine with an explicit history range.
-spec get_events_machine(scalar(), mg_events_machine:ref(), mg_events:history_range()) ->
    mg_events_machine:machine().
get_events_machine(Namespace, Ref, HRange) ->
    mg_events_machine:get_machine(em_opts(Namespace), Ref, HRange).
%%
%% Build mg_events_machine options for a namespace from the
%% mg_woody_api application environment.
-spec em_opts(scalar()) ->
    mg_events_machine:options().
em_opts(Namespace) ->
    mg_woody_api:events_machine_options(
        ns(Namespace),
        ns_config(Namespace),
        genlib_app:env(mg_woody_api, event_sink_ns)
    ).
%% Build mg_machine options for a namespace.
-spec m_opts(scalar()) ->
    mg_machine:options().
m_opts(Namespace) ->
    mg_woody_api:machine_options(ns(Namespace), ns_config(Namespace)).
%% Configuration of the namespace from the application environment;
%% crashes with badkey for an unknown namespace.
-spec ns_config(scalar()) ->
    _Config.
ns_config(Namespace) ->
    maps:get(ns(Namespace), genlib_app:env(mg_woody_api, namespaces)).
%% Coerce a namespace to its binary form.
-spec ns(scalar()) ->
    mg:ns().
ns(Namespace) ->
    genlib:to_binary(Namespace).
%% Coerce a scalar identifier into the binary form used by mg APIs.
-spec id(scalar()) ->
    mg:id().
id(Id0) ->
    genlib:to_binary(Id0).
- module(trace_theory).
-export([perform_computation/3]).
-ignore_xref([perform_computation/3]).
-export_type([word/0, dependent/0, production/0, alphabet/0]).
-type production() :: char().
-type production_definition() :: {char(), list(), list()}.
-type word() :: [production()].
-type dependent() :: [{production(), production()}].
-type independent() :: [{production(), production()}].
-type alphabet() :: [string()].
%------------------------------------------------%
% API %
%------------------------------------------------%
%Main Function that runs all the code, it computes Dependent Productions set,
%Independent Productions set, Graph, and Foata Normal Form.
%The FNF derived directly from the word is printed with the "WRONG " label;
%a second FNF is then computed from the graph and printed without a label.
-spec perform_computation(Alphabet :: alphabet(),
                          Productions :: [production_definition()],
                          Word :: word()) ->
                             ok.
perform_computation(Alphabet, Productions, Word) ->
    Dependent = determine_dependent(Productions),
    Independent = determine_independent(Productions),
    print_dependent_independent(Dependent, Independent),
    FNF1 = foata_normal_form:get_foata_normal_form(Word, Dependent, Alphabet),
    print_FNF(FNF1, "WRONG "),
    {Graph, Order} = graph_creation:create_graph(Word, Dependent),
    print_graph(Graph, Order),
    FNF2 = get_foata_normal_form_from_graph(Graph),
    print_FNF(FNF2, ""),
    ok.
%------------------------------------------------%
% PRIVATE FUNCTIONS %
%------------------------------------------------%
%Function that checks if every possible pair of productions is dependent.
%Returns all ordered pairs that conflict; note every production pairs
%with itself, since equal left-hand sides count as dependent.
-spec determine_dependent(Productions :: [production_definition()]) -> dependent().
determine_dependent(Productions) ->
    [ {Name1, Name2} || {Name1, _, _} = X <- Productions, {Name2, _, _} = Y <- Productions, is_dependent(X, Y)].
%Function that checks if the specific pair of productions is dependent:
%true when the left-hand vectors are equal, or when the position of the
%first non-zero entry of one production's left vector holds a non-zero
%entry in the other production's right vector.
%NOTE(review): if a left vector has no non-zero entry, find_non_zero/1
%returns -1 and lists:nth/2 crashes — presumably left vectors always
%contain a non-zero entry; confirm against the callers.
-spec is_dependent(Prod1 :: production_definition(), Prod2 :: production_definition()) -> boolean().
is_dependent({_, Prod1Left, Prod1Right}, {_, Prod2Left, Prod2Right}) ->
    case Prod1Left of
        Prod2Left -> true;
        _ ->
            case lists:nth(find_non_zero(Prod1Left), Prod2Right) of
                A when A /= 0 -> true;
                _ ->
                    case lists:nth(find_non_zero(Prod2Left), Prod1Right) of
                        A when A /= 0 -> true;
                        _ -> false
                    end
            end
    end.
%Function that checks if every possible pair of productions is independent;
%the complement of determine_dependent/1 over all ordered pairs.
-spec determine_independent(Productions :: [production_definition()]) -> independent().
determine_independent(Productions) ->
    [ {Name1, Name2} || {Name1, _, _} = X <- Productions, {Name2, _, _} = Y <- Productions, is_independent(X, Y)].
%Function that checks if the specific pair of productions is independent.
%A pair is independent iff it is not dependent; 'not' expresses this
%directly instead of the previous case-over-a-boolean.
-spec is_independent(Prod1 :: production_definition(), Prod2 :: production_definition()) -> boolean().
is_independent(Prod1, Prod2) ->
    not is_dependent(Prod1, Prod2).
%Return the 1-based position of the first non-zero entry in a list,
%or -1 when every entry is zero (or the list is empty).
-spec find_non_zero([integer()]) -> integer().
find_non_zero(Values) ->
    find_non_zero(Values, 1).

%Helper threading the current 1-based position.
-spec find_non_zero([integer()], integer()) -> integer().
find_non_zero([], _Pos) ->
    -1;
find_non_zero([Head | Rest], Pos) when Head == 0 ->
    find_non_zero(Rest, Pos + 1);
find_non_zero([_ | _], Pos) ->
    Pos.
%Function computes foata normal form based on graph: the graph is first
%enriched with parent data, then handed to foata_normal_form.
-spec get_foata_normal_form_from_graph(Graph :: graph_creation:graph_without_n_parents()) ->
    foata_normal_form:foata_normal_form().
get_foata_normal_form_from_graph(Graph) ->
    foata_normal_form:create_from_graph(graph_creation:insert_parent_data(Graph)).
%-------------------------------------------------%
% PRINTING FUNCTIONS %
%-------------------------------------------------%
%Function printing dependent and independent sets, labelled "D" and "I".
-spec print_dependent_independent(Dependent :: dependent(), Independent :: independent()) -> ok.
print_dependent_independent(Dependent, Independent) ->
    print_table(Dependent, "D"),
    print_table(Independent, "I").
%Function prints a set of production pairs, e.g. "D = {(a,b)(b,a)}".
%Data is now passed via ~s arguments instead of being concatenated into
%the format string, so a literal "~" in a production name can no longer
%be misinterpreted as an io:format control sequence.
-spec print_table(Table :: dependent() | independent(), Letter :: string()) -> ok.
print_table(Table, Letter) ->
    io:format("~s = {", [Letter]),
    lists:foreach(fun({Prod1, Prod2}) ->
                      io:format("(~s,~s)", [Prod1, Prod2])
                  end, Table),
    io:format("}\n"),
    ok.
%Function prints graph: first every edge as "N -> M", then every vertex
%as "N[label=X]".
-spec print_graph(Graph::graph_creation:graph_without_n_parents(), Order::graph_creation:order()) -> ok.
print_graph(Graph, Order) ->
    % Assign consecutive numbers to the vertices in Order, keeping the
    % reverse mapping number -> letter for label printing.
    {VertexNumber, _, NumberVertex} =
        lists:foldl(fun({Letter, _} = Element, {Map1, Counter, Map2}) ->
                        {maps:put(Element, Counter, Map1),
                         Counter + 1,
                         maps:put(Counter, Letter, Map2)}
                    end, {#{}, 1, #{}}, Order),
    % Printing all edges in the graph.
    lists:foreach(fun({Key, Connections}) ->
                      lists:foreach(fun(DesignatedConnection) ->
                                        io:fwrite("~.10B", [maps:get(Key, VertexNumber)]),
                                        io:format(" -> "),
                                        io:fwrite("~.10B\n", [maps:get(DesignatedConnection, VertexNumber)])
                                    end, Connections)
                  end, maps:to_list(Graph)),
    % Printing all vertices. The label is passed as a ~s argument so a
    % "~" inside it cannot be misread as an io:format control sequence
    % (it was previously concatenated into the format string).
    lists:foreach(fun({Number, Letter}) ->
                      io:fwrite("~.10B", [Number]),
                      io:format("[label=~s]\n", [Letter])
                  end, maps:to_list(NumberVertex)),
    ok.
%Function printing FNF, e.g. "FNF = [a,b][c]".
%Element text is now passed via ~s arguments instead of being
%concatenated into the format string, so a literal "~" in an element
%can no longer be misinterpreted as an io:format control sequence.
%Each class is still required to be non-empty (the fun head matches
%[Head | Tail]), as before.
-spec print_FNF(List :: foata_normal_form:foata_normal_form(), String :: string()) -> ok.
print_FNF(List, String) ->
    io:format("FNF ~s= ", [String]),
    lists:foreach(fun([Head | Tail]) ->
                      io:format("[~s", [Head]),
                      lists:foreach(fun(Element) ->
                                        io:format(",~s", [Element])
                                    end, Tail),
                      io:format("]")
                  end, List),
    io:format("\n"),
    ok.
%% -------------------------------------------------------------------
%%
%% riak_kv_wm_crdt: Webmachine resource for convergent data types
%%
%% Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Resource for serving data-types over HTTP.
%%
%% Available operations:
%%
%% GET /types/BucketType/buckets/Bucket/datatypes/Key
%% Get the current value of the data-type at `BucketType', `Bucket', `Key'.
%% Result is a JSON body with a structured value, or `404 Not Found' if no
%% datatype exists at that resource location.
%%
%% The format of the JSON response will be roughly
%% <code>{"type":..., "value":..., "context":...}</code>, where the
%% `type' is a string designating which data-type is presented, the
%% `value' is a representation of the data-type's value (see below),
%% and the `context' is the opaque context, if needed or requested.
%%
%% The type and structure of the `value' field in the response
%% depends on the `type' field.
%% <dl>
%% <dt>counter</dt><dd>an integer</dd>
%% <dt>set</dt><dd>an array of strings</dd>
%% <dt>map</dt><dd>an object where the fields are as described below.</dd>
%% </dl>
%%
%% The format of a field name in the map value determines both the
%% name of the entry and the type, joined with an underscore. For
%% example, a `register' with name `firstname' would be
%% `"firstname_register"'. Valid types embeddable in a map are
%% `counter', `flag', `register', `set', and `map'.
%%
%% The following query params are accepted:
%%
%% <dl>
%% <dt>r</dt><dd>Read quorum. See below for defaults and values.</dd>
%% <dt>pr</dt><dd>Primary read quorum. See below for defaults and values.</dd>
%% <dt>basic_quorum</dt><dd>Boolean. Return as soon as a quorum of responses are received
%% if true. Default is the bucket default, if absent.</dd>
%% <dt>notfound_ok</dt><dd>Boolean. A `not_found` response from a vnode counts toward
%% `r' quorum if true. Default is the bucket default, if absent.</dd>
%% <dt>include_context</dt><dd>Boolean. If the datatype requires the opaque "context" for
%% safe removal, include it in the response. Defaults to `true'.</dd>
%% </dl>
%%
%% POST /types/BucketType/buckets/Bucket/datatypes
%% POST /types/BucketType/buckets/Bucket/datatypes/Key
%% Mutate the data-type at `BucketType', `Bucket', `Key' by applying
%% the submitted operation contained in a JSON payload. If `Key' is
%% not specified, one will be generated for the client and included
%% in the returned `Location' header.
%%
%% The format of the operation payload depends on the data-type.
%% <dl>
%% <dt>counter</dt><dd>An integer, or an object containing a single field, either
%% `"increment"' or `"decrement"', and an associated integer value.</dd>
%% <dt>set</dt><dd>An object containing any combination of `"add"', `"add_all"',
%% `"remove"', `"remove_all"' fields. `"add"' and `"remove"' should refer to
%% single string values, while `"add_all"' and `"remove_all"' should be arrays
%% of strings. The `"context"' field may be included.</dd>
%% <dt>map</dt><dd>An object containing any of the fields `"remove"', or `"update"'.
%% `"remove"' should be a list of field names as described above.
%% `"update"` should be an object containing fields and the operation to apply
%% to the type associated with the field.</dd>
%% <dt>register (embedded in map only)</dt><dd>`{"assign":Value}' where `Value' is the new string
%% value of the register.</dd>
%% <dt>flag (embedded in map only)</dt><dd>The string "enable" or "disable".</dd>
%% </dl>
%%
%% The following query params are accepted (@see `riak_kv_wm_object' docs, too):
%%
%% <dl>
%% <dt>w</dt><dd>The write quorum. See below for defaults and values.</dd>
%% <dt>pw</dt><dd>The primary write quorum. See below for defaults and values.</dd>
%% <dt>dw</dt><dd>The durable write quorum. See below for default and values.</dd>
%% <dt>returnbody</dt><dd>Boolean. Default is `false' if not provided. When `true'
%% the response body will be the value of the datatype.</dd>
%% <dt>include_context</dt><dd>Boolean. Default is `true' if not provided. When `true'
%% and `returnbody' is `true', the opaque context will be included.</dd>
%% </dl>
%%
%% Quorum values (r/pr/w/pw/dw):
%% <dl>
%% <dt>default</dt><dd>Whatever the bucket default is. This is the value used
%% for any absent value.</dd>
%% <dt>quorum</dt><dd>(Bucket N val / 2) + 1</dd>
%% <dt>all</dt><dd>All replicas must respond</dd>
%% <dt>one</dt><dd>Any one response is enough</dd>
%% <dt>Integer</dt><dd>That specific number of vnodes must respond. Must be =< N</dd>
%% </dl>
%%
%% Please see http://docs.basho.com for details of all the quorum values and their effects.
-module(riak_kv_wm_crdt).
-record(ctx, {
api_version,
client, %% riak:local_client()
bucket_type,
bucket,
key,
crdt_type,
data,
module,
r,
w,
dw,
rw,
pr,
pw,
basic_quorum,
notfound_ok,
include_context,
returnbody,
method,
timeout,
security
}).
-include("riak_kv_wm_raw.hrl").
-include("riak_kv_types.hrl").
-export([
init/1,
service_available/2,
malformed_request/2,
is_authorized/2,
forbidden/2,
allowed_methods/2,
content_types_provided/2,
encodings_provided/2,
resource_exists/2,
process_post/2, %% POST handler
produce_json/2 %% GET/HEAD handler
]).
-include_lib("webmachine/include/webmachine.hrl").
%% @doc Webmachine init: record the resource API version in the context.
init(Props) ->
    {ok, #ctx{api_version=proplists:get_value(api_version, Props)}}.
%% @doc Always available; populates the request context with a local
%% riak client, the bucket type/bucket/key decoded from the dispatch
%% path, and the HTTP method.
service_available(RD, Ctx0) ->
    Ctx = riak_kv_wm_utils:ensure_bucket_type(RD, Ctx0, #ctx.bucket_type),
    {ok, Client} = riak_kv_wm_utils:get_riak_client(
                     local, riak_kv_wm_utils:get_client_id(RD)),
    {true, RD,
     Ctx#ctx{client=Client,
             bucket=path_segment_to_bin(bucket, RD),
             key=path_segment_to_bin(key, RD),
             method=wrq:method(RD)}}.
%% @doc GET/HEAD fetch the datatype value; POST submits a mutation.
allowed_methods(RD, Ctx) ->
    {['GET', 'HEAD', 'POST'], RD, Ctx}.
%% @doc Authenticate via riak_api_web_security; stores the security
%% context on success. An 'insecure' result means security is enabled
%% but the request arrived over plain HTTP, which is refused with 426.
is_authorized(ReqData, Ctx) ->
    case riak_api_web_security:is_authorized(ReqData) of
        false ->
            {"Basic realm=\"Riak\"", ReqData, Ctx};
        {true, SecContext} ->
            {true, ReqData, Ctx#ctx{security=SecContext}};
        insecure ->
            %% XXX 301 may be more appropriate here, but since the http and
            %% https port are different and configurable, it is hard to figure
            %% out the redirect URL to serve.
            halt_with_message(426,
                              <<"Security is enabled and Riak does not accept "
                                "credentials over HTTP. Try HTTPS instead.">>,
                              ReqData, Ctx)
    end.
%% @doc POST requests must carry a JSON content type before the common
%% quorum/boolean/timeout parameter validation runs; other methods go
%% straight to parameter validation.
malformed_request(RD, Ctx=#ctx{method='POST'}) ->
    malformed_check_post_ctype(RD, Ctx);
malformed_request(RD, Ctx) ->
    malformed_rw_params(RD, Ctx).
%% @doc Reject POST bodies whose content type is not application/json
%% with 406 Not Acceptable.
malformed_check_post_ctype(RD, Ctx) ->
    CType = wrq:get_req_header(?HEAD_CTYPE, RD),
    case mochiweb_util:parse_header(CType) of
        {"application/json",_} ->
            malformed_rw_params(RD, Ctx);
        _Other ->
            {{halt, 406}, RD, Ctx}
    end.
%% @doc Validate the quorum (r/w/dw/pw/pr) and boolean query parameters,
%% folding each parsed value into its #ctx record field, then validate
%% the timeout parameter.
malformed_rw_params(RD, Ctx) ->
    Res = lists:foldl(fun malformed_rw_param/2,
                      {false, RD, Ctx},
                      [{#ctx.r, "r", "default"},
                       {#ctx.w, "w", "default"},
                       {#ctx.dw, "dw", "default"},
                       {#ctx.pw, "pw", "default"},
                       {#ctx.pr, "pr", "default"}]),
    Res1 = lists:foldl(fun malformed_boolean_param/2,
                       Res,
                       [{#ctx.basic_quorum, "basic_quorum", "default"},
                        {#ctx.notfound_ok, "notfound_ok", "default"},
                        {#ctx.include_context, "include_context", "true"},
                        {#ctx.returnbody, "returnbody", "false"}]),
    malformed_timeout_param(Res1).
%% @doc Parse one quorum query parameter into the context field given by
%% Idx; anything that is not an atom keyword or an integer marks the
%% request as malformed with an explanatory body.
malformed_rw_param({Idx, Name, Default}, {Result, RD, Ctx}) ->
    case catch normalize_rw_param(wrq:get_qs_value(Name, Default, RD)) of
        P when (is_atom(P) orelse is_integer(P)) ->
            {Result, RD, setelement(Idx, Ctx, P)};
        _ ->
            {true,
             error_response("~s query parameter must be an integer or "
                            "one of the following words: 'one', 'quorum' or 'all'~n",
                            [Name], RD),
             Ctx}
    end.
%% @doc Parse one boolean query parameter ("true"/"false"/"default",
%% case-insensitive) into the context field given by Idx.
malformed_boolean_param({Idx, Name, Default}, {Result, RD, Ctx}) ->
    case string:to_lower(wrq:get_qs_value(Name, Default, RD)) of
        "true" ->
            {Result, RD, setelement(Idx, Ctx, true)};
        "false" ->
            {Result, RD, setelement(Idx, Ctx, false)};
        "default" ->
            {Result, RD, setelement(Idx, Ctx, default)};
        _ ->
            {true,
             error_response("~s query parameter must be true or false~n",
                            [Name], RD),
             Ctx}
    end.
%% @doc Parse the optional "timeout" query parameter: absent leaves the
%% context untouched, "0" means infinity, any other positive integer is
%% stored as-is; everything else marks the request malformed.
malformed_timeout_param({Result, RD, Ctx}) ->
    case wrq:get_qs_value("timeout", undefined, RD) of
        undefined ->
            {Result, RD, Ctx};
        TimeoutStr when is_list(TimeoutStr) ->
            try list_to_integer(TimeoutStr) of
                0 ->
                    {Result, RD, Ctx#ctx{timeout=infinity}};
                Timeout when is_integer(Timeout), Timeout > 0 ->
                    {Result, RD, Ctx#ctx{timeout=Timeout}};
                _Other ->
                    %% Negative integers parse but are rejected.
                    {true,
                     error_response("timeout query parameter must be an "
                                    "integer greater than 0 (or 0 for disabled), "
                                    "~s is invalid~n",
                                    [TimeoutStr], RD),
                     Ctx}
            catch
                error:badarg ->
                    {true,
                     error_response("timeout query parameter must be an "
                                    "integer greater than 0, ~s is invalid~n",
                                    [TimeoutStr], RD),
                     Ctx}
            end
    end.
%% @doc Reject requests that riak_kv_wm_utils:is_forbidden/1 flags;
%% otherwise continue with the security permission check.
forbidden(RD, Ctx) ->
    case riak_kv_wm_utils:is_forbidden(RD) of
        true ->
            {true, RD, Ctx};
        false ->
            forbidden_check_security(RD, Ctx)
    end.
%% @doc With no security context (security disabled) skip straight to
%% the bucket-type check; otherwise require the method's riak_kv get/put
%% permission on {BucketType, Bucket}.
forbidden_check_security(RD, Ctx=#ctx{security=undefined}) ->
    forbidden_check_bucket_type(RD, Ctx);
forbidden_check_security(RD, Ctx=#ctx{bucket_type=BType, bucket=Bucket,
                                      security=SecContext, method=Method}) ->
    Perm = permission(Method),
    case riak_core_security:check_permission({Perm, {BType, Bucket}},
                                             SecContext) of
        {false, Error, _} ->
            {true, error_response(Error, RD), Ctx};
        {true, _} ->
            forbidden_check_bucket_type(RD, Ctx)
    end.
%% @doc Detects whether the requested object's bucket-type exists;
%% unknown types are reported via handle_common_error/3.
forbidden_check_bucket_type(RD, Ctx) ->
    case riak_kv_wm_utils:bucket_type_exists(Ctx#ctx.bucket_type) of
        true ->
            forbidden_check_crdt_type(RD, Ctx);
        false ->
            handle_common_error(bucket_type_unknown, RD, Ctx)
    end.
%% @doc Validate the bucket's datatype configuration. Requests against
%% the default bucket type are redirected (301) to the legacy 1.4
%% counters URL; otherwise the bucket must have allow_mult=true and a
%% datatype that riak_kv_crdt supports, and the resolved module and
%% datatype are cached in the context.
forbidden_check_crdt_type(RD, Ctx=#ctx{bucket_type = <<"default">>,
                                       bucket=B0,
                                       key=K0}) ->
    %% Only legacy/1.4 counters are supported in the default/undefined
    %% bucket type. Since we don't want to confuse semantics of the
    %% new types or duplicate code, we redirect to the old resource
    %% instead.
    B = mochiweb_util:quote_plus(B0),
    K = mochiweb_util:quote_plus(K0),
    CountersUrl = lists:flatten(
                    io_lib:format("/buckets/~s/counters/~s",[B, K])),
    halt_with_message(301,
                      "Counters in the default bucket-type should use the "
                      "legacy URL\n",
                      wrq:set_resp_header("Location", CountersUrl, RD),
                      Ctx);
forbidden_check_crdt_type(RD, Ctx=#ctx{bucket_type=T, bucket=B}) ->
    case riak_core_bucket:get_bucket({T, B}) of
        BProps when is_list(BProps) ->
            DataType = proplists:get_value(datatype, BProps),
            AllowMult = proplists:get_value(allow_mult, BProps),
            Mod = riak_kv_crdt:to_mod(DataType),
            case {AllowMult, riak_kv_crdt:supported(Mod)} of
                {false, _} ->
                    {true, error_response("Bucket must be allow_mult=true~n",
                                          [], RD), Ctx};
                {_, false} ->
                    {true, error_response("Bucket datatype '~s' is not a "
                                          "supported type.~n", [DataType], RD), Ctx};
                _ ->
                    {false, RD, Ctx#ctx{module=Mod, crdt_type=DataType}}
            end;
        {error, no_type} ->
            %% This should be handled by forbidden_check_bucket_type/2
            handle_common_error(bucket_type_unknown, RD, Ctx)
    end.
%% @doc Responses are always rendered as application/json.
content_types_provided(RD, Ctx) ->
    {[{"application/json", produce_json}], RD, Ctx}.
%% @doc Offer the standard riak_kv response encodings.
encodings_provided(RD, Ctx) ->
    {riak_kv_wm_utils:default_encodings(), RD, Ctx}.
%% @doc POST always "exists" (a key may be generated later); for
%% GET/HEAD a missing key or a failed fetch becomes an error response,
%% otherwise the fetched object is cached in the context for rendering.
resource_exists(RD, Ctx=#ctx{method='POST'}) ->
    %% When submitting an operation, the resource always exists, even
    %% if key is unspecified.
    {true, RD, Ctx};
resource_exists(RD, Ctx=#ctx{key=undefined}) ->
    %% When fetching, if the key does not exist, we should give a not
    %% found.
    handle_common_error(notfound, RD, Ctx);
resource_exists(RD, Ctx=#ctx{client=C, bucket_type=T, bucket=B, key=K, module=Mod}) ->
    Options = make_options(Ctx),
    case C:get({T,B}, K, [{crdt_op, Mod}|Options]) of
        {ok, O} ->
            {true, RD, Ctx#ctx{data=O}};
        {error, Reason} ->
            handle_common_error(Reason, RD, Ctx)
    end.
%% @doc Apply the decoded JSON operation: generate a key when none was
%% supplied, wrap the operation as a crdt_op riak put, and — when the
%% put returns the updated object (returnbody) — render it back to the
%% client as JSON.
process_post(RD0, Ctx0=#ctx{client=C, bucket_type=T, bucket=B, module=Mod}) ->
    case check_post_body(RD0, Ctx0) of
        {error, RD} ->
            {{halt, 400}, RD, Ctx0};
        {ok, {_Type, Op, OpCtx}} ->
            {RD, Ctx} = maybe_generate_key(RD0, Ctx0),
            O = riak_kv_crdt:new({T, B}, Ctx#ctx.key, Mod),
            Options0 = make_options(Ctx),
            CrdtOp = make_operation(Mod, Op, OpCtx),
            Options = [{crdt_op, CrdtOp},
                       {retry_put_coordinator_failure,false}|Options0],
            case C:put(O, Options) of
                ok ->
                    {true, RD, Ctx};
                {ok, RObj} ->
                    %% The put returned the updated object: echo it.
                    {Body, RD1, Ctx1} = produce_json(RD, Ctx#ctx{data=RObj}),
                    {true,
                     wrq:set_resp_body(Body, wrq:set_resp_header(
                                               ?HEAD_CTYPE, "application/json",
                                               RD1)),
                     Ctx1};
                {error, Reason} ->
                    handle_common_error(Reason, RD, Ctx)
            end
    end.
%% @doc Render the fetched CRDT (Ctx#ctx.data) as a JSON fetch response,
%% updating riak_kv_stat for each stat the value extraction produced and
%% embedding the causal context only when the client requested it.
produce_json(RD, Ctx=#ctx{module=Mod, data=RObj, include_context=I}) ->
    Type = riak_kv_crdt:from_mod(Mod),
    {{RespCtx, Value}, Stats} = riak_kv_crdt:value(RObj, Mod),
    _ = [ ok = riak_kv_stat:update(S) || S <- Stats ],
    ModMap = riak_kv_crdt:mod_map(Type),
    Body = riak_kv_crdt_json:fetch_response_to_json(
             Type, Value, get_context(RespCtx,I), ModMap),
    {mochijson2:encode(Body), RD, Ctx}.
%% Internal functions

%% @doc Decode and validate the JSON operation in the request body.
%% Returns {ok, {Type, Op, Context}} on success, or {error, RD} with a
%% plain-text error message already appended to the response.
check_post_body(RD, #ctx{crdt_type=CRDTType}) ->
    try
        JSON = mochijson2:decode(wrq:req_body(RD)),
        ModMap = riak_kv_crdt:mod_map(CRDTType),
        %% The nested match asserts the decoded operation targets the
        %% bucket's configured datatype.
        Data = {CRDTType, _Op, _Context} =
            riak_kv_crdt_json:update_request_from_json(CRDTType, JSON,
                                                       ModMap),
        {ok, Data}
    catch
        throw:{invalid_operation, {BadType, BadOp}} ->
            {error,
             error_response("Invalid operation on datatype '~s': ~s~n",
                            [BadType, mochijson2:encode(BadOp)], RD)};
        throw:{invalid_field_name, Field} ->
            {error,
             error_response("Invalid map field name '~s'~n", [Field], RD)};
        throw:invalid_utf8 ->
            {error,
             error_response("Malformed JSON submitted, invalid UTF-8", RD)};
        _Other:Reason ->
            %% Anything else (e.g. mochijson2 decode failure) is reported
            %% as a generic decode error.
            {error,
             error_response("Couldn't decode JSON: ~p~n", [Reason], RD)}
    end.
%% @doc Converts a query string value into a quorum value. The four
%% symbolic names map to their atoms; anything else must parse as an
%% integer (and crashes with badarg otherwise).
normalize_rw_param(Param) ->
    case Param of
        "default" -> default;
        "one"     -> one;
        "quorum"  -> quorum;
        "all"     -> all;
        Numeric   -> list_to_integer(Numeric)
    end.
%% @doc Returns the appropriate permission for a given request method:
%% POST mutates (put permission), GET/HEAD only read (get permission).
permission('POST') -> "riak_kv.put";
permission(Read) when Read =:= 'GET'; Read =:= 'HEAD' -> "riak_kv.get".
%% @doc Halts the resource with the given formatted response message
%% (io_lib:format/2 is applied to Format/Args first).
halt_with_message(Status, Format, Args, RD, Ctx) ->
    halt_with_message(Status, io_lib:format(Format, Args), RD, Ctx).
%% @doc Halts the resource with the given response message, appending it
%% to the response body as text/plain.
halt_with_message(Status, Message, RD, Ctx) ->
    {{halt, Status}, error_response(Message,RD), Ctx}.
%% @doc Outputs a formatted error response with the text/plain content type.
error_response(Fmt, Args, RD) ->
    error_response(io_lib:format(Fmt, Args), RD).
%% @doc Outputs an error response with the text/plain content type.
%% The message is appended (not set), so earlier body content survives.
error_response(Msg, RD) ->
    wrq:set_resp_header(?HEAD_CTYPE, "text/plain",
                        wrq:append_to_response_body(Msg, RD)).
%% @doc Converts an error into the appropriate resource halt and message.
%% Quorum failures and timeouts map to 503, missing keys/bucket types to
%% 404, invalid quorum parameters to 400, precondition failure to 412,
%% and any unrecognized term to a 500 with the raw term printed.
handle_common_error(Reason, RD, Ctx) ->
    case Reason of
        too_many_fails ->
            halt_with_message(503, "Too many write failures to satisfy W/DW\n",
                              RD, Ctx);
        timeout ->
            halt_with_message(503, "request timed out\n", RD, Ctx);
        notfound ->
            {{halt, 404}, notfound_body(RD, Ctx), Ctx};
        bucket_type_unknown ->
            halt_with_message(404, "Unknown bucket type: ~s~n",
                              [Ctx#ctx.bucket_type], RD, Ctx);
        {deleted, _VClock} ->
            %% Tombstone: 404 plus the ?HEAD_DELETED marker header.
            {{halt,404},
             wrq:set_resp_header(?HEAD_DELETED, "true", notfound_body(RD, Ctx)),
             Ctx};
        {n_val_violation, N} ->
            halt_with_message(400,
                              "Specified w/dw/pw values invalid for bucket n "
                              "value of ~p~n",[N], RD, Ctx);
        {r_val_unsatisfied, Requested, Returned} ->
            halt_with_message(503, "R-value unsatisfied: ~p/~p~n",
                              [Returned, Requested], RD, Ctx);
        {dw_val_unsatisfied, DW, NumDW} ->
            halt_with_message(503, "DW-value unsatisfied: ~p/~p~n", [NumDW, DW],
                              RD, Ctx);
        {pr_val_unsatisfied, Requested, Returned} ->
            halt_with_message(503, "PR-value unsatisfied: ~p/~p~n",
                              [Returned, Requested], RD, Ctx);
        {pw_val_unsatisfied, Requested, Returned} ->
            halt_with_message(503, "PW-value unsatisfied: ~p/~p~n",
                              [Returned, Requested], RD, Ctx);
        failed ->
            halt_with_message(412, "", RD, Ctx);
        Err ->
            halt_with_message(500, "Error:~n~p~n", [Err], RD, Ctx)
    end.
%% @doc Converts a path segment into a binary by key.
%% Returns `undefined' when the segment is absent from the dispatch path;
%% otherwise the segment is URI-decoded if the request requires it.
path_segment_to_bin(Key, RD) ->
    Segment = proplists:get_value(Key, wrq:path_info(RD)),
    case Segment of
        undefined -> undefined;
        _ ->
            list_to_binary(riak_kv_wm_utils:maybe_decode_uri(RD, Segment))
    end.
%% @doc If the key is not submitted on POST, generate a key and set
%% the appropriate redirect location.
maybe_generate_key(RD, Ctx=#ctx{api_version=V, bucket_type=T, bucket=B,
                                key=undefined}) ->
    %% Fresh unique key from riak_core_util:unique_id_62/0; advertise it
    %% to the client via the Location header.
    K = riak_core_util:unique_id_62(),
    {wrq:set_resp_header("Location",
                         riak_kv_wm_utils:format_uri(T, B, K, undefined, V), RD),
     Ctx#ctx{key=list_to_binary(K)}};
maybe_generate_key(RD, Ctx) ->
    %% Key supplied by the client: nothing to do.
    {RD, Ctx}.
%% @doc Wrap a datatype operation in the #crdt_op{} record consumed by the
%% KV put path; Mod is the datatype module, Ctx the causal context.
make_operation(Mod, Op, Ctx) ->
    #crdt_op{mod=Mod, op=Op, ctx=Ctx}.
%% @doc Returns the causal context for inclusion in the JSON response, or
%% `undefined' when the client asked for the context to be omitted.
get_context(Ctx, true) -> Ctx;
get_context(_Ctx, false) -> undefined.
%% @doc Build the get/put option list from the request context, dropping
%% any options still at their 'default'/'undefined' values so that bucket
%% defaults apply for them.
make_options(Ctx) ->
    OptList = [{r, Ctx#ctx.r},
               {w, Ctx#ctx.w},
               {dw, Ctx#ctx.dw},
               {rw, Ctx#ctx.rw},
               {pr, Ctx#ctx.pr},
               {pw, Ctx#ctx.pw},
               {basic_quorum, Ctx#ctx.basic_quorum},
               {notfound_ok, Ctx#ctx.notfound_ok},
               {timeout, Ctx#ctx.timeout},
               {returnbody, Ctx#ctx.returnbody}],
    [ {K,V} || {K,V} <- OptList, V /= default, V /= undefined ].
%% @doc JSON body for 404 responses: reports the bucket's datatype and a
%% "notfound" error marker.
notfound_body(RD, #ctx{module=Mod}) ->
    JSON = {struct, [{<<"type">>, atom_to_binary(riak_kv_crdt:from_mod(Mod), utf8)},
                     {<<"error">>, <<"notfound">>}]},
    wrq:set_resp_header(?HEAD_CTYPE, "application/json",
                        wrq:set_resp_body(mochijson2:encode(JSON), RD)). | deps/riak_kv/src/riak_kv_wm_crdt.erl | 0.712832 | 0.452052 | riak_kv_wm_crdt.erl | starcoder
%%
%% @doc Miscellaneous utilities
%%
-module(rncryptor_util).
-author("<EMAIL>").
-export([ceil/1]).
-export([enpad/1, depad/1]).
-export([const_compare/2]).
-export([bin_to_hex/1, hex_to_bin/1]).
-define(AES256_BLOCK_SIZE, 16). %% AES uses 128-bit blocks (regardless of key size)
%%======================================================================================
%%
%% Integer ceiling
%%
%%======================================================================================
%% @doc Return integer ceiling of float.
%%
-spec ceil(X) -> Ceiling when
      X :: float(),
      Ceiling :: integer().
%%--------------------------------------------------------------------------------------
ceil(X) ->
    Truncated = trunc(X),
    %% trunc/1 rounds toward zero, so X exceeding its truncation means a
    %% positive fractional part remains and we must round up by one.
    if
        X > Truncated -> Truncated + 1;
        true          -> Truncated
    end.
%%======================================================================================
%%
%% PKCS7 padding
%%
%%======================================================================================
%%--------------------------------------------------------------------------------------
%% @doc Pad binary input using PKCS7 scheme.
%%
%% PKCS7 always appends between 1 and 16 bytes, each holding the pad
%% length, so block-aligned input gains one full block of padding.
-spec enpad(Bin) -> Padded when
      Bin :: binary(),
      Padded :: binary().
%%--------------------------------------------------------------------------------------
enpad(Bin) ->
    %% 16 is the AES block size (?AES256_BLOCK_SIZE in the module header).
    PadLen = 16 - (byte_size(Bin) rem 16),
    Pad = binary:copy(<<PadLen>>, PadLen),
    <<Bin/binary, Pad/binary>>.
%%--------------------------------------------------------------------------------------
%% @doc Remove padding from binary input using PKCS7 scheme.
%%
%% The last byte of the binary is the pad value. Per RFC 5652 Section 6.3
%% all input is padded, including input that is already a multiple of the
%% block size. However, some implementations skip padding when the final
%% byte is greater than the block size (it is then unambiguously data),
%% so a last byte above the block size is interpreted here as no padding.
%%
-spec depad(Padded :: binary()) -> Bin | {error, Reason} when
      Bin :: binary(),
      Reason :: string().
%%--------------------------------------------------------------------------------------
depad(Bin) ->
    PadLen = binary:last(Bin),
    %% 16 is the AES block size (?AES256_BLOCK_SIZE in the module header).
    case PadLen =< 16 of
        false ->
            %% Last byte exceeds the block size: treat the input as unpadded.
            Bin;
        true ->
            DataLen = byte_size(Bin) - PadLen,
            Expected = binary:copy(<<PadLen>>, PadLen),
            %% The trailing PadLen bytes must all equal PadLen; Expected is
            %% already bound, so the binary match enforces that equality.
            case Bin of
                <<Data:DataLen/binary, Expected/binary>> ->
                    Data;
                _ ->
                    {error, "Data not properly padded"}
            end
    end.
%%======================================================================================
%%
%% Compare binaries for equality
%%
%%======================================================================================
%% @doc Compare two binaries for equality, bit-by-bit, without short-circuits
%% to avoid timing differences. Note this function does short-circuit to
%% <code>false</code> if the binaries are not of equal size.
%%
-spec const_compare(Bin1, Bin2) -> boolean() when
      Bin1 :: binary(),
      Bin2 :: binary().
%%--------------------------------------------------------------------------------------
const_compare(<<Lhs/binary>>, <<Rhs/binary>>) when byte_size(Lhs) =:= byte_size(Rhs) ->
    const_compare_bits(Lhs, Rhs, true);
const_compare(_Lhs, _Rhs) ->
    false.

%% @private
%% Fold over both operands one bit at a time, combining with the
%% non-short-circuiting 'and' so every bit is always inspected.
const_compare_bits(<<L:1/bitstring, LTail/bitstring>>,
                   <<R:1/bitstring, RTail/bitstring>>, Acc) ->
    const_compare_bits(LTail, RTail, (L =:= R) and Acc);
const_compare_bits(<<>>, <<>>, Acc) ->
    Acc.
%%======================================================================================
%%
%% Conversions for hex to binary to hex
%%
%%======================================================================================
%%--------------------------------------------------------------------------------------
%% @doc Convert binary to hex string (two uppercase hex digits per byte).
%%
-spec bin_to_hex(Bin) -> Hex when
      Bin :: binary(),
      Hex :: string().
%%--------------------------------------------------------------------------------------
bin_to_hex(Bin) ->
    lists:flatten(
      [case Byte < 16#10 of
           %% Left-pad single-digit values so every byte yields two chars.
           true  -> [$0 | integer_to_list(Byte, 16)];
           false -> integer_to_list(Byte, 16)
       end || <<Byte>> <= Bin]).
%%--------------------------------------------------------------------------------------
%% @doc Convert hex string to binary.
%%
%% Accepts either a hex character list or a hex binary; the input must
%% contain an even number of hex digits (two per output byte) or the
%% decode crashes on the unmatched tail.
-spec hex_to_bin(Hex) -> Bin when
      Hex :: string(),
      Bin :: binary().
%%--------------------------------------------------------------------------------------
hex_to_bin(S) when is_list(S) ->
    hex_to_bin(S, []);
hex_to_bin(B) when is_binary(B) ->
    hex_to_bin(binary_to_list(B), []).
%%--------------------------------------------------------------------------------------
%% @private
%% Consumes two hex digits per iteration, accumulating decoded bytes in
%% reverse and flipping the accumulator once at the end (O(n)).
%%--------------------------------------------------------------------------------------
hex_to_bin([], Acc) ->
    list_to_binary(lists:reverse(Acc));
hex_to_bin([X,Y|T], Acc) ->
    {ok, [V], []} = io_lib:fread("~16u", [X,Y]),
    hex_to_bin(T, [V | Acc]). | src/rncryptor_util.erl | 0.554229 | 0.470554 | rncryptor_util.erl | starcoder
%% Copyright 2019 Octavo Labs AG Zurich Switzerland (https://octavolabs.com)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(vmq_topic).
-import(lists, [reverse/1]).
%% ------------------------------------------------------------------------
%% Topic semantics and usage
%% ------------------------------------------------------------------------
%% A topic must be at least one character long.
%%
%% Topic names are case sensitive. For example, ACCOUNTS and Accounts are two different topics.
%%
%% Topic names can include the space character. For example, Accounts payable is a valid topic.
%%
%% A leading "/" creates a distinct topic. For example, /finance is different from finance. /finance matches "+/+" and "/+", but not "+".
%%
%% Do not include the null character (Unicode \x0000) in any topic.
%%
%% The following principles apply to the construction and content of a topic tree:
%%
%% The length is limited to 64k but within that there are no limits to the number of levels in a topic tree.
%%
%% There can be any number of root nodes; that is, there can be any number of topic trees.
%% ------------------------------------------------------------------------
-export([new/1,
match/2,
validate_topic/2,
contains_wildcard/1,
unword/1,
word/1,
triples/1]).
-define(MAX_LEN, 65536).
%% @doc Wrap an already-split topic (a list of level words) in a tagged tuple.
new(Name) when is_list(Name) ->
    {topic, Name}.
%% ------------------------------------------------------------------------
%% topic match
%% ------------------------------------------------------------------------
%% A topic matches a subscription pattern level by level: identical words
%% match, '+' consumes exactly one level, and '#' consumes the remainder
%% of the topic (including zero remaining levels).
match([], []) ->
    true;
match([Level | TopicRest], [Level | PatternRest]) ->
    match(TopicRest, PatternRest);
match([_Level | TopicRest], [<<"+">> | PatternRest]) ->
    match(TopicRest, PatternRest);
match(_Topic, [<<"#">>]) ->
    true;
match([_ | _], [_ | _]) ->
    false;
match([], [_ | _]) ->
    false;
match(_, _) ->
    false.
%% ------------------------------------------------------------------------
%% topic validate
%% ------------------------------------------------------------------------
%% For each level of the topic, produce {ParentPath, Word, FullPath}; the
%% first level's parent is the atom 'root'. Used to walk a topic tree.
triples([First | _] = Words) when is_binary(First) ->
    build_triples(lists:reverse(Words), []).

%% @private Works on the reversed word list so each step can peel off the
%% deepest word while its ancestors remain as the tail.
build_triples([Word], Acc) ->
    [{root, Word, [Word]} | Acc];
build_triples([Word | Parents] = Reversed, Acc) ->
    build_triples(Parents,
                  [{lists:reverse(Parents), Word, lists:reverse(Reversed)} | Acc]).
%% @doc Join a word list back into a single topic binary; delegated to the
%% vernemq_dev API so broker and plugins agree on the encoding.
unword(Topic) ->
    vernemq_dev_api:unword_topic(Topic).
%% @doc Split a topic into its levels on "/". re:split/2 accepts both
%% binaries and chardata and returns the parts as binaries.
word(Topic) ->
    re:split(Topic, <<"/">>).
%% @doc Validate a raw topic for the given usage. Publish topics may not
%% contain wildcards; subscribe topics may, and may also be '$share'
%% group subscriptions. Empty topics and topics longer than ?MAX_LEN
%% bytes are rejected outright.
validate_topic(_Type, <<>>) ->
    {error, no_empty_topic_allowed};
validate_topic(_Type, Topic) when byte_size(Topic) > ?MAX_LEN ->
    {error, subscribe_topic_too_long};
validate_topic(publish, Topic) ->
    validate_publish_topic(Topic, 0, []);
validate_topic(subscribe, Topic) ->
    validate_subscribe_topic(Topic, 0, []).
%% True when the (already validated) topic word list contains a '+' at
%% any level or ends with the multi-level '#' wildcard.
contains_wildcard([]) -> false;
contains_wildcard([<<"+">> | _]) -> true;
contains_wildcard([<<"#">>]) -> true;
contains_wildcard([_Word | Tail]) -> contains_wildcard(Tail).
%% Scan a publish topic byte-wise. L is the length of the word currently
%% being scanned; completed words (split on "/") accumulate reversed in
%% Acc. Since the scan restarts with L = 0 after every "/", the first
%% three clauses reject a whole-word '+' or '#', and the case arms reject
%% wildcards embedded inside a word.
validate_publish_topic(<<"+/", _/binary>>, _, _) -> {error, 'no_+_allowed_in_publish'};
validate_publish_topic(<<"+">>, _, _) -> {error, 'no_+_allowed_in_publish'};
validate_publish_topic(<<"#">>,_, _) -> {error, 'no_#_allowed_in_publish'};
validate_publish_topic(Topic, L, Acc) ->
    case Topic of
        <<Word:L/binary, "/", Rest/binary>> ->
            validate_publish_topic(Rest, 0, [Word|Acc]);
        <<Word:L/binary>> ->
            {ok, lists:reverse([Word|Acc])};
        <<_:L/binary, "+", _/binary>> ->
            {error, 'no_+_allowed_in_word'};
        <<_:L/binary, "#", _/binary>> ->
            {error, 'no_#_allowed_in_word'};
        _ ->
            validate_publish_topic(Topic, L + 1, Acc)
    end.
%% Scan a subscribe topic byte-wise (same technique as
%% validate_publish_topic/3): '+' is only legal as a whole word, '#' only
%% as the final word. The finished word list is additionally checked for
%% a well-formed '$share' group subscription.
validate_subscribe_topic(<<"+/", Rest/binary>>, _, Acc) -> validate_subscribe_topic(Rest, 0, [<<"+">>|Acc]);
validate_subscribe_topic(<<"+">>, _, Acc) -> validate_shared_subscription(reverse([<<"+">>|Acc]));
validate_subscribe_topic(<<"#">>, _, Acc) -> validate_shared_subscription(reverse([<<"#">>|Acc]));
validate_subscribe_topic(Topic, L, Acc) ->
    case Topic of
        <<Word:L/binary, "/", Rest/binary>> ->
            validate_subscribe_topic(Rest, 0, [Word|Acc]);
        <<Word:L/binary>> ->
            validate_shared_subscription(reverse([Word|Acc]));
        <<_:L/binary, "+", _/binary>> ->
            {error, 'no_+_allowed_in_word'};
        <<_:L/binary, "#", _/binary>> ->
            {error, 'no_#_allowed_in_word'};
        _ ->
            validate_subscribe_topic(Topic, L + 1, Acc)
    end.
%% A '$share' subscription needs a group name plus at least one real topic
%% level; anything not starting with '$share' passes through unchanged.
validate_shared_subscription(Topic) ->
    case Topic of
        [<<"$share">>, _Group, _FirstWord | _] -> {ok, Topic};
        [<<"$share">> | _] -> {error, invalid_shared_subscription};
        _ -> {ok, Topic}
    end.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Wildcard-free topics round-trip through the validators, including the
%% empty levels produced by leading, trailing and repeated '/'.
validate_no_wildcard_test() ->
    % no wildcard
    {ok, [<<"a">>, <<"b">>, <<"c">>]}
        = validate_topic(subscribe, <<"a/b/c">>),
    {ok, [<<>>, <<"a">>, <<"b">>]}
        = validate_topic(subscribe, <<"/a/b">>),
    {ok, [<<"test">>, <<"topic">>, <<>>]}
        = validate_topic(subscribe, <<"test/topic/">>),
    {ok, [<<"test">>, <<>>, <<>>, <<>>, <<"a">>, <<>>, <<"topic">>]}
        = validate_topic(subscribe, <<"test////a//topic">>),
    {ok, [<<>>, <<"test">>, <<>>, <<>>, <<>>, <<"a">>, <<>>, <<"topic">>]}
        = validate_topic(subscribe, <<"/test////a//topic">>),
    {ok, [<<"foo">>, <<>>, <<"bar">>, <<>>, <<>>, <<"baz">>]}
        = validate_topic(publish, <<"foo//bar///baz">>),
    {ok, [<<"foo">>, <<>>, <<"baz">>, <<>>, <<>>]}
        = validate_topic(publish, <<"foo//baz//">>),
    {ok, [<<"foo">>, <<>>, <<"baz">>]}
        = validate_topic(publish, <<"foo//baz">>),
    {ok, [<<"foo">>, <<>>, <<"baz">>, <<"bar">>]}
        = validate_topic(publish, <<"foo//baz/bar">>),
    {ok, [<<>>, <<>>, <<>>, <<>>, <<"foo">>, <<>>, <<>>, <<"bar">>]}
        = validate_topic(publish, <<"////foo///bar">>).
%% Wildcard handling: '+'/'#' are accepted as whole words in subscribe
%% topics, rejected in publish topics, and rejected when embedded in a word.
validate_wildcard_test() ->
    {ok, [<<>>, <<"+">>, <<"x">>]}
        = validate_topic(subscribe, <<"/+/x">>),
    {ok, [<<>>, <<"a">>, <<"b">>, <<"c">>, <<"#">>]}
        = validate_topic(subscribe, <<"/a/b/c/#">>),
    {ok, [<<"#">>]}
        = validate_topic(subscribe, <<"#">>),
    {ok, [<<"foo">>, <<"#">>]}
        = validate_topic(subscribe, <<"foo/#">>),
    {ok, [<<"foo">>, <<"+">>, <<"baz">>]}
        = validate_topic(subscribe, <<"foo/+/baz">>),
    {ok, [<<"foo">>, <<"+">>, <<"baz">>, <<"#">>]}
        = validate_topic(subscribe, <<"foo/+/baz/#">>),
    {ok, [<<"foo">>, <<"foo">>, <<"baz">>, <<"#">>]}
        = validate_topic(subscribe, <<"foo/foo/baz/#">>),
    {ok, [<<"foo">>, <<"#">>]} = validate_topic(subscribe, <<"foo/#">>),
    {ok, [<<>>, <<"#">>]} = validate_topic(subscribe, <<"/#">>),
    {ok, [<<"test">>, <<"topic">>, <<"+">>]} = validate_topic(subscribe, <<"test/topic/+">>),
    {ok, [<<"+">>, <<"+">>, <<"+">>, <<"+">>, <<"+">>,
          <<"+">>, <<"+">>, <<"+">>, <<"+">>, <<"+">>, <<"test">>]}
        = validate_topic(subscribe, <<"+/+/+/+/+/+/+/+/+/+/test">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(publish, <<"test/#-">>),
    {error, 'no_+_allowed_in_word'} = validate_topic(publish, <<"test/+-">>),
    {error, 'no_+_allowed_in_publish'} = validate_topic(publish, <<"test/+/">>),
    {error, 'no_#_allowed_in_publish'} = validate_topic(publish, <<"test/#">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(subscribe, <<"a/#/c">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(subscribe, <<"#testtopic">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(subscribe, <<"testtopic#">>),
    {error, 'no_+_allowed_in_word'} = validate_topic(subscribe, <<"+testtopic">>),
    {error, 'no_+_allowed_in_word'} = validate_topic(subscribe, <<"testtopic+">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(subscribe, <<"#testtopic/test">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(subscribe, <<"testtopic#/test">>),
    {error, 'no_+_allowed_in_word'} = validate_topic(subscribe, <<"+testtopic/test">>),
    {error, 'no_+_allowed_in_word'} = validate_topic(subscribe, <<"testtopic+/test">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(subscribe, <<"/test/#testtopic">>),
    {error, 'no_#_allowed_in_word'} = validate_topic(subscribe, <<"/test/testtopic#">>),
    {error, 'no_+_allowed_in_word'} = validate_topic(subscribe, <<"/test/+testtopic">>),
    {error, 'no_+_allowed_in_word'} = validate_topic(subscribe, <<"/testtesttopic+">>).
%% '$share' requires a group name plus at least one real topic level.
validate_shared_subscription_test() ->
    {error, invalid_shared_subscription} = validate_topic(subscribe, <<"$share/mygroup">>),
    {ok, [<<"$share">>, <<"mygroup">>, <<"a">>, <<"b">>]} = validate_topic(subscribe, <<"$share/mygroup/a/b">>).
%% Fuzz validate/unword round-trips with 1000 random topics (see
%% random_topics/1); the PRNG is seeded from the current timestamp.
validate_unword_test() ->
    rand:seed(exsplus, erlang:timestamp()),
    random_topics(1000),
    ok.
%% Sanity checks for contains_wildcard/1: '+', trailing '#', and none.
contains_wildcard_test() ->
    true = contains_wildcard([<<"a">>, <<"+">>, <<"b">>]),
    true = contains_wildcard([<<"#">>]),
    false = contains_wildcard([<<"a">>, <<"b">>, <<"c">>]).
%% Property-style helper: build N random subscribe topics (a mix of '+'
%% wildcards and random words, each level followed by "/"), validate each
%% one, and assert that unword/1 reproduces the original binary exactly.
random_topics(0) -> ok;
random_topics(N) when N > 0 ->
    NWords = rand:uniform(100),
    Words =
        lists:foldl(fun(_, AAcc) ->
                            case rand:uniform(3) of
                                1 ->
                                    ["+/"|AAcc];
                                _ ->
                                    [random_word(), "/"|AAcc]
                            end
                    end, [], lists:seq(1, NWords)),
    Topic = iolist_to_binary(Words),
    {ok, T} = validate_topic(subscribe, Topic),
    Topic = iolist_to_binary(unword(T)),
    random_topics(N - 1).
%% Pick a random non-empty contiguous slice of the lowercase-alphanumeric
%% alphabet: a random 1-based start position, then a random length that
%% still fits within the alphabet.
random_word() ->
    Alphabet = "abcdefghijklmnopqrstuvwxyz0123456789",
    Start = rand:uniform(length(Alphabet)),
    Len = rand:uniform(length(Alphabet) + 1 - Start),
    lists:sublist(Alphabet, Start, Len).
-endif. | src/vmq_topic.erl | 0.516108 | 0.429669 | vmq_topic.erl | starcoder |
-module(grisp_led).
-behavior(gen_server).
% API
-export([start_link/0]).
-export([color/2]).
-export([off/1]).
-export([flash/3]).
-export([pattern/2]).
% Callbacks
-export([init/1]).
-export([handle_call/3]).
-export([handle_cast/2]).
-export([handle_info/2]).
-export([code_change/3]).
-export([terminate/2]).
%--- Types ---------------------------------------------------------------------
-type position() :: 1 | 2.
% The position of the LED on the GRiSP board, either `1' or `2'.
-type color() :: color_name() | color_value().
% A color as either a shorthand name or a specific RGB value.
-type color_name() ::
off |
black |
blue |
green |
aqua |
red |
magenta |
yellow |
white.
% A color name. `off' and `black' are shorthands for turning off the LED.
-type color_value() :: {0 | 1, 0 | 1, 0 | 1}.
% A color value, specifying the individual R, G and B components as 1's or 0's
% where 1 means on and 0 means off.
-type time() :: pos_integer() | infinity.
% A time interval for a color in milliseconds. Must be 1 or above, or
% alternatively `infinity'.
-type pattern() :: [{time(), color() | fun(() -> color()) }].
% A list of intervals and colors to show during those intervals.
%--- Records -------------------------------------------------------------------
-record(state, {driver, leds}).
%--- Macros --------------------------------------------------------------------
-define(is_component(C), C >= 0 andalso C =< 1).
%--- API -----------------------------------------------------------------------
% @private
% Starts the LED server, registered locally under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, undefined, []).
% @doc Set the color of an LED.
%
% <h5>Examples</h5>
% ```
% 1> grisp_led:color(1, red).
% ok
% 2> grisp_led:color(2, {0, 1, 0}).
% ok
% '''
% A constant color is expressed as a one-step pattern that never expires.
-spec color(position(), color()) -> ok.
color(Pos, Color) -> pattern(Pos, [{infinity, Color}]).
% @doc Turn of an LED.
% @equiv grisp_led:color(Pos, off)
% Turning the LED off is a permanent 'off' color pattern.
-spec off(position()) -> ok.
off(Pos) -> pattern(Pos, [{infinity, off}]).
% @doc Flash an LED in an on/off pattern with the specified color.
%
% <h5>Examples</h5>
% ```
% 1> grisp_led:flash(2, blue, 500).
% ok
% '''
%
% @equiv grisp_led:pattern(Position, [{Time, Color}, {Time, off}])
% Alternate Color and off, each shown for Interval milliseconds.
-spec flash(position(), color(), time()) -> ok.
flash(Pos, Color, Interval) ->
    pattern(Pos, [{Interval, Color}, {Interval, off}]).
% @doc Animate an LED with a pattern of colors and intervals.
%
% <h5>Examples</h5>
% ```
% 1> grisp_led:pattern(1, [{300, green}, {500, yellow}, {700, red}, {infinity, off}]).
% ok
% 2> Rainbow = [{300, {R, G, B}} || R <- [0,1], G <- [0,1], B <- [0,1], {R, G, B} =/= {0, 0, 0}].
% [{300,{0,0,1}},
% {300,{0,1,0}},
% {300,{0,1,1}},
% {300,{1,0,0}},
% {300,{1,0,1}},
% {300,{1,1,0}},
% {300,{1,1,1}}]
% 3> grisp_led:pattern(2, Rainbow).
% ok
% '''
%
% The color can also be specified using functions as generators
% instead of explicitly stating the color :
%
% ```
% 2> Random = fun() -> {rand:uniform(2) - 1, rand:uniform(2) -1, rand:uniform(2) - 1} end.
% #Fun<erl_eval.20.128620087>
% 3> grisp_led:pattern(1, [{100, Random}]).
% '''
%
% As well as by composing lists of intervals and pattern functions :
%
% ```
% 4> Funs = [ fun() -> {X rem 2, rand:uniform(2) - 1 , 1} end || X <- lists:seq(1,10) ].
% [#Fun<erl_eval.20.128620087>, ...
% 5> Intervals = lists:seq(1000,1900,100).
% [1000,1100,1200,1300,1400,1500,1600,1700,1800,1900]
% 6> Result = lists:zip(Intervals, Funs).
% [{1000,#Fun<erl_eval.20.128620087>},...
% 7> grisp_led:pattern(1, Result).
% '''
% Asynchronously replace the LED's pattern; the server restarts the
% animation from the first step of the new pattern.
-spec pattern(position(), pattern()) -> ok.
pattern(Pos, Pattern) -> gen_server:cast(?MODULE, {pattern, Pos, Pattern}).
%--- Callbacks -----------------------------------------------------------------
% @private
% Both board LEDs start switched off with no tick timer armed.
init(undefined) ->
    {ok, #state{leds = [
        {1, {[{infinity, off}], undefined}},
        {2, {[{infinity, off}], undefined}}
    ]}}.
% @private
% No synchronous API is exposed; any call is a programming error.
handle_call(Request, From, _State) -> error({unknown_call, Request, From}).

% @private
% Swap in the new pattern for one LED and restart its animation at once.
handle_cast({pattern, Pos, NewPattern}, State) ->
    NewState = update_led(Pos, State, fun({_OldPattern, Timer}) ->
        tick_pattern(Pos, {NewPattern, Timer})
    end),
    {noreply, NewState}.
% @private
% A tick timer fired: advance that LED to the next step of its pattern.
handle_info({tick, Pos}, State) ->
    NewState = update_led(Pos, State, fun(Led) ->
        tick_pattern(Pos, Led)
    end),
    {noreply, NewState}.

% @private
code_change(_OldVsn, State, _Extra) -> {ok, State}.

% @private
terminate(_Reason, _State) -> ok.
%--- Internal ------------------------------------------------------------------
% Apply Fun to the state of the LED at Pos inside the server state.
update_led(Pos, #state{leds = Leds} = State, Fun) ->
    State#state{leds = mod(Pos, Fun, Leds)}.
% Apply Fun to the value stored under key Pos in the {Pos, Led} list,
% preserving list order; crashes with {led_not_found, Pos} when absent.
mod(Pos, Fun, [{Pos, Led} | Rest]) ->
    [{Pos, Fun(Led)} | Rest];
mod(Pos, Fun, [Other | Rest]) ->
    [Other | mod(Pos, Fun, Rest)];
mod(Pos, _Fun, []) ->
    error({led_not_found, Pos}).
% Show the current pattern step and arm the next tick. An 'infinity' step
% is terminal: its color is written once and no timer is started. Finite
% steps (Time >= 1 ms) are rotated to the back so the pattern loops.
tick_pattern(Pos, {[{infinity, Color} = Pattern|_Rest], Timer}) ->
    cancel_timer(Timer),
    write_color(Pos, Color),
    {[Pattern], undefined};
tick_pattern(Pos, {[{Time, Color} = Step|Rest], Timer}) when Time >= 1 ->
    cancel_timer(Timer),
    write_color(Pos, Color),
    NewTimer = erlang:send_after(Time, self(), {tick, Pos}),
    {Rest ++ [Step], NewTimer}.
% Cancel a pending tick timer, tolerating the "no timer armed" case.
cancel_timer(Ref) ->
    case Ref of
        undefined -> ok;
        _ -> erlang:cancel_timer(Ref)
    end.
% Resolve the color spec to an RGB triple and drive the three GPIO
% components of the LED at Pos.
write_color(Pos, Color) ->
    {R, G, B} = translate(Color),
    write_component(Pos, red, action(R)),
    write_component(Pos, green, action(G)),
    write_component(Pos, blue, action(B)).
% Map a component bit to the grisp_gpio operation: 0 clears, 1 sets.
action(0) -> clear;
action(1) -> set.
% Dispatch to the GPIO pin for the given LED position and color channel;
% Action is the grisp_gpio function name (set | clear) applied to the pin.
write_component(1, red, Action) -> grisp_gpio:Action(led1_r);
write_component(1, green, Action) -> grisp_gpio:Action(led1_g);
write_component(1, blue, Action) -> grisp_gpio:Action(led1_b);
write_component(2, red, Action) -> grisp_gpio:Action(led2_r);
write_component(2, green, Action) -> grisp_gpio:Action(led2_g);
write_component(2, blue, Action) -> grisp_gpio:Action(led2_b).
% Resolve a color spec to an RGB triple. Funs are generator callbacks
% evaluated at every tick; everything else is a color name or literal.
translate(Fun) when is_function(Fun) -> to_rgb(Fun());
translate(Value) -> to_rgb(Value).

% Normalize a color name to its {R, G, B} triple (components 0 | 1,
% validated via ?is_component); invalid specs crash with invalid_color.
to_rgb(black) -> to_rgb(off);
to_rgb(off) -> {0, 0, 0};
to_rgb(blue) -> {0, 0, 1};
to_rgb(green) -> {0, 1, 0};
to_rgb(aqua) -> {0, 1, 1};
to_rgb(red) -> {1, 0, 0};
to_rgb(magenta) -> {1, 0, 1};
to_rgb(yellow) -> {1, 1, 0};
to_rgb(white) -> {1, 1, 1};
to_rgb({R, G, B} = Color)
  when ?is_component(R) andalso ?is_component(G) andalso ?is_component(B) ->
    Color;
to_rgb(Color) ->
    error({invalid_color, Color}). | src/grisp_led.erl | 0.7181 | 0.455441 | grisp_led.erl | starcoder
-module(erlmachine_supervisor_model).
%% NOTE: The main purpose of the supervisor model is the ability to affect the runtime layer without affecting the monitoring layer of the service;
%% NOTE: Supervisor model concerns: runtime credentials, logging;
%% NOTE: UID is used to support runtime credentials of the initiator of supervisor process:
%% - https://en.wikipedia.org/wiki/Group_identifier;
%% - https://en.wikipedia.org/wiki/User_identifier;
%% NOTE: Permissions are set of characters which represents the read, write, and execute access;
%% TODO: Permissions should be checked automatically before invocation;
%% TODO: https://www.kernel.org/doc/html/latest/security/credentials.html;
%% TODO:
%% 1. https://wiki.archlinux.org/index.php/File_permissions_and_attributes;
%% 2. https://en.wikipedia.org/wiki/File-system_permissions;
%% 3. https://en.wikipedia.org/wiki/File_attribute;
%% 4. https://mason.gmu.edu/~montecin/UNIXpermiss.htm;
%% 5. https://www.howtogeek.com/437958/how-to-use-the-chmod-command-on-linux/
%% API
-export([is_supervisor_model/1]).
-export([startup/2]).
-export([install/2, uninstall/2]).
-include("erlmachine_user.hrl").
-include("erlmachine_assembly.hrl").
-include("erlmachine_graph.hrl").
-include("erlmachine_system.hrl").
-callback startup(UID::uid(), Vertices::[vertex()], Opt::map(), Env::map()) ->
success() | failure(term(), term()).
-callback install(UID::uid(), Vertex::vertex()) ->
success() | failure(term(), term()).
-callback uninstall(UID::uid(), Vertex::vertex()) ->
success() | failure(term(), term()).
-optional_callbacks([install/2, uninstall/2]).
%% @doc True when Module declares this module among its behaviours.
-spec is_supervisor_model(Module::atom()) -> boolean().
is_supervisor_model(Module) ->
    lists:member(?MODULE, erlmachine:behaviours(Module)).
%%% Transmission API

%% @doc Start the supervision runtime for Assembly: the model module's
%% startup/4 callback receives the initiator's UID, the graph vertices of
%% the extensions to supervise, the model options and the environment.
-spec startup(Assembly::assembly(), Exts::[assembly()]) ->
                     success() | failure(term(), term()).
startup(Assembly, Exts) ->
    Model = erlmachine_assembly:model(Assembly), Module = erlmachine_model:module(Model),
    UID = erlmachine_assembly:uid(Assembly),
    Vs = [erlmachine_assembly:vertex(Ext)|| Ext <- Exts],
    Opt = erlmachine_model:options(Model),
    Env = erlmachine_assembly:env(Assembly),
    Module:startup(UID, Vs, Opt, Env).
%% @doc Install extension Ext under Assembly's supervisor. install/2 is an
%% optional model callback; when the model module does not export it the
%% call succeeds by default.
-spec install(Assembly::assembly(), Ext::assembly()) ->
                     success() | failure(term(), term()).
install(Assembly, Ext) ->
    Model = erlmachine_assembly:model(Assembly), Module = erlmachine_model:module(Model),
    UID = erlmachine_assembly:uid(Assembly),
    V = erlmachine_assembly:vertex(Ext),
    Fun = install, Args = [UID, V], Def = erlmachine:success(),
    erlmachine:optional_callback(Module, Fun, Args, Def).
%% @doc Uninstall the extension at graph vertex V. Like install/2 this is
%% an optional model callback that defaults to success when not exported.
-spec uninstall(Assembly::assembly(), V::vertex()) ->
                       success() | failure(term(), term()).
uninstall(Assembly, V) ->
    Model = erlmachine_assembly:model(Assembly), Module = erlmachine_model:module(Model),
    UID = erlmachine_assembly:uid(Assembly),
    Fun = uninstall, Args = [UID, V], Def = erlmachine:success(),
    erlmachine:optional_callback(Module, Fun, Args, Def). | src/behaviours/erlmachine_supervisor_model.erl | 0.562056 | 0.470919 | erlmachine_supervisor_model.erl | starcoder
%%%-----------------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright (C) 2011-2017 <NAME>
%%% @doc
%%% The module name 'em' stands for 'Erly Mock'.
%%%
%%% <p>This mocking library works similar to Easymock.</p>
%%%
%%% <p>After a mock process is started by {@link new/0} it can be programmed to
%%% expect function calls and to react to them in two ways: <ul><li>by returning
%%% a value</li><li>by executing an arbitrary function</li></ul> This is done
%%% with {@link strict/4}, {@link strict/5}, {@link stub/4}, {@link stub/5} </p>
%%%
%%% <p>Before the code under test is executed, the mock must be told
%%% that the programming phase is over by {@link replay/1}.</p>
%%%
%%% <p>In the next phase the code under test is run, and might or might not call
%%% the functions mocked. The mock process checks that all functions programmed
%%% with {@link strict/4}, {@link strict/5} are called in the correct order,
%%% with the expected arguments and reacts in the way defined during the
%%% programming phase. If a mocked function is called although another function
%%% was expected, or if an expected function was called with different
%%% arguments, the mock process dies and prints a comprehensive error message
%%% before failing the test.</p>
%%%
%%% <p>To support mock invocations from multiple processes, the strictness
%%% requirement can be reduced to calls belonging to the same group. {@link
%%% new_groups/2} creates a list of named groups, where calls belonging to
%%% different groups may occur in any order. A group is passed as mock reference
%%% (1st parameter) to {@link strict/5} or {@link strict/4}. Use {@link
%%% await/1} with a list of groups to block the caller until all groups
%%% are finished, i.e. the expectations assigned to each group via {@link
%%% strict/5} were invoked. NOTE: It is prohibited to use the same expectations
%%% with different return values among a list groups created together.</p>
%%%
%%% <p>At the end of a unit test {@link await_expectations/1} is called to
%%% await all invocations defined during the programming phase.</p>
%%%
%%% <p>An alternative to {@link await_expectations/1} is {@link verify/1}. It is
%%% called to check for missing invocations at the end of the programming phase,
%%% if any expected invocations are missing, verify will throw an
%%% exception.</p>
%%%
%%% <p>When the mock process exits it tries hard to remove all modules, that
%%% were dynamically created and loaded during the programming phase.</p>
%%%
%%% NOTE: This library works by purging the modules mocked and replacing them
%%% with dynamically created and compiled code, so be careful what you mock,
%%% i.e. it brings chaos to mock modules from kernel. This also implies, that
%%% tests that mock the same modules must be run sequentially.
%%%
%%% Apart from that, it is very advisable to <b>only mock owned modules</b>
%%% anyway.
%%%
%%% @end
%%%-----------------------------------------------------------------------------
%%% Copyright (c) 2011-2017 <NAME>
%%%
%%% Permission is hereby granted, free of charge, to any person obtaining a copy
%%% of this software and associated documentation files (the "Software"), to
%%% deal in the Software without restriction, including without limitation the
%%% rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
%%% sell copies of the Software, and to permit persons to whom the Software is
%%% furnished to do so, subject to the following conditions:
%%%
%%% The above copyright notice and this permission notice shall be included in
%%% all copies or substantial portions of the Software.
%%%
%%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
%%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
%%% IN THE SOFTWARE.
%%%
%%%-----------------------------------------------------------------------------
-module(em).
-behaviour(gen_statem).
%% public API ---
-export([new/0,
new_groups/2,
nothing/2,
strict/4,
strict/5,
any/0,
zelf/0,
stub/4,
stub/5,
replay/1,
replay/2,
await/2,
await_expectations/1,
verify/1,
call_log/1]).
%% gen_statem callbacks ---
-export([programming/3,
replaying/3,
no_expectations/3,
deranged/3,
callback_mode/0,
init/1,
terminate/3,
code_change/4
]).
%% !!!NEVER CALL THIS FUNCTION!!! ---
-export([invoke/4]).
-export_type([group/0, group_tag/0, timeout_millis/0]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% important types
%%
%%------------------------------------------------------------------------------
%% The type that defines the argument list passed to strict() or stub().
%% Each list element is either a value that will be matched to the actual value
%% of the parameter at that position, or a predicate function which will be
%% applied to the actual argument.
%%------------------------------------------------------------------------------
-type args() :: [ fun((any()) ->
true | false)
| term()].
%%------------------------------------------------------------------------------
%% The type that defines the response to a mocked function call. A response is
%% either that a value is returned, or the application of a function to the
%% actual arguments.
%%------------------------------------------------------------------------------
-type answer() :: {function, fun(([any()]) -> any())}
| {return, any()} .
%%------------------------------------------------------------------------------
%% A group is a pair with a tag for a group and a mock process.
%%------------------------------------------------------------------------------
-type group_tag() :: {term(), reference()}.
-type group() :: {group, pid(), group_tag()}.
%%------------------------------------------------------------------------------
%% Timeout for {@link replay/2}
%%------------------------------------------------------------------------------
-type timeout_millis() :: non_neg_integer() | infinity.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% API
%%
%%------------------------------------------------------------------------------
%% @doc
%% Spawn a linked mock process and return the pid. <p>This is usually the
%% first thing to do in each unit test. The resulting pid is used in the other
%% functions below.</p> <p>NOTE: only a single mock proccess is required for a
%% single unit test case. One mock process can mock an arbitrary number of
%% different modules.</p> <p>When the mock process dies, all uploaded modules
%% are purged from the code server, and all cover compiled modules are
%% restored.</p> <p>When the process that started the mock exits, the mock
%% automatically cleans up and exits.</p> <p>After new() the mock is in
%% 'programming' state.</p>
%% @end
%%------------------------------------------------------------------------------
-spec new() ->
    group().
new() ->
    %% The mock is linked to the caller, so an abandoned test tears it down.
    {ok, MockPid} = gen_statem:start_link(?MODULE, [self()], []),
    %% The root group tag is the implicit group used by strict/4,5 etc.
    {group, MockPid, {root, erlang:make_ref()}}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% One callback function per state: programming/3, replaying/3,
%% no_expectations/3 and deranged/3.
callback_mode() -> state_functions.
%%------------------------------------------------------------------------------
%% @doc
%% Create a group handle to assign mock expectation to. The result can be passed
%% to {@link strict/4} or {@link strict/5} and {@link await/2}.
%% @end
%%------------------------------------------------------------------------------
-spec new_groups(group(), [term()]) ->
    [group()].
new_groups({group, MockPid, _ParentTag}, GroupNames) ->
    %% All groups created together share one cluster reference, so tags from
    %% different new_groups/2 calls never collide even for equal names.
    ClusterRef = make_ref(),
    lists:map(fun(Name) -> {group, MockPid, {Name, ClusterRef}} end,
              GroupNames).
%%------------------------------------------------------------------------------
%% @doc
%% Add an expectation during the programming phase for a specific function
%% invokation.
%%
%% <p>All expectations defined by 'strict' define an order in which the
%% application must call the mocked functions, hence the name 'strict' as opposed
%% to 'stub' (see below).</p>
%%
%% <p>The parameters are:
%% <ul>
%% <li><code>M</code> the mock pid, returned by {@link new/0}</li>
%% <li><code>Mod</code> the module of the function to mock</li>
%% <li><code>Fun</code> the name of the function to mock</li>
%% <li><code>Args</code> a list of expected arguments.
%% Each list element is either a value that will be matched to the actual value
%% of the parameter at that position, or a predicate function which will be
%% applied to the actual argument.</li>
%% </ul></p>
%%
%% <p>This function returns a reference that identifies the expectation. This
%% reference can be passed to {@link await/2} which blocks until the expected
%% invokation happens.</p>
%%
%% <p> The return value, that the application will get when calling the mocked
%% function in the replay phase is simply the atom <code>ok</code>. This
%% differentiates this function from {@link strict/5}, which allows the
%% definition of a custom response function or a custom return value. </p>
%%
%% NOTE: This function may only be called between <code>new/0</code> and {@link
%% replay/1} - that is during the programming phase.
%%
%% @end
%%------------------------------------------------------------------------------
-spec strict(group(), atom(), atom(), args()) ->
    reference().
strict(Group, Mod, Fun, Args) ->
    %% Same as strict/5 with the default answer: plain 'ok'.
    strict(Group, Mod, Fun, Args, {return, ok}).
%%------------------------------------------------------------------------------
%% @doc
%% This function behaves like {@link strict/4}
%% and additionally accepts a return value or an answer function. That parameter
%% <code>Answer</code> may be:
%% <ul>
%% <li><code>{return, SomeValue}</code> This causes the mocked function invocation to
%% return the specified value.</li>
%% <li><code>{function, fun(([Arg1, ... , ArgN]) -> SomeValue)}</code> This defines
%% a function to be called when the mocked invokation happens.
%% That function is applied to all captured actual arguments. For convenience these
%% are passed as a list, so the user can simply write <code>fun(_) -> ...</code>
%% when the actual values are not needed.
%% The function will be executed by the process that calls the mocked function, not
%% by the mock process. Hence the function may access <code>self()</code> and may
%% throw an exception, which will then correctly appear in the process under test,
%% allowing unit testing of exception handling.
%% Otherwise the value returned by the function is passed through as the value
%% returned from the invocation.
%% </li>
%% </ul>
%% @end
%%------------------------------------------------------------------------------
-spec strict(group(), atom(), atom(), args(), answer()) ->
    reference().
%% Only the two well-formed answer shapes are accepted; anything else fails
%% with function_clause in the caller, before reaching the mock process.
strict({group, MockPid, GroupTag}, Module, Function, ArgSpecs,
       {return, _} = Answer)
  when is_pid(MockPid), is_atom(Module), is_atom(Function), is_list(ArgSpecs) ->
    gen_statem:call(MockPid,
                    {strict, GroupTag, Module, Function, ArgSpecs, Answer},
                    infinity);
strict({group, MockPid, GroupTag}, Module, Function, ArgSpecs,
       {function, _} = Answer)
  when is_pid(MockPid), is_atom(Module), is_atom(Function), is_list(ArgSpecs) ->
    gen_statem:call(MockPid,
                    {strict, GroupTag, Module, Function, ArgSpecs, Answer},
                    infinity).
%%------------------------------------------------------------------------------
%% @doc
%% Defines what happens when a function is called without recording any
%% expectations. The invocations defined by this function may happen in any order
%% any number of times. The way, the invocation is defined is analog to
%% @see strict/4. <code>strict/4</code>
%% @end
%%------------------------------------------------------------------------------
-spec stub(group(), atom(), atom(), args()) ->
    ok.
stub(Group, Mod, Fun, Args) ->
    %% Same as stub/5 with the default answer: plain 'ok'.
    stub(Group, Mod, Fun, Args, {return, ok}).
%%------------------------------------------------------------------------------
%% @doc
%% This is similar <code>stub/4</code> except that it, like
%% <code>strict/5</code> allows the definition of a return value
%% or an answer function.
%% @see stub/4. <code>stub/4</code>
%% @see strict/5. <code>strict/5</code>
%% @end
%%------------------------------------------------------------------------------
-spec stub(group(), atom(), atom(), args(), answer()) ->
    ok.
%% Stubs may only be registered on the root group handle returned by new/0.
stub({group, MockPid, {root, _} = GroupTag}, Module, Function, ArgSpecs,
     {return, _} = Answer)
  when is_pid(MockPid), is_atom(Module), is_atom(Function), is_list(ArgSpecs) ->
    ok = gen_statem:call(MockPid,
                         {stub, GroupTag, Module, Function, ArgSpecs, Answer},
                         infinity);
stub({group, MockPid, {root, _} = GroupTag}, Module, Function, ArgSpecs,
     {function, _} = Answer)
  when is_pid(MockPid), is_atom(Module), is_atom(Function), is_list(ArgSpecs) ->
    ok = gen_statem:call(MockPid,
                         {stub, GroupTag, Module, Function, ArgSpecs, Answer},
                         infinity).
%%------------------------------------------------------------------------------
%% @doc
%% This is used to express the expectation that no function of a certain module
%% is called. This will cause each function call on a module to throw an 'undef'
%% exception.
%% @end
%%------------------------------------------------------------------------------
-spec nothing(group(), atom()) ->
    ok.
%% Mark Module as "must never be called": it is replaced by an empty mock, so
%% any call to it raises 'undef' in the caller.
nothing({group, MockPid, {root, _}}, Module)
  when is_pid(MockPid), is_atom(Module) ->
    ok = gen_statem:call(MockPid, {nothing, Module}, infinity).
%%------------------------------------------------------------------------------
%% @doc
%% Finishes the programming phase and switches to the replay phase where the
%% actual code under test may run and invoke the functions mocked. This may
%% be called only once, and only in the programming phase. This also loads
%% (or replaces) the modules of the functions mocked.
%% In the replay phase the code under test may call all mocked functions.
%% If the application calls a mocked function with invalid arguments, or
%% if the application calls a function not expected on a mocked module, the mock
%% process dies and - if used in a typical edoc test suite - fails the test.
%% @end
%%------------------------------------------------------------------------------
-spec replay(group()) -> ok.
replay(Group) ->
    %% No invokation timeout by default.
    replay(Group, infinity).
%%------------------------------------------------------------------------------
%% @doc
%% Finishes the programming phase and switches to the replay phase, expecting
%% that invokations are recorded at least once every `InvokationTimeout' millis.
%% @see replay/1
%% @end
%%------------------------------------------------------------------------------
-spec replay(group(), timeout_millis()) -> ok.
%% Switch the mock from programming to replay; only valid on the root handle.
replay({group, MockPid, {root, _}}, InvokationTimeout) ->
    ok = gen_statem:call(MockPid, {replay, InvokationTimeout}, infinity).
%%------------------------------------------------------------------------------
%% @doc
%% Block until a specific invokation defined via {@link strict/4} during the
%% programming phase was made. <p>The handle for the specific invokation is the
%% value returned by {@link strict/4}.</p> <p>The return value contains the
%% parameters and the pid of the recorded invokation. This function maybe called
%% anytime before or after the referenced invokation has actually
%% happened.</p><p>If the handle is not valid, an error is returned.</p>
%% @end
%% ------------------------------------------------------------------------------
-spec await(group(), reference()) ->
    {success,
     InvPid :: pid(),
     Args :: [term()]} |
    {error, term()}.
%% Block until the expectation identified by Handle (as returned by strict/4)
%% has been invoked; safe to call before or after the actual invokation.
await({group, MockPid, {root, _}}, Handle) ->
    gen_statem:call(MockPid, {await, Handle}, infinity).
%%------------------------------------------------------------------------------
%% @doc
%% Retrieve a list of successfully mocked invokations, i.e. all calls that were
%% accepted by the `em' process in the `replay' phase. Both strict and stub
%% invokations are recorded. NOTE: The Answer might as well be a function,
%% depending on the `return' argument passed to `strict' or `stub'.
%% @end
%%------------------------------------------------------------------------------
-spec call_log(group()) -> [{Mod :: atom(),
                             Func :: atom(),
                             Args :: [term()],
                             Answer :: term()}].
%% Return all accepted (strict and stub) invokations in call order.
call_log({group, MockPid, {root, _}}) ->
    gen_statem:call(MockPid, get_call_log, infinity).
%%------------------------------------------------------------------------------
%% @doc
%% Wait until all invokations defined during the programming phase were made.
%% After this functions returns, the mock can be expected to exit and clean up
%% all modules installed.
%% @end
%%------------------------------------------------------------------------------
-spec await_expectations(group()) -> ok.
%% Block until every strict expectation has been invoked; logs and raises if
%% the mock ended up in an error state instead.
await_expectations({group, MockPid, {root, _}}) ->
    case gen_statem:call(MockPid, await_expectations, infinity) of
        ok ->
            ok;
        Failure ->
            error_logger:error_msg("erlymock verification failed: ~p",
                                   [Failure]),
            error(Failure)
    end.
%%------------------------------------------------------------------------------
%% @doc
%% Finishes the replay phase. If the code under test did not cause all expected
%% invokations defined by {@link strict/4} or {@link strict/5}, the
%% call will fail with <code>badmatch</code> with a comprehensive error message.
%% Otherwise the mock process exits normally, returning <code>ok</code>.
%% @end
%%------------------------------------------------------------------------------
-spec verify(group()) -> ok.
%% End the replay phase immediately; logs and raises when strict expectations
%% are still outstanding or the mock is in an error state.
verify({group, MockPid, {root, _}}) ->
    Outcome = gen_statem:call(MockPid, verify, infinity),
    case Outcome of
        ok ->
            ok;
        Failure ->
            error_logger:error_msg("erlymock verification failed: ~p",
                                   [Failure]),
            error(Failure)
    end.
%%------------------------------------------------------------------------------
%% @doc
%% Utility function that can be used as a match function in an argument list
%% to match any value.
%% @end
%%------------------------------------------------------------------------------
-spec any() ->
    fun((any()) ->
        true).
%% Wildcard argument matcher: accepts every actual value.
any() ->
    fun(_Ignored) -> true end.
%%------------------------------------------------------------------------------
%% @doc
%% Utility function that can be used as a match function in an
%% argument list to match <code>self()</code>, e.g. when it matches the pid of the
%% process that calls the function during the replay phase.
%% @end
%%------------------------------------------------------------------------------
-spec zelf() ->
    atom().
zelf() ->
    %% Marker atom recognized by check_args/3, which compares the actual
    %% argument at that position against the pid of the invoking process.
    '$$em zelf$$'.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% internal state
%%
%% A single programmed invokation; used for both strict and stub entries.
-record(expectation,
        {id :: reference(),     %% handle handed out by strict/4,5
         g :: group_tag(),      %% group this expectation belongs to
         m :: atom(),           %% mocked module
         f :: atom(),           %% mocked function name
         a :: args(),           %% argument specs (literals/predicates/zelf)
         answer :: answer(),    %% reply delivered to the mocked call
         listeners = [] :: [GenFsmFrom :: term()]}). %% parked await/2 callers
%% Log entry recorded for every satisfied strict expectation.
-record(strict_log,
        {grpt :: group_tag(),   %% group tag of the satisfied expectation
         eref :: reference(),   %% id of the satisfied expectation
         ipid :: pid(),         %% process that made the mocked call
         args :: [term()]}).    %% actual arguments of the call
%% Internal gen_statem state.
-record(state,
        {test_proc :: pid(),    %% process that created the mock (new/0)
         inv_to :: timeout_millis() | infinity, %% replay invokation timeout
         strict :: [#expectation{}],    %% outstanding strict expectations
         strict_log :: [#strict_log{}], %% satisfied strict expectations
         stub :: [#expectation{}],      %% stub definitions
         call_log :: [{Mod :: atom(),
                       Func :: atom(),
                       Args :: [term()],
                       Answer :: term()}], %% accepted calls, newest first
         blacklist :: [atom()], %% modules mocked with no functions (nothing/2)
         mocked_modules :: [{atom(), {just, term()}|nothing}],
         on_finished :: term(), % GenFsmFrom: parked await_expectations caller
         error = no_error :: no_error | term()
        }).
-type statedata() :: #state{}.
%% Module attribute stamped into generated mocks (see assert_not_mocked_/1).
-define(ERLYMOCK_COMPILED, erlymock_compiled).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% gen_statem callbacks
%%
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec init([TestProc :: term()]) ->
    {ok, atom(), StateData :: statedata()}.
%% gen_statem init callback: start in the 'programming' state with an empty
%% expectation store. TestProc is the process that created the mock.
init([TestProc]) ->
    %% Hide this process from tracing and switch any existing tracing off;
    %% the mock purges/loads code dynamically, which confuses trace tooling.
    process_flag(sensitive, true),
    erlang:trace(self(), false, [all]),
    {ok,
     programming,
     #state{
        test_proc = TestProc,
        inv_to = infinity,
        strict = [],
        strict_log = [],
        stub = [],
        call_log = [],
        blacklist = [],
        mocked_modules = []}}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec programming(gen_statem:event_type(), EventContent :: term(), statedata()) ->
    gen_statem:event_handler_result(no_expectations|replaying).
%% State callback for the programming phase: stores strict/stub/nothing
%% definitions and, on 'replay', compiles + loads the mock modules and
%% transitions to the replay phase.
programming({call, From},
            {strict, Group, Mod, Fun, Args, Answer},
            State = #state{strict = Strict}) ->
    %% Prepend for O(1); prepare_strict_invocations/1 reverses the list at
    %% replay time, restoring definition order. The reference is returned to
    %% the caller as the handle for await/2.
    InvRef = make_ref(),
    {keep_state,
     State#state{
       strict = [#expectation{id = InvRef,
                              g = Group,
                              m = Mod,
                              f = Fun,
                              a = Args,
                              answer = Answer,
                              listeners = []}
                 |Strict]},
     {reply, From, InvRef}};
programming({call, From},
            {stub, Group, Mod, Fun, Args, Answer},
            State = #state{stub = Stub}) ->
    %% Stubs are unordered; the id is created for record uniformity only and
    %% the caller just receives 'ok'.
    InvRef = make_ref(),
    {keep_state,
     State#state{
       stub = [#expectation{id = InvRef,
                            g = Group,
                            m = Mod,
                            f = Fun,
                            a = Args,
                            answer = Answer,
                            listeners = []}
               |Stub]},
     {reply, From, ok}};
programming({call, From},
            {nothing, Mod},
            State = #state{blacklist = BL}) ->
    %% Blacklisted modules are replaced by an empty mock, so any call to them
    %% raises 'undef' in the caller.
    {keep_state,
     State#state{
       blacklist = [Mod | BL]},
     {reply, From, ok}};
programming({call, From},
            {replay, InvTo},
            State) ->
    NextState = load_mock_modules(
                  prepare_strict_invocations(
                    set_invokation_timeout(InvTo, State))),
    %% Without strict expectations go straight to 'no_expectations'; the
    %% invokation timer is only started when entering 'replaying' (the
    %% one-element comprehension below is empty otherwise).
    NextStateName = case NextState#state.strict of
                        [] -> no_expectations;
                        _ -> replaying
                    end,
    {next_state,
     NextStateName,
     NextState,
     [{reply, From, ok}
      |[start_invokation_timer(NextState)||NextStateName == replaying]]};
programming({call, From}, get_call_log, State) ->
    %% The log is accumulated newest-first; present it in call order.
    {keep_state, State,
     {reply, From, lists:reverse(State#state.call_log)}};
programming({call, From}, Event, State) ->
    %% Reject anything else but stay alive, so the test sees a clear error.
    {keep_state, State, {reply, From, {error, {bad_request, programming, Event}}}}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec replaying(gen_statem:event_type(), EventContent :: term(), statedata()) ->
    gen_statem:event_handler_result(no_expectations|deranged).
%% State callback for the replay phase while strict expectations are still
%% outstanding.
replaying({timeout, invokation_timeout},
          invokation_timeout,
          State = #state{ strict = Expectations}) ->
    %% No invokation arrived within the configured timeout; stop with the
    %% expectations that were never satisfied.
    {stop, {invokation_timeout, {missing_invokations, Expectations}}, State};
replaying({call, From},
          Inv = {invokation, _M, _F, _A, _IPid},
          St) ->
    %% Match against the head expectation of each group plus all stubs. On a
    %% match: answer the caller (and any await/2 listeners), log the call and
    %% drop the satisfied expectation. On a mismatch: switch to 'deranged',
    %% which makes the mocked call exit in the calling process.
    case
        find_matching_expectation(
          Inv,
          [],
          get_next_expectations(St))
    of
        {ok, E = #expectation{}} ->
            stop_or_continue_replay(
              answer_invokation(Inv, E, From),
              remove_expectation(
                E,
                log_invokation(Inv, E, St)));
        {error, Error} ->
            enter_deranged([{reply, From, {'$em_error', Error}}], Error, St)
    end;
replaying({call, From}, verify, State) ->
    %% verify/1 while still in 'replaying' means invokations are missing.
    Reason = {invokations_missing, State#state.strict},
    {stop_and_reply, normal, {reply, From, Reason}, State};
replaying({call, From}, {await, H}, State) ->
    {NewState, ReplyActions} = add_invokation_listener(From, H, State),
    {keep_state, NewState, ReplyActions};
replaying({call, From},
          await_expectations,
          State = #state{on_finished = undefined}) ->
    %% Park the caller; it is replied to once the last strict expectation has
    %% been satisfied.
    {keep_state, State#state{ on_finished = From }};
replaying({call, From}, get_call_log, State) ->
    {keep_state, State,
     {reply, From, lists:reverse(State#state.call_log)}};
replaying({call, From}, Event, State) ->
    {keep_state, State, {reply, From, {error, {bad_request, replaying, Event}}}}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec no_expectations(gen_statem:event_type(),
                      EventData :: term(),
                      statedata()) ->
    gen_statem:event_handler_result(deranged).
%% Replay state entered once every strict expectation has been satisfied (or
%% when none were programmed). Only stub invokations are legal here.
no_expectations({call, From},
                {invokation, Mod, Fun, Args, IPid},
                State = #state{call_log = CallLog}) ->
    Stubs = State#state.stub,
    %% A stub matches on module, function, arity and the argument specs.
    MatchingStubs = [Stub
                     || Stub = #expectation {m = M, f = F, a = A} <- Stubs,
                        M == Mod,
                        F == Fun,
                        length(Args) == length(A),
                        check_args(Args, A, IPid)],
    case MatchingStubs of
        [#expectation{answer = Answer}|_] ->
            {keep_state,
             State#state{call_log = [{Mod, Fun, Args, Answer}|CallLog]},
             [{reply, From, Answer}]};
        _ ->
            %% Not stubbed: the mock enters its error state and the mocked
            %% call exits in the calling process.
            Error = {unexpected_invokation, {invokation, Mod, Fun, Args, IPid}},
            enter_deranged([{reply, From, {'$em_error', Error}}], Error, State)
    end;
no_expectations({call, From}, verify, State) ->
    {stop_and_reply, normal, {reply, From, ok}, State};
no_expectations({call, From}, await_expectations, State) ->
    {stop_and_reply, normal, {reply, From, ok}, State};
no_expectations({call, From}, {await, H}, State) ->
    %% Every strict expectation is satisfied, so the awaited invokation can
    %% only be answered from the strict log (or be an invalid handle).
    {NewState, ReplyActions} = add_invokation_listener(From, H, State),
    {keep_state, NewState, ReplyActions};
no_expectations({call, From}, get_call_log, State) ->
    {keep_state, State,
     {reply, From, lists:reverse(State#state.call_log)}};
no_expectations({call, From}, Event, State) ->
    {keep_state, State,
     {reply, From, {error, {bad_request, no_expectations, Event}}}}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec deranged(gen_statem:event_type(),
               EventData :: term(),
               statedata()) ->
    gen_statem:state_callback_result(gen_statem:reply_action()).
%% Error state entered after an unexpected or mismatching invokation. The
%% stored error is delivered to verify/1 and await_expectations/1; all
%% further mocked calls fail in the calling process.
deranged({call, From}, verify, State = #state{ error = Error }) ->
    {stop_and_reply, normal, {reply, From, Error}, State};
deranged({call, From}, await_expectations, State = #state{ error = Error }) ->
    {stop_and_reply, normal, {reply, From, Error}, State};
deranged({call, From}, {await, _}, State) ->
    {keep_state, State, {reply, From, {error, mock_deranged}}};
deranged({call, From}, {invokation, _M, _F, _A, _IPid}, State) ->
    %% invoke/4 translates '$em_error' into exit({mock_error, ...}).
    {keep_state, State, {reply, From, {'$em_error', mock_deranged}}};
deranged({call, From}, get_call_log, State) ->
    {keep_state, State,
     {reply, From, lists:reverse(State#state.call_log)}};
deranged({call, From}, Event, State) ->
    {keep_state, State, {reply, From, {error, {bad_request, deranged, Event}}}}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec terminate(Reason :: term(), StateName :: atom(),
                StateData :: statedata()) -> no_return().
%% gen_statem terminate callback: best-effort removal of every generated mock
%% module (restoring cover-compiled originals where applicable).
terminate(_Reason, _StateName, State) ->
    %% Errors are swallowed deliberately: terminate must not mask the original
    %% stop reason, and partial cleanup is better than none.
    try unload_mock_modules(State) catch _:_ -> ok end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec code_change(OldVsn :: term(), StateName :: atom(), State :: statedata(),
                  Extra :: term()) ->
    {ok, NextState :: atom(), NewStateData :: statedata()}.
%% gen_statem code_change callback; no state transformation required.
code_change(_OldVsn, StateName, State, _Extra) ->
    {ok, StateName, State}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% api for generated mock code
%%
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Spec fixed: the function returns the bare value (or exits), not a 1-tuple,
%% and Fun is the mocked function's name, not a fun.
-spec invoke(M :: pid(), Mod :: atom(), Fun :: atom(), Args :: list()) ->
    Value :: term().
%% @private
%% Entry point called by the generated mock modules (see body_syn/3). Runs in
%% the process that called the mocked function; forwards the invokation to
%% the mock process M and applies the programmed answer locally, so answer
%% funs see the caller's self() and exceptions surface in the code under test.
invoke(M, Mod, Fun, Args) ->
    %% erlang:get_stacktrace/0 was deprecated in OTP 21 and removed in OTP 24.
    %% Capture the caller's current stack explicitly instead; it is only used
    %% to enrich the error printout below.
    {current_stacktrace, Trace} =
        erlang:process_info(self(), current_stacktrace),
    %% Debug output is best-effort only; never let io failures kill the test.
    try io:format("~nEM: ~w:~w ~p",[Mod,Fun, Args]) catch _:_ -> ok end,
    Rv = case gen_statem:call(M, {invokation, Mod, Fun, Args, self()}, infinity) of
             {return, Value} ->
                 Value;
             {'$em_error' , WTF} ->
                 (catch io:format(" *ERROR* -> ~p~nAT: ~p~n~n",[WTF, Trace])),
                 %% Fail in the calling process so the test sees the error.
                 exit({mock_error, WTF});
             {function, F} ->
                 %% Apply the answer fun to the captured actual arguments.
                 F(Args)
         end,
    try io:format(" -> ~p~n",[Rv]) catch _:_ -> ok end,
    Rv.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% internal functions
%%
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Purge every generated mock module; where the original module had been
%% cover-compiled, reload the saved cover binary afterwards.
unload_mock_modules(#state{mocked_modules = Mocked}) ->
    RestoreOne =
        fun({Mod, MaybeBin}) ->
                code:purge(Mod),
                code:delete(Mod),
                code:purge(Mod),
                case MaybeBin of
                    {just, {Mod, CoverBin}} ->
                        %% Put the displaced cover-compiled code back.
                        code:load_binary(Mod, cover_compiled, CoverBin);
                    nothing ->
                        ignore
                end
        end,
    lists:map(RestoreOne, Mocked).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Compile and load one mock module per module mentioned by any expectation
%% or blacklisted via nothing/2; record what was replaced for later cleanup.
load_mock_modules(State = #state{ strict = StrictEs,
                                  stub = StubEs,
                                  blacklist = BlackList }) ->
    AllExpectations = StubEs ++ StrictEs,
    Modules = lists:usort([Mod || #expectation{m = Mod} <- AllExpectations]
                          ++ BlackList),
    %% Refuse to mock a module that is already a generated mock.
    assert_not_mocked(Modules),
    %% Fail early if a real module exists but lacks a mocked function.
    lists:foreach(fun check_func/1, AllExpectations),
    Mocked = [load_mock_module(Mod, AllExpectations) || Mod <- Modules],
    State#state{ mocked_modules = Mocked }.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Generate, compile and load the mock module replacing Mod. The module
%% carries the ?ERLYMOCK_COMPILED attribute (checked by assert_not_mocked_/1)
%% and one function per mocked {name, arity}, whose body forwards to
%% invoke/4. Returns {Mod, MaybeCoverBinary} so terminate/3 can restore
%% cover-compiled code.
load_mock_module(Mod, Expectations) ->
    %% Save the cover-compiled binary (if any) before we displace it.
    MaybeBin = get_cover_compiled_binary(Mod),
    ModHeaderSyn = [erl_syntax:attribute(erl_syntax:atom(module),
                                         [erl_syntax:atom(Mod)]),
                    erl_syntax:attribute(erl_syntax:atom(?ERLYMOCK_COMPILED),
                                         [erl_syntax:atom(true)]),
                    erl_syntax:attribute(erl_syntax:atom(compile),
                                         [erl_syntax:list(
                                            [erl_syntax:atom(export_all)])])],
    %% One generated function per distinct name/arity mocked in this module.
    Funs = lists:usort(
             [{F, length(A)} ||
                 #expectation{ m = M, f = F, a = A } <- Expectations,
                 M == Mod]),
    FunFormsSyn = [mock_fun_syn(Mod, F, A) || {F, A} <- Funs],
    {ok, Mod, Code} =
        compile:forms([erl_syntax:revert(F)
                       || F <- ModHeaderSyn ++ FunFormsSyn]),
    %% Drop old code (twice purged around delete to clear both versions),
    %% then load the generated beam.
    code:purge(Mod),
    code:delete(Mod),
    code:purge(Mod),
    {module, _} = load_module(Mod, Code),
    {Mod, MaybeBin}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Build the syntax tree for one generated mock function of the given arity:
%% a single clause whose body forwards to invoke/4.
mock_fun_syn(Mod, FunName, Arity) ->
    FunAtomSyn = erl_syntax:atom(FunName),
    ParamsSyn = var_list_syn(Arity),
    Clause = erl_syntax:clause(ParamsSyn,
                               none,
                               body_syn(Mod, FunAtomSyn, ParamsSyn)),
    erl_syntax:function(FunAtomSyn, [Clause]).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Syntax trees for the parameter variables Arg_0 .. Arg_{N-1}.
var_list_syn(NumArgs) ->
    MkVar = fun(Ix) ->
                    erl_syntax:variable(
                      list_to_atom("Arg_" ++ integer_to_list(Ix)))
            end,
    lists:map(MkVar, lists:seq(0, NumArgs - 1)).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Body of a generated mock function: a single call to ?MODULE:invoke/4.
%% The mock process's pid is embedded as a string literal and rebuilt via
%% erlang:list_to_pid/1 at call time, since a pid cannot appear in source.
body_syn(Mod, FunSyn, ArgsSyn) ->
    PidString = pid_to_list(self()),
    PidSyn = erl_syntax:application(
               erl_syntax:atom(erlang),
               erl_syntax:atom(list_to_pid),
               [erl_syntax:string(PidString)]),
    InvokeCall = erl_syntax:application(
                   erl_syntax:atom(?MODULE),
                   erl_syntax:atom(invoke),
                   [PidSyn,
                    erl_syntax:atom(Mod),
                    FunSyn,
                    erl_syntax:list(ArgsSyn)]),
    [InvokeCall].
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Strict expectations are accumulated by prepending during programming;
%% restore definition order before replay starts.
prepare_strict_invocations(State = #state{ strict = Accumulated }) ->
    State#state{ strict = lists:reverse(Accumulated) }.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Match the actual arguments of an invokation against the programmed specs.
%% Each spec element is either
%%   * a fun/1 predicate applied to the actual argument,
%%   * the marker atom '$$em zelf$$' (see zelf/0), matching the invoking
%%     process's pid, or
%%   * a literal term that the actual argument must equal.
%% Returns 'true' when all positions match; otherwise the first mismatch is
%% thrown as {error, Position, ExpectedSpec, ActualArg} and returned by the
%% catch clause below.
check_args(Args, ArgSpecs, InvokationPid) ->
    try
        [begin
             if
                 is_function(E) ->
                     case E(A) of
                         true ->
                             ok;
                         _ ->
                             throw({error, I, E, A})
                     end;
                 true ->
                     case E of
                         '$$em zelf$$' ->
                             if A =/= InvokationPid ->
                                     throw({error, I, E, A});
                                true ->
                                     ok
                             end;
                         A ->
                             %% literal match: 'A' is already bound, so this
                             %% clause only matches on equality
                             ok;
                         _Otherwise ->
                             throw({error, I, E, A})
                     end
             end
         end
         || {I, A, E} <- lists:zip3(lists:seq(1, length(Args)),
                                    Args,
                                    ArgSpecs)]
    of
        _ -> true
    catch
        E -> E
    end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec get_cover_compiled_binary(atom()) ->
    {just, term()} | nothing.
%% If Mod is currently cover-compiled, fetch its entry from cover's internal
%% code table so it can be restored after the mock is removed.
get_cover_compiled_binary(Mod) ->
    case code:which(Mod) of
        cover_compiled ->
            cover_table_entry(Mod);
        _NotCovered ->
            nothing
    end.

%% Look Mod up in cover's binary code table, if that table exists at all.
cover_table_entry(Mod) ->
    case ets:info(cover_binary_code_table) of
        undefined ->
            nothing;
        _TableInfo ->
            case ets:lookup(cover_binary_code_table, Mod) of
                [Entry] ->
                    {just, Entry};
                _Other ->
                    nothing
            end
    end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
-spec add_invokation_listener(gen_statem:from(), Ref :: term(), statedata()) ->
    {statedata(), [gen_statem:reply_action()]}.
%% Register a caller waiting (via await/2) on the expectation Ref. If the
%% expectation is still pending, the caller is parked on its listener list
%% and replied to later by answer_invokation/3; if it was already invoked,
%% the recorded invokation is answered immediately from the strict log. An
%% unknown reference yields {error, invalid_handle}.
add_invokation_listener(From, Ref, State = #state{strict = Strict,
                                                  strict_log = StrictSucc}) ->
    %% if the invokation does not exist, check the strict_history
    case lists:keyfind(Ref, #expectation.id, Strict) of
        false ->
            case lists:keyfind(Ref, #strict_log.eref, StrictSucc) of
                false ->
                    {State, [{reply, From, {error, invalid_handle}}]};
                #strict_log{ args = Args,
                             ipid = IPid
                           } ->
                    {State, [{reply, From, {success, IPid, Args}}]}
            end;
        E = #expectation{listeners = Ls} ->
            NewE = E#expectation{listeners = [From|Ls]},
            %% Key on the record field index (was a hard-coded 2): keeps the
            %% replace consistent with the keyfind above and robust against
            %% record layout changes.
            NewStrict = lists:keyreplace(Ref, #expectation.id, Strict, NewE),
            {State#state{strict = NewStrict}, []}
    end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Throw em_error_module_already_mocked if any of Mods is itself a generated
%% mock (i.e. a previous mock was not cleaned up, or two mocks overlap).
assert_not_mocked(Mods) ->
    lists:foreach(
      fun(Candidate) ->
              case assert_not_mocked_(Candidate) of
                  ok ->
                      ok;
                  {error, {already_mocked, Mod}} ->
                      throw({em_error_module_already_mocked, Mod})
              end
      end,
      Mods),
    ok.
%% A module is a generated mock iff it carries the ?ERLYMOCK_COMPILED
%% attribute. A module that cannot be introspected at all is fine to mock.
assert_not_mocked_(Mod) ->
    try Mod:module_info(attributes) of
        Attributes ->
            case lists:keyfind(?ERLYMOCK_COMPILED, 1, Attributes) of
                false ->
                    ok;
                _MarkerFound ->
                    {error, {already_mocked, Mod}}
            end
    catch
        _:_ ->
            ok
    end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Sanity-check one expectation during replay setup: if the real (un-mocked)
%% module can still be loaded, verify it actually exports the function about
%% to be mocked, and fail the programming phase otherwise. Modules that
%% cannot be loaded at all are accepted (they may only exist as mocks).
check_func(#expectation{m = Mod, f = Fun, a = Args}) ->
    %% Drop any previously generated mock code before loading the original.
    code:purge(Mod),
    code:delete(Mod),
    code:purge(Mod),
    case code:load_file(Mod) of
        {module, Mod} ->
            case erlang:function_exported(Mod, Fun, length(Args)) of
                false ->
                    %% The long atom makes the failure stand out in reports.
                    throw({'_______________em_invalid_mock_program_______________',
                           lists:flatten(
                             io_lib:format(
                               "erly_mock: mocked function not exported: ~w:~w/~w",
                               [Mod, Fun, length(Args)])),
                           Args});
                true ->
                    ok
            end;
        _ ->
            ok
    end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Candidates for the next invokation: the oldest pending strict expectation
%% of every group, followed by all stubs.
get_next_expectations(#state{strict = StrictEs, stub = StubExpectations}) ->
    lists:append(heads_by_group_tag(StrictEs), StubExpectations).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Reduce the strict expectations to one entry per group tag: the first
%% pending expectation of each group (keysort is stable, so within a group
%% the definition order decides which one is the head).
heads_by_group_tag(Expectations) ->
    Sorted = lists:keysort(#expectation.g, Expectations),
    KeepGroupHead =
        fun(E = #expectation{g = Tag}, Acc) ->
                case Acc of
                    [#expectation{g = Tag} | _] ->
                        %% Same group as the entry just kept: skip.
                        Acc;
                    _ ->
                        [E | Acc]
                end
        end,
    lists:foldl(KeepGroupHead, [], Sorted).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Walk the candidate expectations (group heads first, then stubs; see
%% get_next_expectations/1) and return {ok, Expectation} for the first one
%% matching module, function, arity and argument specs. When none matches,
%% return {error, {unexpected_invokation, I, Hints}}, where Hints explains
%% why each candidate was rejected.
find_matching_expectation(I, Hints, []) ->
    {error, {unexpected_invokation, I, Hints}};
find_matching_expectation(I = {invokation, Mod, Fun, Args, IPid},
                          Hints,
                          [E|RestEs]) ->
    case E of
        #expectation{m = Mod,
                     f = Fun,
                     a = EArgs}
          when
              length(EArgs) == length(Args) ->
            case check_args(Args, EArgs, IPid) of
                true ->
                    {ok, E};
                {error, Index, Expected, Actual} ->
                    %% Same MFA but an argument mismatch: keep the details so
                    %% the final error message is comprehensible.
                    Hint = {parameter_mismatch,
                            {parameter, Index},
                            {expected, Expected},
                            {actual, Actual},
                            E},
                    find_matching_expectation(I, [Hint|Hints], RestEs)
            end;
        _ ->
            Hint = {mfa_mismatch, E},
            find_matching_expectation(I, [Hint|Hints], RestEs)
    end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Build the gen_statem reply actions for a matched invokation: answer
%% the calling process with the expectation's canned Answer, and notify
%% every registered listener that the invokation succeeded.
-spec answer_invokation(Invokation :: term(),
                        #expectation{},
                        gen_statem:from()) ->
    [gen_statem:reply_action()].
answer_invokation({invokation, _Mod, _Fun, Args, IPid},
                  #expectation{answer = Answer,
                               listeners = Listeners},
                  From) ->
    [{reply, From, Answer}|
     [{reply, Listener, {success, IPid, Args}} || Listener <- Listeners]].
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Drop the expectation with the given id from the strict expectation
%% list; every other state field is left untouched.
remove_expectation(#expectation{id = EId}, St = #state{strict = Stricts}) ->
    Remaining = lists:keydelete(EId, #expectation.id, Stricts),
    St#state{strict = Remaining}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Record a successfully matched invokation: prepend an entry to the
%% strict log (expectation id, group tag, caller pid, arguments) and to
%% the flat call log of {M, F, Args, Answer}. Both logs are newest-first.
log_invokation({invokation, Mod, Fun, Args, IPid},
               #expectation {
                  id = EId,
                  g = GroupTag,
                  answer = Answer
                 },
               St = #state{ strict_log = Log,
                            call_log = CallLog }) ->
    St#state {
      strict_log = [#strict_log {
                       eref = EId,
                       grpt = GroupTag,
                       ipid = IPid,
                       args = Args
                      } | Log],
      call_log = [{Mod, Fun, Args, Answer} | CallLog]}.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Decide how replay proceeds after an expectation was consumed:
%% - no strict expectations left and nobody awaiting completion: move to
%%   the no_expectations state with the invokation timer disabled;
%% - strict expectations remain: stay put and re-arm the timer;
%% - none left and an on_finished caller is waiting: stop normally,
%%   acknowledging that caller after the other replies.
-spec stop_or_continue_replay([gen_statem:reply_action()], statedata()) ->
    gen_statem:event_handler_result(no_expectations).
stop_or_continue_replay(
  ReplyActions,
  St = #state{
          strict = Expectations,
          on_finished = OnFinished
         }) ->
    case {Expectations, OnFinished} of
        {[], undefined} ->
            {next_state, no_expectations, St,
             [reset_invokation_timer()|ReplyActions]};
        {[_|_], _} ->
            {keep_state, St, [start_invokation_timer(St)|ReplyActions]};
        {[], OnFinished} ->
            {stop_and_reply, normal, ReplyActions++[{reply, OnFinished, ok}],
             St}
    end.
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Store the invokation timeout (milliseconds or infinity) in the state.
set_invokation_timeout(InvTimeout, S = #state{}) ->
    S#state{ inv_to = InvTimeout }.
%% gen_statem generic-timeout action named invokation_timeout: T is the
%% delay, the final element the event content delivered on expiry.
-define(set_invokation_timeout_action(T),
        {{timeout, invokation_timeout}, T, invokation_timeout}).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Arm the invokation timer with the timeout configured in the state.
-spec start_invokation_timer(statedata()) -> gen_statem:enter_action().
start_invokation_timer(#state{ inv_to = InvTo }) ->
    ?set_invokation_timeout_action(InvTo).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Disable the invokation timer (an infinite generic timeout cancels it).
-spec reset_invokation_timer() -> gen_statem:enter_action().
reset_invokation_timer() -> ?set_invokation_timeout_action(infinity).
%%------------------------------------------------------------------------------
%% @private
%%------------------------------------------------------------------------------
%% Transition into the deranged state after a mock error. The guard on
%% error = no_error preserves the first recorded failure; the timer is
%% cancelled and every listener of a remaining strict expectation is told
%% {error, mock_deranged} before the supplied reply actions are sent.
-spec enter_deranged([gen_statem:reply_action()], Error :: term(), #state{}) ->
    gen_statem:event_handler_result(deranged).
enter_deranged(ReplyActions, What, State = #state{ error = no_error,
                                                   strict = Strict}) ->
    {next_state,
     deranged,
     State#state{ error = What },
     [reset_invokation_timer()
      |
      [{reply, L, {error, mock_deranged}} ||
          #expectation{ listeners = Ls } <- Strict,
          L <- Ls]
      ++ ReplyActions]}. | src/em.erl | 0.588889 | 0.654288 | em.erl | starcoder |
%% @author Couchbase <<EMAIL>>
%% @copyright 2017-2018 Couchbase, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% node_status_analyzer runs on each node and analyzes status of all nodes
%% in the cluster.
%%
%% - Periodically, it fetches the status information stored
%% by the node_monitor and analyzes it.
%%
%% - node_monitor on each node sends the status information to the
%% orchestrator/master.
%%
%% - The status returned by the node_monitor on master contains each node’s
%% view of every other node in the cluster. Different monitors running
%% on a node can have different view of the status of other nodes
%% in the cluster.
%% This monitor specific status is contained in the information returned
%% by the node_monitor.
%%
%% E.g. information returned by the node_monitor on the master:
%%
%% [{node1, <======== node1's view of other nodes
%% node1_status, <======== "active" if node1 sent this recently
%% [{node2, [{monitor1, node2_status}, {monitor2, node2_status}]},
%% {node1, [{monitor1, node1_status}, {monitor2, node1_status}]},
%% {node3, ...},
%% ...]},
%% {node2, <======== node2's view of other nodes
%% node2_status,
%% [{node2, [{monitor1, node2_status}, {monitor2, node2_status}]},
%% {node1, [{monitor1, node1_status}, {monitor2, node1_status}]},
%% {node3, ...},
%% ...]},
%% {node3, ..., [...]},
%% ...]
%%
%% - node_status_analyzer then calls monitor specific analyzers to interpret
%% the above information. These analyzers determine health of a particular
%% node by taking view of all nodes into consideration.
%%
%% - At the end of the analysis, a node is considered:
%% - healthy: if all monitors report that the node is healthy.
%% - unhealthy: if all monitors report the node is unhealthy.
%% - {needs_attention, <analysis_returned_by_the_monitor>}:
%% if different monitors return different status for the node.
%% E.g. ns_server analyzer reports the node is healthy but KV
%% analyzer reports that some buckets are not ready.
-module(node_status_analyzer).
-include("ns_common.hrl").
-export([start_link/0]).
-export([get_nodes/0]).
-export([init/0, handle_call/4, handle_cast/3, handle_info/3]).
%% Start the analyzer under the generic health_monitor wrapper, which
%% owns the process and dispatches back into this module's callbacks.
start_link() ->
    health_monitor:start_link(?MODULE).
%% gen_server callbacks
%% health_monitor callback: shared init with periodic refresh enabled,
%% which delivers the 'refresh' messages handled in handle_info/3.
init() ->
    health_monitor:common_init(?MODULE, with_refresh).
%% health_monitor callback: serve the analyzed status dict; anything else
%% is logged and nack'ed. Note the 2-tuple reply shape (no state element)
%% is defined by the health_monitor wrapper, not plain gen_server.
handle_call(get_nodes, _From, Statuses, _Nodes) ->
    {reply, Statuses};
handle_call(Call, From, Statuses, _Nodes) ->
    ?log_warning("Unexpected call ~p from ~p when in state:~n~p",
                 [Call, From, Statuses]),
    {reply, nack}.
%% No casts are expected; log and carry on with unchanged state.
handle_cast(Cast, Statuses, _Nodes) ->
    ?log_warning("Unexpected cast ~p when in state:~n~p", [Cast, Statuses]),
    noreply.
%% Periodic refresh: pull every node's view of the cluster from the
%% node_monitor and recompute each wanted node's health state. The
%% timestamp records when the state last *changed*, so the stored entry
%% (state + timestamp) is reused whenever the recomputed state matches.
handle_info(refresh, Statuses, NodesWanted) ->
    %% Fetch each node's view of every other node and analyze it.
    AllNodes = node_monitor:get_nodes(),
    NewStatuses = lists:foldl(
                    fun (Node, Acc) ->
                            NewState = analyze_status(Node, AllNodes),
                            Status = case dict:find(Node, Statuses) of
                                         {ok, {NewState, _} = OldStatus} ->
                                             %% Node state has not changed.
                                             %% Do not update the timestamp.
                                             OldStatus;
                                         _ ->
                                             {NewState, erlang:monotonic_time()}
                                     end,
                            dict:store(Node, Status, Acc)
                    end, dict:new(), NodesWanted),
    {noreply, NewStatuses};
%% Any other message: log and keep the state untouched.
handle_info(Info, Statuses, _Nodes) ->
    ?log_warning("Unexpected message ~p when in state:~n~p", [Info, Statuses]),
    noreply.
%% APIs
%% Return the analyzed status dict (the process is registered under this
%% module's name by the health_monitor wrapper).
get_nodes() ->
    gen_server:call(?MODULE, get_nodes).
%% Internal functions
%% Combine the verdict of every monitor configured for Node:
%% - healthy    when every monitor reported healthy;
%% - unhealthy  when every monitor reported unhealthy;
%% - {needs_attention, Details} when the monitors disagree, listing the
%%   unhealthy monitors plus any monitor-specific states.
analyze_status(Node, AllNodes) ->
    Monitors = health_monitor:node_monitors(Node),
    {Healthy, Unhealthy, Other} = lists:foldl(
                                    fun (Monitor, Accs) ->
                                            analyze_monitor_status(Monitor,
                                                                   Node,
                                                                   AllNodes,
                                                                   Accs)
                                    end, {[], [], []}, Monitors),
    %% Empty subtraction means every monitor landed in that bucket.
    case lists:subtract(Monitors, Healthy) of
        [] ->
            healthy;
        _ ->
            case lists:subtract(Monitors, Unhealthy) of
                [] ->
                    unhealthy;
                _ ->
                    {needs_attention, lists:sort(Unhealthy ++ Other)}
            end
    end.
%% Run one monitor's analyzer for Node and file it into the healthy /
%% unhealthy / other accumulator triple; any non-binary verdict is kept
%% as {Monitor, State} for the needs_attention report.
analyze_monitor_status(Monitor, Node, AllNodes,
                       {Healthy, Unhealthy, Other}) ->
    Mod = health_monitor:get_module(Monitor),
    case Mod:analyze_status(Node, AllNodes) of
        healthy ->
            {[Monitor | Healthy], Unhealthy, Other};
        unhealthy ->
            {Healthy, [Monitor | Unhealthy], Other};
        State ->
            {Healthy, Unhealthy, [{Monitor, State} | Other]}
    end. | src/node_status_analyzer.erl | 0.571169 | 0.478651 | node_status_analyzer.erl | starcoder |
%%==============================================================================
%% Copyright 2010 Erlang Solutions Ltd.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
%% @doc escript wrapper for Quviq QuickCheck.
-module(run_eqc).
-export([main/1]).
%% escript entry point. Parses the argument list (any parse error routes
%% through usage/1, which halts), builds an eqc on_output callback for
%% the -rpt level ('all' prints directly, 'none' drops everything,
%% 'error' buffers through the reporter process) and runs the module's
%% properties, halting with exit status 1 when any property fails.
main(S) ->
    Args = try args(S)
           catch
               error:R ->
                   usage(R)
           end,
    N = proplists:get_value(numtests, Args, 100),
    M = proplists:get_value(module, Args),
    Rpt = proplists:get_value(rpt, Args, all),
    Output = case Rpt of
                 none ->
                     fun(_, _) -> ok end;
                 all ->
                     fun(Str, IOArgs) ->
                             io:format(user, Str, IOArgs)
                     end;
                 error ->
                     fun(Str, IOArgs) ->
                             push_io({Str, IOArgs})
                     end
             end,
    case module([{numtests, N},
                 {on_output, Output}], Rpt, M) of
        %% {on_test, fun on_test/2}], M) of
        [] ->
            io:fwrite("...tests passed (~p)~n", [M]),
            ok;
        Failed ->
            io:fwrite("Failed properties in ~p:~n"
                      "~p~n", [M, Failed]),
            erlang:halt(1)
    end.
%% Print the error followed by the option summary and halt with exit
%% status 1. Never returns.
usage(Error) ->
    io:fwrite(
      "Error: ~p~n"
      "Usage: escript ~s [ Option ]~n"
      " -n NumTests run NumTests number of tests (default 100)~n"
      " -m Module run eqc:module([ Option, ] Module)~n"
      " -pa Dir prepend Dir to the code path~n"
      " -pz Dir append Dir to the code path~n"
      " -rpt all set reporting level - 'all': report everything (default)~n"
      " | none 'none': (as silent as possible)~n"
      " | error 'error': report on failing tests.~n",
      [Error, script_name()]),
    erlang:halt(1).
%% Name to show in the usage text. escript:script_name/0 errors when we
%% are not actually running under escript, so fall back to this module's
%% name in that case.
script_name() ->
    try filename:basename(escript:script_name())
    catch
        error:_ ->
            ?MODULE_STRING
    end.
%% Parse the command-line argument list into a proplist. The path
%% options (-pa/-pz) modify the code path immediately and contribute no
%% proplist entry. Any unrecognised option crashes, which main/1 turns
%% into a usage message.
args([]) ->
    [];
args(["-n", NumStr | Rest]) ->
    [{numtests, list_to_integer(NumStr)} | args(Rest)];
args(["-m", ModStr | Rest]) ->
    [{module, list_to_atom(ModStr)} | args(Rest)];
args(["-pa", Dir | Rest]) ->
    code:add_patha(Dir),
    args(Rest);
args(["-pz", Dir | Rest]) ->
    code:add_pathz(Dir),
    args(Rest);
args(["-rpt", LevelStr | Rest]) ->
    Level = case LevelStr of
                "all" -> all;
                "none" -> none;
                "error" -> error
            end,
    [{rpt, Level} | args(Rest)].
%% Dispatch to eqc:module/2 when this eqc release exports it; otherwise
%% fall back to plain eqc:module/1 (when no options were given) or to
%% the local option-applying wrapper module2/3.
module(Opt, Rpt, Mod) ->
    case erlang:function_exported(eqc, module, 2) of
        true ->
            eqc:module(Opt, Mod);
        false ->
            case Opt == [] of
                true -> eqc:module(Mod);
                false ->
                    module2(Opt, Rpt, Mod)
            end
    end.
%% Fallback for eqc releases without eqc:module/2: discover the module's
%% prop_*/0 functions, wrap each property with the requested options
%% (each {OptF, Arg} applied as eqc:OptF(Arg, Prop)), run quickcheck and
%% return the names of the failing properties. At -rpt level 'error' a
%% reporter process buffers output and replays it only on failure.
module2(Opt, Rpt, Mod) ->
    Exports = Mod:module_info(exports),
    Props = [F || {F, 0} <- Exports,
                  is_prop(F)],
    Apply = if is_list(Opt) ->
                    fun(F) ->
                            lists:foldl(fun({OptF,Arg}, Fx) ->
                                                eqc:OptF(Arg, Fx)
                                        end, F, Opt)
                    end;
               true ->
                    %% Single {OptF, Arg} tuple instead of a list.
                    {OptF, Arg} = Opt,
                    fun(F) -> eqc:OptF(Arg, F()) end
            end,
    if Props =/= [], Rpt == error ->
            spawn_reporter();
       true ->
            ok
    end,
    Res = lists:filter(fun(P) ->
                               new_context(P, Rpt),
                               Passed = eqc:quickcheck(Apply(Mod:P())),
                               clear_context(Passed, Rpt),
                               not Passed
                       end, Props),
    close_reporter(),
    Res.
%% A zero-arity export is considered a QuickCheck property iff its name
%% starts with "prop_".
is_prop(FunName) ->
    lists:prefix("prop_", atom_to_list(FunName)).
%% Start the output-buffering reporter, registered as run_eqc_reporter.
%% The reporter monitors its parent so the loop terminates if the main
%% process dies; the receive blocks until registration has completed,
%% guaranteeing push_io/1 can reach the name afterwards.
spawn_reporter() ->
    Parent = self(),
    P = spawn(fun() ->
                      register(run_eqc_reporter, self()),
                      erlang:monitor(process, Parent),
                      Parent ! {ok, self()},
                      reporter_loop(undefined, queue:new())
              end),
    receive
        {ok, P} = Res ->
            Res
    end.
%% Synchronously stop the reporter if one is running: request a stop,
%% wait for the acknowledgement (or its DOWN if it died meanwhile), and
%% kill it outright after 10 seconds as a last resort.
close_reporter() ->
    case whereis(run_eqc_reporter) of
        undefined ->
            ok;
        P ->
            Ref = erlang:monitor(process, P),
            P ! {self(), stop},
            receive
                {P, ok} ->
                    ok;
                {'DOWN', Ref, _, _, _} ->
                    ok
            after 10000 ->
                    exit(P, kill),
                    ok
            end
    end.
%% Reporter main loop. P is the property currently under test (undefined
%% between properties); Q buffers {FormatString, Args} output captured
%% via push_io/1. On 'report' the buffer is dumped under a FAILED header
%% and cleared; 'new_context' just resets the buffer for a new property.
%% The loop exits on a stop request or when the monitored parent dies.
reporter_loop(P, Q) ->
    receive
        {'DOWN', _, _, _, _} ->
            ok;
        {Pid, stop} ->
            Pid ! {self(), ok};
        {new_context, P1} ->
            reporter_loop(P1, queue:new());
        {output, IO} ->
            reporter_loop(P, queue:in(IO, Q));
        report ->
            io:fwrite("FAILED property ~p:~n", [P]),
            print_queue(queue:out(Q)),
            reporter_loop(undefined, queue:new())
    end.
%% At -rpt level 'error', tell the reporter a new property started
%% (which resets its output buffer); a no-op at every other level.
new_context(P, error) ->
    run_eqc_reporter ! {new_context, P};
new_context(_, _) ->
    ok.
%% After a failed quickcheck run (first argument false) at -rpt level
%% 'error', ask the reporter to dump its buffered output; otherwise noop.
clear_context(false, error) ->
    run_eqc_reporter ! report;
clear_context(_, _) ->
    ok.
%% Drain a queue of buffered {FormatString, Args} entries to stdout,
%% recursing on queue:out/1; the catch-all clause matches {empty, _}.
print_queue({{value, {Str,Args}}, Q}) ->
    io:fwrite(Str, Args),
    print_queue(queue:out(Q));
print_queue(_) ->
    ok.
%% Forward captured output to the reporter, if one is registered.
%% NOTE(review): old-style 'catch' swallows the badarg raised when
%% run_eqc_reporter is not registered (rpt level other than error);
%% a try/catch or whereis/1 check would be cleaner — confirm and tidy.
push_io(IO) ->
    catch run_eqc_reporter ! {output, IO}. | src/run_eqc.erl | 0.540318 | 0.406273 | run_eqc.erl | starcoder |
-module(poker).
-export([best/1]).
-type rank() :: 1..14.
-type suite() :: heart | diamond | spade | club.
-type classification() :: high_card | pair | two_pair | three_of_a_kind | straight | flush | full_house | four_of_a_kind | straight_flush.
-record(card, {rank :: rank(),
suite :: suite()}).
-record(hand, {cards :: [#card{}],
raw :: unicode:chardata()}).
-record(ranking, {hand :: #hand{},
classification :: classification(),
value :: [rank()]}).
%% API
%% @doc Return, in their original order, the hand(s) from Hands that
%% rank best. Each raw hand may expand to several parsed variants (one
%% per possible ace value); filtering the original strings against the
%% winners' raw forms collapses the variants back to canonical hands.
%%
%% The previous version printed every intermediate value with io:format;
%% those debug leftovers polluted stdout for every call and are removed.
-spec best([unicode:chardata()]) -> [unicode:chardata()].
best(Hands) ->
    Parsed = lists:flatmap(fun parse_hand/1, Hands),
    Ranked = lists:map(fun rank_hand/1, Parsed),
    Best = best_rankings(Ranked),
    % Must preserve original order. This also has the side effect of
    % normalizing the ace containing hands back into their canonical
    % form.
    [Hand || Hand <- Hands,
             lists:member(Hand, raw_rankings(Best))].
%% Internal
%% Map a rank symbol to its numeric value(s). An ace counts both low (1)
%% and high (14), so it yields two candidate ranks; every other symbol
%% yields exactly one.
rank(Symbol) ->
    case Symbol of
        "A" ->
            [1, 14];
        "K" ->
            [13];
        "Q" ->
            [12];
        "J" ->
            [11];
        "10" ->
            [10];
        [Digit] when Digit >= $2, Digit =< $9 ->
            [Digit - $0]
    end.
%% Extract the rank of every card in the hand, preserving card order.
ranks(#hand{cards = Cards}) ->
    lists:map(fun(#card{rank = Rank}) -> Rank end, Cards).
%% Translate a suite symbol into its atom representation.
suite(Symbol) ->
    case Symbol of
        "H" -> heart;
        "D" -> diamond;
        "S" -> spade;
        "C" -> club
    end.
%% Extract the suite of every card in the hand, preserving card order.
suites(#hand{cards = Cards}) ->
    lists:map(fun(#card{suite = Suite}) -> Suite end, Cards).
%% Sort cards by descending rank. The comparator is strict (>), so the
%% relative order of equal-rank cards is unspecified — downstream code
%% appears not to depend on tie order, but confirm before relying on it.
sort_cards(Cards) ->
    % Sort a list of cards by descending rank.
    lists:sort(
      fun(L, R) -> L#card.rank > R#card.rank end,
      Cards).
%% Parse a raw hand string (cards separated by single spaces) into one
%% or more #hand{} records; flattening collapses the nested lists that
%% parse_cards/3 produces when aces expand into multiple variants.
parse_hand(Hand) ->
    Cards = string:split(Hand, " ", all),
    lists:flatten(parse_cards(Hand, Cards, [])).
%% Turn card strings into #card{} records, keeping the original raw
%% string on the finished #hand{}. Each card string is split into its
%% rank part (everything but the last grapheme) and suite (last
%% grapheme). Because rank("A") yields two values, the comprehension at
%% the bottom forks the parse per ace, so a hand with N aces produces
%% 2^N nested hand variants (flattened by parse_hand/1).
parse_cards(Hand, [], Acc) ->
    % Sort the cards by rank here during parsing, so that later steps
    % can make some simplifying assumptions without constantly
    % resorting.
    #hand{cards = sort_cards(Acc), raw = Hand};
parse_cards(Hand, [Card | Cards], Acc) ->
    Chars = string:to_graphemes(Card),
    {Rank, Suite} = lists:split(length(Chars) - 1, Chars),
    ParsedSuite = suite(Suite),
    % Deal with aces by returning multiple hands for each ace
    % encountered, one for each possible ranking.
    [parse_cards(
       Hand,
       Cards,
       [#card{rank = ParsedRank, suite = ParsedSuite} | Acc])
     || ParsedRank <- rank(Rank)].
%% Classify a hand and compute its tie-breaking value. Flush/straight
%% flags and rank-duplicate counts are computed first; the case arms are
%% ordered from strongest to weakest classification, so the first match
%% wins. The value is the descending rank list, except for
%% duplicate-based hands where ranks ordered by duplicate count give the
%% correct tie break (e.g. the pair's rank before the kickers).
rank_hand(Hand) ->
    IsFlush = is_flush(Hand),
    IsStraight = is_straight(Hand),
    {Duplicates, DuplicateValues} = count_ranks(Hand),
    % It is useful that the scoring order for hands also happens to
    % match the precendence order for determining ranking.
    {Classification, Value} = case {IsFlush, IsStraight, Duplicates} of
                                  {true, true, _} ->
                                      {straight_flush, ranks(Hand)};
                                  {_, _, [4 | _]} ->
                                      {four_of_a_kind, DuplicateValues};
                                  {_, _, [3, 2]} ->
                                      {full_house, DuplicateValues};
                                  {true, _, _} ->
                                      {flush, ranks(Hand)};
                                  {_, true, _} ->
                                      {straight, ranks(Hand)};
                                  {_, _, [3 | _]} ->
                                      {three_of_a_kind, DuplicateValues};
                                  {_, _, [2, 2 | _]} ->
                                      {two_pair, DuplicateValues};
                                  {_, _, [2 | _]} ->
                                      {pair, DuplicateValues};
                                  _ ->
                                      {high_card, ranks(Hand)}
                              end,
    #ranking{hand = Hand,
             classification = Classification,
             value = Value}.
%% A hand is a flush when every card shares the first card's suite.
is_flush(Hand) ->
    [First | Rest] = suites(Hand),
    lists:all(fun(Suite) -> Suite =:= First end, Rest).
%% True when the (descending-sorted) ranks form a run in which each rank
%% is exactly one less than its predecessor. The fold threads
%% {Valid, Prev}; once Valid turns false it stays false for the rest of
%% the list.
is_straight(Hand) ->
    [First | Ranks] = ranks(Hand),
    % Check if the rank of each card is one less than the previous.
    {Valid, _} = lists:foldl(
                   fun (Elem, {Valid, Prev}) ->
                           case {Valid, Prev - 1} of
                               {false, _} ->
                                   {false, 0};
                               {true, Elem} ->
                                   {true, Elem};
                               _ ->
                                   {false, 0}
                           end
                   end,
                   {true, First},
                   Ranks),
    Valid.
%% Count runs of equal ranks; the card list is sorted (see parse_cards),
%% so duplicates are adjacent. Returns {Counts, Ranks} where both lists
%% are ordered by descending {Count, Rank}: Counts classifies the hand
%% (e.g. [3, 2] is a full house) and Ranks is the matching tie-break
%% value (duplicated ranks first, then kickers high to low).
count_ranks(Hand) ->
    [First | Ranks] = ranks(Hand),
    Duplicates = lists:foldl(
                   fun (Elem, [{Count, Rank} | Duplicates] = Acc) ->
                           case Elem of
                               Rank ->
                                   [{Count + 1, Rank} | Duplicates];
                               _ ->
                                   [{1, Elem} | Acc]
                           end
                   end,
                   [{1, First}],
                   Ranks),
    % Return a list of duplicate counts useful for classification and
    % a corresponding list of ranks sorted by counts usable as a
    % secondary ranking.
    lists:unzip(lists:reverse(lists:sort(Duplicates))).
%% Numeric strength of each classification; higher beats lower. This
%% ordering is the primary key when comparing two rankings.
score_classification(high_card) -> 1;
score_classification(pair) -> 2;
score_classification(two_pair) -> 3;
score_classification(three_of_a_kind) -> 4;
score_classification(straight) -> 5;
score_classification(flush) -> 6;
score_classification(full_house) -> 7;
score_classification(four_of_a_kind) -> 8;
score_classification(straight_flush) -> 9.
%% Comparable score for a ranking: classification strength first, the
%% classification-specific value list as the tie breaker.
score_ranking(#ranking{classification = Classification, value = Value}) ->
    {score_classification(Classification), Value}.
%% Sort rankings by descending score and return the leader together with
%% every ranking that ties with it (the possibly-shared winners).
best_rankings(Rankings) ->
    [Top | Rest] = lists:sort(
                     fun(A, B) -> score_ranking(A) > score_ranking(B) end,
                     Rankings),
    TopScore = score_ranking(Top),
    [Top | [Ranking || Ranking <- Rest,
                       TopScore == score_ranking(Ranking)]].
%% Project rankings back to the raw hand strings they were parsed from.
raw_rankings(Rankings) ->
    [Ranking#ranking.hand#hand.raw || Ranking <- Rankings]. | erlang/poker/src/poker.erl | 0.619356 | 0.511351 | poker.erl | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.