%% NOTE: removed extraction artifact (a markdown dataset-table header
%% that was fused onto the front of this Erlang source file).
%% Copyright 2016-2017 TensorHub, Inc.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(inifile).
-export([load/1, parse/1]).
-record(ps, {sec, secs, lnum}).
%% Read an INI file from disk and parse it.
%% Returns {ok, Sections} or {error, Reason}.
load(File) ->
    case file:read_file(File) of
        {ok, Bin} ->
            parse(Bin);
        {error, _} = Err ->
            Err
    end.
%% Parse an INI document given as a binary.
parse(Bin) ->
    Lines = split_lines(Bin),
    parse_lines(Lines, init_parse_state()).
%% Fresh parser state: no section open yet, no completed sections,
%% and line numbering starting at 1 (line numbers appear in error
%% tuples such as {section_line, N}).
init_parse_state() ->
#ps{sec=undefined, secs=[], lnum=1}.
%% Split Bin into a list of line strings. Recognized line terminators
%% are CRLF, LF, lone CR, and Ctrl-Z (\032, the DOS end-of-file mark).
split_lines(Bin) ->
    Terminators = "\r\n|\n|\r|\032",
    re:split(Bin, Terminators, [{return, list}]).
%% Drive the line-by-line parse. Trailing spaces are stripped before
%% dispatching on the line's first character; when the input is
%% exhausted the accumulated sections are finalized and returned.
parse_lines([Line|Rest], PS) ->
parse_line(strip_trailing_spaces(Line), Rest, PS);
parse_lines([], PS) ->
{ok, finalize_parse(PS)}.
%% Dispatch on the (already right-trimmed) line:
%% - blank lines and ';'/'#' comment lines are skipped,
%% - a leading '[' starts a section header,
%% - anything else is an attribute line; backslash-continued physical
%%   lines are first joined into one logical line.
parse_line("", Rest, PS) ->
parse_lines(Rest, incr_lnum(PS));
parse_line(";"++_, Rest, PS) ->
parse_lines(Rest, incr_lnum(PS));
parse_line("#"++_, Rest, PS) ->
parse_lines(Rest, incr_lnum(PS));
parse_line("["++_=Line, Rest, PS) ->
handle_section_parse(parse_section_line(Line, PS), Rest, PS);
parse_line(Line0, Rest0, PS0) ->
case read_line_continuations(Line0, Rest0, PS0) of
{ok, {Line, Rest, PS}} ->
handle_attr_parse(parse_attr_line(Line, PS), Rest, PS);
{error, Err} ->
{error, Err}
end.
%% Parse a section header such as `[name]` or `[name "subkey"]`.
%% Returns {ok, {Keys, []}} where Keys holds the captured name (and
%% optional quoted subkey) and [] is the initially empty attribute
%% list, or {error, {section_line, LineNum}} on a malformed header.
parse_section_line(Line, #ps{lnum=Num}) ->
    Re = "\\[\\s*([^ ]+)(?:\\s+\"([^\"]*)\")?\\s*\\]",
    case re:run(Line, Re, [{capture, all_but_first, list}]) of
        {match, Keys} ->
            {ok, {Keys, []}};
        nomatch ->
            {error, {section_line, Num}}
    end.
%% Continue after a section-header line: record the new section on
%% success, abort the whole parse on error.
handle_section_parse({ok, Section}, Rest, PS) ->
    PS1 = add_section(Section, PS),
    parse_lines(Rest, incr_lnum(PS1));
handle_section_parse({error, _} = Err, _Rest, _PS) ->
    Err.
%% Make New the current section. A previously open section, if any,
%% is finalized and pushed onto the completed-sections list.
add_section(New, #ps{sec=undefined}=PS) ->
    PS#ps{sec=New};
add_section(New, #ps{sec=Cur, secs=Secs}=PS) ->
    Done = finalize_section(Cur),
    PS#ps{sec=New, secs=[Done|Secs]}.
%% Attributes are accumulated in reverse; restore source order here.
finalize_section({Name, RevAttrs}) ->
    Attrs = lists:reverse(RevAttrs),
    {Name, Attrs}.
%% Collapse a run of backslash-continued physical lines into one
%% logical line. The pattern matches a line whose final character is a
%% backslash, capturing everything before it.
read_line_continuations(Line, Rest, PS) ->
    {ok, Cont} = re:compile("(.*?)\\\\$"),
    read_line_continuations_acc(Line, Rest, PS, Cont, []).
%% Accumulate continuation fragments until a line that does not end
%% with a backslash is seen.
read_line_continuations_acc(Line, Rest, PS, Pattern, Acc) ->
case re:run(Line, Pattern, [{capture, all_but_first, list}]) of
{match, [Part]} ->
%% Line ends in '\': keep the part before the backslash and
%% continue with the next physical line.
handle_line_continuation(Part, Rest, PS, Pattern, Acc);
nomatch ->
finalize_line_continuation(Line, Acc, Rest, PS)
end.
%% A continued line must be followed by another physical line; end of
%% input directly after a trailing backslash is an error.
handle_line_continuation(Part, [Next|More], PS, Pattern, Acc) ->
    read_line_continuations_acc(Next, More, incr_lnum(PS), Pattern,
                                [Part|Acc]);
handle_line_continuation(_Part, [], #ps{lnum=Num}, _Pattern, _Acc) ->
    {error, {eof, Num}}.
%% Fragments were accumulated in reverse. The logical line is returned
%% as a list of fragments (iodata); re:run accepts iodata subjects, so
%% parse_attr_line can consume it directly.
finalize_line_continuation(Line, Acc, Rest, PS) ->
    Logical = lists:reverse([Line|Acc]),
    {ok, {Logical, Rest, PS}}.
%% Parse `Name = Value` or `Name: Value`. The name may not contain
%% whitespace; the value is everything after the separator.
parse_attr_line(Line, #ps{lnum=Num}) ->
    Re = "([^\\s]+)\\s*[:=]\\s*(.*)",
    case re:run(Line, Re, [{capture, all_but_first, list}]) of
        {match, [Name, Val]} ->
            {ok, {Name, Val}};
        nomatch ->
            {error, {attr_line, Num}}
    end.
%% An attribute line is only legal inside a section; otherwise record
%% the attribute on the current section and keep parsing.
handle_attr_parse({ok, _}, _Rest, #ps{sec=undefined, lnum=Num}) ->
{error, {no_section_for_attr, Num}};
handle_attr_parse({ok, Attr}, Rest, PS) ->
parse_lines(Rest, incr_lnum(add_attr(Attr, PS)));
handle_attr_parse({error, Err}, _Rest, _PS) ->
{error, Err}.
%% Prepend Attr to the current section's attribute list (the list is
%% reversed back into source order by finalize_section/1).
add_attr(Attr, #ps{sec={Name, Attrs}}=PS) ->
    NewSec = {Name, [Attr|Attrs]},
    PS#ps{sec=NewSec}.
%% Close the still-open section (if any) and return all sections in
%% source order.
finalize_parse(#ps{sec=undefined, secs=Acc}) ->
    lists:reverse(Acc);
finalize_parse(#ps{sec=Sec, secs=Acc}) ->
    Last = finalize_section(Sec),
    lists:reverse([Last|Acc]).
%% Drop trailing space characters only ($\s, matching the default of
%% string:strip/2 with direction 'right').
strip_trailing_spaces(Str) ->
    Dropped = lists:dropwhile(fun(C) -> C =:= $\s end, lists:reverse(Str)),
    lists:reverse(Dropped).
%% Advance the parser's line counter by one.
%% (Fix: removed dataset-extraction residue that was fused onto this
%% line after the terminating period, which was not valid Erlang.)
incr_lnum(#ps{lnum=N}=PS) ->
    PS#ps{lnum=N + 1}.
%% Copyright 2018 Octavo Labs AG Zurich Switzerland (https://octavolabs.com)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(vmq_discovery_k8s).
-author("<NAME> <<EMAIL>>").
-behaviour(vmq_discovery_backend).
-export([init/0, list_nodes/0, register/0,
supports_registration/0, unregister/0]).
-define(SERVICE_ACCOUNT_PATH, "/var/run/secrets/kubernetes.io/serviceaccount/").
%%%===================================================================
%%% API
%%%===================================================================
%% Backend start-up hook. hackney (the HTTP client used to talk to
%% the Kubernetes API) must be running; the ok-match crashes on
%% failure, which is intentional (fail fast at initialization).
init() ->
lager:info("Initializing Kubernetes peer discovery..."),
ok = application:ensure_started(hackney).
%% Query the Kubernetes endpoints API for the configured service and
%% return {ok, NodeNames} for all ready endpoint addresses, or
%% {error, Reason} when the API request fails.
%%
%% Configuration comes from the `k8s` entry of the vmq_discovery
%% application's discovery_config environment.
list_nodes() ->
    {ok, DiscoveryConf} = application:get_env(vmq_discovery, discovery_config),
    K8sConf = proplists:get_value(k8s, DiscoveryConf),
    ApiServer = proplists:get_value(master_node_url, K8sConf),
    Namespace = proplists:get_value(service_namespace, K8sConf),
    Service = proplists:get_value(service_name, K8sConf),
    ServiceUrl = generate_service_url(Service, Namespace),
    AddressType = proplists:get_value(address_type, K8sConf, hostname),
    TokenPath = proplists:get_value(token_path, K8sConf),
    CertPath = proplists:get_value(certificate_path, K8sConf),
    %% Verify the API server's certificate only when the CA bundle is
    %% actually present on disk; otherwise fall back to no verification.
    HttpOpts = case filelib:is_file(CertPath) of
                   true -> [{ssl, [{cacertfile, CertPath}]}];
                   false -> [{ssl, [{verify, verify_none}]}]
               end,
    Url = lists:concat([ApiServer, ServiceUrl]),
    Headers = auth_headers(TokenPath),
    case api_request(Url, Headers, HttpOpts) of
        {ok, Response} ->
            Addresses = extract_addresses(AddressType, Response),
            {ok, lists:map(fun vmq_discovery_utils:append_node_prefix/1,
                           Addresses)};
        {error, Reason} ->
            %% Bug fix: Reason may be an integer HTTP status (from
            %% api_request/3) or an error term, neither of which the
            %% ~s control sequence can print; ~p handles any term.
            lager:warning("Failed to get nodes from Kubernetes - ~p",
                          [Reason]),
            {error, Reason}
    end.
%% This backend implements the (no-op) registration callbacks; see
%% register/0 and unregister/0.
supports_registration() ->
true.
%% Registration is a no-op: pod endpoint membership is managed by the
%% Kubernetes cluster itself, so there is nothing to register here.
-spec register() -> ok.
register() ->
ok.
%% No-op; see register/0 — membership is managed by Kubernetes.
-spec unregister() -> ok.
unregister() -> ok.
%% Path of the Kubernetes endpoints resource for Service in Namespace.
generate_service_url(Service, Namespace) ->
    Parts = ["/api/v1/namespaces/", Namespace, "/endpoints/", Service],
    lists:concat(Parts).
%% Perform an HTTP GET against the Kubernetes API. Returns {ok, Body}
%% on HTTP 200, {error, StatusCode} for any other status, and
%% {error, Reason} for transport-level failures.
api_request(URL, Headers, Opts) ->
%% hackney:get/4 takes a request payload argument; a GET sends an
%% empty body.
Params = <<>>,
lager:debug("Hitting Kubernetes endpoint: ~p.", [URL]),
case hackney:get(URL, Headers, Params, Opts) of
{ok, 200, _RespHeaders, Client} ->
{ok, _Response} = hackney:body(Client);
{ok, Status, _, _} ->
{error, Status};
{error, Reason} ->
{error, Reason}
end.
%% Build the Authorization header from the service-account token file.
%% Token files conventionally end with a newline, which must not leak
%% into the header value.
auth_headers(TokenPath) ->
    Token0 = read_file(TokenPath, <<>>),
    %% Fix: without [global], binary:replace/3 removes only the first
    %% newline; strip every newline so a multi-line token file cannot
    %% produce a corrupt header.
    Token = binary:replace(Token0, <<"\n">>, <<>>, [global]),
    [{"Authorization", "Bearer " ++ binary_to_list(Token)}].
%% Read Path, falling back to Default (with a logged error) when the
%% file cannot be read.
read_file(Path, Default) ->
    case file:read_file(Path) of
        {ok, Data} ->
            Data;
        {error, Reason} ->
            lager:error("Cannot read ~s. Reason: ~p", [Path, Reason]),
            Default
    end.
%% Return the ready addresses of one endpoints subset, first logging
%% any not-ready addresses for operator visibility.
get_ready_addresses(AddressType, Subset) ->
    case maps:get(<<"notReadyAddresses">>, Subset, undefined) of
        undefined ->
            ok;
        NotReadyAddresses ->
            Formatted = string:join(
                          [binary_to_list(get_address(AddressType, Address))
                           || Address <- NotReadyAddresses], ", "),
            %% Fix: corrected typo in log message ("thst" -> "that").
            lager:info(
              "Kubernetes endpoint returned some nodes that are not ready: ~s",
              [Formatted]
             )
    end,
    maps:get(<<"addresses">>, Subset, []).
%% Collect the addresses of all ready endpoints across every subset of
%% the endpoints API response, deduplicated via a set union (the same
%% address may appear in more than one subset).
extract_addresses(AddressType, Response) ->
AddressList = [[get_address(AddressType, Address)
|| Address <- get_ready_addresses(AddressType, Subset)]
|| Subset <- maps:get(<<"subsets">>, Response, [])],
sets:to_list(sets:union(lists:map(fun sets:from_list/1, AddressList))).
%% Pick the configured address field (the AddressType atom, e.g.
%% 'hostname' or 'ip', names a binary key) out of one endpoint
%% address map. Crashes with badkey if the field is absent.
%% (Fix: removed dataset-extraction residue fused onto the line.)
get_address(AddressType, Address) ->
    maps:get(atom_to_binary(AddressType, utf8), Address).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2018-2021. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% This pass infers types from expressions and attempts to simplify or remove
%% subsequent instructions based on that information.
%%
%% This is divided into two subpasses; the first figures out function type
%% signatures for the whole module without optimizing anything, and the second
%% optimizes based on that information, further refining the type signatures as
%% it goes.
%%
-module(beam_ssa_type).
-export([opt_start/2, opt_continue/4, opt_finish/3, opt_ranges/1]).
-include("beam_ssa_opt.hrl").
-include("beam_types.hrl").
-import(lists, [any/2,duplicate/2,foldl/3,member/2,
keyfind/3,reverse/1,split/2,zip/2]).
%% The maximum number of #b_ret{} terminators a function can have before
%% collapsing success types into a single entry. Consider the following code:
%%
%% f(0) -> 1;
%% f(...) -> ...;
%% f(500000) -> 500000.
%%
%% Since success types are grouped by return type and each clause returns a
%% distinct type (singleton #t_integer{}s), we'll add 500000 entries which
%% makes progress glacial since every call needs to examine them all to
%% determine the return type.
%%
%% The entries are collapsed unconditionally if the number of returns in a
%% function exceeds this threshold. This is necessary because collapsing as we
%% go might widen a type; if we're at (?RETURN_LIMIT - 1) entries and suddenly
%% narrow a type down, it could push us over the edge and collapse all entries,
%% possibly widening the return type and breaking optimizations that were based
%% on the earlier (narrower) types.
-define(RETURN_LIMIT, 30).
%% Constants common to all subpasses.
-record(metadata,
{ func_id :: func_id(),
limit_return :: boolean(),
params :: [beam_ssa:b_var()],
used_once :: #{ beam_ssa:b_var() => _ } }).
-type metadata() :: #metadata{}.
-type meta_cache() :: #{ func_id() => metadata() }.
-type type_db() :: #{ beam_ssa:var_name() := ssa_type() }.
%% The types are the same as in 'beam_types.hrl', with the addition of
%% `(fun(type_db()) -> type())` that defers figuring out the type until it's
%% actually used. Mainly used to coax more type information out of
%% `get_tuple_element` where a test on one element (e.g. record tag) may
%% affect the type of another.
-type ssa_type() :: fun((type_db()) -> type()) | type().
%%
%% Entry point for the first (whole-module) subpass: compute argument
%% type signatures for every reachable function, then optimize each
%% function starting from those committed argument types.
-spec opt_start(term(), term()) -> term().
opt_start(StMap, FuncDb0) when FuncDb0 =/= #{} ->
{ArgDb, MetaCache, FuncDb} = signatures(StMap, FuncDb0),
opt_start_1(maps:keys(StMap), ArgDb, StMap, FuncDb, MetaCache);
opt_start(StMap, FuncDb) ->
%% Module-level analysis is disabled, likely because of a call to
%% load_nif/2 or similar. opt_continue/4 will assume that all arguments and
%% return types are 'any'.
{StMap, FuncDb}.
%% Optimize each function with the argument types committed by the
%% signature analysis. Functions without committed types were never
%% reached by the analysis and are removed from both maps.
opt_start_1([Id | Ids], ArgDb, StMap0, FuncDb0, MetaCache) ->
case ArgDb of
#{ Id := ArgTypes } ->
#opt_st{ssa=Linear0,args=Args} = St0 = map_get(Id, StMap0),
Ts = maps:from_list(zip(Args, ArgTypes)),
{Linear, FuncDb} = opt_function(Linear0, Args, Id, Ts, FuncDb0, MetaCache),
St = St0#opt_st{ssa=Linear},
StMap = StMap0#{ Id := St },
opt_start_1(Ids, ArgDb, StMap, FuncDb, MetaCache);
#{} ->
%% Unreachable functions must be removed so that opt_continue/4
%% won't process them and potentially taint the argument types of
%% other functions.
StMap = maps:remove(Id, StMap0),
FuncDb = maps:remove(Id, FuncDb0),
opt_start_1(Ids, ArgDb, StMap, FuncDb, MetaCache)
end;
opt_start_1([], _CommittedArgs, StMap, FuncDb, _MetaCache) ->
{StMap, FuncDb}.
%%
%% The initial signature analysis is based on the paper "Practical Type
%% Inference Based on Success Typings" [1] by Tobias Lindahl and
%% Konstantinos Sagonas, mainly section 6.1 and onwards.
%%
%% The general idea is to start out at the module's entry points and propagate
%% types to the functions we call. The argument types of all exported functions
%% start out a 'any', whereas local functions start at 'none'. Every time a
%% function call widens the argument types, we analyze the callee again and
%% propagate its return types to the callers, analyzing them again, and
%% continuing this process until all arguments and return types have been
%% widened as far as they can be.
%%
%% Note that we do not "jump-start the analysis" by first determining success
%% types as in the paper because we need to know all possible inputs including
%% those that will not return.
%%
%% [1] http://www.it.uu.se/research/group/hipe/papers/succ_types.pdf
%%
-record(sig_st,
{ wl = wl_new() :: worklist(),
committed = #{} :: #{ func_id() => [type()] },
updates = #{} :: #{ func_id() => [type()] },
meta_cache = #{} :: meta_cache()}).
%% Run the whole-module signature analysis to a fixpoint. Returns the
%% committed argument types for every analyzed function, the
%% per-function metadata cache, and the updated function database.
signatures(StMap, FuncDb0) ->
State0 = init_sig_st(StMap, FuncDb0),
{State, FuncDb} = signatures_1(StMap, FuncDb0, State0),
{State#sig_st.committed, State#sig_st.meta_cache, FuncDb}.
%% Pump the worklist, re-analyzing functions until no entries remain.
signatures_1(StMap, FuncDb0, State0) ->
case wl_next(State0#sig_st.wl) of
{ok, FuncId} ->
{State, FuncDb} = sig_function(FuncId, StMap, State0, FuncDb0),
signatures_1(StMap, FuncDb, State);
empty ->
%% No more work to do, assert that we don't have any outstanding
%% updates.
#sig_st{updates=Same,committed=Same} = State0, %Assertion.
{State0, FuncDb0}
end.
%% Analyze one function. If the analysis crashes, print which
%% function failed (a compiler-debugging aid) before rethrowing with
%% the original class and stacktrace.
sig_function(Id, StMap, State, FuncDb) ->
    try
        do_sig_function(Id, StMap, State, FuncDb)
    catch
        Class:Reason:Stacktrace ->
            #b_local{name=#b_literal{val=Name},arity=Arity} = Id,
            io:fwrite("Function: ~w/~w\n", [Name,Arity]),
            erlang:raise(Class, Reason, Stacktrace)
    end.
%% Process the analysis result for one function and maintain the
%% worklist invariants described in the inline comments below.
do_sig_function(Id, StMap, State0, FuncDb0) ->
case sig_function_1(Id, StMap, State0, FuncDb0) of
{false, false, State, FuncDb} ->
%% No added work and the types are identical. Pop ourselves from
%% the work list and move on to the next function.
Wl = wl_pop(Id, State#sig_st.wl),
{State#sig_st{wl=Wl}, FuncDb};
{false, true, State, FuncDb} ->
%% We've added some work and our return type is unchanged. Keep
%% following the work list without popping ourselves; we're very
%% likely to need to return here later and can avoid a lot of
%% redundant work by keeping our place in line.
{State, FuncDb};
{true, WlChanged, State, FuncDb} ->
%% Our return type has changed so all of our (previously analyzed)
%% callers need to be analyzed again.
%%
%% If our worklist is unchanged we'll pop ourselves since our
%% callers will add us back if we need to analyzed again, and
%% it's wasteful to stay in the worklist when we don't.
Wl0 = case WlChanged of
true -> State#sig_st.wl;
false -> wl_pop(Id, State#sig_st.wl)
end,
#func_info{in=Cs0} = map_get(Id, FuncDb0),
Updates = State#sig_st.updates,
Callers = [C || C <- Cs0, is_map_key(C, Updates)],
Wl = wl_defer_list(Callers, Wl0),
{State#sig_st{wl=Wl}, FuncDb}
end.
%% Run the signature analysis over one function's SSA code. Returns
%% {SuccTypesChanged, WorklistChanged, State, FuncDb}.
sig_function_1(Id, StMap, State0, FuncDb) ->
#opt_st{ssa=Linear,args=Args} = map_get(Id, StMap),
{ArgTypes, State1} = sig_commit_args(Id, State0),
Ts = maps:from_list(zip(Args, ArgTypes)),
%% Treat each argument as if it were defined by a call to an unknown
%% remote function, making its origin completely opaque.
FakeCall = #b_set{op=call,args=[#b_remote{mod=#b_literal{val=unknown},
name=#b_literal{val=unknown},
arity=0}]},
Ds = maps:from_list([{Var, FakeCall#b_set{dst=Var}} ||
#b_var{}=Var <- Args]),
%% The entry block and the exception block are reachable with the
%% initial argument types.
Ls = #{ ?EXCEPTION_BLOCK => {incoming, Ts},
0 => {incoming, Ts} },
{Meta, State2} = sig_init_metadata(Id, Linear, Args, State1),
Wl0 = State1#sig_st.wl,
{State, SuccTypes} = sig_bs(Linear, Ds, Ls, FuncDb, #{}, [], Meta, State2),
WlChanged = wl_changed(Wl0, State#sig_st.wl),
#{ Id := #func_info{succ_types=SuccTypes0}=Entry0 } = FuncDb,
if
SuccTypes0 =:= SuccTypes ->
{false, WlChanged, State, FuncDb};
SuccTypes0 =/= SuccTypes ->
Entry = Entry0#func_info{succ_types=SuccTypes},
{true, WlChanged, State, FuncDb#{ Id := Entry }}
end.
%% Get the metadata for a function, computing and caching it on first
%% use. Previously analysed functions reuse the cached metadata.
sig_init_metadata(Id, Linear, Args, #sig_st{meta_cache=MetaCache} = State) ->
    case maps:find(Id, MetaCache) of
        {ok, Meta} ->
            {Meta, State};
        error ->
            Meta = init_metadata(Id, Linear, Args),
            Cache = MetaCache#{Id => Meta},
            {Meta, State#sig_st{meta_cache=Cache}}
    end.
%% Analyze each reachable block in linear order, propagating types to
%% successors and collecting success types at each return.
sig_bs([{L, #b_blk{is=Is,last=Last0}} | Bs],
Ds0, Ls0, Fdb, Sub0, SuccTypes0, Meta, State0) ->
case Ls0 of
#{ L := Incoming } ->
{incoming, Ts0} = Incoming, %Assertion.
{Ts, Ds, Sub, State} =
sig_is(Is, Ts0, Ds0, Ls0, Fdb, Sub0, State0),
Last = simplify_terminator(Last0, Ts, Ds, Sub),
SuccTypes = update_success_types(Last, Ts, Ds, Meta, SuccTypes0),
UsedOnce = Meta#metadata.used_once,
{_, Ls1} = update_successors(Last, Ts, Ds, Ls0, UsedOnce),
%% In the future there may be a point to storing outgoing types on
%% a per-edge basis as it would give us more precision in phi
%% nodes, but there's nothing to gain from that at the moment so
%% we'll store the current Ts to save memory.
Ls = Ls1#{ L := {outgoing, Ts} },
sig_bs(Bs, Ds, Ls, Fdb, Sub, SuccTypes, Meta, State);
#{} ->
%% This block is never reached. Ignore it.
sig_bs(Bs, Ds0, Ls0, Fdb, Sub0, SuccTypes0, Meta, State0)
end;
sig_bs([], _Ds, _Ls, _Fdb, _Sub, SuccTypes, _Meta, State) ->
{State, SuccTypes}.
%% Analyze the instructions of one block. Calls (local, through a fun,
%% and fun creation) are the interesting cases: they propagate the
%% observed argument types to the callee and pick up its return type.
%% All other instructions are merely simplified.
sig_is([#b_set{op=call,
args=[#b_local{}=Callee | _]=Args0,
dst=Dst}=I0 | Is],
Ts0, Ds0, Ls, Fdb, Sub, State0) ->
Args = simplify_args(Args0, Ts0, Sub),
I1 = I0#b_set{args=Args},
[_ | CallArgs] = Args,
{I, State} = sig_local_call(I1, Callee, CallArgs, Ts0, Fdb, State0),
Ts = update_types(I, Ts0, Ds0),
Ds = Ds0#{ Dst => I },
sig_is(Is, Ts, Ds, Ls, Fdb, Sub, State);
sig_is([#b_set{op=call,
args=[#b_var{} | _]=Args0,
dst=Dst}=I0 | Is],
Ts0, Ds0, Ls, Fdb, Sub, State0) ->
%% Call through a fun value; may be rewritten to a direct call.
Args = simplify_args(Args0, Ts0, Sub),
I1 = I0#b_set{args=Args},
{I, State} = sig_fun_call(I1, Args, Ts0, Ds0, Fdb, Sub, State0),
Ts = update_types(I, Ts0, Ds0),
Ds = Ds0#{ Dst => I },
sig_is(Is, Ts, Ds, Ls, Fdb, Sub, State);
sig_is([#b_set{op=MakeFun,args=Args0,dst=Dst}=I0|Is],
Ts0, Ds0, Ls, Fdb, Sub0, State0) when MakeFun =:= make_fun;
MakeFun =:= old_make_fun ->
%% Creating a fun propagates the free-variable types to the callee.
Args = simplify_args(Args0, Ts0, Sub0),
I1 = I0#b_set{args=Args},
{I, State} = sig_make_fun(I1, Ts0, Fdb, State0),
Ts = update_types(I, Ts0, Ds0),
Ds = Ds0#{ Dst => I },
sig_is(Is, Ts, Ds, Ls, Fdb, Sub0, State);
sig_is([I0 | Is], Ts0, Ds0, Ls, Fdb, Sub0, State) ->
case simplify(I0, Ts0, Ds0, Ls, Sub0) of
{#b_set{}, Ts, Ds} ->
sig_is(Is, Ts, Ds, Ls, Fdb, Sub0, State);
Sub when is_map(Sub) ->
%% The instruction was reduced to a substitution.
sig_is(Is, Ts0, Ds0, Ls, Fdb, Sub, State)
end;
sig_is([], Ts, Ds, _Ls, _Fdb, Sub, State) ->
{Ts, Ds, Sub, State}.
%% Analyze a call through a fun value. When the callee is statically
%% known the call is turned into a direct local call; otherwise the
%% fun's known return type (if any) is attached as an annotation.
sig_fun_call(I0, Args, Ts, Ds, Fdb, Sub, State0) ->
[Fun | CallArgs0] = Args,
FunType = normalized_type(Fun, Ts),
Arity = length(CallArgs0),
case {FunType, Ds} of
{_, #{ Fun := #b_set{op=make_fun,
args=[#b_local{arity=TotalArity}=Callee | Env]} }}
when TotalArity =:= Arity + length(Env) ->
%% When a fun is used and defined in the same function, we can make
%% a direct call since the environment is still available.
CallArgs = CallArgs0 ++ simplify_args(Env, Ts, Sub),
I = I0#b_set{args=[Callee | CallArgs]},
sig_local_call(I, Callee, CallArgs, Ts, Fdb, State0);
{#t_fun{target={Name,Arity}}, _} ->
%% When a fun lacks free variables, we can make a direct call even
%% when we don't know where it was defined.
Callee = #b_local{name=#b_literal{val=Name},
arity=Arity},
I = I0#b_set{args=[Callee | CallArgs0]},
sig_local_call(I, Callee, CallArgs0, Ts, Fdb, State0);
{#t_fun{type=Type}, _} when Type =/= any ->
I = beam_ssa:add_anno(result_type, Type, I0),
{I, State0};
_ ->
{I0, State0}
end.
%% Record the argument types this local call was made with and
%% annotate the call with the callee's return type for those types.
sig_local_call(I0, Callee, Args, Ts, Fdb, State) ->
ArgTypes = argument_types(Args, Ts),
I = sig_local_return(I0, Callee, ArgTypes, Fdb),
{I, sig_update_args(Callee, ArgTypes, State)}.
%% While it's impossible to tell which arguments a fun will be called with
%% (someone could steal it through tracing and call it), we do know its free
%% variables and can update their types as if this were a local call.
sig_make_fun(#b_set{op=MakeFun,
args=[#b_local{}=Callee | FreeVars]}=I0,
Ts, Fdb, State) when MakeFun =:= make_fun;
MakeFun =:= old_make_fun ->
%% The ordinary parameters are 'any'; only the trailing free-variable
%% parameters get concrete types.
ArgCount = Callee#b_local.arity - length(FreeVars),
FVTypes = [concrete_type(FreeVar, Ts) || FreeVar <- FreeVars],
ArgTypes = duplicate(ArgCount, any) ++ FVTypes,
I = sig_local_return(I0, Callee, ArgTypes, Fdb),
{I, sig_update_args(Callee, ArgTypes, State)}.
%% Annotate I with the callee's return type for ArgTypes, unless the
%% type is 'any' (in which case the annotation carries no information).
sig_local_return(I, Callee, ArgTypes, Fdb) ->
#func_info{succ_types=SuccTypes} = map_get(Callee, Fdb),
case return_type(SuccTypes, ArgTypes) of
any -> I;
Type -> beam_ssa:add_anno(result_type, Type, I)
end.
%% Initial analysis state: every exported function (root) is queued
%% for analysis with 'any' for all of its arguments.
init_sig_st(StMap, FuncDb) ->
%% Start out as if all the roots have been called with 'any' for all
%% arguments.
Roots = init_sig_roots(FuncDb),
#sig_st{ committed=#{},
updates=init_sig_args(Roots, StMap, #{}),
wl=wl_defer_list(Roots, wl_new()) }.
%% All exported functions are roots for the signature analysis.
init_sig_roots(FuncDb) ->
maps:fold(fun(Id, #func_info{exported=true}, Acc) ->
[Id | Acc];
(_, _, Acc) ->
Acc
end, [], FuncDb).
%% Roots (exported functions) can be called from anywhere, so every
%% parameter starts out as 'any'.
init_sig_args([Root | Roots], StMap, Acc) ->
#opt_st{args=Args0} = map_get(Root, StMap),
ArgTypes = lists:duplicate(length(Args0), any),
init_sig_args(Roots, StMap, Acc#{ Root => ArgTypes });
init_sig_args([], _StMap, Acc) ->
Acc.
%% Promote the pending (updated) argument types for Id into the
%% committed set and return them.
sig_commit_args(Id, #sig_st{updates=Us,committed=Committed0}=State0) ->
    Types = map_get(Id, Us),
    State = State0#sig_st{committed=Committed0#{ Id => Types }},
    {Types, State}.
%% Widen the recorded argument types for Callee with Types,
%% scheduling Callee for (re)analysis only when the join actually
%% widened something.
sig_update_args(Callee, Types, #sig_st{committed=Committed}=State) ->
case Committed of
#{ Callee := Current } ->
case parallel_join(Current, Types) of
Current ->
%% We've already processed this function with these
%% arguments, so there's no need to visit it again.
State;
Widened ->
sig_update_args_1(Callee, Widened, State)
end;
#{} ->
sig_update_args_1(Callee, Types, State)
end.
%% Merge Types into the pending updates for Callee and put it on the
%% worklist.
sig_update_args_1(Callee, Types, #sig_st{updates=Us0,wl=Wl0}=State) ->
Us = case Us0 of
#{ Callee := Current } ->
Us0#{ Callee => parallel_join(Current, Types) };
#{} ->
Us0#{ Callee => Types }
end,
State#sig_st{updates=Us,wl=wl_add(Callee, Wl0)}.
%% Entry point for the second subpass: optimize one function using
%% the argument types gathered so far, further refining the function
%% database as it goes.
-spec opt_continue(Linear, Args, Anno, FuncDb) -> {Linear, FuncDb} when
Linear :: [{non_neg_integer(), beam_ssa:b_blk()}],
Args :: [beam_ssa:b_var()],
Anno :: beam_ssa:anno(),
FuncDb :: func_info_db().
opt_continue(Linear0, Args, Anno, FuncDb) when FuncDb =/= #{} ->
Id = get_func_id(Anno),
case FuncDb of
#{ Id := #func_info{exported=false,arg_types=ArgTypes} } ->
%% This is a local function and we're guaranteed to have visited
%% every call site at least once, so we know that the parameter
%% types are at least as narrow as the join of all argument types.
Ts = join_arg_types(Args, ArgTypes, #{}),
opt_function(Linear0, Args, Id, Ts, FuncDb);
#{ Id := #func_info{exported=true} } ->
%% We can't infer the parameter types of exported functions, but
%% running the pass again could still help other functions.
Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]),
opt_function(Linear0, Args, Id, Ts, FuncDb)
end;
opt_continue(Linear0, Args, Anno, _FuncDb) ->
%% Module-level optimization is disabled, pass an empty function database
%% so we only perform local optimizations.
Id = get_func_id(Anno),
Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]),
{Linear, _} = opt_function(Linear0, Args, Id, Ts, #{}),
{Linear, #{}}.
%% For each parameter, join the types recorded at all call sites into
%% a single type and bind it in the accumulated type database.
join_arg_types([Arg | Args], [TypeMap | TypeMaps], Acc) ->
    Joined = beam_types:join(maps:values(TypeMap)),
    join_arg_types(Args, TypeMaps, Acc#{ Arg => Joined });
join_arg_types([], [], Acc) ->
    Acc.
%%
%% Optimizes a function based on the type information inferred by signatures/2
%% and earlier runs of opt_function/5,6.
%%
%% This is pretty straightforward as it only walks through each function once,
%% and because it only makes types narrower it's safe to optimize the functions
%% in any order or not at all.
%%
%% Convenience wrapper: optimize with no cached metadata; the /6
%% variant computes the metadata itself when the cache misses.
opt_function(Linear, Args, Id, Ts, FuncDb) ->
    opt_function(Linear, Args, Id, Ts, FuncDb, #{}).
-spec opt_function(Linear, Args, Id, Ts, FuncDb, MetaCache) -> Result when
      Linear :: [{non_neg_integer(), beam_ssa:b_blk()}],
      Args :: [beam_ssa:b_var()],
      Id :: func_id(),
      Ts :: type_db(),
      FuncDb :: func_info_db(),
      Result :: {Linear, FuncDb},
      MetaCache :: meta_cache().
%% Optimize one function. If the optimization crashes, print the
%% offending function's name and arity (a compiler-debugging aid)
%% before rethrowing with the original class and stacktrace.
opt_function(Linear, Args, Id, Ts, FuncDb, MetaCache) ->
    try
        do_opt_function(Linear, Args, Id, Ts, FuncDb, MetaCache)
    catch
        Class:Reason:Stacktrace ->
            #b_local{name=#b_literal{val=Name},arity=Arity} = Id,
            io:fwrite("Function: ~w/~w\n", [Name,Arity]),
            erlang:raise(Class, Reason, Stacktrace)
    end.
%% Optimize one function's SSA code and store the resulting success
%% types in the function database (when module-level analysis is on).
do_opt_function(Linear0, Args, Id, Ts, FuncDb0, MetaCache) ->
%% Treat each argument as if defined by a call to an unknown remote
%% function, making its origin completely opaque.
FakeCall = #b_set{op=call,args=[#b_remote{mod=#b_literal{val=unknown},
name=#b_literal{val=unknown},
arity=0}]},
Ds = maps:from_list([{Var, FakeCall#b_set{dst=Var}} ||
#b_var{}=Var <- Args]),
Ls = #{ ?EXCEPTION_BLOCK => {incoming, Ts},
0 => {incoming, Ts} },
%% Reuse metadata computed by the signature subpass when available.
Meta = case MetaCache of
#{Id := Meta0} ->
Meta0;
#{} ->
init_metadata(Id, Linear0, Args)
end,
{Linear, FuncDb, SuccTypes} =
opt_bs(Linear0, Ds, Ls, FuncDb0, #{}, [], Meta, []),
case FuncDb of
#{ Id := Entry0 } ->
Entry = Entry0#func_info{succ_types=SuccTypes},
{Linear, FuncDb#{ Id := Entry }};
#{} ->
%% Module-level optimizations have been turned off.
{Linear, FuncDb}
end.
%% Build the function's #b_local{} identifier from the func_info
%% entry of its SSA annotation map.
get_func_id(#{func_info := {_Mod, Name, Arity}}) ->
    #b_local{name=#b_literal{val=Name}, arity=Arity}.
%% Optimize each reachable block in linear order, rebuilding the block
%% list; unreachable blocks are dropped entirely.
opt_bs([{L, #b_blk{is=Is0,last=Last0}=Blk0} | Bs],
Ds0, Ls0, Fdb0, Sub0, SuccTypes0, Meta, Acc) ->
case Ls0 of
#{ L := Incoming } ->
{incoming, Ts0} = Incoming, %Assertion.
{Is, Ts, Ds, Fdb, Sub} =
opt_is(Is0, Ts0, Ds0, Ls0, Fdb0, Sub0, Meta, []),
Last1 = simplify_terminator(Last0, Ts, Ds, Sub),
SuccTypes = update_success_types(Last1, Ts, Ds, Meta, SuccTypes0),
UsedOnce = Meta#metadata.used_once,
{Last, Ls1} = update_successors(Last1, Ts, Ds, Ls0, UsedOnce),
Ls = Ls1#{ L := {outgoing, Ts} }, %Assertion.
Blk = Blk0#b_blk{is=Is,last=Last},
opt_bs(Bs, Ds, Ls, Fdb, Sub, SuccTypes, Meta, [{L,Blk} | Acc]);
#{} ->
%% This block is never reached. Discard it.
opt_bs(Bs, Ds0, Ls0, Fdb0, Sub0, SuccTypes0, Meta, Acc)
end;
opt_bs([], _Ds, _Ls, Fdb, _Sub, SuccTypes, _Meta, Acc) ->
{reverse(Acc), Fdb, SuccTypes}.
%% Optimize the instructions of one block, mirroring sig_is/7 but also
%% rebuilding the instruction list and annotating argument types.
opt_is([#b_set{op=call,
args=[#b_local{}=Callee | _]=Args0,
dst=Dst}=I0 | Is],
Ts0, Ds0, Ls, Fdb0, Sub, Meta, Acc) ->
Args = simplify_args(Args0, Ts0, Sub),
I1 = I0#b_set{args=Args},
[_ | CallArgs] = Args,
{I, Fdb} = opt_local_call(I1, Callee, CallArgs, Dst, Ts0, Fdb0, Meta),
Ts = update_types(I, Ts0, Ds0),
Ds = Ds0#{ Dst => I },
opt_is(Is, Ts, Ds, Ls, Fdb, Sub, Meta, [I | Acc]);
opt_is([#b_set{op=call,
args=[#b_var{} | _]=Args0,
dst=Dst}=I0 | Is],
Ts0, Ds0, Ls, Fdb0, Sub, Meta, Acc) ->
%% Call through a fun value; may be rewritten to a direct call.
Args = simplify_args(Args0, Ts0, Sub),
I1 = opt_anno_types(I0#b_set{args=Args}, Ts0),
{I, Fdb} = opt_fun_call(I1, Args, Ts0, Ds0, Fdb0, Sub, Meta),
Ts = update_types(I, Ts0, Ds0),
Ds = Ds0#{ Dst => I },
opt_is(Is, Ts, Ds, Ls, Fdb, Sub, Meta, [I | Acc]);
opt_is([#b_set{op=MakeFun,args=Args0,dst=Dst}=I0|Is],
Ts0, Ds0, Ls, Fdb0, Sub0, Meta, Acc) when MakeFun =:= make_fun;
MakeFun =:= old_make_fun ->
Args = simplify_args(Args0, Ts0, Sub0),
I1 = I0#b_set{args=Args},
{I, Fdb} = opt_make_fun(I1, Ts0, Fdb0, Meta),
Ts = update_types(I, Ts0, Ds0),
Ds = Ds0#{ Dst => I },
opt_is(Is, Ts, Ds, Ls, Fdb, Sub0, Meta, [I|Acc]);
opt_is([I0 | Is], Ts0, Ds0, Ls, Fdb, Sub0, Meta, Acc) ->
case simplify(I0, Ts0, Ds0, Ls, Sub0) of
{#b_set{}=I1, Ts, Ds} ->
I = opt_anno_types(I1, Ts),
opt_is(Is, Ts, Ds, Ls, Fdb, Sub0, Meta, [I | Acc]);
Sub when is_map(Sub) ->
%% The instruction was reduced to a substitution; drop it.
opt_is(Is, Ts0, Ds0, Ls, Fdb, Sub, Meta, Acc)
end;
opt_is([], Ts, Ds, _Ls, Fdb, Sub, _Meta, Acc) ->
{reverse(Acc), Ts, Ds, Fdb, Sub}.
%% Attach an 'arg_types' annotation to instructions where the extra
%% type information is known to be useful downstream.
opt_anno_types(#b_set{op=Op,args=Args}=I, Ts) ->
    case benefits_from_type_anno(Op, Args) of
        false -> I;
        true -> opt_anno_types_1(I, Args, Ts, 0, #{})
    end.
%% Collect the concrete types of the instruction's variable arguments
%% keyed by argument index, and annotate only when at least one type
%% is more specific than 'any' and differs from an existing annotation.
opt_anno_types_1(I, [#b_var{}=Var | Args], Ts, Index, Acc0) ->
case concrete_type(Var, Ts) of
any ->
opt_anno_types_1(I, Args, Ts, Index + 1, Acc0);
Type ->
%% Note that we annotate arguments by their index instead of their
%% variable name, as they may be renamed by `beam_ssa_pre_codegen`.
Acc = Acc0#{ Index => Type },
opt_anno_types_1(I, Args, Ts, Index + 1, Acc)
end;
opt_anno_types_1(I, [_Arg | Args], Ts, Index, Acc) ->
opt_anno_types_1(I, Args, Ts, Index + 1, Acc);
opt_anno_types_1(#b_set{}=I, [], _Ts, _Index, Acc) when Acc =:= #{} ->
I;
opt_anno_types_1(#b_set{anno=Anno0}=I, [], _Ts, _Index, Acc) ->
case Anno0 of
#{ arg_types := Acc } ->
%% Unchanged annotation; avoid rebuilding the instruction.
I;
#{} ->
Anno = Anno0#{ arg_types => Acc },
I#b_set{anno=Anno}
end.
%% Only add type annotations when we know we'll make good use of them.
benefits_from_type_anno({bif,Op}, _Args) when Op =:= '=:='; Op =:= '=/=' ->
    true;
benefits_from_type_anno({bif,Op}, Args) ->
    %% Boolean operators gain nothing from argument-type annotations.
    not erl_internal:bool_op(Op, length(Args));
benefits_from_type_anno(bs_create_bin, _Args) ->
    true;
benefits_from_type_anno(is_tagged_tuple, _Args) ->
    true;
benefits_from_type_anno(call, [#b_var{} | _]) ->
    %% Calls through a fun value.
    true;
benefits_from_type_anno({float,convert}, _Args) ->
    %% Note: The {float,convert} instruction does not exist when
    %% the main type optimizer pass is run. It is created and
    %% annotated by ssa_opt_float1 in beam_ssa_opt, and can also
    %% be annotated by opt_ranges/1.
    true;
benefits_from_type_anno(_Op, _Args) ->
    false.
%% Optimize a call through a fun value; mirrors sig_fun_call/7 but
%% also updates per-call-site argument types in the function database.
opt_fun_call(#b_set{dst=Dst}=I0, [Fun | CallArgs0], Ts, Ds, Fdb, Sub, Meta) ->
FunType = normalized_type(Fun, Ts),
Arity = length(CallArgs0),
case {FunType, Ds} of
{_, #{ Fun := #b_set{op=make_fun,
args=[#b_local{arity=TotalArity}=Callee | Env]} }}
when TotalArity =:= Arity + length(Env) ->
%% When a fun is used and defined in the same function, we can make
%% a direct call since the environment is still available.
CallArgs = CallArgs0 ++ simplify_args(Env, Ts, Sub),
I = I0#b_set{args=[Callee | CallArgs]},
opt_local_call(I, Callee, CallArgs, Dst, Ts, Fdb, Meta);
{#t_fun{target={Name,Arity}}, _} ->
%% When a fun lacks free variables, we can make a direct call even
%% when we don't know where it was defined.
Callee = #b_local{name=#b_literal{val=Name},
arity=Arity},
I = I0#b_set{args=[Callee | CallArgs0]},
opt_local_call(I, Callee, CallArgs0, Dst, Ts, Fdb, Meta);
{#t_fun{type=Type}, _} when Type =/= any ->
{beam_ssa:add_anno(result_type, Type, I0), Fdb};
_ ->
{I0, Fdb}
end.
%% Annotate a local call with its return type and record the argument
%% types observed at this specific call site.
opt_local_call(I0, Callee, Args, Dst, Ts, Fdb, Meta) ->
ArgTypes = argument_types(Args, Ts),
I = opt_local_return(I0, Callee, ArgTypes, Fdb),
case Fdb of
#{ Callee := #func_info{exported=false,arg_types=AT0}=Info0 } ->
%% Update the argument types of *this exact call*, the types
%% will be joined later when the callee is optimized.
CallId = {Meta#metadata.func_id, Dst},
AT = update_arg_types(ArgTypes, AT0, CallId),
Info = Info0#func_info{arg_types=AT},
{I, Fdb#{ Callee := Info }};
#{} ->
%% We can't narrow the argument types of exported functions as they
%% can receive anything as part of an external call. We can still
%% rely on their return types however.
{I, Fdb}
end.
%% See sig_make_fun/4: only the fun's free variables carry concrete
%% types; the ordinary parameters are 'any'.
opt_make_fun(#b_set{op=MakeFun,
dst=Dst,
args=[#b_local{}=Callee | FreeVars]}=I0,
Ts, Fdb, Meta) when MakeFun =:= make_fun;
MakeFun =:= old_make_fun ->
ArgCount = Callee#b_local.arity - length(FreeVars),
FVTypes = [concrete_type(FreeVar, Ts) || FreeVar <- FreeVars],
ArgTypes = duplicate(ArgCount, any) ++ FVTypes,
I = opt_local_return(I0, Callee, ArgTypes, Fdb),
case Fdb of
#{ Callee := #func_info{exported=false,arg_types=AT0}=Info0 } ->
CallId = {Meta#metadata.func_id, Dst},
AT = update_arg_types(ArgTypes, AT0, CallId),
Info = Info0#func_info{arg_types=AT},
{I, Fdb#{ Callee := Info }};
#{} ->
%% We can't narrow the argument types of exported functions as they
%% can receive anything as part of an external call.
{I, Fdb}
end.
%% Annotate I with the callee's return type for ArgTypes, unless the
%% type is 'any' or module-level optimization is disabled.
opt_local_return(I, Callee, ArgTypes, Fdb) when Fdb =/= #{} ->
#func_info{succ_types=SuccTypes} = map_get(Callee, Fdb),
case return_type(SuccTypes, ArgTypes) of
any -> I;
Type -> beam_ssa:add_anno(result_type, Type, I)
end;
opt_local_return(I, _Callee, _ArgTyps, _Fdb) ->
%% Module-level optimization is disabled, assume it returns anything.
I.
%% Records the argument types seen at one particular call site. Each
%% element of the returned list is a map from call-site id to the
%% type that argument had at that site; entries for other call sites
%% are left untouched.
update_arg_types(ArgTypes, TypeMaps, CallId) ->
    lists:zipwith(fun(Type, TypeMap) -> TypeMap#{ CallId => Type } end,
                  ArgTypes, TypeMaps).
%%
%% Called once analysis is finished: joins the argument types recorded
%% for all known call sites of a non-exported function and stores the
%% result as 'parameter_info' in the function annotation.
-spec opt_finish(Args, Anno, FuncDb) -> {Anno, FuncDb} when
      Args :: [beam_ssa:b_var()],
      Anno :: beam_ssa:anno(),
      FuncDb :: func_info_db().
opt_finish(Args, Anno, FuncDb) ->
    Id = get_func_id(Anno),
    case FuncDb of
        #{ Id := #func_info{exported=false,arg_types=ArgTypes} } ->
            %% Exported functions can be called with anything, so only
            %% non-exported functions get parameter type information.
            ParamInfo0 = maps:get(parameter_info, Anno, #{}),
            ParamInfo = opt_finish_1(Args, ArgTypes, ParamInfo0),
            {Anno#{ parameter_info => ParamInfo }, FuncDb};
        #{} ->
            {Anno, FuncDb}
    end.
%% Attaches a {type, Type} entry to each parameter whose joined type
%% over all recorded call sites is narrower than 'any'.
opt_finish_1([Arg | Args], [TypeMap | TypeMaps], Acc0) ->
    Acc = case beam_types:join(maps:values(TypeMap)) of
              any ->
                  Acc0;
              JoinedType ->
                  Infos = maps:get(Arg, Acc0, []),
                  Acc0#{ Arg => [{type, JoinedType} | Infos] }
          end,
    opt_finish_1(Args, TypeMaps, Acc);
opt_finish_1([], [], Acc) ->
    Acc.
%%%
%%% This sub pass is run once after the main type sub pass
%%% to annotate more instructions with integer ranges.
%%%
%%% The main type sub pass annotates certain instructions with
%%% their types to help the JIT generate better code.
%%%
%%% Example:
%%%
%%% foo(N0) ->
%%% N1 = N0 band 3,
%%% N = N1 + 1, % N1 is in 0..3
%%% element(N,
%%% {zero,one,two,three}).
%%%
%%% The main type pass is able to figure out the range for `N1` but
%%% not for `N`. The reason is that the type pass iterates until it
%%% reaches a fixpoint. To guarantee that it will converge, ranges for
%%% results must only be calculated for operations that retain or
%%% shrink the ranges of their arguments.
%%%
%%% Therefore, to ensure convergence, the main type pass can only
%%% safely calculate ranges for results of operations such as `and`,
%%% `bsr`, and `rem`, but not for operations such as `+`, `-`, `*`,
%%% and `bsl`.
%%%
%%% This sub pass will start from the types found in the annotations
%%% and propagate them forward through arithmetic instructions within
%%% the same function.
%%%
%%% For the example, this sub pass adds a new annotation for `N`:
%%%
%%% foo(N0) ->
%%% N1 = N0 band 3,
%%% N = N1 + 1, % N1 is in 0..3
%%% element(N, % N is in 1..4
%%% {zero,one,two,three}).
%%%
%%% With a known range and known tuple size, the JIT is able to remove
%%% all range checks for the `element/2` instruction.
%%%
-spec opt_ranges(Blocks0) -> Blocks when
      Blocks0 :: beam_ssa:block_map(),
      Blocks :: beam_ssa:block_map().
opt_ranges(Blocks) ->
    RPO = beam_ssa:rpo(Blocks),
    %% Seed the entry block (0) and the exception block with empty
    %% type maps; every other block gets its map when a predecessor
    %% is visited (see ranges_successors/3).
    Tss = #{0 => #{}, ?EXCEPTION_BLOCK => #{}},
    ranges(RPO, Tss, Blocks).
%% Walks the blocks in reverse post-order, annotating the
%% instructions of each block and propagating the resulting type map
%% to the block's successors.
ranges([Lbl | Labels], Tss0, Blocks0) ->
    #b_blk{is=Is0} = Blk0 = map_get(Lbl, Blocks0),
    {Is, Ts} = ranges_is(Is0, map_get(Lbl, Tss0), []),
    Blk = Blk0#b_blk{is=Is},
    Tss = ranges_successors(beam_ssa:successors(Blk), Ts, Tss0),
    ranges(Labels, Tss, Blocks0#{Lbl := Blk});
ranges([], _Tss, Blocks) ->
    Blocks.
%% Walks the instructions of one block in order, refreshing the type
%% annotations of instructions that benefit from them and folding any
%% inferred result ranges into the local type map as we go.
ranges_is([#b_set{op=Op,args=Args}=I0 | Rest], Ts0, Acc) ->
    case benefits_from_type_anno(Op, Args) of
        true ->
            I = update_anno_types(I0, Ts0),
            ranges_is(Rest, ranges_propagate_types(I, Ts0), [I | Acc]);
        false ->
            ranges_is(Rest, Ts0, [I0 | Acc])
    end;
ranges_is([], Ts, Acc) ->
    {lists:reverse(Acc), Ts}.
%% Propagates this block's final type map to each successor, joining
%% with whatever was already recorded for a successor that has other
%% predecessors. ?EXCEPTION_BLOCK is skipped (it was seeded with an
%% empty map). Note that the recursion always continues with this
%% block's own out-map (Ts0); the joined map (Ts) belongs to the
%% successor only.
ranges_successors([?EXCEPTION_BLOCK|Ls], Ts, Tss) ->
    ranges_successors(Ls, Ts, Tss);
ranges_successors([L|Ls], Ts0, Tss0) ->
    case Tss0 of
        #{L := Ts1} ->
            Ts = join_types(Ts0, Ts1),
            Tss = Tss0#{L := Ts},
            ranges_successors(Ls, Ts0, Tss);
        #{} ->
            Tss = Tss0#{L => Ts0},
            ranges_successors(Ls, Ts0, Tss)
    end;
ranges_successors([], _, Tss) -> Tss.
%% Infers the result type of an arithmetic BIF from the argument
%% types stored in its 'arg_types' annotation and extends the local
%% type map with it. Instructions without the annotation, or whose
%% result cannot be narrowed, leave the map unchanged.
ranges_propagate_types(#b_set{anno=Anno,op={bif,_}=Op,args=Args,dst=Dst}, Ts) ->
    case Anno of
        #{arg_types := ArgTypes0} ->
            ArgTypes = ranges_get_arg_types(Args, 0, ArgTypes0),
            case beam_call_types:arith_type(Op, ArgTypes) of
                any -> Ts;
                T -> Ts#{Dst => T}
            end;
        #{} ->
            Ts
    end;
ranges_propagate_types(_, Ts) -> Ts.
%% Builds the list of argument types for an arithmetic operation:
%% variables take the type recorded for their position (or 'any'),
%% while literals get a type constructed from their value.
ranges_get_arg_types([Arg | Args], Index, ArgTypes) ->
    Type = case Arg of
               #b_var{} ->
                   maps:get(Index, ArgTypes, any);
               #b_literal{val=Value} ->
                   beam_types:make_type_from_value(Value)
           end,
    [Type | ranges_get_arg_types(Args, Index + 1, ArgTypes)];
ranges_get_arg_types([], _Index, _ArgTypes) ->
    [].
%% Refreshes the 'arg_types' annotation of an instruction with the
%% (possibly narrower) types gathered so far. The annotation is only
%% rewritten when it would actually change, and is never added when
%% there is nothing useful to record.
update_anno_types(#b_set{anno=Anno,args=Args}=I, Ts) ->
    ArgTypes1 = case Anno of
                    #{arg_types := ArgTypes0} -> ArgTypes0;
                    #{} -> #{}
                end,
    ArgTypes = update_anno_types_1(Args, Ts, 0, ArgTypes1),
    case Anno of
        #{arg_types := ArgTypes} ->
            %% ArgTypes is already bound: the existing annotation is
            %% identical to the updated one, so keep I unchanged.
            I;
        #{} when map_size(ArgTypes) =/= 0 ->
            I#b_set{anno=Anno#{arg_types => ArgTypes}};
        #{} ->
            I
    end.
%% Meets the already-recorded type of each variable argument with the
%% type known from the local type map, keyed by argument position.
%% Only positions narrower than 'any' are kept in the result map.
update_anno_types_1([#b_var{}=V|As], Ts, Index, ArgTypes) ->
    T0 = case ArgTypes of
             #{Index := T00} -> T00;
             #{} -> any
         end,
    T1 = case Ts of
             #{V := T11} -> T11;
             #{} -> any
         end,
    case beam_types:meet(T0, T1) of
        any ->
            update_anno_types_1(As, Ts, Index + 1, ArgTypes);
        T ->
            true = T =/= none, %Assertion.
            update_anno_types_1(As, Ts, Index + 1, ArgTypes#{Index => T})
    end;
update_anno_types_1([_|As], Ts, Index, ArgTypes) ->
    %% Literal argument: nothing to record for this position.
    update_anno_types_1(As, Ts, Index + 1, ArgTypes);
update_anno_types_1([], _, _, ArgTypes) -> ArgTypes.
%%%
%%% Optimization helpers
%%%
%% Simplifies a block terminator (br/switch/ret) using the type
%% information gathered for the block, possibly rewriting it to a
%% cheaper form.
simplify_terminator(#b_br{bool=Bool}=Br0, Ts, Ds, Sub) ->
    Br = beam_ssa:normalize(Br0#b_br{bool=simplify_arg(Bool, Ts, Sub)}),
    simplify_not(Br, Ts, Ds, Sub);
simplify_terminator(#b_switch{arg=Arg0,fail=Fail,list=List0}=Sw0,
                    Ts, Ds, Sub) ->
    Arg = simplify_arg(Arg0, Ts, Sub),
    %% Ensure that no label in the switch list is the same as the
    %% failure label.
    List = [{Val,Lbl} || {Val,Lbl} <- List0, Lbl =/= Fail],
    case beam_ssa:normalize(Sw0#b_switch{arg=Arg,list=List}) of
        #b_switch{}=Sw ->
            %% A switch on a known boolean can be turned into a plain
            %% two-way branch.
            case beam_types:is_boolean_type(concrete_type(Arg, Ts)) of
                true -> simplify_switch_bool(Sw, Ts, Ds, Sub);
                false -> Sw
            end;
        #b_br{}=Br ->
            simplify_terminator(Br, Ts, Ds, Sub)
    end;
simplify_terminator(#b_ret{arg=Arg}=Ret, Ts, Ds, Sub) ->
    %% Reducing the result of a call to a literal (fairly common for 'ok')
    %% breaks tail call optimization.
    case Ds of
        #{ Arg := #b_set{op=call}} -> Ret;
        #{} -> Ret#b_ret{arg=simplify_arg(Arg, Ts, Sub)}
    end.
%%
%% Simplifies an instruction, returning either a new instruction (with updated
%% type and definition maps), or an updated substitution map if the instruction
%% was redundant.
%%
%% simplify(I0, Ts0, Ds0, Ls, Sub) -> {I, Ts, Ds} | Sub'
%%
%% Simplifies one instruction with full context (type map, definition
%% map, block map, substitution map). Returns either an updated
%% instruction plus updated maps, or an extended substitution map
%% when the instruction turned out to be redundant.
simplify(#b_set{op=phi,dst=Dst,args=Args0}=I0, Ts0, Ds0, Ls, Sub) ->
    %% Simplify the phi node by removing all predecessor blocks that no
    %% longer exists or no longer branches to this block.
    {Type, Args} = simplify_phi_args(Args0, Ls, Sub, none, []),
    case phi_all_same(Args) of
        true ->
            %% Eliminate the phi node if there is just one source
            %% value or if the values are identical.
            [{Val, _} | _] = Args,
            Sub#{ Dst => Val };
        false ->
            I = I0#b_set{args=Args},
            Ts = Ts0#{ Dst => Type },
            Ds = Ds0#{ Dst => I },
            {I, Ts, Ds}
    end;
simplify(#b_set{op={succeeded,Kind},args=[Arg],dst=Dst}=I,
         Ts0, Ds0, _Ls, Sub) ->
    Type = case will_succeed(I, Ts0, Ds0, Sub) of
               yes -> beam_types:make_atom(true);
               no -> beam_types:make_atom(false);
               maybe -> beam_types:make_boolean()
           end,
    case Type of
        #t_atom{elements=[true]} ->
            %% The checked operation always succeeds, so it's safe to remove
            %% this instruction regardless of whether we're in a guard or not.
            Lit = #b_literal{val=true},
            Sub#{ Dst => Lit };
        #t_atom{elements=[false]} when Kind =:= guard ->
            %% Failing operations are only safe to remove in guards.
            Lit = #b_literal{val=false},
            Sub#{ Dst => Lit };
        _ ->
            true = is_map_key(Arg, Ds0), %Assertion.
            %% Note that we never simplify args; this instruction is specific
            %% to the operation being checked, and simplifying could break that
            %% connection.
            Ts = Ts0#{ Dst => Type },
            Ds = Ds0#{ Dst => I },
            {I, Ts, Ds}
    end;
simplify(#b_set{op=bs_match,dst=Dst,args=Args0}=I0, Ts0, Ds0, _Ls, Sub) ->
    Args = simplify_args(Args0, Ts0, Sub),
    I1 = I0#b_set{args=Args},
    I2 = case {Args0,Args} of
             {[_,_,_,#b_var{},_],[Type,Val,Flags,#b_literal{val=all},Unit]} ->
                 %% The size `all` is used for the size of the final binary
                 %% segment in a pattern. Using `all` explicitly is not allowed,
                 %% so we convert it to an obvious invalid size.
                 I1#b_set{args=[Type,Val,Flags,#b_literal{val=bad_size},Unit]};
             {_,_} ->
                 I1
         end,
    %% We KNOW that simplify/2 will return a #b_set{} record when called with
    %% a bs_match instruction.
    #b_set{} = I = simplify(I2, Ts0),
    Ts = update_types(I, Ts0, Ds0),
    Ds = Ds0#{ Dst => I },
    {I, Ts, Ds};
simplify(#b_set{dst=Dst,args=Args0}=I0, Ts0, Ds0, _Ls, Sub) ->
    %% The general case: simplify the arguments, then let simplify/2
    %% have a go at the whole instruction.
    Args = simplify_args(Args0, Ts0, Sub),
    I1 = beam_ssa:normalize(I0#b_set{args=Args}),
    case simplify(I1, Ts0) of
        #b_set{}=I ->
            Ts = update_types(I, Ts0, Ds0),
            Ds = Ds0#{ Dst => I },
            {I, Ts, Ds};
        #b_literal{}=Lit ->
            Sub#{ Dst => Lit };
        #b_var{}=Var ->
            Sub#{ Dst => Var }
    end.
%% simplify(I, Ts) -> I' | #b_literal{} | #b_var{}
%%
%% Simplifies a single instruction using only type information,
%% returning either a (possibly rewritten) instruction, or a literal
%% or variable that should replace the instruction's result.
simplify(#b_set{op={bif,'and'},args=Args}=I, Ts) ->
    case is_safe_bool_op(Args, Ts) of
        true ->
            case Args of
                [_,#b_literal{val=false}=Res] -> Res;
                [Res,#b_literal{val=true}] -> Res;
                _ -> eval_bif(I, Ts)
            end;
        false ->
            I
    end;
simplify(#b_set{op={bif,'or'},args=Args}=I, Ts) ->
    case is_safe_bool_op(Args, Ts) of
        true ->
            case Args of
                [Res,#b_literal{val=false}] -> Res;
                [_,#b_literal{val=true}=Res] -> Res;
                _ -> eval_bif(I, Ts)
            end;
        false ->
            I
    end;
simplify(#b_set{op={bif,element},args=[#b_literal{val=Index},Tuple]}=I0, Ts) ->
    %% element/2 with a literal in-range index on a known tuple can be
    %% replaced with the cheaper get_tuple_element instruction.
    case normalized_type(Tuple, Ts) of
        #t_tuple{size=Size} when is_integer(Index),
                                 1 =< Index,
                                 Index =< Size ->
            I = I0#b_set{op=get_tuple_element,
                         args=[Tuple,#b_literal{val=Index-1}]},
            simplify(I, Ts);
        _ ->
            eval_bif(I0, Ts)
    end;
simplify(#b_set{op={bif,hd},args=[List]}=I, Ts) ->
    case normalized_type(List, Ts) of
        #t_cons{} ->
            I#b_set{op=get_hd};
        _ ->
            eval_bif(I, Ts)
    end;
simplify(#b_set{op={bif,tl},args=[List]}=I, Ts) ->
    case normalized_type(List, Ts) of
        #t_cons{} ->
            I#b_set{op=get_tl};
        _ ->
            eval_bif(I, Ts)
    end;
simplify(#b_set{op={bif,size},args=[Term]}=I, Ts) ->
    case normalized_type(Term, Ts) of
        #t_tuple{} ->
            simplify(I#b_set{op={bif,tuple_size}}, Ts);
        #t_bitstring{size_unit=U} when U rem 8 =:= 0 ->
            %% If the bitstring is a binary (the size in bits is
            %% evenly divisible by 8), byte_size/1 gives
            %% the same result as size/1.
            simplify(I#b_set{op={bif,byte_size}}, Ts);
        _ ->
            eval_bif(I, Ts)
    end;
simplify(#b_set{op={bif,tuple_size},args=[Term]}=I, Ts) ->
    case normalized_type(Term, Ts) of
        #t_tuple{size=Size,exact=true} ->
            #b_literal{val=Size};
        _ ->
            I
    end;
simplify(#b_set{op={bif,is_function},args=[Fun,#b_literal{val=Arity}]}=I, Ts)
  when is_integer(Arity), Arity >= 0 ->
    case normalized_type(Fun, Ts) of
        #t_fun{arity=any} ->
            I;
        #t_fun{arity=Arity} ->
            #b_literal{val=true};
        any ->
            I;
        _ ->
            #b_literal{val=false}
    end;
simplify(#b_set{op={bif,is_map_key},args=[Key,Map]}=I, Ts) ->
    case normalized_type(Map, Ts) of
        #t_map{} ->
            I#b_set{op=has_map_field,args=[Map,Key]};
        _ ->
            I
    end;
simplify(#b_set{op={bif,Op0},args=Args}=I, Ts) when Op0 =:= '==';
                                                    Op0 =:= '/=' ->
    %% '==' and '/=' can be rewritten to the cheaper '=:=' and '=/='
    %% when mixed integer/float comparison can be ruled out.
    Types = normalized_types(Args, Ts),
    EqEq0 = case {beam_types:meet(Types),beam_types:join(Types)} of
                {none,any} -> true;
                {#t_integer{},#t_integer{}} -> true;
                {#t_float{},#t_float{}} -> true;
                {#t_bitstring{},_} -> true;
                {#t_atom{},_} -> true;
                {_,_} -> false
            end,
    EqEq = EqEq0 orelse any_non_numeric_argument(Args, Ts),
    case EqEq of
        true ->
            Op = case Op0 of
                     '==' -> '=:=';
                     '/=' -> '=/='
                 end,
            simplify(I#b_set{op={bif,Op}}, Ts);
        false ->
            eval_bif(I, Ts)
    end;
simplify(#b_set{op={bif,'=:='},args=[Same,Same]}, _Ts) ->
    %% A term is always exactly equal to itself.
    #b_literal{val=true};
simplify(#b_set{op={bif,'=:='},args=[LHS,RHS]}=I, Ts) ->
    LType = concrete_type(LHS, Ts),
    RType = concrete_type(RHS, Ts),
    case beam_types:meet(LType, RType) of
        none ->
            %% The types are disjoint; the terms can never be equal.
            #b_literal{val=false};
        _ ->
            case {beam_types:is_boolean_type(LType),
                  beam_types:normalize(RType)} of
                {true,#t_atom{elements=[true]}} ->
                    %% Bool =:= true ==> Bool
                    LHS;
                {true,#t_atom{elements=[false]}} ->
                    %% Bool =:= false ==> not Bool
                    %%
                    %% This will be further optimized to eliminate the
                    %% 'not', swapping the success and failure
                    %% branches in the br instruction. If LHS comes
                    %% from a type test (such as is_atom/1) or a
                    %% comparison operator (such as >=) that can be
                    %% translated to test instruction, this
                    %% optimization will eliminate one instruction.
                    simplify(I#b_set{op={bif,'not'},args=[LHS]}, Ts);
                {_,_} ->
                    eval_bif(I, Ts)
            end
    end;
simplify(#b_set{op={bif,is_list},args=[Src]}=I0, Ts) ->
    case concrete_type(Src, Ts) of
        #t_union{list=#t_cons{}} ->
            I = I0#b_set{op=is_nonempty_list,args=[Src]},
            %% We might need to convert back to is_list/1 if it turns
            %% out that this instruction is followed by a #b_ret{}
            %% terminator.
            beam_ssa:add_anno(was_bif_is_list, true, I);
        #t_union{list=nil} ->
            I0#b_set{op={bif,'=:='},args=[Src,#b_literal{val=[]}]};
        _ ->
            eval_bif(I0, Ts)
    end;
simplify(#b_set{op={bif,Op},args=Args}=I, Ts) ->
    %% Generic BIF: annotate float operations so the instruction
    %% selector can use the float variants, then try evaluation.
    Types = normalized_types(Args, Ts),
    case is_float_op(Op, Types) of
        false ->
            eval_bif(I, Ts);
        true ->
            AnnoArgs = [anno_float_arg(A) || A <- Types],
            eval_bif(beam_ssa:add_anno(float_op, AnnoArgs, I), Ts)
    end;
simplify(#b_set{op=bs_extract,args=[Ctx]}=I, Ts) ->
    case concrete_type(Ctx, Ts) of
        #t_bitstring{} ->
            %% This is a bs_match that has been rewritten as a bs_get_tail;
            %% just return the input as-is.
            Ctx;
        #t_bs_context{} ->
            I
    end;
simplify(#b_set{op=bs_match,
                args=[#b_literal{val=binary}, Ctx, _Flags,
                      #b_literal{val=all},
                      #b_literal{val=OpUnit}]}=I, Ts) ->
    %% <<..., Foo/binary>> can be rewritten as <<..., Foo/bits>> if we know the
    %% unit is correct.
    #t_bs_context{tail_unit=CtxUnit} = concrete_type(Ctx, Ts),
    if
        CtxUnit rem OpUnit =:= 0 ->
            I#b_set{op=bs_get_tail,args=[Ctx]};
        CtxUnit rem OpUnit =/= 0 ->
            I
    end;
simplify(#b_set{op=bs_start_match,args=[#b_literal{val=new}, Src]}=I, Ts) ->
    case concrete_type(Src, Ts) of
        #t_bs_context{} ->
            %% Src is already a match context; resume it instead of
            %% starting a new match.
            I#b_set{op=bs_start_match,args=[#b_literal{val=resume}, Src]};
        _ ->
            I
    end;
simplify(#b_set{op=get_tuple_element,args=[Tuple,#b_literal{val=N}]}=I, Ts) ->
    #t_tuple{size=Size,elements=Es} = normalized_type(Tuple, Ts),
    true = Size > N, %Assertion.
    ElemType = beam_types:get_tuple_element(N + 1, Es),
    case beam_types:get_singleton_value(ElemType) of
        {ok, Val} -> #b_literal{val=Val};
        error -> I
    end;
simplify(#b_set{op=is_nonempty_list,args=[Src]}=I, Ts) ->
    case normalized_type(Src, Ts) of
        any ->
            I;
        #t_list{} ->
            I;
        #t_cons{} ->
            #b_literal{val=true};
        _ ->
            #b_literal{val=false}
    end;
simplify(#b_set{op=is_tagged_tuple,
                args=[Src,#b_literal{val=Size},#b_literal{}=Tag]}=I, Ts) ->
    case concrete_type(Src, Ts) of
        #t_union{tuple_set=TupleSet}=U ->
            %% A union of different types, one of them (probably)
            %% a tuple. Dig out the tuple type from the union and
            %% find out whether it will match.
            TupleOnlyType = #t_union{tuple_set=TupleSet},
            TT = beam_types:normalize(TupleOnlyType),
            case simplify_is_record(I, TT, Size, Tag, Ts) of
                #b_literal{val=true} ->
                    %% The tuple part of the union will always match.
                    %% A simple is_tuple/1 test will be sufficient to
                    %% distinguish the tuple from the other types in
                    %% the union.
                    I#b_set{op={bif,is_tuple},args=[Src]};
                #b_literal{val=false}=False ->
                    %% Src is never a tuple.
                    False;
                _ ->
                    %% More than one type of tuple can match. Find out
                    %% whether the possible tuples can be
                    %% distinguished by size.
                    TupleArityType = #t_tuple{size=Size,exact=true},
                    TTT = beam_types:meet(TupleArityType, TupleOnlyType),
                    case simplify_is_record(I, TTT, Size, Tag, Ts) of
                        #b_literal{val=true} ->
                            %% The possible tuple types have different sizes.
                            %% Example: {ok, _} | {error, _, _}.
                            case beam_types:normalize(U) of
                                #t_tuple{} ->
                                    %% Src is known to be a tuple, so it will
                                    %% be sufficient to test the arity.
                                    beam_ssa:add_anno(constraints, arity, I);
                                any ->
                                    %% Src might not be a tuple. Must
                                    %% test for a tuple with a given
                                    %% arity.
                                    beam_ssa:add_anno(constraints, tuple_arity, I)
                            end;
                        _ ->
                            I
                    end
            end;
        SimpleType ->
            simplify_is_record(I, SimpleType, Size, Tag, Ts)
    end;
simplify(#b_set{op=put_list,args=[#b_literal{val=H},
                                  #b_literal{val=T}]}, _Ts) ->
    #b_literal{val=[H|T]};
simplify(#b_set{op=put_tuple,args=Args}=I, _Ts) ->
    case make_literal_list(Args) of
        none -> I;
        List -> #b_literal{val=list_to_tuple(List)}
    end;
simplify(#b_set{op=call,args=[#b_remote{}=Rem|Args]}=I, Ts) ->
    case Rem of
        #b_remote{mod=#b_literal{val=Mod},
                  name=#b_literal{val=Name}} ->
            simplify_remote_call(Mod, Name, Args, Ts, I);
        #b_remote{} ->
            I
    end;
simplify(#b_set{op=call,args=[#b_literal{val=Fun}|Args]}=I, _Ts)
  when is_function(Fun, length(Args)) ->
    %% An external fun literal: rewrite to a direct remote call.
    FI = erlang:fun_info(Fun),
    {module,M} = keyfind(module, 1, FI),
    {name,F} = keyfind(name, 1, FI),
    {arity,A} = keyfind(arity, 1, FI),
    Rem = #b_remote{mod=#b_literal{val=M},
                    name=#b_literal{val=F},
                    arity=A},
    I#b_set{args=[Rem|Args]};
simplify(#b_set{op=peek_message,args=[#b_literal{val=Val}]}=I, _Ts) ->
    case Val of
        none ->
            I;
        _ ->
            %% A potential receive marker has been substituted with a literal,
            %% which means it can't actually be a marker on this path. Replace
            %% it with a normal receive.
            I#b_set{args=[#b_literal{val=none}]}
    end;
simplify(#b_set{op=recv_marker_clear,args=[#b_literal{}]}, _Ts) ->
    %% Not a receive marker: see the 'peek_message' case.
    #b_literal{val=none};
simplify(I, _Ts) ->
    I.
%% Determines whether the operation checked by a {succeeded,_}
%% instruction is guaranteed to succeed ('yes'), guaranteed to fail
%% ('no'), or undecided ('maybe').
will_succeed(#b_set{args=[Src]}, Ts, Ds, Sub) ->
    case {Ds, Ts} of
        {#{}, #{ Src := none }} ->
            %% Checked operation never returns.
            no;
        {#{ Src := I }, #{}} ->
            %% There can't be any substitution because the instruction
            %% is still there.
            false = is_map_key(Src, Sub), %Assertion.
            will_succeed_1(I, Src, Ts);
        {#{}, #{}} ->
            %% The checked instruction has been removed and substituted, so we
            %% can assume it always succeeds.
            true = is_map_key(Src, Sub), %Assertion.
            yes
    end.
%% Per-operation success analysis for will_succeed/4.
will_succeed_1(#b_set{op=bs_get_tail}, _Src, _Ts) ->
    yes;
will_succeed_1(#b_set{op=bs_start_match,args=[_, Arg]}, _Src, Ts) ->
    ArgType = concrete_type(Arg, Ts),
    case beam_types:is_bs_matchable_type(ArgType) of
        true ->
            %% In the future we may be able to remove this instruction
            %% altogether when we have a #t_bs_context{}, but for now we need
            %% to keep it for compatibility with older releases of OTP.
            yes;
        false ->
            %% Is it at all possible to match?
            case beam_types:meet(ArgType, #t_bs_matchable{}) of
                none -> no;
                _ -> maybe
            end
    end;
will_succeed_1(#b_set{op={bif,Bif},args=BifArgs}, _Src, Ts) ->
    ArgTypes = normalized_types(BifArgs, Ts),
    beam_call_types:will_succeed(erlang, Bif, ArgTypes);
will_succeed_1(#b_set{op=call,
                      args=[#b_remote{mod=#b_literal{val=Mod},
                                      name=#b_literal{val=Func}} |
                            CallArgs]},
               _Src, Ts) ->
    ArgTypes = normalized_types(CallArgs, Ts),
    beam_call_types:will_succeed(Mod, Func, ArgTypes);
will_succeed_1(#b_set{op=get_hd}, _Src, _Ts) ->
    yes;
will_succeed_1(#b_set{op=get_tl}, _Src, _Ts) ->
    yes;
will_succeed_1(#b_set{op=has_map_field}, _Src, _Ts) ->
    yes;
will_succeed_1(#b_set{op=get_tuple_element}, _Src, _Ts) ->
    yes;
will_succeed_1(#b_set{op=put_tuple}, _Src, _Ts) ->
    yes;
will_succeed_1(#b_set{op=bs_create_bin}, _Src, _Ts) ->
    %% Intentionally don't try to determine whether construction will
    %% fail. Construction is unlikely to fail, and if it fails, the
    %% instruction in the runtime system will generate an exception with
    %% better information of what went wrong.
    maybe;
will_succeed_1(#b_set{op=bs_match,
                      args=[#b_literal{val=Type},_,_,#b_literal{val=Size},_]},
               _Src, _Ts) ->
    if
        is_integer(Size), Size >= 0 ->
            maybe;
        Type =:= binary, Size =:= all ->
            %% `all` is a legal size for binary segments at the end of
            %% a binary pattern.
            maybe;
        true ->
            %% Invalid size. Matching will fail.
            no
    end;
%% These operations may fail even though we know their return value on success.
will_succeed_1(#b_set{op=call}, _Src, _Ts) ->
    maybe;
will_succeed_1(#b_set{op=get_map_element}, _Src, _Ts) ->
    maybe;
will_succeed_1(#b_set{op=wait_timeout}, _Src, _Ts) ->
    %% It is essential to keep the {succeeded,body} instruction to
    %% ensure that the failure edge, which potentially leads to a
    %% landingpad, is preserved. If the failure edge is removed, a Y
    %% register holding a `try` tag could be reused prematurely.
    maybe;
will_succeed_1(#b_set{}, _Src, _Ts) ->
    maybe.
%% Evaluates an is_tagged_tuple test against a tuple type, returning
%% #b_literal{val=true}, #b_literal{val=false}, or the unchanged
%% instruction when the outcome cannot be decided at compile time.
simplify_is_record(I, #t_tuple{exact=Exact,
                               size=Size,
                               elements=Es},
                   RecSize, #b_literal{val=TagVal}=RecTag, Ts) ->
    TagType = maps:get(1, Es, any),
    TagMatch = case beam_types:get_singleton_value(TagType) of
                   {ok, TagVal} -> yes;
                   {ok, _} -> no;
                   error ->
                       %% Is it at all possible for the tag to match?
                       case beam_types:meet(concrete_type(RecTag, Ts), TagType) of
                           none -> no;
                           _ -> maybe
                       end
               end,
    if
        Size =/= RecSize, Exact; Size > RecSize; TagMatch =:= no ->
            #b_literal{val=false};
        Size =:= RecSize, Exact, TagMatch =:= yes ->
            #b_literal{val=true};
        true ->
            I
    end;
simplify_is_record(I, any, _Size, _Tag, _Ts) ->
    I;
simplify_is_record(_I, _Type, _Size, _Tag, _Ts) ->
    %% Not a tuple type at all, so the test can never succeed.
    #b_literal{val=false}.
%% Rewrites a switch on a known-boolean variable into a two-way
%% branch. Appending {false,Fail} and {true,Fail} to the list
%% guarantees that both keyfind calls succeed, defaulting a value
%% that is absent from the switch list to the failure label.
simplify_switch_bool(#b_switch{arg=B,fail=Fail,list=List0}, Ts, Ds, Sub) ->
    FalseVal = #b_literal{val=false},
    TrueVal = #b_literal{val=true},
    List1 = List0 ++ [{FalseVal,Fail},{TrueVal,Fail}],
    {_,FalseLbl} = keyfind(FalseVal, 1, List1),
    {_,TrueLbl} = keyfind(TrueVal, 1, List1),
    Br = #b_br{bool=B,succ=TrueLbl,fail=FalseLbl},
    simplify_terminator(Br, Ts, Ds, Sub).
%% Simplifies a branch on the result of a 'not': branch directly on
%% the negated variable with the success and failure labels swapped.
%% This is only safe when the negated value is known to be a boolean.
simplify_not(#b_br{bool=#b_var{}=V,succ=Succ,fail=Fail}=Br0, Ts, Ds, Sub) ->
    case Ds of
        #{V:=#b_set{op={bif,'not'},args=[Bool]}} ->
            case beam_types:is_boolean_type(concrete_type(Bool, Ts)) of
                true ->
                    Br = Br0#b_br{bool=Bool,succ=Fail,fail=Succ},
                    simplify_terminator(Br, Ts, Ds, Sub);
                false ->
                    Br0
            end;
        #{} ->
            Br0
    end;
simplify_not(#b_br{bool=#b_literal{}}=Br, _Ts, _Ds, _Sub) ->
    %% Unused parameter names fixed to match the actual argument
    %% order (Ts, Ds, Sub); they were previously misleadingly named.
    Br.
%% Simplifies the arguments of a phi node, dropping those whose
%% predecessor block is absent from the block map (removed or no
%% longer branching here), and computes the join of the remaining
%% argument types.
simplify_phi_args([{Arg0, From} | Rest], Ls, Sub, Type0, Args) ->
    case Ls of
        #{ From := Outgoing } ->
            {outgoing, Ts} = Outgoing, %Assertion.
            Arg = simplify_arg(Arg0, Ts, Sub),
            Type = beam_types:join(concrete_type(Arg, Ts), Type0),
            Phi = {Arg, From},
            simplify_phi_args(Rest, Ls, Sub, Type, [Phi | Args]);
        #{} ->
            simplify_phi_args(Rest, Ls, Sub, Type0, Args)
    end;
simplify_phi_args([], _Ls, _Sub, Type, Args) ->
    %% We return the arguments in their incoming order so that they won't
    %% change back and forth and ruin fixpoint iteration in beam_ssa_opt.
    {Type, reverse(Args)}.
%% Returns true when every phi argument carries the same value,
%% regardless of which predecessor it comes from.
phi_all_same([{Arg, _From} | Phis]) ->
    phi_all_same_1(Phis, Arg).

%% Helper: checks that the value of every remaining argument is
%% exactly equal to Arg.
phi_all_same_1(Phis, Arg) ->
    lists:all(fun({A, _From}) -> A =:= Arg end, Phis).
%% Simplifies a remote (M:F/A) call using knowledge of a few
%% well-known BIFs, falling back to compile-time evaluation for pure
%% BIFs with literal arguments.
simplify_remote_call(erlang, throw, [Term], Ts, I) ->
    %% Annotate `throw` instructions with the type of their thrown term,
    %% helping `beam_ssa_throw` optimize non-local returns.
    Type = normalized_type(Term, Ts),
    beam_ssa:add_anno(thrown_type, Type, I);
simplify_remote_call(erlang, '++', [#b_literal{val=[]},Tl], _Ts, _I) ->
    %% [] ++ Tail is simply Tail.
    Tl;
simplify_remote_call(erlang, setelement,
                     [#b_literal{val=Pos},
                      #b_literal{val=Tuple},
                      #b_var{}=Value], _Ts, I)
  when is_integer(Pos), 1 =< Pos, Pos =< tuple_size(Tuple) ->
    %% Position is a literal integer and the shape of the
    %% tuple is known.
    Els0 = [#b_literal{val=El} || El <- tuple_to_list(Tuple)],
    {Bef,[_|Aft]} = split(Pos - 1, Els0),
    Els = Bef ++ [Value|Aft],
    I#b_set{op=put_tuple,args=Els};
simplify_remote_call(Mod, Name, Args, _Ts, I) ->
    case erl_bifs:is_pure(Mod, Name, length(Args)) of
        true ->
            simplify_pure_call(Mod, Name, Args, I);
        false ->
            I
    end.
%% Simplify a remote call to a pure BIF. When all arguments are
%% literals the BIF is applied at compile time; the result replaces
%% the call if it can be expressed as a literal.
simplify_pure_call(Mod, Name, Args0, I) ->
    case make_literal_list(Args0) of
        none ->
            I;
        Args ->
            %% The arguments are literals. Try to evaluate the BIF.
            try apply(Mod, Name, Args) of
                Val ->
                    case cerl:is_literal_term(Val) of
                        true ->
                            #b_literal{val=Val};
                        false ->
                            %% The value can't be expressed as a literal
                            %% (e.g. a pid).
                            I
                    end
            catch
                _:_ ->
                    %% Failed. Don't bother trying to optimize
                    %% the call.
                    I
            end
    end.
%% Returns true when at least one argument is known never to be (or
%% to contain) a number, in which case '==' behaves exactly like
%% '=:='.
any_non_numeric_argument([#b_literal{val=Lit} | _], _Ts) ->
    is_non_numeric(Lit);
any_non_numeric_argument([#b_var{}=Var | Rest], Ts) ->
    case is_non_numeric_type(concrete_type(Var, Ts)) of
        true -> true;
        false -> any_non_numeric_argument(Rest, Ts)
    end;
any_non_numeric_argument([], _Ts) ->
    false.
%% Returns true when the literal term cannot contain a number
%% anywhere inside it, so comparing it with '==' behaves exactly
%% like '=:='.
is_non_numeric(Num) when is_number(Num) ->
    false;
is_non_numeric([Head | Tail]) ->
    is_non_numeric(Head) andalso is_non_numeric(Tail);
is_non_numeric(Tuple) when is_tuple(Tuple) ->
    lists:all(fun is_non_numeric/1, tuple_to_list(Tuple));
is_non_numeric(Map) when is_map(Map) ->
    %% Map keys are compared using `=:=` (OTP 18 and later), so only
    %% the values need to be checked for numbers.
    is_non_numeric(maps:values(Map));
is_non_numeric(_Other) ->
    true.
%% Checks that no element of the tuple (positions 1..El) contains a
%% number.
is_non_numeric_tuple(_Tuple, 0) ->
    true;
is_non_numeric_tuple(Tuple, El) when El >= 1 ->
    is_non_numeric(element(El, Tuple)) andalso
        is_non_numeric_tuple(Tuple, El - 1).
%% Type-level version of is_non_numeric/1: returns true when the
%% type guarantees that no number can occur anywhere in a value of
%% that type.
is_non_numeric_type(#t_atom{}) -> true;
is_non_numeric_type(#t_bitstring{}) -> true;
is_non_numeric_type(#t_cons{type=Type,terminator=Terminator}) ->
    is_non_numeric_type(Type) andalso is_non_numeric_type(Terminator);
is_non_numeric_type(#t_list{type=Type,terminator=Terminator}) ->
    is_non_numeric_type(Type) andalso is_non_numeric_type(Terminator);
is_non_numeric_type(#t_map{super_value=Value}) ->
    is_non_numeric_type(Value);
is_non_numeric_type(nil) -> true;
is_non_numeric_type(#t_tuple{size=Size,exact=true,elements=Types})
  when map_size(Types) =:= Size ->
    %% Only exact tuples with a known type for every element can be
    %% guaranteed non-numeric.
    is_non_numeric_tuple_type(Size, Types);
is_non_numeric_type(_) -> false.
%% Checks that the element type for every position Pos..1 of an
%% exact tuple type is non-numeric.
is_non_numeric_tuple_type(0, _Types) ->
    true;
is_non_numeric_tuple_type(Pos, Types) when Pos >= 1 ->
    is_non_numeric_type(map_get(Pos, Types)) andalso
        is_non_numeric_tuple_type(Pos - 1, Types).
%% Returns the literal values of Args as a plain list, or 'none' if
%% any argument is not a literal.
make_literal_list(Args) ->
    make_literal_list(Args, []).

make_literal_list([#b_literal{val=Val} | Rest], Acc) ->
    make_literal_list(Rest, [Val | Acc]);
make_literal_list([_ | _], _Acc) ->
    none;
make_literal_list([], Acc) ->
    lists:reverse(Acc).
%% A boolean operation is safe to evaluate or short-circuit only when
%% both operands are known to be booleans.
is_safe_bool_op([LHS, RHS], Ts) ->
    beam_types:is_boolean_type(concrete_type(LHS, Ts)) andalso
        beam_types:is_boolean_type(concrete_type(RHS, Ts)).
%% Tries to evaluate a pure BIF at compile time. With all-literal
%% arguments the BIF is simply applied (keeping the original
%% instruction if it raises); otherwise type-based evaluation of
%% type tests and arithmetic identities is attempted.
eval_bif(#b_set{op={bif,Bif},args=Args}=I, Ts) ->
    Arity = length(Args),
    case erl_bifs:is_pure(erlang, Bif, Arity) of
        false ->
            I;
        true ->
            case make_literal_list(Args) of
                none ->
                    eval_type_test_bif(I, Bif, concrete_types(Args, Ts));
                LitArgs ->
                    try apply(erlang, Bif, LitArgs) of
                        Val -> #b_literal{val=Val}
                    catch
                        error:_ -> I
                    end
            end
    end.
%% Evaluates a type test BIF from the type of its argument, returning
%% a literal when the test is decided either way. The final clause
%% also handles arithmetic identities and range-decided comparisons.
eval_type_test_bif(I, is_atom, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_atom{});
eval_type_test_bif(I, is_binary, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_bs_matchable{tail_unit=8});
eval_type_test_bif(I, is_bitstring, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_bs_matchable{});
eval_type_test_bif(I, is_boolean, [Type]) ->
    case beam_types:is_boolean_type(Type) of
        true ->
            #b_literal{val=true};
        false ->
            case beam_types:meet(Type, #t_atom{}) of
                #t_atom{elements=[_|_]=Es} ->
                    %% Known atom set: false only when no element is a
                    %% boolean.
                    case any(fun is_boolean/1, Es) of
                        true -> I;
                        false -> #b_literal{val=false}
                    end;
                #t_atom{} ->
                    I;
                none ->
                    #b_literal{val=false}
            end
    end;
eval_type_test_bif(I, is_float, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_float{});
eval_type_test_bif(I, is_function, [Type, #t_integer{elements={Arity,Arity}}])
  when Arity >= 0, Arity =< 255 ->
    eval_type_test_bif_1(I, Type, #t_fun{arity=Arity});
eval_type_test_bif(I, is_function, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_fun{});
eval_type_test_bif(I, is_integer, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_integer{});
eval_type_test_bif(I, is_list, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_list{});
eval_type_test_bif(I, is_map, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_map{});
eval_type_test_bif(I, is_number, [Type]) ->
    eval_type_test_bif_1(I, Type, number);
eval_type_test_bif(I, is_pid, [Type]) ->
    eval_type_test_bif_1(I, Type, pid);
eval_type_test_bif(I, is_port, [Type]) ->
    eval_type_test_bif_1(I, Type, port);
eval_type_test_bif(I, is_reference, [Type]) ->
    eval_type_test_bif_1(I, Type, reference);
eval_type_test_bif(I, is_tuple, [Type]) ->
    eval_type_test_bif_1(I, Type, #t_tuple{});
eval_type_test_bif(I, Op, Types) ->
    case Types of
        [#t_integer{},#t_integer{elements={0,0}}] when Op =:= 'bor'; Op =:= 'bxor' ->
            %% X bor 0 and X bxor 0 are identity operations; return the
            %% first argument as-is.
            #b_set{args=[Result,_]} = I,
            Result;
        [#t_integer{},#t_integer{elements={0,0}}] when Op =:= '*'; Op =:= 'band' ->
            %% X * 0 and X band 0 are always 0 for integers.
            #b_literal{val=0};
        [T,#t_integer{elements={0,0}}] when Op =:= '+'; Op =:= '-' ->
            %% Adding or subtracting 0 is an identity operation for
            %% numerical types.
            case beam_types:is_numerical_type(T) of
                true ->
                    #b_set{args=[Result,_]} = I,
                    Result;
                false ->
                    I
            end;
        [T,#t_integer{elements={1,1}}] when Op =:= '*'; Op =:= 'div' ->
            %% Multiplying or dividing by 1 is likewise an identity
            %% operation for numerical types.
            case beam_types:is_numerical_type(T) of
                true ->
                    #b_set{args=[Result,_]} = I,
                    Result;
                false ->
                    I
            end;
        [#t_integer{elements={LMin,LMax}},#t_integer{elements={RMin,RMax}}] ->
            %% An ordering comparison is decided when it gives the same
            %% answer at all four corners of the argument ranges.
            case is_inequality_op(Op) of
                true ->
                    case {erlang:Op(LMin, RMin),erlang:Op(LMax, RMin),
                          erlang:Op(LMin, RMax),erlang:Op(LMax, RMax)} of
                        {Bool,Bool,Bool,Bool} ->
                            #b_literal{val=Bool};
                        _ ->
                            I
                    end;
                false ->
                    I
            end;
        _ ->
            I
    end.
%% Returns true for the ordering comparison operators.
is_inequality_op(Op) ->
    lists:member(Op, ['<', '=<', '>', '>=']).
%% Decides a type test by meeting the argument type with the type the
%% test requires: unchanged meet means the test always holds, an
%% empty meet means it never holds, anything else is undecided.
%% (The order of the checks matters: when ArgType is 'none' both
%% conditions hold and the result must be 'true'.)
eval_type_test_bif_1(I, ArgType, Required) ->
    Met = beam_types:meet(ArgType, Required),
    if
        Met =:= ArgType -> #b_literal{val=true};
        Met =:= none -> #b_literal{val=false};
        true -> I
    end.
%% Simplifies every argument in the list; see simplify_arg/3.
simplify_args(Args, Ts, Sub) ->
    lists:map(fun(Arg) -> simplify_arg(Arg, Ts, Sub) end, Args).
%% Simplifies a single argument: applies the current substitution,
%% and replaces a variable with a literal when its type pins it to a
%% single value.
simplify_arg(#b_var{}=Arg0, Ts, Sub) ->
    case sub_arg(Arg0, Sub) of
        #b_literal{}=LitArg ->
            LitArg;
        #b_var{}=Arg ->
            case beam_types:get_singleton_value(concrete_type(Arg, Ts)) of
                {ok, Val} -> #b_literal{val=Val};
                error -> Arg
            end
    end;
simplify_arg(#b_remote{mod=Mod,name=Name}=Rem, Ts, Sub) ->
    Rem#b_remote{mod=simplify_arg(Mod, Ts, Sub),
                 name=simplify_arg(Name, Ts, Sub)};
simplify_arg(Arg, _Ts, _Sub) -> Arg.
%% Looks a variable up in the substitution map, returning the
%% variable itself when it has no substitution.
sub_arg(#b_var{}=Old, Sub) ->
    maps:get(Old, Sub, Old).
%% Returns true when the operation is certain to produce a float
%% given the argument types. Note that '/' always yields a float,
%% and unary '-' on a float stays a float.
is_float_op('-', [#t_float{}]) ->
    true;
is_float_op('/', [_,_]) ->
    true;
is_float_op(Op, [#t_float{},_Other]) ->
    is_float_op_1(Op);
is_float_op(Op, [_Other,#t_float{}]) ->
    is_float_op_1(Op);
is_float_op(_, _) -> false.
%% The binary arithmetic operators that yield a float when at least
%% one operand is a float.
is_float_op_1(Op) ->
    lists:member(Op, ['+', '-', '*']).
%% Annotation for one operand of a float operation: already a float,
%% or in need of conversion first.
anno_float_arg(Type) ->
    case Type of
        #t_float{} -> float;
        _ -> convert
    end.
%%%
%%% Type helpers
%%%
%% Returns the narrowest possible return type for the given success types and
%% arguments. Unreachable success types (including unresolvable
%% self-recursive ones) are filtered out before joining.
return_type(SuccTypes0, CallArgs0) ->
    SuccTypes = st_filter_reachable(SuccTypes0, CallArgs0, [], []),
    st_join_return_types(SuccTypes, none).
%% Filters out success types that cannot be reached with the given
%% argument types. Reachable self-recursive entries widen the
%% argument types, so deferred self-calls are re-examined until no
%% further widening happens; the non-recursive entries are then
%% filtered with the final argument types.
st_filter_reachable([{SuccArgs, {call_self, SelfArgs}}=SuccType | Rest],
                    CallArgs0, Deferred, Acc) ->
    case st_is_reachable(SuccArgs, CallArgs0) of
        true ->
            %% If we return a call to ourselves, we need to join our current
            %% argument types with that of the call to ensure all possible
            %% return paths are covered.
            CallArgs = parallel_join(SelfArgs, CallArgs0),
            st_filter_reachable(Rest, CallArgs, Deferred, Acc);
        false ->
            %% This may be reachable after we've joined another self-call, so
            %% we defer it until we've gone through all other self-calls.
            st_filter_reachable(Rest, CallArgs0, [SuccType | Deferred], Acc)
    end;
st_filter_reachable([SuccType | Rest], CallArgs, Deferred, Acc) ->
    st_filter_reachable(Rest, CallArgs, Deferred, [SuccType | Acc]);
st_filter_reachable([], CallArgs, Deferred, Acc) ->
    case st_any_reachable(Deferred, CallArgs) of
        true ->
            %% Handle all deferred self calls that may be reachable now that
            %% we've expanded our argument types.
            st_filter_reachable(Deferred, CallArgs, [], Acc);
        false ->
            %% We have no reachable self calls, so we know our argument types
            %% can't expand any further. Filter out our reachable sites and
            %% return.
            [ST || {SuccArgs, _}=ST <- Acc, st_is_reachable(SuccArgs, CallArgs)]
    end.
%% Joins the return types of all remaining success types into one.
st_join_return_types(SuccTypes, Acc0) ->
    lists:foldl(fun({_SuccArgs, SuccRet}, Acc) ->
                        beam_types:join(SuccRet, Acc)
                end, Acc0, SuccTypes).
%% Returns true when at least one success type is reachable with the
%% given call argument types.
st_any_reachable(SuccTypes, CallArgs) ->
    lists:any(fun({SuccArgs, _}) ->
                      st_is_reachable(SuccArgs, CallArgs)
              end, SuccTypes).
%% A success type is reachable from a call when every call argument
%% type has a non-empty meet with the corresponding success argument
%% type.
st_is_reachable([A | SuccArgs], [B | CallArgs]) ->
    beam_types:meet(A, B) =/= none andalso st_is_reachable(SuccArgs, CallArgs);
st_is_reachable([], []) ->
    true.
%% Records a success type (argument types paired with return type)
%% when a block ends in a return. Returning the result of a call to
%% ourselves is recorded as {call_self, ArgTypes} so that recursive
%% return paths can be resolved later; other terminators record
%% nothing.
update_success_types(#b_ret{arg=Arg}, Ts, Ds, Meta, SuccTypes) ->
    #metadata{ func_id=FuncId,
               limit_return=Limited,
               params=Params } = Meta,
    RetType = case Ds of
                  #{ Arg := #b_set{op=call,args=[FuncId | Args]} } ->
                      {call_self, argument_types(Args, Ts)};
                  #{} ->
                      argument_type(Arg, Ts)
              end,
    ArgTypes = argument_types(Params, Ts),
    case Limited of
        true -> ust_limited(SuccTypes, ArgTypes, RetType);
        false -> ust_unlimited(SuccTypes, ArgTypes, RetType)
    end;
update_success_types(_Last, _Ts, _Ds, _Meta, SuccTypes) ->
    SuccTypes.
%% See ?RETURN_LIMIT for details.
ust_limited(SuccTypes, CallArgs, {call_self, SelfArgs}) ->
    %% A self-call return widens the argument types; its return type is
    %% covered by the other entries, so store 'none' for it.
    NewArgs = parallel_join(CallArgs, SelfArgs),
    ust_limited_1(SuccTypes, NewArgs, none);
ust_limited(SuccTypes, CallArgs, CallRet) ->
    ust_limited_1(SuccTypes, CallArgs, CallRet).

%% Collapse everything into a single success type by joining the new entry
%% with the (at most one) existing entry.
ust_limited_1([], ArgTypes, RetType) ->
    [{ArgTypes, RetType}];
ust_limited_1([{SuccArgs, SuccRet}], CallArgs, CallRet) ->
    NewTypes = parallel_join(SuccArgs, CallArgs),
    NewType = beam_types:join(SuccRet, CallRet),
    [{NewTypes, NewType}].
%% Adds a new success type. Note that we no longer try to keep the list
%% short by combining entries with the same return type, as that can make
%% effective return types less specific as analysis goes on, which may
%% cause endless loops or render previous optimizations unsafe.
%%
%% See beam_type_SUITE:success_type_oscillation/1 for more details.
ust_unlimited(Entries, _CallArgs, none) ->
    %% 'none' is implied since functions can always fail.
    Entries;
ust_unlimited([{Args, Ret} | _] = Entries, Args, Ret) ->
    %% An identical entry already exists here; return as-is.
    Entries;
ust_unlimited([Entry | Rest], Args, Ret) ->
    [Entry | ust_unlimited(Rest, Args, Ret)];
ust_unlimited([], Args, Ret) ->
    [{Args, Ret}].
%% Propagate the type database to the successors of the current block,
%% specializing the terminator when a branch or switch choice turns out to
%% be impossible (its inferred type database is 'none').
update_successors(#b_br{bool=#b_literal{val=true},succ=Succ}=Last,
                  Ts, _Ds, Ls, _UsedOnce) ->
    {Last, update_successor(Succ, Ts, Ls)};
update_successors(#b_br{bool=#b_var{}=Bool,succ=Succ,fail=Fail}=Last0,
                  Ts, Ds, Ls0, UsedOnce) ->
    IsTempVar = is_map_key(Bool, UsedOnce),
    case infer_types_br(Bool, Ts, IsTempVar, Ds) of
        {#{}=SuccTs, #{}=FailTs} ->
            Ls1 = update_successor(Succ, SuccTs, Ls0),
            Ls = update_successor(Fail, FailTs, Ls1),
            {Last0, Ls};
        {#{}=SuccTs, none} ->
            %% The fail branch is impossible; rewrite to an unconditional
            %% branch to the success block.
            Last = Last0#b_br{bool=#b_literal{val=true},fail=Succ},
            {Last, update_successor(Succ, SuccTs, Ls0)};
        {none, #{}=FailTs} ->
            %% The success branch is impossible; branch to the fail block.
            Last = Last0#b_br{bool=#b_literal{val=true},succ=Fail},
            {Last, update_successor(Fail, FailTs, Ls0)}
    end;
update_successors(#b_switch{arg=#b_var{}=V,fail=Fail0,list=List0}=Last0,
                  Ts, Ds, Ls0, UsedOnce) ->
    IsTempVar = is_map_key(V, UsedOnce),
    {List1, FailTs, Ls1} =
        update_switch(List0, V, concrete_type(V, Ts),
                      Ts, Ds, Ls0, IsTempVar, []),
    case FailTs of
        none ->
            %% The fail block is unreachable; swap it with one of the choices.
            case List1 of
                [{#b_literal{val=0},_}|_] ->
                    %% Swap with the last choice in order to keep the zero the
                    %% first choice. If the loader can substitute a jump table
                    %% instruction, then a shorter version of the jump table
                    %% instruction can be used if the first value is zero.
                    {List, [{_,Fail}]} = split(length(List1)-1, List1),
                    Last = Last0#b_switch{fail=Fail,list=List},
                    {Last, Ls1};
                [{_,Fail}|List] ->
                    %% Swap with the first choice in the list.
                    Last = Last0#b_switch{fail=Fail,list=List},
                    {Last, Ls1}
            end;
        #{} ->
            Ls = update_successor(Fail0, FailTs, Ls1),
            Last = Last0#b_switch{list=List1},
            {Last, Ls}
    end;
update_successors(#b_ret{}=Last, _Ts, _Ds, Ls, _UsedOnce) ->
    {Last, Ls}.
%% Visit all switch choices, pruning those whose inferred types are empty
%% and successively subtracting each matched value from the type left over
%% for the fail label.
update_switch([{Val, Lbl}=Sw | List],
              V, FailType0, Ts, Ds, Ls0, IsTempVar, Acc) ->
    FailType = beam_types:subtract(FailType0, concrete_type(Val, Ts)),
    case infer_types_switch(V, Val, Ts, IsTempVar, Ds) of
        none ->
            %% This choice can never match; drop it from the list.
            update_switch(List, V, FailType, Ts, Ds, Ls0, IsTempVar, Acc);
        SwTs ->
            Ls = update_successor(Lbl, SwTs, Ls0),
            update_switch(List, V, FailType, Ts, Ds, Ls, IsTempVar, [Sw | Acc])
    end;
update_switch([], _V, none, _Ts, _Ds, Ls, _IsTempVar, Acc) ->
    %% Fail label is unreachable.
    {reverse(Acc), none, Ls};
update_switch([], V, FailType, Ts, Ds, Ls, IsTempVar, Acc) ->
    %% Fail label is reachable, see if we can narrow the type down further.
    FailTs = case beam_types:get_singleton_value(FailType) of
                 {ok, Value} ->
                     %% This is the only possible value at the fail label, so
                     %% we can infer types as if we matched it directly.
                     Lit = #b_literal{val=Value},
                     infer_types_switch(V, Lit, Ts, IsTempVar, Ds);
                 error when IsTempVar ->
                     ts_remove_var(V, Ts);
                 error ->
                     Ts#{ V := FailType }
             end,
    {reverse(Acc), FailTs, Ls}.
%% Merge the type database `Ts0' into the state recorded for successor
%% block `S', joining with any previously recorded incoming types.
update_successor(?EXCEPTION_BLOCK, _Ts, Ls) ->
    %% We KNOW that no variables are used in the ?EXCEPTION_BLOCK,
    %% so there is no need to update the type information. That
    %% can be a huge timesaver for huge functions.
    Ls;
update_successor(S, Ts0, Ls) ->
    case Ls of
        #{ S := {outgoing, _} } ->
            %% We're in a receive loop or similar; the target block will not be
            %% revisited.
            Ls;
        #{ S := {incoming, InTs} } ->
            Ts = join_types(Ts0, InTs),
            Ls#{ S := {incoming, Ts} };
        #{} ->
            Ls#{ S => {incoming, Ts0} }
    end.
%% Record the type of the instruction's destination in the type database.
update_types(#b_set{op=Op,dst=Dst,anno=Anno,args=Args}, Ts, Ds) ->
    Ts#{ Dst => type(Op, Args, Anno, Ts, Ds) }.
%% type(Op, Args, Anno, Ts, Ds) -> Type
%%  Determine the result type of the instruction Op applied to Args, given
%%  the type database Ts and the definitions Ds. Returns 'none' for
%%  operations known to always raise an exception, and 'any' when nothing
%%  useful is known.
type({bif,Bif}, Args, _Anno, Ts, _Ds) ->
    ArgTypes = normalized_types(Args, Ts),
    case beam_call_types:types(erlang, Bif, ArgTypes) of
        {any, _, _} ->
            case {Bif, Args} of
                {element, [_,#b_literal{val=Tuple}]}
                  when tuple_size(Tuple) > 0 ->
                    %% element/2 with a variable index but a literal tuple:
                    %% the result is the join of all element types.
                    join_tuple_elements(Tuple);
                {_, _} ->
                    any
            end;
        {RetType, _, _} ->
            RetType
    end;
type(bs_create_bin, _Args, _Anno, _Ts, _Ds) ->
    #t_bitstring{};
type(bs_extract, [Ctx], _Anno, _Ts, Ds) ->
    %% The extracted type is determined by the bs_match that defined Ctx.
    #b_set{op=bs_match,args=Args} = map_get(Ctx, Ds),
    bs_match_type(Args);
type(bs_start_match, [_, Src], _Anno, Ts, _Ds) ->
    case beam_types:meet(#t_bs_matchable{}, concrete_type(Src, Ts)) of
        none ->
            none;
        T ->
            Unit = beam_types:get_bs_matchable_unit(T),
            #t_bs_context{tail_unit=Unit}
    end;
type(bs_match, [#b_literal{val=binary}, Ctx, _Flags,
                #b_literal{val=all}, #b_literal{val=OpUnit}],
     _Anno, Ts, _Ds) ->
    %% This is an explicit tail unit test which does not advance the match
    %% position.
    CtxType = concrete_type(Ctx, Ts),
    OpType = #t_bs_context{tail_unit=OpUnit},
    beam_types:meet(CtxType, OpType);
type(bs_match, Args, _Anno, Ts, _Ds) ->
    [_, Ctx | _] = Args,
    %% Matches advance the current position without testing the tail unit. We
    %% try to retain unit information by taking the GCD of our current unit and
    %% the increments we know the match will advance by.
    #t_bs_context{tail_unit=CtxUnit} = concrete_type(Ctx, Ts),
    OpUnit = bs_match_stride(Args, Ts),
    #t_bs_context{tail_unit=gcd(OpUnit, CtxUnit)};
type(bs_get_tail, [Ctx], _Anno, Ts, _Ds) ->
    #t_bs_context{tail_unit=Unit} = concrete_type(Ctx, Ts),
    #t_bitstring{size_unit=Unit};
type(call, [#b_remote{mod=#b_literal{val=Mod},
                      name=#b_literal{val=Name}}|Args], _Anno, Ts, _Ds)
  when is_atom(Mod), is_atom(Name) ->
    %% Fully qualified remote call; ask beam_call_types for its model.
    ArgTypes = normalized_types(Args, Ts),
    {RetType, _, _} = beam_call_types:types(Mod, Name, ArgTypes),
    RetType;
type(call, [#b_remote{mod=Mod,name=Name} | _Args], _Anno, Ts, _Ds) ->
    %% Remote call with variable Module and/or Function, we can't say much
    %% about it other than that it will crash when either of the two is not an
    %% atom.
    ModType = beam_types:meet(concrete_type(Mod, Ts), #t_atom{}),
    NameType = beam_types:meet(concrete_type(Name, Ts), #t_atom{}),
    case {ModType, NameType} of
        {none, _} -> none;
        {_, none} -> none;
        {_, _} -> any
    end;
type(call, [#b_local{} | _Args], Anno, _Ts, _Ds) ->
    %% Local call; the result type (if known) was annotated by an earlier
    %% signature-analysis pass.
    case Anno of
        #{ result_type := Type } -> Type;
        #{} -> any
    end;
type(call, [#b_var{}=Fun | Args], Anno, Ts, _Ds) ->
    FunType = concrete_type(Fun, Ts),
    case {beam_types:meet(FunType, #t_fun{arity=length(Args)}), Anno} of
        {#t_fun{}, #{ result_type := Type }} -> Type;
        {#t_fun{}, #{}} -> any;
        {none, #{}} -> none
    end;
type(call, [#b_literal{val=Fun} | Args], _Anno, _Ts, _Ds) ->
    case is_function(Fun, length(Args)) of
        true ->
            %% This is an external fun literal (fun M:F/A).
            any;
        false ->
            %% This is either not a fun literal or the number of
            %% arguments is wrong.
            none
    end;
type(extract, [V, #b_literal{val=Idx}], _Anno, _Ts, Ds) ->
    case map_get(V, Ds) of
        #b_set{op=landingpad} when Idx =:= 0 ->
            %% Class
            #t_atom{elements=[error,exit,throw]};
        #b_set{op=landingpad} when Idx =:= 1 ->
            %% Reason
            any;
        #b_set{op=landingpad} when Idx =:= 2 ->
            %% Stack trace
            any
    end;
type(get_hd, [Src], _Anno, Ts, _Ds) ->
    SrcType = #t_cons{} = normalized_type(Src, Ts), %Assertion.
    {RetType, _, _} = beam_call_types:types(erlang, hd, [SrcType]),
    RetType;
type(get_tl, [Src], _Anno, Ts, _Ds) ->
    SrcType = #t_cons{} = normalized_type(Src, Ts), %Assertion.
    {RetType, _, _} = beam_call_types:types(erlang, tl, [SrcType]),
    RetType;
type(get_map_element, [_, _]=Args0, _Anno, Ts, _Ds) ->
    [#t_map{}=Map, Key] = normalized_types(Args0, Ts), %Assertion.
    {RetType, _, _} = beam_call_types:types(erlang, map_get, [Key, Map]),
    RetType;
type(get_tuple_element, [Tuple, Offset], _Anno, _Ts, _Ds) ->
    #b_literal{val=N} = Offset,
    Index = N + 1,
    %% Defer our type until our first use (concrete_type/2), as our type may
    %% depend on another value extracted from the same container.
    fun(Ts) ->
            #t_tuple{size=Size,elements=Es} = normalized_type(Tuple, Ts),
            true = Index =< Size, %Assertion.
            beam_types:get_tuple_element(Index, Es)
    end;
type(has_map_field, [_, _]=Args0, _Anno, Ts, _Ds) ->
    [#t_map{}=Map, Key] = normalized_types(Args0, Ts), %Assertion.
    {RetType, _, _} = beam_call_types:types(erlang, is_map_key, [Key, Map]),
    RetType;
type(is_nonempty_list, [_], _Anno, _Ts, _Ds) ->
    beam_types:make_boolean();
type(is_tagged_tuple, [_,#b_literal{},#b_literal{}], _Anno, _Ts, _Ds) ->
    beam_types:make_boolean();
type(MakeFun, Args, Anno, _Ts, _Ds) when MakeFun =:= make_fun;
                                         MakeFun =:= old_make_fun ->
    RetType = case Anno of
                  #{ result_type := Type } -> Type;
                  #{} -> any
              end,
    %% The fun's arity is the target arity minus the captured environment.
    [#b_local{name=#b_literal{val=Name},arity=TotalArity} | Env] = Args,
    Arity = TotalArity - length(Env),
    #t_fun{arity=Arity,target={Name,TotalArity},type=RetType};
type(match_fail, _, _Anno, _Ts, _Ds) ->
    none;
type(put_map, [_Kind, Map | Ss], _Anno, Ts, _Ds) ->
    put_map_type(Map, Ss, Ts);
type(put_list, [Head, Tail], _Anno, Ts, _Ds) ->
    HeadType = concrete_type(Head, Ts),
    TailType = concrete_type(Tail, Ts),
    beam_types:make_cons(HeadType, TailType);
type(put_tuple, Args, _Anno, Ts, _Ds) ->
    %% Build an exact tuple type with per-element type information.
    {Es, _} = foldl(fun(Arg, {Es0, Index}) ->
                            Type = concrete_type(Arg, Ts),
                            Es = beam_types:set_tuple_element(Index, Type, Es0),
                            {Es, Index + 1}
                    end, {#{}, 1}, Args),
    #t_tuple{exact=true,size=length(Args),elements=Es};
type(raw_raise, [Class, _, _], _Anno, Ts, _Ds) ->
    ClassType = concrete_type(Class, Ts),
    case beam_types:meet(ClassType, #t_atom{elements=[error,exit,throw]}) of
        ClassType ->
            %% Unlike erlang:raise/3, the stack argument is always correct as
            %% it's generated by the emulator, so we KNOW that it will raise an
            %% exception when the class is correct.
            none;
        _ ->
            beam_types:make_atom(badarg)
    end;
type(resume, [_, _], _Anno, _Ts, _Ds) ->
    none;
type(wait_timeout, [#b_literal{val=infinity}], _Anno, _Ts, _Ds) ->
    %% Waits forever, never reaching the 'after' block.
    beam_types:make_atom(false);
type(_, _, _, _, _) ->
    any.
%% Join the types of all elements of a literal tuple. Used to approximate
%% the result of element/2 with a variable index on a known tuple.
join_tuple_elements(Tuple) ->
    join_tuple_elements(tuple_size(Tuple), Tuple, none).

join_tuple_elements(0, _Tuple, Type) ->
    Type;
join_tuple_elements(I, Tuple, Type0) ->
    Type1 = beam_types:make_type_from_value(element(I, Tuple)),
    Type = beam_types:join(Type0, Type1),
    join_tuple_elements(I - 1, Tuple, Type).
%% Result type of a put_map instruction: fold each key/value pair into the
%% map type through beam_call_types' model of maps:put/3.
put_map_type(Map, Ss, Ts) ->
    pmt_1(Ss, Ts, normalized_type(Map, Ts)).

pmt_1([Key0, Value0 | Ss], Ts, Acc0) ->
    Key = normalized_type(Key0, Ts),
    Value = normalized_type(Value0, Ts),
    {Acc, _, _} = beam_call_types:types(maps, put, [Key, Value, Acc0]),
    pmt_1(Ss, Ts, Acc);
pmt_1([], _Ts, Acc) ->
    Acc.
%% We seldom know how far a match operation may advance, but we can often tell
%% which increment it will advance by.
bs_match_stride([#b_literal{val=Type} | Args], Ts) ->
    bs_match_stride(Type, Args, Ts).

bs_match_stride(_, [_,_,Size,#b_literal{val=Unit}], Ts) ->
    %% Sized segment: a statically known size gives an exact stride,
    %% otherwise fall back to the segment unit alone.
    case concrete_type(Size, Ts) of
        #t_integer{elements={Sz, Sz}} when is_integer(Sz) ->
            Sz * Unit;
        _ ->
            Unit
    end;
bs_match_stride(string, [_,#b_literal{val=String}], _) ->
    bit_size(String);
bs_match_stride(utf8, _, _) ->
    8;
bs_match_stride(utf16, _, _) ->
    16;
bs_match_stride(utf32, _, _) ->
    32;
bs_match_stride(_, _, _) ->
    1.
-define(UNICODE_MAX, (16#10FFFF)).

%% The type of the value extracted by a bs_match instruction, keyed on the
%% segment type literal.
bs_match_type([#b_literal{val=Type}|Args]) ->
    bs_match_type(Type, Args).

bs_match_type(binary, Args) ->
    [_,_,_,#b_literal{val=U}] = Args,
    #t_bitstring{size_unit=U};
bs_match_type(float, _) ->
    #t_float{};
bs_match_type(integer, Args) ->
    case Args of
        [_,
         #b_literal{val=Flags},
         #b_literal{val=Size},
         #b_literal{val=Unit}] when Size * Unit < 64 ->
            NumBits = Size * Unit,
            case member(unsigned, Flags) of
                true ->
                    %% Unsigned N-bit integer: range is [0, 2^N - 1].
                    beam_types:make_integer(0, (1 bsl NumBits)-1);
                false ->
                    %% Signed integer. Don't bother.
                    #t_integer{}
            end;
        [_|_] ->
            #t_integer{}
    end;
bs_match_type(skip, _) ->
    any;
bs_match_type(string, _) ->
    any;
bs_match_type(utf8, _) ->
    beam_types:make_integer(0, ?UNICODE_MAX);
bs_match_type(utf16, _) ->
    beam_types:make_integer(0, ?UNICODE_MAX);
bs_match_type(utf32, _) ->
    beam_types:make_integer(0, ?UNICODE_MAX).
normalized_types(Values, Ts) ->
    [normalized_type(Val, Ts) || Val <- Values].

%% The type of V, normalized to a plain type.
-spec normalized_type(beam_ssa:value(), type_db()) -> normal_type().
normalized_type(V, Ts) ->
    beam_types:normalize(concrete_type(V, Ts)).

argument_types(Values, Ts) ->
    [argument_type(Val, Ts) || Val <- Values].

%% The type of V, depth-limited for use as a call/success-type argument.
-spec argument_type(beam_ssa:value(), type_db()) -> type().
argument_type(V, Ts) ->
    beam_types:limit_depth(concrete_type(V, Ts)).

concrete_types(Values, Ts) ->
    [concrete_type(Val, Ts) || Val <- Values].

%% The exact type of V. Literals are typed from their value. Deferred
%% types — stored as funs, see the get_tuple_element clause of type/5 —
%% are forced here by applying them to the current type database.
-spec concrete_type(beam_ssa:value(), type_db()) -> type().
concrete_type(#b_literal{val=Value}, _Ts) ->
    beam_types:make_type_from_value(Value);
concrete_type(#b_var{}=Var, Ts) ->
    #{ Var := Type } = Ts,
    case is_function(Type) of
        true -> Type(Ts);
        false -> Type
    end.
%% infer_types(Var, Types, #d{}) -> {SuccTypes,FailTypes}
%%  Looking at the expression that defines the variable Var, infer
%%  the types for the variables in the arguments. Return the updated
%%  type database for the case that the expression evaluates to
%%  true, and for the case that it evaluates to false.
%%
%%  Here is an example. The variable being asked about is
%%  the variable Bool, which is defined like this:
%%
%%     Bool = is_nonempty_list L
%%
%%  If 'is_nonempty_list L' evaluates to 'true', L must
%%  be cons. The meet of the previously known type of L and 'cons'
%%  will be added to SuccTypes.
%%
%%  On the other hand, if 'is_nonempty_list L' evaluates to false, L
%%  is not cons and cons can be subtracted from the previously known
%%  type for L. For example, if L was known to be 'list', subtracting
%%  'cons' would give 'nil' as the only possible type. The result of the
%%  subtraction for L will be added to FailTypes.
infer_types_br(#b_var{}=V, Ts, IsTempVar, Ds) ->
    #{V:=#b_set{op=Op,args=Args}} = Ds,
    {PosTypes, NegTypes} = infer_type(Op, Args, Ts, Ds),
    SuccTs0 = meet_types(PosTypes, Ts),
    FailTs0 = subtract_types(NegTypes, Ts),
    case IsTempVar of
        true ->
            %% The branch variable is defined in this block and is only
            %% referenced by this terminator. Therefore, there is no need to
            %% include it in the type database passed on to the successors
            %% of this block.
            SuccTs = ts_remove_var(V, SuccTs0),
            FailTs = ts_remove_var(V, FailTs0),
            {SuccTs, FailTs};
        false ->
            SuccTs = infer_br_value(V, true, SuccTs0),
            FailTs = infer_br_value(V, false, FailTs0),
            {SuccTs, FailTs}
    end.
%% After branching on V, V itself is known to be the branched-on boolean —
%% unless it is not a boolean-typed value at all.
infer_br_value(_V, _Bool, none) ->
    none;
infer_br_value(V, Bool, NewTs) ->
    #{ V := T } = NewTs,
    case beam_types:is_boolean_type(T) of
        true ->
            NewTs#{ V := beam_types:make_atom(Bool) };
        false ->
            %% V is a try/catch tag or similar, leave it alone.
            NewTs
    end.
%% Infer types for the switch branch where V matched literal Lit, as if
%% `V =:= Lit' had evaluated to 'true'. Returns 'none' when the match is
%% impossible.
infer_types_switch(V, Lit, Ts0, IsTempVar, Ds) ->
    {PosTypes, _} = infer_type({bif,'=:='}, [V, Lit], Ts0, Ds),
    Ts = meet_types(PosTypes, Ts0),
    case IsTempVar of
        true -> ts_remove_var(V, Ts);
        false -> Ts
    end.
%% Drop V from the type database, passing the impossible state 'none'
%% through untouched.
ts_remove_var(V, Ts) ->
    case Ts of
        none -> none;
        _ -> maps:remove(V, Ts)
    end.
%% infer_type(Op, Args, Ts, Ds) -> {PosTypes, NegTypes}
%%  Given the operation that defines the branch variable, return the type
%%  constraints to apply when the result is true (PosTypes, to be met with
%%  the database) and when it is false (NegTypes, to be subtracted).
infer_type({succeeded,_}, [#b_var{}=Src], Ts, Ds) ->
    #b_set{op=Op,args=Args} = maps:get(Src, Ds),
    infer_success_type(Op, Args, Ts, Ds);

%% Type tests are handled separately from other BIFs as we're inferring types
%% based on their result, so we know that subtraction is safe even if we're
%% not branching on 'succeeded'.
infer_type(is_tagged_tuple, [#b_var{}=Src,#b_literal{val=Size},
                             #b_literal{}=Tag], _Ts, _Ds) ->
    Es = beam_types:set_tuple_element(1, concrete_type(Tag, #{}), #{}),
    T = {Src,#t_tuple{exact=true,size=Size,elements=Es}},
    {[T], [T]};
infer_type(is_nonempty_list, [#b_var{}=Src], _Ts, _Ds) ->
    T = {Src,#t_cons{}},
    {[T], [T]};
infer_type({bif,is_atom}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_atom{}},
    {[T], [T]};
infer_type({bif,is_binary}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_bitstring{size_unit=8}},
    {[T], [T]};
infer_type({bif,is_bitstring}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_bitstring{}},
    {[T], [T]};
infer_type({bif,is_boolean}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, beam_types:make_boolean()},
    {[T], [T]};
infer_type({bif,is_float}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_float{}},
    {[T], [T]};
infer_type({bif,is_function}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_fun{}},
    {[T], [T]};
infer_type({bif,is_function}, [#b_var{}=Arg, Arity0], _Ts, _Ds) ->
    %% Only funs with an arity in the range of the 'arity' type field can
    %% be pinned; anything else degrades to an unknown arity.
    Arity = case Arity0 of
                #b_literal{val=V} when is_integer(V), V >= 0, V =< 255 -> V;
                _ -> any
            end,
    T = {Arg, #t_fun{arity=Arity}},
    {[T], [T]};
infer_type({bif,is_integer}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_integer{}},
    {[T], [T]};
infer_type({bif,is_list}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_list{}},
    {[T], [T]};
infer_type({bif,is_map}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_map{}},
    {[T], [T]};
infer_type({bif,is_number}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, number},
    {[T], [T]};
infer_type({bif,is_pid}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, pid},
    {[T], [T]};
infer_type({bif,is_port}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, port},
    {[T], [T]};
infer_type({bif,is_reference}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, reference},
    {[T], [T]};
infer_type({bif,is_tuple}, [#b_var{}=Arg], _Ts, _Ds) ->
    T = {Arg, #t_tuple{}},
    {[T], [T]};
infer_type({bif,'=:='}, [#b_var{}=LHS,#b_var{}=RHS], Ts, _Ds) ->
    %% As an example, assume that L1 is known to be 'list', and L2 is
    %% known to be 'cons'. Then if 'L1 =:= L2' evaluates to 'true', it can
    %% be inferred that L1 is 'cons' (the meet of 'cons' and 'list').
    LType = concrete_type(LHS, Ts),
    RType = concrete_type(RHS, Ts),
    Type = beam_types:meet(LType, RType),

    PosTypes = [{V,Type} || {V, OrigType} <- [{LHS, LType}, {RHS, RType}],
                            OrigType =/= Type],

    %% We must be careful with types inferred from '=:='.
    %%
    %% If we have seen L =:= [a], we know that L is 'cons' if the
    %% comparison succeeds. However, if the comparison fails, L could
    %% still be 'cons'. Therefore, we must not subtract 'cons' from the
    %% previous type of L.
    %%
    %% However, it is safe to subtract a type inferred from '=:=' if
    %% it is single-valued, e.g. if it is [] or the atom 'true'.
    %%
    %% Note that we subtract the left-hand type from the right-hand
    %% value and vice versa. We must not subtract the meet of the two
    %% as it may be too specific. See beam_type_SUITE:type_subtraction/1
    %% for details.
    NegTypes = [T || {_, OtherType}=T <- [{RHS, LType}, {LHS, RType}],
                     beam_types:is_singleton_type(OtherType)],

    {PosTypes, NegTypes};
infer_type({bif,'=:='}, [#b_var{}=Src,#b_literal{}=Lit], Ts, Ds) ->
    Def = maps:get(Src, Ds),
    LitType = concrete_type(Lit, Ts),
    PosTypes = [{Src, LitType} | infer_eq_lit(Def, LitType)],

    %% Subtraction is only safe if LitType is single-valued.
    NegTypes = case beam_types:is_singleton_type(LitType) of
                   true -> PosTypes;
                   false -> []
               end,

    {PosTypes, NegTypes};
infer_type(_Op, _Args, _Ts, _Ds) ->
    {[], []}.
%% Type constraints that hold when the given operation is known to have
%% succeeded (branching on its 'succeeded' result).
infer_success_type({bif,Op}, Args, Ts, _Ds) ->
    ArgTypes = normalized_types(Args, Ts),

    {_, PosTypes0, CanSubtract} = beam_call_types:types(erlang, Op, ArgTypes),
    %% Only constraints on variables are useful; literals are dropped.
    PosTypes = [T || {#b_var{},_}=T <- zip(Args, PosTypes0)],

    case CanSubtract of
        true -> {PosTypes, PosTypes};
        false -> {PosTypes, []}
    end;
infer_success_type(call, [#b_var{}=Fun|Args], _Ts, _Ds) ->
    T = {Fun, #t_fun{arity=length(Args)}},
    {[T], []};
infer_success_type(bs_start_match, [_, #b_var{}=Src], _Ts, _Ds) ->
    T = {Src,#t_bs_matchable{}},
    {[T], [T]};
infer_success_type(bs_match, [#b_literal{val=binary},
                              Ctx, _Flags,
                              #b_literal{val=all},
                              #b_literal{val=OpUnit}],
                   _Ts, _Ds) ->
    %% This is an explicit tail unit test which does not advance the match
    %% position, so we know that Ctx has the same unit.
    T = {Ctx, #t_bs_context{tail_unit=OpUnit}},
    {[T], [T]};
infer_success_type(_Op, _Args, _Ts, _Ds) ->
    {[], []}.
%% Extra constraints implied by `Def =:= Lit' succeeding, based on how the
%% compared value was produced.
infer_eq_lit(#b_set{op={bif,tuple_size},args=[#b_var{}=Tuple]},
             #t_integer{elements={Size,Size}}) ->
    %% tuple_size(Tuple) =:= Size pins the exact size of Tuple.
    [{Tuple,#t_tuple{exact=true,size=Size}}];
infer_eq_lit(#b_set{op=get_tuple_element,
                    args=[#b_var{}=Tuple,#b_literal{val=N}]},
             LitType) ->
    %% element(N+1, Tuple) =:= Lit constrains that element's type.
    Index = N + 1,
    case beam_types:set_tuple_element(Index, LitType, #{}) of
        #{ Index := _ }=Es ->
            [{Tuple,#t_tuple{size=Index,elements=Es}}];
        #{} ->
            %% Index was above the element limit; subtraction is not safe.
            []
    end;
infer_eq_lit(_, _) ->
    [].
%% Join two type databases. Iterating over the keys of the smaller map
%% keeps the work proportional to the smaller of the two.
join_types(Ts, Ts) ->
    Ts;
join_types(LHS, RHS) ->
    if
        map_size(LHS) < map_size(RHS) ->
            join_types_1(maps:keys(LHS), RHS, LHS);
        true ->
            join_types_1(maps:keys(RHS), LHS, RHS)
    end.

%% Joins two type maps, keeping the variables that are common to both maps.
join_types_1([V | Vs], Bigger, Smaller) ->
    case {Bigger, Smaller} of
        {#{ V := Same }, #{ V := Same }} ->
            join_types_1(Vs, Bigger, Smaller);
        {#{ V := LHS0 }, #{ V := RHS0 }} ->
            %% Inlined concrete_type/2 for performance.
            LHS = case is_function(LHS0) of
                      true -> LHS0(Bigger);
                      false -> LHS0
                  end,
            RHS = case is_function(RHS0) of
                      true -> RHS0(Smaller);
                      false -> RHS0
                  end,
            T = beam_types:join(LHS, RHS),
            join_types_1(Vs, Bigger, Smaller#{ V := T });
        {#{}, #{ V := _ }} ->
            %% Only known on one side; drop it.
            join_types_1(Vs, Bigger, maps:remove(V, Smaller))
    end;
join_types_1([], _Bigger, Smaller) ->
    Smaller.
%% Intersect the inferred types with the current type database. Returns
%% 'none' if any variable's type becomes impossible.
meet_types([{V,T0}|Vs], Ts) ->
    T1 = concrete_type(V, Ts),
    case beam_types:meet(T0, T1) of
        none -> none;
        T1 -> meet_types(Vs, Ts);
        T -> meet_types(Vs, Ts#{ V := T })
    end;
meet_types([], Ts) ->
    Ts.

%% Subtract the inferred types from the current type database. Returns
%% 'none' if any variable's type becomes impossible.
subtract_types([{V,T0}|Vs], Ts) ->
    T1 = concrete_type(V, Ts),
    case beam_types:subtract(T1, T0) of
        none -> none;
        T1 -> subtract_types(Vs, Ts);
        T -> subtract_types(Vs, Ts#{ V:= T })
    end;
subtract_types([], Ts) ->
    Ts.
%% Element-wise join of two equally long type lists; crashes if the
%% lengths differ.
parallel_join(As, Bs) ->
    [beam_types:join(A, B) || {A, B} <- lists:zip(As, Bs)].
%% Greatest common divisor via Euclid's algorithm; B must be non-zero.
gcd(A, B) ->
    Rem = A rem B,
    if
        Rem =:= 0 -> B;
        true -> gcd(B, Rem)
    end.
%%%
%%% Helpers
%%%

%% Gather per-function metadata: whether the number of distinct success
%% types must be limited (too many return terminators), and the set of
%% variables used exactly once — in the terminator of their defining block
%% (function parameters excluded).
init_metadata(FuncId, Linear, Params) ->
    {RetCounter, UsedOnce0} = init_metadata_1(reverse(Linear), 0, #{}),
    UsedOnce = maps:without(Params, UsedOnce0),

    #metadata{ func_id = FuncId,
               limit_return = (RetCounter >= ?RETURN_LIMIT),
               params = Params,
               used_once = UsedOnce }.

init_metadata_1([{L,#b_blk{is=Is,last=Last}} | Bs], RetCounter0, Uses0) ->
    %% Track the number of return terminators in use. See ?RETURN_LIMIT for
    %% details.
    RetCounter = case Last of
                     #b_ret{} -> RetCounter0 + 1;
                     _ -> RetCounter0
                 end,

    %% Calculate the set of variables that are only used once in the
    %% terminator of the block that defines them. That will allow us
    %% to discard type information for variables that will never be
    %% referenced by the successor blocks, potentially improving
    %% compilation times.
    Uses1 = used_once_last_uses(beam_ssa:used(Last), L, Uses0),
    Uses = used_once_2(reverse(Is), L, Uses1),
    init_metadata_1(Bs, RetCounter, Uses);
init_metadata_1([], RetCounter, Uses) ->
    {RetCounter, Uses}.
%% Walk a block's instructions backwards; keep a destination in the map
%% only if its sole use so far was in the terminator of its own block.
used_once_2([#b_set{dst=Dst}=I|Is], L, Uses0) ->
    Uses = used_once_uses(beam_ssa:used(I), L, Uses0),
    case Uses of
        #{Dst:=L} ->
            used_once_2(Is, L, Uses);
        #{} ->
            %% Used more than once or used once
            %% in another block.
            used_once_2(Is, L, maps:remove(Dst, Uses))
    end;
used_once_2([], _, Uses) -> Uses.

%% A use inside an ordinary (non-terminator) instruction disqualifies the
%% variable for the used-once optimization.
used_once_uses([V|Vs], L, Uses) ->
    case Uses of
        #{V:=more_than_once} ->
            used_once_uses(Vs, L, Uses);
        #{} ->
            %% Already used or first use is not in
            %% a terminator.
            used_once_uses(Vs, L, Uses#{V=>more_than_once})
    end;
used_once_uses([], _, Uses) -> Uses.

%% Record uses that occur in a block terminator.
used_once_last_uses([V|Vs], L, Uses) ->
    case Uses of
        #{V:=more_than_once} ->
            %% Used at least twice before.
            used_once_last_uses(Vs, L, Uses);
        #{V:=_} ->
            %% Second time this variable is used.
            used_once_last_uses(Vs, L, Uses#{V:=more_than_once});
        #{} ->
            %% First time this variable is used.
            used_once_last_uses(Vs, L, Uses#{V=>L})
    end;
used_once_last_uses([], _, Uses) -> Uses.
%%
%% Ordered worklist used in signatures/2.
%%
%% This is equivalent to consing (wl_add) and appending (wl_defer_list)
%% to a regular list, but avoids unnecessary work by reordering elements.
%%
%% We can do this since a function only needs to be visited *once* for all
%% prior updates to take effect, so if an element is added to the front, then
%% all earlier instances of the same element are redundant.
%%
-record(worklist,
        { counter = 0 :: integer(),
          elements = gb_trees:empty() :: gb_trees:tree(integer(), term()),
          indexes = #{} :: #{ term() => integer() } }).

-type worklist() :: #worklist{}.

wl_new() -> #worklist{}.

%% Adds an element to the worklist, or moves it to the front if it's already
%% present.
wl_add(Element, #worklist{counter=Counter,elements=Es,indexes=Is}) ->
    case Is of
        #{ Element := Index } ->
            wl_add_1(Element, Counter, gb_trees:delete(Index, Es), Is);
        #{} ->
            wl_add_1(Element, Counter, Es, Is)
    end.

wl_add_1(Element, Counter0, Es0, Is0) ->
    Counter = Counter0 + 1,
    Es = gb_trees:insert(Counter, Element, Es0),
    Is = Is0#{ Element => Counter },
    #worklist{counter=Counter,elements=Es,indexes=Is}.

%% All mutations bump the counter, so we can check for changes without a deep
%% comparison.
wl_changed(#worklist{counter=Same}, #worklist{counter=Same}) -> false;
wl_changed(#worklist{}, #worklist{}) -> true.

%% Adds the given elements to the back of the worklist, skipping the elements
%% that are already present. This lets us append elements arbitrarily after the
%% current front without changing the work order.
%%
%% Deferred elements get negative keys, placing them behind everything
%% added with wl_add/2 (the front of the list is the largest tree key).
wl_defer_list(Elements, #worklist{counter=Counter,elements=Es,indexes=Is}) ->
    wl_defer_list_1(Elements, Counter, Es, Is).

wl_defer_list_1([Element | Elements], Counter0, Es0, Is0) ->
    case Is0 of
        #{ Element := _ } ->
            wl_defer_list_1(Elements, Counter0, Es0, Is0);
        #{} ->
            Counter = Counter0 + 1,
            Es = gb_trees:insert(-Counter, Element, Es0),
            Is = Is0#{ Element => -Counter },
            wl_defer_list_1(Elements, Counter, Es, Is)
    end;
wl_defer_list_1([], Counter, Es, Is) ->
    #worklist{counter=Counter,elements=Es,indexes=Is}.

%% Peek at the front of the worklist without removing it.
wl_next(#worklist{indexes=Is}) when Is =:= #{} ->
    empty;
wl_next(#worklist{elements=Es,indexes=Is}) when Is =/= #{} ->
    {_Key, Element} = gb_trees:largest(Es),
    {ok, Element}.

%% Removes the front of the worklist.
wl_pop(Element, #worklist{counter=Counter0,elements=Es0,indexes=Is0}=Wl) ->
    Counter = Counter0 + 1,
    {_Key, Element, Es} = gb_trees:take_largest(Es0), %Assertion.
    Is = maps:remove(Element, Is0),
    Wl#worklist{counter=Counter,elements=Es,indexes=Is}.
%%
%% Copyright (c) 2018 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% Support routines for running local (ct_slave-based) trcb experiments:
%% node startup and configuration, full-mesh clustering, experiment start,
%% completion polling, and teardown.
-module(trcb_exp_local_experiments_support).
-author("<NAME> <<EMAIL>").

-include("trcb_exp.hrl").

-export([run_trcb/1]).
%% @doc Run a full local experiment: start the slave nodes, build and
%% verify the full-mesh overlay, kick off the experiment on every node,
%% wait until all nodes report completion, and finally stop the nodes.
run_trcb(Options) ->
    {IToNode, Nodes} = start(Options),
    construct_overlay_trcb(IToNode),
    verify_overlay_trcb(IToNode),
    start_experiment(Nodes),
    wait_for_completion(Nodes),
    stop(IToNode).
%% @private Tell every node's experiment runner to start.
start_experiment(Nodes) ->
    %% wait for connectedness
    timer:sleep(5000),

    lists:foreach(
        fun(Node) ->
            ok = rpc:call(Node, trcb_exp_experiment_runner, start, [])
        end,
        Nodes
    ).
%% @private Start nodes.
%% Boots `node_number' ct_slave nodes, loads and configures the needed
%% applications on each, and finally starts ?APP everywhere. Returns
%% {IToNode, Nodes} where IToNode is an orddict from index to node name.
start(Options) ->
    ok = start_erlang_distribution(),
    NodeNumber = proplists:get_value(node_number, Options),

    %% Boot one slave node per index 0..NodeNumber-1, remembering the
    %% index -> node mapping.
    InitializerFun = fun(I, Acc) ->
                             %ct:pal("Starting node: ~p", [I]),

                             %% Start node
                             Config = [{monitor_master, true},
                                       {startup_functions, [{code, set_path, [codepath()]}]}],

                             Name = get_node_name(I),
                             case ct_slave:start(Name, Config) of
                                 {ok, Node} ->
                                     orddict:store(I, Node, Acc);
                                 Error ->
                                     ct:fail(Error)
                             end
                     end,
    IToNode = lists:foldl(InitializerFun,
                          orddict:new(),
                          lists:seq(0, NodeNumber - 1)),
    Nodes = [Node || {_I, Node} <- IToNode],

    %% Load the applications and push the trcb settings onto every node.
    LoaderFun = fun(Node) ->
                        %% Load partisan
                        ok = rpc:call(Node, application, load, [partisan]),

                        TRCBSettingsTemp = proplists:get_value(trcb_exp_settings, Options),
                        Mode = proplists:get_value(trcb_exp_mode, TRCBSettingsTemp),
                        DropRatio = proplists:get_value(trcb_exp_drop_ratio, TRCBSettingsTemp),
                        Latency = proplists:get_value(trcb_exp_latency, TRCBSettingsTemp),
                        CheckResendInterval = proplists:get_value(trcb_exp_check_resend_interval, TRCBSettingsTemp),
                        ResendInterval = proplists:get_value(trcb_exp_resend_interval, TRCBSettingsTemp),

                        %% 'ping' mode uses pingserv instead of trcb.
                        case Mode of
                            ping ->
                                ok = rpc:call(Node, application, load, [pingserv]);
                            _ ->
                                %% Load trcb
                                ok = rpc:call(Node, application, load, [trcb]),
                                ok = rpc:call(Node, trcb_config, set, [trcb_mode, Mode]),
                                ok = rpc:call(Node, trcb_config, set, [trcb_drop_ratio, DropRatio]),
                                ok = rpc:call(Node, trcb_config, set, [trcb_latency, Latency]),
                                ok = rpc:call(Node, trcb_config, set, [trcb_check_resend_interval, CheckResendInterval]),
                                ok = rpc:call(Node, trcb_config, set, [trcb_resend_interval, ResendInterval])
                        end,

                        %% Load trcb_exp
                        ok = rpc:call(Node, application, load, [?APP]),

                        %% Set lager log dir
                        PrivDir = code:priv_dir(?APP),
                        NodeDir = filename:join([PrivDir, "lager", Node]),
                        ok = rpc:call(Node,
                                      application,
                                      set_env,
                                      [lager, log_root, NodeDir])
                end,
    lists:foreach(LoaderFun, Nodes),

    ConfigureFun = fun(Node) ->
                           %% Configure trcb_exp
                           TRCBSettings0 = proplists:get_value(trcb_exp_settings, Options),
                           %% Stamp all nodes with the experiment timestamp.
                           TRCBSettings1 = TRCBSettings0
                               ++ [{trcb_exp_timestamp, trcb_exp_util:generate_timestamp(?UNIT)}],

                           lists:foreach(
                             fun({Property, Value}) ->
                                     ok = rpc:call(Node,
                                                   trcb_exp_config,
                                                   set,
                                                   [Property, Value])
                             end,
                             TRCBSettings1
                            )
                   end,
    lists:foreach(ConfigureFun, Nodes),

    StartFun = fun(Node) ->
                       {ok, _} = rpc:call(Node,
                                          application,
                                          ensure_all_started,
                                          [?APP])
               end,
    lists:foreach(StartFun, Nodes),

    {IToNode, Nodes}.
%% @private Connect each node to all other nodes.
construct_overlay_trcb(Nodes) ->
    ct:pal("Clustering nodes."),
    lists:foreach(fun(Node) -> cluster(Node, Nodes) end, Nodes).
%% @private
%%
%% We have to cluster each node with all other nodes to compute the
%% correct overlay: for instance, sometimes you'll want to establish a
%% client/server topology, which requires all nodes talk to every other
%% node to correctly compute the overlay.
%%
cluster({Name, _Node} = Myself, Nodes) when is_list(Nodes) ->
    %% Omit just ourselves.
    OtherNodes = omit([Name], Nodes),
    lists:foreach(fun(OtherNode) -> join(Myself, OtherNode) end, OtherNodes).
%% @private Return `Nodes0' without the entries whose name (first tuple
%% element) appears in `OmitNameList', preserving the original order.
omit(OmitNameList, Nodes0) ->
    %% A comprehension avoids the accidental O(n^2) `Acc ++ [N]' append
    %% of the previous fold while keeping the same order and semantics.
    [N || {Name, _Node} = N <- Nodes0,
          not lists:member(Name, OmitNameList)].
%% @private Join `Node' to `OtherNode' through partisan, using the peer
%% port `OtherNode' is configured with (default 9000).
join({_, Node}, {_, OtherNode}) ->
    PeerPort = rpc:call(OtherNode,
                        partisan_config,
                        get,
                        [peer_port, 9000]),
    ct:pal("Joining node: ~p to ~p at port ~p", [Node, OtherNode, PeerPort]),
    ok = rpc:call(Node,
                  partisan_peer_service,
                  join,
                  [{OtherNode, {127, 0, 0, 1}, PeerPort}]).
%% @private Check that every node's membership view contains exactly all
%% of the other nodes; fail the test case otherwise.
verify_overlay_trcb(Nodes) ->
    %% Pause for clustering.
    timer:sleep(10000),

    %% Verify membership.
    %%
    VerifyFun = fun({_Name, Node}) ->
                        {ok, Members} = rpc:call(Node, partisan_static_peer_service_manager, members, []),
                        % {ok, Members} = rpc:call(Node, partisan_default_peer_service_manager, members, []),

                        %% If this node is a server, it should know about all nodes.
                        SortedNodes = lists:usort([N || {_, N} <- Nodes]) -- [Node],
                        SortedMembers = lists:usort(Members) -- [Node],

                        case SortedMembers =:= SortedNodes of
                            true ->
                                ok;
                            false ->
                                ct:fail("Membership incorrect; node ~p should have ~p but has ~p", [Node, SortedNodes, SortedMembers])
                        end
                end,

    %% Verify the membership is correct.
    lists:foreach(VerifyFun, Nodes).
%% @private Poll nodes to see if experiment is ended.
%% Retries up to 100 times, every 10 seconds; fails the test case if not
%% all nodes have flagged `trcb_exp_experiment_end' by then.
wait_for_completion(Nodes) ->
    ct:pal("Waiting for experiment to end"),

    NodeNumber = length(Nodes),

    Result = wait_until(
               fun() ->
                       %% Count the nodes that have flagged the end of the
                       %% experiment in their configuration.
                       Ended = lists:foldl(
                                 fun(Node, Acc) ->
                                         ExperimentEnd = rpc:call(Node,
                                                                  trcb_exp_config,
                                                                  get,
                                                                  [trcb_exp_experiment_end,
                                                                   false]),
                                         case ExperimentEnd of
                                             true ->
                                                 Acc + 1;
                                             false ->
                                                 Acc
                                         end
                                 end,
                                 0,
                                 Nodes
                                ),
                       %ct:pal("~p of ~p with experiment as true", [Ended, NodeNumber]),
                       Ended == NodeNumber
               end,
               100,      %% 100 retries
               10 * 1000 %% every 10 seconds
              ),
    case Result of
        ok ->
            ct:pal("Experiment ended with success");
        fail ->
            ct:fail("Experiment failed")
    end.
%% @private Stop nodes.
%% Shuts down every slave node by its index-derived name; fails the test
%% case if any node cannot be stopped.
stop(IToNode) ->
    StopFun = fun({I, _Node}) ->
                      Name = get_node_name(I),
                      case ct_slave:stop(Name) of
                          {ok, _} ->
                              ok;
                          Error ->
                              ct:fail(Error)
                      end
              end,
    lists:foreach(StopFun, IToNode).
%% @private Start erlang distribution.
start_erlang_distribution() ->
    %% Make sure epmd is running before starting the distributed node.
    os:cmd(os:find_executable("epmd") ++ " -daemon"),
    {ok, Hostname} = inet:gethostname(),
    case net_kernel:start([list_to_atom("runner@" ++ Hostname), shortnames]) of
        {ok, _} ->
            ok;
        {error, {already_started, _}} ->
            %% Distribution was already running; that is fine.
            ok
    end.
%% @private The current code path, restricted to directories that exist.
codepath() ->
    [Dir || Dir <- code:get_path(), filelib:is_dir(Dir)].
%% @private Short node name for index `I', e.g. 'n0', 'n1', ...
get_node_name(I) ->
    Name = "n" ++ integer_to_list(I),
    list_to_atom(Name).
%% @doc Wait until `Fun' returns true or `Retry' reaches 0.
%% The sleep time between retries is `Delay' (milliseconds). Returns `ok'
%% on success and `fail' when the retries are exhausted.
wait_until(_Fun, 0, _Delay) ->
    fail;
wait_until(Fun, Retry, Delay) when Retry > 0 ->
    Done = Fun(),
    case Done of
        true ->
            ok;
        _Other ->
            timer:sleep(Delay),
            wait_until(Fun, Retry - 1, Delay)
    end.
%% @doc Size-constrained leftist tree
%% Inspired by <a href="http://www.cise.ufl.edu/~sahni/cop5536/powerpoint/lec11.ppt">Leftist Trees</a> by <NAME>.
%%
%% The purpose of this module is to efficiently store a limited number of
%% values in e.g. a lossy histogram (ex. {@link exometer_slot_slide}). The
%% complexity of insert operations is log(N), but once the tree is full,
%% only values higher than the minimum value already in the tree will be
%% inserted, and the old minimum is deleted - i.e. two O(log N) operations.
%% For other values, the cost will be only two comparisons, since the
%% top node in the tree always contains the minimum.
%% @end
-module(exometer_shallowtree).

-export([new/1,
         insert/3,
         take_min/1,
         to_list/1,
         filter/2,
         size/1,
         limit/1]).

-export([fill/1, fill1/2]).

-export_type([tree/0]).

%% Tree nodes are {Key, Value, _, Left, Right} 5-tuples, built by mknode/2
%% (defined later in the file; the third field is presumably the leftist
%% s-value/rank — TODO confirm against mknode/meld).
-record(t, {size = 0,
            limit = 10,
            tree = []}).

-type tree() :: #t{}.
-spec new(pos_integer()) -> tree().
%% @doc Create an empty tree that will hold at most `Limit' values.
new(Limit) when is_integer(Limit), Limit > 0 ->
    #t{limit = Limit}.
-spec size(tree()) -> non_neg_integer().
%% @doc Returns the number of values stored in the given tree.
%% O(1): the count is maintained in the record, not recomputed.
size(#t{size = Sz}) ->
    Sz.
-spec limit(tree()) -> non_neg_integer().
%% @doc Returns the maximum number of values for the given tree.
%% O(1): reads the limit set at creation time by new/1.
limit(#t{limit = L}) ->
    L.
-spec insert(number(), any(), tree()) -> tree().
%% @doc Insert value `V' into tree `T'.
%%
%% If the tree is full and `V' is smaller than the minimum, this function
%% will return immediately, leaving the tree unchanged.
%% @end
%% First clause matches only when size == limit (same variable X bound
%% to both fields). element(1, Tr) is the minimum key, since the heap
%% root always holds the minimum.
insert(K, V, #t{size = X, limit = X, tree = Tr} = T) when is_number(K) ->
    case K =< element(1, Tr) of
        true ->
            %% Keys equal to the current minimum are dropped as well.
            T;
        false ->
            %% Two O(log N) steps: evict the minimum, insert the new key.
            {_, _, Tr1} = take_min_(Tr),
            T#t{tree = insert_(K, V, Tr1)}
    end;
insert(K, V, #t{size = Sz, tree = Tr} = T) when is_number(K) ->
    T#t{size = Sz+1, tree = insert_(K, V, Tr)}.
%% Insert into the raw heap representation: an empty heap becomes a
%% singleton node, otherwise a fresh singleton is melded in.
insert_(K, V, Tree) ->
    case Tree of
        [] -> mknode(K, V);
        _ -> meld(mknode(K, V), Tree)
    end.
-spec take_min(tree()) -> {number(), any(), tree()} | error.
%% @doc Extract the smallest value from the tree `T'.
%%
%% If the tree is empty, `error' is returned, otherwise
%% `{Key, Value, NewTree}' where `Key' is the minimum key.
%% @end
take_min(#t{size = Sz, tree = Tr} = T) ->
    case take_min_(Tr) of
        error -> error;
        {K, V, Tr1} ->
            {K, V, T#t{size = Sz-1, tree = Tr1}}
    end.
%% Pop the root (the minimum) of a raw heap; the remaining heap is the
%% meld of its two children. An empty heap yields `error'.
take_min_({K, V, _S, Left, Right}) ->
    {K, V, meld(Left, Right)};
take_min_([]) ->
    error.
-spec to_list(tree()) -> [{number(), any()}].
%% @doc Converts a tree to a list.
%%
%% The list will not be ordered, since the aim is to produce the list as
%% quickly as possible. Also, `lists:sort(to_list(Tree))', if to_list/1
%% uses brute force, seems faster than most approaches for extracting
%% values in order.
%% @end
%% O(n) in the number of stored values.
to_list(#t{tree = T}) -> to_list_([T]).
%% Walk a work list of sub-heaps, emitting {Key, Value} pairs in
%% unspecified order. Children are pushed onto the work list rather
%% than recursed into, keeping the traversal simple.
to_list_([]) ->
    [];
to_list_([[] | Rest]) ->
    to_list_(Rest);
to_list_([{K, V, _S, L, R} | Rest]) ->
    [{K, V} | to_list_([L, R | Rest])].
%% @doc Apply `F' to every {Key, Value} node; `F' must return `false'
%% to drop an entry or `{true, Keep}' to emit `Keep'. Result order is
%% unspecified, mirroring to_list/1.
filter(F, #t{tree = T}) -> filter_(F, [T]).
%% Work-list traversal backing filter/2. `Pred' is called on each node
%% before its children are visited, preserving the original call order.
filter_(_Pred, []) ->
    [];
filter_(Pred, [[] | Rest]) ->
    filter_(Pred, Rest);
filter_(Pred, [{K, V, _S, L, R} | Rest]) ->
    case Pred(K, V) of
        false ->
            filter_(Pred, [L, R | Rest]);
        {true, Keep} ->
            [Keep | filter_(Pred, [L, R | Rest])]
    end.
%% Meld (merge) two raw heaps: the smaller root becomes the new root,
%% and the merge recurses down its right spine; mknode/4 restores the
%% leftist shape on the way back up.
meld({K1,V1, _, L1, R1} = T1, {K2,V2, _, L2, R2} = T2) ->
    case K1 < K2 of
        true ->
            mknode(K1,V1, L1, meld(R1, T2));
        false ->
            mknode(K2,V2, L2, meld(R2, T1))
    end;
meld([], T2) -> T2;
meld(T1, []) -> T1;
%% NOTE(review): this final clause is unreachable -- meld([], T2)
%% above already matches meld([], []).
meld([], []) -> [].
%% Build heap nodes of shape {Key, Value, S, Left, Right}. S is the
%% s-value used to keep the tree "leftist": the child with the larger
%% S is placed on the left, and the stored S is taken from the child
%% that ends up on the right, plus one.
mknode(K,V) -> {K,V,1,[],[]}.
mknode(K,V,{_,_,S1,_,_} = T1, {_,_,S2,_,_} = T2) when S1 < S2 ->
    {K,V, S1+1, T2, T1};
mknode(K,V, [], [] ) -> {K,V, 1 , [], []};
%% NOTE(review): with exactly one empty child the classic leftist
%% s-value would be 1 (distance to the missing child), but these two
%% clauses store S+1 of the non-empty child instead. This can only
%% affect balance, not the correctness of min extraction -- confirm
%% against the referenced Sahni lecture before changing.
mknode(K,V, [], {_,_,S2,_,_} = T2) -> {K,V, S2+1, T2, []};
mknode(K,V, {_,_,S1,_,_} = T1, []) -> {K,V, S1+1, T1, []};
mknode(K,V, T1, {_,_,S2,_,_} = T2) -> {K,V, S2+1, T1, T2}.
%% Micro-benchmark helper: time filling a tree of `Size' slots with
%% the integers 1..Size. Returns {Microseconds, Tree} via timer:tc/3.
fill(Size) ->
    L = lists:seq(1,Size),
    T0 = new(Size),
    timer:tc(?MODULE, fill1, [L, T0]).
%% Benchmark loop for fill/1: insert each key with the dummy value `x'.
fill1([H|T], Tree) ->
    fill1(T, insert(H, x, Tree));
fill1([], Tree) ->
    Tree. | _build/default/lib/exometer_core/src/exometer_shallowtree.erl | 0.723212 | 0.684518 | exometer_shallowtree.erl | starcoder |
% -*- indent-tabs-mode:nil; -*-
%%%-------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright (C) 2020, <NAME>
%%% @doc
%%%
%%% Solve the 8 queens puzzle concurrently using tuple space.
%%%
%%% The chessboard is represented as a `map' of `Row=>Col'
%%% elements. Rows and column numbers start at `1', similar to a
%%% typical chessboard.
%%%
%%% The puzzle is solved by one or more `solver' workers, each taking
%%% a partially covered unique chessboard and generating zero or more
%%% unique boards with the placement of another non-attacking queen.
%%%
%%% For example, starting with the empty chessboard tuple, `{new,
%%% #{}}', one of the solvers will replace it with 8 tuples of the
%%% form `{new, #{1=>Col}}', where `Col' takes the values 1 to
%%% 8. Next, `{new, #{1=>1}}' will be replaced with `{new, #{1=>1,
%%% 2=>3}}', etc.
%%%
%%% During the course of solving the puzzle various tuples will appear
%%% in the tuple space. These are:
%%%
%%% `{new, Board}': `Board' is a new partial solution, which will be
%%% processed by the solver.
%%%
%%% `{deadend, Board}': `Board' is a partial solution in which every
%%% free square on the next row is attacked by the queens already placed.
%%%
%%% `{solution, Board}': `Board' is a completed solution.
%%%
%%% @end
%%% Created : 17 Dec 2020 by <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(queens8).
-include_lib("kernel/include/logger.hrl").
-define(Num_queens, 8).
-define(Num_solvers, 2).
-export([start/0, get_solutions/0, print_solution/0]).
%%--------------------------------------------------------------------
%% @doc Given the positions of a queen and a list of queens, returns
%% `true' if the single queen can attack any of the queens in the
%% list.
%%
%% We do not check if the queens within the list can attack each
%% other.
%%
%% Two queens can attack each other if they are on the same row, or
%% the same column, or on the same diagonal (the differences between
%% rows and columns are the same).
%%
%% We expect the row and column numbers to be non-zero.
%%
%% @end
%%--------------------------------------------------------------------
-spec attacks_one(tuple(), tuple()) -> boolean().
%% Shared row or shared column means a direct attack.
attacks_one({R1, C1}, {R2, C2}) when R1 =:= R2; C1 =:= C2 ->
    true;
%% Otherwise the queens attack iff they sit on a common diagonal,
%% i.e. the row offset equals the column offset.
attacks_one({R1, C1}, {R2, C2}) ->
    abs(R1 - R2) == abs(C1 - C2).
%%--------------------------------------------------------------------
%% @doc check if the `Queen' can attack any of the queens on `Board'.
%%
%% The board is a map of `{Row=>Col}' elements.
%%
%% @end
%%--------------------------------------------------------------------
-spec attacks_any(tuple(), map()) -> boolean().
%% True if `Queen' can attack any queen on `Board' (a Row => Col map).
attacks_any(Queen, Board) ->
    lists:any(fun({R, C}) -> attacks_one(Queen, {R, C}) end,
              maps:to_list(Board)).
%%--------------------------------------------------------------------
%% @doc Start the processes.
%%
%% We start `espace', along with the `solver' worker processes.
%%
%% The initial `new' tuple, which is a board with no queens, is added
%% to the tuple space. This will kick off the whole process of finding
%% valid solutions.
%%
%% @end
%%--------------------------------------------------------------------
-spec start() -> ok.
start() ->
    %% Exit deliberately if espace cannot start: the solvers and the
    %% seed tuple below are useless without the tuple space.
    case application:ensure_all_started(espace) of
        {ok, Started} ->
            ?LOG_INFO("espace apps started: ~p.", [Started]);
        {error, Reason} ->
            ?LOG_ERROR("could not start espace: ~p", [Reason]),
            exit("startup failed")
    end,
    Worker = fun (_) ->
                     espace:worker({fun worker_solver/0, []})
             end,
    lists:foreach(Worker, lists:seq(1, ?Num_solvers)),
    %% Seed the tuple space with an empty board; this kicks off the
    %% whole search (see module doc).
    espace:out({new, #{}}).
%%--------------------------------------------------------------------
%% @doc Wait for a `new' tuple and process it.
%%
%% @end
%%--------------------------------------------------------------------
-spec worker_solver() -> term().
worker_solver() ->
    %% Blocks until a {new, Board} tuple is available, consumes it,
    %% expands it via check_board/1, then loops forever.
    {[Board], _} = espace:in({new, '$1'}),
    check_board(Board),
    worker_solver().
%%--------------------------------------------------------------------
%% @doc Given the positions of queens on a chessboard, generate the
%% next set of positions from this one.
%%
%% The input, `Board', is a `map' of `Row => Col' elements, each
%% representing the position of a queens on the chessboard.
%%
%% We can always expect a partial board, since a full board would have
%% been caught during a previous iteration.
%%
%% We pick the lowest numbered free row. We then check all columns of
%% that row looking for positions that cannot attack any of the
%% existing queens.
%%
%% If the board has all the queens in place, we have a solution
%% so we produce a `{solution, Board}' tuple.
%%
%% For each non-attacking position found, we generate a `new'
%% tuple. If no such postions are found, we generate a `deadend'
%% tuple.
%%
%% @end
%%--------------------------------------------------------------------
-spec check_board(map()) -> ok.
check_board(Board) ->
    %% find the next available row
    Next_row = maps:size(Board)+1,
    %% Every placed queen occupies a distinct column, so N placed
    %% queens leave exactly ?Num_queens - N candidate columns.
    Cols_avail = lists:seq(1, ?Num_queens) -- maps:values(Board),
    Squares_avail = [{Next_row, Col} || Col <- Cols_avail],
    Non_attacking = fun ({R,C}) -> not attacks_any({R,C}, Board) end,
    Squares_to_check = lists:filter(Non_attacking, Squares_avail),
    case Squares_to_check of
        [] -> %% we cannot go any further
            espace:out({deadend, Board});
        [{R=?Num_queens,C}] -> %% solution! we've reached the last row with a non-attacking column!
            %% Single-element pattern is safe: on the last row only one
            %% column remains free (the other 7 are taken), so the
            %% solved case can never produce more than one square.
            espace:out({solution, Board#{R=>C}});
        _ -> %% at least one more row can be checked after the current one
            New_board = fun ({R,C}) -> espace:out({new, Board#{R=>C}}) end,
            lists:foreach(New_board, Squares_to_check)
    end.
%%--------------------------------------------------------------------
%% @doc Extract the solutions from the tuple space.
%%
%% The solutions are returned as a list of maps.
%%
%% @end
%%--------------------------------------------------------------------
-spec get_solutions() -> list().
%% Entry point: start the accumulation with an empty list.
get_solutions() ->
    get_solutions([]).
%%--------------------------------------------------------------------
%% @doc Extract the list of solutions from the tuple space.
%%
%% We repeatedly extract the `solution' tuples and build a list of the
%% solutions.
%%
%% @end
%%--------------------------------------------------------------------
-spec get_solutions(list()) -> list().
get_solutions(Solutions) ->
    %% espace:inp/1 is the non-blocking take; `nomatch' means no more
    %% solution tuples remain and the accumulated list is returned.
    case espace:inp({solution, '$1'}) of
        nomatch ->
            Solutions;
        {[S], _} ->
            get_solutions([S|Solutions])
    end.
%%--------------------------------------------------------------------
%% @doc If a solution exists in the tuple space, extract and print it.
%%
%% @end
%%--------------------------------------------------------------------
-spec print_solution() -> ok.
print_solution() ->
    %% Non-blocking take: consumes (and thereby removes) one solution
    %% tuple from the space, if any is present.
    case espace:inp({solution, '$1'}) of
        nomatch ->
            io:format("No solutions found.~n");
        {[Board], _} ->
            print_board(Board)
    end.
%%--------------------------------------------------------------------
%% @doc Print a solution map as a chessboard.
%%
%% Once the rows are printed, a horizontal line is printed at the end.
%%
%% @end
%%--------------------------------------------------------------------
-spec print_board(map()) -> ok.
%% Render a solution map as an ASCII chessboard, rows in ascending
%% order, followed by a closing horizontal line.
print_board(Board) ->
    SortedRows = lists:sort(maps:to_list(Board)),
    ok = lists:foreach(fun print_row/1, SortedRows),
    print_hline().
%%--------------------------------------------------------------------
%% @doc Print a single row.
%%
%% Prints a horizontal line followed by a row of the solution.
%%
%% @end
%%--------------------------------------------------------------------
-spec print_row({integer(), integer()}) -> ok.
%% Print the separator line and then one board row, marking the
%% queen's column with "X" and every other column with a blank cell.
print_row({_Row, Col}) ->
    print_hline(),
    Cells = [case C of
                 Col -> "X|";
                 _ -> " |"
             end || C <- lists:seq(1, ?Num_queens)],
    io:format("|~s~n", [Cells]).
%%--------------------------------------------------------------------
%% @doc Print a horizontal line.
%%
%% @end
%%--------------------------------------------------------------------
-spec print_hline() -> ok.
%% Emits one separator of the form "+-+-+-+-+-+-+-+-+".
print_hline() ->
    Hline = erlang:list_to_binary(lists:duplicate(?Num_queens, "-+")),
    io:format("+~s~n", [Hline]). | queens/src/queens8.erl | 0.508544 | 0.592372 | queens8.erl | starcoder |
%%
%% Copyright (c) 2015-2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Causal Context.
%% The current implementation does not have any optimisations such as
%% the causal context compression.
%%
%% @reference <NAME>, <NAME>, and <NAME>
%% Delta State Replicated Data Types (2016)
%% [http://arxiv.org/pdf/1603.01529v1.pdf]
-module(causal_context).
-author("<NAME> <<EMAIL>>").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/0,
merge/2,
add_dot/2,
max_dot/2,
next_dot/2,
to_dot_set/1,
to_causal_context/1]).
-export_type([causal_context/0]).
-type causal_context() :: ordsets:ordset(dot_store:dot()).
%% @doc Create an empty Causal Context.
-spec new() -> causal_context().
new() ->
ordsets:new().
%% @doc Merge two Causal Contexts.
-spec merge(causal_context(), causal_context()) -> causal_context().
merge(CausalContextA, CausalContextB) ->
ordsets:union(CausalContextA, CausalContextB).
-spec add_dot(dot_store:dot(), causal_context()) -> causal_context().
add_dot(Dot, CausalContext) ->
ordsets:add_element(Dot, CausalContext).
%% @doc Get `dot_actor()''s max dot
-spec max_dot(dot_store:dot_actor(), causal_context()) -> dot_store:dot().
max_dot(DotActor, CausalContext) ->
MaxValue = ordsets:fold(
fun({Actor, Value}, CurrentMax) ->
case Actor == DotActor andalso Value > CurrentMax of
true ->
Value;
false ->
CurrentMax
end
end,
0,
CausalContext
),
{DotActor, MaxValue}.
%% @doc Get `dot_actor()''s next dot
-spec next_dot(dot_store:dot_actor(), causal_context()) -> dot_store:dot().
next_dot(DotActor, CausalContext) ->
{DotActor, MaxValue} = max_dot(DotActor, CausalContext),
{DotActor, MaxValue + 1}.
%% @doc Convert a CausalContext to a DotSet
-spec to_dot_set(causal_context()) -> dot_store:dot_set().
to_dot_set(CausalContext) ->
ordsets:fold(
fun(Dot, DotSet) ->
dot_set:add_element(Dot, DotSet)
end,
dot_set:new(),
CausalContext
).
%% @doc Given a DotStore, extract a Causal Context.
-spec to_causal_context(dot_store:dot_store()) -> causal_context().
to_causal_context({dot_set, DotSet}) ->
ordsets:fold(
fun(Dot, CausalContext) ->
causal_context:add_dot(Dot, CausalContext)
end,
causal_context:new(),
DotSet
);
to_causal_context({{dot_fun, _CRDTType}, DotFun}) ->
Dots = orddict:fetch_keys(DotFun),
lists:foldl(
fun(Dot, CausalContext) ->
causal_context:add_dot(Dot, CausalContext)
end,
causal_context:new(),
Dots
);
to_causal_context({{dot_map, _DotStoreType}, DotMap}) ->
orddict:fold(
fun(_Key, SubDotStore, CausalContext) ->
causal_context:merge(
to_causal_context(SubDotStore),
CausalContext
)
end,
causal_context:new(),
DotMap
). | _build/default/lib/types/src/causal_context.erl | 0.638272 | 0.411288 | causal_context.erl | starcoder |
-file("elm_core/src/basics.erlt", 1).
-module(basics).
-eqwalizer_unchecked([]).
-export([add/2, add/1, sub/1, mul/2, mul/1, idiv/1]).
-export([eq/2, eq/1, neq/2, neq/1]).
-export_type([order/0]).
-export([lt/2,
lt/1,
gt/2,
gt/1,
le/2,
le/1,
ge/2,
ge/1,
min/2,
min/1,
max/2,
max/1,
compare/2,
compare/1]).
-export(['not'/1,
'and'/2,
'and'/1,
'or'/2,
'or'/1,
'xor'/2,
'xor'/1]).
-export([mod_by/2,
mod_by/1,
remainder_by/2,
remainder_by/1,
negate/1,
abs/1,
clamp/3]).
-export_type([never/0]).
-export([composeL/2,
composeL/1,
composeR/2,
composeR/1,
apR/2,
apR/1,
apL/2,
apL/1,
identity/1,
always/2,
always/1,
never/1]).
%% Integer arithmetic. Each operation has an uncurried f/2 form and a
%% curried f/1 form that captures the first argument and returns a fun
%% over the second.
-spec add(integer(), integer()) -> integer().
add(X1, X2) -> X1 + X2.
-spec add(integer()) -> fun((integer()) -> integer()).
add(X1) -> fun (X2) -> add(X1, X2) end.
-spec sub(integer(), integer()) -> integer().
sub(X1, X2) -> X1 - X2.
-spec sub(integer()) -> fun((integer()) -> integer()).
sub(X1) -> fun (X2) -> sub(X1, X2) end.
-spec mul(integer(), integer()) -> integer().
mul(X1, X2) -> X1 * X2.
-spec mul(integer()) -> fun((integer()) -> integer()).
mul(X1) -> fun (X2) -> mul(X1, X2) end.
%% Integer division truncating toward zero (Erlang `div').
-spec idiv(integer(), integer()) -> integer().
idiv(X1, X2) -> X1 div X2.
-spec idiv(integer()) -> fun((integer()) -> integer()).
idiv(X1) -> fun (X2) -> idiv(X1, X2) end.
%% Equality and inequality. Both now use the coercing comparison
%% operators (== / /=) so that neq is always the exact negation of eq.
%% The previous implementation mixed `==' with the exact `=/=', which
%% made eq(1, 1.0) and neq(1, 1.0) BOTH return true.
-spec eq(A, A) -> boolean().
eq(X1, X2) -> X1 == X2.
-spec eq(A) -> fun((A) -> boolean()).
eq(X1) -> fun (X2) -> eq(X1, X2) end.
-spec neq(A, A) -> boolean().
neq(X1, X2) -> X1 /= X2.
-spec neq(A) -> fun((A) -> boolean()).
neq(X1) -> fun (X2) -> neq(X1, X2) end.
%% Ordering predicates plus min/max, each with a curried f/1 variant.
-spec lt(A, A) -> boolean().
lt(X1, X2) -> X1 < X2.
-spec lt(A) -> fun((A) -> boolean()).
lt(X1) -> fun (X2) -> lt(X1, X2) end.
-spec gt(A, A) -> boolean().
gt(X1, X2) -> X1 > X2.
-spec gt(A) -> fun((A) -> boolean()).
gt(X1) -> fun (X2) -> gt(X1, X2) end.
-spec le(A, A) -> boolean().
le(X1, X2) -> X1 =< X2.
-spec le(A) -> fun((A) -> boolean()).
le(X1) -> fun (X2) -> le(X1, X2) end.
-spec ge(A, A) -> boolean().
ge(X1, X2) -> X1 >= X2.
-spec ge(A) -> fun((A) -> boolean()).
ge(X1) -> fun (X2) -> ge(X1, X2) end.
%% min/max prefer the FIRST argument on ties (lt/gt are strict).
-spec min(A, A) -> A.
min(X1, X2) ->
    case lt(X1, X2) of
        true -> X1;
        false -> X2
    end.
%% The module-qualified calls below disambiguate from the
%% auto-imported BIFs min/2 and max/2.
-spec min(A) -> fun((A) -> A).
min(X1) -> fun (X2) -> basics:min(X1, X2) end.
-spec max(A, A) -> A.
max(X1, X2) ->
    case gt(X1, X2) of
        true -> X1;
        false -> X2
    end.
-spec max(A) -> fun((A) -> A).
max(X1) -> fun (X2) -> basics:max(X1, X2) end.
-type order() :: {'$#basics:order.lt'} |
{'$#basics:order.eq'} |
{'$#basics:order.gt'}.
%% Three-way comparison returning the order() sum type defined above.
-spec compare(A, A) -> order().
compare(X1, X2) ->
    case lt(X1, X2) of
        true -> {'$#basics:order.lt'};
        false ->
            %% Note: equality here is the coercing eq/2 (==).
            case eq(X1, X2) of
                true -> {'$#basics:order.eq'};
                false -> {'$#basics:order.gt'}
            end
    end.
-spec compare(A) -> fun((A) -> order()).
compare(X1) -> fun (X2) -> compare(X1, X2) end.
%% Boolean operators. 'and'/'or' use Erlang's strict `and'/`or', so
%% BOTH operands are evaluated (no short-circuiting).
-spec 'not'(boolean()) -> boolean().
'not'(B) -> not B.
-spec 'and'(boolean(), boolean()) -> boolean().
'and'(B1, B2) -> B1 and B2.
-spec 'and'(boolean()) -> fun((boolean()) -> boolean()).
'and'(B1) -> fun (B2) -> 'and'(B1, B2) end.
-spec 'or'(boolean(), boolean()) -> boolean().
'or'(B1, B2) -> B1 or B2.
-spec 'or'(boolean()) -> fun((boolean()) -> boolean()).
'or'(B1) -> fun (B2) -> 'or'(B1, B2) end.
-spec 'xor'(boolean(), boolean()) -> boolean().
'xor'(B1, B2) -> B1 xor B2.
-spec 'xor'(boolean()) -> fun((boolean()) -> boolean()).
'xor'(B1) -> fun (B2) -> 'xor'(B1, B2) end.
%% Modulo whose result takes the sign of the divisor X1 (Elm's modBy
%% semantics): mod_by(2, -1) =:= 1 and mod_by(-2, 1) =:= -1. The
%% previous implementation computed `X2 div X1' -- integer DIVISION,
%% not a modulo at all (e.g. it returned 2 for mod_by(3, 7)).
-spec mod_by(integer(), integer()) -> integer().
mod_by(X1, X2) -> ((X2 rem X1) + X1) rem X1.
-spec
mod_by(integer()) -> fun((integer()) -> integer()).
mod_by(X1) -> fun (X2) -> mod_by(X1, X2) end.
%% Remainder whose result takes the sign of the dividend X2 (Erlang
%% `rem'); contrast with mod_by above.
-spec remainder_by(integer(), integer()) -> integer().
remainder_by(X1, X2) -> X2 rem X1.
-spec
remainder_by(integer()) -> fun((integer()) -> integer()).
remainder_by(X1) ->
    fun (X2) -> remainder_by(X1, X2) end.
-spec negate(integer()) -> integer().
negate(X) -> -X.
%% Absolute value via lt/2; shadows the auto-imported BIF abs/1
%% inside this module.
-spec abs(integer()) -> integer().
abs(X) ->
    case lt(X, 0) of
        true -> -X;
        false -> X
    end.
%% Constrain `Num' to the inclusive range [Low, High].
-spec clamp(integer(), integer(),
            integer()) -> integer().
clamp(Low, _High, Num) when Num < Low -> Low;
clamp(_Low, High, Num) when Num > High -> High;
clamp(_Low, _High, Num) -> Num.
%% Function combinators: left/right composition, forward/backward
%% application, identity, and the constant function -- each with a
%% curried single-argument variant.
-spec composeL(fun((B) -> C),
               fun((A) -> B)) -> fun((A) -> C).
composeL(G, F) -> fun (X) -> G(F(X)) end.
-spec
composeL(fun((B) -> C)) -> fun((fun((A) -> B)) -> fun((A) -> C)).
composeL(G) -> fun (F) -> composeL(G, F) end.
-spec composeR(fun((A) -> B),
               fun((B) -> C)) -> fun((A) -> C).
composeR(F, G) -> fun (X) -> G(F(X)) end.
-spec
composeR(fun((A) -> B)) -> fun((fun((B) -> C)) -> fun((A) -> C)).
composeR(F) -> fun (G) -> composeR(F, G) end.
%% apR applies value-first ("|>"); apL applies function-first ("<|").
-spec apR(A, fun((A) -> B)) -> B.
apR(X, F) -> F(X).
-spec apR(A) -> fun((fun((A) -> B)) -> B).
apR(X) -> fun (F) -> apR(X, F) end.
-spec apL(fun((A) -> B), A) -> B.
apL(F, X) -> F(X).
-spec apL(fun((A) -> B)) -> fun((A) -> B).
apL(F) -> fun (X) -> apL(F, X) end.
-spec identity(A) -> A.
identity(X) -> X.
%% always/2 discards its second argument.
-spec always(A, _) -> A.
always(A, _) -> A.
-spec always(A) -> fun((_) -> A).
always(A) -> fun (X) -> always(A, X) end.
%% never() is infinitely recursive, so no value of it can actually be
%% constructed; never/1 may therefore claim any return type. If it
%% were ever called it would loop forever unwrapping.
-type never() :: {'$#basics:never.just_one_more',
                  never()}.
-spec never(never()) -> _.
never({'$#basics:never.just_one_more', Nvr}) ->
    never(Nvr). | tests/elm_core/ir-spec/basics.erl | 0.520253 | 0.435902 | basics.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc This module contains functionaity for using and administrating
%% indexes. In this case an index is an instance of a Solr Core.
-module(yz_index).
-include("yokozuna.hrl").
-compile(export_all).
-record(index_info,
{
%% Each index has it's own N value. This is needed so that
%% the query plan can be calculated. It is up to the user to
%% make sure that all buckets associated with an index use
%% the same N value as the index.
n_val :: n(),
%% The name of the schema this index is using.
schema_name :: schema_name()
}).
-type index_info() :: #index_info{}.
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Get the list of buckets associated with `Index'.
-spec associated_buckets(index_name(), ring()) -> [bucket()].
%% Scan every bucket's properties in the ring and keep the names of
%% those whose ?YZ_INDEX property names `Index'.
associated_buckets(Index, Ring) ->
    [riak_core_bucket:name(BProps)
     || BProps <- riak_core_bucket:get_buckets(Ring),
        proplists:get_value(?YZ_INDEX, BProps, ?YZ_INDEX_TOMBSTONE) == Index].
%% @see create/2
-spec create(index_name()) -> ok.
%% Convenience wrapper: use the default schema.
create(Name) ->
    create(Name, ?YZ_DEFAULT_SCHEMA_NAME).
%% @see create/3
-spec create(index_name(), schema_name()) ->
          ok |
          {error, schema_not_found} |
          {error, invalid_name}.
%% Convenience wrapper: use the cluster's default object N value.
create(Name, SchemaName) ->
    DefaultNVal = riak_core_bucket:default_object_nval(),
    create(Name, SchemaName, DefaultNVal).
%% @doc Create the index `Name' across the entire cluster using
%% `SchemaName' as the schema and `NVal' as the N value.
%%
%% `ok' - The schema was found and index added to the list.
%%
%% `schema_not_found' - The `SchemaName' could not be found.
-spec create(index_name(), schema_name(), n() | undefined) ->
          ok |
          {error, schema_not_found} |
          {error, invalid_name}.
%% `undefined' N value falls back to the cluster default.
create(Name, SchemaName, undefined) ->
    DefaultNVal = riak_core_bucket:default_object_nval(),
    create(Name, SchemaName, DefaultNVal);
create(Name, SchemaName, NVal) when is_integer(NVal),
                                    NVal > 0 ->
    %% Validate the name first, then require the schema to exist
    %% before registering the index in cluster metadata. The actual
    %% Solr core is created per-node later (see local_create/1).
    case verify_name(Name) of
        {ok, Name} ->
            case yz_schema:exists(SchemaName) of
                false ->
                    {error, schema_not_found};
                true ->
                    Info = make_info(SchemaName, NVal),
                    ok = riak_core_metadata:put(?YZ_META_INDEXES, Name, Info)
            end;
        {error, _} = Err ->
            Err
    end.
%% @doc Determine if an index exists. For an index to exist it must 1)
%% be written to official index list, 2) have a corresponding index
%% dir in the root dir and 3) respond to a ping indicating it started
%% properly. If Solr is down then the check will fallback to
%% performing only the first two checks. If they fail then it
%% shouldn't exist in Solr.
-spec exists(index_name()) -> boolean().
%% An index exists when it is registered in cluster metadata AND has
%% an on-disk core directory AND (when Solr is reachable) answers a
%% ping. With Solr down, only the first two checks are applied.
exists(Name) ->
    InMeta = riak_core_metadata:get(?YZ_META_INDEXES, Name) /= undefined,
    OnDisk = lists:member(Name, get_indexes_from_disk(?YZ_ROOT_DIR)),
    Known = InMeta andalso OnDisk,
    case yz_solr:is_up() of
        true -> Known andalso yz_solr:ping(Name);
        false -> Known
    end.
%% @doc Removed the index `Name' from cluster meta.
-spec remove(index_name()) -> ok.
remove(Name) ->
    %% Cluster-metadata deletion only; the node-local Solr core is
    %% torn down separately (see local_remove/1).
    ok = riak_core_metadata:delete(?YZ_META_INDEXES, Name).
%% @doc Determine list of indexes based on filesystem as opposed to
%% the Riak ring or Solr HTTP resource.
%%
%% NOTE: This function assumes that all Yokozuna indexes live directly
%% under the Yokozuna root data directory and that any dir with a
%% `core.properties' file is an index. DO NOT create a dir with a
%% `core.properties' for any other reason or it will confuse this
%% function and potentially have other consequences up the stack.
-spec get_indexes_from_disk(string()) -> [index_name()].
%% A direct child of `Dir' counts as an index iff it is a directory
%% containing a `core.properties' file (see the warning in the doc
%% comment above). Names are returned as UTF-8 binaries.
get_indexes_from_disk(Dir) ->
    Candidates = filelib:wildcard(filename:join([Dir, "*"])),
    IsIndexDir = fun(F) ->
                         filelib:is_dir(F) andalso
                             filelib:is_file(filename:join([F, "core.properties"]))
                 end,
    [unicode:characters_to_binary(filename:basename(F))
     || F <- Candidates, IsIndexDir(F)].
%% @doc Determine the list of indexes based on the cluster metadata.
-spec get_indexes_from_meta() -> indexes().
get_indexes_from_meta() ->
    %% lww resolver: pick the last write when metadata replicas conflict.
    riak_core_metadata:fold(fun meta_index_list_acc/2,
                            [], ?YZ_META_INDEXES, [{resolver, lww}]).
%% Fetch the stored index info for `Name', or `undefined' when the
%% index is not registered in cluster metadata.
-spec get_index_info(index_name()) -> undefined | index_info().
get_index_info(Name) ->
    riak_core_metadata:get(?YZ_META_INDEXES, Name).
%% @doc Get the N value from the index info.
-spec get_n_val(index_info()) -> n().
%% O(1) record accessor; see the #index_info{} definition for why the
%% index carries its own N value.
get_n_val(IndexInfo) ->
    IndexInfo#index_info.n_val.
%% @doc Create the index `Name' locally. Make best attempt to create
%% the index, log if a failure occurs. Always return `ok'.
%%
%% NOTE: This should typically be called by a the ring handler in
%% `yz_event'. The `create/1' API should be used to create a
%% cluster-wide index.
-spec local_create(index_name()) -> ok.
%% Best-effort by design: every failure path logs and still returns
%% `ok' (see the doc comment above -- this runs from a ring handler).
local_create(Name) ->
    IndexDir = index_dir(Name),
    ConfDir = filename:join([IndexDir, "conf"]),
    ConfFiles = filelib:wildcard(filename:join([?YZ_PRIV, "conf", "*"])),
    DataDir = filename:join([IndexDir, "data"]),
    SchemaName = schema_name(get_index_info(Name)),
    case yz_schema:get(SchemaName) of
        {ok, RawSchema} ->
            SchemaFile = filename:join([ConfDir, yz_schema:filename(SchemaName)]),
            %% Solr resolves the schema path relative to the core dir.
            LocalSchemaFile = filename:join([".", yz_schema:filename(SchemaName)]),
            yz_misc:make_dirs([ConfDir, DataDir]),
            yz_misc:copy_files(ConfFiles, ConfDir, update),
            %% Delete `core.properties' file or CREATE may complain
            %% about the core already existing. This can happen when
            %% the core is initially created with a bad schema. Solr
            %% gets in a state where CREATE thinks the core already
            %% exists but RELOAD says no core exists.
            PropsFile = filename:join([IndexDir, "core.properties"]),
            file:delete(PropsFile),
            ok = file:write_file(SchemaFile, RawSchema),
            CoreProps = [
                         {name, Name},
                         {index_dir, IndexDir},
                         {cfg_file, ?YZ_CORE_CFG_FILE},
                         {schema_file, LocalSchemaFile}
                        ],
            case yz_solr:core(create, CoreProps) of
                {ok, _, _} ->
                    lager:info("Created index ~s with schema ~s",
                               [Name, SchemaName]),
                    ok;
                {error, exists} ->
                    %% Not fatal: Solr already has the core.
                    lager:info("Index ~s already exists in Solr, "
                               "but not in Riak metadata",
                               [Name]);
                {error, Err} ->
                    lager:error("Couldn't create index ~s: ~p", [Name, Err])
            end,
            ok;
        {error, _Reason} ->
            lager:error("Couldn't create index ~s because the schema ~s isn't found",
                        [Name, SchemaName]),
            ok
    end.
%% @doc Remove the index `Name' locally.
-spec local_remove(index_name()) -> ok.
local_remove(Name) ->
    %% delete_instance asks Solr to remove the core's instance
    %% directory as well; a failed call crashes here (badmatch),
    %% which is intentional.
    CoreProps = [
                 {core, Name},
                 {delete_instance, "true"}
                ],
    {ok, _, _} = yz_solr:core(remove, CoreProps),
    ok.
%% @doc Reload the `Index' cluster-wide. By default this will also
%% pull the latest version of the schema associated with the
%% index. This call will block for up 5 seconds. Any node which could
%% not reload its index will be returned in a list of failed nodes.
%%
%% Options:
%%
%% `{schema, boolean()}' - Whether to reload the schema, defaults to
%% true.
%%
%% `{timeout, ms()}' - Timeout in milliseconds.
-spec reload(index_name()) -> {ok, [node()]} | {error, reload_errs()}.
%% Reload with default options (schema refresh on, 5s timeout).
reload(Index) ->
    reload(Index, []).
-spec reload(index_name(), reload_opts()) -> {ok, [node()]} |
                                             {error, reload_errs()}.
reload(Index, Opts) ->
    TO = proplists:get_value(timeout, Opts, 5000),
    %% Fan out reload_local/2 to every cluster member; unreachable
    %% nodes come back in `Down'.
    {Responses, Down} =
        riak_core_util:rpc_every_member_ann(?MODULE, reload_local, [Index, Opts], TO),
    Down2 = [{Node, {error,down}} || Node <- Down],
    BadResponses = [R || {_,{error,_}}=R <- Responses],
    %% All-or-nothing result: any down node or error response turns
    %% the whole call into {error, [...]}.
    case Down2 ++ BadResponses of
        [] ->
            Nodes = [Node || {Node,_} <- Responses],
            {ok, Nodes};
        Errors ->
            {error, Errors}
    end.
%% @doc Remove documents in `Index' that are not owned by the local
%% node. Return the list of non-owned partitions found.
-spec remove_non_owned_data(index_name(), ring()) -> [p()].
remove_non_owned_data(Index, Ring) ->
    IndexPartitions = yz_cover:reify_partitions(Ring,
                                                yokozuna:partition_list(Index)),
    OwnedAndNext = yz_misc:owned_and_next_partitions(node(), Ring),
    %% Partitions indexed locally but neither owned nor about to be
    %% owned by this node.
    NonOwned = ordsets:subtract(IndexPartitions, OwnedAndNext),
    LNonOwned = yz_cover:logical_partitions(Ring, NonOwned),
    %% One delete-by-query per logical partition number.
    Queries = [{'query', <<?YZ_PN_FIELD_S, ":", (?INT_TO_BIN(LP))/binary>>}
               || LP <- LNonOwned],
    ok = yz_solr:delete(Index, Queries),
    NonOwned.
-spec schema_name(index_info()) -> schema_name().
%% O(1) accessor for the schema name stored in an #index_info{}.
schema_name(Info) ->
    Info#index_info.schema_name.
%% @doc Verify that the index is a name that Solr can use. Some chars
%% are invalid, namely "/" or non-ascii characters until full
%% UTF-8 support is available
-spec verify_name(index_name()) -> {ok, index_name()} | {error, invalid_name}.
%% A name is valid when every byte is printable ASCII (32..127) and it
%% contains no "/" (full UTF-8 support is not available yet).
verify_name(Name) ->
    Printable = lists:all(fun(C) -> C > 31 andalso C < 128 end,
                          binary_to_list(Name)),
    case Printable andalso binary:match(Name, <<"/">>) =:= nomatch of
        true -> {ok, Name};
        false -> {error, invalid_name}
    end.
%%%===================================================================
%%% Private
%%%===================================================================
%% @private
%%
%% @doc Used to accumulate list of indexes while folding over index
%% metadata.
-spec meta_index_list_acc({index_name(), '$deleted' | term()}, indexes()) ->
                                 indexes().
%% Fold step over index metadata: skip tombstoned entries, collect
%% the names of live ones.
meta_index_list_acc({_Name, '$deleted'}, Acc) ->
    Acc;
meta_index_list_acc({Name, _Info}, Acc) ->
    [Name | Acc].
%% @private
-spec reload_local(index_name(), reload_opts()) ->
          ok | {error, term()}.
reload_local(Index, Opts) ->
    TO = proplists:get_value(timeout, Opts, 5000),
    ReloadSchema = proplists:get_value(schema, Opts, true),
    case ReloadSchema of
        true ->
            %% Refresh the on-disk schema first; only then ask Solr
            %% to reload the core so it picks the new schema up.
            case reload_schema_local(Index) of
                ok ->
                    case yz_solr:core(reload, [{core, Index}], TO) of
                        {ok,_,_} -> ok;
                        Err -> Err
                    end;
                {error,_}=Err ->
                    Err
            end;
        false ->
            %% NOTE(review): TO is not applied on this branch -- the
            %% reload uses yz_solr's default timeout. Possibly
            %% unintended; confirm before relying on the timeout opt
            %% with {schema, false}.
            case yz_solr:core(reload, [{core, Index}]) of
                {ok,_,_} -> ok;
                Err -> Err
            end
    end.
%% @private
-spec reload_schema_local(index_name()) -> ok | {error, term()}.
%% Rewrite the index's schema file under its conf dir from the latest
%% stored schema; returns file:write_file/2's result on success.
reload_schema_local(Index) ->
    IndexDir = index_dir(Index),
    ConfDir = filename:join([IndexDir, "conf"]),
    SchemaName = schema_name(get_index_info(Index)),
    case yz_schema:get(SchemaName) of
        {ok, RawSchema} ->
            SchemaFile = filename:join([ConfDir, yz_schema:filename(SchemaName)]),
            file:write_file(SchemaFile, RawSchema);
        {error, Reason} ->
            {error, Reason}
    end.
%% Absolute path of the on-disk directory for index `Name'.
index_dir(Name) ->
    filename:absname(filename:join([?YZ_ROOT_DIR, Name])).
-spec make_info(binary(), n()) -> index_info().
%% Build the per-index metadata record stored by create/3.
make_info(SchemaName, NVal) ->
    #index_info{n_val=NVal,
                schema_name=SchemaName}. | deps/yokozuna/src/yz_index.erl | 0.630002 | 0.464355 | yz_index.erl | starcoder |
%% Copyright (c) 2013-2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(emqx_pmon).
-compile({no_auto_import, [monitor/3]}).
-export([new/0]).
-export([monitor/2, monitor/3]).
-export([demonitor/2]).
-export([find/2]).
-export([erase/2, erase_all/2]).
-export([count/1]).
-type(pmon() :: {?MODULE, map()}).
-export_type([pmon/0]).
-spec(new() -> pmon()).
new() ->
{?MODULE, maps:new()}.
-spec(monitor(pid(), pmon()) -> pmon()).
monitor(Pid, PM) ->
?MODULE:monitor(Pid, undefined, PM).
-spec(monitor(pid(), term(), pmon()) -> pmon()).
monitor(Pid, Val, {?MODULE, PM}) ->
{?MODULE, case maps:is_key(Pid, PM) of
true -> PM;
false -> Ref = erlang:monitor(process, Pid),
maps:put(Pid, {Ref, Val}, PM)
end}.
-spec(demonitor(pid(), pmon()) -> pmon()).
demonitor(Pid, {?MODULE, PM}) ->
{?MODULE, case maps:find(Pid, PM) of
{ok, {Ref, _Val}} ->
%% flush
_ = erlang:demonitor(Ref, [flush]),
maps:remove(Pid, PM);
error -> PM
end}.
-spec(find(pid(), pmon()) -> error | {ok, term()}).
find(Pid, {?MODULE, PM}) ->
case maps:find(Pid, PM) of
{ok, {_Ref, Val}} ->
{ok, Val};
error -> error
end.
-spec(erase(pid(), pmon()) -> pmon()).
erase(Pid, {?MODULE, PM}) ->
{?MODULE, maps:remove(Pid, PM)}.
-spec(erase_all([pid()], pmon()) -> {[{pid(), term()}], pmon()}).
erase_all(Pids, PMon0) ->
lists:foldl(
fun(Pid, {Acc, PMon}) ->
case find(Pid, PMon) of
{ok, Val} ->
{[{Pid, Val}|Acc], erase(Pid, PMon)};
error -> {Acc, PMon}
end
end, {[], PMon0}, Pids).
-spec(count(pmon()) -> non_neg_integer()).
count({?MODULE, PM}) ->
maps:size(PM). | src/emqx_pmon.erl | 0.627723 | 0.413773 | emqx_pmon.erl | starcoder |
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% access module for transaction logs
%% implemented as a ram_copies mnesia table
-module(ekka_rlog_tab).
%% Mnesia bootstrap
-export([mnesia/1]).
-export([write/3, first_d/1, last_d/1, next_d/2]).
-include("ekka_rlog.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-type key() :: ekka_rlog_lib:txid().
-type shard() :: ekka_rlog:shard().
-boot_mnesia({mnesia, [boot]}).
-copy_mnesia({mnesia, [copy]}).
%% @doc Mnesia bootstrap.
mnesia(BootType) ->
case ekka_rlog:role() of
core -> [init(BootType, Shard) || Shard <- ekka_rlog:shards()], ok;
_ -> ok
end.
%% @doc Write a transaction log.
-spec write(ekka_rlog:shard(), ekka_rlog_lib:txid(), [ekka_rlog_lib:op(),...]) -> ok.
write(Shard, Key, [_ | _] = Ops) ->
Log = #rlog{ key = Key
, ops = Ops
},
mnesia:write(Shard, Log, write).
%% @doc Search for the first record in the table.
-spec first_d(shard()) -> [key()].
first_d(Shard) ->
case mnesia:dirty_first(Shard) of
'$end_of_table' -> [];
Key -> [Key]
end.
%% @doc Search for the last key in the table.
-spec last_d(shard()) -> [key()].
last_d(Shard) ->
case mnesia:dirty_last(Shard) of
'$end_of_table' -> [];
Key -> [Key]
end.
%% @doc Search for the next key ordered immediately behind the given one.
-spec next_d(shard(), key()) -> [key()].
next_d(Shard, Key) ->
case mnesia:dirty_next(Shard, Key) of
'$end_of_table' -> [];
Key -> [Key]
end.
init(boot, Shard) ->
Opts = [ {type, ordered_set}
, {ram_copies, [node()]}
, {record_name, rlog}
, {attributes, record_info(fields, rlog)}
],
?tp(notice, creating_rlog_tab,
#{ node => node()
, shard => Shard
, type => boot
}),
ok = ekka_mnesia:create_table(Shard, Opts);
init(copy, Shard) ->
?tp(notice, creating_rlog_tab,
#{ node => node()
, shard => Shard
, type => copy
}),
ok = ekka_mnesia:copy_table(Shard, ram_copies). | src/ekka_rlog_tab.erl | 0.516108 | 0.446374 | ekka_rlog_tab.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2018 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(tricks_discovery_manager_SUITE).
-author("<NAME> <<EMAIL>>").
-include("tricks.hrl").
%% common_test callbacks
-export([suite/0,
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2,
all/0]).
-compile([nowarn_export_all, export_all]).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
suite() ->
[{timetrap, {hours, 1}}].
init_per_suite(Config) ->
Config.
end_per_suite(Config) ->
Config.
init_per_testcase(Case, Config) ->
ct:pal("Beginning test case: ~p", [Case]),
%% start
ok = test_util:start(),
Config.
end_per_testcase(Case, Config) ->
ct:pal("Ending test case: ~p", [Case]),
%% stop
ok = test_util:stop(),
Config.
all() ->
[register_test,
unregister_test,
nada_test].
%% ===================================================================
%% tests
%% ===================================================================
register_test(_Config) ->
%% there's nothing registered
test_util:discovery_expect(10001, server, []),
test_util:discovery_expect(10001, client, []),
test_util:discovery_expect(10002, server, []),
%% register and expect
test_util:discovery_register(10001, server, {1, "127.0.0.1"}),
test_util:discovery_expect(10001, server, [1]),
test_util:discovery_expect(10001, client, []),
test_util:discovery_expect(10002, server, []),
%% register twice and expect
test_util:discovery_register(10002, server, {10, "127.0.0.1"}),
test_util:discovery_register(10001, server, {1, "127.0.0.1"}),
test_util:discovery_register(10002, server, {10, "127.0.0.1"}),
test_util:discovery_expect(10001, server, [1]),
test_util:discovery_expect(10001, client, []),
test_util:discovery_expect(10002, server, [10]),
%% register and expect
test_util:discovery_register(10001, server, {2, "127.0.0.2"}),
test_util:discovery_expect(10001, server, [1, 2]),
test_util:discovery_expect(10001, client, []),
test_util:discovery_expect(10002, server, [10]),
%% register and expect
test_util:discovery_register(10001, client, {100, "127.0.0.100"}),
test_util:discovery_expect(10001, server, [1, 2]),
test_util:discovery_expect(10001, client, [100]),
test_util:discovery_expect(10002, server, [10]).
unregister_test(_Config) ->
%% unregister something non existing
test_util:discovery_unregister(10001, server, {1, "127.0.0.1"}),
%% register stuff
test_util:discovery_register(10001, server, {1, "127.0.0.1"}),
test_util:discovery_register(10001, server, {2, "127.0.0.2"}),
test_util:discovery_register(10001, client, {100, "127.0.0.100"}),
test_util:discovery_register(10002, server, {10, "127.0.0.1"}),
%% unregister something non existing
test_util:discovery_unregister(10001, server, {3, "127.0.0.3"}),
test_util:discovery_unregister(10002, server, {3, "127.0.0.3"}),
%% expect
test_util:discovery_expect(10001, server, [1, 2]),
test_util:discovery_expect(10001, client, [100]),
test_util:discovery_expect(10002, server, [10]),
%% unregister something existing
test_util:discovery_unregister(10001, server, {2, "127.0.0.2"}),
test_util:discovery_expect(10001, server, [1]),
%% register it back
test_util:discovery_register(10001, server, {2, "127.0.0.2"}),
test_util:discovery_expect(10001, server, [1, 2]),
%% unregister it again
test_util:discovery_unregister(10001, server, {2, "127.0.0.2"}),
test_util:discovery_expect(10001, server, [1]).
nada_test(_Config) ->
%% start nada
%% this experiment will
%% - start 1 app1 when we register event go1
%% - start 3 app2 when we register event go2
%% - stop 1 app1 when 3 app2 are started
ExpId = test_util:example_run("nada"),
%% in the beginning there's nothing
test_util:discovery_expect(ExpId, app1, []),
test_util:discovery_expect(ExpId, app2, []),
%% start app1
test_util:event_register(ExpId, go1),
test_util:discovery_expect(ExpId, app1, 1, [0]),
test_util:discovery_expect(ExpId, app2, []),
%% start app2
test_util:event_register(ExpId, go2),
test_util:discovery_expect(ExpId, app2, 3, [0, 1, 2]). | test/tricks_discovery_manager_SUITE.erl | 0.588534 | 0.628692 | tricks_discovery_manager_SUITE.erl | starcoder |
%%--------------------------------------------------------------------
%% Copyright (c) 2020 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(estatsd_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
all() -> emqx_ct:all(?MODULE).
init_per_suite(Config) ->
application:ensure_all_started(estatsd),
Config.
end_per_suite(_Config) ->
application:stop(estatsd).
t_apis(_) ->
{ok, Socket} = gen_udp:open(8125),
{ok, Pid} = estatsd:start_link(),
estatsd:counter(Pid, example, 1),
should_receive(<<"example:1|c">>),
estatsd:increment(Pid, example, 1),
should_receive(<<"example:1|c">>),
estatsd:increment(Pid, example, -1),
should_receive(<<"example:-1|c">>),
estatsd:decrement(Pid, example, 1),
should_receive(<<"example:-1|c">>),
estatsd:decrement(Pid, example, -1),
should_receive(<<"example:1|c">>),
estatsd:gauge(Pid, example, 10),
should_receive(<<"example:10|g">>),
estatsd:gauge_delta(Pid, example, 1),
should_receive(<<"example:+1|g">>),
estatsd:gauge_delta(Pid, example, -1),
should_receive(<<"example:-1|g">>),
estatsd:set(Pid, example, 10),
should_receive(<<"example:10|s">>),
estatsd:timing(Pid, example, 10),
should_receive(<<"example:10|ms">>),
DelayFunc = fun() ->
ct:sleep(100)
end,
estatsd:timing(Pid, example, DelayFunc),
receive
{udp, _, _, _, Packet} ->
Milliseconds = list_to_integer(lists:nth(2, re:split(Packet,"[:|]",[{return,list}]))),
?assert(Milliseconds > 50 andalso Milliseconds < 150)
after 10 ->
ct:fail(should_recv_packet)
end,
estatsd:histogram(Pid, example, 10),
should_receive(<<"example:10|h">>),
gen_udp:close(Socket),
ok = estatsd:stop(Pid).
t_sample_rate(_) ->
{ok, Socket} = gen_udp:open(8125),
{ok, Pid} = estatsd:start_link([{batch_size, 1}]),
[estatsd:counter(Pid, example, 1, 0.1) || _N <- lists:seq(1, 500)],
Rate = receive_count(0) / 500,
?assert(Rate > 0.06 andalso Rate < 0.14),
gen_udp:close(Socket),
ok = estatsd:stop(Pid).
t_opts(_) ->
{ok, Socket} = gen_udp:open(8125),
{ok, Pid} = estatsd:start_link([{prefix, hostname}, {tags, [{"constant", "abc"}]}]),
estatsd:counter(Pid, example, 1, 1, [{"env", "dev"}]),
should_receive(iolist_to_binary([estatsd:hostname(), $., "example:1|c|#env:dev,constant:abc"])),
ok = estatsd:stop(Pid),
{ok, Pid2} = estatsd:start_link([{prefix, name}]),
estatsd:counter(Pid2, example, 1),
should_receive(iolist_to_binary([estatsd:name(), $., "example:1|c"])),
ok = estatsd:stop(Pid2),
{ok, Pid3} = estatsd:start_link([{prefix, sname}]),
estatsd:counter(Pid3, example, 1),
should_receive(iolist_to_binary([estatsd:sname(), $., "example:1|c"])),
ok = estatsd:stop(Pid3),
gen_udp:close(Socket).
receive_count(Cnt) ->
receive
{udp, _, _, _, _} ->
receive_count(Cnt + 1)
after 100 ->
Cnt
end.
should_receive(Expected) ->
receive
{udp, _, _, _, Packet} ->
?assertEqual(Expected, list_to_binary(Packet))
after 10 ->
ct:fail(should_recv_packet)
end. | test/estatsd_SUITE.erl | 0.552057 | 0.446977 | estatsd_SUITE.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(mem3_reshard_validate).
-export([
start_args/2,
source/1,
targets/2
]).
-include_lib("mem3/include/mem3.hrl").
-spec start_args(#shard{}, any()) -> ok | {error, term()}.
start_args(Source, Split) ->
first_error([
check_split(Split),
check_range(Source, Split),
check_node(Source),
source(Source),
check_shard_map(Source)
]).
-spec source(#shard{}) -> ok | {error, term()}.
source(#shard{name = Name}) ->
case couch_server:exists(Name) of
true ->
ok;
false ->
{error, {source_shard_not_found, Name}}
end.
-spec check_shard_map(#shard{}) -> ok | {error, term()}.
check_shard_map(#shard{name = Name}) ->
DbName = mem3:dbname(Name),
AllShards = mem3:shards(DbName),
case mem3_util:calculate_max_n(AllShards) of
N when is_integer(N), N >= 1 ->
ok;
N when is_integer(N), N < 1 ->
{error, {not_enough_shard_copies, DbName}}
end.
-spec targets(#shard{}, [#shard{}]) -> ok | {error, term()}.
targets(#shard{} = Source, Targets) ->
first_error([
target_ranges(Source, Targets)
]).
-spec check_split(any()) -> ok | {error, term()}.
check_split(Split) when is_integer(Split), Split > 1 ->
ok;
check_split(Split) ->
{error, {invalid_split_parameter, Split}}.
-spec check_range(#shard{}, any()) -> ok | {error, term()}.
check_range(#shard{range = Range = [B, E]}, Split) ->
case (E + 1 - B) >= Split of
true ->
ok;
false ->
{error, {shard_range_cannot_be_split, Range, Split}}
end.
-spec check_node(#shard{}) -> ok | {error, term()}.
check_node(#shard{node = undefined}) ->
ok;
check_node(#shard{node = Node}) when Node =:= node() ->
ok;
check_node(#shard{node = Node}) ->
{error, {source_shard_node_is_not_current_node, Node}}.
-spec target_ranges(#shard{}, [#shard{}]) -> ok | {error, any()}.
target_ranges(#shard{range = [Begin, End]}, Targets) ->
Ranges = [R || #shard{range = R} <- Targets],
SortFun = fun([B1, _], [B2, _]) -> B1 =< B2 end,
[First | RestRanges] = lists:sort(SortFun, Ranges),
try
TotalRange = lists:foldl(fun([B2, E2], [B1, E1]) ->
case B2 =:= E1 + 1 of
true ->
ok;
false ->
throw({range_error, {B2, E1}})
end,
[B1, E2]
end, First, RestRanges),
case [Begin, End] =:= TotalRange of
true ->
ok;
false ->
throw({range_error, {[Begin, End], TotalRange}})
end
catch
throw:{range_error, Error} ->
{error, {shard_range_error, Error}}
end.
-spec first_error([ok | {error, term()}]) -> ok | {error, term()}.
first_error(Results) ->
case [Res || Res <- Results, Res =/= ok] of
[] ->
ok;
[FirstError | _] ->
FirstError
end. | src/mem3/src/mem3_reshard_validate.erl | 0.631822 | 0.422862 | mem3_reshard_validate.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
%% @doc Saves a Key/Value pair to a ini file. The Key consists of a Section
%% and Option combination. If that combination is found in the ini file
%% the new value replaces the old value. If only the Section is found the
%% Option and value combination is appended to the Section. If the Section
%% does not yet exist in the ini file, it is added and the Option/Value
%% pair is appended.
%% @see couch_config
-module(couch_config_writer).
-include("couch_db.hrl").
-export([save_to_file/2]).
%% @spec save_to_file(
%% Config::{{Section::string(), Option::string()}, Value::string()},
%% File::filename()) -> ok
%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
save_to_file({{Section, Option}, Value}, File) ->
?LOG_DEBUG("saving to file '~s', Config: '~p'", [File, {{Section, Option}, Value}]),
% open file and create a list of lines
{ok, Stream} = file:read_file(File),
OldFileContents = binary_to_list(Stream),
{ok, Lines} = regexp:split(OldFileContents, "\r\n|\n|\r|\032"),
% prepare input variables
SectionName = "[" ++ Section ++ "]",
OptionList = Option,
% produce the contents for the config file
NewFileContents =
case {NewFileContents2, DoneOptions} = save_loop({{SectionName, OptionList}, Value}, Lines, "", "", []) of
% we didn't change anything, that means we couldn't find a matching
% [ini section] in which case we just append a new one.
{OldFileContents, DoneOptions} ->
% but only if we haven't actually written anything.
case lists:member(OptionList, DoneOptions) of
true -> OldFileContents;
_ -> append_new_ini_section({{SectionName, OptionList}, Value}, OldFileContents)
end;
_ ->
NewFileContents2
end,
ok = file:write_file(File, list_to_binary(NewFileContents)),
ok.
%% @doc Iterates over the lines of an ini file and replaces or adds a new
%% configuration directive.
save_loop({{Section, Option}, Value}, [Line|Rest], OldCurrentSection, Contents, DoneOptions) ->
% if we find a new [ini section] (Section), save that for reference
NewCurrentSection = parse_module(Line, OldCurrentSection),
% if the current Section is the one we want to change, try to match
% each line with the Option
NewContents =
case NewCurrentSection of
Section ->
case OldCurrentSection of
NewCurrentSection -> % we already were in [Section]
case lists:member(Option, DoneOptions) of
true -> % we already replaced Option, do nothing
DoneOptions2 = DoneOptions,
Line;
_ -> % we haven't written our Option yet
case parse_variable(Line, Option, Value) of
nomatch ->
DoneOptions2 = DoneOptions,
Line;
NewLine ->
DoneOptions2 = [Option|DoneOptions],
NewLine
end
end;
_ -> % we got into a new [section]
{NewLine, DoneOptions2} = append_var_to_section(
{{Section, Option}, Value},
Line,
OldCurrentSection,
DoneOptions),
NewLine
end;
_ -> % we are reading [NewCurrentSection]
{NewLine, DoneOptions2} = append_var_to_section(
{{Section, Option}, Value},
Line,
OldCurrentSection,
DoneOptions),
NewLine
end,
% clumsy way to only append a newline character if the line is not empty. We need this to
% avoid having a newline inserted at the top of the target file each time we save it.
Contents2 = case Contents of "" -> ""; _ -> Contents ++ "\n" end,
% go to next line
save_loop({{Section, Option}, Value}, Rest, NewCurrentSection, Contents2 ++ NewContents, DoneOptions2);
save_loop({{Section, Option}, Value}, [], OldSection, NewFileContents, DoneOptions) ->
case lists:member(Option, DoneOptions) of
% append Deferred Option
false when Section == OldSection ->
{NewFileContents ++ "\n" ++ Option ++ " = " ++ Value ++ "\n", DoneOptions};
% we're out of new lines, just return the new file's contents
_ -> {NewFileContents, DoneOptions}
end.
append_new_ini_section({{SectionName, Option}, Value}, OldFileContents) ->
OldFileContents ++ "\n" ++ SectionName ++ "\n" ++ Option ++ " = " ++ Value ++ "\n".
append_var_to_section({{Section, Option}, Value}, Line, OldCurrentSection, DoneOptions) ->
case OldCurrentSection of
Section -> % append Option to Section
case lists:member(Option, DoneOptions) of
false ->
{Option ++ " = " ++ Value ++ "\n\n" ++ Line, [Option|DoneOptions]};
_ ->
{Line, DoneOptions}
end;
_ ->
{Line, DoneOptions}
end.
%% @spec parse_module(Line::string(), OldSection::string()) -> string()
%% @doc Tries to match a line against a pattern specifying a ini module or
%% section ("[Section]"). Returns OldSection if no match is found.
parse_module(Line, OldSection) ->
case regexp:match(Line, "^\\[([a-zA-Z0-9\_-]*)\\]$") of
nomatch ->
OldSection;
{error, Error} ->
io:format("ini file regex error module: '~s'~n", [Error]),
OldSection;
{match, Start, Length} ->
string:substr(Line, Start, Length)
end.
%% @spec parse_variable(Line::string(), Option::string(), Value::string()) ->
%% string() | nomatch
%% @doc Tries to match a variable assignment in Line. Returns nomatch if the
%% Option is not found. Returns a new line composed of the Option and
%% Value otherwise.
parse_variable(Line, Option, Value) ->
case regexp:match(Line, "^" ++ Option ++ "\s?=") of
nomatch ->
nomatch;
{error, Error}->
io:format("ini file regex error variable: '~s'~n", [Error]),
nomatch;
{match, _Start, _Length} ->
Option ++ " = " ++ Value
end. | src/couchdb/couch_config_writer.erl | 0.556159 | 0.473536 | couch_config_writer.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Helium Systems, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(metadata_SUITE).
-compile({parse_transform, lager_transform}).
-export([
%% suite/0,
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2,
all/0
]).
-export([
read_write_delete_test/1,
partitioned_cluster_test/1,
siblings_test/1
]).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("kernel/include/inet.hrl").
%% ===================================================================
%% common_test callbacks
%% ===================================================================
init_per_suite(_Config) ->
lager:start(),
%% this might help, might not...
os:cmd(os:find_executable("epmd")++" -daemon"),
{ok, Hostname} = inet:gethostname(),
case net_kernel:start([list_to_atom("runner@"++Hostname), shortnames]) of
{ok, _} -> ok;
{error, {already_started, _}} -> ok;
{error, {{already_started, _},_}} -> ok
end,
_Config.
end_per_suite(_Config) ->
application:stop(lager),
_Config.
init_per_testcase(Case, Config) ->
Nodes = plumtree_test_utils:pmap(fun(N) ->
plumtree_test_utils:start_node(N, Config, Case)
end, [electra, katana, flail, gargoyle]),
[{nodes, Nodes}|Config].
end_per_testcase(_, _Config) ->
plumtree_test_utils:pmap(fun(Node) ->ct_slave:stop(Node) end, [electra, katana, flail, gargoyle]),
ok.
all() ->
[read_write_delete_test, partitioned_cluster_test, siblings_test].
read_write_delete_test(Config) ->
[Node1|OtherNodes] = Nodes = proplists:get_value(nodes, Config),
[?assertEqual(ok, rpc:call(Node, plumtree_peer_service, join, [Node1]))
|| Node <- OtherNodes],
Expected = lists:sort(Nodes),
ok = plumtree_test_utils:wait_until_joined(Nodes, Expected),
[?assertEqual({Node, Expected}, {Node,
lists:sort(plumtree_test_utils:get_cluster_members(Node))})
|| Node <- Nodes],
?assertEqual(undefined, get_metadata(Node1, {foo, bar}, baz, [])),
ok = put_metadata(Node1, {foo, bar}, baz, quux, []),
?assertEqual(quux, get_metadata(Node1, {foo, bar}, baz, [])),
ok = wait_until_converged(Nodes, {foo, bar}, baz, quux),
ok = put_metadata(Node1, {foo, bar}, baz, norf, []),
ok = wait_until_converged(Nodes, {foo, bar}, baz, norf),
ok = delete_metadata(Node1, {foo, bar}, baz),
ok = wait_until_converged(Nodes, {foo, bar}, baz, undefined),
ok.
partitioned_cluster_test(Config) ->
[Node1|OtherNodes] = Nodes = proplists:get_value(nodes, Config),
[?assertEqual(ok, rpc:call(Node, plumtree_peer_service, join, [Node1]))
|| Node <- OtherNodes],
Expected = lists:sort(Nodes),
ok = plumtree_test_utils:wait_until_joined(Nodes, Expected),
[?assertEqual({Node, Expected}, {Node,
lists:sort(plumtree_test_utils:get_cluster_members(Node))})
|| Node <- Nodes],
ok = wait_until_converged(Nodes, {foo, bar}, baz, undefined),
ok = put_metadata(Node1, {foo, bar}, baz, quux, []),
ok = wait_until_converged(Nodes, {foo, bar}, baz, quux),
{ANodes, BNodes} = lists:split(2, Nodes),
plumtree_test_utils:partition_cluster(ANodes, BNodes),
%% write to one side
ok = put_metadata(Node1, {foo, bar}, baz, norf, []),
%% check that whole side has the new value
ok = wait_until_converged(ANodes, {foo, bar}, baz, norf),
%% the far side should have the old value
ok = wait_until_converged(BNodes, {foo, bar}, baz, quux),
plumtree_test_utils:heal_cluster(ANodes, BNodes),
%% all the nodes should see the new value
ok = wait_until_converged(Nodes, {foo, bar}, baz, norf),
ok.
siblings_test(Config) ->
[Node1|OtherNodes] = Nodes = proplists:get_value(nodes, Config),
[?assertEqual(ok, rpc:call(Node, plumtree_peer_service, join, [Node1]))
|| Node <- OtherNodes],
Expected = lists:sort(Nodes),
ok = plumtree_test_utils:wait_until_joined(Nodes, Expected),
[?assertEqual({Node, Expected}, {Node,
lists:sort(plumtree_test_utils:get_cluster_members(Node))})
|| Node <- Nodes],
ok = wait_until_converged(Nodes, {foo, bar}, baz, undefined),
ok = put_metadata(Node1, {foo, bar}, baz, quux, []),
ok = put_metadata(Node1, {foo, bar}, canary, 1, []),
ok = wait_until_converged(Nodes, {foo, bar}, baz, quux),
ok = wait_until_converged(Nodes, {foo, bar}, canary, 1),
{ANodes, BNodes} = lists:split(2, Nodes),
plumtree_test_utils:partition_cluster(ANodes, BNodes),
%% write to one side
ok = put_metadata(Node1, {foo, bar}, baz, norf, []),
ok = put_metadata(Node1, {foo, bar}, canary, 2, []),
%% check that whole side has the new value
ok = wait_until_converged(ANodes, {foo, bar}, baz, norf),
ok = wait_until_converged(ANodes, {foo, bar}, canary, 2),
%% the far side should have the old value
ok = wait_until_converged(BNodes, {foo, bar}, baz, quux),
ok = wait_until_converged(BNodes, {foo, bar}, canary, 1),
%% write a competing value to the other side
[Node3|_] = BNodes,
ok = put_metadata(Node3, {foo, bar}, baz, mork, []),
ok = wait_until_converged(BNodes, {foo, bar}, baz, mork),
plumtree_test_utils:heal_cluster(ANodes, BNodes),
%% block until the canary key converges
ok = wait_until_converged(Nodes, {foo, bar}, canary, 2),
%% make sure we have siblings, but don't resolve them yet
ok = wait_until_sibling(Nodes, {foo, bar}, baz),
%% resolve the sibling
spork = get_metadata(Node1, {foo, bar}, baz, [{resolver, fun(_A, _B) ->
spork end}, {allow_put, false}]),
%% without allow_put set, all the siblings are still there...
ok = wait_until_sibling(Nodes, {foo, bar}, baz),
%% resolve the sibling and write it back
spork = get_metadata(Node1, {foo, bar}, baz, [{resolver, fun(_A, _B) ->
spork end}, {allow_put, true}]),
%% check all the nodes see the resolution
ok = wait_until_converged(Nodes, {foo, bar}, baz, spork),
ok.
%% ===================================================================
%% utility functions
%% ===================================================================
get_metadata(Node, Prefix, Key, Opts) ->
rpc:call(Node, plumtree_metadata, get, [Prefix, Key, Opts]).
put_metadata(Node, Prefix, Key, ValueOrFun, Opts) ->
rpc:call(Node, plumtree_metadata, put, [Prefix, Key, ValueOrFun, Opts]).
delete_metadata(Node, Prefix, Key) ->
rpc:call(Node, plumtree_metadata, delete, [Prefix, Key]).
wait_until_converged(Nodes, Prefix, Key, ExpectedValue) ->
plumtree_test_utils:wait_until(fun() ->
lists:all(fun(X) -> X == true end,
plumtree_test_utils:pmap(fun(Node) ->
ExpectedValue == get_metadata(Node, Prefix,
Key,
[{allow_put,
false}])
end, Nodes))
end, 60*2, 500).
wait_until_sibling(Nodes, Prefix, Key) ->
plumtree_test_utils:wait_until(fun() ->
lists:all(fun(X) -> X == true end,
plumtree_test_utils:pmap(fun(Node) ->
case rpc:call(Node, plumtree_metadata_manager,
get, [{Prefix, Key}]) of
undefined -> false;
Value ->
rpc:call(Node,
plumtree_metadata_object,
value_count, [Value]) > 1
end
end, Nodes))
end, 60*2, 500). | test/metadata_SUITE.erl | 0.573081 | 0.423339 | metadata_SUITE.erl | starcoder |
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% ETS backed interface for working with spans.
%% @end
%%%-------------------------------------------------------------------------
-module(ot_span_ets).
-behaviour(ot_span).
-behaviour(gen_server).
-export([start_link/1,
init/1,
handle_call/3,
handle_cast/2]).
-export([start_span/2,
start_span/3,
end_span/1,
end_span/2,
get_ctx/1,
is_recording_events/1,
set_attribute/3,
set_attributes/2,
add_event/3,
add_events/2,
set_status/2,
update_name/2]).
-include_lib("opentelemetry_api/include/opentelemetry.hrl").
-include("ot_span_ets.hrl").
-record(state, {}).
%% @doc Starts the span storage server. `Opts' is passed through
%% unchanged to `init/1' as the gen_server init argument.
start_link(Opts) ->
    gen_server:start_link(?MODULE, Opts, []).
%% @doc Same as `start_span/3' but with an identity processor: the new
%% span is stored as-is without any transformation.
start_span(Name, Opts) ->
    start_span(Name, Opts, fun(Span) -> Span end).
%% @doc Create a new span, run it through `Processors' and insert the
%% processed span into the active-span ETS table. Returns the span
%% context for the newly started span.
-spec start_span(opentelemetry:span_name(), ot_span:start_opts(), fun()) -> opentelemetry:span_ctx().
start_span(Name, Opts, Processors) ->
    {SpanCtx, NewSpan} = ot_span_utils:start_span(Name, Opts),
    _ = storage_insert(Processors(NewSpan)),
    SpanCtx.
%% @doc Same as `end_span/2' but with an identity processor applied to
%% the finished span.
end_span(SpanCtx) ->
    end_span(SpanCtx, fun(Span) -> Span end).
%% @doc End a span based on its context: atomically remove it from the
%% active-span table, finish it and hand it to `Processors' (e.g. the
%% configured reporters).
%%
%% Returns the processor's result for a recorded span, `false' when the
%% span id is not (or no longer) present in the table, and `ok' when the
%% span's trace flags indicate it is not enabled.
%%
%% Note: the spec previously claimed `boolean() | {error, term()}' which
%% did not cover the `ok' of the disabled clause nor the processor's
%% arbitrary return value; it is widened here to match the actual code.
-spec end_span(opentelemetry:span_ctx(), fun()) -> ok | false | term() | {error, term()}.
end_span(#span_ctx{span_id=SpanId,
                   tracestate=Tracestate,
                   trace_flags=TraceOptions}, Processors) when ?IS_SPAN_ENABLED(TraceOptions) ->
    %% ets:take/2 reads and deletes in one operation, so a span can only
    %% ever be ended (and processed) once
    case ets:take(?SPAN_TAB, SpanId) of
        [Span] ->
            %% carry the possibly-updated tracestate from the ctx into
            %% the stored span before finishing it
            Span1 = ot_span_utils:end_span(Span#span{tracestate=Tracestate}),
            Processors(Span1);
        _ ->
            false
    end;
end_span(_, _) ->
    ok.
%% @doc Build a span context record from the corresponding fields of a
%% full span record.
-spec get_ctx(opentelemetry:span()) -> opentelemetry:span_ctx().
get_ctx(#span{}=Span) ->
    #span_ctx{trace_id=Span#span.trace_id,
              span_id=Span#span.span_id,
              tracestate=Span#span.tracestate,
              is_recorded=Span#span.is_recorded}.
-spec is_recording_events(opentelemetry:span_ctx()) -> boolean().
is_recording_events(#span_ctx{is_recorded=IsRecorded}) ->
IsRecorded.
%% @doc Set a single attribute on the active span.
%% Delegates to set_attributes/2 with the original context. The previous
%% version rebuilt a fresh #span_ctx{} containing only the span id,
%% silently discarding every other field — it only worked because
%% set_attributes/2 happens to match the span id alone; passing the
%% caller's context through is both cheaper and robust to future
%% changes in set_attributes/2.
-spec set_attribute(opentelemetry:span_ctx(),
                    opentelemetry:attribute_key(),
                    opentelemetry:attribute_value()) -> boolean().
set_attribute(SpanCtx=#span_ctx{}, Key, Value) ->
    set_attributes(SpanCtx, [{Key, Value}]).
%% @doc Append attributes to the active span identified by the context.
%% Returns false when the span is not in the table, or when a concurrent
%% writer changed the record between the lookup and the select_replace
%% (the match pattern is the full record, so a stale read fails to
%% replace — the update is then silently lost; NOTE(review): callers
%% should check the boolean if they care).
-spec set_attributes(opentelemetry:span_ctx(), opentelemetry:attributes()) -> boolean().
set_attributes(#span_ctx{span_id=SpanId}, NewAttributes) ->
    case ets:lookup(?SPAN_TAB, SpanId) of
        [Span=#span{attributes=Attributes}] ->
            %% Append semantics: duplicate keys are kept, not replaced.
            Span1 = Span#span{attributes=Attributes++NewAttributes},
            1 =:= ets:select_replace(?SPAN_TAB, [{Span, [], [{const, Span1}]}]);
        _ ->
            false
    end.
%% @doc Add a single named event, timestamped now, to the active span.
-spec add_event(opentelemetry:span_ctx(), unicode:unicode_binary(), opentelemetry:attributes()) -> boolean().
add_event(SpanCtx, Name, Attributes) ->
    TimedEvents = opentelemetry:timed_events([{opentelemetry:timestamp(),
                                               Name, Attributes}]),
    add_events(SpanCtx, TimedEvents).
%% @doc Append already-timestamped events to the active span.
%% Same compare-and-swap pattern as set_attributes/2: false on a missing
%% span or on a lost race with a concurrent writer.
-spec add_events(opentelemetry:span_ctx(), opentelemetry:timed_events()) -> boolean().
add_events(#span_ctx{span_id=SpanId}, NewTimedEvents) ->
    case ets:lookup(?SPAN_TAB, SpanId) of
        [Span=#span{timed_events=TimeEvents}] ->
            Span1 = Span#span{timed_events=TimeEvents++NewTimedEvents},
            1 =:= ets:select_replace(?SPAN_TAB, [{Span, [], [{const, Span1}]}]);
        _ ->
            false
    end.
%% @doc Overwrite the status field of the active span.
%% ets:update_element/3 returns false when the key is absent.
-spec set_status(opentelemetry:span_ctx(), opentelemetry:status()) -> boolean().
set_status(#span_ctx{span_id=SpanId}, Status) ->
    ets:update_element(?SPAN_TAB, SpanId, {#span.status, Status}).
%% @doc Overwrite the name field of the active span.
-spec update_name(opentelemetry:span_ctx(), opentelemetry:span_name()) -> boolean().
update_name(#span_ctx{span_id=SpanId}, Name) ->
    ets:update_element(?SPAN_TAB, SpanId, {#span.name, Name}).
%%
%% Insert a span record into the active-span table (keyed on span_id).
storage_insert(Span) ->
    ets:insert(?SPAN_TAB, Span).
%% gen_server init: create the named, public span table if it does not
%% exist yet. Creation is idempotent across restarts of this process as
%% long as the table owner is still alive.
init(_Opts) ->
    %% ets table is required for other parts to not crash so we create
    %% it in init and not in a handle_continue or whatever else
    case ets:info(?SPAN_TAB, name) of
        undefined ->
            %% public + write_concurrency: spans are written directly by
            %% client processes, not routed through this server.
            ets:new(?SPAN_TAB, [named_table, public,
                                {write_concurrency, true},
                                {keypos, #span.span_id}]);
        _ ->
            ok
    end,
    {ok, #state{}}.
%% @private Unknown synchronous requests are answered immediately so a
%% caller does not sit in gen_server:call/2,3 until it times out — the
%% previous {noreply, State} never replied at all.
handle_call(_Msg, _From, State) ->
    {reply, ok, State}.

%% @private Unknown casts are ignored.
handle_cast(_Msg, State) ->
    {noreply, State}.
%%%
%%% Copyright 2020 RBKmoney
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
-module(machinegun_hay_metric_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").
-include_lib("machinegun_core/include/pulse.hrl").
%% tests descriptions
-export([all/0]).
-export([groups/0]).
-export([init_per_suite/1]).
-export([end_per_suite/1]).
-export([init_per_group/2]).
-export([end_per_group/2]).
-export([offset_bin_metric_test/1]).
-export([fraction_and_queue_bin_metric_test/1]).
-export([duration_bin_metric_test/1]).
-define(NS, <<"NS">>).
-define(ID, <<"ID">>).
%%
%% tests descriptions
%%
-type group_name() :: atom().
-type test_name() :: atom().
-type config() :: [{atom(), _}].
%% All test cases run by this suite, in order.
-spec all() -> [test_name() | {group, group_name()}].
all() ->
    [offset_bin_metric_test, fraction_and_queue_bin_metric_test, duration_bin_metric_test].

%% No test groups are defined.
-spec groups() -> [{group_name(), list(_), test_name()}].
groups() ->
    [].
%%
%% starting/stopping
%%
%% Start the applications the beats need (gproc and how_are_you with
%% empty publisher/handler lists so no metrics actually leave the node)
%% and stash connection options in the CT config.
-spec init_per_suite(config()) -> config().
init_per_suite(C) ->
    Apps = machinegun_ct_helper:start_applications([
        gproc,
        {how_are_you, [
            {metrics_publishers, []},
            {metrics_handlers, []}
        ]}
    ]),
    [
        {apps, Apps},
        {automaton_options, #{
            url => "http://localhost:8022",
            ns => ?NS,
            retry_strategy => undefined
        }},
        {event_sink_options, "http://localhost:8022"}
        | C
    ].
%% Reset how_are_you's environment before stopping the applications that
%% init_per_suite/1 started.
-spec end_per_suite(config()) -> ok.
end_per_suite(C) ->
    ok = application:set_env(how_are_you, metrics_publishers, []),
    ok = application:set_env(how_are_you, metrics_handlers, []),
    machinegun_ct_helper:stop_applications(?config(apps, C)).
%% No per-group setup/teardown: groups/0 is empty.
-spec init_per_group(group_name(), config()) -> config().
init_per_group(_, C) ->
    C.
-spec end_per_group(group_name(), config()) -> ok.
end_per_group(_, _C) ->
    ok.
%% Tests
-spec offset_bin_metric_test(config()) -> _.
%% Feed timer-created beats whose target timestamps sit at exponentially
%% spaced offsets (2^0 - 10 .. 2^10 - 10 seconds) around "now", covering
%% both past and future bins.
offset_bin_metric_test(_C) ->
    _ = [begin
             Offset = erlang:trunc(math:pow(2, Power)) - 10,
             ok = test_beat(#mg_core_timer_lifecycle_created{
                 namespace = ?NS,
                 target_timestamp = genlib_time:unow() + Offset
             })
         end || Power <- lists:seq(0, 10, 1)].
-spec fraction_and_queue_bin_metric_test(config()) -> _.
%% Feed worker-start beats whose queue length sweeps 0..200 against a
%% fixed limit of 100, exercising fractions both below and above 100%.
fraction_and_queue_bin_metric_test(_C) ->
    EmitBeat =
        fun(QueueLen) ->
            ok = test_beat(#mg_core_worker_start_attempt{
                namespace = ?NS,
                msg_queue_len = QueueLen,
                msg_queue_limit = 100
            })
        end,
    _ = [EmitBeat(QueueLen) || QueueLen <- lists:seq(0, 200)].
-spec duration_bin_metric_test(config()) -> _.
%% Feed processing-finished beats with exponentially growing durations
%% (2^0 .. 2^20) to hit every duration bin.
duration_bin_metric_test(_C) ->
    _ = [begin
             Duration = erlang:trunc(math:pow(2, Power)),
             ok = test_beat(#mg_core_machine_process_finished{
                 namespace = ?NS,
                 duration = Duration,
                 processor_impact = {init, []}
             })
         end || Power <- lists:seq(0, 20, 1)].
%% Utils
%% Metrics utils
%% Push one beat through the hay pulse handler; crashes the test if the
%% handler does not accept it. (The previous version carried extraction
%% residue fused onto its last line, which made the file unparsable.)
-spec test_beat(mg_woody_api_pulse:beat()) -> ok.
test_beat(Beat) ->
    machinegun_pulse_hay:handle_beat(undefined, Beat).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2015 Helium Systems, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(cluster_membership_SUITE).
-compile({parse_transform, lager_transform}).
-export([
%% suite/0,
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2,
all/0
]).
-export([
singleton_test/1,
join_test/1,
join_nonexistant_node_test/1,
join_self_test/1,
leave_test/1,
leave_rejoin_test/1,
sticky_membership_test/1
]).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("kernel/include/inet.hrl").
%% ===================================================================
%% common_test callbacks
%% ===================================================================
%% Suite setup: start lager, make sure epmd is running, then turn this
%% beam into a distributed node named runner@<host> (tolerating the case
%% where distribution is already up).
init_per_suite(_Config) ->
    lager:start(),
    %% this might help, might not...
    os:cmd(os:find_executable("epmd")++" -daemon"),
    {ok, Hostname} = inet:gethostname(),
    case net_kernel:start([list_to_atom("runner@"++Hostname), shortnames]) of
        {ok, _} -> ok;
        {error, {already_started, _}} -> ok;
        {error, {{already_started, _},_}} -> ok
    end,
    _Config.
%% Suite teardown: only lager needs stopping; slave nodes are handled
%% per testcase.
end_per_suite(_Config) ->
    application:stop(lager),
    _Config.
%% Start four fresh slave nodes in parallel for every testcase and hook
%% them into cover analysis; the node list is exposed under the `nodes'
%% config key.
init_per_testcase(Case, Config) ->
    Nodes = plumtree_test_utils:pmap(fun(N) ->
                    plumtree_test_utils:start_node(N, Config, Case)
            end, [jaguar, shadow, thorn, pyros]),
    {ok, _} = ct_cover:add_nodes(Nodes),
    [{nodes, Nodes}|Config].
%% Stop all four slave nodes (idempotent if a testcase already stopped some).
end_per_testcase(_, _Config) ->
    plumtree_test_utils:pmap(fun(Node) ->ct_slave:stop(Node) end, [jaguar, shadow, thorn, pyros]),
    ok.
%% Every testcase in this suite.
all() ->
    [singleton_test, join_test, join_nonexistant_node_test, join_self_test,
     leave_test, leave_rejoin_test, sticky_membership_test].
%% Freshly started nodes that never joined anything must each see a
%% cluster consisting of only themselves.
singleton_test(Config) ->
    Nodes = proplists:get_value(nodes, Config),
    ok = ct_cover:remove_nodes(Nodes),
    [[Node] = plumtree_test_utils:get_cluster_members(Node) || Node <- Nodes],
    ok.
%% Joining node2 from node1 must produce a two-node cluster visible from
%% both sides, while the remaining nodes stay singletons.
join_test(Config) ->
    [Node1, Node2 |Nodes] = proplists:get_value(nodes, Config),
    ?assertEqual(ok, rpc:call(Node1, plumtree_peer_service, join, [Node2])),
    Expected = lists:sort([Node1, Node2]),
    ok = plumtree_test_utils:wait_until_joined([Node1, Node2], Expected),
    ?assertEqual(Expected, lists:sort(plumtree_test_utils:get_cluster_members(Node1))),
    ?assertEqual(Expected, lists:sort(plumtree_test_utils:get_cluster_members(Node2))),
    %% make sure the last 2 are still singletons
    [?assertEqual([Node], plumtree_test_utils:get_cluster_members(Node)) || Node <- Nodes],
    ok.
%% Joining an unreachable node must fail cleanly and leave the local
%% membership untouched.
join_nonexistant_node_test(Config) ->
    [Node1|_] = proplists:get_value(nodes, Config),
    ?assertEqual({error, not_reachable}, rpc:call(Node1, plumtree_peer_service, join,
                                                  [fake@fakehost])),
    ?assertEqual([Node1], plumtree_test_utils:get_cluster_members(Node1)),
    ok.
%% A node must refuse to join itself, again leaving membership untouched.
join_self_test(Config) ->
    [Node1|_] = proplists:get_value(nodes, Config),
    ?assertEqual({error, self_join}, rpc:call(Node1, plumtree_peer_service, join,
                                              [Node1])),
    ?assertEqual([Node1], plumtree_test_utils:get_cluster_members(Node1)),
    ok.
%% Build a 4-node cluster, have node1 leave, and verify the remaining
%% three agree on the shrunken membership while node1's VM shuts down.
leave_test(Config) ->
    [Node1|OtherNodes] = Nodes = proplists:get_value(nodes, Config),
    [?assertEqual(ok, rpc:call(Node, plumtree_peer_service, join, [Node1]))
     || Node <- OtherNodes],
    Expected = lists:sort(Nodes),
    ok = plumtree_test_utils:wait_until_joined(Nodes, Expected),
    [?assertEqual({Node, Expected}, {Node,
                                     lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- Nodes],
    ?assertEqual(ok, rpc:call(Node1, plumtree_peer_service, leave, [[]])),
    Expected2 = lists:sort(OtherNodes),
    ok = plumtree_test_utils:wait_until_left(OtherNodes, Node1),
    %% should be a 3 node cluster now
    [?assertEqual({Node, Expected2}, {Node,
                                      lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- OtherNodes],
    %% node1 should be offline
    ?assertEqual(pang, net_adm:ping(Node1)),
    ok.
%% Same as leave_test/1, but afterwards restart node1 and rejoin it via
%% node2 — the cluster must converge back to the full 4-node membership.
leave_rejoin_test(Config) ->
    [Node1|OtherNodes] = Nodes = proplists:get_value(nodes, Config),
    [Node2|_Rest] = OtherNodes,
    [?assertEqual(ok, rpc:call(Node, plumtree_peer_service, join, [Node1]))
     || Node <- OtherNodes],
    Expected = lists:sort(Nodes),
    ok = plumtree_test_utils:wait_until_joined(Nodes, Expected),
    [?assertEqual({Node, Expected}, {Node,
                                     lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- Nodes],
    ?assertEqual(ok, rpc:call(Node1, plumtree_peer_service, leave, [[]])),
    Expected2 = lists:sort(OtherNodes),
    ok = plumtree_test_utils:wait_until_left(OtherNodes, Node1),
    %% should be a 3 node cluster now
    [?assertEqual({Node, Expected2}, {Node,
                                      lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- OtherNodes],
    %% node1 should be offline
    ?assertEqual(pang, net_adm:ping(Node1)),
    plumtree_test_utils:start_node(jaguar, Config, leave_rejoin_test),
    %% rejoin cluster
    ?assertEqual(ok, rpc:call(Node1, plumtree_peer_service, join, [Node2])),
    ok = plumtree_test_utils:wait_until_joined(Nodes, Expected),
    [?assertEqual({Node, Expected}, {Node,
                                     lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- Nodes],
    ok.
%% Membership must be "sticky": a node that merely crashes (as opposed
%% to explicitly leaving) stays in everyone's membership view, and a
%% restarted node remembers its own view — including leaves that
%% happened while it was down, which it learns on reconnection.
sticky_membership_test(Config) ->
    [Node1|OtherNodes] = Nodes = proplists:get_value(nodes, Config),
    [?assertEqual(ok, rpc:call(Node, plumtree_peer_service, join, [Node1]))
     || Node <- OtherNodes],
    Expected = lists:sort(Nodes),
    ok = plumtree_test_utils:wait_until_joined(Nodes, Expected),
    [?assertEqual({Node, Expected}, {Node,
                                     lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- Nodes],
    ct_slave:stop(jaguar),
    ok = plumtree_test_utils:wait_until_offline(Node1),
    %% check the membership is the same
    [?assertEqual({Node, Expected}, {Node,
                                     lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- OtherNodes],
    plumtree_test_utils:start_node(jaguar, Config, sticky_membership_test),
    ?assertEqual({Node1, Expected}, {Node1,
                                     lists:sort(plumtree_test_utils:get_cluster_members(Node1))}),
    ct_slave:stop(jaguar),
    ok = plumtree_test_utils:wait_until_offline(Node1),
    [Node2|LastTwo] = OtherNodes,
    %% Node2 leaves while node1 is down; node1 must pick this up on restart.
    ?assertEqual(ok, rpc:call(Node2, plumtree_peer_service, leave, [[]])),
    ok = plumtree_test_utils:wait_until_left(LastTwo, Node2),
    ok = plumtree_test_utils:wait_until_offline(Node2),
    Expected2 = lists:sort(Nodes -- [Node2]),
    [?assertEqual({Node, Expected2}, {Node,
                                      lists:sort(plumtree_test_utils:get_cluster_members(Node))})
     || Node <- LastTwo],
    plumtree_test_utils:start_node(jaguar, Config, sticky_membership_test),
    ok = plumtree_test_utils:wait_until_left([Node1], Node2),
    ?assertEqual({Node1, Expected2}, {Node1,
                                      lists:sort(plumtree_test_utils:get_cluster_members(Node1))}),
    plumtree_test_utils:start_node(shadow, Config, sticky_membership_test),
    %% node 2 should be a singleton now
    ?assertEqual([Node2], plumtree_test_utils:get_cluster_members(Node2)),
    ok.
%% ===================================================================
%% utility functions
%% ===================================================================
%%%-------------------------------------------------------------------
%% @author <NAME> <<EMAIL>>
%% @copyright (C) 2017, <NAME>
%% @doc erl_knearest_neighbor.erl
%% K-nearest neighbors classifies a datapoint to the class of
%% its nearest neighbors among previous data.
%% K-nearest neighbors is lazy and requires to training. To classify a new datapoint the
%% algorithm calculates the distance between the datapoint and all records in the dataset.
%% In this program the data is symbolic so the distnace is computed by taking the overlap-distance.
%% After computing the distance to all data-records, the neighborhood is selected by taking the K-nearest
%% records. Finally the class is selected by computing the weighted-sum of classes in the neighborhood.
%% Weighted sum means that the closest neighbors have the highest weight, the least clost neighbors have the
%% lowest neighbor. Given the weighted-sum of all classes, the class with most weight is selected.
%% between the two attribute-sets.
%% Example use-case:
%% > c(erl_knearest_neighbor).
%% > Examples = erl_knearest_neighbor:examples_play_tennis().
%% > erl_knearest_neighbor:classify(4, [{outlook, overcast}, {temperature, hot}, {humidity, high}, {windy, true}], Examples).
%% > erl_knearest_neighbor:classify(4, [{outlook, rain}, {windy, true}], Examples).
%% > erl_knearest_neighbor:classify(4, [{outlook, rain}], Examples).
%% @end
%%%-------------------------------------------------------------------
-module(erl_knearest_neighbor).
-author('<NAME> <<EMAIL>>').
%% API
-export([examples_play_tennis/0, classify/3]).
%% types
-type attribute_value_pairs()::list(attribute_value_pair()).
-type attribute_value_pair()::{attribute(), attribute_value()}.
-type attribute():: atom().
-type attributes():: list(attribute()).
-type attribute_value():: atom().
-type values():: values().
-type classification():: atom().
-type classes():: list(classification()).
-type example_with_distance()::{float(), example()}.
-type example():: {attribute_value_pairs(), classification()}.
-type examples() :: list(example()).
%%====================================================================
%% API functions
%%====================================================================
%% Classify set of attribute-value pairs with k-nearest neighbor given K and set of training examples
-spec classify(integer(), attribute_value_pairs(), examples())-> classification().
classify(K, X, Examples)->
    WithDistances = [{overlap_distance(X, AVs), Example} || {AVs, _} = Example <- Examples],
    select_class(neighborhood(K, WithDistances)).
%%====================================================================
%% Internal functions
%%====================================================================
%% Calculates the distance between two symbolic objects, uses the
%% overlap-distance (0,1): an attribute contributes 0 when both objects
%% carry it with the same value, and 1 otherwise (differing value, or
%% missing from either side).
%% Fix: the attribute set is now a deduplicated union. The previous
%% version iterated over the plain concatenation attributes(X) ++
%% attributes(Y), so an attribute present in BOTH objects was compared
%% twice and a mismatch on it counted double, skewing distances.
-spec overlap_distance(attribute_value_pairs(), attribute_value_pairs())-> number().
overlap_distance(X, Y)->
    Attributes = lists:usort([A || {A, _} <- X] ++ [A || {A, _} <- Y]),
    lists:foldl(fun(A, Acc) ->
                        case {lists:keyfind(A, 1, X), lists:keyfind(A, 1, Y)} of
                            %% Same attribute, same value on both sides.
                            {{A, V}, {A, V}} -> Acc;
                            %% Differing value, or absent from one side.
                            {_, _} -> Acc + 1
                        end
                end, 0, Attributes).
%% Returns the K nearest examples (smallest distances) for the input
%% being classified. Sorting {Distance, Example} tuples uses Erlang term
%% order — the same total order the previous lists:max/1-based scan
%% relied on — so this selects the same K minimal elements while being
%% both simpler and O(n log n) instead of O(n*K). The neighborhood is a
%% set for the caller's weighted vote; its internal order is not part of
%% the contract.
-spec neighborhood(integer(), list(example_with_distance())) -> list(example_with_distance()).
neighborhood(K, Examples)->
    lists:sublist(lists:sort(Examples), K).
%% Selects the winning class via a distance-weighted vote: each neighbor
%% votes for its own class with weight 1/Dist^2 and the class with the
%% largest total weight wins.
%% Fixes two defects: (1) the per-neighbor weights were collected into a
%% list and the {WeightList, Class} tuples compared with lists:max/1,
%% i.e. weight lists were compared LEXICOGRAPHICALLY instead of summed —
%% not the documented weighted sum; (2) the -spec declared the return as
%% a neighborhood list although a classification atom is returned.
-spec select_class(list(example_with_distance())) -> classification().
select_class(NeighborHood)->
    Classes = sets:to_list(sets:from_list(classes(NeighborHood))),
    WeightedClasses = [{lists:sum([weighted_class_equals(C, Neighbor) || Neighbor <- NeighborHood]), C}
                       || C <- Classes],
    {_, Class} = lists:max(WeightedClasses),
    Class.
%% Weight of a neighbor's vote for Class: 1/Dist^2 when the neighbor's
%% class matches, 0.0 otherwise.
%% Fix: a zero distance (the query is an exact duplicate of a training
%% example) previously crashed with badarith in 1/math:pow(0, 2); such a
%% neighbor now casts a very large, effectively decisive vote instead.
-spec weighted_class_equals(classification(), example_with_distance()) -> float().
weighted_class_equals(Class, {Dist, {_, C}})->
    case class_equals(Class, C) of
        0 -> 0.0;
        1 when Dist == 0 -> 1.0e12;  %% exact match dominates the vote
        1 -> 1 / math:pow(Dist, 2)
    end.
%% 1 if classes are equal, otherwise 0.
-spec class_equals(classification(), classification())->integer().
class_equals(C, C)->
    1;
class_equals(_,_) ->
    0.
%% Extract list of classes from WeightedExamples
-spec classes(list(example_with_distance())) -> Classes::classes().
classes(Examples)->
    [Class || {_, {_, Class}} <- Examples].
%% Extract attributes from attribute-value pairs
-spec attributes(AV :: attribute_value_pairs()) -> Attributes::attributes().
attributes(AV)->
    [Attribute || {Attribute, _} <- AV].
%%====================================================================
%% Example Data
%%====================================================================
%% Sample set of examples
-spec examples_play_tennis() -> examples().
%% The classic "play tennis" training set (Quinlan) in the examples()
%% shape this module expects. The table is kept as compact tuples and
%% expanded into attribute-value pair lists, which is easier to audit
%% than 60 lines of nested lists. (The previous version also carried
%% extraction residue fused onto its closing line, which made the file
%% unparsable.)
examples_play_tennis()->
    Rows = [
        {sunny,    hot,  high,   false, not_play},
        {sunny,    hot,  high,   true,  not_play},
        {overcast, hot,  high,   false, play},
        {rain,     mild, high,   false, play},
        {rain,     cool, normal, false, play},
        {rain,     cool, normal, true,  not_play},
        {overcast, cool, normal, true,  play},
        {sunny,    mild, high,   false, not_play},
        {sunny,    cool, normal, false, play},
        {rain,     mild, normal, false, play},
        {sunny,    mild, normal, true,  play},
        {overcast, mild, high,   true,  play},
        {overcast, hot,  normal, false, play},
        {rain,     mild, high,   true,  not_play}
    ],
    [{[{outlook, O}, {temperature, T}, {humidity, H}, {windy, W}], Class}
     || {O, T, H, W, Class} <- Rows].
%% -------------------------------------------------------------------
%%
%% riaknostic - automated diagnostic tools for Riak
%%
%% Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Diagnostic that compares the configured
%% <code>ring_creation_size</code> to the actual size of the ring.
-module(riaknostic_check_ring_size).
-behaviour(riaknostic_check).
-export([description/0,
valid/0,
check/0,
format/1]).
%% Short human-readable name of this diagnostic.
-spec description() -> string().
description() ->
    "Ring size valid".
%% The check only makes sense when we can reach the Riak node.
-spec valid() -> boolean().
valid() ->
    riaknostic_node:can_connect().
%% Compare the configured ring_creation_size against the actual ring and
%% verify the configured size is a power of two.
-spec check() -> [{lager:log_level(), term()}].
check() ->
    Stats = riaknostic_node:stats(),
    {ring_creation_size, RingSize} = lists:keyfind(ring_creation_size, 1, Stats),
    {ring_num_partitions, NumPartitions} = lists:keyfind(ring_num_partitions, 1, Stats),
    lists:append([
        [ {notice, {ring_size_unequal, RingSize, NumPartitions}} || RingSize /= NumPartitions ],
        %% A positive integer is a power of two iff clearing its lowest
        %% set bit leaves zero. The previous expression,
        %% `RingSize band -(bnot RingSize)', reduces to
        %% `RingSize band (RingSize + 1)', which wrongly accepted
        %% non-powers such as 6 or 10 and flagged 1 (= 2^0) as invalid.
        [ {critical, {ring_size_not_exp2, RingSize}} || (RingSize band (RingSize - 1)) =/= 0 ]
    ]).
%% Turn a check/0 finding into an io:format template plus arguments.
-spec format(term()) -> {io:format(), [term()]}.
format({ring_size_unequal, Size, Partitions}) ->
    {"The configured ring_creation_size (~B) is not equal to the number of partitions in the ring (~B). "
     "Please verify that the ring_creation_size in app.config is correct.", [Size, Partitions]};
format({ring_size_not_exp2, Size}) ->
    {"The configured ring_creation_size (~B) should always be a power of 2. "
     "Please reconfigure the ring_creation_size in app.config.", [Size]}.
%format({ring_size_too_small, S, N}) ->
% {"With a ring_creation_size (~B) and ~B nodes participating in the cluster, each node is responsible for less than 3% of the data. "
% " You have too many nodes for this size ring. "
% "Please consider migrating data to a cluster with 2 or 4x your current ring size.", [S, N]};
%format({too_few_nodes_for_ring, S, N}) ->
% {"With a ring_creation_size (~B) and ~B nodes participating in the cluster, each node is responsible for more than 70% of the data. "
% " You have too few nodes for this size ring. "
% "Please consider joining more nodes to your cluster.", [S, N]}.
%%%------------------------------------------------------------------------------
%%% @copyright (c) 2012, DuoMark International, Inc. All rights reserved
%%% @author <NAME> <<EMAIL>>
%%% @doc
%%% A cache implemented using a process dictionary to manage an index
%%% of data where each datum is a separate erlang process.
%%% @since v0.0.1
%%% @end
%%%------------------------------------------------------------------------------
-module(esp_cache).
-include_lib("erlangsp/include/license_and_copyright.hrl").
-author(jayn).
%% Public API
-export([new_cache_coop/1]).
%% Testing API
-export([new_directory_node/1, new_worker_node/1, new_datum_node/3]).
%% Node setup functions
-export([
init_directory/1, value_request/2, % Directory Coop_Node
init_mfa_worker/1, make_new_datum/3, % MFA Worker Coop_Node
init_datum/1, manage_datum/2 % Cached Datum Coop_Node
]).
%%------------------------------------------------------------------------------
%%
%% Erlang/SP caching is implemented using a Coop pattern.
%%
%% Functionally, there is a Coop_Node for the central directory
%% of data keys which each reference a cached datum. Each datum
%% is held in a separate, dynamic Coop_Node instance. A round-
%% robin pool of workers is used to compute values that are not
%% passed directly to a Coop_Node datum instance, to achieve
%% limited but load-balanced concurrency.
%%
%% Two entries exist per Key:
%% Key lookup: Key => Coop_Node
%% Expired Index: {Key, Coop_Node} => Node_Task_Pid
%%
%% This module includes one comparative implementations:
%% 1) Process dictionary for Keys
%%
%% [Two others are not yet implemented]:
%% 2) Public shared concurrent read ETS table for Keys
%% - One Coop_Node writing to it
%% 3) Concurrent Coop_Node skiplist for Keys
%%
%% An application which employs this cache can either supply a
%% value directly, or provide {Mod, Fun, Args} to execute which
%% result in a cached value. Supplying a value directly incurs
%% the overhead of passing that value as a message argument to
%% a minimum of 2 processes. Using the MFA approach provides a
%% way to asynchronously generate a large data structure, or
%% to cache a value which may take a long time to initially
%% compute without the penalty of passing that data, but rather
%% waiting for the MFA to complete before the value is available.
%%
%% A global idle expiration time can be set for cached values,
%% or an external application can implement an expiration policy
%% by explicitly removing values from the cache.
%%
%% Future enhancements are expected to include:
%% 1) Independent functions per datum for computing expiration
%% 2) Invoking a function on cached datum rather than returning it
%% 3) Sumbitting a function to update a cached datum
%%
%%------------------------------------------------------------------------------
-include_lib("coop/include/coop.hrl").
-include_lib("coop/include/coop_dag.hrl").
-include("esp_cache.hrl").
%% Coop:
%% Dir => X workers => | no receiver
%% Dynamic => Datum workers
%% Build the cache cooperative: one directory node fanning out
%% round-robin to Num_Workers MFA workers, with no fan-in stage. Datum
%% nodes are created dynamically by the workers at runtime.
new_cache_coop(Num_Workers) ->
    %% Make the cache directory and worker function specifications...
    Cache_Directory = coop:make_dag_node(cache,
                                         ?COOP_INIT_FN(init_directory, []),
                                         ?COOP_TASK_FN(value_request),
                                         [],
                                         round_robin),
    %% Workers get access_coop_head so they can notify the directory of
    %% newly spawned datum nodes.
    Workers = [coop:make_dag_node(list_to_atom("worker-" ++ integer_to_list(N)),
                                  ?COOP_INIT_FN(init_mfa_worker, {}),
                                  ?COOP_TASK_FN(make_new_datum),
                                  [access_coop_head]
                                 )
               || N <- lists:seq(1, Num_Workers)],
    %% Cache -E Workers -> none ; Dynamic Cache Nodes
    %% One cache directory fans out to Num_Workers with no final fan in.
    %% New datum nodes are created dynamically by the workers.
    coop:new_fanout(Cache_Directory, Workers, none).
%%========================= Directory Node =================================
-type coop_proc() :: pid() | coop_head() | coop_node().
-type receiver() :: {reference(), coop_proc()}.
-type change_cmd() :: add | replace.
-type value_request() :: {?VALUE, any()} | {?MFA, {module(), atom(), list()}}.
-type change_request() :: {change_cmd(), value_request(), receiver()} | {remove, receiver()}.
-type lookup_request() :: {any(), receiver()}.
-type fep_request() :: {any(), value_request(), receiver()}.
-type fetch_cmd() :: lookup.
%% -type stats_cmd() :: num_keys.
-spec value_request({}, {change_cmd(), change_request()}) -> no_return().
-spec change_value ({}, {change_cmd(), change_request()}, coop_proc() | undefined) -> {{}, ?COOP_NOOP} | {{}, {add, change_request()}}.
-spec return_value ({}, {fetch_cmd(), lookup_request() | fep_request()}, coop_proc() | undefined) -> {{}, ?COOP_NOOP}.
%% Create a new directory Coop_Node.
%% Used by tests to build a standalone directory node attached to an
%% existing Coop_Head's kill switch.
new_directory_node(Coop_Head) ->
    Kill_Switch = coop_head:get_kill_switch(Coop_Head),
    coop_node:new(Coop_Head, Kill_Switch, ?COOP_TASK_FN(value_request), ?COOP_INIT_FN(init_directory, {}), []).
%% No state needed.
init_directory(State) -> State.
%% Directory task function. The key index lives in this process's
%% dictionary with two entries per key:
%%   Key => Coop_Node  and  {Key, Coop_Node} => Node_Task_Pid.
%% Modify the cached value process and send the new value to a dynamic downstream coop_node...
value_request(State, {remove, {Key, _Rcvr} } = Req) -> change_value(State, Req, get(Key));
value_request(State, {add, {Key, _Chg_Type, _Rcvr} } = Req) -> change_value(State, Req, get(Key));
value_request(State, {replace, {Key, _Chg_Type, _Rcvr} } = Req) -> change_value(State, Req, get(Key));
%% Return the cached value to a dynamic downstream coop_node...
value_request(State, {lookup, {Key, _Rcvr} } = Req) -> return_value(State, Req, get(Key));
%% Return the number of active keys...
value_request(State, {num_keys, {Ref, Rcvr}}) ->
    %% 2 entries for each key and proc_lib added '$ancestors' and '$initial_call'
    coop:relay_data(Rcvr, {Ref, (length(get()) - 2) div 2}),
    {State, ?COOP_NOOP};
%% Expiration of process removes all references to it in process dictionary.
%% Key => Coop_Node + {Key, Coop_Node} => Node_Data_Pid (the monitored Pid that went down)
value_request(State, {'DOWN', _Ref, process, Pid, _Reason}) ->
    [begin erase(Key), erase(Coop_Key) end || {Key, _Coop_Node} = Coop_Key <- get_keys(Pid), get(Coop_Key) =:= Pid],
    {State, ?COOP_NOOP};
%% New dynamically created Coop_Nodes are monitored and placed in the process dictionary.
%% Any existing Coop_Node for the same key is expired.
%% NOTE(review): the replacement branch below looks broken and needs
%% confirming: (a) erlang:demonitor/2 takes a monitor REFERENCE and an
%% option list, so erlang:demonitor(process, Node_Task_Pid) should
%% badarg; (b) the first put/2 is keyed on the NEW {Key, Coop_Node}, so
%% its return value is not the old node — the previous Coop_Node is the
%% (ignored) return of the second put/2.
value_request(State, {new, Key, #coop_node{task_pid=Node_Task_Pid} = Coop_Node}) ->
    erlang:monitor(process, Node_Task_Pid),
    case {put({Key, Coop_Node}, Node_Task_Pid), put(Key, Coop_Node)} of
        {undefined, undefined} -> no_existing_datum_to_expire;
        {Old_Coop_Node, _} -> erlang:demonitor(process, Node_Task_Pid), coop:relay_data(Old_Coop_Node, {expire})
    end,
    {State, ?COOP_NOOP}.
%% Third argument is the directory lookup result for the key: an
%% existing datum Coop_Node, or undefined on a miss. Returning a
%% non-NOOP second element forwards the request to the worker pool.
%% Terminate the Coop_Node containing the cached value if there is one...
change_value(State, {remove, {_Key, {Ref, Requester}}}, undefined) ->
    coop:relay_data(Requester, {Ref, undefined}),
    {State, ?COOP_NOOP};
change_value(State, {remove, {Key, {_Ref, _Rqstr} = Requester}}, Coop_Node) ->
    erase(Key),
    erase({Key, Coop_Node}),
    %% The datum node replies with its final value and then exits.
    coop:relay_data(Coop_Node, {expire, Requester}),
    {State, ?COOP_NOOP};
%% Update the Coop_Node containing the cached value...
change_value(State, {replace, {_Key, _Chg_Type, {_Ref, _Rqstr}} = New_Value }, undefined) -> value_request(State, {add, New_Value});
change_value(State, {replace, {_Key, {?VALUE, V}, {_Ref, _Rqstr} = Requester}}, Coop_Node) -> coop:relay_data(Coop_Node, {replace, V, Requester}), {State, ?COOP_NOOP};
%% But use the downstream worker pool if M:F(A) must be executed to get the value to cache...
change_value(State, {replace, {_Key, {?MFA, _MFA}, {_Ref, _Rqstr}} = Request}, Coop_Node) -> {State, {replace, Request, Coop_Node}};
%% Create a new dynamic Coop_Node containing the cached value using the downstream worker pool.
change_value(State, {add, {_Key, _Chg_Type, {_Ref, _Rqstr}}} = Request, undefined) -> {State, Request}; % Request is passed to a worker.
change_value(State, {add, {_Key, _Chg_Type, {Ref, Requester}}}, _Coop_Node) -> coop:relay_data(Requester, {Ref, defined}), {State, ?COOP_NOOP}.
%% Send the cached value to the requester.
return_value(State, {lookup, {_Key, {Ref, Requester}} }, undefined) -> coop:relay_data(Requester, {Ref, undefined}), {State, ?COOP_NOOP};
return_value(State, {_Any_Type, {_Key, {_Ref, _Rqstr} = Requester}}, Coop_Node) -> coop:relay_data(Coop_Node, {get_value, Requester}), {State, ?COOP_NOOP}.
%%========================= M:F(A) Worker =================================
%% Create a new worker Coop_Node.
%% Used by tests to build a standalone MFA worker node.
new_worker_node(Coop_Head) ->
    Kill_Switch = coop_head:get_kill_switch(Coop_Head),
    coop_node:new(Coop_Head, Kill_Switch, ?COOP_TASK_FN(make_new_datum), ?COOP_INIT_FN(init_mfa_worker, {}), [access_coop_head]).
%% Kill_Switch is kept as State to spawn dynamic Coop_Nodes (Coop_Head is added as a function argument via Data Options)
init_mfa_worker({Coop_Head, {}}) -> coop_head:get_kill_switch(Coop_Head).
%% Compute the replacement value and forward to the existing Coop_Node...
make_new_datum(_Coop_Head, Kill_Switch, {replace, {_Key, {?MFA, {Mod, Fun, Args}}, {_Ref, _Rqstr} = Requester}, Coop_Node}) ->
    %% Directory already knows about this datum, using worker for potentially long running M:F(A)
    coop:relay_data(Coop_Node, {replace, Mod:Fun(Args), Requester}),
    {Kill_Switch, ?COOP_NOOP};
%% Create a new Coop_Node initialized with the value to cache, notifying the Coop_Head directory.
make_new_datum(Coop_Head, Kill_Switch, {add, {Key, {?VALUE, V}, {_Ref, _Rqstr} = Requester}}) ->
    New_Coop_Node = new_datum_node(Coop_Head, Kill_Switch, V),
    relay_new_datum(Coop_Head, Key, New_Coop_Node, Requester, Kill_Switch);
make_new_datum(Coop_Head, Kill_Switch, {add, {Key, {?MFA, {Mod, Fun, Args}}, {_Ref, _Rqstr} = Requester}}) ->
    New_Coop_Node = new_datum_node(Coop_Head, Kill_Switch, Mod:Fun(Args)),
    relay_new_datum(Coop_Head, Key, New_Coop_Node, Requester, Kill_Switch).
%% Register the new datum node with the directory (high priority so the
%% index is updated promptly) and reply to the original requester.
relay_new_datum(Coop_Head, Key, New_Coop_Node, Requester, Kill_Switch) ->
    coop:relay_high_priority_data(Coop_Head, {new, Key, New_Coop_Node}),
    coop:relay_data(New_Coop_Node, {get_value, Requester}),
    {Kill_Switch, ?COOP_NOOP}.
%%========================= Datum Node ====================================
%% New Datum processes are dynamically created Coop Nodes.
new_datum_node(Coop_Head, Kill_Switch, V) ->
coop_node:new(Coop_Head, Kill_Switch, ?COOP_TASK_FN(manage_datum), ?COOP_INIT_FN(init_datum, V), []).
%% Initialize the Coop_Node with the value to cache.
init_datum(V) -> V.
%% Cached datum is relayed to requester, no downstream listeners.
%% The node's state IS the cached value; expire terminates the node
%% (optionally returning the final value first), replace swaps the state.
manage_datum(_Datum, {expire} ) -> exit(normal);
manage_datum( Datum, {expire, {Ref, Requester}} ) -> coop:relay_data(Requester, {Ref, Datum}), exit(normal);
manage_datum( Datum, {get_value, {Ref, Requester}} ) -> coop:relay_data(Requester, {Ref, Datum}), {Datum, ?COOP_NOOP};
manage_datum(_Datum, {replace, New_Value, {Ref, Requester}} ) -> coop:relay_data(Requester, {Ref, New_Value}), {New_Value, ?COOP_NOOP}. | apps/examples/esp_cache/src/esp_cache.erl | 0.531696 | 0.412323 | esp_cache.erl | starcoder
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%%
-module(prop_causal_context).
-author("<NAME> <<EMAIL>>").
-include_lib("proper/include/proper.hrl").
-include_lib("eunit/include/eunit.hrl").
-define(ACTOR, oneof([a, b, c])).
-define(DOT, {?ACTOR, dot_store:dot_sequence()}).
-define(DOTL, list(?DOT)).
%% Every dot used to build a causal context must be an element of it.
prop_from_dots() ->
?FORALL(
L,
?DOTL,
begin
%% if we construct a cc from a list of dots,
%% all of those dots should be there
CC = cc(L),
lists:foldl(
fun(Dot, Acc) ->
Acc andalso causal_context:is_element(Dot, CC)
end,
true,
L
)
end
).
%% Adding a single dot to any causal context makes it an element.
prop_add_dot() ->
?FORALL(
{Dot, L},
{?DOT, ?DOTL},
begin
%% if we add a dot to a cc it should be there
CC = cc(L),
causal_context:is_element(
Dot,
causal_context:add_dot(Dot, CC)
)
end
).
%% next_dot/2 must always produce a fresh (not-yet-seen) dot for an actor.
prop_next_dot() ->
?FORALL(
{Actor, L},
{?ACTOR, ?DOTL},
begin
%% the next dot should not be part of the cc.
CC = cc(L),
Dot = causal_context:next_dot(Actor, CC),
not causal_context:is_element(
Dot,
CC
)
end
).
%% union/2 must behave like set union on the contained dots, and keep its
%% internal invariant: dots held in the uncompressed DotSet part must not
%% also be covered by the compressed part.
prop_union() ->
?FORALL(
{L1, L2},
{?DOTL, ?DOTL},
begin
CC1 = cc(L1),
CC2 = cc(L2),
Union = causal_context:union(CC1, CC2),
%% Dots from the cc's belong to the union.
R1 = dot_set:fold(
fun(Dot, Acc) ->
Acc andalso
causal_context:is_element(Dot, Union)
end,
true,
dot_set:union(causal_context:dots(CC1),
causal_context:dots(CC2))
),
%% Dots from the union belong to one of the cc's.
R2 = dot_set:fold(
fun(Dot, Acc) ->
Acc andalso
(
causal_context:is_element(Dot, CC1) orelse
causal_context:is_element(Dot, CC2)
)
end,
true,
causal_context:dots(Union)
),
%% Dots in the DotSet don't belong in the compressed part
%% (checked by probing a copy of the union with an empty DotSet).
{Compressed, DotSet} = Union,
FakeUnion = {Compressed, dot_set:new()},
R3 = dot_set:fold(
fun(Dot, Acc) ->
Acc andalso
not causal_context:is_element(Dot, FakeUnion)
end,
true,
DotSet
),
R1 andalso R2 andalso R3
end
).
%% @private
%% Build a causal context by inserting the dots one at a time, in a
%% randomised order so the properties do not depend on insertion order.
cc(L) ->
lists:foldl(
fun(Dot, CC) ->
causal_context:add_dot(Dot, CC)
end,
causal_context:new(),
shuffle(L)
).
%% @private
%% Random permutation via decorate-sort-undecorate with random keys.
%% NOTE(review): reseeding on every call, and the exsplus algorithm, are
%% legacy choices (exsss is the modern default) -- harmless in a test.
shuffle(L) ->
rand:seed(exsplus, erlang:timestamp()),
lists:map(
fun({_, E}) -> E end,
lists:sort(
lists:map(
fun(E) -> {rand:uniform(), E} end, L
)
)
). | test/prop_causal_context.erl | 0.574156 | 0.410372 | prop_causal_context.erl | starcoder
-module(gleam_stdlib).
-include_lib("eunit/include/eunit.hrl").
-export([should_equal/2, should_not_equal/2, should_be_ok/1, should_be_error/1,
atom_from_string/1, atom_create_from_string/1, atom_to_string/1,
map_get/2, iodata_append/2, iodata_prepend/2, identity/1,
decode_int/1, decode_bool/1, decode_float/1,
decode_thunk/1, decode_atom/1, decode_list/1, decode_field/2,
decode_element/2, parse_int/1, parse_float/1, compare_strings/2,
string_pop_grapheme/1, string_starts_with/2, string_ends_with/2,
string_pad/4, decode_tuple2/1, decode_tuple3/1, decode_tuple4/1,
decode_tuple5/1, decode_tuple6/1, decode_map/1, bit_string_int_to_u32/1,
bit_string_int_from_u32/1, bit_string_append/2, bit_string_part_/3,
decode_bit_string/1, compile_regex/2, regex_match/2, regex_split/2,
regex_scan/2, base_decode64/1, wrap_list/1, rescue/1, get_line/1]).
%% Thin EUnit assertion wrappers used by Gleam's test API.
should_equal(Actual, Expected) -> ?assertEqual(Expected, Actual).
should_not_equal(Actual, Expected) -> ?assertNotEqual(Expected, Actual).
should_be_ok(A) -> ?assertMatch({ok, _}, A).
should_be_error(A) -> ?assertMatch({error, _}, A).
%% Look up Key in Map, Gleam-style: {ok, Value} on a hit and
%% {error, nil} on a miss (nil is Gleam's unit value).
map_get(Map, Key) ->
    case maps:find(Key, Map) of
        {ok, _Value} = Found -> Found;
        error -> {error, nil}
    end.
%% Unconditionally create (or reuse) an atom from a UTF-8 binary.
atom_create_from_string(S) ->
binary_to_atom(S, utf8).
%% Render an atom as a UTF-8 binary.
atom_to_string(S) ->
atom_to_binary(S, utf8).
%% Safe lookup: only succeeds when the atom already exists, so untrusted
%% input cannot grow the atom table.
atom_from_string(S) ->
try {ok, binary_to_existing_atom(S, utf8)}
catch error:badarg -> {error, atom_not_loaded}
end.
%% Iodata is built by nesting, never by copying; these are O(1).
iodata_append(Iodata, String) -> [Iodata, String].
iodata_prepend(Iodata, String) -> [String, Iodata].
%% Used by Gleam for zero-cost coercions.
identity(X) -> X.
%% Build a uniform decode-failure result,
%% e.g. {error, <<"Expected an int, got a list">>}.
decode_error_msg(Type, Data) ->
    {error, iolist_to_binary(io_lib:format("Expected ~s, got ~s", [Type, classify(Data)]))}.

%% Describe an Erlang term for use in decode error messages.
%% The is_boolean/1 clause must come BEFORE is_atom/1: true and false are
%% atoms, so with the atom test first the boolean clause was unreachable
%% and booleans were reported as "an atom".
classify(X) when is_boolean(X) -> "a bool";
classify(X) when is_atom(X) -> "an atom";
classify(X) when is_binary(X) -> "a binary";
classify(X) when is_integer(X) -> "an int";
classify(X) when is_float(X) -> "a float";
classify(X) when is_list(X) -> "a list";
classify(X) when is_function(X, 0) -> "a zero arity function";
classify(X) when is_tuple(X) -> ["a ", integer_to_list(tuple_size(X)), " element tuple"];
classify(_) -> "some other type".
%% Shape-checking pass-throughs for Gleam's dynamic decoding: each returns
%% {ok, Data} when Data already has the expected shape, otherwise a
%% human-readable {error, Binary} built by decode_error_msg/2.
decode_tuple2({_, _} = T) -> {ok, T};
decode_tuple2(Data) -> decode_error_msg("a 2 element tuple", Data).
decode_tuple3({_, _, _} = T) -> {ok, T};
decode_tuple3(Data) -> decode_error_msg("a 3 element tuple", Data).
decode_tuple4({_, _, _, _} = T) -> {ok, T};
decode_tuple4(Data) -> decode_error_msg("a 4 element tuple", Data).
decode_tuple5({_, _, _, _, _} = T) -> {ok, T};
decode_tuple5(Data) -> decode_error_msg("a 5 element tuple", Data).
decode_tuple6({_, _, _, _, _, _} = T) -> {ok, T};
decode_tuple6(Data) -> decode_error_msg("a 6 element tuple", Data).
decode_map(Data) when is_map(Data) -> {ok, Data};
decode_map(Data) -> decode_error_msg("a map", Data).
decode_atom(Data) when is_atom(Data) -> {ok, Data};
decode_atom(Data) -> decode_error_msg("an atom", Data).
decode_bit_string(Data) when is_bitstring(Data) -> {ok, Data};
decode_bit_string(Data) -> decode_error_msg("a bit_string", Data).
decode_int(Data) when is_integer(Data) -> {ok, Data};
decode_int(Data) -> decode_error_msg("an int", Data).
decode_float(Data) when is_float(Data) -> {ok, Data};
decode_float(Data) -> decode_error_msg("a float", Data).
decode_bool(Data) when is_boolean(Data) -> {ok, Data};
decode_bool(Data) -> decode_error_msg("a bool", Data).
decode_thunk(Data) when is_function(Data, 0) -> {ok, Data};
decode_thunk(Data) -> decode_error_msg("a zero arity function", Data).
decode_list(Data) when is_list(Data) -> {ok, Data};
decode_list(Data) -> decode_error_msg("a list", Data).
%% Fetch Key from a map, with a decode-style error when the key is absent
%% or Data is not a map (the map pattern #{Key := _} only matches maps).
decode_field(Data, Key) ->
case Data of
#{Key := Value} ->
{ok, Value};
_ ->
decode_error_msg(io_lib:format("a map with key `~p`", [Key]), Data)
end.
%% Fetch the 0-indexed Position-th element of a tuple, returning a decode
%% error when Data is not a tuple or the index is out of range.
%% Replaces the previous `case catch element(...)` version: that pattern
%% misreported a stored value that happened to be shaped {'EXIT', _} as an
%% out-of-range error. An explicit bounds check has no such ambiguity.
decode_element(Data, Position) when is_tuple(Data) ->
    case Position >= 0 andalso Position < tuple_size(Data) of
        true ->
            {ok, element(Position + 1, Data)};
        false ->
            decode_error_msg(["a tuple of at least ", integer_to_list(Position + 1), " size"], Data)
    end;
decode_element(Data, _Position) -> decode_error_msg("a tuple", Data).
%% Parse a binary as an integer; {error, nil} on malformed input.
%% Uses try/catch instead of the legacy `case catch ...` idiom, which
%% conflates exception classes and loses intent.
parse_int(String) ->
    try
        {ok, binary_to_integer(String)}
    catch
        error:badarg -> {error, nil}
    end.

%% Parse a binary as a float; {error, nil} on malformed input.
%% Note binary_to_float/1 requires a decimal point, so <<"3">> is an error.
parse_float(String) ->
    try
        {ok, binary_to_float(String)}
    catch
        error:badarg -> {error, nil}
    end.
%% Three-way comparison using Erlang term order, which on binaries is
%% byte-wise lexicographic. Returns lt | eq | gt.
compare_strings(Lhs, Rhs) when Lhs < Rhs -> lt;
compare_strings(Lhs, Rhs) when Lhs == Rhs -> eq;
compare_strings(_Lhs, _Rhs) -> gt.
%% Does String begin with Prefix? The empty prefix matches anything.
string_starts_with(_, <<>>) -> true;
string_starts_with(String, Prefix) when byte_size(Prefix) > byte_size(String) -> false;
string_starts_with(String, Prefix) ->
    PrefixSize = byte_size(Prefix),
    <<Head:PrefixSize/binary, _/binary>> = String,
    Head =:= Prefix.

%% Does String end with Suffix? The empty suffix matches anything.
string_ends_with(_, <<>>) -> true;
string_ends_with(String, Suffix) when byte_size(Suffix) > byte_size(String) -> false;
string_ends_with(String, Suffix) ->
    Offset = byte_size(String) - byte_size(Suffix),
    <<_:Offset/binary, Tail/binary>> = String,
    Tail =:= Suffix.
%% Pad String to Length on side Dir (leading/trailing/both) with PadString,
%% then force the chardata result back into a UTF-8 binary. A non-binary
%% result from unicode:characters_to_binary/1 means invalid UTF-8 and is
%% raised as an error.
string_pad(String, Length, Dir, PadString) ->
Chars = string:pad(String, Length, Dir, binary_to_list(PadString)),
case unicode:characters_to_binary(Chars) of
Bin when is_binary(Bin) -> Bin;
Error -> erlang:error({gleam_error, {string_invalid_utf8, Error}})
end.
%% Split off the first grapheme cluster: {ok, {Head, Rest}} or {error, nil}
%% for the empty string / invalid input (string:next_grapheme/1 returns []
%% or an error tuple in those cases, neither of which matches [Next|Rest]).
string_pop_grapheme(String) ->
case string:next_grapheme(String) of
[ Next | Rest ] ->
{ok, {unicode:characters_to_binary([Next]), unicode:characters_to_binary(Rest)}};
_ -> {error, nil}
end.
%% Concatenate two bit strings.
bit_string_append(First, Second) ->
<<First/bitstring, Second/bitstring>>.
%% binary:part/3 wrapper returning {error, nil} instead of raising badarg
%% on an out-of-range slice.
bit_string_part_(Bin, Pos, Len) ->
try {ok, binary:part(Bin, Pos, Len)}
catch error:badarg -> {error, nil}
end.
%% Encode an integer in [0, 2^32) as a 4-byte big-endian binary.
bit_string_int_to_u32(I) when I >= 0, I < 16#100000000 ->
    {ok, <<I:32>>};
bit_string_int_to_u32(_) ->
    {error, nil}.

%% Decode a 4-byte binary as an unsigned big-endian 32-bit integer.
bit_string_int_from_u32(Bin) ->
    case Bin of
        <<I:32>> -> {ok, I};
        _ -> {error, nil}
    end.
%% Compile a regex from Gleam's {options, Caseless, Multiline} record.
%% `Bool andalso atom` yields either the option atom or false; the false
%% entries are filtered out before handing the list to re:compile/2.
compile_regex(String, Options) ->
{options, Caseless, Multiline} = Options,
OptionsList = [
unicode,
Caseless andalso caseless,
Multiline andalso multiline
],
FilteredOptions = [Option || Option <- OptionsList, Option /= false],
case re:compile(String, FilteredOptions) of
{ok, MP} -> {ok, MP};
{error, {Str, Pos}} ->
{error, {compile_error, unicode:characters_to_binary(Str), Pos}}
end.
%% True when Regex matches anywhere in String.
regex_match(Regex, String) ->
re:run(String, Regex) /= nomatch.
%% Split String on every match of Regex.
regex_split(Regex, String) ->
re:split(String, Regex).
%% Map one capture-group index pair to Gleam's Option type: none for an
%% empty slice, {some, Binary} otherwise.
regex_submatches(String, {S, L}) ->
SubMatch = string:slice(String, S, L),
case string:is_empty(SubMatch) of
true -> none;
false -> {some, SubMatch}
end.
%% Build a Gleam Match record from re:run capture offsets; the head pair
%% is the whole match, the tail pairs are its submatches.
regex_matches(String, [{S, L} | Submatches]) ->
{match, binary:part(String, S, L), S,
lists:map(fun(X) -> regex_submatches(String, X) end, Submatches)}.
%% All matches of Regex in String as Match records ([] when none).
regex_scan(Regex, String) ->
case re:run(String, Regex, [global]) of
{match, Captured} -> lists:map(fun(X) -> regex_matches(String, X) end, Captured);
nomatch -> []
end.
%% Decode a base64 binary, returning {error, nil} on malformed input.
%% base64:decode/1 raises error:badarg on invalid input -- the previous
%% clause caught error:badarith, which decode never raises, so bad input
%% crashed the caller instead of producing {error, nil}. Catch any error
%% class to be robust across OTP versions.
base_decode64(S) ->
    try
        {ok, base64:decode(S)}
    catch
        error:_ -> {error, nil}
    end.
%% Ensure a value is a list, wrapping non-lists in a singleton.
wrap_list(X) when is_list(X) -> X;
wrap_list(X) -> [X].
%% Run a thunk, converting any exception into a tagged {error, _} tuple
%% (class preserved as thrown/errored/exited; stacktrace intentionally
%% dropped).
rescue(F) ->
try {ok, F()}
catch
throw:X -> {error, {thrown, X}};
error:X -> {error, {errored, X}};
exit:X -> {error, {exited, X}}
end.
%% Read a line from stdin with Prompt; eof and read failures become
%% tagged errors.
get_line(Prompt) ->
case io:get_line(Prompt) of
eof -> {error, eof};
{error, _} -> {error, no_data};
Data -> {ok, Data}
end. | src/gleam_stdlib.erl | 0.504394 | 0.47244 | gleam_stdlib.erl | starcoder
%% Copyright (c) 2008-2013 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : erlog_file.erl
%% Author : <NAME>
%% Purpose : The Erlog file consulter.
-module(erlog_file).
-include("erlog_int.hrl").
%% Main interface functions.
-export([consult/2,reconsult/2]).
%% consult(File, DatabaseState) ->
%% {ok,NewState} | {error,Error} | {erlog_error,Error}.
%% reconsult(File, State) ->
%% {ok,NewState} | {error,Error} | {erlog_error,Error}.
%% Load/reload an Erlog file into the interpreter. Reloading will
%% abolish old definitons of clauses.
%% Load File, asserting every clause after DCG expansion. Clauses for
%% already-defined predicates are appended (assertz).
consult(File, St) ->
case erlog_io:read_file(File) of
{ok,Terms} ->
consult_terms(fun consult_assert/2, St, Terms);
Error -> Error
end.
%% Insert one term: expand DCG rules, then assertz into the database.
consult_assert(Term0, #est{db=Db0}=St) ->
Term1 = erlog_lib_dcg:expand_term(Term0),
Db1 = erlog_int:assertz_clause(Term1, Db0),
{ok,St#est{db=Db1}}.
%% Like consult/2, but the first clause seen for each functor abolishes
%% any previous definition of that functor. The Seen accumulator tracks
%% which functors have already been reset during this reconsult.
reconsult(File, St0) ->
case erlog_io:read_file(File) of
{ok,Terms} ->
case consult_terms(fun reconsult_assert/2, {St0,[]}, Terms) of
{ok,{St1,_Seen1}} -> {ok,St1};
Error -> Error
end;
Error -> Error
end.
reconsult_assert(Term0, {#est{db=Db0}=St,Seen}) ->
Term1 = erlog_lib_dcg:expand_term(Term0),
Func = functor(Term1),
case lists:member(Func, Seen) of
true ->
%% Functor already reset earlier in this file; just append.
Db1 = erlog_int:assertz_clause(Term1, Db0),
{ok,{St#est{db=Db1}, Seen}};
false ->
%% First clause for this functor: drop the old definition first.
Db1 = erlog_int:abolish_clauses(Func, Db0),
Db2 = erlog_int:assertz_clause(Term1, Db1),
{ok,{St#est{db=Db2},[Func|Seen]}}
end.
%% consult_terms(InsertFun, Database, Terms) ->
%% {ok,NewDatabase} | {erlog_error,Error}.
%% Add terms to the database using InsertFun. Ignore directives and
%% queries.
consult_terms(Ifun, Db, [{':-',_}|Ts]) ->
consult_terms(Ifun, Db, Ts);
consult_terms(Ifun, Db, [{'?-',_}|Ts]) ->
consult_terms(Ifun, Db, Ts);
consult_terms(Ifun, Db0, [T|Ts]) ->
%% InsertFun may signal failure either by throwing or by returning an
%% erlog_error tuple; both are normalised to {erlog_error,E}.
case catch Ifun(T, Db0) of
{ok,Db1} -> consult_terms(Ifun, Db1, Ts);
{erlog_error,E,_Db1} -> {erlog_error,E};
{erlog_error,E} -> {erlog_error,E}
end;
consult_terms(_Ifun, Db, []) -> {ok,Db}.
%% Functor of a clause: for a rule, the functor of its head.
functor({':-',H,_B}) -> erlog_int:functor(H);
functor(T) -> erlog_int:functor(T). | src/erlog_file.erl | 0.536799 | 0.432842 | erlog_file.erl | starcoder
%% @copyright 2011 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @version $Id$
-module(histogram_SUITE).
-author('<EMAIL>').
-vsn('$Id$').
-include("unittest.hrl").
-compile(export_all).
%% Common Test: the list of test cases to run.
all() -> [
add,
resize,
insert,
find_smallest_interval,
merge_interval
].
%% Per-case timeout.
suite() -> [ {timetrap, {seconds, 40}} ].
init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
ok.
%% Adding values below capacity keeps each as its own {Value, Count} bin,
%% sorted ascending.
add(_Config) ->
H = histogram:create(10),
Values = [3.5, 3.0, 2.0, 1.0],
H2 = lists:foldl(fun histogram:add/2, H, Values),
?equals(histogram:get_data(H2), [{1.0,1}, {2.0,1}, {3.0,1}, {3.5,1}]),
ok.
%% Exceeding capacity merges the closest pair (3.0 and 3.5 -> 3.25 x2).
resize(_Config) ->
H = histogram:create(3),
Values = [3.5, 3.0, 2.0, 1.0],
H2 = lists:foldl(fun histogram:add/2, H, Values),
?equals(histogram:get_data(H2), [{1.0,1}, {2.0,1}, {3.25,2}]),
ok.
%% NOTE(review): identical to add/1 above -- looks like a placeholder that
%% was never specialised for insertion behaviour.
insert(_Config) ->
H = histogram:create(10),
Values = [3.5, 3.0, 2.0, 1.0],
H2 = lists:foldl(fun histogram:add/2, H, Values),
?equals(histogram:get_data(H2), [{1.0,1}, {2.0,1}, {3.0,1}, {3.5,1}]),
ok.
%% The smallest gap between neighbouring bins is 3.5 - 3.0 = 0.5.
find_smallest_interval(_Config) ->
H = histogram:create(10),
Values = [3.5, 3.0, 2.0, 1.0],
H2 = lists:foldl(fun histogram:add/2, H, Values),
?equals(0.5, histogram:find_smallest_interval(histogram:get_data(H2))),
ok.
%% Merging the smallest interval combines its two bins into their midpoint
%% with summed counts.
merge_interval(_Config) ->
H = histogram:create(10),
Values = [3.5, 3.0, 2.0, 1.0],
H2 = lists:foldl(fun histogram:add/2, H, Values),
MinInterval = histogram:find_smallest_interval(histogram:get_data(H2)),
H3 = histogram:merge_interval(MinInterval, histogram:get_data(H2)),
?equals(0.5, MinInterval),
?equals(H3, [{1.0,1}, {2.0,1}, {3.25,2}]),
ok. | test/histogram_SUITE.erl | 0.621426 | 0.403038 | histogram_SUITE.erl | starcoder
-module(openapi_image_image_face_bluring_api).
-export([apply_image_image_face_bluring_post/2, apply_image_image_face_bluring_post/3,
get_versions_image_image_face_bluring_get/1, get_versions_image_image_face_bluring_get/2]).
-define(BASE_URL, "").
%% @doc Apply model for the face-bluring task for a given models
%%
%% Generated OpenAPI client code: POSTs Image as a multipart form field,
%% with an optional 'model' query parameter taken from Optional's params
%% map. (The underscore-prefixed _OptionalParams is deliberately used
%% after binding; the prefix only silences the unused-variable warning.)
-spec apply_image_image_face_bluring_post(ctx:ctx(), binary()) -> {ok, maps:map(), openapi_utils:response_info()} | {ok, hackney:client_ref()} | {error, term(), openapi_utils:response_info()}.
apply_image_image_face_bluring_post(Ctx, Image) ->
apply_image_image_face_bluring_post(Ctx, Image, #{}).
-spec apply_image_image_face_bluring_post(ctx:ctx(), binary(), maps:map()) -> {ok, maps:map(), openapi_utils:response_info()} | {ok, hackney:client_ref()} | {error, term(), openapi_utils:response_info()}.
apply_image_image_face_bluring_post(Ctx, Image, Optional) ->
_OptionalParams = maps:get(params, Optional, #{}),
Cfg = maps:get(cfg, Optional, application:get_env(kuberl, config, #{})),
Method = post,
Path = ["/image/image/face-bluring/"],
QS = lists:flatten([])++openapi_utils:optional_params(['model'], _OptionalParams),
Headers = [],
Body1 = {form, [{<<"image">>, Image}]++openapi_utils:optional_params([], _OptionalParams)},
ContentTypeHeader = openapi_utils:select_header_content_type([<<"multipart/form-data">>]),
Opts = maps:get(hackney_opts, Optional, []),
openapi_utils:request(Ctx, Method, [?BASE_URL, Path], QS, ContentTypeHeader++Headers, Body1, Opts, Cfg).
%% @doc Get list of models available for face-bluring
%%
%% Generated OpenAPI client code: plain GET with no query/body parameters.
-spec get_versions_image_image_face_bluring_get(ctx:ctx()) -> {ok, maps:map(), openapi_utils:response_info()} | {ok, hackney:client_ref()} | {error, term(), openapi_utils:response_info()}.
get_versions_image_image_face_bluring_get(Ctx) ->
get_versions_image_image_face_bluring_get(Ctx, #{}).
-spec get_versions_image_image_face_bluring_get(ctx:ctx(), maps:map()) -> {ok, maps:map(), openapi_utils:response_info()} | {ok, hackney:client_ref()} | {error, term(), openapi_utils:response_info()}.
get_versions_image_image_face_bluring_get(Ctx, Optional) ->
_OptionalParams = maps:get(params, Optional, #{}),
Cfg = maps:get(cfg, Optional, application:get_env(kuberl, config, #{})),
Method = get,
Path = ["/image/image/face-bluring/"],
QS = [],
Headers = [],
Body1 = [],
ContentTypeHeader = openapi_utils:select_header_content_type([]),
Opts = maps:get(hackney_opts, Optional, []),
openapi_utils:request(Ctx, Method, [?BASE_URL, Path], QS, ContentTypeHeader++Headers, Body1, Opts, Cfg). | clients/erlang-client/src/openapi_image_image_face_bluring_api.erl | 0.535341 | 0.401981 | openapi_image_image_face_bluring_api.erl | starcoder
%
% Copyright (c) 2016-2017 <NAME> <<EMAIL>>
% All rights reserved.
% Distributed under the terms of the MIT License. See the LICENSE file.
%
-module(lorawan_utils).
-export([index_of/2]).
-export([precise_universal_time/0, ms_diff/2, datetime_to_timestamp/1, apply_offset/2]).
-export([throw_info/2, throw_warning/2, throw_error/2]).
-include("lorawan.hrl").
-define(MEGA, 1000000).
%% Return the 1-based position of Item in List, or 'undefined' if absent.
index_of(Item, List) -> find_position(List, Item, 1).

%% Walk the list, counting positions from N; exact (=:=) match on Item.
find_position([], _Item, _N) -> undefined;
find_position([Item | _Rest], Item, N) -> N;
find_position([_Other | Rest], Item, N) -> find_position(Rest, Item, N + 1).
%% Millisecond difference between two erlang-timestamp triples.
%% Only defined when the first megasecond component is =< the second;
%% other calls raise function_clause -- apparently intentional (arguments
%% are expected to be ordered).
ms_diff({MSecs1, Secs1, USecs1}, {MSecs2, Secs2, USecs2}) when MSecs1 =< MSecs2 ->
1000*(?MEGA*(MSecs2-MSecs1)+(Secs2-Secs1))
+(USecs2-USecs1) div 1000.
%% Universal time with fractional (millisecond) seconds.
%% NOTE(review): calendar:universal_time/0 and erlang:timestamp/0 are two
%% separate clock reads, so the sub-second part can be off by up to a
%% second around a second boundary.
precise_universal_time() ->
{Date, {Hours, Min, Secs}} = calendar:universal_time(),
{_, _, USecs} = erlang:timestamp(),
{Date, {Hours, Min, Secs + (USecs div 1000)/1000}}.
%% Convert a (possibly fractional-seconds) datetime to an erlang-timestamp
%% triple; 'undefined' maps to the Unix epoch {0,0,0}.
datetime_to_timestamp({Date, {Hours, Min, Secs}}) ->
TotalSecs =
calendar:datetime_to_gregorian_seconds({Date, {Hours, Min, trunc(Secs)}})
- epoch_seconds(),
{TotalSecs div ?MEGA, TotalSecs rem ?MEGA, trunc(?MEGA*Secs)-?MEGA*trunc(Secs)};
datetime_to_timestamp(undefined) ->
{0, 0, 0}. %% midnight
%% Gregorian seconds at the Unix epoch (1970-01-01 00:00:00).
epoch_seconds() ->
calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}).
%% Shift a datetime by {Hours, Minutes, Seconds} (components may be
%% negative), preserving the fractional part of the seconds.
apply_offset({Date, {Hours, Min, Secs}}, {OHours, OMin, OSecs}) ->
TotalSecs =
calendar:datetime_to_gregorian_seconds({Date, {Hours, Min, trunc(Secs)}})
+ (60*((60*OHours) + OMin)) + OSecs,
{Date2, {Hours2, Min2, Secs2}} = calendar:gregorian_seconds_to_datetime(TotalSecs),
{Date2, {Hours2, Min2, Secs2+(Secs-trunc(Secs))}}.
%% Severity-specific wrappers: each accepts either a bare Entity or an
%% {Entity, EID} pair and funnels into throw_event/4.
throw_info({Entity, EID}, Text) ->
throw_event(info, Entity, EID, Text);
throw_info(Entity, Text) ->
throw_event(info, Entity, undefined, Text).
throw_warning({Entity, EID}, Text) ->
throw_event(warning, Entity, EID, Text);
throw_warning(Entity, Text) ->
throw_event(warning, Entity, undefined, Text).
throw_error({Entity, EID}, Text) ->
throw_event(error, Entity, EID, Text);
throw_error(Entity, Text) ->
throw_event(error, Entity, undefined, Text).
%% Log the event via lager at the given severity, then persist it.
throw_event(Severity, Entity, undefined, Event) ->
lager:log(Severity, self(), "~s ~p", [Entity, Event]),
write_event(Severity, Entity, undefined, Event);
throw_event(Severity, Entity, EID, Event) ->
lager:log(Severity, self(), "~s ~s ~p", [Entity, lorawan_mac:binary_to_hex(EID), Event]),
write_event(Severity, Entity, EID, Event).
%% Upsert the event into the mnesia 'events' table. Repeated events are
%% deduplicated by a hash of {Entity, EID, first element of Event}, so a
%% recurring event only bumps its counter and last_rx timestamp.
write_event(Severity, Entity, EID, Event) ->
Text = list_to_binary(io_lib:print(Event)),
%% md4 is cryptographically broken, but here it is only a dedup key for
%% the events table, not a security boundary.
EvId = crypto:hash(md4, term_to_binary({Entity, EID,
case Event of
{First, _} -> First;
Only -> Only
end})),
{atomic, ok} =
mnesia:transaction(fun() ->
case mnesia:read(events, EvId, write) of
[E] ->
mnesia:write(events, E#event{last_rx=calendar:universal_time(),
count=inc(E#event.count), text=Text}, write);
[] ->
mnesia:write(events, #event{evid=EvId, severity=Severity,
first_rx=calendar:universal_time(), last_rx=calendar:universal_time(),
count=1, entity=Entity, eid=EID, text=Text}, write)
end
end),
ok.
%% Counter increment tolerating an unset (undefined) field.
inc(undefined) -> 1;
inc(Num) -> Num+1.
-include_lib("eunit/include/eunit.hrl").
%% EUnit generator covering the time helpers above.
time_test_()-> [
?_assertEqual({0,1,0}, datetime_to_timestamp({{1970,1,1}, {0,0,1}})),
?_assertEqual({0,10,1000}, datetime_to_timestamp({{1970,1,1}, {0,0,10.001}})),
?_assertEqual(1900, ms_diff(datetime_to_timestamp({{2017,1,1}, {13,0,1.1}}), datetime_to_timestamp({{2017,1,1}, {13,0,3}}))),
?_assertEqual(1, ms_diff(datetime_to_timestamp({{2017,1,1}, {13,1,59.999}}), datetime_to_timestamp({{2017,1,1}, {13,2,0}}))),
?_assertEqual({{1989,11,17}, {16,59,10.001}}, apply_offset({{1989,11,17}, {18,0,10.001}}, {-1,-1,0}))
].
% end of file | src/lorawan_utils.erl | 0.625667 | 0.499512 | lorawan_utils.erl | starcoder
%%% @doc Abstraction layer for accessing persistant storage. Called
%%% cold-storage by the gang. Also defines the behavior of the callback
%%% modules.
%%%
%%% For convience sake, data handled by this module (and by extension the
%%% backend modules) should conform to the following format:
%%%
%%% `{RecordAtom, IdField, Field1, Field2, ..., FeildN, Created, Updated}'
%%%
%%% For example:
%%%
%%% `{pre_user, 1, <<"name">>, os:timestamp(), os:timestamp()}'
%%%
%%% The backends should check for undefined ids and automatically generate
%%% one. On a save, check for created and updated timestamps and update
%%% those.
%%%
-module(pre_data).
-behaviour(gen_server).
-type error_return() :: {'error', any()}.
-type comparison_op() :: '>' | '>=' | '<' | '=<' | '==' | '=:='.
-type search_parameter() :: {any(), any()} | {any(), comparison_op(), any()}.
-callback get_by_id(Type :: atom(), Id :: any()) -> {'ok', tuple()} | {'error', notfound} | error_return().
-callback save(Record :: tuple()) -> {'ok', tuple()} | error_return().
-callback delete(Type :: atom(), Id :: any()) -> 'ok' | error_return().
-callback search(Type :: atom(), Params :: [search_parameter()]) -> {'ok', []}.
-callback transaction(Fun :: fun(() -> any())) -> any().
% API
-export([start_link/1, stop/0]).
-export([get_by_id/2, search/2, save/1, delete/1, delete/2]).
-export([transaction/1]).
-export([export/2, import/1]).
% gen_server
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-define(callback_key, pre_data_callback).
-record(state, {
callback_mod, % the callback module implementing the behavior
workers = dict:new() % the pids currently doing a request, like save
}).
%% --------------------------------------------------------------------------------------------------------------------
%% API
%% --------------------------------------------------------------------------------------------------------------------
%% @doc Start linked to the calling process with the given callback module.
-spec start_link(CallbackModule :: atom()) -> {'ok', pid()}.
start_link(CallbackModule) ->
	gen_server:start_link({local, ?MODULE}, ?MODULE, CallbackModule, []).

%% @doc Stops the server with reason normal.
-spec stop() -> 'ok'.
stop() ->
	gen_server:cast(?MODULE, stop).

%% @doc Attempts to get a record of the given type with the given id. This
%% expects to be in the context of a transaction, otherwise explosions may
%% occur.
%% (Outside a transaction the call returns {error, no_transaction}.)
-spec get_by_id(Record :: atom(), Id :: any()) -> {'ok', tuple()} | {'error', 'notfound'}.
get_by_id(Record, Id) ->
	need_transaction_api(get_by_id, [Record, Id]).

%% @doc Stores the record to long term. Expected to be called in the
%% context of a transaction. If the id is `undefined' it is automatically
%% set.
-spec save(Record :: tuple()) -> {'ok', tuple()}.
save(Record) ->
	need_transaction_api(save, [Record]).

%% @doc Extract the type and id of the record and call {@link delete/2}.
%% Records follow the {Type, Id, ...} convention described in the module doc.
-spec delete(Record :: tuple()) -> 'ok'.
delete(Record) ->
	Type = element(1, Record),
	Id = element(2, Record),
	delete(Type, Id).

%% @doc Delete the record of the given type with the given Id. Expected to
%% be called within the context of a transaction.
-spec delete(Record :: atom(), Id :: any()) -> 'ok'.
delete(Record, Id) ->
	need_transaction_api(delete, [Record, Id]).

%% @doc Search the data backend for records of the given types with the
%% given properties. Expected to be called within the context of a
%% transaction.
%% Parameters are validated eagerly (bad comparison operators raise
%% {badarg, Op}) before touching the backend.
-spec search(Record :: atom(), Params :: [search_parameter()]) -> {'ok', [tuple()]} | {'error', any()}.
search(Record, Params) ->
	check_params(Params),
	need_transaction_api(search, [Record, Params]).

%% @doc Runs the fun as a transaction. This allows save/1, get_by_id/2,
%% delete/1,2 to be used such that the underlying data system can rollback
%% the changes if any later action fails. There is no acutal guarentee the
%% underlying data system supports transactions.
-spec transaction(TransFun :: fun()) -> any().
transaction(Fun) ->
	gen_server:call(?MODULE, {api, transaction, [Fun]}, infinity).
%% @doc Query the running system for all data and place it in a file on
%% the given directory. Each type of record handled by this system is
%% put into a file with ".hrl" appeneded to the record name.
%% NOTE(review): search/2 is specced to return {ok, List}, yet its result
%% here is passed straight to lists:foreach/2 as a list -- confirm the
%% callback's transaction/search return shape before relying on export.
-spec export(Dir :: filelib:dirname(), RecordTypes :: [atom()]) -> 'ok'.
export(Dir, Types) ->
	true = filelib:is_dir(Dir),
	lists:foreach(fun(Type) ->
		Recs = pre_data:transaction(fun() ->
			search(Type, [])
		end),
		{ok, File} = file:open(filename:join(Dir, Type) ++ ".hrl", [write]),
		lists:foreach(fun(Rec) ->
			io:format(File, "~p.~n", [Rec])
		end, Recs)
	end, Types).

%% @doc Import data stored in the file glob.
%% Each matching file is read with file:consult/1 and every term saved in
%% one transaction per file.
-spec import(Wildcard :: filelib:filename() | filelib:dirname()) -> 'ok'.
import(Wildcard) ->
	Files = filelib:wildcard(Wildcard),
	lists:foreach(fun(File) ->
		{ok, Terms} = file:consult(File),
		pre_data:transaction(fun() ->
			lists:foreach(fun(Term) ->
				pre_data:save(Term)
			end, Terms)
		end)
	end, Files).
%% --------------------------------------------------------------------------------------------------------------------
%% gen_server
%% --------------------------------------------------------------------------------------------------------------------
%% @hidden
init(CallbackModule) ->
	{ok, #state{callback_mod = CallbackModule}}.

%% --------------------------------------------------------------------------------------------------------------------

%% @private
%% Each api request is run in its own monitored worker process; the reply
%% is sent from the worker via gen_server:reply/2, so this server never
%% blocks on backend work. Workers are tracked in State's dict keyed by
%% the {Pid, Mon} pair spawn_monitor/1 returns.
handle_call({api, Function, Args}, From, State) ->
	#state{callback_mod = CallbackModule, workers = Workers} = State,
	PidMon = spawn_monitor(fun() ->
		% The real reason to do this is to force transaction required
		% functions to all run within the same pid. Some backends (mnesia)
		% require transations not cross pid bounderies (it uses the process
		% dictionary to track transactions). This does the same to keep
		% track of the callback module. This has the advantage of requiring
		% fewer trips to the pre_data process, so pragmatism beats the
		% 'pdict is dirty' purity.
		put(?callback_key, CallbackModule),
		Res = erlang:apply(CallbackModule, Function, Args),
		gen_server:reply(From, Res)
	end),
	Workers2 = dict:store(PidMon, From, Workers),
	{noreply, State#state{workers = Workers2}};

handle_call(_, _From, State) ->
	{reply, invalid, State}.
%% --------------------------------------------------------------------------------------------------------------------
%% @hidden
%% 'stop' (sent by stop/0) shuts the server down; other casts are ignored.
handle_cast(stop, State) ->
	{stop, normal, State};
handle_cast(_, State) ->
	{noreply, State}.
%% --------------------------------------------------------------------------------------------------------------------
%% @hidden
%% Reap a finished worker. Workers are stored under the {Pid, Mon} key
%% that spawn_monitor/1 returned, so both the lookup and the erase must
%% use that same key order. (The previous code erased with {Mon, Pid},
%% which never matched, leaking one dict entry per completed request.)
%% A non-normal exit means the worker died before replying, so answer the
%% waiting caller with {error, Why}.
handle_info({'DOWN', Mon, process, Pid, Why}, State) ->
	#state{workers = Workers} = State,
	Workers2 = dict:erase({Pid, Mon}, Workers),
	case dict:find({Pid, Mon}, Workers) of
		error ->
			lager:info("Didn't find a ~p in workers (~p)", [{Pid, Mon}, Workers]),
			ok;
		{ok, _From} when Why =:= normal; Why =:= shutdown ->
			ok;
		{ok, From} ->
			lager:warning("Something went horribly wrong with ~p: ~p", [Pid, Why]),
			gen_server:reply(From, {error, Why})
	end,
	{noreply, State#state{workers = Workers2}};

handle_info(_, State) ->
	{noreply, State}.
%% --------------------------------------------------------------------------------------------------------------------
%% @hidden
%% @hidden
%% gen_server terminate callback: logs the shutdown reason; this server
%% holds no resources that need explicit cleanup.
terminate(Reason, _State) ->
lager:info("Terminating due to ~p.", [Reason]),
ok.
%% @hidden
%% @hidden
%% gen_server code_change/3 must return {ok, NewState}. (Bug fix: the
%% previous implementation returned {reply, State}, which is not a valid
%% return value and would crash the server during a release upgrade.)
code_change(_OldVersion, State, _Extra) ->
    {ok, State}.
%% --------------------------------------------------------------------------------------------------------------------
%% @private
%% Validate a query-parameter list. Plain {Key, Value} pairs are always
%% accepted; {Key, Op, Value} triples are accepted only when Op is a
%% comparison operator, otherwise {badarg, Op} is raised. A malformed
%% element crashes with function_clause (let it crash).
check_params([]) ->
    ok;
check_params([{_Key, _Value} | Rest]) ->
    check_params(Rest);
check_params([{_Key, Op, _Value} | Rest]) ->
    case lists:member(Op, ['>', '>=', '<', '=<', '==', '=:=']) of
        true -> check_params(Rest);
        false -> error({badarg, Op})
    end.
%% @private
%% Dispatch an API function that must run inside a transaction. The
%% callback module was stashed under ?callback_key in the worker's process
%% dictionary by handle_call/3; when it is absent we are not running
%% inside a transaction worker, so the call is refused.
need_transaction_api(Function, Args) ->
    need_transaction_api(Function, Args, get(?callback_key)).

need_transaction_api(_Function, _Args, undefined) ->
    {error, no_transaction};
need_transaction_api(Function, Args, CallbackMod) ->
    erlang:apply(CallbackMod, Function, Args).
-module(assignment).
-export([perimeter/1, area/1, enclose/1, bits/1]).
-include_lib("eunit/include/eunit.hrl").
%%%%%%%%
% 1.24 %
%%%%%%%%
% Shapes
% ------
%
% Define a function perimeter/1 which takes a shape and returns the perimeter
% of the shape.
%
% Choose a suitable representation of triangles, and augment perimeter/1
%
%% @doc Perimeter of a shape.
%% Shapes: {rectangle, Center, H, W}, {circle, Center, R} and
%% {triangle, A, B, C} where each vertex is an {X, Y} pair. Triangle side
%% lengths are Euclidean distances between consecutive vertices.
perimeter({rectangle, _Center, H, W}) ->
    2 * (H + W);
perimeter({circle, _Center, R}) ->
    2 * math:pi() * R;
perimeter({triangle, A, B, C}) ->
    Side = fun({X1, Y1}, {X2, Y2}) ->
        math:sqrt(math:pow(X2 - X1, 2) + math:pow(Y2 - Y1, 2))
    end,
    Side(A, B) + Side(B, C) + Side(C, A).
%% EUnit: exercises all three perimeter/1 clauses. Float results are
%% compared via fixed-precision string formatting to avoid exact
%% floating-point comparisons.
perimeter_test() ->
?assertEqual(22, perimeter({rectangle, {4, 1.5}, 3, 8})),
?assertEqual("56.5487", float_to_list(perimeter({circle, {0, 0}, 9}), [{decimals, 4}])),
?assertEqual("80.86", float_to_list(perimeter({triangle, {15, 15}, {23, 30}, {50, 25}}), [{decimals, 2}])).
% and area/1 to handle this case too.
%% @doc Area of a shape.
%% The triangle clause uses the shoelace formula on the three vertices;
%% abs/1 makes the result independent of vertex winding order.
%% See: http://www.mathopenref.com/coordtrianglearea.html
area({circle, _Center, R}) ->
    math:pi() * R * R;
area({rectangle, _Center, H, W}) ->
    W * H;
area({triangle, {Ax, Ay}, {Bx, By}, {Cx, Cy}}) ->
    TwiceSigned = Ax * (By - Cy) + Bx * (Cy - Ay) + Cx * (Ay - By),
    abs(TwiceSigned / 2).
%% EUnit: covers every area/1 clause, including two triangles to check the
%% shoelace formula on different vertex sets.
area_test() ->
?assertEqual(60, area({rectangle, {5, 3}, 6, 10})),
?assertEqual("28.27", float_to_list(area({circle, {0, 0}, 3}), [{decimals, 2}])),
?assertEqual(222.5, area({triangle, {15, 15}, {23, 30}, {50, 25}})),
?assertEqual(170.0, area({triangle, {15, 15}, {23, 30}, {43, 25}})).
% Define a function enclose/1 that takes a shape and returns the smallest
% enclosing rectangle of the shape.
%% @doc Smallest axis-aligned rectangle enclosing the given shape.
%% A rectangle is its own bounding box; a circle's box is a square of side
%% 2R centred on the circle; a triangle's box spans the min/max of its
%% vertex coordinates and is centred at the box midpoint.
enclose({rectangle, _, _, _} = Rect) ->
    Rect;
enclose({circle, Centre, R}) ->
    Side = 2 * R,
    {rectangle, Centre, Side, Side};
enclose({triangle, {Ax, Ay}, {Bx, By}, {Cx, Cy}}) ->
    Left = lists:min([Ax, Bx, Cx]),
    Right = lists:max([Ax, Bx, Cx]),
    Bottom = lists:min([Ay, By, Cy]),
    Top = lists:max([Ay, By, Cy]),
    Centre = {(Left + Right) / 2, (Bottom + Top) / 2},
    {rectangle, Centre, Top - Bottom, Right - Left}.
%% EUnit: rectangle is returned unchanged; circle and triangle bounding
%% boxes are checked against hand-computed values.
enclose_test() ->
?assertEqual({rectangle, {1, 1}, 2, 2}, enclose({rectangle, {1, 1}, 2, 2})),
?assertEqual({rectangle, {0, 0}, 2, 2}, enclose({circle, {0, 0}, 1})),
?assertEqual({rectangle, {1.0, 0.5}, 1, 2}, enclose({triangle, {0, 0}, {1, 1}, {2, 0}})).
% Summing the bits
% ----------------
%
% Define a function bits/1 that takes a positive integer N and returns the sum of
% the bits in the binary representation. For example bits(7) is 3 and bits(8) is 1.
%
% See whether you can make both a direct recursive and a tail recursive
% definition.
%
% Step-by-Step evaluation (tail recursive)
%
% bits(7)
% bits(7, 1)
% bits(3, 2)
% bits(1, 3)
% bits(0, 3)
% 3
%% @doc Population count: number of 1-bits in the binary representation of
%% a positive integer N. Tail recursive: each step adds the low bit to the
%% accumulator and shifts the remaining bits down.
%% bits(7) =:= 3, bits(8) =:= 1.
bits(N) when N > 0 ->
    bits(N, 0).

bits(0, Count) ->
    Count;
bits(N, Count) ->
    bits(N bsr 1, Count + (N band 1)).
% Step-by-step evaluation
% bits_direct(7)
% 1 + bits_direct(3)
% 1 + 1 + bits_direct(1)
% 1 + 1 + 1 + bits_direct(0)
% 1 + 1 + 1 + 0
% 3
%% @doc Direct (body-recursive) population count for non-negative integers:
%% the low bit plus the count of the remaining high bits.
bits_direct(0) ->
    0;
bits_direct(N) when N > 0 ->
    N rem 2 + bits_direct(N div 2).
%% EUnit: both the tail-recursive and the direct-recursive population
%% counts must agree on the exercise's examples (7 -> 3, 8 -> 1).
bits_test() ->
?assertEqual(3, bits(7)),
?assertEqual(1, bits(8)),
?assertEqual(3, bits_direct(7)),
?assertEqual(1, bits_direct(8)).
%%%%%%%%%%%
% Helpers %
%%%%%%%%%%%
% Pythagoras' Theorem
%
% See: http://www.mathopenref.com/coorddist.html
%% @doc Euclidean distance between two points (Pythagoras' theorem).
%% This local length/2 does not clash with the length/1 BIF (different
%% arity). See: http://www.mathopenref.com/coorddist.html
length({X1, Y1}, {X2, Y2}) ->
    DeltaX = X2 - X1,
    DeltaY = Y2 - Y1,
    math:sqrt(math:pow(DeltaX, 2) + math:pow(DeltaY, 2)).
%% @doc Integer quotient and remainder of N by M as {Quotient, Remainder}.
divrem(N, M) ->
    Quotient = N div M,
    Remainder = N rem M,
    {Quotient, Remainder}.
%% @doc Midpoint (arithmetic mean) of two numbers; `/' always yields a float.
avg(Lo, Hi) ->
    (Lo + Hi) / 2.
% From: 1.15
%% @doc Largest of three numbers (carried over from exercise 1.15).
maxThree(A, B, C) ->
    lists:max([A, B, C]).
%% @doc Smallest of three numbers (carried over from exercise 1.15).
minThree(A, B, C) ->
    lists:min([A, B, C]).
%Copyright [2012] [<NAME>]
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
% http://www.apache.org/licenses/LICENSE-2.0
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
-module(wsock_message_spec).
-include_lib("espec/include/espec.hrl").
-include_lib("hamcrest/include/hamcrest.hrl").
-include("wsock.hrl").
-define(FRAGMENT_SIZE, 4096).
%% Top-level espec suite for wsock_message.
%% encode/2 is exercised for option handling, single-frame output,
%% multi-frame (fragmented) output and the close/ping/pong control
%% messages; decode/2,3 is exercised for masking errors, fragmented
%% messages, fragmented frames and control frames.
%% NOTE(review): crypto:rand_bytes/1, used below to build random payloads,
%% was deprecated and later removed from OTP; crypto:strong_rand_bytes/1
%% is the replacement — confirm against the targeted OTP version.
spec() ->
describe("encode", fun() ->
%% wsock_framing is mocked (passthrough) so the specs can inspect the
%% options that encode/2 forwards to wsock_framing:frame/2.
before_each(fun()->
meck:new(wsock_framing, [passthrough])
end),
after_each(fun()->
meck:unload(wsock_framing)
end),
it("should return an error if no datatype option is given", fun() ->
Return = wsock_message:encode("motosicleta man", []),
assert_that(Return, is({error, missing_datatype}))
end),
it("should mask data if 'mask' option is present", fun() ->
wsock_message:encode("asdasda", [text ,mask]),
[_Data, Options] = meck_arguments(wsock_framing, frame),
assert_that(proplists:get_value(mask, Options), is(true))
end),
it("should not mask data if 'mask' option is not present", fun() ->
wsock_message:encode("frotisfrotis", [text]),
[_Data, Options] = meck_arguments(wsock_framing, frame),
assert_that(proplists:get_value(mask, Options), is(undefined))
end),
it("should set opcode to 'text' if type is text", fun() ->
wsock_message:encode("asadsd", [text]),
[_Data, Options] = meck_arguments(wsock_framing, frame),
assert_that(proplists:get_value(opcode, Options), is(text))
end),
it("should set opcode to 'binary' if type is binary", fun() ->
wsock_message:encode(<<"asdasd">>, [binary]),
[_Data, Options] = meck_arguments(wsock_framing, frame),
assert_that(proplists:get_value(opcode, Options), is(binary))
end),
%% Payloads up to ?FRAGMENT_SIZE must produce exactly one frame.
describe("when payload size is <= fragment size", fun()->
it("should return a list with only one binary fragment", fun()->
Data = "Foo bar",
[BinFrame | []] = wsock_message:encode(Data, [text]),
assert_that(byte_size(list_to_binary(Data)),is(less_than(?FRAGMENT_SIZE))),
assert_that(is_binary(BinFrame), is(true)),
assert_that(meck:called(wsock_framing, to_binary, '_'), is(true)),
assert_that(meck:called(wsock_framing, frame, '_'), is(true))
end),
it("should set opcode to 'type'", fun() ->
Data = "Foo bar",
[Frame] = wsock_message:encode(Data, [text]),
<<_:4, Opcode:4, _/binary>> = Frame,
assert_that(Opcode, is(1))
end),
it("should set fin", fun()->
Data = "Foo bar",
[Frame] = wsock_message:encode(Data, [text]),
<<Fin:1, _/bits>> = Frame,
assert_that(Fin, is(1))
end)
end),
%% Larger payloads are split into ?FRAGMENT_SIZE-sized fragments: FIN is
%% set only on the last one, opcode only on the first (continuation after).
describe("when payload size is > fragment size", fun() ->
it("should return a list of binary fragments", fun()->
Data = crypto:rand_bytes(5000),
Frames = wsock_message:encode(Data, [binary]),
assert_that(meck:called(wsock_framing, to_binary, '_'), is(true)),
assert_that(meck:called(wsock_framing, frame, '_'), is(true)),
assert_that(length(Frames), is(2))
end),
it("should set a payload of 4096 bytes or less on each fragment", fun() ->
Data = crypto:rand_bytes(?FRAGMENT_SIZE*3),
Frames = wsock_message:encode(Data, [binary]),
[Frame1, Frame2, Frame3] = Frames,
<<_:32, Payload1/binary>> = Frame1,
<<_:32, Payload2/binary>> = Frame2,
<<_:32, Payload3/binary>> = Frame3,
assert_that(byte_size(Payload1), is(?FRAGMENT_SIZE)),
assert_that(byte_size(Payload2), is(?FRAGMENT_SIZE)),
assert_that(byte_size(Payload3), is(?FRAGMENT_SIZE))
end),
it("should set opcode to 'type' on the first fragment", fun()->
Data = crypto:rand_bytes(5000),
Frames = wsock_message:encode(Data, [binary]),
[FirstFragment | _ ] = Frames,
<<_:4, Opcode:4, _/binary>> = FirstFragment,
assert_that(Opcode, is(2))
end),
it("should unset fin on all fragments but last", fun() ->
Data = crypto:rand_bytes(12288), %4096 * 3
Frames = wsock_message:encode(Data, [binary]),
[Frame1, Frame2, Frame3] = Frames,
<<Fin1:1, _/bits>> = Frame1,
<<Fin2:1, _/bits>> = Frame2,
<<Fin3:1, _/bits>> = Frame3,
assert_that(Fin1, is(0)),
assert_that(Fin2, is(0)),
assert_that(Fin3, is(1))
end),
it("should set opcode to 'continuation' on all fragments but first", fun() ->
Data = crypto:rand_bytes(12288), %4096 * 3
Frames = wsock_message:encode(Data, [binary]),
[Frame1, Frame2, Frame3] = Frames,
<<_:4, Opcode1:4, _/binary>> = Frame1,
<<_:4, Opcode2:4, _/binary>> = Frame2,
<<_:4, Opcode3:4, _/binary>> = Frame3,
assert_that(Opcode1, is(2)),
assert_that(Opcode2, is(0)),
assert_that(Opcode3, is(0))
end)
end),
%% close (opcode 8), ping (9) and pong (10) control messages.
describe("control messages", fun() ->
describe("close", fun() ->
it("should return a list of one frame", fun() ->
[_Frame] = wsock_message:encode([], [close])
end),
it("should return a close frame", fun() ->
[Frame] = wsock_message:encode([], [close]),
<<Fin:1, Rsv:3, Opcode:4, _/binary>> = Frame,
assert_that(Fin, is(1)),
assert_that(Rsv, is(0)),
assert_that(Opcode, is(8))
end),
it("should attach application payload", fun() ->
[Frame] = wsock_message:encode({1004, "Chapando el garito"}, [mask, close]),
<<_Fin:1, _Rsv:3, _Opcode:4, 1:1, _PayloadLen:7, _Mask:32, _Payload/binary>> = Frame
end)
end),
describe("ping", fun() ->
it("should return a list of one frame", fun() ->
[_Frame] = wsock_message:encode([], [ping])
end),
it("should return a ping frame", fun() ->
[Frame] = wsock_message:encode([], [ping]),
<<Fin:1, Rsv:3, Opcode:4, _/binary>> = Frame,
assert_that(Fin, is(1)),
assert_that(Rsv, is(0)),
assert_that(Opcode, is(9))
end),
it("should attach application payload", fun() ->
[Frame] = wsock_message:encode("1234", [mask, ping]),
<<_Fin:1, _Rsv:3, _Opcode:4, 1:1, 4:7, _Mask:32, _Payload:4/binary>> = Frame
end)
end),
describe("pong", fun() ->
it("should return a list of one frame", fun() ->
[_Frame] = wsock_message:encode([], [pong])
end),
it("should return a ping frame", fun() ->
[Frame] = wsock_message:encode([], [pong]),
<<Fin:1, Rsv:3, Opcode:4, _/binary>> = Frame,
assert_that(Fin, is(1)),
assert_that(Rsv, is(0)),
assert_that(Opcode, is(10))
end),
it("should attach application payload", fun() ->
[Frame] = wsock_message:encode("1234", [mask, pong]),
<<_Fin:1, _Rsv:3, _Opcode:4, 1:1, 4:7, _Mask:32, _Payload:4/binary>> = Frame
end)
end)
end)
end),
%% decode/2,3: parsing raw binary frames back into #message records,
%% including partially received (fragmented) messages and frames.
describe("decode", fun()->
it("should return an error if unexpected masking", fun() ->
Payload = crypto:rand_bytes(20),
Frame = get_binary_frame(0, 0, 0, 0, 2, 1, 20, 0, Payload),
Response = wsock_message:decode(Frame, []),
assert_that(Response, is({error, frames_masked}))
end),
it("should return an error if expected masking", fun() ->
Payload = crypto:rand_bytes(20),
Frame = get_binary_frame(0, 0, 0, 0, 2, 0, 20, 0, Payload),
Response = wsock_message:decode(Frame, [masked]),
assert_that(Response, is({error, frames_unmasked}))
end),
it("should decode masked messages", fun() ->
Payload = crypto:rand_bytes(20),
Fragment = get_binary_frame(0, 0, 0, 0, 2, 1, 20, 0, Payload),
[_Message] = wsock_message:decode(Fragment, [masked])
end),
it("should decode unmasked messages", fun() ->
Payload = crypto:rand_bytes(20),
Frame = get_binary_frame(0, 0, 0, 0, 2, 0, 20, 0, Payload),
[_Message] = wsock_message:decode(Frame, [])
end),
describe("fragmented messages", fun() ->
%describe("when they are control messages", )
it("should complain when control messages are fragmented", fun() ->
Data = crypto:rand_bytes(10),
Frame1 = get_binary_frame(0, 0, 0, 0, 8, 0, 10, 0, Data),
Frame2 = get_binary_frame(1, 0, 0, 0, 0, 0, 10, 0, Data),
Message = <<Frame1/binary, Frame2/binary>>,
Return = wsock_message:decode(Message, []),
assert_that(Return, is({error, fragmented_control_message}))
end),
it("should return a fragmented message with undefined payload when message is not complete", fun() ->
Payload = crypto:rand_bytes(20),
<<
Payload1:10/binary,
Payload2:5/binary,
_Payload3/binary
>> = Payload,
FakeFragment1 = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, Payload1),
FakeFragment2 = get_binary_frame(0, 0, 0, 0, 0, 0, 5, 0, Payload2),
Data = <<FakeFragment1/binary, FakeFragment2/binary>>,
[Message] = wsock_message:decode(Data, []),
assert_that(Message#message.type, is(fragmented)),
assert_that(length(Message#message.frames), is(2))
end),
it("should decode data containing a complete fragmented binary message", fun() ->
Payload = crypto:rand_bytes(40),
<<
Payload1:10/binary,
Payload2:10/binary,
Payload3:10/binary,
Payload4:10/binary
>> = Payload,
FakeFragment1 = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, Payload1),
FakeFragment2 = get_binary_frame(0, 0, 0, 0, 0, 0, 10, 0, Payload2),
FakeFragment3 = get_binary_frame(0, 0, 0, 0, 0, 0, 10, 0, Payload3),
FakeFragment4 = get_binary_frame(1, 0, 0, 0, 0, 0, 10, 0, Payload4),
Data = << FakeFragment1/binary, FakeFragment2/binary, FakeFragment3/binary, FakeFragment4/binary>>,
[Message] = wsock_message:decode(Data, []),
assert_that(Message#message.type, is(binary)),
assert_that(Message#message.payload, is(Payload))
end),
it("should decode data containing a complete fragmented text message", fun() ->
Text = "asasdasdasdasdasdasdasdasdasdasdasdasdasdasdasd",
Payload = list_to_binary(Text),
<<
Payload1:5/binary,
Payload2:2/binary,
Payload3/binary
>> = Payload,
FakeFragment1 = get_binary_frame(0, 0, 0, 0, 1, 0, byte_size(Payload1), 0, Payload1),
FakeFragment2 = get_binary_frame(0, 0, 0, 0, 0, 0, byte_size(Payload2), 0, Payload2),
FakeFragment3 = get_binary_frame(1, 0, 0, 0, 0, 0, byte_size(Payload3), 0, Payload3),
Data = << FakeFragment1/binary, FakeFragment2/binary, FakeFragment3/binary>>,
[Message] = wsock_message:decode(Data, []),
assert_that(Message#message.type, is(text)),
assert_that(Message#message.payload, is(Text))
end),
it("should complete a fragmented message", fun() ->
Payload = crypto:rand_bytes(20),
<<
Payload1:10/binary,
Payload2:5/binary,
Payload3/binary
>> = Payload,
FakeFragment1 = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, Payload1),
FakeFragment2 = get_binary_frame(0, 0, 0, 0, 0, 0, 5, 0, Payload2),
FakeFragment3 = get_binary_frame(1, 0, 0, 0, 0, 0, 5, 0, Payload3),
Data1 = <<FakeFragment1/binary, FakeFragment2/binary>>,
Data2 = <<FakeFragment3/binary>>,
[Message1] = wsock_message:decode(Data1, []),
[Message2] = wsock_message:decode(Data2, Message1, []),
assert_that(Message1#message.type, is(fragmented)),
assert_that(Message2#message.type, is(binary)),
assert_that(Message2#message.payload, is(Payload))
end),
it("should decode data with complete fragmented messages and part of fragmented one", fun() ->
BinPayload1 = crypto:rand_bytes(30),
<<
Payload1:10/binary,
Payload2:10/binary,
Payload3/binary
>> = BinPayload1,
FakeFragment1 = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, Payload1),
FakeFragment2 = get_binary_frame(0, 0, 0, 0, 0, 0, 10, 0, Payload2),
FakeFragment3 = get_binary_frame(1, 0, 0, 0, 0, 0, 10, 0, Payload3),
BinPayload2 = crypto:rand_bytes(10),
FakeFragment4 = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, BinPayload2),
Data = << FakeFragment1/binary, FakeFragment2/binary, FakeFragment3/binary, FakeFragment4/binary>>,
[Message1, Message2] = wsock_message:decode(Data, []),
assert_that(Message1#message.type, is(binary)),
assert_that(Message1#message.payload, is(BinPayload1)),
assert_that(length(Message1#message.frames), is(3)),
assert_that(Message2#message.type, is(fragmented)),
assert_that(length(Message2#message.frames), is(1))
end),
%% "fragmented frames": a single frame whose bytes arrive split across
%% multiple decode calls (TCP segmentation).
describe("fragmented frames", fun() ->
it("should return a fragmented message", fun() ->
FakeFrame = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, crypto:rand_bytes(10)),
<<Data:1/binary, _/binary>> = FakeFrame,
[Message] = wsock_message:decode(Data, []),
assert_that(Message#message.type, is(fragmented)),
assert_that(length(Message#message.frames), is(1))
end),
it("should return a fragmented message that is made up of more that one frame", fun() ->
Payload = crypto:rand_bytes(10),
FakeFrame = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, Payload),
<<FirstFragment:1/binary, SecondFragment/binary>> = FakeFrame,
[FragmentedMessage] = wsock_message:decode(FirstFragment, []),
[Message] = wsock_message:decode(SecondFragment, FragmentedMessage, []),
assert_that(Message#message.type, is(fragmented)),
assert_that(length(Message#message.frames), is(1))
end),
it("should complete a fragmented message that is made up of one frame", fun() ->
Payload = crypto:rand_bytes(10),
FakeFrame = get_binary_frame(1, 0, 0, 0, 2, 0, 10, 0, Payload),
<<FirstFragment:1/binary, SecondFragment/binary>> = FakeFrame,
[FragmentedMessage] = wsock_message:decode(FirstFragment, []),
[Message] = wsock_message:decode(SecondFragment, FragmentedMessage, []),
assert_that(Message#message.type, is(binary)),
assert_that(length(Message#message.frames), is(1)),
assert_that(Message#message.payload, is(Payload))
end),
it("should complete a fragmented message that is made up of more than one frame", fun() ->
Data = crypto:rand_bytes(20),
<<DataFrame1:10/binary, DataFrame2/binary>> = Data,
FakeFrame1 = get_binary_frame(0, 0, 0, 0, 2, 0, 10, 0, DataFrame1),
FakeFrame2 = get_binary_frame(1, 0, 0, 0, 0, 0, 10, 0, DataFrame2),
<<FakeFrameFragment1:3/binary, FakeFrameFragment2/binary>> = FakeFrame2,
InputData = <<FakeFrame1/binary, FakeFrameFragment1/binary>>,
[FragmentedMessage] = wsock_message:decode(InputData, []),
[Message] = wsock_message:decode(FakeFrameFragment2, FragmentedMessage, []),
assert_that(Message#message.type, is(binary)),
assert_that(length(Message#message.frames), is(2)),
assert_that(Message#message.payload, is(Data))
end)
end)
end),
describe("unfragmented messages", fun()->
it("should decode data containing various text messages", fun()->
Text1 = "Churras churras",
Payload1 = list_to_binary(Text1),
PayloadLength1 = byte_size(Payload1),
Text2 = "Pitas pitas",
Payload2 = list_to_binary(Text2),
PayloadLength2 = byte_size(Payload2),
Text3 = "Pero que jallo eh",
Payload3 = list_to_binary(Text3),
PayloadLength3 = byte_size(Payload3),
FakeMessage1 = get_binary_frame(1, 0, 0, 0, 1, 0, PayloadLength1, 0, Payload1),
FakeMessage2 = get_binary_frame(1, 0, 0, 0, 1, 0, PayloadLength2, 0, Payload2),
FakeMessage3 = get_binary_frame(1, 0, 0, 0, 1, 0, PayloadLength3, 0, Payload3),
Data = << FakeMessage1/binary, FakeMessage2/binary, FakeMessage3/binary>>,
[Message1, Message2, Message3] = wsock_message:decode(Data, []),
assert_that(Message1#message.type, is(text)),
assert_that(Message1#message.payload, is(Text1)),
assert_that(Message2#message.type, is(text)),
assert_that(Message2#message.payload, is(Text2)),
assert_that(Message3#message.type, is(text)),
assert_that(Message3#message.payload, is(Text3))
end),
it("should decode data containing text and binary messages", fun()->
Text1 = "Churras churras",
Payload1 = list_to_binary(Text1),
PayloadLength1 = byte_size(Payload1),
Payload2 = crypto:rand_bytes(20),
PayloadLength2 = 20,
Text3 = "Pero que jallo eh",
Payload3 = list_to_binary(Text3),
PayloadLength3 = byte_size(Payload3),
FakeMessage1 = get_binary_frame(1, 0, 0, 0, 1, 0, PayloadLength1, 0, Payload1),
FakeMessage2 = get_binary_frame(1, 0, 0, 0, 2, 0, PayloadLength2, 0, Payload2),
FakeMessage3 = get_binary_frame(1, 0, 0, 0, 1, 0, PayloadLength3, 0, Payload3),
Data = << FakeMessage1/binary, FakeMessage2/binary, FakeMessage3/binary>>,
[Message1, Message2, Message3] = wsock_message:decode(Data, []),
assert_that(Message1#message.type, is(text)),
assert_that(Message1#message.payload, is(Text1)),
assert_that(Message2#message.type, is(binary)),
assert_that(Message2#message.payload, is(Payload2)),
assert_that(Message3#message.type, is(text)),
assert_that(Message3#message.payload, is(Text3))
end),
it("should decode data containing all message types"),
it("should decode data containing a binary message", fun() ->
Payload = crypto:rand_bytes(45),
%")
FakeMessage = get_binary_frame(1, 0, 0, 0, 2, 0, 45, 0, Payload),
[Message] = wsock_message:decode(FakeMessage, []),
assert_that( Message#message.payload, is(Payload))
end),
it("should decode data containing a text message", fun() ->
Payload = "Iepa yei!",
PayloadLength = length(Payload),
PayloadData = list_to_binary(Payload),
FakeMessage = get_binary_frame(1, 0, 0, 0, 1, 0, PayloadLength, 0, PayloadData),
[Message] = wsock_message:decode(FakeMessage, []),
assert_that( Message#message.payload, is(Payload))
end),
%% Decoding control frames: ping (9), pong (10), close (8) with the
%% optional {StatusCode, Reason} close payload.
describe("control frames", fun() ->
describe("ping", fun() ->
it("should return a message with type ping", fun() ->
FakeMessage = get_binary_frame(1, 0, 0, 0, 9, 0, 0, 0, <<>>),
[Message] = wsock_message:decode(FakeMessage, []),
assert_that(Message#message.type, is(ping))
end)
end),
describe("pong", fun() ->
it("should return a message with type pong", fun() ->
FakeMessage = get_binary_frame(1, 0, 0, 0, 10, 0, 0, 0, <<>>),
[Message] = wsock_message:decode(FakeMessage, []),
assert_that(Message#message.type, is(pong))
end)
end),
describe("close", fun() ->
it("should return a message with type close", fun() ->
FakeMessage = get_binary_frame(1, 0, 0, 0, 8, 0, 0, 0, <<>>),
[Message] = wsock_message:decode(FakeMessage, []),
assert_that(Message#message.type, is(close))
end),
describe("with payload", fun() ->
it("should return the payload a a tuple {Status, Reason}", fun()->
Status = 1004,
Reason = list_to_binary("A tomar por saco"),
Payload = <<Status:16, Reason/binary>>,
PayloadLen = byte_size(Payload),
FakeMessage = get_binary_frame(1, 0, 0, 0, 8, 0, PayloadLen, 0, Payload),
[Message] = wsock_message:decode(FakeMessage, []),
{St, Re} = Message#message.payload,
assert_that(St, is(Status)),
assert_that(Re, is("A tomar por saco"))
end)
end),
describe("without payload", fun() ->
it("should return the payload as a tuple {undefined, undefined}", fun() ->
FakeMessage = get_binary_frame(1, 0, 0, 0, 8, 0, 0, 0, <<>>),
[Message] = wsock_message:decode(FakeMessage, []),
{Status, Reason} = Message#message.payload,
assert_that(Status, is(undefined)),
assert_that(Reason, is(undefined))
end)
end)
end)
end)
end)
end).
%% @doc Build a raw WebSocket frame binary for the specs.
%% Fin/Rsv1-3/Opcode/Mask are the RFC 6455 header bits, Length is the
%% 7-bit payload-length field, and ExtendedPayloadLength fills the 16/64
%% bit extension used when Length is 126/127. When Mask is 1 a random
%% 32-bit masking key is generated and the payload is masked with it.
%% (Fix: crypto:rand_bytes/1 was deprecated and then removed from OTP;
%% crypto:strong_rand_bytes/1 is the drop-in replacement.)
get_binary_frame(Fin, Rsv1, Rsv2, Rsv3, Opcode, Mask, Length, ExtendedPayloadLength, Payload) ->
    Head = <<Fin:1, Rsv1:1, Rsv2:1, Rsv3:1, Opcode:4, Mask:1, Length:7>>,
    TempBin = case Length of
        126 ->
            <<Head/binary, ExtendedPayloadLength:16>>;
        127 ->
            <<Head/binary, ExtendedPayloadLength:64>>;
        _ ->
            <<Head/binary>>
    end,
    case Mask of
        0 ->
            <<TempBin/binary, Payload/binary>>;
        1 ->
            <<Mk:32>> = crypto:strong_rand_bytes(4),
            MaskedPayload = mask(Payload, Mk, <<>>),
            <<TempBin/binary, Mk:32, MaskedPayload/binary>>
    end.

%% @doc XOR-mask Data with the 32-bit MaskKey (RFC 6455 client masking).
%% Whole 32-bit words are masked first; the 24/16/8-bit clauses handle a
%% trailing partial word using the corresponding high bits of the key.
%% Masking is an involution: mask(mask(B, K, <<>>), K, <<>>) =:= B.
mask(<<Data:32, Rest/bits>>, MaskKey, Acc) ->
    T = Data bxor MaskKey,
    mask(Rest, MaskKey, <<Acc/binary, T:32>>);
mask(<<Data:24>>, MaskKey, Acc) ->
    <<MaskKey2:24, _/bits>> = <<MaskKey:32>>,
    T = Data bxor MaskKey2,
    <<Acc/binary, T:24>>;
mask(<<Data:16>>, MaskKey, Acc) ->
    <<MaskKey2:16, _/bits>> = <<MaskKey:32>>,
    T = Data bxor MaskKey2,
    <<Acc/binary, T:16>>;
mask(<<Data:8>>, MaskKey, Acc) ->
    <<MaskKey2:8, _/bits>> = <<MaskKey:32>>,
    T = Data bxor MaskKey2,
    <<Acc/binary, T:8>>;
mask(<<>>, _, Acc) ->
    Acc.
%% @private Return the argument list of the single recorded call to
%% Module:Function in meck's call history. Crashes (badmatch) when the
%% function was called zero or several times — intentional in the specs.
meck_arguments(Module, Function) ->
    Matching = [Args || {_Pid, {_Mod, F, Args}, _Result} <- meck:history(Module), F == Function],
    [Arguments] = Matching,
    Arguments.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(kai_store_ets).
-behaviour(gen_server).
-export([start_link/1]).
-export([
init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
code_change/3
]).
-include("kai.hrl").
%% @doc Start the ETS-backed store as a locally registered gen_server
%% under the name Server.
start_link(Server) ->
    gen_server:start_link({local, Server}, ?MODULE, [], []).
%% @hidden Create the backing ETS table: a private named set keyed on the
%% #data.key field (record field 1 is at tuple position 2, hence keypos 2).
%% The gen_server state itself is just an empty list.
init(_Args) ->
    TableOpts = [set, private, named_table, {keypos, 2}],
    ets:new(?MODULE, TableOpts),
    {ok, []}.
%% @hidden Drop the ETS table on shutdown; all stored data is discarded
%% (the table is private and owned by this process anyway).
terminate(_Reason, _State) ->
ets:delete(?MODULE),
ok.
%% List the metadata of every entry in Bucket via an ETS select.
%% The match-spec head binds key/last_modified/vector_clocks/checksum and
%% wildcards flags/value; the body rebuilds #data records carrying only
%% that metadata (flags and value are left at the record defaults), so
%% listing never copies stored payloads out of the table.
do_list(Bucket, State) ->
Head = #data{
key = '$1',
bucket = Bucket,
last_modified = '$2',
vector_clocks = '$3',
checksum = '$4',
flags = '_',
value = '_'
},
%% no guard conditions: every entry of the bucket matches
Cond = [],
Body = [{#data{
key = '$1',
bucket = Bucket,
last_modified = '$2',
vector_clocks = '$3',
checksum = '$4'
}}],
ListOfData = ets:select(?MODULE, [{Head, Cond, Body}]),
{reply, {list_of_data, ListOfData}, State}.
%% @doc Fetch the stored #data record for the request's key; replies with
%% the record, or undefined when the key is absent.
do_get(#data{key = Key}, State) ->
    Reply = case ets:lookup(?MODULE, Key) of
        [Found] -> Found;
        _ -> undefined
    end,
    {reply, Reply, State}.
%% Match the request's key against the table with ets:match/2.
%% NOTE(review): ets:match/2 expects a match *pattern*, but this passes the
%% raw key value, so it only matches when an entire stored tuple equals
%% Key — which cannot happen for 8-field #data records. This looks like it
%% should be ets:lookup/2 or ets:match_object/2 with a #data{} pattern;
%% confirm the intended semantics against callers before changing it.
do_match(#data{key=Key} = _Data, State) ->
case ets:match(?MODULE, Key) of
[Data] -> {reply, Data, State};
_ -> {reply, undefined, State}
end.
%% @doc Store Data unless a stored version supersedes it.
%% When an entry with the same key already exists, the incoming vector
%% clock must descend from the stored one; otherwise the write is rejected
%% as stale or concurrent. New keys are inserted unconditionally.
%% (Fix: removed the dead `do_put(a, b) -> ok.' clause — the second
%% argument is always the gen_server state, never the atom `b', so that
%% clause could never match and only masked bad calls.)
do_put(Data, State) when is_record(Data, data) ->
    case ets:lookup(?MODULE, Data#data.key) of
        [StoredData] ->
            case vclock:descends(Data#data.vector_clocks, StoredData#data.vector_clocks) of
                true -> insert_and_reply(Data, State);
                _ -> {reply, {error, "stale or concurrent state found in kai_store"}, State}
            end;
        _ -> insert_and_reply(Data, State)
    end.
%% @private Unconditionally write Data into the table and reply ok.
insert_and_reply(Data, State) ->
    ets:insert(?MODULE, Data),
    {reply, ok, State}.
%% @doc Delete the entry for the request's key. Replies ok when the entry
%% existed and undefined when there was nothing to delete.
do_delete(#data{key = Key}, State) ->
    case ets:lookup(?MODULE, Key) of
        [_Existing] ->
            ets:delete(?MODULE, Key),
            {reply, ok, State};
        _ ->
            {reply, undefined, State}
    end.
%% @private Store statistics.
%% bytes: a rough size estimate — the table's own word count plus the
%% VM-wide binary heap (stored binaries are held by reference, so
%% ets:info/2 alone would undercount; this overcounts instead).
%% size: number of stored objects. Unknown names crash (case_clause).
info(Name, State) ->
    Value = case Name of
        bytes ->
            EtsBytes = erlang:system_info(wordsize) + ets:info(?MODULE, memory),
            EtsBytes + erlang:memory(binary);
        size ->
            ets:info(?MODULE, size)
    end,
    {reply, Value, State}.
%% @hidden Synchronous API dispatch: each request is delegated to the
%% matching do_* helper (which builds the gen_server reply tuple itself).
%% There is deliberately no catch-all clause — an unknown request crashes
%% this server (let it crash).
handle_call(stop, _From, State) ->
{stop, normal, stopped, State};
handle_call({list, Bucket}, _From, State) ->
do_list(Bucket, State);
handle_call({get, Data}, _From, State) ->
do_get(Data, State);
handle_call({match, Data}, _From, State) ->
do_match(Data, State);
handle_call({put, Data}, _From, State) ->
do_put(Data, State);
handle_call({delete, Data}, _From, State) ->
do_delete(Data, State);
handle_call({info, Name}, _From, State) ->
info(Name, State).
%% @hidden This store has no asynchronous API; all casts are ignored.
handle_cast(_Request, State) ->
    {noreply, State}.
%% @hidden Drain and ignore any stray messages sent to the server.
handle_info(_Message, State) ->
    {noreply, State}.
%% @hidden No state migration is needed between code versions.
code_change(_OldVersion, State, _Extra) ->
    {ok, State}.
%%%---------------------------------------------------------------------------
%%% @doc
%%% `inets'/`httpd' request handler module.
%%%
%%% For example of use, see {@link examples_server}.
%%%
%%% == Configuration ==
%%%
%%% Obviously, `mod_xmerlrpc' needs to be added to `modules' list:
%```
%Config = [
% % ...
% {modules, [mod_xmerlrpc, mod_alias, mod_dir, mod_get, mod_log]},
% % ...
%],
%inets:start(httpd, Config).
%'''
%%%
%%% Additionally, paths that are supposed to handle XML-RPC requests must be
%%% provided, along with lists of procedures that are exposed through these
%%% paths.
%%%
%%% An entry specifying such path has type
%%% {@type @{xmlrpc, @{Path :: string(), Procedures :: dispatch_table()@}@}}
%%%
%%% The entry could look like this:
%```
%Config = [
% % ...
% {xmlrpc, {"/rpc", [
% {<<"proc.name">>, {example_handler, proc_name}},
% {<<"another.proc">>, {example_handler, another_procedure}},
% % ...
% ]}},
% % ...
%],
%'''
%%%
%%% There can be many `{xmlrpc, {Path, Procs}}' entries in config, each for
%%% different path. For a given call, the entry selected is the one with the
%%% longest `Path' that is a prefix of the requested URI.
%%%
%%% Note that the module does not process a request if its path is not under
%%% any of the configured paths for RPC.
%%%
%%% == Handler interface ==
%%%
%%% <b>TODO</b>
%%%
%%% Function exported from module is called with two arguments: {@type
%%% [xmerlrpc_xml:proc_arg()]} and {@type call_environment()} and should
%%% return {@type call_result()}.
%%%
%%% @todo Error reporting better than current hardcoded `{exception, "Some
%%% error"}'
%%% @todo Allow procedure names to be strings and atoms in {@type
%%% dispatch_table()}
%%% @end
%%%---------------------------------------------------------------------------
-module(mod_xmerlrpc).
%%% enable qlc parse transform
-include_lib("stdlib/include/qlc.hrl").
%% `httpd' module API
-export([do/1]).
-export_type([proc_spec/0, dispatch_table/0, call_environment/0]).
%%%---------------------------------------------------------------------------
%% `httpd' request record
-record(mod, {
init_data, % this field is not mentioned in doc, but exists in passed record
data = [],
socket_type = ip_comm,
socket,
config_db,
method,
absolute_uri,
request_uri,
http_version,
request_line,
parsed_header = [],
entity_body,
connection
}).
%%%---------------------------------------------------------------------------
%%----------------------------------------------------------
%% inets/httpd configuration for mod_xmerlrpc
-type proc_spec() ::
{module(), Function :: atom()}
| fun(([xmerlrpc_xml:proc_arg()], call_environment()) -> call_result()).
-type dispatch_table() :: [{ProcName :: binary(), Function :: proc_spec()}].
%% List of procedures that are exposed, along with what function to call for
%% specific name.
%%----------------------------------------------------------
%% call data types
-type call_env_var() ::
{root_uri, string()}
| {uri, string()}.
%% `uri' is the requested URI. `root_uri' is the prefix from `mod_xmerlrpc'
%% configuration that matched `uri'.
-type call_environment() :: [call_env_var()].
%% List (proplist) of variables describing HTTP request the call handles.
-type call_result() ::
{ok, xmerlrpc_xml:proc_arg()}
| {error, term()}
| xmerlrpc_xml:proc_arg().
%% <b>TODO</b>: standarize this.
%%----------------------------------------------------------
%% response data for inets/httpd
-type http_response() :: {status_code(), [header()], body()}.
-type status_code() :: pos_integer().
-type header() :: {Name :: atom(), Value :: string()}.
-type body() :: iolist().
%%----------------------------------------------------------
%% inets/httpd request data
-type req_method() :: string().
-type req_header() :: {Name :: string(), Value :: string()}.
%% `Name' is lower case.
-type req_body() :: string().
-type request_data() :: term().
%% Whatever came in `ModData#mod.data'.
%%----------------------------------------------------------
%%%---------------------------------------------------------------------------
%% @private
%% @doc Process HTTP request (`inets' callback).
%%
%% Return `{proceed, ModData#mod.data}' when operation skipped.
-spec do(#mod{}) ->
{proceed, request_data()}
| {proceed, [{response, {response, [header()], body()}}]}
| {break, [{response, {response, [header()], body()}}]}.
do(ModData = #mod{}) ->
%% Only intercept requests whose URI falls under one of the configured
%% {xmlrpc, {Path, Procs}} prefixes; anything else is passed through to
%% the next httpd module untouched.
case find_prefix(ModData#mod.config_db, ModData#mod.request_uri) of
none ->
% didn't find anything, pass the request over
{proceed, ModData#mod.data};
{RootURI, DispatchTable} ->
% intercept and consume the request
% TODO: what else?
% * HTTP authentication data
% * SSL certificate details, if any
% * client IP+port
% * server IP+port
% * virtual host name
% * protocol (HTTP, HTTPs)
%% Environment describes the HTTP request for the RPC handlers:
%% root_uri is the matched config prefix, uri the full request URI.
Environment = [
{root_uri, RootURI},
{uri, ModData#mod.request_uri}
],
% `Headers' don't contain `content_length'
{StatusCode, Headers, Body} = step_validate_request(
ModData#mod.method,
ModData#mod.parsed_header,
ModData#mod.entity_body,
Environment,
DispatchTable
),
%% Body may be an iolist, so compute Content-Length from its byte size
%% before handing the response tuple back to httpd.
ContentLength = integer_to_list(iolist_size(Body)),
Response = {
response,
[{code, StatusCode}, {content_length, ContentLength} | Headers],
Body
},
{proceed, [{response, Response}]}
end.
%%%---------------------------------------------------------------------------
%%% XML-RPC request processing steps
%%%---------------------------------------------------------------------------
%% @doc Validate HTTP request.
%%   Validates the HTTP layer of the XML-RPC protocol: the method must
%%   be POST and the <i>Content-Type</i> must denote `text/xml'.
%%   Media type parameters (e.g. `text/xml; charset=utf-8'), which RFC
%%   7231 permits and real clients send, are accepted as well; the
%%   previous exact string match rejected them.
-spec step_validate_request(req_method(), [req_header()], req_body(),
                            call_environment(), dispatch_table()) ->
    http_response().
step_validate_request("POST" = _Method, ReqHeaders, ReqBody,
                      Environment, DispatchTable) ->
    % header names in `ReqHeaders' are already lower case
    case proplists:lookup("content-type", ReqHeaders) of
        {_Key, ContentType} ->
            case is_xml_content_type(ContentType) of
                true ->
                    step_parse_xmlrpc_request(ReqBody, Environment, DispatchTable);
                false ->
                    error_logger:error_report(xmerlrpc, [{step, validate_request}, {error, invalid_content_type}, {content_type, ContentType}]),
                    http_error(bad_request, ["Invalid content type: ", ContentType])
            end;
        none ->
            error_logger:error_report(xmerlrpc, [{step, validate_request}, {error, no_content_type}]),
            http_error(bad_request, "No content type")
    end;
step_validate_request(Method, _ReqHeaders, _ReqBody,
                      _Environment, _DispatchTable) ->
    error_logger:error_report(xmerlrpc, [{step, validate_request}, {error, invalid_method}, {method, Method}]),
    http_error(bad_method, ["Invalid method: ", Method]).

%% @doc Check whether a Content-Type header value denotes `text/xml',
%%   ignoring any trailing media type parameters.
is_xml_content_type(ContentType) ->
    case string:tokens(ContentType, ";") of
        [MediaType | _Params] ->
            string:strip(MediaType) =:= "text/xml";
        [] ->
            false
    end.
%% @doc Parse XML-RPC request.
%%   Decodes the request body as XML and pulls out the procedure name
%%   and its arguments; a malformed body produces an HTTP 400 reply.
-spec step_parse_xmlrpc_request(req_body(), call_environment(),
                                dispatch_table()) ->
    http_response().
step_parse_xmlrpc_request(Body, Env, Dispatch) ->
    case xmerlrpc_xml:parse_request(Body) of
        {ok, request, {Proc, Args}} ->
            step_execute_request(Proc, Args, Env, Dispatch);
        {error, Reason} ->
            error_logger:error_report(xmerlrpc, [{step, parse_xmlrpc_request}, {error, Reason}]),
            http_error(bad_request, "Invalid XML-RPC request")
    end.
%% @doc Execute RPC request.
%%   Looks up and runs the procedure; both successful results and
%%   exceptions raised by the handler are serialized for the client.
-spec step_execute_request(xmerlrpc_xml:proc_name(), [xmerlrpc_xml:proc_arg()],
                           call_environment(), dispatch_table()) ->
    http_response().
step_execute_request(Proc, Args, Env, Dispatch) ->
    case step_dispatch(Proc, Args, Env, Dispatch) of
        {ok, Result} ->
            step_encode_result(Result);
        {exception, Exception} ->
            % an exception comes from the called procedure itself,
            % so it is not something to log here
            step_encode_exception(Exception)
        %{error, _Reason} ->
        %    % this kind of errors should already be handled
        %    http_error(internal, "Dispatch error")
    end.
%% @doc Encode value returned by the called function when it finished
%%   successfully. Unserializable terms become an HTTP 500 reply.
-spec step_encode_result(xmerlrpc_xml:proc_arg()) ->
    http_response().
step_encode_result(Result) ->
    case xmerlrpc_xml:result(Result) of
        {ok, XML} ->
            http_success(XML);
        {error, Reason} ->
            error_logger:error_report(xmerlrpc, [{step, encode_result}, {error, Reason}, {data, Result}]),
            http_error(internal, "Procedure returned unserializable data")
    end.
%% @doc Encode an error reported by the called function.
%%   XML-RPC faults still travel inside an HTTP 200 response.
-spec step_encode_exception(iolist()) ->
    http_response().
step_encode_exception(Message) ->
    {ok, XML} = xmerlrpc_xml:exception(1, Message),
    http_success(XML).
%% @doc Find an appropriate function in the dispatch table and run it.
%%
%% `{exception,_}' is an error reported back to the XML-RPC client;
%% operational errors would be `{error,_}' (currently unused).
-spec step_dispatch(xmerlrpc_xml:proc_name(), [xmerlrpc_xml:proc_arg()],
                    call_environment(), dispatch_table()) ->
      {ok, xmerlrpc_xml:xmlrpc_result()}
    | {exception, Exception :: iolist()}.
    % | {error, term()}.
step_dispatch(Proc, Args, Env, Dispatch) ->
    case proplists:lookup(Proc, Dispatch) of
        {Proc, Fun} when is_function(Fun, 2) ->
            step_call_function(Fun, Args, Env);
        {Proc, {_Mod, _Fun} = MF} ->
            step_call_function(MF, Args, Env);
        none ->
            error_logger:error_report(xmerlrpc, [{step, dispatch}, {error, unknown_procedure}]),
            % unknown procedure is an exception, not a transport error
            {exception, ["Unknown procedure: ", Proc]}
    end.
%% @doc Call specified function with arguments and environment.
%%   The handler may signal failure by returning `{error, Reason}' or by
%%   dying with `erlang:error/1'; both become an XML-RPC exception. Any
%%   other return value is treated as the procedure's result.
-spec step_call_function(proc_spec(), [xmerlrpc_xml:proc_arg()],
                         call_environment()) ->
      {ok, xmerlrpc_xml:proc_arg()}
    | {exception, Exception :: iolist()}.
    % | {error, term()}.
step_call_function(Function, Args, Environment) ->
    % {ok,_} | {error,_} are for case when error is signaled by returned value
    % _ | erlang:error() are for case when error is signaled by dying
    try do_call(Function, Args, Environment) of
        {ok, Result} ->
            {ok, Result};
        {error, Reason} ->
            error_logger:error_report(xmerlrpc, [{step, call}, {error, Reason}, {mfae, {Function, Args, Environment}}]),
            {exception, "Some error"}; % TODO: include `Reason'
        Result ->
            {ok, Result}
    catch
        % NOTE(review): only the `error' class is trapped; `throw' and
        % `exit' from a handler propagate to the request process.
        % TODO: case for error:undef?
        error:Reason ->
            error_logger:error_report(xmerlrpc, [{step, call}, {exit, Reason}, {mfae, {Function, Args, Environment}}]),
            {exception, "Some error"} % TODO: include `Reason'
    end.
%% @doc Invoke the configured handler with the call arguments and the
%%   request environment. Handlers are either 2-ary funs or `{M, F}'
%%   pairs naming an exported 2-ary function.
-spec do_call(proc_spec(), [xmerlrpc_xml:proc_arg()], call_environment()) ->
    call_result().
do_call({Mod, Fun}, Args, Env) ->
    Mod:Fun(Args, Env);
do_call(Fun, Args, Env) when is_function(Fun, 2) ->
    Fun(Args, Env).
%%%---------------------------------------------------------------------------
%%% HTTP helpers
%%%---------------------------------------------------------------------------
%% @doc Reply with HTTP success (status 200, `text/xml').
%%   `Body' must already be XML-encoded: either a serialized XML-RPC
%%   result ({@type xmerlrpc_xml:xmlrpc_result()}) or an exception
%%   ({@type xmerlrpc_xml:xmlrpc_exception()}).
-spec http_success(iolist()) ->
    http_response().
http_success(Body) ->
    {200, [{content_type, "text/xml"}], Body}.
%% @doc Reply with a plain-text HTTP error for the given reason.
-spec http_error(internal | bad_request | bad_method, iolist()) ->
    http_response().
http_error(Reason, Message) ->
    {error_status(Reason), [{content_type, "text/plain"}], [Message, "\n"]}.

%% Map an error reason atom to its HTTP status code.
error_status(internal)    -> 500;
%error_status(not_implemented) -> 501;
error_status(bad_request) -> 400;
error_status(bad_method)  -> 405.
%%%---------------------------------------------------------------------------
%% @doc Find the longest URI prefix in configuration that matches the request.
%%   Scans the httpd config ETS table for `{xmlrpc, {Root, DispatchTable}}'
%%   entries whose `Root' is a URI prefix of `RequestURI'.
-spec find_prefix(ets:tab(), string()) ->
    {RootURI :: string(), dispatch_table()} | none.
find_prefix(Table, RequestURI) ->
    Q = qlc:q([
        {Root, DispatchTable} ||
        {xmlrpc, {Root, DispatchTable}} <- ets:table(Table),
        is_uri_prefix_of(Root, RequestURI)
    ]),
    % thanks to the sorting (DESC), nested prefixes should work consistently:
    % a longer (more specific) matching root sorts first and wins
    case qlc:e(qlc:keysort(1, Q, {order, descending})) of
        [] ->
            none;
        [Prefix | _Rest] ->
            Prefix
    end.
%%%---------------------------------------------------------------------------
%% @doc Check whether `Prefix' is a URI prefix of `URI'.
%%   An exhausted prefix only matches at the end of the URI or at a
%%   path (`/') or query (`?') boundary, so "/foo" is not considered a
%%   prefix of "/foobar". A bare "/" prefix matches any absolute path.
-spec is_uri_prefix_of(string(), string()) ->
    boolean().
is_uri_prefix_of([], []) ->
    true;
is_uri_prefix_of([], [$/ | _]) ->
    true;
is_uri_prefix_of([], [$? | _]) ->
    true;
is_uri_prefix_of([$/], [$/ | _]) ->
    true;
is_uri_prefix_of([Same | PrefixRest], [Same | URIRest]) ->
    is_uri_prefix_of(PrefixRest, URIRest);
is_uri_prefix_of(_, _) ->
    false.
%%%---------------------------------------------------------------------------
%%% vim:ft=erlang:foldmethod=marker
%% Copyright (c) 2016 <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(any).
-export([to_atom/1]).
-export([to_binary/1]).
-export([to_boolean/1]).
-export([to_float/1]).
-export([to_integer/1]).
-export([to_list/1]).
%% Coerce the textual or binary spelling of a boolean to the boolean
%% atom itself; any other input fails with function_clause.
-spec to_boolean(list() | binary() | boolean()) -> boolean().
to_boolean(B) when is_boolean(B) -> B;
to_boolean(<<"true">>) -> true;
to_boolean(<<"false">>) -> false;
to_boolean("true") -> true;
to_boolean("false") -> false.
%% Coerce an integer, decimal string or decimal binary to an integer.
-spec to_integer(integer() | list() | binary()) -> integer().
to_integer(Int) when is_integer(Int) -> Int;
to_integer(Bin) when is_binary(Bin) -> list_to_integer(binary_to_list(Bin));
to_integer(Str) when is_list(Str) -> list_to_integer(Str).
%% Coerce an atom, string or binary to an atom.
%% NOTE(review): creates new atoms via list_to_atom/1 — atoms are never
%% garbage collected, so avoid feeding this unbounded untrusted input.
-spec to_atom(atom() | list() | binary()) -> atom().
to_atom(Atom) when is_atom(Atom) -> Atom;
to_atom(Bin) when is_binary(Bin) -> list_to_atom(binary_to_list(Bin));
to_atom(Str) when is_list(Str) -> list_to_atom(Str).
%% Coerce a float, float string or float binary to a float.
%% The textual form must contain a decimal point (list_to_float/1 rules).
-spec to_float(float() | list() | binary()) -> float().
to_float(Float) when is_float(Float) -> Float;
to_float(Bin) when is_binary(Bin) -> to_float(binary_to_list(Bin));
to_float(Str) when is_list(Str) -> list_to_float(Str).
-spec to_binary(binary() |
                list() |
                integer() |
                float() |
                atom() |
                pid() |
                port() |
                reference()) -> binary().
%% Render any of the supported term kinds as a binary; non-list terms go
%% through their canonical list representation first.
to_binary(Bin) when is_binary(Bin) -> Bin;
to_binary(List) when is_list(List) -> list_to_binary(List);
to_binary(Int) when is_integer(Int) -> to_binary(integer_to_list(Int));
to_binary(Float) when is_float(Float) -> to_binary(float_to_list(Float));
to_binary(Atom) when is_atom(Atom) -> to_binary(atom_to_list(Atom));
to_binary(Pid) when is_pid(Pid) -> to_binary(pid_to_list(Pid));
to_binary(Port) when is_port(Port) -> to_binary(port_to_list(Port));
to_binary(Ref) when is_reference(Ref) -> to_binary(to_list(Ref)).

-spec to_list(
        list() |
        binary() |
        atom() |
        integer() |
        float() |
        port() |
        pid() |
        reference()) -> list().
%% Render any of the supported term kinds as a (character) list.
to_list(List) when is_list(List) -> List;
to_list(Bin) when is_binary(Bin) -> binary_to_list(Bin);
to_list(Atom) when is_atom(Atom) -> atom_to_list(Atom);
to_list(Int) when is_integer(Int) -> integer_to_list(Int);
to_list(Float) when is_float(Float) -> float_to_list(Float);
to_list(Pid) when is_pid(Pid) -> pid_to_list(Pid);
to_list(Port) when is_port(Port) -> port_to_list(Port);
to_list(Ref) when is_reference(Ref) -> erlang:ref_to_list(Ref).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_expiring_cache_server).
-behaviour(gen_server).
-callback start_link() -> {ok, pid()} | ignore | {error, term()}.
-export([
now_ts/0,
start_link/2
]).
-export([
init/1,
terminate/2,
handle_call/3,
handle_cast/2,
handle_info/2,
code_change/3
]).
-define(DEFAULT_BATCH_SIZE, 1000).
-define(DEFAULT_PERIOD_MSEC, 5000).
-define(DEFAULT_MAX_JITTER_MSEC, 1000).
-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
-include_lib("kernel/include/logger.hrl").
%% @doc Start a cache server registered locally under `Name'.
%%   The registered name is also injected into `Opts' so that init/1
%%   can derive the default cache name from it.
start_link(Name, Opts) when is_atom(Name) ->
    gen_server:start_link({local, Name}, ?MODULE, Opts#{name => Name}, []).
%% Build the initial state map. `cache_name' defaults to the registered
%% server name; sweep period, max jitter and batch size fall back to the
%% module defaults. The first expiry sweep is scheduled immediately.
init(Opts) ->
    DefaultCacheName = atom_to_binary(maps:get(name, Opts), utf8),
    Period = maps:get(period, Opts, ?DEFAULT_PERIOD_MSEC),
    MaxJitter = maps:get(max_jitter, Opts, ?DEFAULT_MAX_JITTER_MSEC),
    {ok, #{
        cache_name => maps:get(cache_name, Opts, DefaultCacheName),
        batch_size => maps:get(batch_size, Opts, ?DEFAULT_BATCH_SIZE),
        period => Period,
        max_jitter => MaxJitter,
        % timer driving the periodic `remove_expired' sweep (handle_info/2)
        timer_ref => schedule_remove_expired(Period, MaxJitter),
        % bookkeeping for observability: sweep timings and expiry lag
        oldest_ts => 0,
        elapsed => 0,
        largest_elapsed => 0,
        lag => 0}}.
%% Nothing to clean up; pending timers die with the process.
terminate(_Reason, _St) ->
    ok.
%% This server exposes no call API; any call stops it deliberately.
handle_call(Request, _From, St) ->
    {stop, {bad_call, Request}, {bad_call, Request}, St}.
%% No casts are part of the protocol; stop on any.
handle_cast(Request, St) ->
    {stop, {bad_cast, Request}, St}.
%% Periodic sweep: clears expired entries up to "now", records sweep
%% statistics (duration, largest duration, lag between now and the
%% oldest known timestamp), then re-arms the timer.
handle_info(remove_expired, St) ->
    #{
        cache_name := Name,
        batch_size := BatchSize,
        period := Period,
        max_jitter := MaxJitter,
        oldest_ts := OldestTS0,
        largest_elapsed := LargestElapsed
    } = St,
    NowTS = now_ts(),
    % keep the running maximum so `lag' only ever shrinks as entries are
    % cleared (presumably clear_range_to/3 returns the newest timestamp
    % it removed -- confirm against couch_expiring_cache_fdb)
    OldestTS = max(OldestTS0,
        couch_expiring_cache_fdb:clear_range_to(Name, NowTS, BatchSize)),
    Elapsed = now_ts() - NowTS,
    {noreply, St#{
        timer_ref := schedule_remove_expired(Period, MaxJitter),
        oldest_ts := OldestTS,
        elapsed := Elapsed,
        largest_elapsed := max(Elapsed, LargestElapsed),
        lag := NowTS - OldestTS}};
handle_info({Ref, ready}, St) when is_reference(Ref) ->
    % Prevent crashing server and application
    % NOTE(review): presumably a stale erlfdb future message; logged and
    % dropped rather than crashing the server
    ?LOG_ERROR(#{
        what => spurious_future_ready,
        ref => Ref
    }),
    LogMsg = "~p : spurious erlfdb future ready message ~p",
    couch_log:error(LogMsg, [?MODULE, Ref]),
    {noreply, St};
handle_info(Msg, St) ->
    % anything else is unexpected; stop and let the supervisor restart us
    {stop, {bad_info, Msg}, St}.
%% State layout is unchanged across releases; keep it as-is.
code_change(_Vsn, St, _Extra) ->
    {ok, St}.
%% @doc Current wall-clock time in milliseconds since the Unix epoch.
%%   Used both for stamping entries and computing expiry cutoffs.
now_ts() ->
    % erlang:system_time/1 is the modern, cheaper equivalent of the old
    % os:timestamp/0 tuple arithmetic and yields the same epoch value.
    erlang:system_time(millisecond).
%% Private
%% @doc Schedule the next `remove_expired' sweep after `Timeout' ms plus
%%   a random jitter, so concurrent instances do not sweep in lock step.
%%   The jitter is capped by `MaxJitter' and by half the period: the
%%   previous `max/2' made the jitter at least half the period, ignoring
%%   the configured `max_jitter' entirely.
schedule_remove_expired(Timeout, MaxJitter) ->
    Jitter = min(Timeout div 2, MaxJitter),
    % rand:uniform/1 needs a positive argument, hence the max(1, _)
    Wait = Timeout + rand:uniform(max(1, Jitter)),
    erlang:send_after(Wait, self(), remove_expired).
%% @doc
%% BEAM friendly spinlocks for Elixir/Erlang.
%%
%% This module provides a very simple API for managing locks
%% inside a BEAM instance. It's modeled on spinlocks, but works
%% through message passing rather than loops. Locks can have
%% multiple slots to enable arbitrary numbers of associated
%% processes. The moment a slot is freed, the next awaiting
%% process acquires the lock.
%%
%% All of this is done in a simple Erlang process so there's
%% very little dependency, and management is extremely simple.
-module(sleeplocks).
-compile(inline).
%% Public API
-export([new/1, new/2, acquire/1, attempt/1, execute/2, release/1, start_link/1, start_link/2]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
%% Record definition for internal use.
-record(lock, {slots, current=#{}, waiting=queue:new()}).
%% Inlining for convenience functions.
-compile({inline, [new/1, start_link/1, start_link/2]}).
%% Name references available to call a lock.
-type name() ::
atom() |
{local, Name :: atom()} |
{global, GlobalName :: any()} |
{via, Module :: atom(), ViaName :: any()}.
%% Startup call return types to pass back through.
-type start_ret() :: {ok, pid()} | ignore | {error, term()}.
%% ===================================================================
%% Public API
%% ===================================================================
%% @doc
%% Creates an unnamed lock process with `Slots' concurrent holders.
-spec new(Slots :: pos_integer()) -> start_ret().
new(Slots) ->
    new(Slots, []).

%% @doc
%% Creates a lock process with `Slots' concurrent holders. A `{name, N}'
%% option registers the process: an atom means local registration, any
%% other value is passed to gen_server:start_link/4 verbatim.
-spec new(Slots :: pos_integer(), Args :: list()) -> start_ret().
new(Slots, Args) when
    is_number(Slots),
    is_list(Args)
->
    case proplists:get_value(name, Args) of
        undefined ->
            gen_server:start_link(?MODULE, Slots, []);
        Local when is_atom(Local) ->
            gen_server:start_link({local, Local}, ?MODULE, Slots, []);
        Other ->
            gen_server:start_link(Other, ?MODULE, Slots, [])
    end.
%% @doc
%% Acquires a lock slot for the calling process, blocking without any
%% timeout until one becomes free.
-spec acquire(Name :: name()) -> ok.
acquire(LockRef) ->
    gen_server:call(LockRef, acquire, infinity).
%% @doc
%% Tries to grab a lock slot for the calling process without waiting;
%% returns `{error, unavailable}' immediately when every slot is taken.
-spec attempt(Name :: name()) -> ok | {error, unavailable}.
attempt(LockRef) ->
    gen_server:call(LockRef, attempt).
%% @doc
%% Runs `Fun' while holding the lock, guaranteeing the slot is given
%% back afterwards — even when the function raises.
-spec execute(Name :: name(), Exec :: fun(() -> any())) -> any().
execute(LockRef, Fun) ->
    acquire(LockRef),
    try
        Fun()
    after
        release(LockRef)
    end.
%% @doc
%% Gives back a lock slot previously acquired by the calling process.
-spec release(Name :: name()) -> ok.
release(LockRef) ->
    gen_server:call(LockRef, release).
%% @hidden
%% Elixir-friendly alias for new/1 (child-spec compatibility).
-spec start_link(Slots :: pos_integer()) -> start_ret().
start_link(SlotCount) ->
    new(SlotCount).

%% @hidden
%% Elixir-friendly alias for new/2 (child-spec compatibility).
-spec start_link(Slots :: pos_integer(), Args :: list()) -> start_ret().
start_link(SlotCount, Opts) ->
    new(SlotCount, Opts).
%%====================================================================
%% Callback functions
%%====================================================================
%% @hidden
%% Initialization phase: all `Slots' slots start free, with no holders
%% and an empty queue of waiting callers.
init(Slots) ->
    {ok, #lock{slots = Slots}}.
%% @hidden
%% Handles a lock acquisition (blocks until one is available): when no
%% slot is free the caller is parked in the waiting queue and receives
%% no reply until release frees a slot.
handle_call(acquire, Caller, #lock{waiting = Waiting} = Lock) ->
    case try_lock(Caller, Lock) of
        {ok, NewLock} ->
            {reply, ok, NewLock};
        {error, unavailable} ->
            % park the caller; next_caller/1 replies later via gen_server:reply
            {noreply, Lock#lock{waiting = queue:snoc(Waiting, Caller)}}
    end;
%% @hidden
%% Handles an attempt to acquire a lock: never blocks, reporting
%% `{error, unavailable}' instead of queueing the caller.
handle_call(attempt, Caller, Lock) ->
    case try_lock(Caller, Lock) of
        {ok, NewLock} ->
            {reply, ok, NewLock};
        {error, unavailable} = E ->
            {reply, E, Lock}
    end;
%% @hidden
%% Handles the release of a previously acquired lock. Releasing a lock
%% the caller does not hold is a no-op; otherwise the freed slot is
%% offered to the longest-waiting caller right away.
handle_call(release, {From, _Ref}, #lock{current = Current} = Lock) ->
    NewLock = case maps:is_key(From, Current) of
        false -> Lock;
        true ->
            NewCurrent = maps:remove(From, Current),
            next_caller(Lock#lock{current = NewCurrent})
    end,
    {reply, ok, NewLock}.
%% @hidden
%% Casts are not part of the protocol; ignore them.
handle_cast(_Request, State) ->
    {noreply, State}.
%% @hidden
%% Raw messages are not part of the protocol; ignore them.
handle_info(_Info, State) ->
    {noreply, State}.
%% @hidden
%% Nothing to clean up on shutdown.
terminate(_Reason, _State) ->
    ok.
%% @hidden
%% State layout is unchanged across releases.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%====================================================================
%% Private functions
%%====================================================================
%% Records the caller's pid as a slot holder in the internal locks map.
%% Only the pid is kept; the gen_server call reference is not needed.
lock_caller({From, _Ref}, #lock{current = Current} = Lock) ->
    Lock#lock{current = maps:put(From, ok, Current)}.
%% Attempts to pass a freed slot to the longest-waiting caller, if any.
%% The waiting caller gets the delayed `ok' reply to its original
%% gen_server call and becomes a slot holder.
next_caller(#lock{waiting = Waiting} = Lock) ->
    case queue:out(Waiting) of
        % NOTE: match `{empty, _}' instead of the queue's internal
        % `{[], []}' representation — `queue:queue()' is opaque and
        % peeking inside breaks Dialyzer opacity checking.
        {empty, _Drained} ->
            Lock;
        {{value, Next}, NewWaiting} ->
            gen_server:reply(Next, ok),
            NewLock = lock_caller(Next, Lock),
            NewLock#lock{waiting = NewWaiting}
    end.
%% Attempts to acquire a lock for a calling process. Fails with
%% `{error, unavailable}' once the holder count has reached the slot
%% count; otherwise records the caller as a holder.
try_lock(Caller, #lock{slots = Slots, current = Current} = Lock) ->
    case maps:size(Current) of
        S when S == Slots ->
            {error, unavailable};
        _ ->
            {ok, lock_caller(Caller, Lock)}
    end.
%% ===================================================================
%% Private test cases
%% ===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% ``The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved via the world wide web at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
%% AB. All Rights Reserved.''
%%
%% Modified: 17 Jan 2007 by <NAME> <<EMAIL>>
%%
%%
%% Erlang token scanning functions of io library. This Lexer has been changed by
%% <NAME> to keep the comments and whitespaces in the token stream.
%% For handling ISO 8859-1 (Latin-1) we use the following type
%% information:
%%
%
%% @hidden
%% @private
-module(wrangler_scan_with_layout).
-export([string/1,string/2,string/3, string/4]).
-import(wrangler_scan, [reserved_word/1, escape_char/1]).
-import(lists, [member/2, reverse/1]).
-define(DEFAULT_TABWIDTH, 8).
-define(DEFAULT_FILEFORMAT, unix).
%% string(CharList, StartPos)
%% Takes a list of characters and tries to tokenise them.
%%
%% Returns:
%% {ok,[Tok]}
%% {error,{ErrorPos,?MODULE,What},EndPos}
%% Convenience arities: each fills in one more default (start position
%% {1,1}, module-default tab width, module-default file format) before
%% handing over to the full string/4.
string(Chars) ->
    string(Chars, {1, 1}).

string(Chars, {Line, Col}) ->
    string(Chars, {Line, Col}, ?DEFAULT_TABWIDTH).

string(Chars, {Line, Col}, TabWidth) ->
    string(Chars, {Line, Col}, TabWidth, ?DEFAULT_FILEFORMAT).

string(Chars, {Line, Col} = Pos, TabWidth, FileFormat)
    when is_list(Chars), is_integer(Line), is_integer(Col), is_integer(TabWidth) ->
    scan(Chars, [], [], Pos, [], [], TabWidth, FileFormat).
%% String
more(Cs, Stack, Toks, {Line, Col}, eos, Errors, _TabWidth, _FileFormat, Fun) ->
erlang:error(badstate, [Cs, Stack, Toks, {Line, Col}, eos, Errors, Fun]);
% %% Debug clause for chopping string into 1-char segments
% more(Cs, Stack, Toks, Pos, [H|T], Errors, Fun) ->
% Fun(Cs++[H], Stack, Toks, Pos, T, Errors);
more(Cs, Stack, Toks, {Line, Col}, [], Errors, TabWidth, FileFormat, Fun) ->
Fun(Cs ++ eof, Stack, Toks, {Line, Col}, eos, Errors, TabWidth,FileFormat);
%% Stream
more(Cs, Stack, Toks, {Line, Col}, eof, Errors, TabWidth, FileFormat, Fun) ->
erlang:error(badstate, [Cs, Stack, Toks, {Line, Col}, eof, Errors, TabWidth, FileFormat, Fun]);
more(Cs, Stack, Toks, {Line, Col}, io, Errors, TabWidth, FileFormat, Fun) ->
{more, {Cs, Stack, Toks, {Line, Col}, io, Errors,TabWidth, FileFormat, Fun}}.
%% Scan loop.
%%
%% The scan_*/7 and sub_scan_*/7 functions does tail recursive calls
%% between themselves to change state. State data is kept on the Stack.
%% Results are passed on the Stack and on the stream (Cs). The variable
%% State in this loop is not the scan loop state, but the state for
%% instream handling by more/8 and done/6. The variable Stack is not
%% always a stack, it is just stacked state data for the scan loop, and
%% the variable Errors is a reversed list of scan error {Error,Pos} tuples.
%%
%% All the scan_*/7 functions have the same arguments (in the same order),
%% to keep the tail recursive calls (jumps) fast.
%%
%% When more data is needed from the stream, the tail recursion loop is
%% broken by calling more/8 that either returns to the I/O-server to
%% get more data or fetches it from a string, or by calling done/6 when
%% scanning is done.
%%
%% The last argument to more/8 is a fun to jump back to with more data
%% to continue scanning where it was interrupted.
%%
%% more/8 and done/6 handles scanning from I/O-server (Stream) or from String.
%%
%% scan(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
%%
%% Main tokeniser dispatch. Unlike the stock scanner, whitespace and
%% comments are emitted as tokens so the original layout survives in
%% the token stream. Sub-scanners handle atoms, variables, numbers,
%% characters, strings, quoted atoms and comments. `FileFormat'
%% (unix | dos | mac) decides how a bare `\r' advances the position;
%% `TabWidth' is how many columns a tab occupies.
scan([$\r | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    % a lone \r terminates the line on classic Mac files only
    case FileFormat of
        mac ->
            scan(Cs, Stack, [{whitespace, {Line, Col}, '\r'} | Toks], {Line + 1, 1}, State, Errors, TabWidth, FileFormat);
        _ ->
            scan(Cs, Stack, [{whitespace, {Line, Col}, '\r'} | Toks], {Line, Col + 1}, State, Errors, TabWidth, FileFormat)
    end;
scan([$\n | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{whitespace, {Line, Col}, '\n'} | Toks], {Line + 1, 1}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C == $\t ->
    scan(Cs, Stack, [{whitespace, {Line, Col}, '\t'} | Toks], {Line, Col + TabWidth}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C >= $\000, C =< $\s -> % Control chars - kept as space tokens
    scan(Cs, Stack, [{whitespace, {Line, Col}, '\s'} | Toks], {Line, Col + 1}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C >= $\200, C =< $\240 -> % Control chars - kept as space tokens
    scan(Cs, Stack, [{whitespace, {Line, Col}, '\s'} | Toks], {Line, Col + 1}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C >= $a, C =< $z -> % Atoms
    sub_scan_name(Cs, [C, fun scan_atom/8], Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C >= $\337, C =< $\377,
         C /= $\367 -> % Atoms (Latin-1 lower case, excluding division sign)
    sub_scan_name(Cs, [C, fun scan_atom/8], Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C >= $A, C =< $Z -> % Variables
    sub_scan_name(Cs, [C, fun scan_variable/8], Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C >= $\300, C =< $\336, C /= $\327 -> % Variables (Latin-1 upper case)
    sub_scan_name(Cs, [C, fun scan_variable/8], Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
scan([$_ | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) -> % _Variables
    sub_scan_name(Cs, [$_, fun scan_variable/8], Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when C >= $0, C =< $9 -> % Numbers
    scan_number(Cs, [C], Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
scan([$$ | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) -> % Character constant
    scan_char(Cs, Stack, Toks, {Line, Col + 1}, State, Errors, TabWidth, FileFormat);
scan([$' | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) -> % Quoted atom
    scan_qatom(Cs, [$', {Line, Col}], Toks, {Line, Col + 1}, State, Errors, TabWidth, FileFormat);
scan([$" | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) -> % String
    scan_string(Cs, [$", {Line, Col}], Toks, {Line, Col + 1}, State, Errors, TabWidth, FileFormat);
scan([$% | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) -> % Comment
    scan_comment(Cs, [$%, {Line, Col}], Toks, {Line, Col + 1}, State, Errors, TabWidth, FileFormat);
%% Punctuation characters and operators, first recognise multiples.
%% Clauses are grouped by first character (a short one with the same
%% head has to come after a longer one). A clause matching the whole
%% remaining input (e.g. "<") may be a split operator, so more input is
%% requested via more/9 before deciding.
%% << <- <=
scan("<<" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'<<', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("<-" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'<-', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("<=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'<=', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("<" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% >> >=
scan(">>" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'>>', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan(">=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'>=', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan(">" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% -> --
scan("->" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'->', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("--" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'--', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("-" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% ++
scan("++" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'++', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("+" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% =:= =/= =< ==
scan("=:=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'=:=', {Line, Col}} | Toks], {Line, Col + 3}, State, Errors, TabWidth, FileFormat);
scan("=:" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
scan("=/=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'=/=', {Line, Col}} | Toks], {Line, Col + 3}, State, Errors, TabWidth, FileFormat);
scan("=/" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
scan("=<" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'=<', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("=>" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'=>', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan(":=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{':=', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("==" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'==', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("=" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% /=
scan("/=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'/=', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("/" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% ||
scan("||" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'||', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("|" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% :-
scan(":-" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{':-', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
%% :: for typed records
scan("::" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'::', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan(":" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% ... .. and a possibly-split dot
scan("..." ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'...', {Line, Col}} | Toks], {Line, Col + 3}, State, Errors, TabWidth, FileFormat);
scan(".." = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
scan(".." ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{'..', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth, FileFormat);
scan("." = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% Full stop and plain '.'
scan("." ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan_dot(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
%% All single-char punctuation characters and operators (except '.')
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    scan(Cs, Stack, [{list_to_atom([C]), {Line, Col}} | Toks],
         {Line, Col + 1}, State, Errors, TabWidth, FileFormat);
%% Ran out of characters / reached end of input
scan([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
scan(Eof, _Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    done(Eof, Errors, Toks, {Line, Col}, State, TabWidth, FileFormat).
%% Turns the accumulated characters Name into an atom token.  Reserved
%% words become their own token tag (e.g. {'case', Pos}); other names
%% become {atom, Pos, Atom}.  If list_to_atom/1 raises (caught via
%% `catch'), an {illegal, atom} error is recorded and scanning
%% continues at the same position.
scan_atom(Cs, Name, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    case catch list_to_atom(Name) of
        Atom when is_atom(Atom) ->
            case reserved_word(Atom) of
                true ->
                    %% Reserved word: the token tag is the word itself.
                    scan(Cs, [], [{Atom, {Line, Col}} | Toks],
                         {Line, Col + length(Name)}, State, Errors, TabWidth,FileFormat);
                false ->
                    scan(Cs, [], [{atom, {Line, Col}, Atom} | Toks],
                         {Line, Col + length(Name)}, State, Errors, TabWidth,FileFormat)
            end;
        _ ->
            %% list_to_atom/1 failed: record the error, keep scanning.
            scan(Cs, [], Toks, {Line, Col}, State,
                 [{{illegal, atom}, {Line, Col}} | Errors],TabWidth,FileFormat)
    end.
%% Emits a {var, Pos, Atom} token for the accumulated variable name.
%% A name that cannot be converted to an atom is recorded as an
%% {illegal, var} error and scanning resumes at the same position.
scan_variable(Cs, Name, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    NextPos = {Line, Col + length(Name)},
    case catch list_to_atom(Name) of
        Var when is_atom(Var) ->
            scan(Cs, [], [{var, {Line, Col}, Var} | Toks], NextPos,
                 State, Errors, TabWidth, FileFormat);
        _ ->
            scan(Cs, [], Toks, {Line, Col}, State,
                 [{{illegal, var}, {Line, Col}} | Errors], TabWidth, FileFormat)
    end.
%% Scan for a name - unquoted atom or variable, after the first character.
%%
%% Stack argument: return fun.
%% Returns the scanned name on the stack, unreversed.
%%
%% Accumulates name characters onto Stack until a non-name character is
%% seen.  The bottom of Stack holds the continuation fun pushed by the
%% caller; on completion the fun is applied to the collected name (in
%% input order) and the unconsumed characters.
sub_scan_name([C | Cs] = Css, Stack, Toks, {Line, Col},State, Errors,TabWidth,FileFormat) ->
    case name_char(C) of
        true ->
            sub_scan_name(Cs, [C | Stack], Toks, {Line, Col}, State,Errors, TabWidth,FileFormat);
        false ->
            %% Stack was seeded with the return fun, so after reversing
            %% the fun is the head and the name is the tail.
            [Fun | Name] = reverse(Stack),
            Fun(Css, Name, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    end;
sub_scan_name([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Out of input: request more characters and resume here.
    more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun sub_scan_name/8);
sub_scan_name(Eof, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% End of input: deliver whatever name was collected so far.
    [Fun | Name] = reverse(Stack),
    Fun(Eof, Name, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat).
%% True for characters that may appear inside an unquoted atom or a
%% variable name: ASCII letters and digits, the Latin-1 letters
%% (excluding the multiplication sign 16#D7 and division sign 16#F7),
%% underscore and at-sign.
name_char($_) -> true;
name_char($@) -> true;
name_char(C) when C >= $0, C =< $9 -> true;
name_char(C) when C >= $a, C =< $z -> true;
name_char(C) when C >= $A, C =< $Z -> true;
name_char(C) when C >= 16#C0, C =< 16#DE, C =/= 16#D7 -> true;
name_char(C) when C >= 16#DF, C =< 16#FF, C =/= 16#F7 -> true;
name_char(_) -> false.
%% Scans the character following a $ sign and emits a {char, Pos, Value}
%% token.  A backslash starts an escape sequence, handled by
%% sub_scan_escape/8 with scan_char_escape/8 as the continuation.
scan_char([$\\ | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    sub_scan_escape(Cs, [fun scan_char_escape/8, $\\ | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_char([$\n | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Literal newline character: position advances to the next line.
    scan(Cs, [], [{char, {Line, Col}, $\n} | Toks], {Line + 1, Col}, State, Errors, TabWidth,FileFormat);
scan_char([$ | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Literal space character: token value is 32.
    scan(Cs, [], [{char, {Line, Col}, 32 } | Toks], {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_char([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan_char/8);
scan_char(Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
    %% Any other character is handled like an (unescaped) escape char.
    scan_char_escape(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat).
%% Emits the {char, ...} token for a (possibly escaped) character.
%% The atom 'nl' is the marker for an escaped newline produced by
%% sub_scan_escape/8.
scan_char_escape([nl | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    scan(Cs, [], [{char, {Line, Col}, $\n} | Toks], {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
scan_char_escape([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% If the backslash is still on the stack the token keeps the
    %% escape spelling as an atom ("$\\" ++ [C]); otherwise the
    %% printable form from io_lib:write_char/1 is used.
    C1 = case Stack of [$\\|_] ->
             list_to_atom("$\\"++[C]);
         _ ->
             io_lib:write_char(C)
         end,
    scan(Cs, [], [{char, {Line, Col-1}, C1} | Toks],{Line, Col + 1}, State, Errors, TabWidth,FileFormat);
scan_char_escape(Eof, _Stack, _Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Input ended in the middle of a character literal.
    done(Eof, [{char, {Line, Col}} | Errors], [], {Line, Col + 1}, State, TabWidth,FileFormat).
%% Scans the body of a double-quoted string.  Stack layout:
%% [reversed characters ..., $", StartPos]; the closing quote pops the
%% whole stack and emits a single {string, StartPos, Chars} token.
scan_string([$" | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    [StartPos, $" | S] = reverse(Stack),
    scan(Cs, [], [{string, StartPos, S} | Toks],{Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_string([$\r | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% On mac (CR-only) files a CR ends the line; otherwise it just
    %% occupies one column.
    case FileFormat of
        mac -> scan_string(Cs, [$\r | Stack], Toks, {Line + 1, 1},
                           State, Errors, TabWidth,FileFormat);
        _ -> scan_string(Cs, [$\r | Stack], Toks, {Line, Col+1},
                         State, Errors, TabWidth,FileFormat)
    end;
scan_string([$\n | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
    scan_string(Cs, [$\n | Stack], Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
scan_string([$\\ | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    sub_scan_escape(Cs, [fun scan_string_escape/8, $\\| Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_string([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C==$\t ->
    %% Tabs advance the column by the configured TabWidth.
    scan_string(Cs, [C | Stack], Toks, {Line, Col+TabWidth}, State,Errors, TabWidth,FileFormat);
scan_string([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    scan_string(Cs, [C | Stack], Toks, {Line, Col+1}, State,Errors, TabWidth,FileFormat);
scan_string([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan_string/8);
scan_string(Eof, Stack, _Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Unterminated string: report it with a 16-character prefix.
    [StartPos, $" | S] = reverse(Stack),
    SS = string:substr(S, 1, 16),
    done(Eof, [{{string, $", SS}, StartPos} | Errors], [],
         {Line, Col}, State, TabWidth,FileFormat).
%% Continuation for an escape sequence inside a string: pushes the
%% resolved character back onto the string's stack.
scan_string_escape([nl | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
    %% Escaped newline: store an actual newline, move to the next line.
    scan_string(Cs, [$\n | Stack], Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
scan_string_escape([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    scan_string(Cs, [C| Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_string_escape(Eof, Stack, _Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
    %% Input ended inside a string escape: unterminated-string error.
    [StartPos, $" | S] = reverse(Stack),
    SS = string:substr(S, 1, 16),
    done(Eof, [{{string, $", SS}, StartPos} | Errors], [],
         {Line, Col + length(S) + 2}, State, TabWidth,FileFormat).
%% Scans the body of a single-quoted atom.  The stack layout is the
%% same as for strings but seeded with $'.  On the closing quote a
%% {qatom, StartPos, Atom} token is emitted; note that the quote
%% characters themselves are kept in the atom text.
scan_qatom([$' | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% The closing quote is consed on before reversing, so S ends with
    %% a trailing quote as well.
    [StartPos, $' | S] = reverse([$'|Stack]),
    case catch list_to_atom(S) of
        A when is_atom(A) ->
            scan(Cs, [], [{qatom, StartPos, list_to_atom([$' | S]) } | Toks],{Line, Col + 1}, State, Errors, TabWidth,FileFormat);
        _ ->
            %% list_to_atom/1 failed: record the error at the start of
            %% the quoted atom.
            scan(Cs, [], Toks, {Line, Col}, State,[{{illegal, qatom}, StartPos} | Errors], TabWidth,FileFormat)
    end;
scan_qatom([$\r|Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% CR ends a line only for mac (CR-only) file format.
    case FileFormat of
        mac -> scan_qatom(Cs,[$\r|Stack], Toks, {Line+1, 1}, State, Errors, TabWidth,FileFormat);
        _ -> scan_qatom(Cs,[$\r|Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat)
    end;
scan_qatom([$\n | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
    scan_qatom(Cs, [$\n | Stack], Toks, {Line + 1, 1},State, Errors, TabWidth,FileFormat);
%% scan_qatom([$\\ | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%%     sub_scan_escape(Cs, [fun scan_qatom_escape/8, $\\ | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_qatom([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C==$\t ->
    %% Tabs advance the column by the configured TabWidth.
    scan_qatom(Cs, [C | Stack], Toks, {Line, Col+TabWidth}, State, Errors, TabWidth,FileFormat);
scan_qatom([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    scan_qatom(Cs, [C | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_qatom([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_qatom/8);
scan_qatom(Eof, Stack, _Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Unterminated quoted atom: report with a 16-character prefix.
    [StartPos, $' | S] = reverse(Stack),
    SS = string:substr(S, 1, 16),
    done(Eof, [{{string, $', SS}, StartPos} | Errors], [],{Line, Col}, State, TabWidth,FileFormat).
%% scan_qatom_escape([nl | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
%% scan_qatom(Cs, [$\n | Stack], Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
%% scan_qatom_escape([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% scan_qatom(Cs, [C | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
%% scan_qatom_escape(Eof, Stack, _Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
%% [StartPos, $' | S] = reverse(Stack),
%% SS = string:substr(S, 1, 16),
%% done(Eof, [{{string, $', SS}, StartPos} | Errors], [],
%% {Line, Col}, State, TabWidth,FileFormat).
%% Scan for a character escape sequence, in character literal or string.
%% A string is a syntactical sugar list (e.g "abc")
%% or a quoted atom (e.g 'EXIT').
%%
%% Stack argument: return fun.
%% Returns the resulting escape character on the stream.
%% The return atom 'nl' means that the escape sequence Backslash Newline
%% was found, i.e an actual Newline in the input.
%%
%% \<1-3> octal digits
%% Scans one escape sequence after a backslash.  The bottom of Stack
%% holds the continuation fun; the resolved character (or the atom 'nl'
%% for an escaped newline) is pushed back onto the input stream before
%% the continuation is called.
sub_scan_escape([O1, O2, O3 | Cs], [Fun|Stack], Toks,
                {Line, Col}, State, Errors, TabWidth,FileFormat)
    when O1 >= $0, O1 =< $7, O2 >= $0, O2 =< $7, O3 >= $0,
         O3 =< $7 ->
    %% Three octal digits.  Inside a string (a $" is on the stack) the
    %% digits are handed back to the continuation; in a character
    %% literal a char token spelling the escape is emitted directly.
    case reverse(Stack) of
        [_,$"|_]->
            Fun([O1, O2, O3 | Cs], Stack, Toks, {Line, Col+2}, State,
                Errors, TabWidth,FileFormat);
        _ ->
            C1=list_to_atom("$\\"++[O1, O2, O3]),
            scan(Cs, [], [{char, {Line, Col-2}, C1} | Toks],{Line, Col + 3}, State, Errors, TabWidth,FileFormat)
    end;
sub_scan_escape([O1, O2] = Cs, Stack, Toks, {Line, Col},
                State, Errors, TabWidth,FileFormat)
    when O1 >= $0, O1 =< $7, O2 >= $0, O2 =< $7 ->
    %% Could still be a three-digit escape: wait for more input.
    more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
         fun sub_scan_escape/8);
sub_scan_escape([O1, O2 | Cs], [_Fun | _Stack], Toks,
                {Line, Col}, State, Errors, TabWidth,FileFormat)
    when O1 >= $0, O1 =< $7, O2 >= $0, O2 =< $7 ->
    %% Two octal digits followed by a non-octal character.
    C1=list_to_atom("$\\"++[O1, O2]),
    scan(Cs, [], [{char, {Line, Col-2}, C1} | Toks],{Line, Col + 2}, State, Errors, TabWidth,FileFormat);
%% Val = O1 * 8 + O2 - 9 * $0,
%% Fun([O1, O2 | Cs], Stack, Toks, {Line, Col+1}, State,
%%     Errors, TabWidth,FileFormat);
sub_scan_escape([O1] = Cs, Stack, Toks, {Line, Col},
                State, Errors, TabWidth,FileFormat)
    when O1 >= $0, O1 =< $7 ->
    %% Could still grow into a longer octal escape: wait for more input.
    more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
         fun sub_scan_escape/8);
sub_scan_escape([O1 | Cs], [Fun | Stack], Toks,
                {Line, Col}, State, Errors, TabWidth,FileFormat)
    when O1 >= $0, O1 =< $7 ->
    %%Val = O1 - $0,
    Fun([O1 | Cs], Stack, Toks, {Line, Col}, State,
        Errors, TabWidth,FileFormat);
%% \^X -> CTL-X
sub_scan_escape([$^, C | Cs], [Fun | Stack], Toks,
                {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %%Val = C band 31,
    Fun([C | Cs], Stack, Toks, {Line, Col}, State,
        Errors, TabWidth,FileFormat);
sub_scan_escape([$^] = Cs, Stack, Toks, {Line, Col},
                State, Errors, TabWidth,FileFormat) ->
    %% A lone '^' may be followed by the control character later.
    more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
         fun sub_scan_escape/8);
sub_scan_escape([$^ | Eof], [Fun | Stack], Toks,
                {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    Fun(Eof, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
%% \NL (backslash newline)
sub_scan_escape([$\n | Cs], [Fun | Stack], Toks,
                {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Escaped newline: signalled to the continuation as the atom 'nl'.
    Fun([nl | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
%% \X - familiar escape sequences
sub_scan_escape([C | Cs], [Fun | Stack], Toks,
                {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Val = escape_char(C),
    Fun([C | Cs], Stack, Toks, {Line, Col}, State,
        Errors, TabWidth,FileFormat);
%%
sub_scan_escape([], Stack, Toks, {Line, Col}, State,
                Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
         fun sub_scan_escape/8);
sub_scan_escape(Eof, [Fun | Stack], Toks, {Line, Col},
                State, Errors, TabWidth,FileFormat) ->
    Fun(Eof, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat).
%% Scans the digits of a number.  A '.' followed by a digit switches to
%% fraction scanning, '#' switches to based-integer scanning, anything
%% else terminates the integer.
scan_number([$., C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C >= $0, C =< $9 ->
    scan_fraction(Cs, [C, $. | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_number([$.] = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% A trailing '.' may start a fraction or be a full stop: wait for
    %% more input before deciding.
    more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,fun scan_number/8);
scan_number([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C >= $0, C =< $9 ->
    scan_number(Cs, [C | Stack], Toks, {Line, Col}, State,Errors, TabWidth,FileFormat);
scan_number([$# | Cs], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
    %% Base#Digits notation; bases 2..36 are accepted.
    case catch list_to_integer(reverse(Stack)) of
        B when is_integer(B), B >= 2, B =< 1 + $Z - $A + 10 ->
            scan_based_int(Cs, [B], Toks, {Line, Col}, State,Errors, TabWidth,FileFormat);
        B ->
            scan(Cs, [], Toks, {Line, Col}, State,[{{base, B}, {Line, Col}} | Errors], TabWidth,FileFormat)
    end;
scan_number([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_number/8);
scan_number(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% End of the number.  Note: the token keeps the source text
    %% (reverse(Stack)), not the converted value; list_to_integer/1 is
    %% only used for validation here.
    case catch list_to_integer(reverse(Stack)) of
        N when is_integer(N) ->
            scan(Cs, [], [{integer, {Line, Col}, reverse(Stack)} | Toks],
                 {Line, Col + length(Stack)}, State, Errors, TabWidth,FileFormat);
        _ ->
            scan(Cs, [], Toks, {Line, Col}, State,
                 [{{illegal, integer}, {Line, Col}} | Errors], TabWidth,FileFormat)
    end.
%% Scans the digits of a Base#Digits integer.  The base B rides at the
%% head of the stack; digits are accepted while valid for B (0-9, then
%% A../a.. for bases above 10).
scan_based_int([C | Cs], [B | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C >= $0, C =< $9, C < $0 + B ->
    scan_based_int(Cs, [B, C | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_based_int([C | Cs], [B | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C >= $A, B > 10, C < $A + B - 10 ->
    scan_based_int(Cs, [B, C | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_based_int([C | Cs], [B | Stack], Toks, {Line, Col},State, Errors,TabWidth,FileFormat)
    when C >= $a, B > 10, C < $a + B - 10 ->
    scan_based_int(Cs, [B, C | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_based_int([], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,fun scan_based_int/8);
scan_based_int(Cs, [B | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
    %% End of digits.  The token keeps the source spelling
    %% ("Base#Digits"); erlang:list_to_integer/2 only validates it.
    case catch erlang:list_to_integer(reverse(Stack), B) of
        N when is_integer(N) ->
            scan(Cs, [], [{integer, {Line, Col}, integer_to_list(B)++[$#| reverse(Stack)]} | Toks], %% "replaced 'N' with 'reverse(Stack)'";
                 {Line, Col + length(integer_to_list(B))+1+length(Stack)}, State, Errors, TabWidth,FileFormat);
        _ ->
            scan(Cs, [], Toks, {Line, Col}, State,
                 [{{illegal, integer}, {Line, Col}} | Errors], TabWidth,FileFormat)
    end.
%% Scans the digits after the decimal point; 'e'/'E' switches to
%% exponent scanning (normalised to $E on the stack).
scan_fraction([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C >= $0, C =< $9 ->
    scan_fraction(Cs, [C | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_fraction([$e | Cs], Stack, Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
    scan_exponent_sign(Cs, [$E | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_fraction([$E | Cs], Stack, Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
    scan_exponent_sign(Cs, [$E | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_fraction([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_fraction/8);
scan_fraction(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% End of the float: validate with list_to_float/1 and emit
    %% {float, Pos, Value} (floats store the converted value).
    case catch list_to_float(reverse(Stack)) of
        F when is_float(F) ->
            scan(Cs, [], [{float, {Line, Col}, F} | Toks],
                 {Line, Col + length(Stack)}, State, Errors, TabWidth,FileFormat);
        _ ->
            scan(Cs, [], Toks, {Line, Col}, State,
                 [{{illegal, float}, {Line, Col}} | Errors], TabWidth,FileFormat)
    end.
%% Consumes an optional '+' or '-' after the exponent marker, then
%% hands over to scan_exponent/8 for the digits.
scan_exponent_sign([Sign | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
    when Sign =:= $+; Sign =:= $- ->
    scan_exponent(Cs, [Sign | Stack], Toks, {Line, Col}, State, Errors, TabWidth, FileFormat);
scan_exponent_sign([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan_exponent_sign/8);
scan_exponent_sign(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
    %% No explicit sign: go straight to the digits.
    scan_exponent(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat).
%% Scans the exponent digits and emits the final {float, Pos, Value}
%% token.
scan_exponent([C | Cs], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat)
    when C >= $0, C =< $9 ->
    scan_exponent(Cs, [C | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_exponent([], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_exponent/8);
scan_exponent(Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
    %% End of the exponent: validate the whole float spelling.
    case catch list_to_float(reverse(Stack)) of
        F when is_float(F) ->
            scan(Cs, [], [{float, {Line, Col}, F} | Toks],
                 {Line, Col + length(Stack)}, State, Errors, TabWidth,FileFormat);
        _ ->
            scan(Cs, [], Toks, {Line, Col}, State,
                 [{{illegal, float}, {Line, Col}} | Errors], TabWidth,FileFormat)
    end.
%% Scans a %-comment up to (but not including) the line terminator and
%% emits a {comment, StartPos, Text} token.  The terminator itself is
%% pushed back so scan/8 handles the line break.
scan_comment([$\r | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    [StartPos|S] = reverse(Stack),
    scan([$\r | Cs], [], [{comment, StartPos, S}|Toks], {Line, Col}, State, Errors, TabWidth,FileFormat);
%% case FileFormat of
%% 	mac -> [StartPos|S] = reverse([$\r|Stack]),
%% 	       scan(Cs, [], [{comment, StartPos, S}|Toks], {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
%% 	_ -> scan_comment(Cs, [$\r|Stack], Toks, {Line, Col + 1}, State, Errors, TabWidth,FileFormat)
%%     end;
scan_comment([$\n | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    [StartPos|S] = reverse(Stack),
    scan([$\n | Cs], [], [{comment, StartPos, S}|Toks], {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_comment([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    scan_comment(Cs, [C|Stack], Toks, {Line, Col + 1}, State, Errors, TabWidth,FileFormat);
scan_comment([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth,FileFormat,fun scan_comment/8);
scan_comment(Eof, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Comment terminated by end of input.
    [StartPos|S] = reverse(Stack),
    done(Eof, Errors, [{comment, StartPos, S}|Toks], {Line, Col}, State, TabWidth,FileFormat).
%% Decides whether a '.' ends a form (followed by a comment, whitespace
%% or eof -> 'dot' token and scanning finishes) or is a plain '.'
%% token.  The whitespace following a full stop is kept as a token too.
scan_dot([$% | _] = Cs, _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    done(Cs, Errors, [{dot, {Line, Col}} | Toks], {Line, Col + 1}, State, TabWidth,FileFormat);
scan_dot([$\r | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% CR advances the line only for mac (CR-only) files.
    case FileFormat of
        mac ->
            done(Cs, Errors, [{whitespace, {Line+1, 1}, '\r'}, {dot, {Line, Col}} | Toks], {Line + 1, 1}, State, TabWidth,FileFormat);
        _ ->
            done(Cs, Errors, [{whitespace, {Line, Col+1}, '\r'}, {dot, {Line, Col}} | Toks], {Line, Col+1}, State, TabWidth,FileFormat)
    end;
scan_dot([$\n | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    done(Cs, Errors, [{whitespace, {Line, Col+1}, '\n'}, {dot, {Line, Col}} | Toks], {Line + 1, 1}, State, TabWidth,FileFormat);
scan_dot([$\t | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    done(Cs, Errors, [{whitespace, {Line, Col+1}, '\t'}, {dot, {Line, Col}} | Toks], {Line, Col+TabWidth}, State, TabWidth,FileFormat);
scan_dot([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
    when C >= $\000, C =< $\s ->
    %% Any other control/space character also terminates the form.
    done(Cs, Errors, [{whitespace, {Line,Col+1}, '\s'}, {dot, {Line, Col}} | Toks],{Line, Col + 2}, State, TabWidth,FileFormat);
scan_dot([C | Cs], _Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat)
    when C >= $\200, C =< $\240 ->
    %% Latin-1 whitespace range.
    done(Cs, Errors, [{whitespace, {Line,Col+1}, C}, {dot, {Line, Col}} | Toks],{Line, Col + 2}, State, TabWidth,FileFormat);
scan_dot([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth,FileFormat, fun scan_dot/8);
scan_dot(eof, _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    done(eof, Errors, [{dot, {Line, Col}} | Toks], {Line, Col}, State, TabWidth,FileFormat);
scan_dot(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
    %% Not a terminator: plain '.' operator token.
    scan(Cs, Stack, [{'.', {Line, Col}} | Toks],{Line, Col + 1}, State, Errors, TabWidth,FileFormat).
%% String
%% Finishes (or resumes) a scan depending on State:
%%   eos      - scanning an in-memory string: returns {ok, Tokens, Pos}
%%              or {error, ...}; a non-eof continuation appends 'eof'
%%              and rescans.
%%   io / eof - stream scanning: returns a {done, Result, LeftOver}
%%              tuple in the shape the io protocol expects.
%% Only one error is reported at a time: the last one recorded.
%%
%% Fix: stray dataset/extraction residue fused onto the final line
%% ("| src/... | starcoder") has been removed; it was not valid Erlang.
done(eof, [], Toks, {Line, Col}, eos, _TabWidth,_FileFormat) ->
    {ok, reverse(Toks), {Line, Col}};
done(eof, Errors, _Toks, {Line, Col}, eos, _TabWidth,_FileFormat) ->
    {Error, ErrorPos} = lists:last(Errors),
    {error, {ErrorPos, ?MODULE, Error}, {Line, Col}};
done(Cs, Errors, Toks, {Line, Col}, eos, TabWidth,FileFormat) ->
    scan(Cs, [], Toks, {Line, Col}, eos, Errors, TabWidth,FileFormat);
%% Debug clause for chopping string into 1-char segments
%% done(Cs, Errors, Toks, Pos, [H|T]) ->
%%     scan(Cs++[H], [], Toks, Pos, T, Errors);
done(Cs, Errors, Toks, {Line, Col}, [], TabWidth,FileFormat) ->
    scan(Cs ++ eof, [], Toks, {Line, Col}, eos, Errors, TabWidth,FileFormat);
%% Stream
done(Cs, [], [{dot, _} | _] = Toks, {Line, Col}, io, _TabWidth,_FileFormat) ->
    {done, {ok, reverse(Toks), {Line, Col}}, Cs};
done(Cs, [], [_ | _], {Line, Col}, io, _TabWidth,_FileFormat) ->
    %% Tokens were scanned but no terminating dot was seen.
    {done,
     {error, {{Line, Col}, ?MODULE, scan}, {Line, Col}}, Cs};
done(Cs, [], [], {Line, Col}, eof, _TabWidth,_FileFormat) ->
    {done, {eof, {Line, Col}}, Cs};
done(Cs, [], [{dot, _} | _] = Toks, {Line, Col}, eof, _TabWidth,_FileFormat) ->
    {done, {ok, reverse(Toks), {Line, Col}}, Cs};
done(Cs, [], _Toks, {Line, Col}, eof, _TabWidth,_FileFormat) ->
    {done,
     {error, {{Line, Col}, ?MODULE, scan}, {Line, Col}}, Cs};
done(Cs, Errors, _Toks, {Line, Col}, io, _TabWidth,_FileFormat) ->
    {Error, ErrorPos} = lists:last(Errors),
    {done, {error, {ErrorPos, ?MODULE, Error}, {Line, Col}},
     Cs};
done(Cs, Errors, _Toks, {Line, Col}, eof, _TabWidth,_FileFormat) ->
    {Error, ErrorPos} = lists:last(Errors),
    {done, {error, {ErrorPos, ?MODULE, Error}, {Line, Col}},
     Cs}.
%% @copyright 2014-2016 <NAME> <<EMAIL>>
%%
%% @doc Log Message Layout Behaviour
%%
%% This module defines the standard interface to format log messages issued by `logi' functions
%% (e.g. {@link logi:info/3}, {@link logi:warning/3}, etc).
%%
%% A layout instance may be installed into a channel along with an associated sink.
%%
%% == EXAMPLE ==
%% Usage example of a layout instance:
%% <pre lang="erlang">
%% > error_logger:tty(false). % Suppresses annoying warning outputs for brevity
%%
%% > Context = logi_context:new(sample_log, info).
%% > FormatFun = fun (_, Format, Data) -> lists:flatten(io_lib:format("EXAMPLE: " ++ Format, Data)) end.
%% > Layout = logi_builtin_layout_fun:new(FormatFun).
%% > logi_layout:format(Context, "Hello ~s", ["World"], Layout).
%% "EXAMPLE: Hello World"
%% </pre>
%%
%% A more realistic example:
%% <pre lang="erlang">
%% > FormatFun = fun (_, Format, Data) -> lists:flatten(io_lib:format("EXAMPLE: " ++ Format ++ "\n", Data)) end.
%% > Layout = logi_builtin_layout_fun:new(FormatFun).
%% > {ok, _} = logi_channel:install_sink(logi_builtin_sink_io_device:new(foo, [{layout, Layout}]), info).
%% > logi:info("hello world").
%% EXAMPLE: hello world
%% </pre>
%% @end
-module(logi_layout).
%%----------------------------------------------------------------------------------------------------------------------
%% Exported API
%%----------------------------------------------------------------------------------------------------------------------
-export([new/1, new/2]).
-export([is_layout/1]).
-export([get_module/1, get_extra_data/1]).
-export([format/4]).
-export_type([layout/0, layout/1]).
-export_type([data/0, formatted_data/0]).
-export_type([callback_module/0]).
-export_type([extra_data/0]).
%%----------------------------------------------------------------------------------------------------------------------
%% Behaviour Callbacks
%%----------------------------------------------------------------------------------------------------------------------
-callback format(logi_context:context(), io:format(), data(), extra_data()) -> formatted_data().
%%----------------------------------------------------------------------------------------------------------------------
%% Types
%%----------------------------------------------------------------------------------------------------------------------
-type layout() :: layout(formatted_data()).
%% An instance of `logi_layout' behaviour implementation module.
-opaque layout(_FormattedData) :: {callback_module(), extra_data()}
| callback_module().
%% An instance of `logi_layout' behaviour implementation module.
-type callback_module() :: module().
%% A module that implements the `logi_layout' behaviour.
-type extra_data() :: term().
%% The value of the fourth argument of the `format/4' callback function.
%%
%% If the `layout()' does not have an explicit `extra_data()', `undefined' will be passed instead.
-type data() :: [term()].
%% A data which is subject to format
%%
%% This type is an alias of the type of the second argument of {@link io_lib:format/2}
-type formatted_data() :: term().
%% Formatted Data
%%----------------------------------------------------------------------------------------------------------------------
%% Exported Functions
%%----------------------------------------------------------------------------------------------------------------------
%% @equiv new(Module, undefined)
-spec new(callback_module()) -> layout().
new(Module) ->
    %% A bare module atom is itself a valid layout() (extra data is
    %% implicitly 'undefined').
    new(Module, undefined).
%% @doc Creates a new layout instance
%%
%% Raises `badarg' unless `Module' implements the `logi_layout'
%% behaviour (i.e. exports `format/4').
-spec new(callback_module(), extra_data()) -> layout().
new(Module, ExtraData) ->
    case is_layout(Module) of
        false ->
            error(badarg, [Module, ExtraData]);
        true when ExtraData =:= undefined ->
            %% No extra data: the bare callback module is the instance.
            Module;
        true ->
            {Module, ExtraData}
    end.
%% @doc Returns `true' if `X' is a layout, `false' otherwise
%%
%% A layout is a module that exports `format/4', optionally paired
%% with extra data in a 2-tuple.
-spec is_layout(X :: (layout() | term())) -> boolean().
is_layout({Module, _}) -> is_atom(Module) andalso is_layout(Module);
is_layout(Module) -> is_atom(Module) andalso logi_utils:function_exported(Module, format, 4).
%% @doc Gets the module of `Layout'
-spec get_module(Layout :: layout()) -> callback_module().
get_module({Module, _ExtraData}) -> Module;
get_module(Module) when is_atom(Module) -> Module.
%% @doc Gets the extra data of `Layout'
-spec get_extra_data(Layout :: layout()) -> extra_data().
get_extra_data({_Module, ExtraData}) -> ExtraData;
get_extra_data(_Module) when is_atom(_Module) -> undefined.
%% @doc Returns an `iodata()' which represents `Data' formatted by `Layout' in accordance with `Format' and `Context'
%%
%% Fix: stray dataset/extraction residue fused onto the final line
%% ("| src/... | starcoder") has been removed; it was not valid Erlang.
-spec format(logi_context:context(), io:format(), data(), Layout) -> FormattedData when
      Layout :: layout(FormattedData),
      FormattedData :: formatted_data().
format(Context, Format, Data, {Module, Extra}) ->
    %% Layout carrying explicit extra data.
    Module:format(Context, Format, Data, Extra);
format(Context, Format, Data, Module) ->
    %% Bare module layout: extra data defaults to 'undefined'.
    Module:format(Context, Format, Data, undefined).
%% @doc
%% Load data from files. Data files are Erlang terms.
%% @end
-module(dby_load).
% load identifiers and links from files.
-export([load/2,
load/3,
dirload/2,
dirload/3]).
% ------------------------------------------------------------------------------
% API functions
% ------------------------------------------------------------------------------
%% @doc
%% @equiv load(Publisher, Filename, infinity)
%% @end
-spec load(binary(), string()) -> ok | {error, term()}.
load(Publisher, Filename) ->
    %% No batching: the entire file contents are published in one call.
    load(Publisher, Filename, infinity).
%% @doc
%% `load/3' reads `Filename' and calls `dby:publish/3' with the contents
%% of the file using `Publisher' as the publisher.
%% The maximum batch size
%% is `BatchSize'. If `BatchSize' is `infinity' the contents of the file
%% are not batched.
%% @end
%% Fixes: the spec misspelled the atom 'infinity' as 'inifinity', and a
%% data file holding zero or multiple terms used to be returned verbatim
%% as {ok, Terms}, violating the declared ok | {error, term()} contract.
-spec load(binary(), string(), infinity | non_neg_integer()) -> ok | {error, term()}.
load(Publisher, Filename, BatchSize) ->
    %% A data file must contain exactly one Erlang term: the list of
    %% identifiers and links to publish.
    case file:consult(Filename) of
        {ok, [Data]} ->
            process_file(Publisher, Data, BatchSize);
        {ok, _} ->
            %% Zero or more than one term in the file.
            {error, malformed_data_file};
        {error, _} = Err ->
            Err
    end.
%% @equiv dirload(Publisher, Dirname, infinity)
-spec dirload(binary(), string()) -> ok | {error, term()}.
dirload(Publisher, Dirname) ->
    %% No batching for any of the loaded files.
    dirload(Publisher, Dirname, infinity).
%% @doc
%% `dirload/3' finds all the files in `Dirname' with the
%% `.dobby' extension and calls `load/3' on each one. Processing
%% aborts if there is an error. Each file is loaded as a separate
%% transaction, so an abort will result in a partial load of the data.
%% The maximum batch size
%% is `BatchSize'. If `BatchSize' is `infinity' the contents of the file
%% are not batched.
%% @end
-spec dirload(binary(), string(), infinity | non_neg_integer()) -> ok | {error, term()}.
dirload(Publisher, Dirname, BatchSize) ->
    case file:list_dir(Dirname) of
        {ok, Filenames} ->
            process_files(Publisher, Dirname, Filenames, BatchSize);
        Err ->
            %% {error, Reason} from file:list_dir/1 is returned as-is.
            Err
    end.
% ------------------------------------------------------------------------------
% internal functions
% ------------------------------------------------------------------------------
%% Runs load/3 on every *.dobby file in Dirname.  The first failing
%% file aborts the traversal via throw, and its error is returned
%% tagged with the file name.  Files with any other extension are
%% silently skipped.
process_files(Publisher, Dirname, Filenames, BatchSize) ->
    try
        lists:foreach(
            fun(Filename) ->
                case filename:extension(Filename) of
                    ".dobby" ->
                        case load(Publisher, filename:join(Dirname, Filename), BatchSize) of
                            ok ->
                                ok;
                            Err ->
                                %% Abort the foreach; caught below.
                                throw({Filename, Err})
                        end;
                    _ ->
                        %% Not a data file: skip it.
                        ok
                end
            end, Filenames),
        ok
    catch
        throw:{Filename, Err} ->
            {error, {Filename, Err}}
    end.
%% Publishes Data in batches via dby:publish/3 with the [persistent]
%% option.  The first failing publish aborts via throw and its error
%% is returned unchanged.
process_file(Publisher, Data, BatchSize) ->
    Batches = split(Data, BatchSize),
    try
        lists:foreach(
            fun(Batch) ->
                case dby:publish(Publisher, Batch, [persistent]) of
                    ok ->
                        ok;
                    Err ->
                        %% Abort the foreach; caught below.
                        throw(Err)
                end
            end, Batches),
        ok
    catch
        throw:Error ->
            Error
    end.
%% Splits Data into batches.  'infinity' means no batching: the whole
%% term list becomes a single batch.
split(Data, infinity) ->
    [Data];
split(Data, BatchSize) ->
    split(Data, BatchSize, []).
%% Splits Data into consecutive batches of at most BatchSize elements,
%% preserving the input order.
%%
%% Fixes: (1) the accumulator is built head-first, so it must be
%% reversed before returning — the previous version delivered the
%% batches in reverse file order, inverting the order in which the
%% file's terms were published; (2) stray dataset/extraction residue
%% fused onto the final line has been removed.
split([], _, Acc) ->
    lists:reverse(Acc);
split(Data, BatchSize, Acc) when length(Data) < BatchSize ->
    %% Final, possibly short, batch.
    lists:reverse([Data | Acc]);
split(Data, BatchSize, Acc) ->
    {Batch, Rest} = lists:split(BatchSize, Data),
    split(Rest, BatchSize, [Batch | Acc]).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2018-2020. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%%%
%%% If a fun is defined locally and only used for calls, it can be replaced
%%% with direct calls to the relevant function. This greatly speeds up "named
%%% functions" (which rely on make_fun to recreate themselves) and macros that
%%% wrap their body in a fun.
%%%
-module(beam_ssa_funs).
-export([module/2]).
-include("beam_ssa.hrl").
-import(lists, [foldl/3]).
-spec module(Module, Options) -> Result when
      Module :: beam_ssa:b_module(),
      Options :: [compile:option()],
      Result :: {ok, beam_ssa:b_module()}.
%% Pass entry point: first gather all trampoline functions in the
%% module, then rewrite each function with that knowledge.
module(#b_module{body=Fs0}=Module, _Opts) ->
    Trampolines = foldl(fun find_trampolines/2, #{}, Fs0),
    Fs = [lfo(F, Trampolines) || F <- Fs0],
    {ok, Module#b_module{body=Fs}}.
%% If a function does absolutely nothing beyond calling another function with
%% the same arguments in the same order, we can shave off a call by short-
%% circuiting it.
find_trampolines(#b_function{args=Args,bs=Blocks}=F, Trampolines) ->
    case map_get(0, Blocks) of
        #b_blk{is=[#b_set{op=call,
                          args=[#b_local{}=Actual | Args],
                          dst=Dst}],
               last=#b_ret{arg=Dst}} ->
            %% Entry block is a single local call passing this
            %% function's own arguments unchanged (note the repeated
            %% `Args' pattern) whose result is returned directly.
            {_, Name, Arity} = beam_ssa:get_anno(func_info, F),
            Trampoline = #b_local{name=#b_literal{val=Name},arity=Arity},
            Trampolines#{Trampoline => Actual};
        _ ->
            Trampolines
    end.
lfo(#b_function{bs=Blocks0}=F, Trampolines) ->
Linear0 = beam_ssa:linearize(Blocks0),
Linear = lfo_optimize(Linear0, lfo_analyze(Linear0, #{}), Trampolines),
F#b_function{bs=maps:from_list(Linear)}.
%% Gather a map of the locally defined funs that are only used for calls.
lfo_analyze([{_L,#b_blk{is=Is,last=Last}}|Bs], LFuns0) ->
LFuns = lfo_analyze_last(Last, lfo_analyze_is(Is, LFuns0)),
lfo_analyze(Bs, LFuns);
lfo_analyze([], LFuns) ->
LFuns.
lfo_analyze_is([#b_set{op=make_fun,
dst=Dst,
args=[#b_local{} | FreeVars]}=Def | Is],
LFuns0) ->
LFuns = maps:put(Dst, Def, maps:without(FreeVars, LFuns0)),
lfo_analyze_is(Is, LFuns);
lfo_analyze_is([#b_set{op=call,
args=[Fun | CallArgs]} | Is],
LFuns) when is_map_key(Fun, LFuns) ->
#b_set{args=[#b_local{arity=Arity} | FreeVars]} = map_get(Fun, LFuns),
case length(CallArgs) + length(FreeVars) of
Arity ->
lfo_analyze_is(Is, maps:without(CallArgs, LFuns));
_ ->
%% This will `badarity` at runtime, and it's easier to disable the
%% optimization than to simulate it.
lfo_analyze_is(Is, maps:without([Fun | CallArgs], LFuns))
end;
lfo_analyze_is([#b_set{args=Args} | Is], LFuns) when map_size(LFuns) =/= 0 ->
%% We disqualify funs that are used outside calls because this forces them
%% to be created anyway, and the slight performance gain from direct calls
%% is not enough to offset the potential increase in stack frame size (the
%% free variables need to be kept alive until the call).
lfo_analyze_is(Is, maps:without(Args, LFuns));
lfo_analyze_is([_ | Is], LFuns) ->
lfo_analyze_is(Is, LFuns);
lfo_analyze_is([], LFuns) ->
LFuns.
lfo_analyze_last(#b_switch{arg=Arg}, LFuns) ->
maps:remove(Arg, LFuns);
lfo_analyze_last(#b_ret{arg=Arg}, LFuns) ->
maps:remove(Arg, LFuns);
lfo_analyze_last(_, LFuns) ->
LFuns.
%% Replace all calls of suitable funs with a direct call to their
%% implementation. Liveness optimization will get rid of the make_fun
%% instruction.
lfo_optimize(Linear, LFuns, _Trampolines) when map_size(LFuns) =:= 0 ->
Linear;
lfo_optimize(Linear, LFuns, Trampolines) ->
lfo_optimize_1(Linear, LFuns, Trampolines).
lfo_optimize_1([{L,#b_blk{is=Is0}=Blk}|Bs], LFuns, Trampolines) ->
Is = lfo_optimize_is(Is0, LFuns, Trampolines),
[{L,Blk#b_blk{is=Is}} | lfo_optimize_1(Bs, LFuns, Trampolines)];
lfo_optimize_1([], _LFuns, _Trampolines) ->
[].
lfo_optimize_is([#b_set{op=call,
args=[Fun | CallArgs]}=Call0 | Is],
LFuns, Trampolines) when is_map_key(Fun, LFuns) ->
#b_set{args=[Local | FreeVars]} = map_get(Fun, LFuns),
Args = [lfo_short_circuit(Local, Trampolines) | CallArgs ++ FreeVars],
Call = beam_ssa:add_anno(local_fun_opt, Fun, Call0#b_set{args=Args}),
[Call | lfo_optimize_is(Is, LFuns, Trampolines)];
lfo_optimize_is([I | Is], LFuns, Trampolines) ->
[I | lfo_optimize_is(Is, LFuns, Trampolines)];
lfo_optimize_is([], _LFuns, _Trampolines) ->
[].
lfo_short_circuit(Call, Trampolines) ->
lfo_short_circuit(Call, Trampolines, sets:new([{version, 2}])).
lfo_short_circuit(Call, Trampolines, Seen0) ->
%% Beware of infinite loops! Get out if this call has been seen before.
case sets:is_element(Call, Seen0) of
true ->
Call;
false ->
case Trampolines of
#{Call := Other} ->
Seen = sets:add_element(Call, Seen0),
lfo_short_circuit(Other, Trampolines, Seen);
#{} ->
Call
end
end. | lib/compiler/src/beam_ssa_funs.erl | 0.621541 | 0.477615 | beam_ssa_funs.erl | starcoder |
%%% @hidden
%%% @doc Riak store implementation.
%%% <u>Implementation Notes:</u>
%%% <ul>
%%% <li> Riak Data Types as main structures to push/pull data.</li>
%%% <li> Bulk operations (such as: delete_all and find_all) were
%%% optimized using streaming. Records are streamed in portions
%%% (using Riak 2i to stream keys first), and then the current
%%% operation (e.g.: delete the record or accumulate the values
%%% to return them later) is applied. This allows better memory
%%% and cpu efficiency.</li>
%%% <li> Query functions were implemented using Riak Search on Data Types,
%%% to get better performance and flexibility.</li>
%%% </ul>
%%%
%%% Copyright 2012 Inaka <<EMAIL>>
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%% @end
%%% @copyright Inaka <<EMAIL>>
%%%
-module(sumo_store_riak).
-author("<NAME> <<EMAIL>>").
-github("https://github.com/inaka").
-license("Apache License 2.0").
-behavior(sumo_store).
-include_lib("riakc/include/riakc.hrl").
%% @todo remove this when riakc releases a new version > 2.5.3
%% They already fixed on master so we should wait until they release a new version
-dialyzer([{nowarn_function, new_doc/2}]).
%% API.
-export(
[
init/1,
create_schema/2,
persist/2,
fetch/3,
delete_by/3,
delete_all/2,
find_all/2,
find_all/5,
find_by/3,
find_by/5,
find_by/6,
count/2,
count_by/3
]
).
%% Utilities
-export(
[
doc_to_rmap/1,
map_to_rmap/1,
rmap_to_doc/2,
rmap_to_map/2,
fetch_map/4,
fetch_docs/5,
delete_map/4,
update_map/5,
search/6,
build_query/2
]
).
%%%=============================================================================
%%% Types
%%%=============================================================================
%% Riak base parameters
-type connection() :: pid().
-type index() :: binary().
-type options() :: [proplists:property()].
-export_type([connection/0, index/0, options/0]).
%% @doc
%% conn: is the Pid of the gen_server that holds the connection with Riak
%% bucket: Riak bucket (per store)
%% index: Riak index to be used by Riak Search
%% get_opts: Riak read options parameters.
%% put_opts: Riak write options parameters.
%% del_opts: Riak delete options parameters.
%% <a href="http://docs.basho.com/riak/latest/dev/using/basics">Reference</a>.
%% @end
-record(
state,
{
conn :: connection(),
bucket :: {binary(), binary()},
index :: index(),
get_opts :: get_options(),
put_opts :: put_options(),
del_opts :: delete_options()
}
).
-type state() :: #state{}.
%%%=============================================================================
%%% API
%%%=============================================================================
-spec init(term()) -> {ok, term()}.
init(Opts) ->
% The storage backend key in the options specifies the name of the process
% which creates and initializes the storage backend.
Backend = proplists:get_value(storage_backend, Opts),
Conn = sumo_backend_riak:get_connection(Backend),
BucketType =
sumo_utils:to_bin(sumo_utils:keyfind(bucket_type, Opts, <<"maps">>)),
Bucket = sumo_utils:to_bin(sumo_utils:keyfind(bucket, Opts, <<"sumo">>)),
Index = sumo_utils:to_bin(sumo_utils:keyfind(index, Opts, <<"sumo_index">>)),
GetOpts = proplists:get_value(get_options, Opts, []),
PutOpts = proplists:get_value(put_options, Opts, []),
DelOpts = proplists:get_value(delete_options, Opts, []),
State =
#state{
conn = Conn,
bucket = {BucketType, Bucket},
index = Index,
get_opts = GetOpts,
put_opts = PutOpts,
del_opts = DelOpts
},
{ok, State}.
-spec persist(Doc, State) ->
Response
when Doc :: sumo_internal:doc(),
State :: state(),
Response :: sumo_store:result(sumo_internal:doc(), state()).
persist(Doc, #state{conn = Conn, bucket = Bucket, put_opts = Opts} = State) ->
{Id, NewDoc} = new_doc(sleep(Doc), State),
case update_map(Conn, Bucket, Id, doc_to_rmap(NewDoc), Opts) of
{error, Error} -> {error, Error, State};
_ -> {ok, wakeup(NewDoc), State}
end.
-spec fetch(DocName, Id, State) ->
Response
when DocName :: sumo:schema_name(),
Id :: sumo:field_value(),
State :: state(),
Response :: sumo_store:result(sumo_internal:doc(), state()).
fetch(DocName, Id, State) ->
#state{conn = Conn, bucket = Bucket, get_opts = Opts} = State,
case fetch_map(Conn, Bucket, sumo_utils:to_bin(Id), Opts) of
{ok, RMap} -> {ok, rmap_to_doc(DocName, RMap), State};
{error, {notfound, _Type = map}} -> {error, notfound, State};
{error, Error} -> {error, Error, State}
end.
-spec delete_by(DocName, Conditions, State) ->
Response
when DocName :: sumo:schema_name(),
Conditions :: sumo:conditions(),
State :: state(),
Response :: sumo_store:result(sumo_store:affected_rows(), state()).
delete_by(DocName, Conditions, State) when is_list(Conditions) ->
#state{conn = Conn, bucket = Bucket, index = Index, del_opts = Opts} = State,
IdField = sumo_internal:id_field_name(DocName),
case lists:keyfind(IdField, 1, Conditions) of
{_K, Key} ->
case delete_map(Conn, Bucket, sumo_utils:to_bin(Key), Opts) of
ok -> {ok, 1, State};
{error, Error} -> {error, Error, State}
end;
_ ->
Query = build_query(Conditions, Bucket),
case search_keys_by(Conn, Index, Query, [], 0, 0) of
{ok, {Total, Res}} ->
delete_keys(Conn, Bucket, Res, Opts),
{ok, Total, State};
{error, Error} -> {error, Error, State}
end
end;
delete_by(DocName, Conditions, State) ->
#state{conn = Conn, bucket = Bucket, index = Index, del_opts = Opts} = State,
TranslatedConditions = transform_conditions(DocName, Conditions),
Query = build_query(TranslatedConditions, Bucket),
case search_keys_by(Conn, Index, Query, [], 0, 0) of
{ok, {Total, Res}} ->
delete_keys(Conn, Bucket, Res, Opts),
{ok, Total, State};
{error, Error} -> {error, Error, State}
end.
-spec delete_all(DocName, State) ->
Response
when DocName :: sumo:schema_name(),
State :: state(),
Response :: sumo_store:result(sumo_store:affected_rows(), state()).
delete_all(_DocName, State) ->
#state{conn = Conn, bucket = Bucket, del_opts = Opts} = State,
Del =
fun
(Kst, Acc) ->
lists:foreach(fun (K) -> delete_map(Conn, Bucket, K, Opts) end, Kst),
Acc + length(Kst)
end,
case stream_keys(Conn, Bucket, Del, 0) of
{ok, Count} -> {ok, Count, State};
{error, Reason, Count} -> {error, {stream_keys, Reason, Count}, State}
end.
-spec find_all(DocName, State) ->
Response
when DocName :: sumo:schema_name(),
State :: state(),
Response :: sumo_store:result([sumo_internal:doc()], state()).
find_all(DocName, State) ->
#state{conn = Conn, bucket = Bucket, get_opts = Opts} = State,
Get =
fun (Kst, Acc) -> fetch_docs(DocName, Conn, Bucket, Kst, Opts) ++ Acc end,
case stream_keys(Conn, Bucket, Get, []) of
{ok, Docs} -> {ok, Docs, State};
{error, Reason, Count} -> {error, {stream_keys, Reason, Count}, State}
end.
-spec find_all(DocName, Sort, Limit, Offset, State) ->
Response
when DocName :: sumo:schema_name(),
Sort :: term(),
Limit :: non_neg_integer(),
Offset :: non_neg_integer(),
State :: state(),
Response :: sumo_store:result([sumo_internal:doc()], state()).
find_all(DocName, Sort, Limit, Offset, State) ->
find_by(DocName, [], Sort, Limit, Offset, State).
%% @doc
%% find_by may be used in two ways: either with a given limit and offset or not
%% If a limit and offset is not given, then the atom 'undefined' is used as a
%% marker to indicate that the store should find out how many keys matching the
%% query exist, and then obtain results for all of them.
%% This is done to overcome Solr's default pagination value of 10.
%% @end
-spec find_by(DocName, Conditions, State) ->
Response
when DocName :: sumo:schema_name(),
Conditions :: sumo:conditions(),
State :: state(),
Response :: sumo_store:result([sumo_internal:doc()], state()).
find_by(DocName, Conditions, State) ->
find_by(DocName, Conditions, undefined, undefined, State).
-spec find_by(DocName, Conditions, Limit, Offset, State) ->
Response
when DocName :: sumo:schema_name(),
Conditions :: sumo:conditions(),
Limit :: non_neg_integer() | undefined,
Offset :: non_neg_integer() | undefined,
State :: state(),
Response :: sumo_store:result([sumo_internal:doc()], state()).
find_by(DocName, Conditions, undefined, undefined, State) ->
%% First get all keys matching the query, and then obtain documents for those
%% keys.
#state{conn = Conn, bucket = Bucket, index = Index, get_opts = Opts} = State,
TranslatedConditions = transform_conditions(DocName, Conditions),
Query = build_query(TranslatedConditions, Bucket),
case find_by_query_get_keys(Conn, Index, Query) of
{ok, Keys} ->
Results = fetch_docs(DocName, Conn, Bucket, Keys, Opts),
{ok, Results, State};
{error, Error} -> {error, Error, State}
end;
find_by(DocName, Conditions, Limit, Offset, State) ->
%% Limit and offset were specified so we return a possibly partial result set.
find_by(DocName, Conditions, [], Limit, Offset, State).
-spec find_by(DocName, Conditions, Sort, Limit, Offset, State) ->
Response
when DocName :: sumo:schema_name(),
Conditions :: sumo:conditions(),
Sort :: term(),
Limit :: non_neg_integer(),
Offset :: non_neg_integer(),
State :: state(),
Response :: sumo_store:result([sumo_internal:doc()], state()).
find_by(DocName, Conditions, Sort, Limit, Offset, State) ->
#state{conn = Conn, bucket = Bucket, index = Index, get_opts = Opts} = State,
TranslatedConditions = transform_conditions(DocName, Conditions),
SortOpts = build_sort(Sort),
Query = <<(build_query(TranslatedConditions, Bucket))/binary>>,
case search_keys_by(Conn, Index, Query, SortOpts, Limit, Offset) of
{ok, {_Total, Keys}} ->
Results = fetch_docs(DocName, Conn, Bucket, Keys, Opts),
{ok, Results, State};
{error, Error} -> {error, Error, State}
end.
%% @doc
%% This function is used when none pagination parameter is given.
%% By default the search operation returns a specific set of results,
%% it handles a limit internally, so the total amount of docs may be
%% not returned. For this reason, this operation gets the first result
%% set, and then it fetches the rest fo them.
%% @end
%% @private
find_by_query_get_keys(Conn, Index, Query) ->
InitialResults =
case search_keys_by(Conn, Index, Query, [], 0, 0) of
{ok, {Total, Keys}} -> {ok, length(Keys), Total, Keys};
Error -> Error
end,
case InitialResults of
{ok, ResultCount, Total1, Keys1} when ResultCount < Total1 ->
Limit = Total1 - ResultCount,
Offset = ResultCount,
case search_keys_by(Conn, Index, Query, [], Limit, Offset) of
{ok, {Total1, Keys2}} -> {ok, lists:append(Keys1, Keys2)};
{error, Error1} -> {error, Error1}
end;
{ok, _ResultCount, _Total, Keys1} -> {ok, Keys1};
{error, Error2} -> {error, Error2}
end.
-spec count(DocName, State) ->
Response
when DocName :: sumo:schema_name(),
State :: state(),
Response :: sumo_store:result(non_neg_integer(), state()).
count(_DocName, #state{conn = Conn, bucket = Bucket} = State) ->
Sum = fun (Kst, Acc) -> length(Kst) + Acc end,
case stream_keys(Conn, Bucket, Sum, 0) of
{ok, Count} -> {ok, Count, State};
{_, _, _} -> {error, {error, count_failed}, State}
end.
-spec create_schema(Schema, State) ->
Response
when Schema :: sumo_internal:schema(),
State :: state(),
Response :: sumo_store:result(state()).
create_schema(_Schema, State) -> {ok, State}.
-spec count_by(DocName, Conditions, State) ->
Response
when DocName :: sumo:schema_name(),
Conditions :: sumo:conditions(),
State :: state(),
Response :: sumo_store:result(non_neg_integer(), state()).
count_by(DocName, [], State) -> count(DocName, State);
count_by(_DocName, _Conditions, #{conn := _Conn} = _State) -> 0.
%%%=============================================================================
%%% Utilities
%%%=============================================================================
-spec doc_to_rmap(sumo_internal:doc()) -> riakc_map:crdt_map().
doc_to_rmap(Doc) ->
Fields = sumo_internal:doc_fields(Doc),
map_to_rmap(Fields).
-spec map_to_rmap(map()) -> riakc_map:crdt_map().
map_to_rmap(Map) ->
lists:foldl(fun rmap_update/2, riakc_map:new(), maps:to_list(Map)).
-spec rmap_to_doc(sumo:schema_name(), riakc_map:crdt_map()) ->
sumo_internal:doc().
rmap_to_doc(DocName, RMap) ->
wakeup(sumo_internal:new_doc(DocName, rmap_to_map(DocName, RMap))).
-spec rmap_to_map(sumo:schema_name(), riakc_map:crdt_map()) -> map().
rmap_to_map(DocName, RMap) ->
lists:foldl(
fun
({{K, map}, V}, Acc) ->
NewV = rmap_to_map(DocName, {map, V, [], [], undefined}),
maps:put(sumo_utils:to_atom(K), NewV, Acc);
({{K, _}, V}, Acc) -> maps:put(sumo_utils:to_atom(K), V, Acc)
end,
#{},
riakc_map:value(RMap)
).
-spec fetch_map(Conn, Bucket, Key, Opts) ->
Result
when Conn :: connection(),
Bucket :: bucket_and_type(),
Key :: key(),
Opts :: options(),
Result :: {ok, riakc_datatype:datatype()} | {error, term()}.
fetch_map(Conn, Bucket, Key, Opts) ->
riakc_pb_socket:fetch_type(Conn, Bucket, Key, Opts).
-spec fetch_docs(DocName, Conn, Bucket, Keys, Opts) ->
Result
when DocName :: sumo:schema_name(),
Conn :: connection(),
Bucket :: bucket_and_type(),
Keys :: [key()],
Opts :: options(),
Result :: [sumo_internal:doc()].
fetch_docs(DocName, Conn, Bucket, Keys, Opts) ->
lists:foldl(
fun
(K, Acc) ->
case fetch_map(Conn, Bucket, K, Opts) of
{ok, M} -> [rmap_to_doc(DocName, M) | Acc];
_ -> Acc
end
end,
[],
Keys
).
-spec delete_map(connection(), bucket_and_type(), key(), options()) ->
ok | {error, term()}.
delete_map(Conn, Bucket, Key, Opts) ->
riakc_pb_socket:delete(Conn, Bucket, Key, Opts).
-spec update_map(Conn, Bucket, Key, Map, Opts) ->
Result
when Conn :: connection(),
Bucket :: bucket_and_type(),
Key :: key() | undefined,
Map :: riakc_map:crdt_map(),
Opts :: options(),
Ok
::
ok
| {ok, Key | riakc_datatype:datatype()}
| {ok, Key, riakc_datatype:datatype()},
Error :: {error, term()},
Result :: Ok | Error.
update_map(Conn, Bucket, Key, Map, Opts) ->
riakc_pb_socket:update_type(Conn, Bucket, Key, riakc_map:to_op(Map), Opts).
-spec search(Conn, Index, Query, Sort, Limit, Offset) ->
Result
when Conn :: connection(),
Index :: index(),
Query :: binary(),
Sort :: [term()],
Limit :: non_neg_integer(),
Offset :: non_neg_integer(),
Result :: {ok, search_result()} | {error, term()}.
search(Conn, Index, Query, Sort, 0, 0) ->
riakc_pb_socket:search(Conn, Index, Query, Sort);
search(Conn, Index, Query, Sort, Limit, Offset) ->
riakc_pb_socket:search(
Conn,
Index,
Query,
[{start, Offset}, {rows, Limit}] ++ Sort
).
-spec build_query(sumo:conditions(), {binary(), binary()}) -> binary().
build_query(Conditions, {Type, Bucket}) ->
Query = build_query1(Conditions, fun escape/1, fun quote/1),
<<
"_yz_rt:\"",
Type/binary,
"\" AND ",
"_yz_rb:\"",
Bucket/binary,
"\" AND ",
Query/binary
>>.
%%%=============================================================================
%%% Internal functions
%%%=============================================================================
%% @private
transform_conditions(DocName, Conditions) ->
sumo_utils:transform_conditions(
fun validate_date/1,
DocName,
Conditions,
[date, datetime]
).
%% @private
validate_date({FieldType, _, FieldValue}) ->
case {FieldType, sumo_utils:is_datetime(FieldValue)} of
{datetime, true} -> iso8601:format(FieldValue);
{date, true} ->
DateTime = {FieldValue, {0, 0, 0}},
iso8601:format(DateTime)
end.
%% @private
sleep(Doc) -> sumo_utils:doc_transform(fun sleep_fun/4, Doc).
%% @private
sleep_fun(_, FieldName, undefined, _) when FieldName /= id -> <<"$nil">>;
sleep_fun(FieldType, _, FieldValue, _)
when FieldType =:= datetime; FieldType =:= date ->
case {FieldType, sumo_utils:is_datetime(FieldValue)} of
{datetime, true} -> iso8601:format(FieldValue);
{date, true} -> iso8601:format({FieldValue, {0, 0, 0}});
_ -> FieldValue
end;
sleep_fun(custom, _, FieldValue, FieldAttrs) ->
Type = sumo_utils:keyfind(type, FieldAttrs, custom),
sleep_custom(FieldValue, Type);
sleep_fun(_, _, FieldValue, _) -> FieldValue.
%% @private
sleep_custom(FieldValue, FieldType) ->
case lists:member(FieldType, [term, tuple, map, list]) of
true -> base64:encode(term_to_binary(FieldValue));
_ -> FieldValue
end.
%% @private
wakeup(Doc) -> sumo_utils:doc_transform(fun wakeup_fun/4, Doc).
wakeup_fun(_, _, undefined, _) -> undefined;
wakeup_fun(_, _, <<"$nil">>, _) -> undefined;
wakeup_fun(FieldType, _, FieldValue, _)
when FieldType =:= datetime; FieldType =:= date ->
case {FieldType, sumo_utils:is_datetime(FieldValue)} of
{datetime, true} -> iso8601:parse(FieldValue);
{date, true} ->
{Date, _} = iso8601:parse(FieldValue),
Date;
_ -> FieldValue
end;
wakeup_fun(integer, _, FieldValue, _) when is_binary(FieldValue) ->
binary_to_integer(FieldValue);
wakeup_fun(float, _, FieldValue, _) when is_binary(FieldValue) ->
binary_to_float(FieldValue);
wakeup_fun(boolean, _, FieldValue, _) when is_binary(FieldValue) ->
binary_to_atom(FieldValue, utf8);
wakeup_fun(custom, _, FieldValue, FieldAttrs) ->
Type = sumo_utils:keyfind(type, FieldAttrs, custom),
wakeup_custom(FieldValue, Type);
wakeup_fun(_, _, FieldValue, _) -> FieldValue.
%% @private
wakeup_custom(FieldValue, FieldType) ->
case lists:member(FieldType, [term, tuple, map, list]) of
true -> binary_to_term(base64:decode(FieldValue));
_ -> FieldValue
end.
%% @private
new_doc(Doc, #state{conn = Conn, bucket = Bucket, put_opts = Opts}) ->
DocName = sumo_internal:doc_name(Doc),
IdField = sumo_internal:id_field_name(DocName),
Id =
case sumo_internal:get_field(IdField, Doc) of
undefined ->
case update_map(Conn, Bucket, undefined, doc_to_rmap(Doc), Opts) of
{ok, RiakMapId} -> RiakMapId;
{error, Error} -> exit(Error);
Unexpected -> exit({unexpected, Unexpected})
end;
Id0 -> sumo_utils:to_bin(Id0)
end,
{Id, sumo_internal:set_field(IdField, Id, Doc)}.
%% @private
list_to_rset(_, [], Acc) -> Acc;
list_to_rset(K, [H | T], Acc) ->
M =
riakc_map:update(
{sumo_utils:to_bin(K), set},
fun (S) -> riakc_set:add_element(sumo_utils:to_bin(H), S) end,
Acc
),
list_to_rset(K, T, M).
%% @private
rmap_update({K, V}, RMap) when is_map(V) ->
NewV = map_to_rmap(V),
riakc_map:update({sumo_utils:to_bin(K), map}, fun (_M) -> NewV end, RMap);
rmap_update({K, V}, RMap) when is_list(V) ->
case io_lib:printable_list(V) of
true ->
riakc_map:update(
{sumo_utils:to_bin(K), register},
fun (R) -> riakc_register:set(sumo_utils:to_bin(V), R) end,
RMap
);
false -> list_to_rset(K, V, RMap)
end;
rmap_update({K, V}, RMap) ->
riakc_map:update(
{sumo_utils:to_bin(K), register},
fun (R) -> riakc_register:set(sumo_utils:to_bin(V), R) end,
RMap
).
%% @private
stream_keys(Conn, Bucket, F, Acc) ->
{ok, Ref} =
riakc_pb_socket:get_index_eq(
Conn,
Bucket,
<<"$bucket">>,
<<"">>,
[{stream, true}]
),
receive_stream(Ref, F, Acc).
%% @private
receive_stream(Ref, F, Acc) ->
receive
{Ref, {_, Keys, _}} -> receive_stream(Ref, F, F(Keys, Acc));
{Ref, {done, _Continuation = undefined}} -> {ok, Acc};
Unexpected -> {error, {unexpected, Unexpected}, Acc}
after
30000 -> {error, timeout, Acc}
end.
%% @private
%% @doc
%% Search all docs that match with the given query, but only keys are returned.
%% IMPORTANT: assumes that default schema 'yokozuna' is being used.
%% @end
search_keys_by(Conn, Index, Query, SortOpts, Limit, Offset) ->
case sumo_store_riak:search(Conn, Index, Query, SortOpts, Limit, Offset) of
{ok, {search_results, Results, _, Total}} ->
Keys =
lists:foldl(
fun
({_, KV}, Acc) ->
{_, K} = lists:keyfind(<<"_yz_rk">>, 1, KV),
[K | Acc]
end,
[],
Results
),
{ok, {Total, Keys}};
{error, Error} -> {error, Error}
end.
%% @private
delete_keys(Conn, Bucket, Keys, Opts) ->
lists:foreach(fun (K) -> delete_map(Conn, Bucket, K, Opts) end, Keys).
%%%=============================================================================
%%% Query Builder
%%%=============================================================================
%% @private
build_query1([], _EscapeFun, _QuoteFun) -> <<"*:*">>;
build_query1(Exprs, EscapeFun, QuoteFun) when is_list(Exprs) ->
Clauses = [build_query1(Expr, EscapeFun, QuoteFun) || Expr <- Exprs],
binary:list_to_bin(["(", interpose(" AND ", Clauses), ")"]);
build_query1({'and', Exprs}, EscapeFun, QuoteFun) ->
build_query1(Exprs, EscapeFun, QuoteFun);
build_query1({'or', Exprs}, EscapeFun, QuoteFun) ->
Clauses = [build_query1(Expr, EscapeFun, QuoteFun) || Expr <- Exprs],
binary:list_to_bin(["(", interpose(" OR ", Clauses), ")"]);
build_query1({'not', Expr}, EscapeFun, QuoteFun) ->
binary:list_to_bin(["(NOT ", build_query1(Expr, EscapeFun, QuoteFun), ")"]);
build_query1({Name, '<', Value}, EscapeFun, _QuoteFun) ->
NewVal = binary:list_to_bin(["{* TO ", EscapeFun(Value), "}"]),
query_eq(Name, NewVal);
build_query1({Name, '=<', Value}, EscapeFun, _QuoteFun) ->
NewVal = binary:list_to_bin(["[* TO ", EscapeFun(Value), "]"]),
query_eq(Name, NewVal);
build_query1({Name, '>', Value}, EscapeFun, _QuoteFun) ->
NewVal = binary:list_to_bin(["{", EscapeFun(Value), " TO *}"]),
query_eq(Name, NewVal);
build_query1({Name, '>=', Value}, EscapeFun, _QuoteFun) ->
NewVal = binary:list_to_bin(["[", EscapeFun(Value), " TO *]"]),
query_eq(Name, NewVal);
build_query1({Name, '==', Value}, EscapeFun, QuoteFun) ->
build_query1({Name, Value}, EscapeFun, QuoteFun);
build_query1({Name, '/=', Value}, EscapeFun, QuoteFun) ->
build_query1({negative_field(Name), Value}, EscapeFun, QuoteFun);
build_query1({Name, like, Value}, _EscapeFun, _QuoteFun) ->
NewVal = like_to_wildcard_search(Value),
Bypass = fun (X) -> X end,
build_query1({Name, NewVal}, Bypass, Bypass);
build_query1({Name, null}, _EscapeFun, _QuoteFun) ->
%% null: (Field:<<"$nil">> OR (NOT Field:[* TO *]))
Val = {'or', [{Name, <<"$nil">>}, {'not', {Name, <<"[* TO *]">>}}]},
Bypass = fun (X) -> X end,
build_query1(Val, Bypass, Bypass);
build_query1({Name, not_null}, _EscapeFun, _QuoteFun) ->
%% not_null: (Field:[* TO *] AND -Field:<<"$nil">>)
Val = {'and', [{Name, <<"[* TO *]">>}, {Name, '/=', <<"$nil">>}]},
Bypass = fun (X) -> X end,
build_query1(Val, Bypass, Bypass);
build_query1({Name, Value}, _EscapeFun, QuoteFun) ->
query_eq(Name, QuoteFun(Value)).
%% @private
query_eq(K, V) -> binary:list_to_bin([build_key(K), V]).
%% @private
build_key(K) ->
build_key(binary:split(sumo_utils:to_bin(K), <<".">>, [global]), <<"">>).
%% @private
build_key([K], <<"">>) -> binary:list_to_bin([K, "_register:"]);
build_key([K], Acc) -> binary:list_to_bin([Acc, ".", K, "_register:"]);
build_key([K | T], <<"">>) -> build_key(T, binary:list_to_bin([K, "_map"]));
build_key([K | T], Acc) ->
build_key(T, binary:list_to_bin([Acc, ".", K, "_map"])).
%% @private
interpose(Sep, List) -> interpose(Sep, List, []).
%% @private
interpose(_Sep, [], Result) -> lists:reverse(Result);
interpose(Sep, [Item | []], Result) -> interpose(Sep, [], [Item | Result]);
interpose(Sep, [Item | Rest], Result) ->
interpose(Sep, Rest, [Sep, Item | Result]).
%% @private
negative_field(Name) -> binary:list_to_bin([<<"-">>, sumo_utils:to_bin(Name)]).
%% @private
quote(Value) ->
BinVal = sumo_utils:to_bin(Value),
[$", re:replace(BinVal, "[\\\"\\\\]", "\\\\&", [global]), $"].
%% @private
escape(Value) ->
Escape = "[\\+\\-\\&\\|\\!\\(\\)\\{\\}\\[\\]\\^\\\"\\~\\*\\?\\:\\\\]",
re:replace(
sumo_utils:to_bin(Value),
Escape,
"\\\\&",
[global, {return, binary}]
).
%% @private
whitespace(Value) ->
re:replace(Value, "[\\ \\\\]", "\\\\&", [global, {return, binary}]).
%% @private
like_to_wildcard_search(Like) ->
whitespace(
binary:replace(sumo_utils:to_bin(Like), <<"%">>, <<"*">>, [global])
).
%% @private
build_sort([]) -> [];
build_sort({Field, Dir}) ->
[
{
sort,
<<
(sumo_utils:to_bin(Field))/binary,
"_register",
(sumo_utils:to_bin(Dir))/binary
>>
}
];
build_sort(Sorts) ->
Res =
[
begin
binary:list_to_bin(
[sumo_utils:to_bin(Field), "_register", " ", sumo_utils:to_bin(Dir)]
)
end
|| {Field, Dir} <- Sorts
],
[{sort, binary:list_to_bin(interpose(", ", Res))}]. | src/sumo_store_riak.erl | 0.508056 | 0.496399 | sumo_store_riak.erl | starcoder |
%% @author <NAME> <<EMAIL>>
%% @copyright 2014 <NAME>
%% @version 0.2.0
%%
%% @doc
%%
%% The HDR histogram library is an Erlang native interface function wrapper of
%% Mike Barker's C port of Gil Tene's HDR Histogram utility.
%%
%%
%% A high dynamic range histogram is one that supports recording and analyzing
%% sampled data points across a configurable range with configurable precision
%% within that range. The precision is expressed as a number of significant
%% figures in the recording.
%%
%% This HDR histogram implementation is designed for recording histograms of
%% value measurements in latency sensitive environments. Although the native
%% recording times can be as low as single digit nanoseconds there is added
%% overhead in this wrapper/binding due to both the frontend overhead of converting
%% from native C to the NIF interface, and the erlang overhead incurred calling
%% into the NIFs. C'est la vie, I suppose.
%%
%% A distinct advantage of this histogram implementation is constant space and
%% recording (time) overhead with an ability to recycle and reset instances whilst
%% reclaiming already allocated space for reuse thereby reducing allocation cost
%% and garbage collection overhead in the BEAM where repeated or continuous usage
%% is likely. For example, a gen_server recording metrics continuously and resetting
%% and logging histogram dumps on a periodic or other windowed basis.
%%
%% The code is released to the public domain, under the same terms as its
%% sibling projects, as explained in the LICENSE.txt and COPYING.txt in the
%% root of this repository, but normatively at:
%%
%% http://creativecommons.org/publicdomain/zero/1.0/
%%
%% For users of this code who wish to consume it under the "BSD" license
%% rather than under the public domain or CC0 contribution text mentioned
%% above, the code found under this directory is *also* provided under the
%% following license (commonly referred to as the BSD 2-Clause License). This
%% license does not detract from the above stated release of the code into
%% the public domain, and simply represents an additional license granted by
%% http://creativecommons.org/publicdomain/zero/1.0/
%%
%% -----------------------------------------------------------------------------
%% ** Beginning of "BSD 2-Clause License" text. **
%%
%% Copyright (c) 2012, 2013, 2014 <NAME>
%% Copyright (c) 2014 <NAME>
%% Copyright (c) 2014 <NAME>
%% All rights reserved.
%%
%% Redistribution and use in source and binary forms, with or without
%% modification, are permitted provided that the following conditions are met:
%%
%% 1. Redistributions of source code must retain the above copyright notice,
%% this list of conditions and the following disclaimer.
%%
%% 2. Redistributions in binary form must reproduce the above copyright notice,
%% this list of conditions and the following disclaimer in the documentation
%% and/or other materials provided with the distribution.
%%
%% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
%% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
%% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
%% ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
%% LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
%% CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
%% SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
%% INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
%% CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
%% ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
%% THE POSSIBILITY OF SUCH DAMAGE.
%%
%% @end
-module(hdr_histogram).
-export([open/2]).
-export([get_memory_size/1]).
-export([get_total_count/1]).
-export([record/2]).
-export([record_corrected/3]).
-export([record_many/3]).
-export([add/2]).
-export([min/1]).
-export([max/1]).
-export([mean/1]).
-export([median/1]).
-export([stddev/1]).
-export([percentile/2]).
-export([same/3]).
-export([lowest_at/2]).
-export([count_at/2]).
-export([print/2]).
-export([log/3]).
-export([reset/1]).
-export([rotate/2]).
-export([close/1]).
-export([from_binary/1]).
-export([to_binary/1]).
-export([to_binary/2]).
-export([iter_open/1]).
-export([iter_init/3]).
-export([iter_next/1]).
-export([iter_close/1]).
-on_load(init/0).
-type ref() :: binary(). %% NIF private data (looks like empty binary)
%% @private
%% @doc Default path of the NIF shared object: `<priv>/hdr_histogram'.
%% Uses the application's priv dir when the code server knows it,
%% otherwise falls back to `../priv' relative to this module's beam
%% file (the usual layout when running from a source checkout).
%% NOTE(review): code:priv_dir/1 takes an *application* name; this
%% assumes the app is named after the module — confirm in the .app file.
load_default() ->
    filename:join(
      case code:priv_dir(?MODULE) of
          {error, bad_name} ->
              %% Not a loaded application: derive ../priv from the
              %% directory that holds the beam file.
              Dir = code:which(?MODULE),
              filename:join([filename:dirname(Dir),"..","priv"]);
          Dir -> Dir
      end, atom_to_list(?MODULE)).
%% @private
%% @doc Path of the NIF to load; the NIF_DIR environment variable
%% overrides the default priv-dir location.
load_nif() ->
    case os:getenv("NIF_DIR") of
        false -> load_default();
        Path -> filename:join(Path, atom_to_list(?MODULE))
    end.
%% @private
%% @doc -on_load hook: loads the NIF, replacing the stub functions
%% below with their native implementations.
init() ->
    SoName = load_nif(),
    erlang:load_nif(SoName, 0).
%% NOTE: every function below is a NIF stub.  The native implementation
%% replaces these stubs when init/0 loads the shared object; if the NIF
%% is missing, a call raises error:{nif_not_loaded, Module}.
-spec open(HighestTrackableValue,SignificantFigures)
          -> {ok,Ref} | {error,Reason} when
      HighestTrackableValue :: integer(),
      SignificantFigures :: 1..5,
      Ref :: ref(),
      Reason :: term().
%% @doc Open a fresh instance of a high dynamic range (HDR) histogram
open(_High,_Sig) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec get_memory_size(Ref) -> Size when
      Ref :: ref(),
      Size :: integer().
%% @doc Get memory footprint (in bytes) of an HDR histogram
get_memory_size(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec get_total_count(Ref) -> Count when
      Ref :: ref(),
      Count :: integer().
%% @doc Get total count of recorded values
get_total_count(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec record(Ref,Value) -> ok | {error, Reason} when
      Ref :: ref(),
      Value :: integer(),
      Reason :: term().
%% @doc Record an uncorrected histogram data point value in a HDR histogram
record(_Ref,_Value) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec record_corrected(Ref,Value,ExpectedInterval) -> ok | {error, Reason} when
      Ref :: ref(),
      Value :: integer(),
      ExpectedInterval :: integer(),
      Reason :: term().
%% @doc Record a histogram data point value in a HDR histogram with
%% expected interval for correction of coordinated omission
record_corrected(_Ref,_Value,_ExpectedInterval) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec record_many(Ref,Value,Count) -> ok | {error, Reason} when
      Ref :: ref(),
      Value :: integer(),
      Count :: integer(),
      Reason :: term().
%% @doc Record a histogram data point value and its number of occurrences
record_many(_Ref,_Value,_Count) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec add(To,From) -> integer() | {error,Reason} when
      To :: ref(),
      From :: ref(),
      Reason :: term().
%% @doc Contribute the data points from a HDR histogram to another.
%% Return the number of dropped data point values.
%% That is, the values from `From' that are higher than the
%% `HighestTrackableValue' of `To'.
add(_ToRef,_FromRef) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec min(Ref) -> MinValue when
      Ref :: ref(),
      MinValue :: integer().
%% @doc Get the minimum recorded data point value
min(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec max(Ref) -> MaxValue when
      Ref :: ref(),
      MaxValue :: integer().
%% @doc Get the maximum recorded data point value
max(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec mean(Ref) -> MeanValue when
      Ref :: ref(),
      MeanValue :: float().
%% @doc Get the mean data point value to a significant figure
mean(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec median(Ref) -> MedianValue when
      Ref :: ref(),
      MedianValue :: float().
%% @doc Get the median data point value to a significant figure
median(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec stddev(Ref) -> StddevValue when
      Ref :: ref(),
      StddevValue :: float().
%% @doc Get the standard deviation data point value to a significant figure
stddev(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec percentile(Ref,Percentile) -> PercentileValue when
      Ref :: ref(),
      Percentile :: float(),
      PercentileValue :: float().
%% @doc Get the specified percentile data point value to a significant figure
percentile(_Ref,_Percentile) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec same(Ref,A,B) -> AreEquivalent when
      Ref :: ref(),
      A :: integer(),
      B :: integer(),
      AreEquivalent :: boolean().
%% @doc Are two data point values considered to be equivalent
same(_Ref,_A,_B) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec lowest_at(Ref,Value) -> LowestValueAt when
      Ref :: ref(),
      Value :: integer(),
      LowestValueAt :: float().
%% @doc Get the lowest equivalent value
lowest_at(_Ref,_Value) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec count_at(Ref,Value) -> CountAt when
      Ref :: ref(),
      Value :: integer(),
      CountAt :: integer().
%% @doc Get the count of values at a given value
count_at(_Ref,_Value) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec print(Ref,Format) -> ok | {error, Reason} when
      Ref :: ref(),
      Format :: classic | csv,
      Reason :: term().
%% @doc Print the histogram to standard output in classic or CSV format.
%% Any other format atom raises function_clause.
print(Ref,classic) ->
    print_classic(Ref);
print(Ref,csv) ->
    print_csv(Ref).
%% @private
%% NIF stub backing print(Ref, classic).
print_classic(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
%% @private
%% NIF stub backing print(Ref, csv).
print_csv(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec log(Ref,Format,file:name()) -> ok | {error, Reason} when
      Ref :: ref(),
      Format :: classic | csv,
      Reason :: term().
%% @doc Log the histogram to a file in classic or CSV format.
%% Any other format atom raises function_clause.
log(Ref,classic,FileName) ->
    log_classic(Ref,FileName);
log(Ref,csv,FileName) ->
    log_csv(Ref,FileName).
%% @private
%% NIF stub backing log(Ref, classic, FileName).
log_classic(_Ref,_FileName) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
%% @private
%% NIF stub backing log(Ref, csv, FileName).
log_csv(_Ref,_FileName) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
%% The three functions below are NIF stubs, replaced on load (see init/0).
-spec reset(Ref) -> ok | {error,term()} when
      Ref :: ref().
%% @doc Reset the memory backing this HDR histogram instance and zero results
reset(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec rotate(Ref, To) -> Diff :: binary() | {error, term()} when
      Ref :: ref(),
      To :: ref().
%% @doc Copy data from Ref to To and return the difference as binary
rotate(_Ref, _To) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec close(Ref) -> ok | {error,term()} when
      Ref :: ref().
%% @doc Close this HDR histogram instance and free any system resources
close(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec from_binary(Binary) -> {ok,Ref} | {error,term()} when
      Binary :: binary(),
      Ref :: ref().
%% @doc Hydrate a previously snapshotted (compressed) histogram state
%% into a fresh reference.  The returned reference SHOULD be closed
%% when no longer needed to reclaim the memory used.  (NIF stub.)
from_binary(_Binary) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec to_binary(Ref) -> binary() | {error,term()} when
      Ref :: ref().
%% @doc Snapshot the HDR histogram internal state as a compressed
%% binary.  The source instance can keep being modified in the usual
%% way afterwards, with no restrictions.  (NIF stub.)
to_binary(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec to_binary_uncompressed(Ref) -> binary() | {error,term()} when
      Ref :: ref().
%% Uncompressed variant of to_binary/1.  (NIF stub.)
to_binary_uncompressed(_Ref) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
-spec to_binary(Ref, [{compression, none} |
                      {compression, zlib}]) ->
          binary() | {error,term()} when
      Ref :: ref().
%% @doc Snapshot the HDR histogram internal state as an (optionally)
%% compressed binary.  An empty option list means zlib compression;
%% any unrecognised option list yields {error, bad_options}.
to_binary(Ref, Options) ->
    case Options of
        [] -> to_binary(Ref);
        [{compression, zlib}] -> to_binary(Ref);
        [{compression, none}] -> to_binary_uncompressed(Ref);
        _Bad -> {error, bad_options}
    end.
%% Iterator NIF stubs (internal support for walking recorded values).
%% Fix: stray dataset-metadata text was fused onto the final line,
%% making the file unparsable; it has been removed.
%% @private
iter_open(_) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
%% @private
iter_init(_,_,_) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
%% @private
iter_next(_) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
%% @private
iter_close(_) ->
    erlang:nif_error({nif_not_loaded, ?MODULE}).
%%
%% Copyright 2013, <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @copyright 2013, <NAME>
%% @author <NAME> <<EMAIL>>
%% @doc Schema functions.
%%
%% This module exports functions for interacting with a compiled
%% schema.
-module(ecapnp_schema).
-author("<NAME> <<EMAIL>>").
-export([type_of/1, get/2, lookup/2, lookup/3, size_of/1, size_of/2,
data_size/1, ptrs_size/1, get_ref_kind/1, get_ref_kind/2,
set_ref_to/2, find_method_by_name/2, find_field/2, dump/1]).
-include("ecapnp.hrl").
-type lookup_type() :: type_id() | type_name() | object | schema_node().
%% The various types that can be looked up.
-type lookup_search() :: object() | ref() | pid()
| schema_nodes() | schema_node().
%% Where to search for the type being looked up.
%% ===================================================================
%% API functions
%% ===================================================================
%% @doc Look up `Type' in `Schema' (see lookup/2 for accepted inputs),
%% throwing `{schema_not_found, Type, Schema}' when nothing matches.
get(Type, Schema) ->
    case lookup(Type, Schema) of
        undefined -> throw({schema_not_found, Type, Schema});
        Node -> Node
    end.
%% @doc Like lookup/2, but returns `Default' instead of `undefined'
%% when nothing matches.
lookup(Type, Schema, Default) ->
    case lookup(Type, Schema) of
        undefined -> Default;
        Node -> Node
    end.
-spec lookup(lookup_type(), lookup_search()) -> schema_node() | undefined.
%% @doc Find schema node for type.
%%
%% The search argument may be a schema node, a schema module (atom),
%% an `#object{}', or a list of any of these; the first match wins.
%% Returns `undefined' when no node matches.
lookup(N, _) when is_record(N, schema_node) -> N;
lookup(Type, Schema) when is_atom(Schema) ->
    if Type =:= object -> Schema;
       true -> Schema:schema(Type)
    end;
lookup(Id, #schema_node{ id=Id }=N) -> N;
lookup(Name, #schema_node{ name=Name }=N) -> N;
lookup(Type, #schema_node{ module=Module }) -> lookup(Type, Module);
lookup(Type, #object{ schema = Schema }) -> lookup(Type, Schema);
lookup(Type, [N|Ns]) ->
    %% Fix: the previous version cased on the pair
    %% `{lookup(Type, N), Ns}' and its final clause `Node -> Node'
    %% therefore returned the whole `{SchemaNode, Rest}' tuple on a
    %% successful lookup, violating the -spec above.  Case on the
    %% lookup result alone; an exhausted list falls through to the
    %% catch-all clause below and yields `undefined'.
    case lookup(Type, N) of
        undefined -> lookup(Type, Ns);
        Node -> Node
    end;
lookup(_Type, _Schema) ->
    %%io:format("type not found: ~p (in schema ~p)~n", [Type, Schema]),
    undefined.
-spec type_of(object()) -> schema_node().
%% @doc Get type of object.
%% @todo Doesn't this belong in ecapnp_obj?
type_of(#object{ schema=Type }) -> Type.
-spec size_of(lookup_type(), lookup_search()) -> non_neg_integer().
%% @doc Lookup struct type and query its size (in words).
size_of(Type, Store) ->
    size_of(lookup(Type, Store)).
-spec size_of(Node::schema_node()) -> non_neg_integer().
%% @doc Query size of a struct type.
%%
%% Will crash with `function_clause' if `Node' is not a struct or
%% interface node.
size_of(#schema_node{ kind=Kind }) -> size_of(Kind);
size_of(#struct{ dsize=DSize, psize=PSize }) -> DSize + PSize;
%% Size in message data, which simply is a Capability pointer, the
%% CapDescriptor is stored out-of-band.
size_of(#interface{}) -> 1.
-spec data_size(schema_node()) -> non_neg_integer().
%% @doc Get data size of a struct type.
%% NOTE(review): the spec says schema_node() but the clause matches a
%% #struct{} kind record — confirm callers pass the kind, not the node.
data_size(#struct{ dsize=DSize }) -> DSize.
-spec ptrs_size(schema_node()) -> non_neg_integer().
%% @doc Get pointer count for a struct type.
%% NOTE(review): same spec/clause mismatch as data_size/1 above.
ptrs_size(#struct{ psize=PSize }) -> PSize.
%% Map a schema node (or its kind record) to the reference kind used
%% in message pointers: structs carry their data/pointer sizes,
%% interfaces become capability references.
get_ref_kind(#struct{ dsize=DSize, psize=PSize }) ->
    #struct_ref{ dsize=DSize, psize=PSize };
get_ref_kind(#schema_node{ kind=Kind }) ->
    get_ref_kind(Kind);
get_ref_kind(#interface{}) ->
    #interface_ref{}.
%% As get_ref_kind/1, but first resolves a type given by name (atom)
%% or id (number) against the search scope of `Ref'.
get_ref_kind(Type, Ref) when is_atom(Type); is_number(Type) ->
    get_ref_kind(lookup(Type, Ref));
get_ref_kind(Type, _) ->
    get_ref_kind(Type).
-spec set_ref_to(lookup_type(), ref()) -> ref().
%% @doc Set reference kind.
%%
%% Lookup struct `Type' and return an updated {@link ref(). ref}.
%%
%% Note: it is only the record that is updated, the change is not
%% committed to the message.
set_ref_to(Type, Ref) ->
    Ref#ref{ kind=get_ref_kind(Type, Ref) }.
%% @doc Find Interface and Method.
%% Searches an interface node, or each node of a list in order, for a
%% method with the given name; returns `{ok, InterfaceNode, Method}'
%% or `undefined'.
find_method_by_name(MethodName, #schema_node{
                                   kind = #interface{ methods = Ms }
                                  }=S) ->
    case lists:keyfind(MethodName, #method.name, Ms) of
        false -> undefined;
        Method -> {ok, S, Method}
    end;
find_method_by_name(MethodName, [S|Ss]) ->
    %% First interface in the list that defines the method wins.
    case find_method_by_name(MethodName, S) of
        undefined ->
            find_method_by_name(MethodName, Ss);
        Result ->
            Result
    end;
find_method_by_name(_MethodName, []) -> undefined.
%% @doc Find struct field from schema definition by name or index.
%% Returns the #field{} record, or `false' when not found (the
%% lists:keyfind/3 convention).
find_field(Field, #schema_node{ kind = #struct{ fields = Fields } }) ->
    %% Atoms select by field name, numbers by field id.
    Idx = if is_atom(Field) -> #field.name;
             is_number(Field) -> #field.id
          end,
    lists:keyfind(Field, Idx, Fields).
%% Render a short "Source(Id)" label for debugging/log output; the
%% result is an iolist (io_lib:format/2).
dump(#schema_node{ src = Source, id = Id }) ->
    io_lib:format("~s(~p)", [Source, Id]).
%% ===================================================================
%% internal functions
%% ===================================================================
%% @doc Simple Moving Average
%%
%% [https://en.wikipedia.org/wiki/Moving_average#Simple_moving_average]
%%
%% This module implements SMA calculation logic. The same way as folsom does for EMA
%% See [https://github.com/boundary/folsom/blob/8914823067c623d2839ecd6d17785ba94ad004c8/src/folsom_ewma.erl]
%%
%% see the description of the algorithm here
%% [https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages#simple_moving_average_calculation]
%%
%% NOTE: For the first N values SMA is unavailable.
%%
%% @end
-module(metric_sma).
-export([
new/2,
rate/1,
update/2,
tick/1
]).
-export_type([
sma/0
]).
%% Internal accumulator for one simple-moving-average series; opaque
%% to callers.
%% Fix: stray dataset-metadata text fused onto the module's final line
%% has been removed, and the -spec of rate/1 now includes `undefined'
%% (which rate/1 demonstrably returns for the first N-1 ticks).
-record(sma, {
    n :: integer(),                %% smoothing: samples per average
    rate :: undefined | float(),   %% last computed SMA; undefined until the window fills
    total = 0.0 :: float(),        %% sum of values recorded since the previous tick
    window = [] :: list(float()),  %% newest-first per-tick rates, at most N entries
    interval :: integer()          %% seconds between ticks
}).

-opaque sma() :: #sma{}.

-spec new(N :: integer(), Interval :: integer()) -> sma().
%% @doc Creates new SMA struct
%%
%% `N' - Smooth: the amount of data on the basis of which the average
%% is calculated.
%%
%% `Interval' - Time interval (in seconds) the timer ticks.  Each tick
%% we dump the next value.
%% @see tick/1
%% @end
new(N, Interval) ->
    #sma{n = N, interval = Interval}.

-spec rate(sma()) -> undefined | float().
%% @doc Current SMA; `undefined' until N ticks have elapsed.
rate(#sma{rate = Rate}) ->
    Rate.

-spec update(SMA :: sma(), Value :: float()) -> sma().
%% @doc Add `Value' to the running total for the current interval.
update(#sma{total = Total} = SMA, Value) ->
    SMA#sma{total = Total + Value}.

-spec tick(sma()) -> sma().
%% @doc Calculates the next SMA
%%
%% - Dump a new per-interval rate into the window
%% - Drop the oldest one out (once the window holds N entries)
%% - Calculate a new SMA when the window is full
%% - Reset the total counter
%% @end
tick(#sma{n = N} = SMA) ->
    Window = update_window(SMA, instant_rate(SMA)),
    Rate =
        case length(Window) of
            N -> lists:sum(Window) / N;   %% window full: mean of the last N rates
            _ -> undefined                %% fewer than N samples so far
        end,
    SMA#sma{rate = Rate, total = 0.0, window = Window}.

%% @private Average rate over the interval that just elapsed.
instant_rate(#sma{total = Total, interval = Interval}) ->
    Total / Interval.

%% @private Prepend the newest rate and cap the window at N entries.
update_window(#sma{n = N, window = Window0}, InstantRate) ->
    Window = [InstantRate | Window0],
    case length(Window) > N of
        true -> lists:droplast(Window);
        false -> Window
    end.
%% Copyright (c) 2016 <NAME> <<EMAIL>>
%% Copyright (c) 2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%%
%% CIDR Wiki: https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
%%
%% The module is copied from inet_cidr.erl to avoid one file depencency.
%%
%% @end
-module(esockd_cidr).
-export([ parse/1
, parse/2
, match/2
, count/1
, to_string/1
]).
-export([ is_ipv4/1
, is_ipv6/1
]).
-export_type([ cidr_string/0
, cidr/0
]).
-type(cidr_string() :: string()).
-type(cidr() :: {inet:ip_address(), inet:ip_address(), 0..128}).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
%% @doc Parse CIDR.
-spec(parse(string()) -> cidr()).
parse(S) ->
    %% Strict mode: host bits set in the address raise invalid_cidr.
    parse(S, false).
-spec(parse(string(), boolean()) -> cidr()).
%% When `Adjust' is true, host bits set in the address are silently
%% masked off instead of raising `error:invalid_cidr'.
parse(S, Adjust) ->
    case string:tokens(S, "/") of
        %% A bare address is the single-address block (/32 or /128).
        [AddrStr] -> parse_addr(AddrStr);
        [AddrStr, LenStr] -> parse_cidr(AddrStr, LenStr, Adjust)
    end.
%% A bare address parses to the single-address block whose prefix
%% length equals the full address width.
parse_addr(AddrStr) ->
    {ok, Addr} = inet:parse_address(AddrStr),
    {Addr, Addr, bit_count(Addr)}.

%% Parse "Addr/Len"; unless adjustment was requested, any host bit set
%% in the input address is an error.
parse_cidr(AddrStr, LenStr, Adjust) ->
    {ok, Addr} = inet:parse_address(AddrStr),
    PrefixLen = list_to_integer(LenStr),
    Start = band_with_mask(Addr, start_mask(Addr, PrefixLen)),
    (Adjust =:= true) orelse (Start =:= Addr) orelse error(invalid_cidr),
    {Start, calc_end_address(Start, PrefixLen), PrefixLen}.
%% @doc Check if the IP address is in the CIDR block, i.e. lies within
%% the block's start..end range component-wise.  Mismatched address
%% families (v4 address vs v6 block, or vice versa) yield false.
-spec(match(inet:ip_address(), cidr()) -> boolean()).
match({A1, A2, A3, A4}, {{S1, S2, S3, S4}, {E1, E2, E3, E4}, _Len}) ->
    (S1 =< A1) andalso (A1 =< E1) andalso
    (S2 =< A2) andalso (A2 =< E2) andalso
    (S3 =< A3) andalso (A3 =< E3) andalso
    (S4 =< A4) andalso (A4 =< E4);
match({A1, A2, A3, A4, A5, A6, A7, A8},
      {{S1, S2, S3, S4, S5, S6, S7, S8},
       {E1, E2, E3, E4, E5, E6, E7, E8}, _Len}) ->
    (S1 =< A1) andalso (A1 =< E1) andalso
    (S2 =< A2) andalso (A2 =< E2) andalso
    (S3 =< A3) andalso (A3 =< E3) andalso
    (S4 =< A4) andalso (A4 =< E4) andalso
    (S5 =< A5) andalso (A5 =< E5) andalso
    (S6 =< A6) andalso (A6 =< E6) andalso
    (S7 =< A7) andalso (A7 =< E7) andalso
    (S8 =< A8) andalso (A8 =< E8);
match(_, _) ->
    false.
%% Number of addresses covered by the block: 2^(width - prefix length).
count({{_, _, _, _}, _End, PrefixLen}) ->
    1 bsl (32 - PrefixLen);
count({{_, _, _, _, _, _, _, _}, _End, PrefixLen}) ->
    1 bsl (128 - PrefixLen).
%% Render the block in the conventional "start/len" notation.
to_string({Start, _End, PrefixLen}) ->
    inet:ntoa(Start) ++ [$/ | integer_to_list(PrefixLen)].
%% @doc Return true if the value is an ipv4 address: a 4-tuple whose
%% components all lie in 0..255 (no integer check, matching the
%% original comparison semantics).
is_ipv4({_, _, _, _} = Addr) ->
    lists:all(fun(Part) -> (Part >= 0) andalso (Part =< 255) end,
              tuple_to_list(Addr));
is_ipv4(_) ->
    false.
%% @doc Return true if the value is an ipv6 address: an 8-tuple whose
%% components all lie in 0..65535.
is_ipv6({_, _, _, _, _, _, _, _} = Addr) ->
    lists:all(fun(Part) -> (Part >= 0) andalso (Part =< 65535) end,
              tuple_to_list(Addr));
is_ipv6(_) ->
    false.
%%--------------------------------------------------------------------
%% Internal Functions
%%--------------------------------------------------------------------
%% Network-part mask for a prefix length: the bitwise complement of
%% the host-part mask produced by end_mask/2.
start_mask({_, _, _, _} = Addr, Len) when 0 =< Len, Len =< 32 ->
    complement(end_mask(Addr, Len));
start_mask({_, _, _, _, _, _, _, _} = Addr, Len) when 0 =< Len, Len =< 128 ->
    complement(end_mask(Addr, Len)).

%% Apply bnot to every component of a mask tuple.
complement(Mask) ->
    list_to_tuple([bnot Part || Part <- tuple_to_list(Mask)]).
%% Host-part mask for a prefix length: ones in the low (width - Len)
%% bits, zeroes in the network part, expressed per address component.
end_mask({_, _, _, _}, Len) when 0 =< Len, Len =< 32 ->
    if
        Len == 32 -> {0, 0, 0, 0};
        Len >= 24 -> {0, 0, 0, bmask(Len, 8)};
        Len >= 16 -> {0, 0, bmask(Len, 8), 16#FF};
        Len >= 8 -> {0, bmask(Len, 8), 16#FF, 16#FF};
        Len >= 0 -> {bmask(Len, 8), 16#FF, 16#FF, 16#FF}
    end;
end_mask({_, _, _, _, _, _, _, _}, Len) when 0 =< Len, Len =< 128 ->
    if
        Len == 128 -> {0, 0, 0, 0, 0, 0, 0, 0};
        Len >= 112 -> {0, 0, 0, 0, 0, 0, 0, bmask(Len, 16)};
        Len >= 96 -> {0, 0, 0, 0, 0, 0, bmask(Len, 16), 16#FFFF};
        Len >= 80 -> {0, 0, 0, 0, 0, bmask(Len, 16), 16#FFFF, 16#FFFF};
        Len >= 64 -> {0, 0, 0, 0, bmask(Len, 16), 16#FFFF, 16#FFFF, 16#FFFF};
        %% Fix: this threshold was 49, which sent Len == 48 to the next
        %% branch and produced a host mask one whole 16-bit group too
        %% wide for /48 networks.
        Len >= 48 -> {0, 0, 0, bmask(Len, 16), 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF};
        Len >= 32 -> {0, 0, bmask(Len, 16), 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF};
        Len >= 16 -> {0, bmask(Len, 16), 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF};
        Len >= 0 -> {bmask(Len, 16), 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF, 16#FFFF}
    end.
%% Low bits of one address component left uncovered by a prefix of
%% length I: 16#FF / 16#FFFF shifted right by (I rem GroupBits).
bmask(I, 8) when 0 =< I, I =< 32 ->
    16#FF bsr (I rem 8);
bmask(I, 16) when 0 =< I, I =< 128 ->
    16#FFFF bsr (I rem 16).
%% Highest address in the block: the start address with every host bit
%% set (component-wise OR with the host-part mask).
calc_end_address(Addr, Len) ->
    bor_with_mask(Addr, end_mask(Addr, Len)).
%% Element-wise bitwise OR of two equally sized address tuples.
%% Fix: stray dataset-metadata text fused onto the last line of this
%% span has been removed.
bor_with_mask({A, B, C, D}, {E, F, G, H}) ->
    {A bor E, B bor F, C bor G, D bor H};
bor_with_mask({A, B, C, D, E, F, G, H}, {I, J, K, L, M, N, O, P}) ->
    {A bor I, B bor J, C bor K, D bor L, E bor M, F bor N, G bor O, H bor P}.
%% Element-wise bitwise AND of two equally sized address tuples.
band_with_mask({A, B, C, D}, {E, F, G, H}) ->
    {A band E, B band F, C band G, D band H};
band_with_mask({A, B, C, D, E, F, G, H}, {I, J, K, L, M, N, O, P}) ->
    {A band I, B band J, C band K, D band L, E band M, F band N, G band O, H band P}.
%% Address width in bits: 32 for IPv4 tuples, 128 for IPv6 tuples.
bit_count({_, _, _, _}) -> 32;
bit_count({_, _, _, _, _, _, _, _}) -> 128.
-module(eministat_analysis).
-include("eministat.hrl").
-export([outlier_variance/3]).
-export([relative/3]).
%% -- OUTLIER VARIANCE ------
%%
%% @doc outlier_variance/3 computes the severity of the outlier variance:
%% the fraction {Frac, Severity} of the sample variance explained by
%% outliers, given the sample mean Mu, standard deviation Sigma and the
%% iteration count A.
%%
%% http://www.ellipticgroup.com/misc/article_supplement.pdf
%%
%% @end
outlier_variance(_, Sigma, _) when Sigma < 0.000000000000001 -> {0.0, unaffected};
outlier_variance(Mu, Sigma, A) ->
    MinBy = fun(F, Q, R) -> min(F(Q), F(R)) end,
    MuA = Mu / A,
    MugMin = MuA / 2,
    SigmaG = min(MugMin / 4, Sigma / math:sqrt(A)),
    SigmaG2 = SigmaG * SigmaG,
    Sigma2 = Sigma * Sigma,
    %% Variance explained when C of the A iterations are outliers.
    VarOut = fun(C) ->
                     AC = A - C,
                     (AC / A) * (Sigma2 - AC * SigmaG2)
             end,
    %% Largest outlier count compatible with a lower bound X on the
    %% per-iteration mean (quadratic root, numerically stable form).
    CMax = fun(X) ->
                   K = MuA - X,
                   D = K * K,
                   AD = A * D,
                   K0 = -A * AD,
                   K1 = Sigma2 - A * SigmaG2 + AD,
                   Det = K1 * K1 - 4 * SigmaG2 * K0,
                   trunc(-2 * K0 / (K1 + math:sqrt(Det)))
           end,
    VarOutMin = MinBy(VarOut, 1, (MinBy(CMax, 0, MugMin))) / Sigma2,
    %% Fix: the previous `case ... K when K > 0.5' left K == 0.5
    %% unmatched and crashed with case_clause; `true' now catches the
    %% whole remaining range.
    if
        VarOutMin < 0.01 -> {VarOutMin, unaffected};
        VarOutMin < 0.1 -> {VarOutMin, slight};
        VarOutMin < 0.5 -> {VarOutMin, moderate};
        true -> {VarOutMin, severe}
    end.
%% -- RELATIVE #dataset{} COMPARISONS -----
%% Two-sample Student's t test with pooled variance: decides whether
%% datasets Ds and Rs differ at the confidence level selected by
%% ConfIdx (an index into student_pct(), so presumably 1..6 — confirm
%% with callers).
relative(#dataset { n = DsN } = Ds, #dataset { n = RsN } = Rs, ConfIdx) ->
    %% Degrees of freedom for the pooled test.
    I = DsN + RsN - 2,
    %% Critical t value at the chosen confidence level.
    T = element(ConfIdx, student_lookup(I)),
    %% Pooled standard deviation of the two samples.
    Spool1 = (DsN - 1) * eministat_ds:variance(Ds) + (RsN - 1) * eministat_ds:variance(Rs),
    Spool = math:sqrt(Spool1 / I),
    %% Standard error of the difference of the means.
    S = Spool * math:sqrt(1.0 / DsN + 1.0 / RsN),
    D = eministat_ds:mean(Ds) - eministat_ds:mean(Rs),
    %% Margin of error: a difference larger than E is significant.
    E = T * S,
    case abs(D) > E of
        false ->
            {no_difference, element(ConfIdx, student_pct())};
        true ->
            {difference,
             #{ confidence_level => element(ConfIdx, student_pct()),
                difference => {D, E},
                %% Difference expressed as a percentage of Rs's mean.
                difference_pct => {
                  D * 100 / eministat_ds:mean(Rs),
                  E * 100 / eministat_ds:mean(Rs)},
                pooled_s => Spool
              }}
    end.
%% -- STUDENT's T TABLES -----
%% Constant tables, represented as tuples for O(1) lookup speeds
%% Confidence levels (percent) corresponding, column for column, to
%% the critical values in student_inf/0 and student/0.
student_pct() -> {80.0, 90.0, 95.0, 98.0, 99.0, 99.5}.
%% Asymptotic row (infinite degrees of freedom), i.e. the normal
%% distribution's critical values.
student_inf() ->
    { 1.282, 1.645, 1.960, 2.326, 2.576, 3.090 }. %% inf
-define(NSTUDENT, 100). %% Number of elements in the students distribution lookup table
%% Critical values of Student's t distribution; row k holds the values
%% for k degrees of freedom, columns match student_pct/0.
student() ->
    {
     { 3.078, 6.314, 12.706, 31.821, 63.657, 318.313 }, %% 1.
     { 1.886, 2.920, 4.303, 6.965, 9.925, 22.327 }, %% 2.
     { 1.638, 2.353, 3.182, 4.541, 5.841, 10.215 }, %% 3.
     { 1.533, 2.132, 2.776, 3.747, 4.604, 7.173 }, %% 4.
     { 1.476, 2.015, 2.571, 3.365, 4.032, 5.893 }, %% 5.
     { 1.440, 1.943, 2.447, 3.143, 3.707, 5.208 }, %% 6.
     { 1.415, 1.895, 2.365, 2.998, 3.499, 4.782 }, %% 7.
     { 1.397, 1.860, 2.306, 2.896, 3.355, 4.499 }, %% 8.
     { 1.383, 1.833, 2.262, 2.821, 3.250, 4.296 }, %% 9.
     { 1.372, 1.812, 2.228, 2.764, 3.169, 4.143 }, %% 10.
     { 1.363, 1.796, 2.201, 2.718, 3.106, 4.024 }, %% 11.
     { 1.356, 1.782, 2.179, 2.681, 3.055, 3.929 }, %% 12.
     { 1.350, 1.771, 2.160, 2.650, 3.012, 3.852 }, %% 13.
     { 1.345, 1.761, 2.145, 2.624, 2.977, 3.787 }, %% 14.
     { 1.341, 1.753, 2.131, 2.602, 2.947, 3.733 }, %% 15.
     { 1.337, 1.746, 2.120, 2.583, 2.921, 3.686 }, %% 16.
     { 1.333, 1.740, 2.110, 2.567, 2.898, 3.646 }, %% 17.
     { 1.330, 1.734, 2.101, 2.552, 2.878, 3.610 }, %% 18.
     { 1.328, 1.729, 2.093, 2.539, 2.861, 3.579 }, %% 19.
     { 1.325, 1.725, 2.086, 2.528, 2.845, 3.552 }, %% 20.
     { 1.323, 1.721, 2.080, 2.518, 2.831, 3.527 }, %% 21.
     { 1.321, 1.717, 2.074, 2.508, 2.819, 3.505 }, %% 22.
     { 1.319, 1.714, 2.069, 2.500, 2.807, 3.485 }, %% 23.
     { 1.318, 1.711, 2.064, 2.492, 2.797, 3.467 }, %% 24.
     { 1.316, 1.708, 2.060, 2.485, 2.787, 3.450 }, %% 25.
     { 1.315, 1.706, 2.056, 2.479, 2.779, 3.435 }, %% 26.
     { 1.314, 1.703, 2.052, 2.473, 2.771, 3.421 }, %% 27.
     { 1.313, 1.701, 2.048, 2.467, 2.763, 3.408 }, %% 28.
     { 1.311, 1.699, 2.045, 2.462, 2.756, 3.396 }, %% 29.
     { 1.310, 1.697, 2.042, 2.457, 2.750, 3.385 }, %% 30.
     { 1.309, 1.696, 2.040, 2.453, 2.744, 3.375 }, %% 31.
     { 1.309, 1.694, 2.037, 2.449, 2.738, 3.365 }, %% 32.
     { 1.308, 1.692, 2.035, 2.445, 2.733, 3.356 }, %% 33.
     { 1.307, 1.691, 2.032, 2.441, 2.728, 3.348 }, %% 34.
     { 1.306, 1.690, 2.030, 2.438, 2.724, 3.340 }, %% 35.
     { 1.306, 1.688, 2.028, 2.434, 2.719, 3.333 }, %% 36.
     { 1.305, 1.687, 2.026, 2.431, 2.715, 3.326 }, %% 37.
     { 1.304, 1.686, 2.024, 2.429, 2.712, 3.319 }, %% 38.
     { 1.304, 1.685, 2.023, 2.426, 2.708, 3.313 }, %% 39.
     { 1.303, 1.684, 2.021, 2.423, 2.704, 3.307 }, %% 40.
     { 1.303, 1.683, 2.020, 2.421, 2.701, 3.301 }, %% 41.
     { 1.302, 1.682, 2.018, 2.418, 2.698, 3.296 }, %% 42.
     { 1.302, 1.681, 2.017, 2.416, 2.695, 3.291 }, %% 43.
     { 1.301, 1.680, 2.015, 2.414, 2.692, 3.286 }, %% 44.
     { 1.301, 1.679, 2.014, 2.412, 2.690, 3.281 }, %% 45.
     { 1.300, 1.679, 2.013, 2.410, 2.687, 3.277 }, %% 46.
     { 1.300, 1.678, 2.012, 2.408, 2.685, 3.273 }, %% 47.
     { 1.299, 1.677, 2.011, 2.407, 2.682, 3.269 }, %% 48.
     { 1.299, 1.677, 2.010, 2.405, 2.680, 3.265 }, %% 49.
     { 1.299, 1.676, 2.009, 2.403, 2.678, 3.261 }, %% 50.
     { 1.298, 1.675, 2.008, 2.402, 2.676, 3.258 }, %% 51.
     { 1.298, 1.675, 2.007, 2.400, 2.674, 3.255 }, %% 52.
     { 1.298, 1.674, 2.006, 2.399, 2.672, 3.251 }, %% 53.
     { 1.297, 1.674, 2.005, 2.397, 2.670, 3.248 }, %% 54.
     { 1.297, 1.673, 2.004, 2.396, 2.668, 3.245 }, %% 55.
     { 1.297, 1.673, 2.003, 2.395, 2.667, 3.242 }, %% 56.
     { 1.297, 1.672, 2.002, 2.394, 2.665, 3.239 }, %% 57.
     { 1.296, 1.672, 2.002, 2.392, 2.663, 3.237 }, %% 58.
     { 1.296, 1.671, 2.001, 2.391, 2.662, 3.234 }, %% 59.
     { 1.296, 1.671, 2.000, 2.390, 2.660, 3.232 }, %% 60.
     { 1.296, 1.670, 2.000, 2.389, 2.659, 3.229 }, %% 61.
     { 1.295, 1.670, 1.999, 2.388, 2.657, 3.227 }, %% 62.
     { 1.295, 1.669, 1.998, 2.387, 2.656, 3.225 }, %% 63.
     { 1.295, 1.669, 1.998, 2.386, 2.655, 3.223 }, %% 64.
     { 1.295, 1.669, 1.997, 2.385, 2.654, 3.220 }, %% 65.
     { 1.295, 1.668, 1.997, 2.384, 2.652, 3.218 }, %% 66.
     { 1.294, 1.668, 1.996, 2.383, 2.651, 3.216 }, %% 67.
     { 1.294, 1.668, 1.995, 2.382, 2.650, 3.214 }, %% 68.
     { 1.294, 1.667, 1.995, 2.382, 2.649, 3.213 }, %% 69.
     { 1.294, 1.667, 1.994, 2.381, 2.648, 3.211 }, %% 70.
     { 1.294, 1.667, 1.994, 2.380, 2.647, 3.209 }, %% 71.
     { 1.293, 1.666, 1.993, 2.379, 2.646, 3.207 }, %% 72.
     { 1.293, 1.666, 1.993, 2.379, 2.645, 3.206 }, %% 73.
     { 1.293, 1.666, 1.993, 2.378, 2.644, 3.204 }, %% 74.
     { 1.293, 1.665, 1.992, 2.377, 2.643, 3.202 }, %% 75.
     { 1.293, 1.665, 1.992, 2.376, 2.642, 3.201 }, %% 76.
     { 1.293, 1.665, 1.991, 2.376, 2.641, 3.199 }, %% 77.
     { 1.292, 1.665, 1.991, 2.375, 2.640, 3.198 }, %% 78.
     { 1.292, 1.664, 1.990, 2.374, 2.640, 3.197 }, %% 79.
     { 1.292, 1.664, 1.990, 2.374, 2.639, 3.195 }, %% 80.
     { 1.292, 1.664, 1.990, 2.373, 2.638, 3.194 }, %% 81.
     { 1.292, 1.664, 1.989, 2.373, 2.637, 3.193 }, %% 82.
     { 1.292, 1.663, 1.989, 2.372, 2.636, 3.191 }, %% 83.
     { 1.292, 1.663, 1.989, 2.372, 2.636, 3.190 }, %% 84.
     { 1.292, 1.663, 1.988, 2.371, 2.635, 3.189 }, %% 85.
     { 1.291, 1.663, 1.988, 2.370, 2.634, 3.188 }, %% 86.
     { 1.291, 1.663, 1.988, 2.370, 2.634, 3.187 }, %% 87.
     { 1.291, 1.662, 1.987, 2.369, 2.633, 3.185 }, %% 88.
     { 1.291, 1.662, 1.987, 2.369, 2.632, 3.184 }, %% 89.
     { 1.291, 1.662, 1.987, 2.368, 2.632, 3.183 }, %% 90.
     { 1.291, 1.662, 1.986, 2.368, 2.631, 3.182 }, %% 91.
     { 1.291, 1.662, 1.986, 2.368, 2.630, 3.181 }, %% 92.
     { 1.291, 1.661, 1.986, 2.367, 2.630, 3.180 }, %% 93.
     { 1.291, 1.661, 1.986, 2.367, 2.629, 3.179 }, %% 94.
     { 1.291, 1.661, 1.985, 2.366, 2.629, 3.178 }, %% 95.
     { 1.290, 1.661, 1.985, 2.366, 2.628, 3.177 }, %% 96.
     { 1.290, 1.661, 1.985, 2.365, 2.627, 3.176 }, %% 97.
     { 1.290, 1.661, 1.984, 2.365, 2.627, 3.175 }, %% 98.
     { 1.290, 1.660, 1.984, 2.365, 2.626, 3.175 }, %% 99.
     { 1.290, 1.660, 1.984, 2.364, 2.626, 3.174 } %% 100.
    }.
%% Critical values for I degrees of freedom; above the table size the
%% asymptotic (normal-distribution) row is used.
%% Fix: stray dataset-metadata text fused onto the final line has been
%% removed.
student_lookup(I) when I > ?NSTUDENT -> student_inf();
student_lookup(I) -> element(I, student()).
%%%-------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright (C) 2019, <NAME>
%%% @doc
%%%
%%% A set of utility functions to support the rest of the espace
%%% modules.
%%%
%%% When `espace' instances are started, a number of instance specific
%%% `persistent_term' entries are created. The `pterm_*' functions are
%%% used for handling these terms.
%%%
%%% The `persistent_term' entries are expected to have the following
%%% format: `{espace, Inst_name, Key}', where, `Key' identifies the
%%% particular `espace' item, such as server name to ETS table name,
%%% and `Inst_name' is the espace instance name, which is the same as
%%% the application name. For the unnamed instance the `Inst_name' is
%%% `espace'.
%%%
%%% @end
%%% Created : 11 Mar 2019 by <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(espace_util).
%% API
-export([eval_out/1, eval_out/2]).
-export([inst_to_name/2, wait4etsmgr/4]).
-export([pterm_erase/1, pterm_get/2, pterm_put/3]).
-export([opcount_new/0, opcount_incr/1, opcount_counts/0, opcount_reset/0]).
-export([opcount_new/1, opcount_incr/2, opcount_counts/1, opcount_reset/1]).
%%--------------------------------------------------------------------
-type(pt_key() :: {espace, atom(), atom()}).
%% used for locating all keys
-define(PTerm_key(Inst_name), {espace, Inst_name, _}).
%% used for locating specific keys
-define(PTerm_key(Inst_name, Key), {espace, Inst_name, Key}).
%%--------------------------------------------------------------------
-record(opctr, {in, rd, inp, rdp, out, eval}).
%% the counter elements by name
-define(Opctr_names, record_info(fields, opctr)).
-define(Opctr_size, record_info(size, opctr)).
-define(Opctr_ref, pterm_get(Inst_name, opcounters)).
-type(espace_op() :: in | rd | inp | rdp | out | eval).
%%%===================================================================
%%% API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc Convert an instance name to a longer prefixed name.
%%
%% `inst_to_name(espace_sup, aaa)' returns `espace_sup_aaa', while the
%% unnamed instance keeps the bare prefix: `inst_to_name(espace_sup,
%% espace)' returns `espace_sup'.
%%
%% The result is memoized in the `persistent_term' store under the key
%% `{espace, Inst_name, Prefix}' (e.g. `{espace, aaa, espace_sup}'),
%% so repeated conversions — including those made by short-lived
%% `eval/2' processes — avoid rebuilding the name each time.
%%
%% @end
%%--------------------------------------------------------------------
-spec inst_to_name(atom(), atom()) -> atom().
inst_to_name(Prefix, Inst_name) ->
    case pterm_get(Inst_name, Prefix) of
        undefined ->
            Full_name = expand_name(Prefix, Inst_name),
            pterm_put(Inst_name, Prefix, Full_name),
            Full_name;
        Cached ->
            Cached
    end.

%% Build the instance-qualified name; the unnamed `espace' instance
%% keeps the bare prefix.
expand_name(Prefix, espace) ->
    Prefix;
expand_name(Prefix, Inst_name) ->
    list_to_atom(lists:concat([Prefix, "_", Inst_name])).
%%--------------------------------------------------------------------
%% @doc Lookup the instance `Key' for `Inst_name'.
%%
%% If the persistence term does not exist, `undefined' is returned.
%%
%% @end
%%--------------------------------------------------------------------
-spec pterm_get(atom(), atom()) -> undefined | term().
pterm_get(Inst_name, Key) ->
PT_key = ?PTerm_key(Inst_name, Key),
persistent_term:get(PT_key, undefined).
%%--------------------------------------------------------------------
%% @doc Create an `espace' persistent term.
%%
%% @end
%%--------------------------------------------------------------------
-spec pterm_put(atom(), atom(), term()) -> ok.
pterm_put(Inst_name, Key, Term) ->
    %% Store `Term' under the instance-scoped persistent_term key.
    persistent_term:put(?PTerm_key(Inst_name, Key), Term).
%%--------------------------------------------------------------------
%% @doc Remove all the persistent terms for a given `espace' instance.
%%
%% @end
%%--------------------------------------------------------------------
-spec pterm_erase(atom()) -> [] | [{pt_key(), term()}].
pterm_erase(Inst_name) ->
    %% Walk the whole persistent_term store and erase only the entries
    %% whose key belongs to `Inst_name'; every erased {Key, Value} pair is
    %% collected and returned. Unrelated entries are left untouched.
    Erase_if_ours =
        fun ({Key = ?PTerm_key(Inst), Value}) when Inst == Inst_name ->
                case persistent_term:erase(Key) of
                    true -> {true, {Key, Value}};
                    false -> false
                end;
            (_Other) ->
                false
        end,
    lists:filtermap(Erase_if_ours, persistent_term:get()).
%%--------------------------------------------------------------------
%% @doc Conditionally evaluate a tuple and `out' the result to the
%% unnamed instance.
%%
%% See `eval_out/2' for details.
%%
%% @end
%%--------------------------------------------------------------------
-spec eval_out(tuple()) -> done.
eval_out(Tuple_in) ->
    %% Delegates to eval_out/2 with the default (unnamed) `espace' instance.
    eval_out(espace, Tuple_in).
%%--------------------------------------------------------------------
%% @doc Conditionally evaluate a tuple and `out' the result to a named
%% instance.
%%
%% The elements of the output tuple correspond to those of
%% `Tuple_in'. If any of the elements of `Tuple_in' are recognized as
%% function, then the corresponding output element will be the value
%% of the function.
%%
%% Two types of patterns are recognized as functions and are
%% evaluated. A normal function expression of arity zero, `fun () ->
%% expr end'. And, a tuple with two elements, a function expression of
%% arity `N' and a list of length `N', `N' can be zero.
%%
%% Any other pattern will move the element to the output tuple
%% untouched.
%%
%% @end
%%--------------------------------------------------------------------
-spec eval_out(atom(), tuple()) -> done.
eval_out(Inst_name, Tuple_in) ->
    %% Evaluate each element that looks like a zero-arity fun or a
    %% {Fun, Args} pair (see do_eval/1), keep the rest untouched, and
    %% `out' the resulting tuple to the given instance.
    Elements_in = erlang:tuple_to_list(Tuple_in),
    Elements_out = [do_eval(Element) || Element <- Elements_in],
    espace:out(Inst_name, erlang:list_to_tuple(Elements_out)).
%%--------------------------------------------------------------------
%% @doc wait for etsmgr to (re)start, then ask it to manage our table.
%%
%% There are two occasions where this function is called:
%%
%% <ol>
%%
%% <li> `init' - start/restart of our gen_server that owns a table, in
%% this case we do not have, or know of, the ETS table. So we ask
%% `etsmgr' to create a new table using `etsmgr:new_table/4'. If
%% `etsmgr' is already managing such a table that does not already
%% belong to another process, then that table will be given to
%% us.</li>
%%
%% <li> `recover' - recovery of the `etsmgr' server, in this case we
%% ask `etsmgr' to start managing our ETS table.</li>
%%
%% </ol>
%%
%% @end
%%--------------------------------------------------------------------
-spec wait4etsmgr(atom(), init | recover, atom(), term()) -> {ok, pid(), ets:tab()} | {error, term()}.
wait4etsmgr(Inst_name, init, Table_name, Table_opts) ->
    %% Fresh start: block until etsmgr is up, then ask it to create (or
    %% hand over, see the doc above) a table with the given options.
    etsmgr:wait4etsmgr(Inst_name),
    etsmgr:new_table(Inst_name, Table_name, Table_name, Table_opts);
wait4etsmgr(Inst_name, recover, Table_name, Table_id) ->
    %% etsmgr restart: we still own the ETS table (4th arg is the table
    %% id, not options), so ask the new etsmgr to resume managing it.
    etsmgr:wait4etsmgr(Inst_name),
    etsmgr:add_table(Inst_name, Table_name, Table_id).
%%--------------------------------------------------------------------
%%--------------------------------------------------------------------
%% @doc Create a new ops counter array for the unnamed instance.
%%
%% See opcount_new/1 for details.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_new() -> ok.
opcount_new() ->
    %% Delegates to opcount_new/1 for the default (unnamed) `espace' instance.
    opcount_new(espace).
%%--------------------------------------------------------------------
%% @doc Create a new ops counter array for a named instance.
%%
%% The array will have one counter per espace operation. The counters
%% ref is saved in as a persistent term.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_new(atom()) -> ok.
opcount_new(Inst_name) ->
    %% Allocate one counter slot per espace operation (?Opctr_size slots)
    %% and cache the counters ref as a persistent term for this instance.
    pterm_put(Inst_name, opcounters, counters:new(?Opctr_size, [])).
%%--------------------------------------------------------------------
%% @doc Increment a single espace op counter for the unnamed instance.
%%
%% See opcount_incr/1 for details.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_incr(espace_op()) -> ok.
opcount_incr(Op) ->
    %% Delegates to opcount_incr/2 for the default (unnamed) `espace' instance.
    opcount_incr(espace, Op).
%%--------------------------------------------------------------------
%% @doc Increment a single espace op counter for a named instance.
%%
%% In the interest of keeping the code simple, the counter index of
%% each op corresponds to the position of `Op' in the record tuple,
%% which ranges from 2 to 7.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_incr(Inst_name :: atom(), Op :: espace_op()) -> ok.
%% One clause per op: the counter index is the position of the op's field
%% in the #opctr record tuple (2..7, see the doc above). An unknown op
%% fails with function_clause. `Inst_name' appears unused here but is
%% presumably consumed by the ?Opctr_ref macro to resolve this instance's
%% counters ref — confirm against the macro definition in the header.
opcount_incr(Inst_name, in) -> counters:add(?Opctr_ref, #opctr.in, 1);
opcount_incr(Inst_name, rd) -> counters:add(?Opctr_ref, #opctr.rd, 1);
opcount_incr(Inst_name, inp) -> counters:add(?Opctr_ref, #opctr.inp, 1);
opcount_incr(Inst_name, rdp) -> counters:add(?Opctr_ref, #opctr.rdp, 1);
opcount_incr(Inst_name, out) -> counters:add(?Opctr_ref, #opctr.out, 1);
opcount_incr(Inst_name, eval) -> counters:add(?Opctr_ref, #opctr.eval, 1).
%%--------------------------------------------------------------------
%% @doc Return the current counts for the unnamed instance as a map.
%%
%% See opcount_count/1 for details.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_counts() -> #{espace_op() => integer()}.
opcount_counts() ->
    %% Delegates to opcount_counts/1 for the default (unnamed) `espace' instance.
    opcount_counts(espace).
%%--------------------------------------------------------------------
%% @doc Return the current counts for a named instance as a map.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_counts(atom()) -> #{espace_op() => integer()}.
opcount_counts(Inst_name) ->
    %% Counter slots 2..?Opctr_size mirror the #opctr record fields; pair
    %% each op name with its current count and return the result as a map.
    Ctr_ref = ?Opctr_ref,
    Indices = lists:seq(2, ?Opctr_size),
    Pairs = [ {Op_name, counters:get(Ctr_ref, Idx)}
              || {Op_name, Idx} <- lists:zip(?Opctr_names, Indices) ],
    maps:from_list(Pairs).
%%--------------------------------------------------------------------
%% @doc Reset all the op counters of the unnamed instance.
%%
%% See `opcount_reset/1' for details.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_reset() -> ok.
opcount_reset() ->
    %% Delegates to opcount_reset/1 for the default (unnamed) `espace' instance.
    opcount_reset(espace).
%%--------------------------------------------------------------------
%% @doc Reset all the op counters of a named instance.
%%
%% This function has been provided for investigating an application.
%%
%% @end
%%--------------------------------------------------------------------
-spec opcount_reset(atom()) -> ok.
opcount_reset(Inst_name) ->
    %% Zero every op counter slot (2..?Opctr_size) of this instance.
    %% lists:foreach/2 replaces the previous list comprehension: the loop
    %% is run purely for its side effect, so no throwaway result list is
    %% built.
    Ctr_ref = ?Opctr_ref,
    lists:foreach(fun (Idx) -> counters:put(Ctr_ref, Idx, 0) end,
                  lists:seq(2, ?Opctr_size)),
    ok.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc check and evaluate a term, if it is a function.
%%
%% @end
%%--------------------------------------------------------------------
-spec do_eval(term()) -> term().
%% A zero-arity fun is called directly; a {Fun, Args} pair where `Args' is
%% a list matching `Fun's arity is applied; any other term is returned
%% unchanged. (Trailing non-Erlang dataset metadata that followed the last
%% clause has been removed — it broke compilation.)
do_eval(Fun) when is_function(Fun, 0) ->
    Fun();
do_eval({Fun, Args}) when is_list(Args) andalso is_function(Fun, length(Args)) ->
    erlang:apply(Fun, Args);
do_eval(X) ->
    X.
%% Copyright (c) 2013-2020 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.

%% File    : luerl_ex.erl
%% Authors : <NAME>
%% Purpose : Elixir-style wrappers for luerl.erl

%% This module just contains functions that forward to luerl.erl, but place
%% the VM State arguments in the first position rather than the last. This
%% better matches Elixir conventions and allows for using the Elixir pipe
%% operator '|>' to chain Luerl function calls.
%%
%% Every wrapper is a one-to-one forward: the first argument is the Luerl
%% VM state, the remaining arguments keep their luerl.erl meaning.

-module('Elixir.Luerl').

-export([eval/2,evalfile/2,
         do/2,dofile/2,
         load/2,load/3,
         loadfile/2,loadfile/3,
         path_loadfile/2,path_loadfile/3,path_loadfile/4,
         load_module/3,load_module1/3,
         call/3,call_chunk/3,
         call_function/3,call_function1/3,function_list/2,
         call_method/3,call_method1/3,method_list/2,
         get_table/2,get_table1/2,set_table/3,set_table1/3,set_table1/4,
         init/0,stop/1,gc/1,
         encode/2,encode_list/2,decode/2,decode_list/2]).

%% Evaluating and running chunks.

eval(St, Chunk) ->
    luerl:eval(Chunk, St).

evalfile(St, Path) ->
    luerl:evalfile(Path, St).

do(St, S) ->
    luerl:do(S, St).

dofile(St, Path) ->
    luerl:dofile(Path, St).

%% Loading chunks, files and modules.

load(St, Bin) ->
    luerl:load(Bin, St).

load(St, Bin, Opts) ->
    luerl:load(Bin, Opts, St).

loadfile(St, Name) ->
    luerl:loadfile(Name, St).

loadfile(St, Name, Opts) ->
    luerl:loadfile(Name, Opts, St).

path_loadfile(St, Name) ->
    luerl:path_loadfile(Name, St).

path_loadfile(St, Dirs, Name) ->
    luerl:path_loadfile(Dirs, Name, St).

path_loadfile(St, Dir, Name, Opts) ->
    luerl:path_loadfile(Dir, Name, Opts, St).

load_module(St, Fp, Mod) ->
    luerl:load_module(Fp, Mod, St).

load_module1(St, Fp, Mod) ->
    luerl:load_module1(Fp, Mod, St).

%% State lifecycle.

init() ->
    luerl:init().

%% Calling Lua functions and methods.

call(St, C, As) ->
    luerl:call(C, As, St).

call_chunk(St, C, As) ->
    luerl:call_chunk(C, As, St).

call_function(St, Fp, As) ->
    luerl:call_function(Fp, As, St).

call_function1(St, Lfp, Las) ->
    luerl:call_function1(Lfp, Las, St).

function_list(St, Ks) ->
    luerl:function_list(Ks, St).

call_method(St, Fp, As) ->
    luerl:call_method(Fp, As, St).

call_method1(St, Fp, Las) ->
    luerl:call_method1(Fp, Las, St).

method_list(St, Ks) ->
    luerl:method_list(Ks, St).

%% Table access.

get_table(St, Fp) ->
    luerl:get_table(Fp, St).

get_table1(St, Fp) ->
    luerl:get_table1(Fp, St).

set_table(St, Fp, V) ->
    luerl:set_table(Fp, V, St).

set_table1(St, Lfp, Lv) ->
    luerl:set_table1(Lfp, Lv, St).

set_table1(St, Tab, Key, Lv) ->
    luerl:set_table1(Tab, Key, Lv, St).

stop(St) ->
    luerl:stop(St).

gc(St) ->
    luerl:gc(St).

%% Encoding and decoding between Erlang and Lua terms.

encode_list(St, Ts) ->
    luerl:encode_list(Ts, St).

encode(St, V) ->
    luerl:encode(V, St).

decode_list(St, Lts) ->
    luerl:decode_list(Lts, St).

decode(St, V) ->
    luerl:decode(V, St).
%%%=============================================================================
%%% @copyright (C) 2018, <NAME>
%%% @doc
%%% ADT for keeping the state of State Channels
%%% @end
%%%=============================================================================
-module(aesc_state_tree).

%% API
-export([commit_to_db/1,
         delete/2,
         empty/0,
         empty_with_backend/0,
         enter/2,
         get/2,
         lookup/2,
         mtree_iterator/1,
         new_with_backend/1,
         root_hash/1]).

-export([ from_binary_without_backend/1
        , to_binary_without_backend/1
        ]).

%%%===================================================================
%%% Types
%%%===================================================================

-type channel() :: aesc_channels:channel().
-type chkey() :: aesc_channels:pubkey().
-type chvalue() :: aesc_channels:serialized().

%% A Merkle tree mapping channel pubkeys to serialized channel objects.
-opaque tree() :: aeu_mtrees:mtree(chkey(), chvalue()).

-export_type([tree/0]).

%% Serialization version of the whole-tree binary format.
-define(VSN, 1).

%%%===================================================================
%%% API
%%%===================================================================

%% Flush cached tree nodes to the backing database.
-spec commit_to_db(tree()) -> tree().
commit_to_db(Tree) ->
    aeu_mtrees:commit_to_db(Tree).

%% Remove the channel stored under `PubKey' from the tree.
-spec delete(aesc_channels:pubkey(), tree()) -> tree().
delete(PubKey, Tree) ->
    aeu_mtrees:delete(PubKey, Tree).

%% A fresh, memory-only tree (no DB backend).
-spec empty() -> tree().
empty() ->
    aeu_mtrees:empty().

%% A fresh tree backed by the channels DB backend.
-spec empty_with_backend() -> tree().
empty_with_backend() ->
    aeu_mtrees:empty_with_backend(aec_db_backends:channels_backend()).

%% A backend tree rooted at an existing hash (or `empty').
-spec new_with_backend(aeu_mtrees:root_hash() | 'empty') -> tree().
new_with_backend(Hash) ->
    aeu_mtrees:new_with_backend(Hash, aec_db_backends:channels_backend()).

%% Insert or update a channel; the key is the channel's pubkey and the
%% value its serialized form.
-spec enter(channel(), tree()) -> tree().
enter(Channel, Tree) ->
    PubKey = aesc_channels:pubkey(Channel),
    Serialized = aesc_channels:serialize(Channel),
    aeu_mtrees:enter(PubKey, Serialized, Tree).

%% Fetch and deserialize a channel; crashes if `Id' is not present.
-spec get(chkey(), tree()) -> aesc_channels:channel().
get(Id, Tree) ->
    aesc_channels:deserialize(Id, aeu_mtrees:get(Id, Tree)).

%% Like get/2 but returns `none' instead of crashing on a missing key.
-spec lookup(chkey(), tree()) -> {value, channel()} | none.
lookup(PubKey, Tree) ->
    case aeu_mtrees:lookup(PubKey, Tree) of
        {value, Val} -> {value, aesc_channels:deserialize(PubKey, Val)};
        none -> none
    end.

%% Root hash of the tree; `{error, empty}' for an empty tree.
-spec root_hash(tree()) -> {ok, aeu_mtrees:root_hash()} | {error, empty}.
root_hash(Tree) ->
    aeu_mtrees:root_hash(Tree).

%% Serialize the whole tree (without its backend) into a single binary,
%% wrapped in a versioned chain-object envelope.
-spec to_binary_without_backend(tree()) -> binary().
to_binary_without_backend(Tree) ->
    Bin = aeu_mtrees:serialize(Tree),
    aeser_chain_objects:serialize(
        channels_mtree,
        ?VSN,
        serialization_template(?VSN),
        [{channels, Bin}]).

%% Inverse of to_binary_without_backend/1.
-spec from_binary_without_backend(binary()) -> tree().
from_binary_without_backend(Bin) ->
    [{channels, ChannelsBin}] =
        aeser_chain_objects:deserialize(channels_mtree, ?VSN,
                                        serialization_template(?VSN), Bin),
    aeu_mtrees:deserialize(ChannelsBin).

%% Field layout of the serialized envelope for a given version.
serialization_template(?VSN) ->
    [{channels, binary}].

%% Iterator over the raw (serialized) entries of the underlying mtree.
-spec mtree_iterator(tree()) -> aeu_mtrees:iterator().
mtree_iterator(Tree) ->
    aeu_mtrees:iterator(Tree).
%%==============================================================================
%% Text Manipulation Functions
%%==============================================================================
-module(els_text).

-export([ last_token/1
        , line/2
        , line/3
        , range/3
        , tokens/1
        ]).

-type text() :: binary().
-type line_num() :: non_neg_integer().
-type column_num() :: pos_integer().
-type token() :: erl_scan:token().

%% @doc Extract the N-th line from a text (`LineNum' is 0-based).
-spec line(text(), line_num()) -> text().
line(Text, LineNum) ->
  Lines = binary:split(Text, <<"\n">>, [global]),
  lists:nth(LineNum + 1, Lines).

%% @doc Extract the N-th line from a text, truncated to its first
%% `ColumnNum' bytes.
-spec line(text(), line_num(), column_num()) -> text().
line(Text, LineNum, ColumnNum) ->
  Line = line(Text, LineNum),
  binary:part(Line, {0, ColumnNum}).

%% @doc Extract a snippet from a text, from [StartLoc..EndLoc).
%% NOTE(review): unlike line/2, locations here appear to be 1-based —
%% the {-1, 1} sentinel in line_starts/1 makes {1, 1} address the first
%% byte. Confirm with the callers before changing.
-spec range(text(), {line_num(), column_num()}, {line_num(), column_num()}) ->
   text().
range(Text, StartLoc, EndLoc) ->
  LineStarts = line_starts(Text),
  StartPos = pos(LineStarts, StartLoc),
  EndPos = pos(LineStarts, EndLoc),
  binary:part(Text, StartPos, EndPos - StartPos).

%% @doc Tokenize the text with erl_scan; scan errors yield an empty list.
-spec tokens(text()) -> [token()].
tokens(Text) ->
  case erl_scan:string(els_utils:to_list(Text)) of
    {ok, Tokens, _} -> Tokens;
    {error, _, _} -> []
  end.

%% @doc Extract the last token from the given text.
-spec last_token(text()) -> token() | {error, empty}.
last_token(Text) ->
  case tokens(Text) of
    [] -> {error, empty};
    Tokens -> lists:last(Tokens)
  end.

%%==============================================================================
%% Internal functions
%%==============================================================================

%% Byte offset of the character *before* each line: a sentinel for line 1,
%% then the position of every newline character.
-spec line_starts(text()) -> [{integer(), any()}].
line_starts(Text) ->
  [{-1, 1} | binary:matches(Text, <<"\n">>)].

%% Absolute byte offset of a {Line, Column} location.
-spec pos([{integer(), any()}], {line_num(), column_num()}) ->
   pos_integer().
pos(LineStarts, {LineNum, ColumnNum}) ->
  {LinePos, _} = lists:nth(LineNum, LineStarts),
  LinePos + ColumnNum.
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2019 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% Module that allows traversing via different structures and execution of jobs on them.
%%% The basic concepts are tasks (see traverse_task.erl) and pools. Each task represents single traverse while pool
%%% represents single execution environment. The tasks can be scheduled to be executed on different environments
%%% (different clusters). Thus, concepts of task creator and executor are introduced. The creator represents the
%%% environment where the task is created while executor represents environment where the task is processed.
%%% The task is processed executing jobs. The job represents processing of single entity (e.g., dir or file).
%%% First job (called main_job) is provided to task start function, further jobs can be generated as first and next
%%% jobs result. First job is treated specially as it is the only job that exists before task start. Thus, its id is
%%% stored in a task document (see traverse_task.erl) and its special status is highlighted when it is persisted.
%%% Such special treatment is a result of the fact that task can be scheduled for other environment. Thus main job must
%%% be available there (all other jobs will be created on the environment that processes task).
%%% The jobs are divided into two groups: master and slave jobs. Master jobs process entities that define structure
%%% (e.g., directories) while slave jobs should process other elements (e.g., files). Thus, master jobs can produce
%%% further master jobs and/or slave jobs.
%%% Each pool uses two instances of worker pool for jobs execution: one instance for execution of master jobs and
%%% one for slave jobs. Each master job is blocked until all of slave jobs generated by it are finished.
%%% Worker pools can be started on several nodes. In such a case, load is balanced between nodes executing different
%%% tasks on different nodes (all jobs connected with single task are executed on the same node - see
%%% traverse_tasks_scheduler.erl).
%%% Job handling functions are provided by callback modules (see traverse_behaviour.erl). Callback modules provide also
%%% functions needed for jobs persistency (master jobs are stored only). They also can provide additional information
%%% for datastore documents synchronization and tasks sorting (see traverse_task_list.erl). Multiple callback modules
%%% can be used for single pool (different callback modules for different tasks).
%%% Task load balancing is based on groups. Each task can be connected with a group and the load balancing algorithm first
%%% chooses a group and only then a task to be executed (to prevent a single group from taking all resources while other
%%% groups are waiting - see traverse_tasks_scheduler.erl).
%%%
%%% Typical task lifecycle is as follows:
%%% - user calls run function
%%% - the job that initializes task is persisted
%%% - number of ongoing tasks is verified
%%% - if the number is lower than the limit and task executor is local environment,
%%% task will be created with status "ongoing" and number of parallel tasks is incremented
%%% - otherwise, task will be created with status "scheduled"; if task executor is local environment group
%%% connected with task is marked as waiting for execution
%%% - if task is created as ongoing, it is put in master worker pool queue, next:
%%% - workers from worker pool execute jobs connected with task (main job and jobs created as a result of master
%%% jobs processing) until all jobs are finished
%%% - last job connected with task updates task status to "finished"
%%% - last job connected with task checks if there is any waiting tasks
%%% - if some tasks are waiting, longest waiting group for execution is chosen, and oldest task from
%%% this group is put in master worker pool queue; chosen task status is changed to "ongoing"
%%% - otherwise, number of parallel tasks is decremented
%%% Additionally, tasks may be started when task scheduled at other environment appears (and local environment is
%%% chosen for execution) or during environment restart. In first case, number of ongoing tasks is verified to check if
%%% task can be executed or has to wait.
%%% @end
%%%-------------------------------------------------------------------
-module(traverse).
-author("<NAME>").
-include("traverse/traverse.hrl").
-include("modules/datastore/datastore.hrl").
-include_lib("ctool/include/logging.hrl").
%% API
-export([init_pool/4, init_pool/5, init_pool_service/5, restart_tasks/3, stop_pool/1, stop_pool_service/1,
run/3, run/4, cancel/2, cancel/3, on_task_change/2, on_job_change/5]).
%% Functions executed on pools
-export([execute_master_job/10, execute_slave_job/5]).
%% For rpc
-export([run_on_master_pool/10, run_task/3]).
%% @formatter:off
% Basic types for execution environment
-type pool() :: binary(). % term used to identify instance of execution environment
-type callback_module() :: module().
-type environment_id() :: traverse_task_list:tree(). % environment (cluster) where task is scheduled or processed
-type pool_options() :: #{
executor => environment_id(),
callback_modules => [callback_module()],
restart => boolean(),
% If the limit is not set, limit is equal to overall limit (argument of init_pool function) divided by nodes number
parallel_orders_per_node_limit => traverse_tasks_scheduler:ongoing_tasks_limit()
}.
% Basic types for tasks management
-type id() :: datastore:key().
-type task() :: traverse_task:doc().
-type description() :: #{atom() => integer()}. % map describing progress of task; framework provides following counters:
% slave_jobs_delegated, master_jobs_delegated, slave_jobs_done,
% master_jobs_done, slave_jobs_failed, master_jobs_failed;
% the user can add own counters returning map with value upgrade from
% job (see traverse_behaviour.erl)
-type additional_data() :: #{binary() => binary()}.
-type status() :: atom(). % framework uses statuses: scheduled, ongoing, finished and canceled but user can set
% any intermediary status using traverse_task:update_status function
-type group() :: binary(). % group used for load balancing (see traverse_tasks_scheduler.erl)
-type master_job_mode() :: single | all. % mode of master jobs starting on pool; for `all` each master job start is
% triggered immediately after job definition; for `single` only one master
% job start is triggered immediately after job definition - rest of jobs
% waits until execution of triggered job is finished; as a result only one
% master job is performed in parallel for each task for `single` mode
-type run_options() :: #{
executor => environment_id(),
creator => environment_id(),
callback_module => callback_module(),
group_id => group(),
additional_data => additional_data(),
master_job_mode => master_job_mode()
}.
-type task_execution_info() :: #task_execution_info{}. % see traverse.hrl
% Basic types for jobs management
-type job() :: term().
-type job_id() :: datastore:key().
-type job_status() :: waiting | on_pool | ended | failed | canceled.
-type master_job_map() :: #{
slave_jobs => [job()],
sequential_slave_jobs => [job()],
master_jobs => [job()],
async_master_jobs => [job()],
description => description(),
cancel_callback => job_cancel_callback(),
finish_callback => job_finish_callback()
}.
-type master_job_extended_args() :: #{
task_id := id(),
master_job_starter_callback => master_job_starter_callback() % callback is not supported when master_job_mode =:= single
}.
-type master_job_starter_callback() :: fun((master_job_starter_args()) -> ok).
-type master_job_starter_args() :: #{
jobs => [job()],
cancel_callback => job_cancel_callback()
}.
-type job_cancel_callback() :: fun((description()) -> ok).
-type job_finish_callback() :: fun((master_job_extended_args(), description()) -> ok).
% Types used to provide additional information to framework
-type timestamp() :: non_neg_integer(). % Timestamp used to sort tasks (usually provided by callback function)
-type sync_info() :: #{
mutator => datastore_doc:mutator(),
scope => datastore_doc:scope(),
sync_enabled => boolean(),
local_links_tree_id => datastore:tree_id(),
remote_driver => datastore:remote_driver(),
remote_driver_ctx => datastore:remote_driver_ctx()
}.
%% @formatter:on
% Internal types for framework
-type execution_pool() :: worker_pool:name(). % internal names of worker pools used by framework
-type ctx() :: traverse_task:ctx().
-type node_crash_policy() :: restart | cancel_task.
-type task_callback() :: task_started | task_finished | task_canceled | on_cancel_init.
% Types used by functions that restart tasks after node restart
-type tasks_ctxs() :: #{id() => ctx() | ctx_not_found}.
-type jobs_per_task() :: #{id() => [{job_id(), job()}]}.
-export_type([pool/0, id/0, task/0, group/0, master_job_mode/0, task_execution_info/0, job/0, job_id/0, job_status/0,
environment_id/0, description/0, status/0, additional_data/0, master_job_extended_args/0, timestamp/0,
sync_info/0, master_job_map/0, callback_module/0, node_crash_policy/0]).
-define(MASTER_POOL_NAME(Pool), binary_to_atom(<<Pool/binary, "_master">>, utf8)).
-define(SLAVE_POOL_NAME(Pool), binary_to_atom(<<Pool/binary, "_slave">>, utf8)).
-define(CALL_TIMEOUT, timer:hours(24)).
-define(DEFAULT_GROUP, <<"main_group">>).
-define(DEFAULT_ENVIRONMENT_ID, <<"default_executor">>).
%% When to_string/1 function is not implemented by the callback module,
%% str_utils:format/1 is used to print job in logs (see to_string/1 function).
%% Unfortunately, an exception is thrown and handled inside this function
%% and because of that macro ?error_stacktrace prints wrong stacktrace.
%% This macro is used to bypass the problem.
-define(log_error_with_stacktrace(Format, Args, Stacktrace),
log_error_with_stacktrace(Stacktrace, Format, Args)
).
%%%===================================================================
%%% API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% @equiv init_pool(PoolName, MasterJobsNum, SlaveJobsNum, ParallelOrdersLimit, #{}).
%% @end
%%--------------------------------------------------------------------
-spec init_pool(pool(), non_neg_integer(), non_neg_integer(), non_neg_integer()) -> ok | no_return().
init_pool(PoolName, MasterJobsNum, SlaveJobsNum, ParallelOrdersLimit) ->
    %% Convenience wrapper: init with default pool options (see init_pool/5).
    init_pool(PoolName, MasterJobsNum, SlaveJobsNum, ParallelOrdersLimit, #{}).
%%--------------------------------------------------------------------
%% @doc
%% Inits the pool as an internal service to allow failover.
%% @end
%%--------------------------------------------------------------------
-spec init_pool(pool(), non_neg_integer(), non_neg_integer(), non_neg_integer(), pool_options()) -> ok | no_return().
init_pool(PoolName, MasterJobsNum, SlaveJobsNum, ParallelOrdersLimit, Options) ->
    %% Register the pool as an internal service so another node can take
    %% it over on failure: init_pool_service/5 starts it, restart_tasks/3
    %% resumes its interrupted tasks, stop_pool_service/1 tears it down.
    ServiceOptions = #{
        start_function => init_pool_service,
        start_function_args =>
            [PoolName, MasterJobsNum, SlaveJobsNum, ParallelOrdersLimit, Options],
        takeover_function => restart_tasks,
        takeover_function_args => [PoolName, Options, node()],
        migrate_function => undefined,
        migrate_function_args => [],
        stop_function => stop_pool_service,
        stop_function_args => [PoolName]
    },
    internal_services_manager:start_service(?MODULE, PoolName, ServiceOptions).
%%--------------------------------------------------------------------
%% @doc
%% Inits the pool (starting appropriate worker pools and adding node to load balancing document)
%% and restarts tasks if needed.
%% @end
%%--------------------------------------------------------------------
-spec init_pool_service(pool(), non_neg_integer(), non_neg_integer(), non_neg_integer(), pool_options()) -> ok | no_return().
init_pool_service(PoolName, MasterJobsNum, SlaveJobsNum, ParallelOrdersLimit, Options) ->
    %% Two worker_pool instances back each traverse pool: one for master
    %% jobs, one for slave jobs (lifo queueing).
    MasterPool = worker_pool:start_sup_pool(?MASTER_POOL_NAME(PoolName), [{workers, MasterJobsNum}, {queue_type, lifo}]),
    SlavePool = worker_pool:start_sup_pool(?SLAVE_POOL_NAME(PoolName), [{workers, SlaveJobsNum}, {queue_type, lifo}]),
    try
        %% Assert both pools started; the "both already started" case is
        %% translated to {error, already_exists}. Note: if only ONE of the
        %% two pools was already started, the badmatch is not caught here
        %% and the process crashes with the raw badmatch reason.
        {{ok, _}, {ok, _}} = {MasterPool, SlavePool}
    catch
        error:{badmatch, {{error, {already_started, _}}, {error, {already_started, _}}}} ->
            throw({error, already_exists})
    end,
    %% Default per-node limit: the global limit spread evenly over all
    %% cluster nodes, but never less than one task per node.
    ParallelOrdersPerNodeLimitDefault = max(1, ceil(ParallelOrdersLimit / length(consistent_hashing:get_all_nodes()))),
    ParallelOrdersPerNodeLimit = maps:get(parallel_orders_per_node_limit, Options, ParallelOrdersPerNodeLimitDefault),
    ok = traverse_tasks_scheduler:init(PoolName, ParallelOrdersLimit, ParallelOrdersPerNodeLimit),
    %% Resume any tasks that were running on this node before the restart.
    restart_tasks(PoolName, Options, node()).
%%--------------------------------------------------------------------
%% @doc
%% Restart tasks that have been running on particular node. Can be used during node restart or to handle
%% failure of other node.
%% Warning - if node has not been stopped properly, some tasks can be corrupted and impossible to be restarted.
%% @end
%%--------------------------------------------------------------------
-spec restart_tasks(pool(), pool_options(), node()) -> ok | no_return().
restart_tasks(PoolName, Options, Node) ->
    Executor = maps:get(executor, Options, ?DEFAULT_ENVIRONMENT_ID),
    CallbackModules = maps:get(callback_modules, Options, [binary_to_atom(PoolName, utf8)]),
    LocalNode = node(),
    %% Forget the crashed/restarted node's ongoing-task count, then repair
    %% the task documents that still claim to be running on that node.
    ok = traverse_tasks_scheduler:reset_node_ongoing_tasks(PoolName, Node),
    TaskIdToCtxMap = repair_ongoing_tasks(PoolName, Executor, Node),
    lists:foreach(fun(CallbackModule) ->
        %% For each callback module: collect the persisted master jobs of
        %% the interrupted tasks, decide which tasks can be restarted and
        %% which must be canceled, account the restarted ones on the local
        %% node, clean up the canceled/orphaned jobs, and re-queue the
        %% restartable ones.
        {JobsPerTask, JobsWitoutCtx, UpdatedTaskIdToCtxMap} =
            get_tasks_jobs(PoolName, CallbackModule, Node, Executor, TaskIdToCtxMap),
        {TasksToRestart, TasksToCancel} = clasiffy_tasks_to_restart_and_cancel(
            UpdatedTaskIdToCtxMap, JobsPerTask, PoolName, CallbackModule, Options, Node),
        traverse_tasks_scheduler:change_node_ongoing_tasks(PoolName, LocalNode, length(TasksToRestart)),
        clean_tasks_and_jobs(UpdatedTaskIdToCtxMap, JobsPerTask, TasksToCancel, JobsWitoutCtx, PoolName, CallbackModule, Node),
        restart_jobs(UpdatedTaskIdToCtxMap, maps:with(TasksToRestart, JobsPerTask), PoolName, CallbackModule, Executor, Node)
    end, CallbackModules),
    %% The restart may have freed capacity - try to pull in waiting tasks.
    schedule_waiting_tasks_if_possible(PoolName, Executor).
%%--------------------------------------------------------------------
%% @doc
%% Stops pool internal service.
%% @end
%%--------------------------------------------------------------------
-spec stop_pool(pool()) -> ok.
stop_pool(PoolName) ->
    %% Stops the internal service registered for this pool in init_pool/5;
    %% the service manager will invoke stop_pool_service/1.
    internal_services_manager:stop_service(?MODULE, PoolName, <<>>).
%%--------------------------------------------------------------------
%% @doc
%% Stops pool and prevents load balancing algorithm from scheduling tasks on node.
%% Warning: possible races with task scheduling - make sure that there are no tasks waiting to be executed.
%% @end
%%--------------------------------------------------------------------
-spec stop_pool_service(pool()) -> ok.
stop_pool_service(PoolName) ->
    %% Tear down both worker pools backing this traverse pool.
    ok = worker_pool:stop_sup_pool(?MASTER_POOL_NAME(PoolName)),
    ok = worker_pool:stop_sup_pool(?SLAVE_POOL_NAME(PoolName)),
    %% The scheduler document may already be gone (e.g. the pool was never
    %% fully initialized) - treat that as success.
    case traverse_tasks_scheduler:clear(PoolName) of
        ok -> ok;
        {error, not_found} -> ok
    end.
%%--------------------------------------------------------------------
%% @doc
%% @equiv run(PoolName, TaskId, Job, #{}).
%% @end
%%--------------------------------------------------------------------
-spec run(pool(), id(), job()) -> ok.
run(PoolName, TaskId, Job) ->
    %% Convenience wrapper: run the task with default options (see run/4).
    run(PoolName, TaskId, Job, #{}).
%%--------------------------------------------------------------------
%% @doc
%% Initializes task. The task can be started immediately or scheduled (see traverse_tasks_scheduler.erl).
%% @end
%%--------------------------------------------------------------------
-spec run(pool(), id(), job(), run_options()) -> ok.
run(PoolName, TaskId, Job, Options) ->
    Executor = maps:get(executor, Options, ?DEFAULT_ENVIRONMENT_ID),
    Creator = maps:get(creator, Options, Executor),
    CallbackModule = maps:get(callback_module, Options, binary_to_atom(PoolName, utf8)),
    TaskGroup = maps:get(group_id, Options, ?DEFAULT_GROUP),
    AdditionalData = maps:get(additional_data, Options, #{}),
    MasterJobMode = maps:get(master_job_mode, Options, all),
    ExtendedCtx = get_extended_ctx(CallbackModule, Job),
    %% Decide where (and whether) the task starts right away:
    %% - local executor with free capacity -> run on the chosen node,
    %% - local executor at the limit      -> persist as waiting,
    %% - remote executor                  -> persist as waiting; the remote
    %%   environment picks it up via on_task_change/2.
    {JobStatus, Node, Description} = case Creator =:= Executor of
        true ->
            case traverse_tasks_scheduler:increment_ongoing_tasks_and_choose_node(PoolName) of
                {ok, ChosenNode} ->
                    {on_pool, ChosenNode, #{master_jobs_delegated => 1}};
                {error, limit_exceeded} ->
                    {waiting, undefined, #{}}
            end;
        _ ->
            {waiting, remote, #{}}
    end,
    %% Persist the main job first (see the module doc: the main job must
    %% exist before the task document that references it), then the task.
    {ok, JobId} = CallbackModule:update_job_progress(main_job, Job, PoolName, TaskId, JobStatus),
    ok = traverse_task:create(ExtendedCtx, PoolName, CallbackModule, TaskId, Creator, Executor,
        TaskGroup, JobId, Node, Description, AdditionalData, MasterJobMode),
    case Node of
        undefined ->
            %% Local limit exceeded - the task waits for a free slot.
            ok;
        remote ->
            %% Scheduled for another environment - nothing to start here.
            ok;
        _ ->
            ok = task_callback(CallbackModule, task_started, TaskId, PoolName),
            %% Kick off the main master job on the chosen node's pool.
            ok = rpc:call(Node, ?MODULE, run_on_master_pool, [
                PoolName, ?MASTER_POOL_NAME(PoolName), ?SLAVE_POOL_NAME(PoolName),
                CallbackModule, ExtendedCtx, Executor, TaskId, Job, JobId, MasterJobMode])
    end.
%%--------------------------------------------------------------------
%% @doc
%% Executes action connected with remote task change (can start task).
%% @end
%%--------------------------------------------------------------------
-spec on_task_change(task(), environment_id()) -> ok.
on_task_change(Task, Environment) ->
    % traverse_task:on_task_change/2 classifies the change; we react only to
    % 'remote_change' (possibly a remote cancellation) and 'run' (a task that
    % may now be started locally).
    case traverse_task:on_task_change(Task, Environment) of
        {remote_change, CallbackModule, MainJobId} ->
            case CallbackModule:get_job(MainJobId) of
                {ok, Job, PoolName, _} ->
                    ExtendedCtx = get_extended_ctx(CallbackModule, Job),
                    case traverse_task:on_remote_change(ExtendedCtx, Task, CallbackModule, Environment) of
                        ok ->
                            ok;
                        {ok, remote_cancel, TaskId} ->
                            % Task was cancelled remotely - notify the callback
                            % module about cancellation start.
                            task_callback(CallbackModule, on_cancel_init, TaskId, PoolName),
                            ok
                    end;
                {error, not_found} ->
                    % Main job document no longer exists - nothing to do.
                    ok
            end;
        {run, CallbackModule, MainJobId} ->
            case CallbackModule:get_job(MainJobId) of
                {ok, Job, PoolName, TaskId} ->
                    {ok, #task_execution_info{
                        master_job_mode = MasterJobMode
                    }} = traverse_task:get_execution_info(Task),
                    maybe_run_scheduled_task(
                        PoolName, CallbackModule, TaskId, Task, Environment, Job, MainJobId, MasterJobMode);
                {error, not_found} ->
                    ok
            end;
        ignore ->
            ok
    end.
%%--------------------------------------------------------------------
%% @doc
%% Starts task scheduled on other environment if possible (limit of parallel tasks is not reached).
%% @end
%%--------------------------------------------------------------------
-spec on_job_change(job(), job_id(), pool(), id(), environment_id()) -> ok.
on_job_change(Job, JobId, PoolName, TaskId, Environment) ->
    case traverse_task:get(PoolName, TaskId) of
        {ok, Task} ->
            % Only enqueued tasks are candidates for starting here.
            case traverse_task:is_enqueued(Task) of
                false ->
                    ok;
                true ->
                    {ok, #task_execution_info{
                        callback_module = CallbackModule,
                        executor = Executor,
                        master_job_mode = MasterJobMode
                    }} = traverse_task:get_execution_info(Task),
                    % Start the task only in the environment designated as its
                    % executor; other environments ignore the change.
                    case Executor =:= Environment of
                        true ->
                            maybe_run_scheduled_task(
                                PoolName, CallbackModule, TaskId, Task, Executor, Job, JobId, MasterJobMode);
                        _ ->
                            ok
                    end
            end;
        {error, not_found} ->
            ok
    end.
%%--------------------------------------------------------------------
%% @doc
%% @equiv cancel(PoolName, TaskId, ?DEFAULT_ENVIRONMENT_ID).
%% @end
%%--------------------------------------------------------------------
-spec cancel(pool(), id()) -> ok | {error, term()}.
cancel(PoolName, TaskId) ->
    % Convenience variant: cancel in the default environment (see cancel/3).
    cancel(PoolName, TaskId, ?DEFAULT_ENVIRONMENT_ID).
%%--------------------------------------------------------------------
%% @doc
%% Cancels task. Prevents jobs waiting in worker pools queues from execution.
%% @end
%%--------------------------------------------------------------------
-spec cancel(pool(), id(), environment_id()) -> ok | {error, term()}.
cancel(PoolName, TaskId, Environment) ->
    case traverse_task:get(PoolName, TaskId) of
        {ok, Task} ->
            {ok, #task_execution_info{
                callback_module = CallbackModule,
                main_job_id = MainJobId
            }} = traverse_task:get_execution_info(Task),
            % The main job is needed to build the extended ctx used for the
            % cancel operation on the task document.
            case CallbackModule:get_job(MainJobId) of
                {ok, Job, _, _} ->
                    ExtendedCtx = get_extended_ctx(CallbackModule, Job),
                    {ok, Info} = traverse_task:cancel(ExtendedCtx, PoolName, CallbackModule, TaskId, Environment),
                    % Only a cancel initiated locally triggers the
                    % on_cancel_init callback; remote cancels are handled via
                    % on_task_change/2.
                    case Info of
                        local_cancel -> task_callback(CallbackModule, on_cancel_init, TaskId, PoolName);
                        _ -> ok
                    end;
                {error, not_found} ->
                    {error, main_job_not_found}
            end;
        Other ->
            % Propagates {error, not_found} and any other error from get/2.
            Other
    end.
%%%===================================================================
%%% Functions executed on pools
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Executes master job using function provided by callback module. To be executed by worker pool process.
%% Master job is provided with function to enqueue next master jobs during the callback execution if needed.
%% After callback execution, async_master_jobs from master job answer are enqueued. Afterwards, sequential_slave_jobs
%% and next slave_jobs are executed on slave pool and the process awaits for their finish. At the end, master_jobs
%% are enqueued and finish_callback is executed.
%% @end
%%--------------------------------------------------------------------
-spec execute_master_job(pool(), execution_pool(), execution_pool(), callback_module(), ctx(), environment_id(),
    id(), job(), job_id(), master_job_mode()) -> ok.
execute_master_job(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx, Executor,
    TaskId, Job, JobId, MasterJobMode) ->
    try
        % In 'single' mode the callback gets no starter callback - it must not
        % delegate additional master jobs during execution.
        MasterJobExtendedArgs = case MasterJobMode of
            single ->
                #{task_id => TaskId}; % callback is not supported when master_job_mode =:= single
            all ->
                MasterJobCallback = prepare_master_callback(PoolName, MasterPool, SlavePool, CallbackModule,
                    ExtendedCtx, Executor, TaskId),
                #{task_id => TaskId, master_job_starter_callback => MasterJobCallback}
        end,
        {ok, MasterAns} = CallbackModule:do_master_job(Job, MasterJobExtendedArgs),
        % Collect the jobs the callback produced and record how many were
        % delegated before executing any of them (cancel detection relies on
        % this update's 'Canceled' flag).
        MasterJobsList = maps:get(master_jobs, MasterAns, []),
        AsyncMasterJobsList = maps:get(async_master_jobs, MasterAns, []),
        SlaveJobsList = maps:get(slave_jobs, MasterAns, []),
        SequentialSlaveJobsList = maps:get(sequential_slave_jobs, MasterAns, []),
        SlaveJobsDelegatedNum = length(SlaveJobsList) + length(lists:flatten(SequentialSlaveJobsList)),
        Description0 = maps:get(description, MasterAns, #{}),
        Description = Description0#{
            slave_jobs_delegated => SlaveJobsDelegatedNum,
            master_jobs_delegated => length(MasterJobsList) + length(AsyncMasterJobsList)
        },
        {ok, _, Canceled} = traverse_task:update_description(ExtendedCtx, PoolName, TaskId, Description),
        {_, NewDescription, Canceled2} = case Canceled of
            true ->
                % Task was cancelled: drop this job, mark it canceled and roll
                % back the delegation counters recorded above (the extra -1 on
                % master_jobs_delegated accounts for this job itself).
                ok = traverse_task_list:delete_job_link(PoolName, CallbackModule, node(), JobId),
                {ok, _} = CallbackModule:update_job_progress(JobId, Job, PoolName, TaskId, canceled),
                CancelDescription = #{
                    slave_jobs_delegated => -1 * (length(SlaveJobsList) + length(lists:flatten(SequentialSlaveJobsList))),
                    master_jobs_delegated => -1 * (length(MasterJobsList) + length(AsyncMasterJobsList)) - 1
                },
                CancelCallback = maps:get(cancel_callback, MasterAns, fun(_Description) -> ok end),
                CancelCallback(CancelDescription),
                {ok, _, _} = traverse_task:update_description(ExtendedCtx, PoolName, TaskId, CancelDescription);
            _ ->
                % Normal path: async master jobs first, then sequential slave
                % batches, then regular slave jobs (this process waits for the
                % slave results), finally the synchronous master jobs.
                ok = run_or_queue_on_master_pool(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx,
                    Executor, TaskId, AsyncMasterJobsList, MasterJobMode, MasterJobMode),
                SequentialSlaveAnswers = sequential_run_on_slave_pool(
                    PoolName, SlavePool, CallbackModule, ExtendedCtx, TaskId, SequentialSlaveJobsList),
                SlaveAnswers = run_on_slave_pool(
                    PoolName, SlavePool, CallbackModule, ExtendedCtx, TaskId, SlaveJobsList),
                SyncJobsToRun = case {MasterJobMode, AsyncMasterJobsList} of
                    {single, [_ | _]} -> none; % Single master job has been already started from AsyncMasterJobsList
                    _ -> MasterJobMode
                end,
                ok = run_or_queue_on_master_pool(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx,
                    Executor, TaskId, MasterJobsList, SyncJobsToRun, MasterJobMode),
                % Tally slave results; anything other than {ok, ok} (including
                % pool call timeouts) counts as an error.
                {SlavesOk, SlavesErrors} = lists:foldl(fun
                    ({ok, ok}, {OkSum, ErrorSum}) -> {OkSum + 1, ErrorSum};
                    (_, {OkSum, ErrorSum}) -> {OkSum, ErrorSum + 1}
                end, {0, 0}, SequentialSlaveAnswers ++ SlaveAnswers),
                ok = traverse_task_list:delete_job_link(PoolName, CallbackModule, node(), JobId),
                {ok, _} = CallbackModule:update_job_progress(JobId, Job, PoolName, TaskId, ended),
                Description2 = #{
                    slave_jobs_done => SlavesOk,
                    slave_jobs_failed => SlavesErrors,
                    master_jobs_done => 1
                },
                SlavesDescription = #{
                    slave_jobs_delegated => SlaveJobsDelegatedNum,
                    slave_jobs_done => SlavesOk,
                    slave_jobs_failed => SlavesErrors
                },
                FinishCallback = maps:get(finish_callback, MasterAns, fun(_Args, _SlavesDescription) -> ok end),
                FinishCallback(MasterJobExtendedArgs, SlavesDescription),
                % No new master jobs produced - try to pick a queued one so the
                % pool keeps making progress.
                case {AsyncMasterJobsList, MasterJobsList} of
                    {[], []} ->
                        run_from_queue(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx,
                            Executor, TaskId, MasterJobMode);
                    _ ->
                        ok
                end,
                {ok, _, _} = traverse_task:update_description(ExtendedCtx, PoolName, TaskId, Description2)
        end,
        % Check whether the whole task is finished; failures here are logged
        % but must not crash the worker pool process.
        try
            maybe_finish(PoolName, CallbackModule, ExtendedCtx, TaskId, Executor, NewDescription, Canceled2)
        catch
            E2:R2:Stacktrace2 ->
                ?log_error_with_stacktrace("Checking finish of job ~s of task ~p (module ~p) error ~p:~p",
                    [to_string(CallbackModule, Job), TaskId, CallbackModule, E2, R2], Stacktrace2)
        end
    catch
        E1:R1:Stacktrace1 ->
            % Master job crashed: log, mark the job failed (best-effort) and
            % still run the finish check so the task can terminate.
            ?log_error_with_stacktrace("Master job ~s of task ~s (module ~p) error ~p:~p",
                [to_string(CallbackModule, Job), TaskId, CallbackModule, E1, R1], Stacktrace1),
            ErrorDescription = #{
                master_jobs_failed => 1
            },
            % TODO - VFS-5532
            catch traverse_task_list:delete_job_link(PoolName, CallbackModule, node(), JobId),
            catch CallbackModule:update_job_progress(JobId, Job, PoolName, TaskId, failed),
            {ok, ErrorDescription2, Canceled3} = traverse_task:update_description(
                ExtendedCtx, PoolName, TaskId, ErrorDescription),
            try
                maybe_finish(PoolName, CallbackModule, ExtendedCtx, TaskId, Executor, ErrorDescription2, Canceled3)
            catch
                E3:R3:Stacktrace3 ->
                    ?log_error_with_stacktrace("Checking finish of job ~s of task ~p (module ~p) error ~p:~p",
                        [to_string(CallbackModule, Job), TaskId, CallbackModule, E3, R3], Stacktrace3)
            end
    end,
    ok.
%%--------------------------------------------------------------------
%% @doc
%% Executes slave job using function provided by callback module. To be executed by worker pool process.
%% @end
%%--------------------------------------------------------------------
-spec execute_slave_job(pool(), callback_module(), ctx(), id(), job()) -> ok | error.
execute_slave_job(PoolName, CallbackModule, ExtendedCtx, TaskId, Job) ->
    %% Any exception (from the callback or from the description update) is
    %% logged and converted to 'error' so the worker pool process survives.
    try
        case CallbackModule:do_slave_job(Job, TaskId) of
            {error, _} ->
                error;
            {ok, NewDescription} ->
                %% Callback may return a description diff to be merged into
                %% the task's progress counters.
                {ok, _, _} = traverse_task:update_description(
                    ExtendedCtx, PoolName, TaskId, NewDescription),
                ok;
            ok ->
                ok
        end
    catch
        Class:Reason:St ->
            ?log_error_with_stacktrace("Slave job ~s of task ~p (module ~p) error ~p:~p",
                [to_string(CallbackModule, Job), TaskId, CallbackModule, Class, Reason], St),
            error
    end.
%%%===================================================================
%%% Internal functions
%%%===================================================================
% Runs all given slave jobs in parallel on the slave pool and collects their
% results. A single (non-list) job is wrapped into a one-element list.
-spec run_on_slave_pool(pool(), execution_pool(), callback_module(), ctx(), id(), job() | [job()]) -> [ok | error].
run_on_slave_pool(PoolName, SlavePool, CallbackModule, ExtendedCtx, TaskId, Jobs) when is_list(Jobs) ->
    % pmap executes the pool calls concurrently; each call blocks up to
    % ?CALL_TIMEOUT waiting for the slave job result.
    lists_utils:pmap(fun(Job) ->
        worker_pool:call(SlavePool, {?MODULE, execute_slave_job, [PoolName, CallbackModule, ExtendedCtx, TaskId, Job]},
            worker_pool:default_strategy(), ?CALL_TIMEOUT)
    end, Jobs);
run_on_slave_pool(PoolName, SlavePool, CallbackModule, ExtendedCtx, TaskId, Job) ->
    run_on_slave_pool(PoolName, SlavePool, CallbackModule, ExtendedCtx, TaskId, [Job]).
% Executes batches of slave jobs: each element of Jobs is a batch that runs in
% parallel on the slave pool, while the batches themselves run sequentially.
% Returns the concatenated results of all batches.
-spec sequential_run_on_slave_pool(pool(), execution_pool(), callback_module(), ctx(), id(), [job() | [job()]]) ->
    [ok | error].
sequential_run_on_slave_pool(PoolName, SlavePool, CallbackModule, ExtendedCtx, TaskId, Jobs) ->
    % run_on_slave_pool/6 already returns a flat [ok | error] list per batch,
    % so lists:flatmap/2 (shallow append) is the direct idiom here and avoids
    % the extra pass of a deep lists:flatten/1 over the mapped results.
    lists:flatmap(fun(ParallelJobs) ->
        run_on_slave_pool(PoolName, SlavePool, CallbackModule, ExtendedCtx, TaskId, ParallelJobs)
    end, Jobs).
% Splits the given master jobs into a part that is executed immediately on the
% master pool and a part that is queued for later, according to RunNow:
% 'all' runs everything, 'single' runs only the first job, 'none' queues all.
% Jobs may arrive as bare job() terms (no persisted id yet) or {job(), job_id()}
% pairs.
-spec run_or_queue_on_master_pool(pool(), execution_pool(), execution_pool(), callback_module(), ctx(), environment_id(),
    id(), [job() | {job(), job_id()}], master_job_mode() | none, master_job_mode()) -> ok.
run_or_queue_on_master_pool(_PoolName, _MasterPool, _SlavePool, _CallbackModule, _ExtendedCtx,
    _Executor, _TaskId, [], _RunNow, _MasterJobMode) ->
    ok;
run_or_queue_on_master_pool(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx,
    Executor, TaskId, Jobs, RunNow, MasterJobMode) ->
    {JobsToRun, JobsToQueue} = case RunNow of
        all -> {Jobs, []};
        single -> {[hd(Jobs)], tl(Jobs)};
        none -> {[], Jobs}
    end,
    lists:foreach(fun
        ({Job, JobId}) ->
            run_on_master_pool(PoolName, MasterPool, SlavePool, CallbackModule,
                ExtendedCtx, Executor, TaskId, Job, JobId, MasterJobMode);
        (Job) ->
            % A bare job has no persisted id yet; 'undefined' makes the
            % callback module allocate one via update_job_progress.
            run_on_master_pool(PoolName, MasterPool, SlavePool, CallbackModule,
                ExtendedCtx, Executor, TaskId, Job, undefined, MasterJobMode)
    end, JobsToRun),
    lists:foreach(fun
        ({Job, JobId}) -> queue_on_master_pool(PoolName, CallbackModule, TaskId, Job, JobId);
        (Job) -> queue_on_master_pool(PoolName, CallbackModule, TaskId, Job, undefined)
    end, JobsToQueue).
% Builds the master_job_starter_callback passed to do_master_job in 'all' mode.
% The returned fun lets the callback module delegate additional master jobs
% while its master job is still running; delegation counters are updated first
% so a concurrent cancellation can be detected and rolled back.
-spec prepare_master_callback(pool(), execution_pool(), execution_pool(), callback_module(), ctx(), environment_id(),
    id()) -> master_job_starter_callback().
prepare_master_callback(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx, Executor, TaskId) ->
    fun(Args) ->
        Jobs = maps:get(jobs, Args, []),
        Description = #{
            master_jobs_delegated => length(Jobs)
        },
        {ok, _, Canceled} = traverse_task:update_description(ExtendedCtx, PoolName, TaskId, Description),
        case Canceled of
            true ->
                % Task was cancelled in the meantime - undo the counter bump
                % and notify the optional cancel_callback instead of running.
                CancelDescription = #{
                    master_jobs_delegated => -1 * length(Jobs)
                },
                case maps:get(cancel_callback, Args, undefined) of
                    undefined -> ok;
                    CancelCallback -> CancelCallback(CancelDescription)
                end,
                {ok, _, _} = traverse_task:update_description(ExtendedCtx, PoolName, TaskId, CancelDescription);
            _ ->
                run_or_queue_on_master_pool(
                    PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx, Executor, TaskId, Jobs, all, all)
        end
    end.
% Persists the job's 'on_pool' status, records a job link for crash recovery
% on this node, and casts the master job to the master pool (asynchronous -
% the result is reported via task description updates).
-spec run_on_master_pool(pool(), execution_pool(), execution_pool(), callback_module(), ctx(), environment_id(),
    id(), job(), job_id(), master_job_mode()) -> ok.
run_on_master_pool(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx, Executor, TaskId, Job, JobId, MasterJobMode) ->
    % update_job_progress may allocate a new id when JobId is undefined.
    {ok, JobId2} = CallbackModule:update_job_progress(JobId, Job, PoolName, TaskId, on_pool),
    ok = traverse_task_list:add_job_link(PoolName, CallbackModule, JobId2),
    ok = worker_pool:cast(MasterPool, {?MODULE, execute_master_job,
        [PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx, Executor, TaskId, Job, JobId2, MasterJobMode]}).
% Persists the job's 'on_pool' status and adds it to the task's job queue
% (task job links) instead of executing it; run_from_queue/8 picks it up later.
-spec queue_on_master_pool(pool(), callback_module(), id(), job(), job_id()) -> ok.
queue_on_master_pool(PoolName, CallbackModule, TaskId, Job, JobId) ->
    {ok, JobId2} = CallbackModule:update_job_progress(JobId, Job, PoolName, TaskId, on_pool),
    ok = traverse_task_list:add_task_job_link(PoolName, CallbackModule, TaskId, JobId2).
% Pops the first queued job of the task (if any) and starts it on the master
% pool. Used when a finished master job produced no new jobs, to keep the
% task progressing.
-spec run_from_queue(pool(), execution_pool(), execution_pool(), callback_module(), ctx(), environment_id(),
    id(), master_job_mode()) -> ok.
run_from_queue(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx, Executor, TaskId, MasterJobMode) ->
    case traverse_task_list:get_and_delete_first_task_job_link(PoolName, CallbackModule, node(), TaskId) of
        {ok, JobId} ->
            case CallbackModule:get_job(JobId) of
                {ok, Job, _, _} ->
                    % NOTE(review): ExtendedCtx is already bound (it is a
                    % parameter), so this is a match assertion, not a fresh
                    % binding - it badmatches if get_extended_ctx/2 returns a
                    % ctx different from the caller's. Confirm this equality
                    % is intended rather than a shadowing mistake.
                    ExtendedCtx = get_extended_ctx(CallbackModule, Job),
                    ok = run_on_master_pool(PoolName, MasterPool, SlavePool, CallbackModule, ExtendedCtx,
                        Executor, TaskId, Job, JobId, MasterJobMode);
                {error, not_found} ->
                    ?warning("Job ~p not found for pool ~p, callback module ~p, task ~p",
                        [JobId, PoolName, CallbackModule, TaskId])
            end;
        {error, not_found} ->
            % Queue is empty - nothing to run.
            ok
    end.
% Finishes the task when all delegated master jobs are accounted for
% (done + failed == delegated). Fires the task_canceled/task_finished callback
% and, on successful finish, tries to start the next waiting task.
-spec maybe_finish(pool(), callback_module(), ctx(), id(), environment_id(), description(), boolean()) -> ok.
maybe_finish(PoolName, CallbackModule, ExtendedCtx, TaskId, Executor, #{
    master_jobs_delegated := Delegated
} = Description, Canceled) ->
    Done = maps:get(master_jobs_done, Description, 0),
    Failed = maps:get(master_jobs_failed, Description, 0),
    case Delegated == Done + Failed of
        true ->
            % VFS-5532 - can never be equal in case of description saving error
            ok = case Canceled of
                true -> task_callback(CallbackModule, task_canceled, TaskId, PoolName);
                _ -> task_callback(CallbackModule, task_finished, TaskId, PoolName)
            end,
            % already_finished means another process won the race to finish
            % the task - that is fine.
            case traverse_task:finish(ExtendedCtx, PoolName, CallbackModule, TaskId, false, graceful) of
                ok -> check_task_list_and_run(PoolName, Executor, []);
                {error, already_finished} -> ok
            end;
        _ -> ok
    end.
% After a task slot is freed, looks for a waiting task to start. CheckedGroups
% accumulates groups already tried in this pass to avoid busy-looping on a
% group whose tasks cannot be started right now.
-spec check_task_list_and_run(pool(), environment_id(), [traverse:group()]) -> ok.
check_task_list_and_run(PoolName, Executor, CheckedGroups) ->
    case traverse_tasks_scheduler:get_next_group(PoolName) of
        {error, no_groups} ->
            % No waiting work - release the slot, then re-check once to close
            % the window in which a task may have been scheduled concurrently.
            ok = traverse_tasks_scheduler:decrement_ongoing_tasks(PoolName),
            case traverse_tasks_scheduler:get_next_group(PoolName) of
                {error, no_groups} ->
                    ok;
                _ ->
                    % Race with task starting
                    retry_run(PoolName, Executor, 0)
            end;
        {ok, GroupId} ->
            case lists:member(GroupId, CheckedGroups) of
                true ->
                    % Every available group was already tried - back off and
                    % retry asynchronously after a delay.
                    ok = traverse_tasks_scheduler:decrement_ongoing_tasks(PoolName),
                    retry_run(PoolName, Executor, 10000);
                false ->
                    StartAns = case traverse_task_list:get_first_scheduled_link(PoolName, GroupId, Executor) of
                        {ok, not_found} ->
                            % Group looks empty - try to deregister it; abort
                            % means a task appeared concurrently, so run it.
                            case deregister_group_and_check(PoolName, GroupId, Executor) of
                                ok ->
                                    start_interrupted;
                                {abort, TaskId} ->
                                    run_task(PoolName, TaskId, Executor)
                            end;
                        {ok, TaskId} ->
                            run_task(PoolName, TaskId, Executor)
                    end,
                    case StartAns of
                        ok ->
                            ok;
                        start_interrupted ->
                            % This group could not yield a start - remember it
                            % and try the next one.
                            check_task_list_and_run(PoolName, Executor, [GroupId | CheckedGroups])
                    end
            end
    end.
% Starts a scheduled task on the local node: marks the task as started and
% puts its main job on the master pool. Returns start_interrupted when the
% task/job disappeared or the start was aborted (e.g. cancelled concurrently).
-spec run_task(pool(), id(), environment_id()) -> ok | start_interrupted.
run_task(PoolName, TaskId, Executor) ->
    {ok, #task_execution_info{
        callback_module = CallbackModule,
        main_job_id = MainJobId,
        master_job_mode = MasterJobMode
    }} = traverse_task:get_execution_info(PoolName, TaskId),
    case CallbackModule:get_job(MainJobId) of
        {ok, Job, _, _} ->
            ExtendedCtx = get_extended_ctx(CallbackModule, Job),
            case traverse_task:start(ExtendedCtx, PoolName, CallbackModule, TaskId, #{master_jobs_delegated => 1}) of
                ok ->
                    ok = task_callback(CallbackModule, task_started, TaskId, PoolName),
                    ok = run_on_master_pool(PoolName, ?MASTER_POOL_NAME(PoolName), ?SLAVE_POOL_NAME(PoolName),
                        CallbackModule, ExtendedCtx, Executor, TaskId, Job, MainJobId, MasterJobMode);
                {error, start_aborted} ->
                    start_interrupted;
                {error, not_found} ->
                    start_interrupted
            end;
        {error, not_found} ->
            start_interrupted
    end.
% Asynchronously (in a spawned process, after Delay ms) retries finding and
% starting a waiting task. Used to resolve races between task finishing and
% task scheduling without blocking the caller.
-spec retry_run(pool(), environment_id(), non_neg_integer()) -> ok.
retry_run(PoolName, Executor, Delay) ->
    spawn(fun() ->
        timer:sleep(Delay),
        case traverse_tasks_scheduler:get_next_group(PoolName) of
            {error, no_groups} ->
                ok;
            {ok, GroupId} ->
                ToStart = case traverse_task_list:get_first_scheduled_link(PoolName, GroupId, Executor) of
                    {ok, not_found} ->
                        % Group looks empty - deregister; abort means a task
                        % appeared concurrently.
                        case deregister_group_and_check(PoolName, GroupId, Executor) of
                            ok ->
                                empty_group;
                            {abort, ID} ->
                                {ok, ID}
                        end;
                    {ok, ID} ->
                        {ok, ID}
                end,
                case ToStart of
                    empty_group ->
                        % Try the next group immediately.
                        retry_run(PoolName, Executor, 0);
                    {ok, TaskId} ->
                        case traverse_tasks_scheduler:increment_ongoing_tasks_and_choose_node(PoolName) of
                            {ok, Node} ->
                                case rpc:call(Node, ?MODULE, run_task, [PoolName, TaskId, Executor]) of
                                    ok ->
                                        ok;
                                    start_interrupted ->
                                        % TODO VFS-6297 - what if node crashes before next line
                                        traverse_tasks_scheduler:decrement_ongoing_tasks(PoolName),
                                        retry_run(PoolName, Executor, 10000)
                                end;
                            {error, limit_exceeded} ->
                                ok
                        end
                end
        end
    end),
    ok.
% Starts a scheduled task if the parallel-task limit allows it; otherwise
% records the task for local execution later. Handles both tasks scheduled
% remotely and tasks waiting locally.
-spec maybe_run_scheduled_task(pool(), callback_module(), id(), task(), environment_id(),
    job(), job_id(), master_job_mode()) -> ok.
maybe_run_scheduled_task(PoolName, CallbackModule, TaskId, Task, Executor, Job, MainJobId, MasterJobMode) ->
    case traverse_tasks_scheduler:increment_ongoing_tasks_and_choose_node(PoolName) of
        {ok, Node} ->
            ExtendedCtx = get_extended_ctx(CallbackModule, Job),
            case traverse_task:start(ExtendedCtx, PoolName, CallbackModule, TaskId, #{master_jobs_delegated => 1}) of
                ok ->
                    ok = task_callback(CallbackModule, task_started, TaskId, PoolName),
                    ok = rpc:call(Node, ?MODULE, run_on_master_pool, [PoolName, ?MASTER_POOL_NAME(PoolName),
                        ?SLAVE_POOL_NAME(PoolName), CallbackModule, ExtendedCtx, Executor, TaskId, Job,
                        MainJobId, MasterJobMode]);
                {error, start_aborted} ->
                    % Start lost a race (e.g. cancel) - give the slot back.
                    % TODO VFS-6297 - what if node crashes before next line
                    traverse_tasks_scheduler:decrement_ongoing_tasks(PoolName)
            end;
        {error, limit_exceeded} ->
            traverse_task:schedule_for_local_execution(PoolName, TaskId, Task)
    end.
%% Deregisters a group that appears to have no scheduled tasks, then re-checks
%% for a race with concurrent task creation: if a task link showed up in the
%% meantime, the group is re-registered and the racing task is reported.
-spec deregister_group_and_check(pool(), group(), environment_id()) -> ok | {abort, traverse:id()}.
deregister_group_and_check(PoolName, Group, Executor) ->
    ok = traverse_tasks_scheduler:deregister_group(PoolName, Group),
    % check for races with task creation
    case traverse_task_list:get_first_scheduled_link(PoolName, Group, Executor) of
        {ok, not_found} ->
            ok;
        {ok, RacingTask} ->
            traverse_tasks_scheduler:register_group(PoolName, Group),
            {abort, RacingTask}
    end.
%% Builds the datastore ctx used for task documents: the base traverse ctx
%% optionally extended by the callback module's get_sync_info/1 (if exported).
-spec get_extended_ctx(callback_module(), job()) -> ctx().
get_extended_ctx(CallbackModule, Job) ->
    CtxExtension =
        case erlang:function_exported(CallbackModule, get_sync_info, 1) of
            true ->
                {ok, Extension} = CallbackModule:get_sync_info(Job),
                Extension;
            false ->
                #{}
        end,
    maps:merge(traverse_task:get_ctx(), CtxExtension).
%% Invokes an optional lifecycle callback (e.g. task_started, task_finished)
%% on the callback module. Missing callbacks are silently skipped; exported
%% ones must return ok.
-spec task_callback(callback_module(), task_callback(), id(), pool()) -> ok.
task_callback(CallbackModule, Method, TaskId, PoolName) ->
    case erlang:function_exported(CallbackModule, Method, 2) of
        false ->
            ok;
        true ->
            ok = CallbackModule:Method(TaskId, PoolName)
    end.
%% Asks the callback module what to do with a task after a node crash.
%% Defaults to 'restart' when the module does not export node_crash_policy/2.
-spec get_node_crash_policy(callback_module(), id(), pool()) -> node_crash_policy().
get_node_crash_policy(CallbackModule, TaskId, PoolName) ->
    case erlang:function_exported(CallbackModule, node_crash_policy, 2) of
        false ->
            restart;
        true ->
            CallbackModule:node_crash_policy(TaskId, PoolName)
    end.
%% Renders a job for log messages, preferring the callback module's own
%% to_string/1 when exported, otherwise falling back to a generic ~p dump.
-spec to_string(callback_module(), job()) -> term().
to_string(CallbackModule, Job) ->
    case erlang:function_exported(CallbackModule, to_string, 1) of
        false ->
            str_utils:format_bin("~p", [Job]);
        true ->
            CallbackModule:to_string(Job)
    end.
% Repairs all ongoing tasks of the given executor found via ongoing-task
% links (used on node restart). Returns a map of task id -> extended ctx for
% the repaired tasks.
-spec repair_ongoing_tasks(pool(), environment_id(), node()) -> tasks_ctxs().
repair_ongoing_tasks(Pool, Executor, Node) ->
    {ok, TaskIds, _} = traverse_task_list:list(Pool, ongoing, #{tree_id => Executor}),
    % Repair all tasks found using links (links repair is not needed)
    lists:foldl(fun(Id, Acc) ->
        % The per-task status is ignored here; only the accumulated ctx map
        % matters for the caller.
        {_, UpdatedAcc} = repair_ongoing_task_and_add_to_map(Pool, Executor, Node, Id, Acc, false),
        UpdatedAcc
    end, #{}, TaskIds).
% Repairs a single ongoing task (fixing its description and, when FixLink is
% true, its ongoing link) and records its extended ctx in the accumulator map.
% Matching on Executor and Node in the record pattern restricts the repair to
% tasks owned by this executor/node; tasks of other nodes return other_node.
-spec repair_ongoing_task_and_add_to_map(pool(), environment_id(), node(), id(), tasks_ctxs(), boolean()) ->
    {ok | other_node | not_found, tasks_ctxs()}.
repair_ongoing_task_and_add_to_map(Pool, Executor, Node, Id, TaskIdToCtxMap, FixLink) ->
    case traverse_task:get_execution_info(Pool, Id) of
        {ok, #task_execution_info{
            callback_module = CallbackModule,
            executor = Executor,
            main_job_id = MainJobId,
            node = Node,
            start_time = Timestamp
        }} ->
            case CallbackModule:get_job(MainJobId) of
                {ok, Job, _, _} ->
                    ExtendedCtx = get_extended_ctx(CallbackModule, Job),
                    fix_task_description(Pool, Executor, Node, Id, TaskIdToCtxMap, FixLink, ExtendedCtx, Timestamp);
                JobError ->
                    % Main job is gone - remember the task with a ctx_not_found
                    % marker so it can still be force-finished later.
                    ?warning("Error getting main job ~p for task id ~p (pool ~p, executor ~p, node ~p): ~p",
                        [MainJobId, Id, Pool, Executor, Node, JobError]),
                    {not_found, TaskIdToCtxMap#{Id => ctx_not_found}}
            end;
        {ok, #task_execution_info{}} ->
            % Executor or node mismatch - this task belongs elsewhere.
            {other_node, TaskIdToCtxMap};
        InfoError ->
            ?warning("Error getting execution info for task id ~p (pool ~p, executor ~p, node ~p): ~p",
                [Id, Pool, Executor, Node, InfoError]),
            {not_found, TaskIdToCtxMap}
    end.
% Fixes the task's description document after a crash and optionally restores
% its ongoing link; on success the task's ctx is added to the accumulator map.
-spec fix_task_description(pool(), environment_id(), node(), id(), tasks_ctxs(), boolean(), ctx(), timestamp()) ->
    {ok | other_node, tasks_ctxs()}.
fix_task_description(Pool, Executor, Node, Id, TaskIdToCtxMap, FixLink, ExtendedCtx, Timestamp) ->
    case traverse_task:fix_description(ExtendedCtx, Pool, Id, Node) of
        {ok, _} when FixLink ->
            % Task was found without its ongoing link - recreate it.
            ok = traverse_task_list:add_link(ExtendedCtx,
                Pool, ongoing, Executor, Id, Timestamp),
            {ok, TaskIdToCtxMap#{Id => ExtendedCtx}};
        {ok, _} ->
            {ok, TaskIdToCtxMap#{Id => ExtendedCtx}};
        {error, other_node} ->
            {other_node, TaskIdToCtxMap}
    end.
% Collects all jobs linked to the given node for the callback module, grouped
% by task. Jobs whose task ctx is unknown trigger a late task repair (with
% link fixing); jobs whose data or task cannot be resolved end up in the
% JobsWitoutCtx list so their links can be cleaned up.
-spec get_tasks_jobs(pool(), callback_module(), node(),environment_id(), tasks_ctxs()) ->
    {JobsPerTask :: jobs_per_task(), JobsWitoutCtx :: [job_id()],
        UpdatedTaskIdToCtxMap :: tasks_ctxs()} | no_return().
get_tasks_jobs(PoolName, CallbackModule, Node, Executor, InitialTaskIdToCtxMap) ->
    {ok, JobIds} = traverse_task_list:list_node_jobs(PoolName, CallbackModule, Node),
    lists:foldl(fun(JobId, {JobsPerTask, JobsWitoutCtx, TaskIdToCtxMap}) ->
        case CallbackModule:get_job(JobId) of
            {ok, Job, _, TaskId} ->
                case maps:get(TaskId, TaskIdToCtxMap, ctx_not_found) of
                    ctx_not_found ->
                        ?warning("Found job ~s assigned to task ~s (callback module ~p, pool name ~p, node ~p) "
                        "which does not exist (anymore?). Job data:~n~p~nTrying to find task without link", [
                            JobId, TaskId, CallbackModule, PoolName, Node, Job
                        ]),
                        % Task has not been found using task links so it has not been repaired
                        % Repair it and fix its link
                        case repair_ongoing_task_and_add_to_map(PoolName, Executor, Node, TaskId, TaskIdToCtxMap, true) of
                            {ok, UpdatedTaskIdToCtxMap} ->
                                {JobsPerTask#{TaskId => [{JobId, Job}]}, JobsWitoutCtx, UpdatedTaskIdToCtxMap};
                            {_, UpdatedTaskIdToCtxMap} ->
                                % Repair failed - job has no usable ctx.
                                {JobsPerTask, [JobId | JobsWitoutCtx], UpdatedTaskIdToCtxMap}
                        end;
                    _ExtendedCtx ->
                        % Task is known - append the job to its list.
                        TaskJobs = maps:get(TaskId, JobsPerTask, []),
                        {JobsPerTask#{TaskId => [{JobId, Job} | TaskJobs]}, JobsWitoutCtx, TaskIdToCtxMap}
                end;
            Error ->
                ?warning("Error getting job ~p (callback module ~p, pool name ~p, node ~p): ~p",
                    [JobId, CallbackModule, PoolName, Node, Error]),
                {JobsPerTask, [JobId | JobsWitoutCtx], TaskIdToCtxMap}
        end
    end, {#{}, [], InitialTaskIdToCtxMap}, JobIds).
% Decides, for each repaired task, whether it should be restarted or cancelled
% after a node failure. Tasks without any surviving jobs cannot be restarted
% and are always cancelled. When the local DB closed uncleanly, the callback
% module's node_crash_policy is consulted per task.
% NOTE: the misspelling in the function name ("clasiffy") is kept because the
% function is called by name from outside this fragment.
-spec clasiffy_tasks_to_restart_and_cancel(tasks_ctxs(), jobs_per_task(), pool(), callback_module(),
    pool_options(), node()) -> {TasksToRestart :: [id()], TasksToCancel :: [id()]} | no_return().
clasiffy_tasks_to_restart_and_cancel(TaskIdToCtxMap, JobsPerTask, PoolName, CallbackModule, Options, Node) ->
    ShouldRestart = maps:get(restart, Options, true),
    DBError = datastore_worker:get_application_closing_status() =:= ?CLOSING_PROCEDURE_FAILED,
    LocalNode = node(),
    TasksWithJobs = maps:keys(JobsPerTask),
    OtherTasks = maps:keys(maps:without(TasksWithJobs, TaskIdToCtxMap)),
    case {ShouldRestart, DBError andalso Node =:= LocalNode} of
        {true, true} ->
            % DB closed uncleanly on this node - let each task's policy decide.
            lists:foldl(fun(TaskId, {ToRestartAcc, ToCancelAcc}) ->
                case get_node_crash_policy(CallbackModule, TaskId, PoolName) of
                    cancel_task -> {ToRestartAcc, [TaskId | ToCancelAcc]};
                    _ -> {[TaskId | ToRestartAcc], ToCancelAcc}
                end
            end, {[], OtherTasks}, TasksWithJobs);
        {true, false} ->
            {TasksWithJobs, OtherTasks};
        {false, _} ->
            % Restarts disabled for this pool - cancel everything.
            {[], maps:keys(TaskIdToCtxMap)}
    end.
% Force-finishes all tasks selected for cancellation (using their repaired ctx
% or the plain traverse ctx when none was found) and removes the job links of
% their jobs, as well as the links of jobs that have no associated ctx at all.
-spec clean_tasks_and_jobs(tasks_ctxs(), jobs_per_task(), [id()], [job_id()],
    pool(), callback_module(), node()) -> ok.
clean_tasks_and_jobs(TaskIdToCtxMap, JobsPerTask, TasksToCancel, JobsWitoutCtx, PoolName, CallbackModule, Node) ->
    lists:foreach(fun(TaskId) ->
        case maps:get(TaskId, TaskIdToCtxMap) of
            ctx_not_found -> traverse_task:finish(traverse_task:get_ctx(), PoolName, CallbackModule, TaskId, true, force);
            ExtendedCtx -> traverse_task:finish(ExtendedCtx, PoolName, CallbackModule, TaskId, true, force)
        end,
        clean_jobs(maps:get(TaskId, JobsPerTask, []), PoolName, CallbackModule, Node)
    end, TasksToCancel),
    clean_jobs(JobsWitoutCtx, PoolName, CallbackModule, Node).
%% Deletes the node job link of every given job (best-effort, results ignored).
-spec clean_jobs([job_id()], pool(), callback_module(), node()) -> ok.
clean_jobs(JobIds, PoolName, CallbackModule, Node) ->
    _ = [traverse_task_list:delete_job_link(PoolName, CallbackModule, Node, JobId)
        || JobId <- JobIds],
    ok.
% Restarts the surviving jobs of tasks selected for restart, re-delegating
% each job to the local master pool. Jobs originally linked to a remote node
% get their link moved to the local node first.
-spec restart_jobs(tasks_ctxs(), jobs_per_task(), pool(), callback_module(), environment_id(), node()) ->
    ok | no_return().
restart_jobs(TaskIdToCtxMap, JobsPerTask, PoolName, CallbackModule, Executor, Node) ->
    LocalNode = node(),
    lists:foreach(fun({TaskId, JobsToRestart}) ->
        {ok, #task_execution_info{
            master_job_mode = MasterJobMode
        }} = traverse_task:get_execution_info(PoolName, TaskId),
        lists:foreach(fun({JobId, Job}) ->
            % LocalNode is already bound, so this clause matches only when the
            % job's node IS the local node (pattern acts as equality check);
            % otherwise the link is moved from the remote node to this one.
            case Node of
                LocalNode -> ok;
                _ ->
                    traverse_task_list:add_job_link(PoolName, CallbackModule, JobId),
                    traverse_task_list:delete_job_link(PoolName, CallbackModule, Node, JobId)
            end,
            ExtendedCtx = maps:get(TaskId, TaskIdToCtxMap),
            % Account for the re-delegation before putting the job back on
            % the pool.
            {ok, _, _} = traverse_task:update_description(ExtendedCtx, PoolName, TaskId, #{
                master_jobs_delegated => 1
            }),
            ok = run_on_master_pool(PoolName, ?MASTER_POOL_NAME(PoolName), ?SLAVE_POOL_NAME(PoolName),
                CallbackModule, ExtendedCtx, Executor, TaskId, Job, JobId, MasterJobMode)
        end, JobsToRestart)
    end, maps:to_list(JobsPerTask)).
% On node restart, drains the queue of waiting tasks: repeatedly takes the
% next group and either starts its first scheduled task or deregisters the
% (empty) group, until no groups remain or the task limit is hit.
-spec schedule_waiting_tasks_if_possible(pool(), environment_id()) -> ok | no_return().
schedule_waiting_tasks_if_possible(PoolName, Executor) ->
    case traverse_tasks_scheduler:get_next_group(PoolName) of
        {error, no_groups} ->
            ok;
        {ok, GroupId} ->
            case traverse_task_list:get_first_scheduled_link(PoolName, GroupId, Executor) of
                {ok, not_found} ->
                    deregister_group_and_schedule_waiting_tasks_if_possible(PoolName, Executor, GroupId);
                {ok, TaskId} ->
                    schedule_task_and_check_other_waiting(PoolName, Executor, TaskId)
            end
    end.
% Deregisters a group with no scheduled tasks (logging the outcome) and then
% continues draining the waiting-task queue. Both branches tail-call back into
% schedule_waiting_tasks_if_possible/2, so the loop runs in constant stack.
-spec deregister_group_and_schedule_waiting_tasks_if_possible(pool(), environment_id(), group()) ->
    ok | no_return().
deregister_group_and_schedule_waiting_tasks_if_possible(PoolName, Executor, GroupId) ->
    case deregister_group_and_check(PoolName, GroupId, Executor) of
        ok ->
            ?info("Group ~p deregistered on restart of node for pool ~p and executor ~p",
                [GroupId, PoolName, Executor]),
            schedule_waiting_tasks_if_possible(PoolName, Executor);
        {abort, _TaskId} ->
            % A task appeared concurrently; the group stays registered and the
            % task will be picked up in the next iteration.
            ?info("Group ~p deregistration on restart of node aborted for pool ~p and executor ~p",
                [GroupId, PoolName, Executor]),
            schedule_waiting_tasks_if_possible(PoolName, Executor)
    end.
% Tries to start a single waiting task on a node chosen by the scheduler and,
% on success or interrupted start, keeps draining the waiting-task queue.
% Stops when the parallel-task limit is reached.
-spec schedule_task_and_check_other_waiting(pool(), environment_id(), id()) -> ok | no_return().
schedule_task_and_check_other_waiting(PoolName, Executor, TaskId) ->
    case traverse_tasks_scheduler:increment_ongoing_tasks_and_choose_node(PoolName) of
        {ok, Node} ->
            case rpc:call(Node, ?MODULE, run_task, [PoolName, TaskId, Executor]) of
                ok ->
                    ?info("Task ~p started on restart of node for pool ~p and executor ~p",
                        [TaskId, PoolName, Executor]),
                    schedule_waiting_tasks_if_possible(PoolName, Executor);
                start_interrupted ->
                    ?info("Task ~p start interrupted on restart of node for pool ~p and executor ~p",
                        [TaskId, PoolName, Executor]),
                    % Give the reserved slot back before continuing.
                    % TODO VFS-6297 - what if node crashes before next line
                    traverse_tasks_scheduler:decrement_ongoing_tasks(PoolName),
                    schedule_waiting_tasks_if_possible(PoolName, Executor)
            end;
        {error, limit_exceeded} ->
            ok
    end.
% Helper behind the ?log_error_with_stacktrace macro: appends the stacktrace
% to the given format/args and logs at error level.
-spec log_error_with_stacktrace(term(), string(), [term()]) -> ok.
log_error_with_stacktrace(Stacktrace, Format, Args) ->
    ?error(Format ++ "~nStacktrace:~n~p", Args ++ [Stacktrace]).
%% The fun_chain parse transform. See README.md for usage
-module(fun_chain).
-export([parse_transform/2]).

% Entry point invoked by the compiler: walks the whole parse tree, expanding
% fun_chain:first/fun_chain:last pseudo-calls into chained function calls.
parse_transform(ParseTree, _Options) -> deep_walk(ParseTree).
% Top level function -> here we have to report error if we have any.
% Clause order matters: the function clause must unwrap errors at the top,
% the fun_chain call clause must fire before the generic tuple clause.
deep_walk({function, _, _, _, _} = FunctionDeclaration) ->
    result_or_error(deep_walk_tuple(FunctionDeclaration));
% fun_chain call: fun_chain:first(...)/fun_chain:last(...) - ArgumentPosition
% is the atom after the colon; Clauses are the calls to be chained.
deep_walk({call, Line, {remote, _, {atom, _, fun_chain}, {atom, _, ArgumentPosition}}, Clauses}) ->
    chain_calls(Line, ArgumentPosition, Clauses);
% Generic traversal of any other AST node.
deep_walk(List) when is_list(List) -> deep_walk_list(List);
deep_walk(Tuple) when is_tuple(Tuple) -> deep_walk_tuple(Tuple);
deep_walk(Other) -> Other.
% Walking through tuples and lists. Each subvalue must be inspected for errors. If an error
% has occurred, it is propagated up the stack
deep_walk_tuple(Tuple) ->
    %% Walk the tuple's elements as a list, then rebuild the tuple.
    parse_chain(deep_walk_list(tuple_to_list(Tuple)), fun erlang:list_to_tuple/1).
%% Walks every element of a list, short-circuiting on parse errors.
deep_walk_list(List) ->
    %% The accumulator is built in reverse, hence the final lists:reverse/1.
    parse_chain(deep_walk_list(List, []), fun lists:reverse/1).
deep_walk_list([], Acc) -> Acc;
deep_walk_list([H|T], Acc) ->
    parse_chain(deep_walk(H), fun(Result) ->
        deep_walk_list(T, [Result | Acc])
    end).
%% Chains the clauses of a fun_chain:first/last call: the first clause is the
%% seed value, every following call receives the previous result at the
%% requested argument position. Any other fun_chain function is an error.
chain_calls(Line, _ArgPos, []) ->
    make_parse_error(Line, "clauses are empty");
chain_calls(_Line, first, [Initial | Calls]) ->
    do_chain_calls(first, Calls, deep_walk(Initial));
chain_calls(_Line, last, [Initial | Calls]) ->
    do_chain_calls(last, Calls, deep_walk(Initial));
chain_calls(Line, ArgPos, _Calls) ->
    make_parse_error(Line, io_lib:format("invalid function ~p", [ArgPos])).
%% Folds the remaining calls into one nested call expression, feeding the
%% result of each step into the next call at the chosen argument position.
do_chain_calls(_, [], LastResult) -> LastResult;
do_chain_calls(ArgumentPosition, [CurrentCall | RemainingCalls], LastResult) ->
    parse_chain(
        add_argument(ArgumentPosition, CurrentCall, LastResult),
        fun(Args) ->
            do_chain_calls(ArgumentPosition, RemainingCalls, Args)
        end
    ).
% Appending of arguments to appropriate place.
%% Injects Argument into a call node as its first or last argument; a term
%% that is not a call node is a usage error reported at its line.
%% NOTE(review): if deep_walk(Args) itself yields a {parse_error, _} marker,
%% the `++` / cons below would crash rather than propagate it - confirm
%% whether nested fun_chain errors inside argument lists can occur here.
add_argument(last, {call, Line, Fun, Args}, Argument) ->
    {call, Line, deep_walk(Fun), deep_walk(Args) ++ [Argument]};
add_argument(first, {call, Line, Fun, Args}, Argument) ->
    {call, Line, deep_walk(Fun), [Argument | deep_walk(Args)]};
add_argument(_, Term, _) -> make_parse_error(element(2, Term), "not a function call").
%% Maybe-monad style chaining: applies Fun to Value unless Value already is a
%% {parse_error, _} marker, which is returned untouched (short-circuit).
parse_chain({parse_error, _} = Error, _Fun) ->
    Error;
parse_chain(Value, Fun) ->
    Fun(Value).
%% Unwraps a {parse_error, _} marker into the plain erl_parse error tuple;
%% any other value is already a valid result and is returned as-is.
result_or_error(Value) ->
    case Value of
        {parse_error, Error} -> Error;
        Result -> Result
    end.
%% Builds an erl_parse-style error for the given line, wrapped in a
%% {parse_error, _} tuple so it can be propagated safely up the call stack.
make_parse_error(Line, Message) ->
    Formatted = io_lib:format("fun_chain error: ~s", [Message]),
    {parse_error, {error, {Line, erl_parse, [Formatted]}}}.
%%% Copyright 2010-2013 <NAME> <<EMAIL>>,
%%% <NAME> <<EMAIL>>
%%% and <NAME> <<EMAIL>>
%%%
%%% This file is part of PropEr.
%%%
%%% PropEr is free software: you can redistribute it and/or modify
%%% it under the terms of the GNU General Public License as published by
%%% the Free Software Foundation, either version 3 of the License, or
%%% (at your option) any later version.
%%%
%%% PropEr is distributed in the hope that it will be useful,
%%% but WITHOUT ANY WARRANTY; without even the implied warranty of
%%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
%%% GNU General Public License for more details.
%%%
%%% You should have received a copy of the GNU General Public License
%%% along with PropEr. If not, see <http://www.gnu.org/licenses/>.
%%% @copyright 2010-2013 <NAME>, <NAME> and <NAME>
%%% @version {@version}
%%% @author <NAME>
%%% @doc This module defines the `proper_statem' behaviour, useful for testing
%%% stateful reactive systems whose internal state and side-effects are
%%% specified via an abstract state machine. Given a callback module
%%% implementing the `proper_statem' behaviour (i.e. defining an abstract state
%%% machine of the system under test), PropEr can generate random symbolic
%%% sequences of calls to that system.
%%% As a next step, generated symbolic calls are actually performed, while
%%% monitoring the system's responses to ensure it behaves as expected. Upon
%%% failure, the shrinking mechanism attempts to find a minimal sequence of
%%% calls provoking the same error.
%%%
%%% When including the <code>"proper/include/proper.hrl"</code> header file,
%%% all <a href="#index">API functions </a> of {@module} are automatically
%%% imported, unless `PROPER_NO_IMPORTS' is defined.
%%%
%%% === The role of commands ===
%%% Testcases generated for testing a stateful system are lists of symbolic API
%%% calls to that system. Symbolic representation has several benefits, which
%%% are listed here in increasing order of importance:
%%% <ul>
%%% <li>Generated testcases are easier to read and understand.</li>
%%% <li>Failing testcases are easier to shrink.</li>
%%% <li>The generation phase is side-effect free and this results in
%%% repeatable testcases, which is essential for correct shrinking.</li>
%%% </ul>
%%% Since the actual results of symbolic calls are not known at generation time,
%%% we use symbolic variables ({@type symb_var()}) to refer to them.
%%% A command ({@type command()}) is a symbolic term, used to bind a symbolic
%%% variable to the result of a symbolic call. For example:
%%%
%%% ```[{set, {var,1}, {call,erlang,put,[a,42]}},
%%% {set, {var,2}, {call,erlang,erase,[a]}},
%%% {set, {var,3}, {call,erlang,put,[b,{var,2}]}}]'''
%%%
%%% is a command sequence that could be used to test the process dictionary.
%%% In this example, the first call stores the pair `{a,42}' in the process
%%% dictionary, while the second one deletes it. Then, a new pair `{b,{var,2}}'
%%% is stored. `{var,2}' is a symbolic variable bound to the result of
%%% `erlang:erase/1'. This result is not known at generation time, since none of
%%% these operations is performed at that time. After evaluating the command
%%% sequence at runtime, the process dictionary will eventually contain the
%%% pair `{b,42}'.
%%%
%%% === The abstract model-state ===
%%% In order to be able to test impure code, we need a way to track its
%%% internal state (at least the useful part of it). To this end, we use an
%%% abstract state machine representing the possible configurations of the
%%% system under test. When referring to the <i>model state</i>, we mean the
%%% state of the abstract state machine. The <i>model state</i> can be either
%%% symbolic or dynamic:
%%% <ul>
%%% <li>During command generation, we use symbolic variables to bind the
%%% results of symbolic calls. Therefore, the model state might
%%% (and usually does) contain symbolic variables and/or symbolic calls, which
%%% are necessary to operate on symbolic variables. Thus, we refer to it as
%%% symbolic state. For example, assuming that the internal state of the
%%% process dictionary is modeled as a proplist, the model state after
%%% generating the previous command sequence will be `[{b,{var,2}}]'.</li>
%%% <li>During runtime, symbolic calls are evaluated and symbolic variables are
%%% replaced by their corresponding real values. Now we refer to the state as
%%% dynamic state. After running the previous command sequence, the model state
%%% will be `[{b,42}]'.</li>
%%% </ul>
%%%
%%% === The callback functions ===
%%% The following functions must be exported from the callback module
%%% implementing the abstract state machine:
%%% <ul>
%%% <li>`initial_state() ::' {@type symbolic_state()}
%%% <p>Specifies the symbolic initial state of the state machine. This state
%%% will be evaluated at command execution time to produce the actual initial
%%% state. The function is not only called at command generation time, but
%%% also in order to initialize the state every time the command sequence is
%%% run (i.e. during normal execution, while shrinking and when checking a
%%% counterexample). For this reason, it should be deterministic and
%%% self-contained.</p></li>
%%% <li>`command(S::'{@type symbolic_state()}`) ::' {@type proper_types:type()}
%%% <p>Generates a symbolic call to be included in the command sequence,
%%% given the current state `S' of the abstract state machine. However,
%%% before the call is actually included, a precondition is checked. This
%%% function will be repeatedly called to produce the next call to be
%%% included in the test case.</p></li>
%%% <li>`precondition(S::'{@type symbolic_state()}`,
%%% Call::'{@type symb_call()}`) :: boolean()'
%%% <p>Specifies the precondition that should hold so that `Call' can be
%%% included in the command sequence, given the current state `S' of the
%%% abstract state machine. In case precondition doesn't hold, a new call is
%%% chosen using the `command/1' generator. If preconditions are very strict,
%%% it will take a lot of tries for PropEr to randomly choose a valid command.
%%% Testing will be stopped in case the `constraint_tries' limit is reached
%%% (see the 'Options' section in the {@link proper} module documentation).
%%% Preconditions are also important for correct shrinking of failing
%%% testcases. When shrinking command sequences, we try to eliminate commands
%%% that do not contribute to failure, ensuring that all preconditions still
%%% hold. Validating preconditions is necessary because during shrinking we
%%% usually attempt to perform a call with the system being in a state
%%% different from the state it was when initially running the test.</p></li>
%%% <li>`postcondition(S::'{@type dynamic_state()}`,
%%% Call::'{@type symbolic_call()}`,
%%% Res::term()) :: boolean()'
%%% <p>Specifies the postcondition that should hold about the result `Res' of
%%% performing `Call', given the dynamic state `S' of the abstract state
%%% machine prior to command execution. This function is called during
%%% runtime, this is why the state is dynamic.</p></li>
%%% <li>`next_state(S::'{@type symbolic_state()} `|' {@type dynamic_state()}`,
%%% Res::term(),
%%% Call::'{@type symbolic_call()}`) ::'
%%% {@type symbolic_state()} `|' {@type dynamic_state()}
%%% <p>Specifies the next state of the abstract state machine, given the
%%% current state `S', the symbolic `Call' chosen and its result `Res'. This
%%% function is called both at command generation and command execution time
%%% in order to update the model state, therefore the state `S' and the
%%% result `Res' can be either symbolic or dynamic.</p></li>
%%% </ul>
%%%
%%% === The property used ===
%%% Each test consists of two phases:
%%% <ul>
%%% <li>As a first step, PropEr generates random symbolic command sequences
%%% deriving information from the callback module implementing the abstract
%%% state machine. This is the role of {@link commands/1} generator.</li>
%%% <li>As a second step, command sequences are executed so as to check that
%%% the system behaves as expected. This is the role of
%%% {@link run_commands/2}, a function that evaluates a symbolic command
%%% sequence according to an abstract state machine specification.</li>
%%% </ul>
%%%
%%% These two phases are encapsulated in the following property, which can be
%%% used for testing the process dictionary:
%%%
%%% ```prop_pdict() ->
%%% ?FORALL(Cmds, proper_statem:commands(?MODULE),
%%% begin
%%% {_History, _State, Result} = proper_statem:run_commands(?MODULE, Cmds),
%%% cleanup(),
%%% Result =:= ok
%%% end).'''
%%%
%%% When testing impure code, it is very important to keep each test
%%% self-contained. For this reason, almost every property for testing stateful
%%% systems contains some clean-up code. Such code is necessary to put the
%%% system in a known state, so that the next test can be executed
%%% independently from previous ones.
%%%
%%% == Parallel testing ==
%%% After ensuring that a system's behaviour can be described via an abstract
%%% state machine when commands are executed sequentially, it is possible to
%%% move to parallel testing. The same state machine can be used to generate
%%% command sequences that will be executed in parallel to test for race
%%% conditions. A parallel testcase ({@type parallel_testcase()}) consists of
%%% a sequential and a parallel component. The sequential component is a
%%% command sequence that is run first to put the system in a random state.
%%% The parallel component is a list containing 2 command sequences to be
%%% executed in parallel, each of them in a separate newly-spawned process.
%%%
%%% Generating parallel test cases involves the following actions. Initially,
%%% we generate a command sequence deriving information from the abstract
%%% state machine specification, as in the case of sequential statem testing.
%%% Then, we parallelize a random suffix (up to 12 commands) of the initial
%%% sequence by splitting it into 2 subsequences that will be executed
%%% concurrently. Limitations arise from the fact that each subsequence should
%%% be a <i>valid</i> command sequence (i.e. all commands should satisfy
%%% preconditions and use only symbolic variables bound to the results of
%%% preceding calls in the same sequence). Furthermore, we apply an additional
%%% check: we have to ensure that preconditions are satisfied in all possible
%%% interleavings of the concurrent tasks. Otherwise, an exception might be
%%% raised during parallel execution and lead to unexpected (and unwanted) test
%%% failure. In case these constraints cannot be satisfied for a specific test
%%% case, the test case will be executed sequentially. Then an `f' is printed
%%% on screen to inform the user. This usually means that preconditions need
%%% to become less strict for parallel testing to work.
%%%
%%% After running a parallel testcase, PropEr uses the state machine
%%% specification to check if the results observed could have been produced by
%%% a possible serialization of the parallel component. If no such serialization
%%% is possible, then an atomicity violation has been detected. In this case,
%%% the shrinking mechanism attempts to produce a counterexample that is minimal
%%% in terms of concurrent operations. Properties for parallel testing are very
%%% similar to those used for sequential testing.
%%%
%%% ```prop_parallel_testing() ->
%%% ?FORALL(Testcase, proper_statem:parallel_commands(?MODULE),
%%% begin
%%% {_Sequential, _Parallel, Result} = proper_statem:run_parallel_commands(?MODULE, Testcase),
%%% cleanup(),
%%% Result =:= ok
%%% end).'''
%%%
%%% Please note that the actual interleaving of commands of the parallel
%%% component depends on the Erlang scheduler, which is too deterministic.
%%% For PropEr to be able to detect race conditions, the code of the system
%%% under test should be instrumented with `erlang:yield/0' calls to the
%%% scheduler.
%%% @end
-module(proper_statem).
-export([behaviour_info/1]).
-export([commands/1, commands/2, parallel_commands/1, parallel_commands/2,
more_commands/2]).
-export([run_commands/2, run_commands/3, run_parallel_commands/2,
run_parallel_commands/3]).
-export([state_after/2, command_names/1, zip/2]).
-include("proper_internal.hrl").
-define(WORKERS, 2).
-define(LIMIT, 12).
%% -----------------------------------------------------------------------------
%% Exported only for testing purposes
%% -----------------------------------------------------------------------------
-export([index/2, all_insertions/3, insert_all/2]).
-export([is_valid/4, args_defined/2]).
-export([get_next/6, mk_first_comb/3]).
-export([execute/4, check/6, run/3, get_initial_state/2]).
%% -----------------------------------------------------------------------------
%% Type declarations
%% -----------------------------------------------------------------------------
%% @type symbolic_state()
-type symbolic_state() :: term().
%% @type dynamic_state()
-type dynamic_state() :: term().
-type symb_var() :: {'var',pos_integer()}.
-type symb_call() :: {'call',mod_name(),fun_name(),[term()]}.
-type command() :: {'set',symb_var(),symb_call()}
| {'init',symbolic_state()}.
-type command_list() :: [command()].
-type parallel_testcase() :: {command_list(),[command_list()]}.
-type parallel_history() :: [{command(),term()}].
-type history() :: [{dynamic_state(),term()}].
-type statem_result() :: 'ok'
| 'initialization_error'
| {'precondition', 'false' | proper:exception()}
| {'postcondition', 'false' | proper:exception()}
| proper:exception()
| 'no_possible_interleaving'.
-type index() :: pos_integer().
-type indices() :: [index()].
-type combination() :: [{pos_integer(),indices()}].
-type lookup() :: orddict:orddict().
-export_type([symb_var/0, symb_call/0, statem_result/0]).
%% -----------------------------------------------------------------------------
%% Proper_statem behaviour
%% ----------------------------------------------------------------------------
%% @doc Specifies the callback functions that should be exported from a module
%% implementing the `proper_statem' behaviour.
-spec behaviour_info('callbacks') -> [{fun_name(),arity()}].
behaviour_info(callbacks) ->
    [{initial_state,0}, {command,1}, {precondition,2}, {postcondition,3},
     {next_state,3}];
behaviour_info(_Other) ->
    undefined.
%% -----------------------------------------------------------------------------
%% Sequential command generation
%% -----------------------------------------------------------------------------
%% @doc A special PropEr type which generates random command sequences,
%% according to an abstract state machine specification. The function takes as
%% input the name of a callback module, which contains the state machine
%% specification. The initial state is computed by `Mod:initial_state/0'.
-spec commands(mod_name()) -> proper_types:type().
commands(Mod) ->
    %% Generation itself is noshrink'ed; shrinking is delegated to
    %% shrink_list, and the outer ?SUCHTHAT re-validates each shrunk
    %% candidate against the model (preconditions and variable scoping).
    ?LET(InitialState, ?LAZY(Mod:initial_state()),
         ?SUCHTHAT(
            Cmds,
            ?LET(List,
                 ?SIZED(Size,
                        proper_types:noshrink(
                          commands(Size, Mod, InitialState, 1))),
                 proper_types:shrink_list(List)),
            is_valid(Mod, InitialState, Cmds, []))).
%% @doc Similar to {@link commands/1}, but generated command sequences always
%% start at a given state. In this case, the first command is always
%% `{init,InitialState}' and is used to correctly initialize the state
%% every time the command sequence is run (i.e. during normal execution,
%% while shrinking and when checking a counterexample). In this case,
%% `Mod:initial_state/0' is never called.
-spec commands(mod_name(), symbolic_state()) -> proper_types:type().
commands(Mod, InitialState) ->
    %% Same structure as commands/1, with the explicit {init,_} command
    %% prepended to every generated (and shrunk) sequence.
    ?SUCHTHAT(
       Cmds,
       ?LET(CmdTail,
            ?LET(List,
                 ?SIZED(Size,
                        proper_types:noshrink(
                          commands(Size, Mod, InitialState, 1))),
                 proper_types:shrink_list(List)),
            [{init,InitialState}|CmdTail]),
       is_valid(Mod, InitialState, Cmds, [])).
%% Recursive generator for a command list of expected length ~Size: with
%% weight 1 the sequence stops, with weight Size one more precondition-valid
%% command is generated and we recurse, threading the symbolic next state and
%% the symbolic-variable counter.
-spec commands(size(), mod_name(), symbolic_state(), pos_integer()) ->
         proper_types:type().
commands(Size, Mod, State, Count) ->
    ?LAZY(
       proper_types:frequency(
         [{1, []},
          {Size, ?LET(Call,
                      ?SUCHTHAT(X, Mod:command(State),
                                Mod:precondition(State, X)),
                      begin
                          Var = {var,Count},
                          NextState = Mod:next_state(State, Var, Call),
                          ?LET(
                             Cmds,
                             commands(Size-1, Mod, NextState, Count+1),
                             [{set,Var,Call}|Cmds])
                      end)}])).
%% @doc Increases the expected length of command sequences generated from
%% `CmdType' by a factor `N'.
-spec more_commands(pos_integer(), proper_types:type()) -> proper_types:type().
more_commands(N, CmdType) ->
    %% Resizing multiplies the Size seen by the wrapped generator by N.
    ?SIZED(Size, proper_types:resize(Size * N, CmdType)).
%% -----------------------------------------------------------------------------
%% Parallel command generation
%% -----------------------------------------------------------------------------
%% @doc A special PropEr type which generates parallel testcases,
%% according to an abstract state machine specification. The function takes as
%% input the name of a callback module, which contains the state machine
%% specification. The initial state is computed by `Mod:initial_state/0'.
-spec parallel_commands(mod_name()) -> proper_types:type().
parallel_commands(Mod) ->
    %% Shrink first by deleting commands (parallel_shrinker), then by moving
    %% commands from the parallel component back into the sequential prefix
    %% (move_shrinker), one worker at a time.
    ?LET({ShrunkSeq, ShrunkPar},
         ?LET({Seq, Par},
              proper_types:noshrink(parallel_gen(Mod)),
              parallel_shrinker(Mod, Seq, Par)),
         move_shrinker(ShrunkSeq, ShrunkPar, ?WORKERS)).
%% @doc Similar to {@link parallel_commands/1}, but generated command sequences
%% always start at a given state.
-spec parallel_commands(mod_name(), symbolic_state()) -> proper_types:type().
parallel_commands(Mod, InitialState) ->
    %% Same shrinking pipeline as parallel_commands/1, with an explicit
    %% initial state handed to the generator.
    ?LET({ShrunkSeq, ShrunkPar},
         ?LET({Seq, Par},
              proper_types:noshrink(parallel_gen(Mod, InitialState)),
              parallel_shrinker(Mod, Seq, Par)),
         move_shrinker(ShrunkSeq, ShrunkPar, ?WORKERS)).
%% Generates a raw (unshrunk) parallel testcase from a random sequential
%% command sequence.
-spec parallel_gen(mod_name()) -> proper_types:type().
parallel_gen(Mod) ->
    ?LET(Seq,
         commands(Mod),
         mk_parallel_testcase(Mod, Seq)).
%% Same as parallel_gen/1, but the sequential prefix starts at a given state.
-spec parallel_gen(mod_name(), symbolic_state()) -> proper_types:type().
parallel_gen(Mod, InitialState) ->
    ?LET(Seq,
         commands(Mod, InitialState),
         mk_parallel_testcase(Mod, Seq)).
%% Extends the sequential prefix `Seq' with a suffix of more than ?WORKERS and
%% at most ~?LIMIT commands, then splits that suffix into ?WORKERS command
%% lists that can be executed concurrently (see fix_parallel).
-spec mk_parallel_testcase(mod_name(), command_list()) -> proper_types:type().
mk_parallel_testcase(Mod, Seq) ->
    {State, SymbEnv} = state_env_after(Mod, Seq),
    %% Continue numbering symbolic variables after the last one bound in Seq.
    Count = case SymbEnv of
                [] -> 1;
                [{var,N}|_] -> N + 1
            end,
    ?LET(Parallel,
         ?SUCHTHAT(C, commands(?LIMIT, Mod, State, Count),
                   length(C) > ?WORKERS),
         begin
             LenPar = length(Parallel),
             Len = LenPar div ?WORKERS,
             Comb = mk_first_comb(LenPar, Len, ?WORKERS),
             %% Index -> command lookup table used when trying combinations.
             LookUp = orddict:from_list(mk_dict(Parallel, 1)),
             {Seq, fix_parallel(LenPar, Len, Comb, LookUp, Mod,
                                State, SymbEnv, ?WORKERS)}
         end).
%% Shrinks the sequential and parallel components by deleting commands, while
%% requiring that every worker's command list is still valid when appended to
%% the (shrunk) sequential prefix.
-spec parallel_shrinker(mod_name(), command_list(), [command_list()]) ->
         proper_types:type().
parallel_shrinker(Mod, [{init,I} = Init|Seq], Parallel) ->
    %% An explicit {init,_} command is always preserved at the front.
    ?SUCHTHAT({Seq1, Parallel1},
              ?LET(ParInstances,
                   [proper_types:shrink_list(P) || P <- Parallel],
                   ?LET(SeqInstance,
                        proper_types:shrink_list(Seq),
                        {[Init|SeqInstance], ParInstances})),
              lists:all(
                fun(P) -> is_valid(Mod, I, Seq1 ++ P, []) end,
                Parallel1));
parallel_shrinker(Mod, Seq, Parallel) ->
    %% No {init,_} command: validation starts from Mod:initial_state/0.
    I = Mod:initial_state(),
    ?SUCHTHAT({Seq1, Parallel1},
              ?LET(ParInstances,
                   [proper_types:shrink_list(P) || P <- Parallel],
                   ?LET(SeqInstance,
                        proper_types:shrink_list(Seq),
                        {SeqInstance, ParInstances})),
              lists:all(
                fun(P) -> is_valid(Mod, I, Seq1 ++ P, []) end,
                Parallel1)).
%% Shrinks a parallel testcase by moving a slice of worker I's command list
%% into the sequential prefix, iterating down from worker I to worker 1.
-spec move_shrinker(command_list(), [command_list()], index()) ->
         proper_types:type().
move_shrinker(Seq, Par, 1) ->
    ?SHRINK({Seq, Par},
            [{Seq ++ Slice, remove_slice(1, Slice, Par)}
             || Slice <- get_slices(lists:nth(1, Par))]);
move_shrinker(Seq, Par, I) ->
    ?LET({NewSeq, NewPar},
         ?SHRINK({Seq, Par},
                 [{Seq ++ Slice, remove_slice(I, Slice, Par)}
                  || Slice <- get_slices(lists:nth(I, Par))]),
         move_shrinker(NewSeq, NewPar, I-1)).
%% -----------------------------------------------------------------------------
%% Sequential command execution
%% -----------------------------------------------------------------------------
%% @doc Evaluates a given symbolic command sequence `Cmds' according to the
%% state machine specified in `Mod'. The result is a triple of the form<br/>
%% `{History, DynamicState, Result}', where:
%% <ul>
%% <li>`History' contains the execution history of all commands that were
%%   executed without raising an exception. It contains tuples of the form
%%   {{@type dynamic_state()}, {@type term()}}, specifying the state prior to
%%   command execution and the actual result of the command.</li>
%% <li>`DynamicState' contains the state of the abstract state machine at
%%   the moment when execution stopped. In case execution has stopped due to a
%%   false postcondition, `DynamicState' corresponds to the state prior to
%%   execution of the last command.</li>
%% <li>`Result' specifies the outcome of command execution. It can be
%%   classified in one of the following categories:
%%   <ul>
%%   <li><b>ok</b>
%%     <p>All commands were successfully run and all postconditions were true.
%%     </p></li>
%%   <li><b>initialization error</b>
%%     <p>There was an error while evaluating the initial state.</p></li>
%%   <li><b>postcondition error</b>
%%     <p>A postcondition was false or raised an exception.</p></li>
%%   <li><b>precondition error</b>
%%     <p>A precondition was false or raised an exception.</p></li>
%%   <li><b>exception</b>
%%     <p>An exception was raised while running a command.</p></li>
%%   </ul></li>
%% </ul>
-spec run_commands(mod_name(), command_list()) ->
         {history(),dynamic_state(),statem_result()}.
run_commands(Mod, Cmds) ->
    %% Delegates to run_commands/3 with an empty variable environment.
    run_commands(Mod, Cmds, []).
%% @doc Similar to {@link run_commands/2}, but also accepts an environment,
%% used for symbolic variable evaluation during command execution. The
%% environment consists of `{Key::atom(), Value::term()}' pairs. Keys may be
%% used in symbolic variables (i.e. `{var,Key}') within the command sequence
%% `Cmds'. These symbolic variables will be replaced by their corresponding
%% `Value' during command execution.
-spec run_commands(mod_name(), command_list(), proper_symb:var_values()) ->
         {history(),dynamic_state(),statem_result()}.
run_commands(Mod, Cmds, Env) ->
    %% run/3 also returns the final variable environment; drop it here.
    element(1, run(Mod, Cmds, Env)).
%% @private
%% Evaluates the (possibly symbolic) initial state and then executes the
%% command list; any exception raised while evaluating the initial state is
%% reported as initialization_error.
-spec run(mod_name(), command_list(), proper_symb:var_values()) ->
         {{history(),dynamic_state(),statem_result()}, proper_symb:var_values()}.
run(Mod, Cmds, Env) ->
    InitialState = get_initial_state(Mod, Cmds),
    try proper_symb:eval(Env, InitialState) of
        DynState ->
            run_commands(Cmds, Env, Mod, [], DynState)
    catch
        _Exc:_Reason ->
            {{[], undefined, initialization_error}, []}
    end.
%% Main sequential execution loop: for each {set,Var,Call} command, check the
%% precondition, apply the call, record it in the history, check the
%% postcondition and advance the dynamic state. Stops at the first failure,
%% returning the (reversed) history gathered so far.
-spec run_commands(command_list(), proper_symb:var_values(), mod_name(),
                   history(), dynamic_state()) ->
         {{history(),dynamic_state(),statem_result()}, proper_symb:var_values()}.
run_commands(Cmds, Env, Mod, History, State) ->
    case Cmds of
        [] ->
            {{lists:reverse(History), State, ok}, Env};
        [{init,_S}|Rest] ->
            %% The initial state was already evaluated in run/3; skip.
            run_commands(Rest, Env, Mod, History, State);
        [{set, {var,V}, {call,M,F,A}}|Rest] ->
            %% Substitute previously-bound symbolic variables in the call.
            M2 = proper_symb:eval(Env, M),
            F2 = proper_symb:eval(Env, F),
            A2 = proper_symb:eval(Env, A),
            Call = {call,M2,F2,A2},
            case check_precondition(Mod, State, Call) of
                true ->
                    case safe_apply(M2, F2, A2) of
                        {ok,Res} ->
                            %% Bind the result and record the pre-state.
                            Env2 = [{V,Res}|Env],
                            History2 = [{State,Res}|History],
                            case check_postcondition(Mod, State, Call, Res) of
                                true ->
                                    State2 = proper_symb:eval(Env2, Mod:next_state(State, Res, Call)),
                                    run_commands(Rest, Env2, Mod, History2, State2);
                                false ->
                                    {{lists:reverse(History2), State, {postcondition,false}}, []};
                                {exception,_,_,_} = Exception ->
                                    {{lists:reverse(History2), State, {postcondition,Exception}}, []}
                            end;
                        {error,Exception} ->
                            %% The call itself raised; history excludes it.
                            {{lists:reverse(History), State, Exception}, []}
                    end;
                false ->
                    {{lists:reverse(History), State, {precondition,false}}, []};
                {exception,_,_,_} = Exc ->
                    {{lists:reverse(History), State, {precondition,Exc}}, []}
            end
    end.
%% Calls Mod:precondition/2, converting any raised exception into an
%% {exception, Kind, Reason, Stacktrace} tuple instead of letting it escape.
%% NOTE(review): erlang:get_stacktrace/0 is deprecated since OTP 21 and was
%% removed in OTP 24 - this code targets older OTP releases.
-spec check_precondition(mod_name(), dynamic_state(), symb_call()) ->
         boolean() | proper:exception().
check_precondition(Mod, State, Call) ->
    try Mod:precondition(State, Call)
    catch
        Kind:Reason ->
            {exception, Kind, Reason, erlang:get_stacktrace()}
    end.
%% Calls Mod:postcondition/3, converting any raised exception into an
%% {exception, Kind, Reason, Stacktrace} tuple instead of letting it escape.
%% NOTE(review): erlang:get_stacktrace/0 is deprecated since OTP 21 and was
%% removed in OTP 24 - this code targets older OTP releases.
-spec check_postcondition(mod_name(), dynamic_state(), symb_call(), term()) ->
         boolean() | proper:exception().
check_postcondition(Mod, State, Call, Res) ->
    try Mod:postcondition(State, Call, Res)
    catch
        Kind:Reason ->
            {exception, Kind, Reason, erlang:get_stacktrace()}
    end.
%% Applies M:F(A), wrapping a normal result in {ok,_} and any raised
%% exception in {error, {exception, Kind, Reason, Stacktrace}}.
%% NOTE(review): erlang:get_stacktrace/0 is deprecated since OTP 21 and was
%% removed in OTP 24 - this code targets older OTP releases.
-spec safe_apply(mod_name(), fun_name(), [term()]) ->
         {'ok', term()} | {'error', proper:exception()}.
safe_apply(M, F, A) ->
    try apply(M, F, A) of
        Result -> {ok, Result}
    catch
        Kind:Reason ->
            {error, {exception, Kind, Reason, erlang:get_stacktrace()}}
    end.
%% -----------------------------------------------------------------------------
%% Parallel command execution
%% -----------------------------------------------------------------------------
%% @doc Runs a given parallel testcase according to the state machine
%% specified in `Mod'. The result is a triple of the form<br/>
%% `@{Sequential_history, Parallel_history, Result@}', where:
%% <ul>
%% <li>`Sequential_history' contains the execution history of the
%%   sequential component.</li>
%% <li>`Parallel_history' contains the execution history of each of the
%%   concurrent tasks.</li>
%% <li>`Result' specifies the outcome of the attempt to serialize command
%%   execution, based on the results observed. It can be one of the following:
%%   <ul><li> `ok' </li><li> `no_possible_interleaving' </li></ul> </li>
%% </ul>
-spec run_parallel_commands(mod_name(), parallel_testcase()) ->
         {history(),[parallel_history()],statem_result()}.
run_parallel_commands(Mod, {_Sequential, _Parallel} = Testcase) ->
    %% Delegates to run_parallel_commands/3 with an empty environment.
    run_parallel_commands(Mod, Testcase, []).
%% @doc Similar to {@link run_parallel_commands/2}, but also accepts an
%% environment used for symbolic variable evaluation, exactly as described in
%% {@link run_commands/3}.
-spec run_parallel_commands(mod_name(), parallel_testcase(),
                            proper_symb:var_values()) ->
         {history(),[parallel_history()],statem_result()}.
run_parallel_commands(Mod, {Sequential, Parallel}, Env) ->
    case run(Mod, Sequential, Env) of
        {{Seq_history, State, ok}, SeqEnv} ->
            %% Execute each parallel command list in its own process, then
            %% search for an interleaving that explains the observed results.
            F = fun(T) -> execute(T, SeqEnv, Mod, []) end,
            Parallel_history = pmap(F, Parallel),
            case check(Mod, State, SeqEnv, false, [], Parallel_history) of
                true ->
                    {Seq_history, Parallel_history, ok};
                false ->
                    {Seq_history, Parallel_history, no_possible_interleaving}
            end;
        {{Seq_history, _, Res}, _} ->
            %% The sequential prefix already failed; skip the parallel part.
            {Seq_history, [], Res}
    end.
%% @private
%% Executes a command list without pre/postcondition checks, recording each
%% command together with its result. Exceptions are not caught here; they
%% propagate to the `catch' wrapper installed by spawn_jobs/2.
-spec execute(command_list(), proper_symb:var_values(), mod_name(),
              parallel_history()) -> parallel_history().
execute(Cmds, Env, Mod, History) ->
    case Cmds of
        [] ->
            lists:reverse(History);
        [{set, {var,V}, {call,M,F,A}} = Cmd|Rest] ->
            M2 = proper_symb:eval(Env, M),
            F2 = proper_symb:eval(Env, F),
            A2 = proper_symb:eval(Env, A),
            Res = apply(M2, F2, A2),
            Env2 = [{V,Res}|Env],
            History2 = [{Cmd,Res}|History],
            execute(Rest, Env2, Mod, History2)
    end.
%% Parallel map: applies F to every element of L in a separate linked process
%% and collects the results in the order of L.
-spec pmap(fun((command_list()) -> parallel_history()), [command_list()]) ->
         [parallel_history()].
pmap(F, L) ->
    await(spawn_jobs(F,L)).
%% Spawns one linked worker per command list; each worker sends a message
%% tagged with its own pid back to the parent, carrying either {ok, Result}
%% or - since F runs under `catch' - an {'EXIT', _} term on crash.
%% NOTE(review): spawn_link_cp/1 is defined elsewhere in this module
%% (outside this excerpt) - presumably a spawn_link variant; confirm.
-spec spawn_jobs(fun((command_list()) -> parallel_history()),
                 [command_list()]) -> [pid()].
spawn_jobs(F, L) ->
    Parent = self(),
    [spawn_link_cp(fun() -> Parent ! {self(),catch {ok,F(X)}} end)
     || X <- L].
%% Collects worker results in spawn order. If a worker reports a crash
%% ({'EXIT',_}), the remaining workers are killed, any results they already
%% delivered are drained from the mailbox, and the error is re-raised.
-spec await([pid()]) -> [parallel_history()].
await([]) ->
    [];
await([Pid | Rest]) ->
    receive
        {Pid, {ok, History}} ->
            [History | await(Rest)];
        {Pid, {'EXIT', _} = Error} ->
            _ = [exit(P, kill) || P <- Rest],
            _ = [receive {P, _} -> ok after 0 -> ok end || P <- Rest],
            erlang:error(Error)
    end.
%% @private
%% Searches for a valid serialization of the parallel histories: repeatedly
%% tries to consume the head event of one of the per-worker histories,
%% checking its postcondition against the current serialized state.
%% `Changed' records whether any event was consumed during the current pass;
%% `Tried' holds histories postponed within the pass.
-spec check(mod_name(), dynamic_state(), proper_symb:var_values(),
            boolean(), [parallel_history()], [parallel_history()]) -> boolean().
check(_Mod, _State, _Env, _Changed, [], []) ->
    %% All events consumed - a valid interleaving exists.
    true;
check(_Mod, _State, _Env, false, _Tried, []) ->
    %% A full pass made no progress - no serialization is possible.
    false;
check(Mod, State, Env, true, Tried, []) ->
    %% Progress was made; start another pass over the postponed histories.
    check(Mod, State, Env, false, [], Tried);
check(Mod, State, Env, Changed, Tried, [P|ToTry]) ->
    case P of
        [] ->
            check(Mod, State, Env, Changed, Tried, ToTry);
        [H|Tail] ->
            {{set, {var,N}, {call,M,F,A}}, Res} = H,
            M_ = proper_symb:eval(Env, M),
            F_ = proper_symb:eval(Env, F),
            A_ = proper_symb:eval(Env, A),
            Call = {call,M_,F_,A_},
            case Mod:postcondition(State, Call, Res) of
                true ->
                    Env2 = [{N, Res}|Env],
                    NextState = proper_symb:eval(
                                  Env2, Mod:next_state(State, Res, Call)),
                    %% Either commit to this event, or backtrack and postpone
                    %% this worker's history for the current pass.
                    check(Mod, NextState, Env2, true, Tried, [Tail|ToTry])
                        orelse check(Mod, State, Env, Changed,
                                     [P|Tried], ToTry);
                false ->
                    check(Mod, State, Env, Changed, [P|Tried], ToTry)
            end
    end.
%% -----------------------------------------------------------------------------
%% Other API functions
%% -----------------------------------------------------------------------------
%% @doc Extracts the names of the commands from a given command sequence, in
%% the form of MFAs. It is useful in combination with functions such as
%% {@link proper:aggregate/2} in order to collect statistics about command
%% execution.
-spec command_names(command_list() | parallel_testcase()) -> [mfa()].
command_names({Sequential, ParallelLists}) ->
    SeqNames = command_names(Sequential),
    ParNames = lists:map(fun command_names/1, ParallelLists),
    lists:flatten([SeqNames | ParNames]);
command_names(Cmds) ->
    [{Mod, Fun, length(Args)} || {set, _Var, {call,Mod,Fun,Args}} <- Cmds].
%% @doc Returns the symbolic state after running a given command sequence,
%% according to the state machine specification found in `Mod'. The commands
%% are not actually executed.
-spec state_after(mod_name(), command_list()) -> symbolic_state().
state_after(Mod, Cmds) ->
    %% Drop the accumulated symbolic-variable environment, keep the state.
    element(1, state_env_after(Mod, Cmds)).
%% Folds the command list symbolically, returning the resulting symbolic
%% state together with the bound symbolic variables (most recent first).
%% An {init,S} command resets both the state and the variable list.
-spec state_env_after(mod_name(), command_list()) ->
         {symbolic_state(), [symb_var()]}.
state_env_after(Mod, Cmds) ->
    lists:foldl(fun({init,S}, _) ->
                        {S, []};
                   ({set,Var,Call}, {S,Vars}) ->
                        {Mod:next_state(S, Var, Call), [Var|Vars]}
                end,
                {get_initial_state(Mod, Cmds), []},
                Cmds).
%% @doc Behaves like `lists:zip/2', but the input lists do not necessarily
%% have equal length. Zipping stops when the shortest list is exhausted. This
%% is useful for zipping a command sequence with its (failing) execution
%% history.
-spec zip([A], [B]) -> [{A,B}].
zip(Xs, Ys) ->
    zip(Xs, Ys, []).
%% Tail-recursive worker building the pairs in reverse.
zip([X | Xs], [Y | Ys], Acc) ->
    zip(Xs, Ys, [{X,Y} | Acc]);
zip(_, [], Acc) ->
    lists:reverse(Acc);
zip([], _, Acc) ->
    lists:reverse(Acc).
%% -----------------------------------------------------------------------------
%% Utility functions
%% -----------------------------------------------------------------------------
%% @private
%% True iff every command's arguments only use already-bound symbolic
%% variables and every precondition holds along the symbolic execution.
-spec is_valid(mod_name(), symbolic_state(), command_list(), [symb_var()]) ->
          boolean().
is_valid(_Mod, _State, [], _SymbEnv) -> true;
%% NOTE: _SymbEnv is underscore-prefixed but still a bound variable; it is
%% deliberately threaded through unchanged on an {init,S} entry.
is_valid(Mod, _State, [{init,S}|Cmds], _SymbEnv) ->
    is_valid(Mod, S, Cmds, _SymbEnv);
is_valid(Mod, State, [{set, Var, {call,_M,_F,A} = Call}|Cmds], SymbEnv) ->
    args_defined(A, SymbEnv) andalso Mod:precondition(State, Call)
    andalso is_valid(Mod, Mod:next_state(State, Var, Call), Cmds,
                     [Var|SymbEnv]).
%% @private
%% True iff every symbolic variable occurring (at any depth) in the
%% argument list is a member of SymbEnv.
-spec args_defined([term()], [symb_var()]) -> boolean().
args_defined(List, SymbEnv) ->
    lists:all(fun (A) -> arg_defined(A, SymbEnv) end, List).
-spec arg_defined(term(), [symb_var()]) -> boolean().
%% A {var,N} term must have been bound earlier in the sequence.
arg_defined({var,I} = V, SymbEnv) when is_integer(I) ->
    lists:member(V, SymbEnv);
%% Recurse into tuples and (improper-safe) list cells; anything else is
%% a plain literal and is always defined.
arg_defined(Tuple, SymbEnv) when is_tuple(Tuple) ->
    args_defined(tuple_to_list(Tuple), SymbEnv);
arg_defined([Head|Tail], SymbEnv) ->
    arg_defined(Head, SymbEnv) andalso arg_defined(Tail, SymbEnv);
arg_defined(_, _) ->
    true.
%% @private
%% Initial symbolic state: an explicit {init,S} head wins, otherwise the
%% callback module's initial_state/0 is used.
-spec get_initial_state(mod_name(), command_list()) -> symbolic_state().
get_initial_state(_, [{init,S}|_]) -> S;
get_initial_state(Mod, Cmds) when is_list(Cmds) ->
    Mod:initial_state().
%% @private
%% Search for a partition of the command indices into W parallel branches
%% such that every interleaving satisfies the preconditions.  When all
%% combinations of the current length are exhausted ('done'), retry with
%% one command fewer per branch.
-spec fix_parallel(index(), non_neg_integer(), combination() | 'done',
                   lookup(), mod_name(), symbolic_state(), [symb_var()],
                   pos_integer()) -> [command_list()].
fix_parallel(_, 0, done, _, _, _, _, _) ->
    exit(error); %% not supposed to reach here
fix_parallel(MaxIndex, Len, done, LookUp, Mod, State, SymbEnv, W) ->
    Comb = mk_first_comb(MaxIndex, Len-1, W),
    %% Progress marker printed when the search has to fall back to
    %% single-command branches.
    case Len of
        1 -> io:format("f");
        _ -> ok
    end,
    fix_parallel(MaxIndex, Len-1, Comb , LookUp, Mod, State, SymbEnv, W);
fix_parallel(MaxIndex, Len, Comb, LookUp, Mod, State, SymbEnv, W) ->
    CmdLists = lookup_cmd_lists(Comb, LookUp),
    case can_parallelize(CmdLists, Mod, State, SymbEnv) of
        true ->
            lists:reverse(CmdLists);
        false ->
            %% Advance to the next candidate combination; the indices of
            %% the first two branches become available for redistribution.
            C1 = proplists:get_value(1, Comb),
            C2 = proplists:get_value(2, Comb),
            Next = get_next(Comb, Len, MaxIndex, lists:sort(C1 ++ C2), W, 2),
            fix_parallel(MaxIndex, Len, Next, LookUp, Mod, State, SymbEnv, W)
    end.
%% A split is parallelizable iff each branch is valid on its own and every
%% possible interleaving of the branches is valid as well.
-spec can_parallelize([command_list()], mod_name(), symbolic_state(),
                      [symb_var()]) -> boolean().
can_parallelize(CmdLists, Mod, State, SymbEnv) ->
    lists:all(fun(C) -> is_valid(Mod, State, C, SymbEnv) end, CmdLists)
    andalso lists:all(fun(C) -> is_valid(Mod, State, C, SymbEnv) end,
                      possible_interleavings(CmdLists)).
%% @private
-spec possible_interleavings([command_list()]) -> [command_list()].
possible_interleavings([P1,P2]) ->
insert_all(P1, P2);
possible_interleavings([P1|Rest]) ->
[I || L <- possible_interleavings(Rest),
I <- insert_all(P1, L)].
%% @private
%% Returns all possible insertions of the elements of the first list,
%% preserving their order, inside the second list, i.e. all possible
%% command interleavings between two parallel processes
-spec insert_all([term()], [term()]) -> [[term()]].
insert_all([], List) ->
    [List];
insert_all([X], List) ->
    all_insertions(X, length(List) + 1, List);
%% Insert Y..Rest first, then X strictly before Y's position so the
%% relative order of the first list is preserved.
insert_all([X|[Y|Rest]], List) ->
    [L2 || L1 <- insert_all([Y|Rest], List),
           L2 <- all_insertions(X, index(Y, L1), L1)].
%% @private
%% All lists obtained by inserting X into List at any of the first
%% Limit positions (1-based; Limit = length(List)+1 allows appending).
-spec all_insertions(term(), pos_integer(), [term()]) -> [[term()]].
all_insertions(X, Limit, List) ->
    all_insertions_tr(X, Limit, 0, [], List, []).
%% Tail-recursive worker: Front/Back are the split of List around the
%% current insertion point, LengthFront its position, Acc the results.
-spec all_insertions_tr(term(), pos_integer(), non_neg_integer(),
                        [term()], [term()], [[term()]]) -> [[term()]].
all_insertions_tr(X, Limit, LengthFront, Front, [], Acc) ->
    case LengthFront < Limit of
        true ->
            [Front ++ [X] | Acc];
        false ->
            Acc
    end;
all_insertions_tr(X, Limit, LengthFront, Front, Back = [BackH|BackT], Acc) ->
    case LengthFront < Limit of
        true ->
            all_insertions_tr(X, Limit, LengthFront+1, Front ++ [BackH],
                              BackT, [Front ++ [X] ++ Back | Acc]);
        false -> Acc
    end.
%% @private
%% 1-based position of the first occurrence of X; crashes
%% (function_clause) when X is absent — callers guarantee membership.
-spec index(term(), [term(),...]) -> index().
index(X, List) ->
    index(X, List, 1).
-spec index(term(), [term(),...], index()) -> index().
index(X, [X|_], N) -> N;
index(X, [_|Rest], N) -> index(X, Rest, N+1).
%% @private
-spec mk_dict(command_list(), pos_integer()) -> [{pos_integer(), command()}].
%% Number each command starting at N, skipping {init,_} entries (which do
%% not consume an index).
mk_dict(Cmds, N) ->
    {Dict, _} =
        lists:foldl(
          fun({init, _}, {Acc, K}) -> {Acc, K};
             (Cmd, {Acc, K}) -> {[{K, Cmd} | Acc], K + 1}
          end, {[], N}, Cmds),
    lists:reverse(Dict).
%% @private
%% First candidate combination: branches W..2 each take Len consecutive
%% indices, branch 1 takes whatever remains up to N.
-spec mk_first_comb(pos_integer(), non_neg_integer(), pos_integer()) ->
          combination().
mk_first_comb(N, Len, W) ->
    mk_first_comb_tr(1, N, Len, [], W).
-spec mk_first_comb_tr(pos_integer(), pos_integer(), non_neg_integer(),
                       combination(), pos_integer()) -> combination().
mk_first_comb_tr(Start, N, _Len, Accum, 1) ->
    [{1,lists:seq(Start, N)}|Accum];
mk_first_comb_tr(Start, N, Len, Accum, W) ->
    K = Start + Len,
    mk_first_comb_tr(K, N, Len, [{W,lists:seq(Start, K-1)}|Accum], W-1).
%% Resolve a list of indices back to the commands stored in the orddict.
-spec lookup_cmds(indices(), lookup()) -> command_list().
lookup_cmds(Indices, LookUp) ->
    [orddict:fetch(Index, LookUp) || Index <- Indices].
%% Resolve every branch of a combination to its command list.
-spec lookup_cmd_lists(combination(), lookup()) -> [command_list()].
lookup_cmd_lists(Combination, LookUp) ->
    [lookup_cmds(Indices, LookUp) || {_, Indices} <- Combination].
%% @private
-spec get_next(combination(), non_neg_integer(), index(), indices(),
pos_integer(), pos_integer()) -> combination() | 'done'.
get_next(L, _Len, _MaxIndex, Available, _Workers, 1) ->
[{1,Available}|proplists:delete(1, L)];
get_next(L, Len, MaxIndex, Available, Workers, N) ->
C = case proplists:is_defined(N, L) of
true ->
next_comb(MaxIndex, proplists:get_value(N, L), Available);
false ->
lists:sublist(Available, Len)
end,
case C of
done ->
if N =:= Workers ->
done;
N =/= Workers ->
C2 = proplists:get_value(N+1, L),
NewList = [E || {M,_}=E <- L, M > N],
get_next(NewList, Len, MaxIndex,
lists:sort(C2 ++ Available), Workers, N+1)
end;
_ ->
get_next([{N,C}|proplists:delete(N, L)],
Len, MaxIndex, Available -- C, Workers, N-1)
end.
%% Next well-defined index set after Indices, counting odometer-style in
%% base MaxIndex and skipping candidates that repeat an index or use one
%% outside the available pool.
-spec next_comb(index(), indices(), indices()) -> indices() | 'done'.
next_comb(MaxIndex, Indices, Available) ->
    Res = next_comb_tr(MaxIndex, lists:reverse(Indices), []),
    case is_well_defined(Res, Available) of
        true -> Res;
        false -> next_comb(MaxIndex, Res, Available)
    end.
-spec is_well_defined(indices() | 'done', indices()) -> boolean().
%% A combination is well defined when its indices are strictly increasing
%% without repetition and all of them are drawn from the available pool.
%% 'done' is trivially accepted so next_comb/3 can terminate.
is_well_defined(done, _Available) ->
    true;
is_well_defined(Comb, Available) ->
    lists:usort(Comb) =:= Comb andalso
        [] =:= [X || X <- Comb, not lists:member(X, Available)].
-spec next_comb_tr(index(), indices(), indices()) -> indices() | 'done'.
%% Odometer increment over the (reversed) index list: digits equal to
%% MaxIndex wrap to 1 with carry; when every digit wraps, we are done.
next_comb_tr(_MaxIndex, [], _Acc) ->
    done;
next_comb_tr(MaxIndex, [Head | Rest], Acc) ->
    case Head of
        MaxIndex -> next_comb_tr(MaxIndex, Rest, [1 | Acc]);
        _ -> lists:reverse(Rest, [Head + 1 | Acc])
    end.
%% Remove the commands in Slice from the Index-th command list of List,
%% leaving the other lists untouched.
-spec remove_slice(index(), command_list(), [command_list(),...]) ->
          [command_list(),...].
remove_slice(Index, Slice, List) ->
    remove_slice_tr(Index, Slice, List, [], 1).
-spec remove_slice_tr(index(), command_list(), [command_list(),...],
                      [command_list()], pos_integer()) -> [command_list(),...].
remove_slice_tr(Index, Slice, [H|T], Acc, Index) ->
    lists:reverse(Acc, [H -- Slice] ++ T);
remove_slice_tr(Index, Slice, [H|T], Acc, N) ->
    remove_slice_tr(Index, Slice, T, [H|Acc], N+1).
%% All non-empty prefixes of List (returned longest-first).
-spec get_slices(command_list()) -> [command_list()].
get_slices(List) ->
    get_slices_tr(List, List, 1, []).
-spec get_slices_tr(command_list(), command_list(), pos_integer(),
                    [command_list()]) -> [command_list()].
get_slices_tr([], _, _, Acc) -> Acc;
get_slices_tr([_|Tail], List, N, Acc) ->
    get_slices_tr(Tail, List, N+1, [lists:sublist(List, N)|Acc]).
%% Spawn a linked process that first copies the parent's '$'-prefixed
%% process-dictionary entries into its own dictionary and reseeds the
%% random generator, then runs ActualFun.
%% (The trailing line of the original was corrupted by extraction
%% artifacts appended after "spawn_link(Fun)."; restored here.)
-spec spawn_link_cp(fun(() -> _)) -> pid().
spawn_link_cp(ActualFun) ->
    %% Only atom keys whose printed name starts with '$' are propagated.
    PDictStuff = [Pair || {K,_V} = Pair <- get(),
                          is_atom(K),
                          re:run(atom_to_list(K), ["^[$]"],
                                 [{capture,none}]) =:= match],
    Fun = fun() ->
              lists:foreach(fun({K,V}) -> put(K,V) end, PDictStuff),
              proper_arith:rand_reseed(),
              ActualFun()
          end,
    spawn_link(Fun).
%%%
%%% Copyright 2017 RBKmoney
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
-module(mg_core_dirange).
-export_type([dirange/1]).
-export([empty/0]).
-export([forward/2]).
-export([backward/2]).
-export([to_opaque/1]).
-export([from_opaque/1]).
-export([align/2]).
-export([reverse/1]).
-export([dissect/2]).
-export([conjoin/2]).
-export([intersect/2]).
-export([limit/2]).
-export([fold/3]).
-export([enumerate/1]).
-export([direction/1]).
-export([size/1]).
-export([bounds/1]).
-export([from/1]).
-export([to/1]).
%% Directed range over integers
-opaque dirange(_T) :: nonempty_dirange(_T) | undefined.
-type direction() :: -1 | +1.
-type nonempty_dirange(_T) ::
% Non-empty, unambiguously oriented directed range [from..to].
{_T :: integer(), _T :: integer(), direction()}.
%%
-spec empty() -> dirange(_).
%% The empty range; every other function in this module treats the atom
%% 'undefined' as "no elements".
empty() ->
    undefined.
-spec forward(_T :: integer(), _T :: integer()) -> dirange(_T).
%% Forward (direction +1) range spanning both endpoints, whichever order
%% they are given in.
forward(A, B) ->
    {erlang:min(A, B), erlang:max(A, B), +1}.
-spec backward(_T :: integer(), _T :: integer()) -> dirange(_T).
%% Backward (direction -1) range spanning both endpoints, whichever order
%% they are given in.
backward(A, B) ->
    {erlang:max(A, B), erlang:min(A, B), -1}.
-spec to_opaque(dirange(_)) -> mg_core_storage:opaque().
%% Storage encoding: empty -> null, forward ranges drop the (implicit)
%% +1 direction, backward ranges keep the explicit -1.
to_opaque(undefined) ->
    null;
to_opaque({From, To, +1}) ->
    [From, To];
to_opaque({From, To, -1 = Dir}) ->
    [From, To, Dir].
-spec from_opaque(mg_core_storage:opaque()) -> dirange(_).
%% Inverse of to_opaque/1: a two-element list is a forward range, a
%% three-element list carries its direction explicitly.
from_opaque(null) ->
    undefined;
from_opaque([From, To]) ->
    {From, To, +1};
from_opaque([From, To, Dir]) ->
    {From, To, Dir}.
%%
%% Reorient R to run in the same direction as the pivot Rp.  When either
%% range is empty its direction is 0, the product is not -1, and R is
%% returned unchanged.
-spec align(dirange(T), _Pivot :: dirange(T)) -> dirange(T).
align(R, Rp) ->
    case direction(R) * direction(Rp) of
        -1 -> reverse(R);
        _S -> R
    end.
-spec reverse(dirange(T)) -> dirange(T).
%% Same elements, opposite traversal order; the empty range is its own
%% reverse.
reverse(undefined) ->
    undefined;
reverse({From, To, Dir}) ->
    {To, From, -Dir}.
%% Split a range at pivot C into {Leading, Trailing}: for a forward range
%% the leading part holds every element =< C.  A backward range is handled
%% by dissecting its reverse at C - 1 and swapping back, which keeps C
%% itself in the leading part.
-spec dissect(dirange(T), T) -> {dirange(T), dirange(T)}.
dissect(undefined, _) ->
    {undefined, undefined};
dissect({A, B, +1 = D} = R, C) ->
    if
        C < A -> {undefined, R};
        B =< C -> {R, undefined};
        A =< C, C < B -> {{A, C, D}, {C + 1, B, D}}
    end;
dissect(R, C) ->
    {R1, R2} = dissect(reverse(R), C - 1),
    {reverse(R2), reverse(R1)}.
-spec conjoin(dirange(T), dirange(T)) -> dirange(T).
%% Concatenate two ranges.  The second must start exactly one step after
%% the first ends and run in the same direction; anything else is badarg.
conjoin(undefined, Range) ->
    Range;
conjoin(Range, undefined) ->
    Range;
conjoin({From1, To1, Dir}, {From2, To2, Dir}) when From2 == To1 + Dir ->
    {From1, To2, Dir};
conjoin(Range1, Range2) ->
    erlang:error(badarg, [Range1, Range2]).
%% Split Range against With into the part before, inside and after With,
%% all expressed in Range's own direction.  With may not be empty.
-spec intersect(_Range :: dirange(T), _With :: dirange(T)) ->
    {
        % part of `Range` to the «left» of `With`
        _LeftDiff :: dirange(T),
        % intersection between `Range` and `With`
        _Intersection :: dirange(T),
        % part of `Range` to the «right» of `With`
        _RightDiff :: dirange(T)
    }.
intersect(R0, undefined) ->
    erlang:error(badarg, [R0, undefined]);
intersect(R0, With) ->
    D0 = direction(R0),
    %% Orient With the same way as R0 before taking its bounds.
    {WA, WB} = bounds(align(With, R0)),
    % to NOT include WA itself
    {LeftDiff, R1} = dissect(R0, WA - D0),
    {Intersection, RightDiff} = dissect(R1, WB),
    {LeftDiff, Intersection, RightDiff}.
-spec limit(dirange(T), non_neg_integer()) -> dirange(T).
%% Keep at most Count leading elements of the range; limiting to 0 (or
%% limiting the empty range) yields the empty range.
limit(undefined, _Count) ->
    undefined;
limit(_Range, 0) ->
    undefined;
limit({From, To, +1}, Count) when Count > 0 ->
    {From, erlang:min(To, From + Count - 1), +1};
limit({From, To, -1}, Count) when Count > 0 ->
    {From, erlang:max(To, From - Count + 1), -1}.
-spec enumerate(dirange(T)) -> [T].
%% Materialize the range as a list, in traversal order.
enumerate(undefined) ->
    [];
enumerate({From, To, Step}) ->
    lists:seq(From, To, Step).
-spec fold(fun((T, Acc) -> Acc), Acc, dirange(T)) -> Acc.
%% Left fold over the elements of the range in traversal order.
fold(_Fun, Acc, undefined) ->
    Acc;
fold(Fun, Acc, {From, To, Dir}) ->
    fold(Fun, Acc, From, To, Dir).
%% Worker: walk from Cur towards To one Dir-step at a time, applying Fun.
-spec fold(fun((T, Acc) -> Acc), Acc, T, T, -1..1) -> Acc.
fold(Fun, Acc, To, To, _Dir) ->
    Fun(To, Acc);
fold(Fun, Acc, Cur, To, Dir) ->
    fold(Fun, Fun(Cur, Acc), Cur + Dir, To, Dir).
%% Direction of a range: +1 forward, -1 backward, 0 for anything that is
%% not a 3-tuple (i.e. the empty range).
-spec direction(dirange(_)) -> direction() | 0.
direction({_, _, D}) ->
    D;
direction(_) ->
    0.
%% Number of elements in the range.  NOTE(review): this local size/1
%% shadows the auto-imported BIF erlang:size/1; within this module it
%% must be called with an explicit module prefix to avoid ambiguity.
-spec size(dirange(_)) -> non_neg_integer().
size(undefined) ->
    0;
size({A, B, D}) ->
    (B - A) * D + 1.
-spec bounds(dirange(_T)) -> {_T, _T} | undefined.
%% {First, Last} endpoints in traversal order, or 'undefined' when empty.
bounds(undefined) ->
    undefined;
bounds({From, To, _Dir}) ->
    {From, To}.
-spec from(dirange(_T)) -> _T | undefined.
%% First element of the range, or 'undefined' for the empty range.
from({From, _, _}) ->
    From;
from(undefined) ->
    undefined.
-spec to(dirange(_T)) -> _T | undefined.
%% Last element of the range, or 'undefined' for the empty range.
%% (The original final line was corrupted by extraction artifacts
%% appended after "B."; restored here.)
to(undefined) ->
    undefined;
to({_, B, _}) ->
    B.
%% Adapted from https://github.com/eproxus/unite/blob/master/src/unite_compact.erl
-module(eunit_formatter).
-behaviour(eunit_listener).
% EUnit Callbacks
-export([start/0]).
-export([start/1]).
-export([init/1]).
-export([handle_begin/3]).
-export([handle_end/3]).
-export([handle_cancel/3]).
-export([terminate/2]).
-export([ioindent/2]).
% Clear line: "\e[2K"
-record(s, {
start = current_time_msecs(),
cases = [],
profile = false,
profile_max = 10
}).
%--- EUnit Callbacks ----------------------------------------------------------
%% Wall-clock time in milliseconds, derived from os:timestamp/0.
current_time_msecs() ->
    {MegaSecs, Secs, MicroSecs} = os:timestamp(),
    MegaSecs * 1000000000 + Secs * 1000 + round(MicroSecs / 1000).
%% Start the eunit listener with default (empty) options.
start() ->
    start([]).
start(Options) ->
    eunit_listener:start(?MODULE, Options).
%% Listener state from options: 'profile' may be absent, 'true' (default
%% top-10), or a non-negative integer bounding the slow-test report.
init(Options) ->
    case get(profile, Options) of
        undefined ->
            #s{};
        true ->
            #s{profile = true};
        Max when is_integer(Max), Max >= 0 ->
            #s{profile = true, profile_max = Max}
    end.
%% Nothing to record when a test or group begins.
handle_begin(_Type, _Data, State) ->
    State.
%% Print one progress character per finished test (., S or F) and keep
%% the raw result for the final report.
%% NOTE(review): 'cases ++ [Data]' appends per test (quadratic over a
%% run); acceptable for typical suite sizes.
handle_end(test, Data, State) ->
    case get(status, Data) of
        ok -> io:format(("."));
        skip -> io:format(("S"));
        {error, _} -> io:format(("F"))
    end,
    State#s{cases = State#s.cases ++ [Data]};
handle_end(_Type, _Data, State) ->
    State.
%% Cancelled groups with a reason are reported as C and kept; cancelled
%% groups without one (e.g. empty groups) are ignored.
handle_cancel(group, Data, State) ->
    case get(reason, Data) of
        undefined ->
            State;
        _Else ->
            io:format(("C")),
            State#s{cases = State#s.cases ++ [Data]}
    end;
handle_cancel(_Type, _Data, State) ->
    State.
%% End of run: print details for every failed or aborted case, then the
%% optional profiling section and the one-line summary.
terminate({ok, Result}, #s{cases = Cases} = State) ->
    print_failures(lists:filter(
        fun(C) ->
            %% A case is reported when its status is an error or its
            %% cancellation reason is an abort.
            case get(status, C) of
                {error, _} ->
                    true;
                _ ->
                    case get(reason, C) of
                        {abort, _} -> true;
                        _ -> false
                    end
            end
        end,
        Cases
    )),
    print_times(State),
    print_summary(Result, State).
%--- Internal Functions -------------------------------------------------------
%% Print every failure, numbered from 1.
print_failures([]) -> ok;
print_failures(Failures) ->
    Indexed = lists:zip(lists:seq(1, length(Failures)), Failures),
    [print_failure(I, F) || {I, F} <- Indexed],
    io:format("~n").
% Individual Test Case
%% Header + indented details, plus any output the test produced.
print_failure(Index, Failure) ->
    Reason = get(reason, Failure),
    Info = get(status, Failure, Reason),
    {Header, Details} = format_info(Failure, Info),
    io:format("~n~n  ~p) ~s~n", [Index, Header]),
    io:format(ioindent(4, Details)),
    case format_output(Failure) of
        undefined -> ok;
        Output -> io:format("~n~s", [ioindent(4, Output)])
    end.
%% Render one failure as a {Header, Details} pair from its eunit
%% status/reason term.  One clause per assert-macro family; the
%% parenthesised string expressions are leftovers of stripped colour
%% decoration (see colorize/2).
format_info(Failure, {error, {error, {assert, Info}, ST}}) ->
    Expr = get(expression, Info),
    {
        (format_case(Failure, ST)),
        [("Assert failed: "), format_macro_string(Expr)]
    };
%% ?assertEqual: show a token-level diff of expected vs. actual.
format_info(Failure, {error, {error, {assertEqual, Info}, ST}}) ->
    Expected = get(expected, Info),
    Actual = get(value, Info),
    Exp = diff_prep_term(Expected),
    Act = diff_prep_term(Actual),
    Diff = tdiff:diff(Exp, Act),
    {
        (format_case(Failure, ST)),
        io_lib:format("~s ~s~n~s", [
            ("Assert equal failed!"),
            [
                ("-Expected-"),
                " ",
                ("+Actual+")
            ],
            format_diff(Diff)
        ])
    };
%% ?assertMatch: print the expression, the pattern and the actual value.
format_info(Failure, {error, {error, {assertMatch, Info}, ST}}) ->
    Expr = get(expression, Info),
    Pattern = get(pattern, Info),
    Value = get(value, Info),
    {
        (format_case(Failure, ST)),
        io_lib:format("~s~n~s~n~s~n~s~n~s~n~s~n~s~n", [
            ("Assert match failed!"),
            ("Expression:"),
            ioindent(4, format_macro_string(Expr)),
            ("Pattern:"),
            ioindent(4, format_macro_string(Pattern)),
            ("Actual:"),
            ioindent(4, format_term(Value, 0, 8))
        ])
    };
%% ?assertException and friends: either the call succeeded unexpectedly
%% or it raised a different exception than expected.
format_info(Failure, {error, {error, {assertException, Info}, ST}}) ->
    case get(unexpected_exception, Info) of
        undefined ->
            Success = get(unexpected_success, Info),
            Term = format_term(Success, 22, 4),
            {
                (format_case(Failure, ST)),
                case multiline(Term) of
                    true ->
                        io_lib:format("~s~n~s", [
                            ("Unexpected success!"),
                            (format_term(Success, 0, 4))
                        ]);
                    false ->
                        [
                            ("Unexpected success:"),
                            " ",
                            (Term)
                        ]
                end
            };
        {E, R, NewST} ->
            {
                (format_case(Failure, ST)),
                io_lib:format("~s~n~s", [
                    ("Unexpected exception:"),
                    (format_exception(E, R, NewST))
                ])
            }
    end;
%% Any other uncaught exception inside the test body.
format_info(Failure, {error, {E, R, ST}}) ->
    {
        format_case(Failure, ST, red),
        [
            ("Uncaught exception! "),
            io_lib:format("~n", []),
            (format_exception(E, R, ST))
        ]
    };
%% Aborts: malformed test spec, failing generator, failing fixture.
format_info(_Failure, {abort, {bad_test, Test}}) ->
    {
        ("Bad test specification:"),
        [
            (io_lib:format("~p", [Test]))
        ]
    };
format_info(Failure, {abort, {generator_failed, {MFA, {E, R, ST}}}}) ->
    {
        (format_case(Failure, [add_info(MFA, ST)])),
        [
            ("Generator failed!"),
            io_lib:format("~n", []),
            (format_exception(E, R, ST))
        ]
    };
format_info(Failure, {abort, {Reason, {E, R, ST}}}) ->
    {
        (format_case(Failure, ST)),
        [
            (case Reason of
                setup_failed -> "Setup failed: ";
                cleanup_failed -> "Cleanup failed: "
            end),
            io_lib:format("~n", []),
            (format_exception(E, R, ST))
        ]
    }.
%% Pretty-print a term and split it into diffable tokens at list/tuple/map
%% punctuation, for feeding to tdiff:diff/2.
diff_prep_term(Term) ->
    Pretty = format_term(Term, 0, 0),
    Flat = iolist_to_binary(Pretty),
    TermSplit = "([,\\[\\]\\{\\}]|\\s+=>\\s+)",
    re:split(Flat, TermSplit, [trim]).
%% Pretty-print Term at the given indent, wrapping at the terminal width
%% minus Outer columns.
format_term(Term, Indent, Outer) ->
    io_lib_pretty:print(Term, Indent, columns() - Outer, -1).
%% Render a tdiff result: deletions wrapped in -...-, insertions in +...+
%% (the colour decoration was stripped, leaving the plain markers).
format_diff([]) ->
    [];
format_diff([{eq, Str}|Rest]) ->
    [Str|format_diff(Rest)];
format_diff([{del, Str}|Rest]) ->
    [(["-", Str, "-"])|format_diff(Rest)];
format_diff([{ins, Str}|Rest]) ->
    [(["+", Str, "+"])|format_diff(Rest)].
%% Header for one failing case: the test description (when present) on
%% its own line, then the source location.
format_case(Failure, ST) -> format_case(Failure, ST, white).
format_case(Failure, ST, Color) ->
    case get(desc, Failure) of
        undefined -> format_source(Failure, ST);
        Desc ->
            io_lib:format("~s~n~s", [
                colorize([$", Desc, $"], cyan),
                colorize(ioindent(4, format_source(Failure, ST)), Color)
            ])
    end.
%% Prefer the recorded {M,F,A} source (enriched with stack info) over the
%% raw stacktrace head.
format_source(Failure, ST) ->
    case get(source, Failure) of
        undefined ->
            format_stack_line(ST);
        MFA ->
            format_stack_line([add_info(MFA, ST)])
    end.
%% Render the top stack frame as "F/A\nFile:Line:" when location info is
%% available, otherwise "M:F/A".
format_stack_line([{M, F, A, I} | _]) ->
    case {get(file, I), get(line, I)} of
        {undefined, undefined} ->
            io_lib:format("~p:~p/~p", [M, F, A]);
        {File, L} ->
            io_lib:format("~p/~p~n~s:~p:", [F, A, File, L])
    end;
format_stack_line([]) ->
    "unknown location".
%% Render an exception with its stacktrace, pretty-printing embedded
%% terms at the terminal width.
%% NOTE(review): the 'lib' module was removed in OTP 21; on modern OTP
%% this should call erl_error:format_exception/6 instead — confirm the
%% supported OTP range before changing.
format_exception(Error, Reason, Stacktrace) ->
    lib:format_exception(1, Error, Reason, Stacktrace,
        fun(_M, _F, _A) -> false end,
        fun(T, I) ->
            io_lib_pretty:print(T, I, columns(), -1)
        end
    ).
%% Captured stdout of the failing test, labelled and indented, or
%% 'undefined' when the test produced none.
format_output(Failure) ->
    case get(output, Failure) of
        <<>> -> undefined;
        undefined -> undefined;
        Output ->
            [
                ("Output:"),
                io_lib:format("~n", []),
                ioindent(2, Output)
            ]
    end.
%% Pretty-print the source text recorded by an assert macro.  Text that
%% still contains a '?' (an unexpanded macro) cannot be parsed, so it is
%% merely stripped of spaces; otherwise it is parsed and re-printed by
%% erl_pp for consistent formatting.
format_macro_string(Str) ->
    case lists:member($?, Str) of
        true ->
            [C || C <- Str, C =/= $ ];
        false ->
            {ok, S, _} = erl_scan:string(Str ++ "."),
            {ok, P} = erl_parse:parse_exprs(S),
            erl_pp:exprs(P)
    end.
% Profiling
%% When profiling is enabled, list the slowest tests (at most
%% profile_max of them), longest first.
print_times(#s{profile_max = Max, cases = Cases, profile = P}) when P ->
    Times = [{T, format_case(C, [])} || C <- Cases, T <- [get(time, C)], is_integer(T)],
    Top = lists:sublist(lists:reverse(lists:sort(Times)), Max),
    case length(Top) of
        0 ->
            ok;
        N ->
            Title = colorize(io_lib:format("Top ~p slowest tests:", [N]), yellow),
            io:format("~n~n  ~s~n", [Title]),
            [print_time(T, C) || {T, C} <- Top]
    end;
print_times(_State) ->
    ok.
%% One slow-test entry: case header, then its duration.
print_time(Ms, Case) ->
    Time = colorize(string:left(format_time(Ms), 10), red),
    io:format("  ~s~n  ~s~n", [Case, Time]).
% Summary
%% One-line run summary: non-zero pass/fail/skip/cancel counts plus the
%% elapsed wall-clock time.  Nothing is printed for a completely empty run.
print_summary(Result, State) ->
    case get_all(Result, [pass, fail, skip, cancel]) of
        [0, 0, 0, 0] ->
            ok;
        [Pass, Fail, Skip, Cancel] ->
            Elapsed = current_time_msecs() - State#s.start,
            Time = format_time(Elapsed),
            io:format("~n~s~n", [iolist_to_binary(iojoin([
                non_zero(Pass, green, plural(Pass, "test", "passed")),
                non_zero(Fail, red, plural(Fail, "test", "failed")),
                non_zero(Skip, yellow, plural(Skip, "test", "skipped")),
                non_zero(Cancel, yellow, plural(Cancel, "fixture", "cancelled")),
                (io_lib:format("(~s)", [Time]))
            ], " "))])
    end.
%% "N noun[s] postfix", pluralising the noun for any count other than 1.
plural(Number, Noun, Postfix) ->
    Text = case Number of 1 -> Noun; Number -> [Noun, "s"] end,
    [i2b(Number), " ", Text, " ", Postfix].
% Utilities
%% Render a millisecond duration as fractional seconds, e.g. "1.50 s".
format_time(Millis) -> io_lib:format("~.2f s", [Millis / 1000]).
%% Values for Keys from Proplist, in the order given (absent -> undefined).
get_all(Proplist, Keys) ->
    [get(K, Proplist) || K <- Keys].
%% Integer to binary, used when assembling the summary line.
i2b(Integer) -> integer_to_binary(Integer).
%% Emit IOData (colourised) only when the count is non-zero, so empty
%% categories disappear from the summary line.
non_zero(Int, Colors, IOData) ->
    case Int of
        0 -> [];
        _ -> colorize(IOData, Colors)
    end.
%% Colour handling has been stripped from this formatter: the input is
%% returned unchanged regardless of the colour(s) requested.  The
%% function is kept so call sites stay untouched.
colorize(String, Color) when is_atom(Color) ->
    colorize(String, [Color]);
colorize(String, Colors) when is_list(Colors) ->
    String.
%% Join iodata items with a separator, silently dropping empty items so
%% suppressed summary categories leave no doubled separators behind.
iojoin([], _Separator) -> [];
iojoin([[] | Rest], Separator) -> iojoin(Rest, Separator);
iojoin([Last], _Separator) -> Last;
iojoin([Head | Rest], Separator) ->
    [Head, Separator, iojoin(Rest, Separator)].
%% Indent iodata: an integer first argument prepends Indent spaces and
%% re-indents after every embedded newline; the recursive clauses carry
%% the pre-built spacing binary instead.
ioindent(Indent, IOData) when is_integer(Indent) ->
    Spacing = iolist_to_binary(lists:duplicate(Indent, 32)),
    [Spacing, ioindent(Spacing, IOData)];
%% A raw newline character in a list: re-indent what follows.
ioindent(Spacing, [10|IOData]) ->
    [10, Spacing|ioindent(Spacing, IOData)];
%% Binaries are handled wholesale via binary:replace (this clause must
%% stay before the generic cons clause).
ioindent(Spacing, Binary) when is_binary(Binary) ->
    binary:replace(Binary, <<"\n">>, <<"\n", Spacing/binary>>, [global]);
ioindent(Spacing, [Sub|IOData]) ->
    [ioindent(Spacing, Sub)|ioindent(Spacing, IOData)];
ioindent(_Spacing, []) ->
    [];
%% Any other leaf (a plain character, etc.) passes through unchanged.
ioindent(_Spacing, Other) ->
    Other.
%% Attach location info to an {M,F,A} by borrowing it from the first
%% stack frame of the same module; default to no info when none matches.
add_info({Mod, Fun, Arity}, [{Mod, _, _, Info} | _]) -> {Mod, Fun, Arity, Info};
add_info({Mod, Fun, Arity}, []) -> {Mod, Fun, Arity, []};
add_info(MFA, [_Frame | Rest]) -> add_info(MFA, Rest).
%% True iff the iodata contains a newline.
%% Fix: the original skipped binaries nested *inside* a list (the
%% catch-all cons clause matched before any binary inspection), so
%% multiline([<<"a\nb">>]) wrongly returned false.  Binaries in head
%% position are now scanned like sub-lists.
multiline([10 | _IOData]) ->
    true;
multiline([Head | IOData]) when is_list(Head); is_binary(Head) ->
    multiline(Head) orelse multiline(IOData);
multiline([_ | IOData]) ->
    multiline(IOData);
multiline(IOData) when is_binary(IOData) ->
    binary:match(IOData, <<"\n">>) =/= nomatch;
multiline([]) ->
    false.
%% Thin proplists wrappers used throughout this module.
get(Key, Proplist) ->
    proplists:get_value(Key, Proplist).
get(Key, Proplist, Default) ->
    proplists:get_value(Key, Proplist, Default).
%% Terminal width for pretty-printing, defaulting to 80 columns when not
%% attached to a tty.  (The original line was corrupted by extraction
%% artifacts appended after "end."; restored here.)
columns() ->
    case io:columns() of
        {ok, Columns} -> Columns;
        _Error -> 80
    end.
-module(q46_50).
-export([
table/1,
table/2,
perm/2,
gray/1,
huffman/1
]).
-compile(export_all).
%% Auxiliary functions.
%% Boolean connectives as plain functions so they can be composed and
%% passed to table/1,2.  Any non-true/false argument counts as "not that
%% value", matching the original catch-all clauses.
and_(A, B) -> A =:= true andalso B =:= true.
or_(A, B) -> A =/= false orelse B =/= false.
nand_(A, B) -> not and_(A, B).
nor_(A, B) -> not or_(A, B).
xor_(A, B) -> A =/= B.
impl_(A, B) -> not (A =:= true andalso B =:= false).
equi_(A, B) -> A =:= B.
%% Logic and Codes
%% Problem 46
%% (**) Define predicates and/2, or/2, nand/2, nor/2, xor/2, impl/2 and equ/2 (for logical equivalence) which succeed or fail according to the result of their respective operations; e.g. and(A,B) will succeed, if and only if both A and B succeed.
%%
%% A logical expression in two variables can then be written as in the following example: and(or(A,B),nand(A,B)).
%%
%% Now, write a predicate table/3 which prints the truth table of a given logical expression in two variables.
%%
%% Example:
%%
%% (table A B (and A (or A B)))
%% true true true
%% true fail true
%% fail true fail
%% fail fail fail
%% Example in Haskell:
%%
%% λ> table (\a b -> (and' a (or' a b)))
%% True True True
%% True False True
%% False True False
%% False False False
%%
%% Problem 47
%% (*) Truth tables for logical expressions (2).
%%
%% Continue problem P46 by defining and/2, or/2, etc as being operators. This allows to write the logical expression in the more natural way, as in the example: A and (A or not B). Define operator precedence as usual; i.e. as in Java.
%%
%% Example:
%%
%% * (table A B (A and (A or not B)))
%% true true true
%% true fail true
%% fail true fail
%% fail fail fail
%% Example in Haskell:
%%
%% λ> table2 (\a b -> a `and'` (a `or'` not b))
%% True True True
%% True False True
%% False True False
%% False False False
%%
%% Problem 48
%% (**) Truth tables for logical expressions (3).
%%
%% Generalize problem P47 in such a way that the logical expression may contain any number of logical variables. Define table/2 in a way that table(List,Expr) prints the truth table for the expression Expr, which contains the logical variables enumerated in List.
%%
%% Example:
%%
%% * (table (A,B,C) (A and (B or C) equ A and B or A and C))
%% true true true true
%% true true fail true
%% true fail true true
%% true fail fail true
%% fail true true true
%% fail true fail true
%% fail fail true true
%% fail fail fail true
%% Example in Haskell:
%%
%% λ> tablen 3 (\[a,b,c] -> a `and'` (b `or'` c) `equ'` a `and'` b `or'` a `and'` c)
%% -- infixl 3 `equ'`
%% True True True True
%% True True False True
%% True False True True
%% True False False True
%% False True True True
%% False True False True
%% False False True True
%% False False False True
%%
%% -- infixl 7 `equ'`
%% True True True True
%% True True False True
%% True False True True
%% True False False False
%% False True True False
%% False True False False
%% False False True False
%% False False False False
%%
%% Table and perm functions cover problems 46,47,48
%% Print the truth table of a boolean function.  Func receives a list of
%% N booleans (N defaults to 2) and returns a boolean; one row
%% "V1 V2 ... Result" is printed per assignment.
%% Fixes: unused variables 'X' (in the fun) and 'L' (in perm's terminal
%% clause) produced compiler warnings; both are now underscored.
table(Func) ->
    table(2, Func).

table(Len, Func) ->
    perm(Len, Func).

%% Print a single truth-table row: the variable values followed by the
%% value of Func applied to them.
table(print, Items, Func) ->
    PrintStr = lists:flatten(lists:map(fun(_) -> "~w " end, Items)),
    io:format(PrintStr ++ "~w~n", Items ++ [Func(Items)]).

%% Enumerate every true/false assignment of length L, accumulating the
%% chosen values in Res and printing each completed row.
perm(L, Func) ->
    perm(L, [], L, Func).

perm(_L, Res, 0, Func) ->
    table(print, Res, Func);
perm(L, Res, Acc, Func) ->
    perm(L, [true | Res], Acc - 1, Func),
    perm(L, [false | Res], Acc - 1, Func).
%% Problem 49
%% (**) Gray codes.
%%
%% An n-bit Gray code is a sequence of n-bit strings constructed according to certain rules. For example,
%%
%% n = 1: C(1) = ['0','1'].
%% n = 2: C(2) = ['00','01','11','10'].
%% n = 3: C(3) = ['000','001','011','010',´110´,´111´,´101´,´100´].
%% Find out the construction rules and write a predicate with the following specification:
%%
%% % gray(N,C) :- C is the N-bit Gray code
%% Can you apply the method of "result caching" in order to make the predicate more efficient, when it is to be used repeatedly?
%%
%% Example in Haskell:
%%
%% λ> gray 3
%% ["000","001","011","010","110","111","101","100"]
%% N-bit Gray code: successive code words differ in exactly one bit.
%% C(n) is C(n-1) with "0" prefixed, followed by the *reversed list*
%% C(n-1) with "1" prefixed.
%% Fix: the original prefixed "1" to each *string reversed character-wise*
%% ("1" ++ lists:reverse(X)) instead of reversing the list order, so
%% gray(2) yielded ["00","01","10","11"] — not a Gray code ("01"->"10"
%% flips two bits).
gray(0) ->
    [""];
gray(N) when N > 0 ->
    Prev = gray(N - 1),
    ["0" ++ Code || Code <- Prev] ++
        ["1" ++ Code || Code <- lists:reverse(Prev)].
%% Problem 50
%% (***) Huffman codes.
%%
%% We suppose a set of symbols with their frequencies, given as a list of fr(S,F) terms. Example: [fr(a,45),fr(b,13),fr(c,12),fr(d,16),fr(e,9),fr(f,5)]. Our objective is to construct a list hc(S,C) terms, where C is the Huffman code word for the symbol S. In our example, the result could be Hs = [hc(a,'0'), hc(b,'101'), hc(c,'100'), hc(d,'111'), hc(e,'1101'), hc(f,'1100')] [hc(a,'01'),...etc.]. The task shall be performed by the predicate huffman/2 defined as follows:
%%
%% % huffman(Fs,Hs) :- Hs is the Huffman code table for the frequency table Fs
%% Example in Haskell:
%%
%% λ> huffman [('a',45),('b',13),('c',12),('d',16),('e',9),('f',5)]
%% [('a',"0"),('b',"101"),('c',"100"),('d',"111"),('e',"1101"),('f',"1100")]
%% Huffman coding: repeatedly merge the two lightest entries into a
%% {Left, Right} tree node whose weight is their sum, re-sorting by
%% weight each round, until one tree remains; then read off the codes.
%% NOTE(review): codes/1 expects the final tree to be a 2-tuple, so a
%% single-symbol input would crash there — confirm callers always pass
%% at least two symbols.
huffman(L) ->
    huffman(next, lists:sort(fun({_, A}, {_, B}) -> A =< B end, L)).
huffman(next, [{Tree, _}]) ->
    codes(Tree);
huffman(next, [{El1, W1}, {El2, W2} | Res]) ->
    huffman(next, lists:sort(fun({_, A}, {_, B}) -> A =< B end, [{{El1, El2}, W1+W2} | Res])).
%% Walk the Huffman tree, accumulating the bit string for each leaf:
%% "0" on the left branch, "1" on the right.  (The original final line
%% was corrupted by extraction artifacts appended after "[{Symbol,
%% Bits}]."; restored here.)
codes({L, R}) ->
    codes(L, "0") ++ codes(R, "1").
codes({L, R}, Bits) ->
    codes(L, Bits ++ "0") ++ codes(R, Bits ++ "1");
codes(Symbol, Bits) ->
    [{Symbol, Bits}].
%% --- Day 6: Probably a Fire Hazard ---
%%
%% Because your neighbors keep defeating you in the holiday house
%% decorating contest year after year, you've decided to deploy one
%% million lights in a 1000x1000 grid.
%%
%% Furthermore, because you've been especially nice this year, Santa
%% has mailed you instructions on how to display the ideal lighting
%% configuration.
%%
%% Lights in your grid are numbered from 0 to 999 in each direction;
%% the lights at each corner are at 0,0, 0,999, 999,999, and
%% 999,0. The instructions include whether to turn on, turn off, or
%% toggle various inclusive ranges given as coordinate pairs. Each
%% coordinate pair represents opposite corners of a rectangle,
%% inclusive; a coordinate pair like 0,0 through 2,2 therefore refers
%% to 9 lights in a 3x3 square. The lights all start turned off.
%%
%% To defeat your neighbors this year, all you have to do is set up
%% your lights by doing the instructions Santa sent you in order.
%%
%% For example:
%%
%% turn on 0,0 through 999,999 would turn on (or leave on) every
%% light. toggle 0,0 through 999,0 would toggle the first line of
%% 1000 lights, turning off the ones that were on, and turning on the
%% ones that were off. turn off 499,499 through 500,500 would turn
%% off (or leave off) the middle four lights. After following the
%% instructions, how many lights are lit?
%%
%% --- Part Two ---
%%
%% You just finish implementing your winning light pattern when you
%% realize you mistranslated Santa's message from Ancient Nordic
%% Elvish.
%%
%% The light grid you bought actually has individual brightness
%% controls; each light can have a brightness of zero or more. The
%% lights all start at zero.
%%
%% The phrase turn on actually means that you should increase the
%% brightness of those lights by 1.
%%
%% The phrase turn off actually means that you should decrease the
%% brightness of those lights by 1, to a minimum of zero.
%%
%% The phrase toggle actually means that you should increase the
%% brightness of those lights by 2.
%%
%% What is the total brightness of all lights combined after following
%% Santa's instructions?
%%
%% For example:
%%
%% turn on 0,0 through 0,0 would increase the total brightness by 1.
%% toggle 0,0 through 999,999 would increase the total brightness by
%% 2000000.
-module(day6).
-compile([export_all]).
%% Solve part 1 using the puzzle input on disk.
solve_part1() ->
    solve_part1(input()).
%% Read "input/day6" and split it into instruction lines.
input() ->
    {ok, Input} = file:read_file("input/day6"),
    string:tokens(binary_to_list(Input), "\n").
%% The 1000x1000 grid lives in the process dictionary: key {X,Y}, value
%% 'on' for a lit light.  erase/0 clears the whole dictionary first so
%% repeated runs start from an all-off grid.
solve_part1(Input) ->
    erase(),
    Ops = parse_operations(Input),
    ok = eval(Ops),
    length(get_keys(on)).
%% Parse instruction lines into {Op, {X1,Y1}, {X2,Y2}} tuples, where Op
%% is one of "turn on", "turn off" or "toggle" and the coordinate pairs
%% are the inclusive corners of the affected rectangle.
parse_operations(Lines) ->
    Pattern = "(turn off|toggle|turn on) (\\d+),(\\d+)"
              " through (\\d+),(\\d+)",
    [begin
         {match, [Op, X1, Y1, X2, Y2]} =
             re:run(Line, Pattern, [{capture, all_but_first, list}]),
         {Op,
          {list_to_integer(X1), list_to_integer(Y1)},
          {list_to_integer(X2), list_to_integer(Y2)}}
     end || Line <- Lines].
%% Apply each operation to every light in its rectangle.  The list
%% comprehension is used purely for its side effects on the process
%% dictionary; its result is discarded.
eval(Ops) ->
    [ op(Op, {X, Y}) || {Op, {X1, Y1}, {X2, Y2}} <- Ops,
                        X <- lists:seq(X1, X2),
                        Y <- lists:seq(Y1, Y2)],
    ok.
%% A lit light is pdict value 'on'; "turn off" stores 'undefined' rather
%% than erasing the key, which is equivalent for get_keys(on) counting
%% (get/1 returns 'undefined' for absent keys too).
op("turn on", Pos) ->
    put(Pos, on);
op("turn off", Pos) ->
    put(Pos, undefined);
op("toggle", Pos) ->
    case get(Pos) of
        on -> put(Pos, undefined);
        undefined -> put(Pos, on)
    end.
-include_lib("eunit/include/eunit.hrl").
%% EUnit test generator — intentionally empty.  (The original final line
%% was corrupted by extraction artifacts appended after "[]."; restored.)
day6_test_() ->
    [].
%%% gm
%%%
%%% Functions for interacting with GraphicsMagick
-module(gm).
-export([
identify_explicit/2,
identify/2,
convert/3,
mogrify/2
]).
%% =====================================================
%% API
%% =====================================================
%% Explicit Identify
%%
%% Get explicit image characteristics in a list to be parsed by proplists:get_value
%%
%% Example:
%%
%% identify_explicit("my.jpg", [filename, width, height, type]}).
%%
%% Which returns a list of characteristics to be retrived with proplists:get_value
%%
%% Run "identify -format ..." and parse the requested characteristics
%% into a proplist (see parse_identify_explicit/1).
%% NOTE(review): unlike exec_cmd/3, this runs the bare "identify" binary
%% without the "gm " prefix — confirm whether "gm identify" was intended.
%% NOTE(review): the command line is built from File via os:cmd/1, so an
%% untrusted filename could inject shell commands.
identify_explicit(File, Options) ->
    Template = "identify -format :format_string :file",
    TemplateOpts = [{file, File}, {format_string, identify_format_string(Options)}],
    Result = os:cmd(bind_data(Template, TemplateOpts, [escape])),
    case cmd_error(Result) of
        {error, Msg} -> {error, Msg};
        no_error -> parse_identify_explicit(Result)
    end.
%% Identify
identify(File, Options) ->
Template = "identify {{options}} :file",
TemplateOpts = [{file, File}],
exec_cmd(Template, TemplateOpts, Options).
%% Convert
convert(File, Converted, Options) ->
Template = "convert {{options}} :input_file :output_file",
TemplateOpts = [{input_file, File}, {output_file, Converted}],
exec_cmd(Template, TemplateOpts, Options).
%% Mogrify
mogrify(File, Options) ->
Template = "mogrify {{options}} :file",
TemplateOpts = [{file, File}],
exec_cmd(Template, TemplateOpts, Options).
%% =====================================================
%% INTERNAL FUNCTIONS
%% =====================================================
%% Render a full `gm` command from Template: bind the extra data
%% (quoted for the shell), substitute the {{options}} marker with the
%% rendered switches, run it, and parse the command output.
exec_cmd(Template, ExtraOptions, Options) ->
    Bound = bind_data(Template, ExtraOptions, [escape]),
    WithOpts = re:replace(Bound, "{{options}}", opt_string(Options),
                          [{return, list}]),
    parse_result(os:cmd("gm " ++ WithOpts)).
%% Build the -format argument: one "key: <code>" entry per requested
%% option, joined by the "--SEP--" marker the parser splits on later.
identify_format_string(Options) ->
    string:join([kv_string(Opt) || Opt <- Options], "--SEP--").
%% Parse `identify -format` output produced via
%% identify_format_string/1: strip newlines, split on the "--SEP--"
%% marker, and convert each "key: value" part into a {Key, Value}
%% tuple.
%%
%% Fix: pass `global` to re:replace/4 so that *every* newline is
%% removed; without it only the first match is replaced, which breaks
%% parsing whenever the command output spans multiple lines.
parse_identify_explicit(Str) ->
    Stripped = re:replace(Str, "\n", "", [global, {return, list}]),
    FormatParts = re:split(Stripped, "--SEP--", [{return, list}]),
    [part_to_tuple(X) || X <- FormatParts].
%% Render one requested option as "key: <format-code>" so the command
%% output can be parsed back into a {Key, Value} pair.
kv_string(Option) ->
    atom_to_list(Option) ++ ": " ++ gm_format_char:val(Option).
%% Convert one "key: value" part of the identify output into a
%% {Key, Value} tuple, coercing known numeric values (see
%% converted_value/2).
%%
%% Fix: split into at most two parts so values that themselves contain
%% ": " no longer crash the [K, V] match -- everything after the first
%% separator is kept as the value.
%%
%% NOTE(review): list_to_atom/1 on command output can grow the atom
%% table; the keys originate from atoms supplied by the caller (see
%% kv_string/1), so list_to_existing_atom/1 is likely safe here --
%% confirm before tightening.
part_to_tuple(X) ->
    [K, V] = re:split(X, ": ", [{return, list}, {parts, 2}]),
    K1 = list_to_atom(K),
    {K1, converted_value(K1, V)}.
%% Coerce a raw identify value: width and height become integers,
%% anything else is passed through as the parsed string.
converted_value(Key, Value) when Key =:= width; Key =:= height ->
    list_to_integer(Value);
converted_value(_Key, Value) ->
    Value.
%% Render the option list as the switch portion of the command string.
%% Each option is looked up in gm_options, which yields either a bare
%% switch or a switch plus an argument template and data to bind.
opt_string(Options) ->
    lists:foldl(fun append_opt/2, "", Options).

%% Append a single rendered option (space-separated) to the
%% accumulated switch string.
append_opt(Option, Acc) ->
    case gm_options:opt(Option) of
        {Switch, Template, Data} ->
            Arg = lists:concat(["'", bind_data(Template, Data, []), "'"]),
            string:join([Acc, Switch, Arg], " ");
        {Switch} ->
            string:join([Acc, Switch], " ")
    end.
%% Substitute each {Key, Value} pair into Template: the first
%% occurrence of ":Key" is replaced with the stringified value. With
%% the [escape] option the value is additionally wrapped in single
%% quotes so the shell treats it as a single word (this is what keeps
%% user data from being executed -- see test_escapes_hacking/0).
bind_data(Template, [], _Options) ->
    Template;
bind_data(Template, [{Key, Value} | Rest], Options) ->
    Placeholder = ":" ++ atom_to_list(Key),
    Replacement =
        case Options of
            [escape] -> lists:concat(["'", stringify(Value), "'"]);
            _ -> stringify(Value)
        end,
    Bound = re:replace(Template, Placeholder, Replacement, [{return, list}]),
    bind_data(Bound, Rest, Options).
%% Render a value as a string: integers and atoms are converted,
%% anything else is assumed to already be a string and passed through.
stringify(N) when is_integer(N) -> integer_to_list(N);
stringify(A) when is_atom(A) -> atom_to_list(A);
stringify(S) -> S.
%% Map known failure phrases in command output to error atoms;
%% returns no_error when none of the phrases match.
cmd_error(Output) ->
    KnownErrors = [{"command not found", command_not_found},
                   {"No such file", file_not_found},
                   {"Request did not return an image", no_image_returned},
                   {"unable to open image", unable_to_open}],
    parse_error(Output, KnownErrors).
%% Scan the candidate list for a phrase occurring in the command
%% output; return {error, Tag} for the first hit, or no_error once the
%% candidates are exhausted.
parse_error(_Output, []) ->
    no_error;
parse_error(Output, [{Phrase, Tag} | Rest]) ->
    case re:run(Output, Phrase) of
        {match, _} -> {error, Tag};
        nomatch -> parse_error(Output, Rest)
    end.
%% Interpret command output: known error phrases become {error, Tag},
%% empty output means success, and any other output is surfaced as
%% {error, Output} -- these gm commands are silent when they succeed.
parse_result(Output) ->
    case cmd_error(Output) of
        {error, _} = Error -> Error;
        no_error when Output =:= [] -> ok;
        no_error -> {error, Output}
    end.
%% =====================================================
%% UNIT TESTS
%% =====================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% EUnit generator listing the module's smoke tests.
gm_test_() ->
    [
        {"Returns a file_not_found error", fun test_file_not_found/0},
        {"Gets explicit image info", fun test_image_info/0},
        {"Doesn't get hacked", fun test_escapes_hacking/0}
    ].
%% identify on a missing file must surface the mapped error atom
%% (see cmd_error/1).
test_file_not_found() ->
    ?assertMatch({error, file_not_found}, identify("doesntexist.jpg", [])).
%% identify_explicit returns a proplist with integer-coerced width.
%% NOTE: depends on a local fixture image (sandbox/cyberbrain.jpg).
test_image_info() ->
    Img = "sandbox/cyberbrain.jpg",
    Info = identify_explicit(Img, [width]),
    ?assertMatch(600, proplists:get_value(width, Info)).
%% Option values are single-quoted by bind_data/3, so shell command
%% substitution embedded in user data must not execute.
test_escapes_hacking() ->
    mogrify("baz", [{output_directory, "$(touch hackingz)"}]),
    ?assertMatch(false, filelib:is_file("hackingz")).
-endif. | src/gm.erl | 0.531696 | 0.402803 | gm.erl | starcoder |
%% Simple "QOI - Quite Okay Image" format.
%%
%% Based on https://github.com/phoboslab/qoi.
%%
%% Terminology:
%%
%% * Pixel: 3 or 4 8-bit bytes, stored as a `binary()`, either on its
%% own or as part of a larger binary.
%%
%% * Channels: Number of bytes per pixel. Three (Red, Green, Blue),
%% and four (Red, Green, Blue, Alpha) are supported.
%%
%% * Chunk: One QOI code. These may be from one to five bytes in
%% length.
%%
%% The encode/2 function converts from a binary() of Pixels to a
%% binary() of Chunks. The decode/2 function converts from a binary()
%% of Chunks to a binary() of Pixels.
%%
%% The read/1 and write/2,4 functions will load or store files
%% containing QOI chunks, prefixed with an informational header,
%% on-disk.
%%
%% Chunk Encoding:
%%
%% 0 0 Reference:6
%% 0 1 R:2 G:2 B:2
%% 1 0 G:6 R:4 B:4
%%
%% 1 1 1 1 1 1 1 0 R:8 G:8 B:8
%% 1 1 1 1 1 1 1 1 R:8 G:8 B:8 A:8
%%
%% 1 1 Run:6
-module(eqoi).
-export([
encode/2,
decode/2,
read/1,
write/2,
write/4,
verify/2
]).
-compile({inline, [{index_add, 2},
{wrap_diff, 2},
{encode_run, 1}]}).
%% RGB or RGBA
-type pixel() :: <<_:24>> | <<_:32>>.
-type channels() :: 3..4.
%% The hash of a pixel
-define(INDEX_SIZE, 64).
-type hash() :: 0..(?INDEX_SIZE-1).
%% map from hash to pixel value
-type index() :: #{hash() := pixel()}.
%% Encoder/decoder state
-record(eqoi_state,
{
%% Most recently encoded/decoded pixel value
previous :: pixel(),
%% Number of pixels matching `previous` since the first time
%% that value was seen (not including the first time)
run :: integer(),
%% Pixel value index.
index :: index()
}).
%% QOI format final file bytes
-define(FILE_TAIL, <<0,0,0,0,0,0,0,1>>).
%%% SETUP
%% Build the initial encoder/decoder state: the given pixel as the
%% implicit "previous" value, no pending run, and the index pre-seeded
%% with that pixel.
-spec state_initial(pixel()) -> #eqoi_state{}.
state_initial(InitPixel) ->
    #eqoi_state{previous = InitPixel,
                index = index_add(InitPixel, #{}),
                run = 0}.
%% Create an opaque, black pixel (the default "previous" pixel for the
%% encoder/decoder start state), with the correct number of channels.
-spec pixel_initial(channels()) -> pixel().
%% Opaque black, sized to the channel count; only the 4-channel form
%% carries an alpha byte.
pixel_initial(3) -> <<0, 0, 0>>;
pixel_initial(4) -> <<0, 0, 0, 255>>.
%%% INDEX MAINTENANCE
%% Store Pixel in the index at its hash slot, evicting whatever pixel
%% previously occupied that slot.
-spec index_add(pixel(), index()) -> index().
index_add(Pixel, Index) ->
    maps:put(pixel_hash(Pixel), Pixel, Index).
%% Hash a pixel into an index slot. 3-channel pixels hash as if fully
%% opaque (alpha = 255), so an RGB pixel and the equivalent opaque
%% RGBA pixel share a slot.
-spec pixel_hash(pixel()) -> hash().
pixel_hash(<<R, G, B, A>>) ->
    (R * 3 + G * 5 + B * 7 + A * 11) rem ?INDEX_SIZE;
pixel_hash(<<R, G, B>>) ->
    (R * 3 + G * 5 + B * 7 + 255 * 11) rem ?INDEX_SIZE.
%%% ENCODING
%% Encode a raw pixel buffer as a QOI chunk stream. Channels selects
%% 3-byte (RGB) or 4-byte (RGBA) pixels.
-spec encode(channels(), binary()) -> binary().
encode(Channels, Image) ->
    InitState = state_initial(pixel_initial(Channels)),
    encode_image(Channels, Image, InitState, <<>>).
%% Feed the image to the encoder one pixel at a time, appending chunks
%% to Acc as they are produced; when the input is exhausted, flush any
%% pending run chunk.
%%
%% encode_pixel/3 appends onto Acc directly so the runtime can grow
%% the accumulator binary in place instead of copying it to attach
%% chunks returned separately.
-spec encode_image(channels(), binary(), #eqoi_state{}, binary()) -> binary().
encode_image(Channels, Image, State, Acc) ->
    case Image of
        <<>> ->
            maybe_add_run(State#eqoi_state.run, Acc);
        <<Pixel:Channels/binary, Rest/binary>> ->
            {NewAcc, NewState} = encode_pixel(Pixel, State, Acc),
            encode_image(Channels, Rest, NewState, NewAcc)
    end.
%% Apply a pixel to the encoder state. Zero, one, or two chunks may be
%% produced, and the encoder state may be updated. If a chunk is
%% produced, the encoder state's run length will be reset to 0.
%%
%% Zero chunks will be produced if the pixel has the same component
%% values as the previous pixel applied to the encoder state AND the
%% run length has NOT exceeded the maximum encodable run length.
%%
%% Two chunks will be produced if the pixel does not have the same
%% component values as the previous pixel AND the run length is
%% non-zero.
%%
%% One chunk will be produced in the cases not covered above (run
%% length exceeded, or run length zero).
%% Chunk layouts produced below (tag bits first):
%%   00 Hash:6           -- index reference
%%   01 R:2 G:2 B:2      -- small diff (each stored shifted +2)
%%   10 G:6 R-G:4 B-G:4  -- medium diff (stored shifted +32 / +8)
%%   254, R G B          -- RGB substitution, alpha kept
%%   255, R G B A        -- RGBA substitution
-spec encode_pixel(pixel(), #eqoi_state{}, binary()) ->
          {binary(), #eqoi_state{}}.
encode_pixel(Pixel, State=#eqoi_state{previous=Pixel, run=Run}, Acc) ->
    %% previous pixel matches this pixel
    case Run < 61 of
        true ->
            %% no new chunk to write; just lengthen the run
            {Acc, State#eqoi_state{run = 1 + Run}};
        false ->
            %% max run size; write a run chunk and reset the counter
            %% (encode_run/1 stores Length-1, so 62 is the ceiling)
            {<<Acc/binary, (encode_run(Run+1))/binary>>,
             State#eqoi_state{run=0}}
    end;
encode_pixel(Pixel, State=#eqoi_state{run=Run, index=Index}, Acc) ->
    %% not a match for previous pixel
    Hash = pixel_hash(Pixel),
    case Index of
        #{Hash := Pixel} ->
            %% reference a pixel value already in the index
            NewAcc = <<(maybe_add_run(Run, Acc))/binary, 0:2, Hash:6>>,
            NewIndex = Index;
        _ ->
            %% describe the new pixel value
            NewIndex = Index#{Hash => Pixel},
            case component_diffs(Pixel, State#eqoi_state.previous) of
                {R, G, B, 0} when R >= -2, R =< 1,
                                  G >= -2, G =< 1,
                                  B >= -2, B =< 1 ->
                    %% small modification
                    %% +2 = diffs are shifted up to be encoded unsigned
                    NewAcc = <<(maybe_add_run(Run, Acc))/binary,
                               1:2, (R+2):2, (G+2):2, (B+2):2>>;
                {R, G, B, 0} when G >= -32, G =< 31,
                                  (R-G) >= -8, (R-G) =< 7,
                                  (B-G) >= -8, (B-G) =< 7 ->
                    %% medium modification
                    %% +32,+8 = diffs are shifted up to be encoded unsigned
                    NewAcc = <<(maybe_add_run(Run, Acc))/binary,
                               2:2, (G+32):6, (R-G+8):4, (B-G+8):4>>;
                {_, _, _, 0} ->
                    %% component substitution, no alpha change
                    NewAcc = <<(maybe_add_run(Run, Acc))/binary,
                               254, Pixel:3/binary>>;
                _ when size(Pixel) == 4 ->
                    %% component substitution, alpha change
                    %% 'when' clause is not necessary - diff won't
                    %% show an alpha change if the pixel is only 3
                    %% bytes wide, but the guard is kept here to
                    %% ensure that
                    NewAcc = <<(maybe_add_run(Run, Acc))/binary,
                               255, Pixel/binary>>
            end
    end,
    %% Any emitted chunk resets the run; the new pixel becomes the
    %% reference for subsequent diffs.
    {NewAcc, State#eqoi_state{previous=Pixel, run=0, index=NewIndex}}.
%% Flush a pending run: if Length pixels have been repeating, append a
%% run-length chunk to Acc; a zero length means nothing is pending.
-spec maybe_add_run(integer(), binary()) -> binary().
maybe_add_run(0, Acc) ->
    Acc;
maybe_add_run(Length, Acc) ->
    <<Acc/binary, (encode_run(Length))/binary>>.

%% Encode a run-length chunk: 2-bit tag 3 followed by the 6-bit
%% length. A run of length 0 is never encoded, so the stored value is
%% Length - 1, giving an encodable range of 1..62.
%%
%% Fix: the spec previously named the bare atom `integer` (a singleton
%% atom type) instead of the built-in type `integer()`.
-spec encode_run(integer()) -> binary().
encode_run(Length) when Length =< 62 ->
    <<3:2, (Length - 1):6>>.
%%% DECODING
%% Decode a QOI chunk stream into a pixel buffer with Channels bytes
%% per pixel.
%%
%% Every chunk encoding describes a modification of the previous
%% pixel, so starting from a correctly-sized initial pixel guarantees
%% each decoded pixel carries the right number of channels.
-spec decode(channels(), binary()) ->
          {ok, binary()} | {error, proplists:proplist()}.
decode(Channels, Chunks) ->
    Start = state_initial(pixel_initial(Channels)),
    case decode_loop(Chunks, Start, <<>>, 0) of
        {ok, Pixels, _State} -> {ok, Pixels};
        {error, _} = Error -> Error
    end.
%% Decode one chunk at a time, accumulating the decoded pixels in Acc.
%%
%% The final argument, Offset, is only used for debugging. It keeps
%% track of how many bytes have been processed from the Chunks binary,
%% so that information can be included in any potential error return.
%%
%% This is a different style than the encoder, because while we knew
%% how many bytes were in a pixel without looking at its value, we
%% don't know how many are in a chunk until we examine the first few
%% bits. This could be rewritten to take one byte at a time, and track
%% decoding state until a full chunk is read, but the pattern matching
%% works out well the way it's written here.
%% NOTE: the literal 254/255 clauses must stay before the <<3:2, _:6>>
%% run clause -- both of those bytes also begin with tag bits 11.
-spec decode_loop(binary(), #eqoi_state{}, binary(), integer()) ->
          {ok, binary(), #eqoi_state{}} | {error, proplists:proplist()}.
decode_loop(<<>>, State, Acc, _) ->
    %% accept a chunk stream with no file tail, for easier testing
    {ok, Acc, State};
decode_loop(?FILE_TAIL, State, Acc, _) ->
    {ok, Acc, State};
decode_loop(<<0:2, Hash:6, Rest/binary>>,
            State=#eqoi_state{index=Index},
            Acc, Offset) ->
    %% indexed pixel
    case Index of
        #{Hash := Pixel} ->
            decode_loop(Rest, State#eqoi_state{previous=Pixel},
                        <<Acc/binary, Pixel/binary>>, Offset+1);
        _ ->
            %% the encoder never references an empty slot, so this is
            %% a corrupt or truncated stream
            {error, [{reason, {bad_pixel_index, Index}},
                     {decoder_state, State},
                     {chunk_bytes_processed, Offset},
                     {chunk_bytes_remaining, Rest},
                     {decoded_pixels, Acc}]}
    end;
decode_loop(<<1:2, Rd:2, Gd:2, Bd:2,Rest/binary>>,
            State=#eqoi_state{previous=Pixel, index=Index},
            Acc, Offset) ->
    %% small modification
    %% -2 = diffs are shifted up to be encoded unsigned
    NewPixel = mod_pixel(Pixel, Rd-2, Gd-2, Bd-2, 0),
    decode_loop(Rest,
                State#eqoi_state{previous=NewPixel,
                                 index=index_add(NewPixel, Index)},
                <<Acc/binary, NewPixel/binary>>, Offset+1);
decode_loop(<<2:2, Gd:6, Rd:4, Bd:4, Rest/binary>>,
            State=#eqoi_state{previous=Pixel, index=Index},
            Acc, Offset) ->
    %% medium modification
    %% -32 = green shift to unsigned encoding
    %% -40 = red and blue shift to unsigned encoding, plus green shift
    NewPixel = mod_pixel(Pixel, Gd+Rd-40, Gd-32, Gd+Bd-40, 0),
    decode_loop(Rest,
                State#eqoi_state{previous=NewPixel,
                                 index=index_add(NewPixel, Index)},
                <<Acc/binary, NewPixel/binary>>, Offset+2);
decode_loop(<<254, RGB:3/binary, Rest/binary>>,
            State=#eqoi_state{previous=Pixel, index=Index},
            Acc, Offset) ->
    %% RGB substitution: replace the color bytes, keeping the previous
    %% alpha (and therefore the channel count) intact
    case Pixel of
        <<_,_,_>> -> NewPixel = RGB;
        <<_,_,_,A>> -> NewPixel = <<RGB:3/binary,A>>
    end,
    decode_loop(Rest,
                State#eqoi_state{previous=NewPixel,
                                 index=index_add(NewPixel, Index)},
                <<Acc/binary, NewPixel/binary>>, Offset+4);
decode_loop(<<255, NewPixel:4/binary, Rest/binary>>,
            State=#eqoi_state{index=Index},
            Acc, Offset) ->
    %% full RGBA substitution (only produced for 4-channel images)
    decode_loop(Rest,
                State#eqoi_state{previous=NewPixel,
                                 index=index_add(NewPixel, Index)},
                <<Acc/binary, NewPixel/binary>>, Offset+5);
decode_loop(<<3:2, Length:6, Rest/binary>>,
            State=#eqoi_state{previous=Pixel},
            Acc, Offset) ->
    %% short run
    %% (see encode_run/1 for "+1" explanation)
    decode_loop(Rest, State,
                <<Acc/binary, (binary:copy(Pixel, Length+1))/binary>>,
                Offset+1).
%% PIXEL VALUE MANIPULATION
%% Compute the difference (X-Y) in two pixel components. This uses the
%% wrap-around math described by the QOI spec. That is the difference
%% between 0 and 255 is either 1 or -1, depending on which way you're
%% wrapping.
%% Fix: the lower boundary test was `D < -127`, which mapped a true
%% difference of -128 to +128 -- outside the signed byte range
%% -128..127. With `D < -128`, -128 is kept as is. The two values are
%% congruent mod 256 and both fall outside every diff-range guard in
%% encode_pixel/3, so encoded output is unchanged; the result merely
%% honors the documented range now.
-spec wrap_diff(integer(), integer()) -> integer().
wrap_diff(X, Y) ->
    case X - Y of
        D when D > 127 -> D - 256;
        D when D < -128 -> D + 256;
        D -> D
    end.
%% Component-wise wrap-around difference of two pixels; a 3-channel
%% comparison reports an alpha difference of 0.
-spec component_diffs(pixel(), pixel()) ->
          {integer(), integer(), integer(), integer()}.
component_diffs(<<R1, G1, B1, A1>>, <<R2, G2, B2, A2>>) ->
    {wrap_diff(R1, R2), wrap_diff(G1, G2), wrap_diff(B1, B2),
     wrap_diff(A1, A2)};
component_diffs(<<R1, G1, B1>>, <<R2, G2, B2>>) ->
    {wrap_diff(R1, R2), wrap_diff(G1, G2), wrap_diff(B1, B2), 0}.
%% Apply differences to pixel components.
%%
%% We don't have to do any checking around <0 or >255, because putting
%% the integer in the binary is going to limit it to the lowest eight
%% bits, which does what we would have done with arithmetic.
-spec mod_pixel(pixel(), integer(), integer(), integer(), integer()) ->
pixel().
%% Apply component deltas to a pixel. Each sum is stored into an 8-bit
%% segment, which truncates to the low byte and thus implements the
%% required mod-256 wrap-around with no explicit range checks. The
%% 3-byte clause only accepts a zero alpha delta, so the decoder can
%% never change the channel count of the previous pixel.
mod_pixel(<<R, G, B, A>>, Rd, Gd, Bd, Ad) ->
    <<(R + Rd), (G + Gd), (B + Bd), (A + Ad)>>;
mod_pixel(<<R, G, B>>, Rd, Gd, Bd, 0) ->
    <<(R + Rd), (G + Gd), (B + Bd)>>.
%% READING AND WRITING FILES
%% Read a QOI-format file. The proplist in the return value contains
%% elements for `width`, `height`, and `channels`, as well as `pixels`
%% which will be a binary containing Channels bytes per pixel in the
%% image.
-spec read(string()) -> {ok|error, proplists:proplist()}.
read(Filename) ->
    %% Crash (badmatch) on read failure or a malformed header; decode
    %% failures are reported with the header fields prepended.
    {ok, <<"qoif",
           Width:32/unsigned, Height:32/unsigned,
           Channels,
           _ColorSpace,          %% color-space byte is ignored for now
           Chunks/binary>>} = file:read_file(Filename),
    Header = [{width, Width}, {height, Height}, {channels, Channels}],
    case decode(Channels, Chunks) of
        {ok, Pixels} -> {ok, [{pixels, Pixels} | Header]};
        {error, Details} -> {error, Header ++ Details}
    end.
%% Given a proplist in the shape returned by read/1 (width, height,
%% channels and pixels entries are all required), write a QOI-encoded
%% file to Filename.
%%
%% Fix: the failure type in the spec was `{error | term()}` -- a
%% one-tuple -- instead of the `{error, term()}` actually produced by
%% file:write_file/2 via write/4.
-spec write(proplists:proplist(), string()) -> ok | {error, term()}.
write(Props, Filename) ->
    {width, Width} = proplists:lookup(width, Props),
    {height, Height} = proplists:lookup(height, Props),
    {channels, Channels} = proplists:lookup(channels, Props),
    {pixels, Pixels} = proplists:lookup(pixels, Props),
    write(Channels, Pixels, {Width, Height}, Filename).
%% Encode Pixels using QOI, and write an image file at Filename. Size
%% is a 2-tuple of {Width, Height}. Pixels should contain Channels
%% bytes per pixel <<Red, Green, Blue [, Alpha]>> (i.e. Channels *
%% Width * Height).
-spec write(channels(), binary(), {integer(), integer()}, string()) ->
          ok | {error, term()}.
%% Encode Pixels (Channels bytes each, Width * Height of them) and
%% write header + chunk stream + end marker to Filename.
write(Channels, Pixels, Size, Filename) ->
    Contents = [qoif_header(Size, Channels),
                encode(Channels, Pixels),
                ?FILE_TAIL],
    file:write_file(Filename, Contents).
%% Create a QOI-format file header.
-spec qoif_header({integer(), integer()}, channels()) -> binary().
%% 14-byte header: magic, big-endian 32-bit width and height, channel
%% count, and a zero color-space byte (currently unused).
qoif_header({Width, Height}, Channels) ->
    <<"qoif", Width:32/unsigned, Height:32/unsigned, Channels, 0>>.
%% TESTING
%% Verify that decode(Channels, encode(Channels, Pixels)) reproduces
%% Pixels exactly. The two values in the successful `{ok, _, _}`
%% return are the encoder and decoder states, respectively. They
%% should be equivalent, but that is not part of this verification. If
%% an error is returned, the proplist contains information about what
%% didn't match (reason), where in the image it was (pixels_consumed),
%% and the state of the encoder and decoder.
-spec verify(channels(), binary()) ->
          {ok, #eqoi_state{}, #eqoi_state{}} |
          {error, proplists:proplist()}.
verify(Channels, Pixels) ->
    %% Encoder and decoder start from identical states.
    Start = state_initial(pixel_initial(Channels)),
    verify(Channels, Start, Start, Pixels, <<>>, 0).
%% Consume Pixels (fourth argument) one at a time, passing them to the
%% encoder, and accumulating them in Acc (fifth argument). When the
%% encoder returns a new chunk (or chunks), pass them to the decoder
%% and verify that the bytes produced match the bytes
%% accumulated. Essentially, this verifies that Pixels ==
%% decode(encode(Pixels)), but step-by-step, stopping with hopefully
%% helpful information when the decoding doesn't match the original.
-spec verify(channels(),
             #eqoi_state{}, #eqoi_state{},
             binary(), binary(),
             integer()) ->
          {ok, #eqoi_state{}, #eqoi_state{}} |
          {error, proplists:proplist()}.
verify(Channels, ES, DS, <<>>, Acc, Consumed) ->
    %% Input exhausted: the encoder may still hold a pending run, and
    %% Acc may still hold pixels awaiting a chunk to check against.
    case Acc of
        <<>> ->
            case ES#eqoi_state.run of
                0 ->
                    {ok, ES, DS};
                N ->
                    %% A pending run with no accumulated pixels should
                    %% be impossible; report it rather than crash.
                    {error, [{reason,
                              "End run non-zero, but no accumulated pixels"},
                             {end_run_length, N},
                             {pixels_consumed, Consumed},
                             {encoder_state, ES},
                             {decoder_state, DS}]}
            end;
        _ ->
            case ES#eqoi_state.run of
                0 ->
                    {error, [{reason, "Accumulated pixels remaining"},
                             {pixels_remaining, size(Acc)},
                             {pixels_consumed, Consumed},
                             {encoder_state, ES},
                             {decoder_state, DS}]};
                N ->
                    %% Flush the final run chunk and check that it
                    %% decodes to exactly the accumulated pixels.
                    Chunks = encode_run(N),
                    NewES = ES#eqoi_state{run=0},
                    case verify_match(Channels, Chunks, Acc, DS) of
                        {ok, NewDS} ->
                            {ok, NewES, NewDS};
                        {error, Reason, NewDS} ->
                            {error, [{reason, Reason},
                                     {pixels_consumed, Consumed},
                                     {encoder_state, NewES},
                                     {decoder_state, NewDS}]}
                    end
            end
    end;
verify(Channels, ES, DS, Pixels, Acc, Consumed) ->
    <<Next:Channels/binary, Rest/binary>> = Pixels,
    case encode_pixel(Next, ES, <<>>) of
        {<<>>, NewES} ->
            %% No chunk produced (run lengthened); keep the pixel in
            %% Acc until a chunk arrives to verify against.
            verify(Channels, NewES, DS, Rest, <<Acc/binary, Next/binary>>,
                   Consumed + 1);
        {Chunks, NewES} ->
            Expect = <<Acc/binary, Next/binary>>,
            case verify_match(Channels, Chunks, Expect, DS) of
                {ok, NewDS} ->
                    verify(Channels, NewES, NewDS, Rest, <<>>, Consumed + 1);
                {error, Reason, NewDS} ->
                    {error, [{reason, Reason},
                             {chunks, Chunks},
                             {expect, Expect},
                             {pixels_consumed, Consumed+1},
                             {encoder_state, NewES},
                             {decoder_state, NewDS}]}
            end
    end.
%% Verify that Chunks (second argument) decode to the bytes of Expect
%% (third argument). The state in either return value is the updated
%% decoder state.
-spec verify_match(channels(), binary(), binary(), #eqoi_state{}) ->
          {ok, #eqoi_state{}} | {error, term(), #eqoi_state{}}.
verify_match(Channels, Chunks, Expect, DS) ->
    case decode_loop(Chunks, DS, <<>>, 0) of
        {error, Reason} ->
            %% Decoder state is unchanged when decoding itself fails.
            {error, Reason, DS};
        {ok, Decoded, NewDS} ->
            case match_pixels(Channels, Expect, Decoded) of
                {ok, <<>>} -> {ok, NewDS};
                {ok, Leftover} -> {error, {leftover_expect, Leftover}, NewDS};
                {error, Reason} -> {error, Reason, NewDS}
            end
    end.
%% Verify that Pixels (third argument) is an exact prefix of Expect
%% (second argument). An `{ok, Remaining}` return means it is. If
%% Remaining is the empty binary, Expect and Pixels were the same
%% size. The binaries returned by `{error, {mismatch, _, _}}` might be
%% either one pixel each, or unconsumed tails of each input.
-spec match_pixels(channels(), binary(), binary()) ->
{ok, binary()} | {error, {mismatch, binary(), binary()}}.
match_pixels(_Channels, Expect, <<>>) ->
    %% Decoding can yield several chunks' worth of pixels at once, so
    %% leftover Expect bytes are simply handed back for the caller to
    %% match against the next decoded batch.
    {ok, Expect};
match_pixels(Channels, Expect, Pixels) ->
    case {Expect, Pixels} of
        {<<E:Channels/binary, ExpectRest/binary>>,
         <<P:Channels/binary, PixelsRest/binary>>} when E =:= P ->
            match_pixels(Channels, ExpectRest, PixelsRest);
        {<<E:Channels/binary, _/binary>>,
         <<P:Channels/binary, _/binary>>} ->
            {error, {mismatch, E, P}};
        _ ->
            %% Pixels is non-empty but shorter than one full pixel, or
            %% Expect ran out first; report the unconsumed tails.
            {error, {mismatch, Expect, Pixels}}
    end.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2000-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% %CopyrightEnd%
%%
%%----------------------------------------------------------------------
%% Purpose: Main API module for Event Tracer
%%----------------------------------------------------------------------
%%
%% The Event Tracer (et) uses the built-in trace mechanism in Erlang and
%% provides tools for collection and graphical viewing of trace data.
%%
%% et_collector
%%
%% An Erlang trace client which collects and stores trace data.
%% Provides hooks for trace data filtering and group communication
%% between processes (such as et_viewer-processes). The trace data
%% is preferably traced et-module calls, but may in fact be any
%% Erlang trace data.
%%
%% It also provides functionality for global control of trace
%% pattern settings. If used, the one et_collector-process is
%% registered globally. On all connected Erlang nodes, it starts an
%% Erlang tracer process which sends its trace data to a local port
%% (the port number is generated). On the node where the global
%% et_collector is running, the corresponding Erlang trace client
%% processes are started (one for each node), configured to
%% transform the trace data into event records and possibly hand
%% them over to the collector. Whenever new nodes are
%% (dis)connected, this is monitored and new tracer/client pair of
%% processes are automatically started and eventually the trace
%% pattern are set on these nodes.
%%
%% Trace data can also be loaded from one or more files.
%%
%% et_viewer
%%
%% A graphical sequence chart tool. It is connected to a
%% et_collector-process, which it polls regularly for more trace
%% events to display. Before the event is displayed a user defined
%% filter function is applied in order to skip, accept as is or
%% transform the event. Several et_viewer-processes may share the
%% same et_collector in order to provide different simultaneous
%% views of the same trace data.
%%
%% et_contents_viewer
%%
%% A graphical tool which displays a detailed view of one trace
%% event. Normally started from the et_viewer.
%%
%% et_selector
%%
%% A library module with low level functions for activation of
%% Erlang trace patterns. It do also implement a default filter
%% function which transforms the raw trace data into the event
%% record data structure that is used as internal format by the rest
%% of the application. Customized transform functions can be
%% alternatively be used (by et_viewer, et_contents_viewer and
%% et_collector), if needed.
%%
%% et
%%
%% A library module with a few event report functions that are
%% intended to be invoked from other applications. The functions are
%% extremely light weight as they do nothing besides returning an
%% atom. These functions are specifically designed to be traced
%% for. The global trace patterns in et_collector defaults to trace
%% on these functions.
%%----------------------------------------------------------------------
-module(et).
-export([
trace_me/4, phone_home/4, report_event/4,
trace_me/5, phone_home/5, report_event/5
]).
%%----------------------------------------------------------------------
%% Reports an event, such as a message
%%
%% trace_me(DetailLevel, FromTo, Label, Contents) -> hopefully_traced
%% trace_me(DetailLevel, From, To, Label, Contents) -> hopefully_traced
%% report_event(DetailLevel, FromTo, Label, Contents) -> hopefully_traced
%% report_event(DetailLevel, From, To, Label, Contents) -> hopefully_traced
%% phone_home(DetailLevel, FromTo, Label, Contents) -> hopefully_traced
%% phone_home(DetailLevel, From, To, Label, Contents) -> hopefully_traced
%%
%% DetailLevel = integer(X) when X =< 0, X >= 100
%% From = actor()
%% To = actor()
%% FromTo = actor()
%% Label = atom() | string() | term()
%% Contents = [{Key, Value}] | term()
%%
%% actor() = term()
%%
%% These functions are intended to be invoked at strategic places
%% in user applications in order to enable simplified tracing.
%% The functions are extremely light weight as they do nothing
%% besides returning an atom. These functions are designed for
%% being traced. The global tracing mechanism in et_collector
%% defaults to set its trace pattern to these functions.
%%
%% The label is intended to provide a brief summary of the event.
%% A simple tag would do.
%%
%% The contents can be any term but in order to simplify
%% post processing of the traced events, a plain list
%% of {Key, Value} tuples is preferred.
%%
%% Some events, such as messages, are directed from some actor to another.
%% Other events (termed actions) may be undirected and only have one actor.
%%----------------------------------------------------------------------
%% Report an undirected event (the single actor is used as both From
%% and To). The ?MODULE-qualified call is deliberate: only an external
%% call can be matched by the globally-set trace patterns.
trace_me(DetailLevel, FromTo, Label, Contents)
  when is_integer(DetailLevel) ->
    ?MODULE:trace_me(DetailLevel, FromTo, FromTo, Label, Contents).

%% Report a directed event. Intentionally does nothing but return an
%% atom -- the value of calling this function is its trace side
%% effect when a trace pattern is active.
trace_me(DetailLevel, _From, _To, _Label, _Contents)
  when is_integer(DetailLevel) ->
    hopefully_traced.
%% Alias for trace_me/4; forwards through an external call so trace
%% patterns on trace_me/5 still match.
phone_home(DetailLevel, FromTo, Label, Contents) ->
    %% N.B External call
    ?MODULE:trace_me(DetailLevel, FromTo, FromTo, Label, Contents).

%% Alias for trace_me/5 (see above).
phone_home(DetailLevel, From, To, Label, Contents) ->
    %% N.B External call
    ?MODULE:trace_me(DetailLevel, From, To, Label, Contents).
%% Alias for trace_me/4; forwards through an external call so trace
%% patterns on trace_me/5 still match.
report_event(DetailLevel, FromTo, Label, Contents) ->
    %% N.B External call
    ?MODULE:trace_me(DetailLevel, FromTo, FromTo, Label, Contents).

%% Alias for trace_me/5 (see above).
report_event(DetailLevel, From, To, Label, Contents)
  when is_integer(DetailLevel) ->
    %% N.B External call
    ?MODULE:trace_me(DetailLevel, From, To, Label, Contents).
%%%------------------------------------------------------------------------
%% Copyright 2018, OpenCensus Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-----------------------------------------------------------------------
-module(prop_period_or_count).
-include_lib("proper/include/proper.hrl").
-define(CONFIG(Key, Config), proplists:get_value(Key, Config)).
%%%%%%%%%%%%%%%%%%
%%% Properties %%%
%%%%%%%%%%%%%%%%%%
%% Property: for random sampler settings (period/count) and workloads
%% (limit/delay), the number of sampled spans observed by
%% run_tracing/1 stays within 1 of the analytically expected number
%% computed by desired_result/2.
prop_compare_desired_sampling_result_with_factual() ->
    ?FORALL({Period, Count, Limit, Delay},
            {pos_integer(), pos_integer(), pos_integer(), pos_integer()},
            begin
              Config = [{limit, Limit},
                        {delay, Delay},
                        {period, Period},
                        {count, Count}],
              start_apps(Config),
              {Time, Result} = run_tracing(Config),
              DesiredResult = desired_result(Config, Time),
              stop_apps(),
              abs(DesiredResult - Result) < 2
            end).
%% Property: after a sampled run, the sampler's internal ETS counter
%% (table sampler_period_or_count) holds the expected value, and both
%% the counter trigger and the period trigger each enable the next
%% span at the right moment.
prop_validate_sampling_by_checking_counters() ->
    ?FORALL({Period, Count, Limit, Delay},
            {integer(1, 5), integer(1, 10), integer(10, 100), integer(5, 10)},
            begin
              Config = [{limit, Limit},
                        {delay, Delay},
                        {period, Period},
                        {count, Count}],
              start_apps(Config),
              %% 1000/100 (0th, 99th, 199th, etc)
              {Time, Result} = run_tracing(Config),
              DesiredResult = desired_result(Config, Time),
              true = abs(DesiredResult - Result) < 2,
              %% Wait out the period so only the counter can trigger.
              timer:sleep(Period * 1000 + 500),
              %% Expected residual counter value after Limit spans.
              X = if
                    Limit rem Count == 0 -> Count;
                    true -> Limit rem Count
                  end,
              [{sampler, _, X}] = ets:lookup(sampler_period_or_count, sampler),
              %% increment counter to trigger value
              _ = lists:foreach(fun(_) ->
                                    oc_trace:start_span(<<"span">>, undefined)
                                end, lists:seq(X, Count - 1)),
              %% this enabled because of counter
              Span1 = oc_trace:start_span(<<"span">>, undefined),
              true = oc_trace:is_enabled(Span1),
              [{sampler, _, 1}] = ets:lookup(sampler_period_or_count, sampler),
              timer:sleep(Period * 1000 + 500),
              %% this enabled because of period
              Span2 = oc_trace:start_span(<<"span">>, undefined),
              true = oc_trace:is_enabled(Span2),
              %% A period-triggered sample still advances the counter.
              if
                Count > 1 ->
                  [{sampler, _, 2}] = ets:lookup(sampler_period_or_count, sampler);
                true ->
                  [{sampler, _, 1}] = ets:lookup(sampler_period_or_count, sampler)
              end,
              {Time1, Result1} = run_tracing(Config),
              DesiredResult1 = desired_result(Config, Time1),
              stop_apps(),
              true = abs(DesiredResult1 - Result1) < 2
            end).
%%%%%%%%%%%%%%%
%%% Helpers %%%
%%%%%%%%%%%%%%%
%% boolean(_) -> true.
%%%%%%%%%%%%%%%%%%
%%% Generators %%%
%%%%%%%%%%%%%%%%%%
%% mytype() -> term().
%% Expected number of sampled spans for the measured Duration
%% (seconds), derived from the configured limit, period and count.
desired_result(Config, Duration) ->
    round(calculate_desired_result(?CONFIG(limit, Config),
                                   Duration,
                                   ?CONFIG(period, Config),
                                   ?CONFIG(count, Config))).
%% With no counter trigger, sampling is purely periodic; with no
%% period, purely counter-based. Otherwise the sampler fires on
%% whichever trigger comes first, so the expected total is the larger
%% of the two rates: a period too short for the counter to reach its
%% trigger value simply dominates, and vice versa.
calculate_desired_result(_Limit, Duration, Period, Count) when Count == 0 ->
    Duration / Period;
calculate_desired_result(Limit, _Duration, Period, Count) when Period == 0 ->
    Limit / Count;
calculate_desired_result(Limit, Duration, Period, Count) ->
    max(Duration / Period, Limit / Count).
%%
%%
%% Start `limit` spans with `delay` ms between each, counting how many
%% were actually sampled (enabled). Returns {ElapsedSeconds, Sampled}.
run_tracing(Config) ->
    Started = erlang:monotonic_time(microsecond),
    Limit = ?CONFIG(limit, Config),
    Delay = ?CONFIG(delay, Config),
    Sampled =
        lists:foldl(
          fun(_, Acc) ->
                  SpanCtx = oc_trace:start_span(<<"span">>, undefined),
                  timer:sleep(Delay),
                  case oc_trace:is_enabled(SpanCtx) of
                      true -> Acc + 1;
                      false -> Acc
                  end
          end, 0, lists:seq(1, Limit)),
    Finished = erlang:monotonic_time(microsecond),
    {(Finished - Started) / 1000000, Sampled}.
%% @doc Configure the period-or-count sampler from the test config and
%% start the opencensus application (with its dependencies).
start_apps(Config) ->
    SamplerOpts = [{period, ?CONFIG(period, Config)},
                   {count, ?CONFIG(count, Config)}],
    application:set_env(opencensus, sampler,
                        {oc_sampler_period_or_count, SamplerOpts}),
    {ok, _} = application:ensure_all_started(opencensus).
%% @doc Stop the opencensus application. The result is deliberately
%% ignored (it may be {error, {not_started, opencensus}}).
stop_apps() ->
    _Result = application:stop(opencensus).
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2017, <<NAME>>
%%% @doc
%%%
%%% @end
%%% Created : 07. Feb 2017 6:08 PM
%%%-------------------------------------------------------------------
-module(q_time).
-author("madalin").
-include_lib("q/include/q.hrl").
%% API
-export([posix_seconds/0]).
-export([posix_milliseconds/0]).
-export([posix_microseconds/0]).
-export([posix_nanoseconds/0]).
-export([strict_monotonic_seconds/0]).
-export([strict_monotonic_milliseconds/0]).
-export([strict_monotonic_microseconds/0]).
-export([strict_monotonic_nanoseconds/0]).
-export([datetime/0]).
-export([date/0]).
-export([time/0]).
-export([convert_datetime_to_posix_seconds/1]).
-export([convert_datetime_to_posix_milliseconds/1]).
-export([convert_datetime_to_posix_microseconds/1]).
-export([convert_datetime_to_posix_nanoseconds/1]).
-export([convert_posix_seconds_to_datetime/1]).
-export([convert_posix_milliseconds_to_datetime/1]).
-export([convert_posix_microseconds_to_datetime/1]).
-export([convert_posix_nanoseconds_to_datetime/1]).
%% @doc Returns the current POSIX (OS system) time in seconds.
posix_seconds() -> erlang:system_time(second).
%% @doc Returns the current POSIX (OS system) time in milliseconds.
posix_milliseconds() -> erlang:system_time(millisecond).
%% @doc Returns the current POSIX (OS system) time in microseconds.
posix_microseconds() -> erlang:system_time(microsecond).
%% @doc Returns the current POSIX (OS system) time in nanoseconds.
posix_nanoseconds() -> erlang:system_time(nanosecond).
%% @doc Returns the current Q monotonic time in seconds.
%% This is a strictly monotonically increasing time regardless of the Erlang
%% time warp mode currently in use. That is, consecutive calls to
%% q_time:strict_monotonic_seconds/0 can NOT produce the same result.
%% Delegates to the q_time_srv_strict_monotonic server.
strict_monotonic_seconds() -> q_time_srv_strict_monotonic:get_seconds().
%% @doc Returns the current Q monotonic time in milliseconds.
%% This is a strictly monotonically increasing time regardless of the Erlang
%% time warp mode currently in use. That is, consecutive calls to
%% q_time:strict_monotonic_milliseconds/0 can NOT produce the same result.
strict_monotonic_milliseconds() -> q_time_srv_strict_monotonic:get_milliseconds().
%% @doc Returns the current Q monotonic time in microseconds.
%% This is a strictly monotonically increasing time regardless of the Erlang
%% time warp mode currently in use. That is, consecutive calls to
%% q_time:strict_monotonic_microseconds/0 can NOT produce the same result.
strict_monotonic_microseconds() -> q_time_srv_strict_monotonic:get_microseconds().
%% @doc Returns the current Q monotonic time in nanoseconds.
%% This is a strictly monotonically increasing time regardless of the Erlang
%% time warp mode currently in use. That is, consecutive calls to
%% q_time:strict_monotonic_nanoseconds/0 can NOT produce the same result.
strict_monotonic_nanoseconds() -> q_time_srv_strict_monotonic:get_nanoseconds().
%% @doc Returns the current date and time according to Universal Time
%% Coordinated (UTC), also called GMT, as a calendar datetime() tuple.
datetime() ->
    GregorianSeconds = posix_seconds() + ?Q_TIME_SECONDS_UNIX_EPOCH,
    calendar:gregorian_seconds_to_datetime(GregorianSeconds).
%% @doc Returns the current date according to UTC.
date() -> element(1, datetime()).
%% @doc Returns the current time according to UTC.
time() -> element(2, datetime()).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% convert_datetime_
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Convert datetime() to posix seconds
convert_datetime_to_posix_seconds(DateTime) -> calendar:datetime_to_gregorian_seconds(DateTime) - ?Q_TIME_SECONDS_UNIX_EPOCH.
%% @doc Convert datetime() to posix milliseconds
%% (whole-second resolution: the sub-second part is always 0)
convert_datetime_to_posix_milliseconds(DateTime) -> convert_datetime_to_posix_seconds(DateTime) * 1000.
%% @doc Convert datetime() to posix microseconds
%% (whole-second resolution: the sub-second part is always 0)
convert_datetime_to_posix_microseconds(DateTime) -> convert_datetime_to_posix_seconds(DateTime) * 1000000.
%% @doc Convert datetime() to posix nanoseconds
%% (whole-second resolution: the sub-second part is always 0)
convert_datetime_to_posix_nanoseconds(DateTime) -> convert_datetime_to_posix_seconds(DateTime) * 1000000000.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% convert_posix_
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Convert posix seconds to datetime()
convert_posix_seconds_to_datetime(Seconds) -> calendar:gregorian_seconds_to_datetime(Seconds + ?Q_TIME_SECONDS_UNIX_EPOCH).
%% @doc Convert posix milliseconds to datetime()
%% Note: round/1 rounds to the NEAREST second (e.g. 1500 ms -> 2 s);
%% it does not truncate.
convert_posix_milliseconds_to_datetime(Milliseconds) -> convert_posix_seconds_to_datetime(round(Milliseconds/1000)).
%% @doc Convert posix microseconds to datetime()
%% Note: rounds to the nearest second, it does not truncate.
convert_posix_microseconds_to_datetime(Microseconds) -> convert_posix_seconds_to_datetime(round(Microseconds/1000000)).
%% @doc Convert posix nanoseconds to datetime()
%% Note: rounds to the nearest second, it does not truncate.
convert_posix_nanoseconds_to_datetime(Nanoseconds) -> convert_posix_seconds_to_datetime(round(Nanoseconds/1000000000)).
%%--------------------------------------------------------------------
%% Copyright (c) 2021-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% Test database consistency with random transactions
-module(mria_proper_mixed_cluster_suite).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("snabbkaffe/include/ct_boilerplate.hrl").
-include("mria_proper_utils.hrl").
%%================================================================================
%% Testcases
%%================================================================================
%% Run the shared property against a mixed cluster: one default core,
%% one core forced onto the mnesia backend, and one replicant.
t_import_transactions_mixed_cluster(Config0) when is_list(Config0) ->
    ProperOpts = #{max_size => 300,
                   numtests => 100,
                   timeout  => 100000},
    Config = [{proper, ProperOpts} | Config0],
    ClusterConfig = [core, {core, [{mria, db_backend, mnesia}]}, replicant],
    ?run_prop(Config, mria_proper_utils:prop(ClusterConfig, ?MODULE)).
%%================================================================================
%% Proper FSM definition
%%================================================================================
%% Initial model value at system start. Should be deterministic.
%% The cluster model starts with two core nodes and one replicant
%% (the #s{} record comes from mria_proper_utils.hrl).
initial_state() ->
    #s{cores = [n1, n2], replicants = [n3]}.
command(State) -> mria_proper_utils:command(State).
%% With more than one core, a race condition involving a `dirty_write' /
%% `dirty_delete' pair of ops happening on different cores can arise:
%% one of the cores might process the dirty ops in a different order
%% than what the state machine expects, thus violating the model
%% consistency. Since this is inherent to mnesia, for this test we
%% simply forbid dirty operations altogether.
precondition(_State, {call, _Mod, execute, [_Node, {dirty, _}]}) ->
    false;
precondition(_State, {call, _Mod, execute, [_Node, _Op]}) ->
    true;
precondition(State, Op) ->
    mria_proper_utils:precondition(State, Op).
%% Check a command's result against the model; delegated to the shared utils.
postcondition(State, Op, Res) -> mria_proper_utils:postcondition(State, Op, Res).
%% Advance the model state after a command; delegated to the shared utils.
next_state(State, Res, Op) -> mria_proper_utils:next_state(State, Res, Op).
%%
%% Copyright (c) dushin.net
%% All rights reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%%-----------------------------------------------------------------------------
%% @doc An AtomVM I2C driver for the Bosch-Sensortec BME280.
%%
%% The BME280 is a small sensor that can read temperature, humidity, and atmospheric pressure.
%% The chipset supports I2C and SPI interfaces. This driver uses the AtomVM I2C
%% interface for communicating with the BME280. This means you can take temperature,
%% barometric pressure, and humidity readings using two GPIO pins on your ESP32.
%%
%% Developers interact with this driver by starting an instance, specifying pins for
%% the I2C data and clock pins. Starting an instance of the driver yeilds a reference
%% that can be used in subsequent calls.
%%
%% The primary operation in this module is the take_reading/1 function, which takes
%% a reference to a BME280 driver, and returns a reading expressed as a tuple containing
%% the temperature (in degrees celcius), atomspheric pressure (in hectopascals) and
%% relative humidity (as a percentage).
%%
%% Functions for reading the BME280 chip ide and version, as well as doing a soft
%% reset of the device, are also supported.
%%
%% Note. The BME280 sensor is a fairly dynamic sensor and can be used for
%% many different applications (e.g., weather collection, gaming, drones, etc).
%% The primary use-case for this driver is weather collection, which is assumed
%% to be a low frequency operation. Some of the BME280 applications may require
%% additional support in this driver, which would be relatively straightforward
%% to support in future versions.
%%
%% Further information about the Bosch Sensortec BME280 can be found in the reference
%% documentation:
%% https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
%%
%% @end
%%-----------------------------------------------------------------------------
-module(bme280).
-behaviour(gen_server).
-export([start/1, start/2, stop/1, take_reading/1, chip_id/1, version/1, soft_reset/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
% -define(TRACE_ENABLED, true).
-include_lib("atomvm_lib/include/trace.hrl").
-type over_sampling() :: ignore | x1 | x2 | x4| x8 | x16.
-type mode() :: sleep | forced | normal.
-type option() ::
{temp_oversampling, over_sampling()} |
{pressure_oversampling, over_sampling()} |
{humidity_oversampling, over_sampling()} |
{mode, mode()}.
-type options() :: [option()].
-type bme() :: pid().
-type fractional() :: 0..99.
-type temp_reading() :: {integer(), fractional()}.
-type pressure_reading() :: {integer(), fractional()}.
-type humidity_reading() :: {integer(), fractional()}.
-type reading() :: {temp_reading(), pressure_reading(), humidity_reading()}.
-define(BME280_BASE_ADDR, 16#76).
-define(BME280_REGISTER_CHIPID, 16#D0).
-define(BME280_REGISTER_VERSION, 16#D1).
-define(BME280_REGISTER_SOFT_RESET, 16#E0).
-define(BME280_REGISTER_CTL_HUM, 16#F2).
-define(BME280_REGISTER_CTL_MEAS, 16#F4).
-define(DEFAULT_OVERSAMPLING, x4).
-define(DEFAULT_MODE, forced).
-record(state, {
i2c_bus,
calibration_data,
temp_oversampling,
pressure_oversampling,
humidity_oversampling,
mode
}).
-record(calibration, {
dig_T1, dig_T2, dig_T3,
dig_P1, dig_P2, dig_P3, dig_P4, dig_P5, dig_P6, dig_P7, dig_P8, dig_P9,
dig_H1, dig_H2, dig_H3, dig_H4, dig_H5, dig_H6
}).
%%-----------------------------------------------------------------------------
%% @param SDAPin pin number for I2C SDA channel
%% @param SCLPin pin number for the I2C SCL channel
%% @returns {ok, BME} on success, or {error, Reason}, on failure
%% @equiv start(SDAPin, SCLPin, [])
%% @doc Start the BME280 driver.
%% @end
%%-----------------------------------------------------------------------------
-spec start(I2CBus::i2c_bus:i2c_bus()) -> {ok, BME::bme()} | {error, Reason::term()}.
start(I2CBus) ->
    %% Delegate to start/2 with the default (empty) option list.
    start(I2CBus, []).
%%-----------------------------------------------------------------------------
%% @param SDAPin pin number for I2C SDA channel
%% @param SCLPin pin number for the I2C SCL channel
%% @param Options additional driver options
%% @returns {ok, BME} on success, or {error, Reason}, on failure
%% @doc Start the BME280 driver.
%%
%% This operation will start the BME driver. Use the returned reference
%% in subsequent operations, such as for taking a reading.
%%
%% The Options parameter may be used to fine-tune behavior of the sensor,
%% but the default values should be sufficient for weather-station based
%% scenarios.
%%
%% Notes: The default oversampling rates for temperature, pressure, and humidity
%% is `x4'. A sampling rate of `ignore' is not tested.
%%
%% The default `mode' is `forced'. Other modes are not tested.
%% @end
%%-----------------------------------------------------------------------------
-spec start(I2CBus::i2c_bus:i2c_bus(), Options::options()) -> {ok, BME::bme()} | {error, Reason::term()}.
start(I2CBus, Options) ->
    %% Unregistered, unlinked gen_server; the caller keeps the returned pid.
    gen_server:start(?MODULE, {I2CBus, Options}, []).
%%-----------------------------------------------------------------------------
%% @param BME a reference to the BME instance created via start
%% @returns ok if successful; {error, Reason}, otherwise
%% @doc Stop the BME280 driver.
%%
%% Note. This function is not well tested and its use may result in a memory leak.
%% @end
%%-----------------------------------------------------------------------------
-spec stop(BME::bme()) -> ok | {error, Reason::term()}.
stop(BME) ->
    %% Synchronous shutdown; terminate/2 attempts to put the sensor to
    %% sleep on a 'normal' stop.
    gen_server:stop(BME).
%%-----------------------------------------------------------------------------
%% @param BME a reference to the BME instance created via start
%% @returns {ok, Reading} if successful; {error, Reason}, otherwise
%% @doc Take a reading from the sensor.
%%
%% This function will take a reading from the attached BME280 sensor.
%%
%% The return value is a 3-ary tuple containing the temperature, pressure,
%% and humidty readings from the sensor. Each element of the tuple is a
%% pair, containing the value in integral and fractional parts.
%%
%% Temperature is expressed in degrees celsius, pressure is expressed in hectopascals,
%% and humidity is expressed as relative humidity.
%% @end
%%-----------------------------------------------------------------------------
-spec take_reading(BME::bme()) -> {ok, Reading::reading()} | {error, Reason::term()}.
take_reading(BME) ->
    %% Synchronous call; blocks for the duration of the measurement
    %% (subject to the default gen_server:call/2 5-second timeout).
    gen_server:call(BME, take_reading).
%%-----------------------------------------------------------------------------
%% @param BME a reference to the BME instance created via start
%% @returns the chip id of the BME280 sensor
%% @doc Return the chip id of the BME280 sensor
%% @end
%%-----------------------------------------------------------------------------
-spec chip_id(BME::bme()) -> integer().
chip_id(BME) ->
    %% Reads one byte from register 16#D0 via the driver process.
    gen_server:call(BME, chip_id).
%%-----------------------------------------------------------------------------
%% @param BME a reference to the BME instance created via start
%% @returns the version of the BME280 sensor
%% @doc Return the version of the BME280 sensor
%% @end
%%-----------------------------------------------------------------------------
-spec version(BME::bme()) -> integer().
version(BME) ->
    %% Reads one byte from register 16#D1 via the driver process.
    gen_server:call(BME, version).
%%-----------------------------------------------------------------------------
%% @param BME a reference to the BME instance created via start
%% @returns ok
%% @doc Perform a soft reset of the BME280 sensor.
%%
%% A soft reset will set all of the registers in the device
%% to values in section 5.3 of the reference documentation.
%% @end
%%-----------------------------------------------------------------------------
-spec soft_reset(BME::bme()) -> ok.
soft_reset(BME) ->
    %% Writes to the soft-reset register (16#E0) via the driver process.
    gen_server:call(BME, soft_reset).
%%
%% gen_server API
%%
%% @hidden
%% gen_server init: read the factory calibration data off the sensor and
%% capture the (register-encoded) sampling configuration in the state.
init({I2CBus, Options}) ->
    Calibration = read_calibration_data(I2CBus),
    GetOpt = fun(Key, Default) -> proplists:get_value(Key, Options, Default) end,
    State = #state{
        i2c_bus = I2CBus,
        calibration_data = Calibration,
        temp_oversampling = normalize_oversampling(GetOpt(temp_oversampling, ?DEFAULT_OVERSAMPLING)),
        pressure_oversampling = normalize_oversampling(GetOpt(pressure_oversampling, ?DEFAULT_OVERSAMPLING)),
        humidity_oversampling = normalize_oversampling(GetOpt(humidity_oversampling, ?DEFAULT_OVERSAMPLING)),
        mode = normalize_mode(GetOpt(mode, ?DEFAULT_MODE))
    },
    {ok, State}.
%% private
%% Map an over_sampling() atom onto its 3-bit osrs register encoding.
%% Any unrecognized value (including x16) falls through to 2#101 (x16).
normalize_oversampling(ignore) -> 16#00;
normalize_oversampling(x1) -> 16#01;
normalize_oversampling(x2) -> 16#02;
normalize_oversampling(x4) -> 16#03;
normalize_oversampling(x8) -> 16#04;
normalize_oversampling(_) -> 16#05.
%% private
%% Map a mode() atom onto its 2-bit ctrl_meas mode encoding. Any
%% unrecognized value (and 'forced' itself) maps to forced mode (2#01).
normalize_mode(sleep) -> 16#0;
normalize_mode(normal) -> 16#3;
normalize_mode(_) -> 16#1.
%% @hidden
%% Synchronous driver requests. The server state is never modified by
%% any request.
handle_call(take_reading, _From, State) ->
    Reading = do_take_reading(State),
    {reply, {ok, Reading}, State};
handle_call(chip_id, _From, State) ->
    %% chip id register (16#D0)
    {reply, read_byte(State#state.i2c_bus, ?BME280_REGISTER_CHIPID), State};
handle_call(version, _From, State) ->
    %% version register (16#D1)
    {reply, read_byte(State#state.i2c_bus, ?BME280_REGISTER_VERSION), State};
handle_call(soft_reset, _From, State) ->
    %% Per the BME280 datasheet (section 5.4.2, "reset" register 16#E0),
    %% the device only resets when the magic value 16#B6 is written;
    %% writing any other value has no effect. The previous code wrote
    %% 16#01, which therefore never actually reset the sensor.
    {reply, write_byte(State#state.i2c_bus, ?BME280_REGISTER_SOFT_RESET, 16#B6), State};
handle_call(Request, _From, State) ->
    {reply, {error, {unknown_request, Request}}, State}.
%% @hidden
handle_cast(_Msg, State) ->
{noreply, State}.
%% @hidden
handle_info(_Info, State) ->
{noreply, State}.
%% @hidden
terminate(normal, State) ->
?TRACE("terminate(normal, ~p)", [State]),
do_sleep(State);
terminate(_Reason, _State) ->
ok.
%% @hidden
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%%
%% Internal functions
%%
%% Understanding this code probably requires an understanding of the above reference documentation.
%% Where possible, we reference sections of the document, to help explain the code. Section 5.3
%% is particularly helpful for understanding the memory layout in the device.
%%
%% The BME280 provides temperature, pressure, and humidity data in blocks of memory on the sensor,
%% which can be read through I2C commands. The raw metrics stored on the sensor must then be
%% calibrated, using calibration data that is burned into the sensor, but which is also read at
%% initialization time into the state of this gen_server.
%%
%% All data stored in the sensor are stored in little endian format, some signed, some unsigned,
%% some 16 bit, some 8 bit. The calibration data is not all contiguous, unfortunately, and
%% two of the values (for humidity) is not aligned on byte boundaries. So extraction is ... interesting.
%% It would be easier if we supported bit syntax on unaligned boundaries in AtomVM. Some day...
%%
%% See section 4.2.2 of the BME280 reference documentation for the layout
%% of the calibration data in the sensor. All multi-byte values are
%% little-endian, a mix of signed and unsigned, and dig_H4/dig_H5 share
%% the byte at register 16#E5, so they are reassembled from nibbles below.
read_calibration_data(I2CBus) ->
    ?TRACE("Reading calibration data off ~p...", [I2CBus]),
    %% First calibration block: registers 16#88..16#A1 (26 bytes).
    %% The first 24 bytes hold the temperature and pressure coefficients;
    %% register 16#A0 is unused padding, and dig_H1 lives at 16#A1.
    %% (The previous code read only 25 bytes and took the last one as
    %% dig_H1, i.e. it read register 16#A0 instead of 16#A1.)
    Bytes1 = read_bytes(I2CBus, 16#88, 26),
    <<
        T1:16/little, T2:16/signed-little, T3:16/signed-little,
        P1:16/little, P2:16/signed-little, P3:16/signed-little,
        P4:16/signed-little, P5:16/signed-little, P6:16/signed-little,
        P7:16/signed-little, P8:16/signed-little, P9:16/signed-little,
        _Skip:8, H1:8
    >> = Bytes1,
    %% Second calibration block: registers 16#E1..16#E7 (humidity).
    Bytes2 = read_bytes(I2CBus, 16#E1, 7),
    <<H2:16/signed-little, H3:8, E4:8/signed, E5:8, E6:8/signed, H6:8/signed>> = Bytes2,
    %% dig_H4 = 16#E4[7:0] << 4 | 16#E5[3:0]
    H4 = (E4 bsl 4) bor (E5 band 16#0F),
    %% dig_H5 = 16#E6[7:0] << 4 | 16#E5[7:4]
    H5 = (E6 bsl 4) bor ((E5 band 16#F0) bsr 4),
    Calibration = #calibration {
        dig_T1 = T1, dig_T2 = T2, dig_T3 = T3,
        dig_P1 = P1, dig_P2 = P2, dig_P3 = P3,
        dig_P4 = P4, dig_P5 = P5, dig_P6 = P6,
        dig_P7 = P7, dig_P8 = P8, dig_P9 = P9,
        dig_H1 = H1, dig_H2 = H2, dig_H3 = H3,
        dig_H4 = H4, dig_H5 = H5, dig_H6 = H6
    },
    ?TRACE("Calibration data: ~p", [Calibration]),
    Calibration.
%% @private
%% Read Len bytes starting at Register from the sensor's base I2C address.
read_bytes(I2CBus, Register, Len) ->
    ?TRACE("Reading bytes off I2CBus ~p, Register ~p, Len ~p...", [I2CBus, Register, Len]),
    i2c_bus:read_bytes(I2CBus, ?BME280_BASE_ADDR, Register, Len).
%% @private
%% Read a single byte from Register; returns an integer in 0..255.
read_byte(I2CBus, Register) ->
    Bytes = read_bytes(I2CBus, Register, 1),
    <<Value:8>> = Bytes,
    Value.
%% @private
%% Write the single byte Byte to Register; returns whatever i2c_bus
%% returns (callers match 'ok' on success).
write_byte(I2CBus, Register, Byte) ->
    Value = <<Byte:8>>,
    i2c_bus:write_bytes(I2CBus, ?BME280_BASE_ADDR, Register, Value).
%% @private
%% Trigger one measurement cycle, wait for it to complete, then read and
%% compensate the raw values. Returns {Temp, Pressure, Humidity}, where
%% each element is an {Integral, Fractional} pair (or 'undefined' when
%% the corresponding oversampling is 0) -- see normalize_reading/3.
do_take_reading(State) ->
    #state{
        i2c_bus = I2CBus,
        temp_oversampling = TempOverSampling,
        pressure_oversampling = PressureOverSampling,
        humidity_oversampling = HumidityOverSampling,
        mode = Mode
    } = State,
    %%
    %% Tell the sensor to take a temp, pressure, and humidity reading
    %% with specified oversampling. Per the spec, we need to write to
    %% the HUM and then MEAS registers. The mode should almost always be force.
    %%
    ok = write_byte(I2CBus, ?BME280_REGISTER_CTL_HUM, HumidityOverSampling),
    %% ctrl_meas layout: osrs_t in bits [7:5], osrs_p in [4:2], mode in [1:0]
    Meas = (TempOverSampling bsl 5) bor (PressureOverSampling bsl 2) bor Mode,
    ok = write_byte(I2CBus, ?BME280_REGISTER_CTL_MEAS, Meas),
    %%
    %% Wait the max time for the sensor to take the reading.
    %% See Section 9.2 of the spec for expected timing measurements.
    %%
    SleepTimeUs = get_max_timing_us(
        TempOverSampling,
        PressureOverSampling,
        HumidityOverSampling
    ),
    %% sleep for ceil(SleepTimeUs / 1000) milliseconds
    timer:sleep(SleepTimeUs div 1000 + case SleepTimeUs rem 1000 of 0 -> 0; _ -> 1 end),
    %%
    %% Read the data in memory. The BME280 reference documentation recommends
    %% reading all values in a single block (registers 16#F7..16#FE).
    %%
    Bytes = read_bytes(I2CBus, 16#F7, 8),
    <<
        Press_MSB:8, Press_LSB:8, Press_XLSB:8,
        Temp_MSB:8, Temp_LSB:8, Temp_XLSB:8,
        Hum_MSB:8, Hum_LSB:8
    >> = Bytes,
    Cal = State#state.calibration_data,
    %%
    %% Calculate and calibrate temperature, pressure, and humidity readings.
    %% Temperature must come first: it yields T_fine, which both the
    %% pressure and humidity compensation depend on.
    %%
    RawTemp = ((Temp_MSB bsl 16) bor (Temp_LSB bsl 8) bor Temp_XLSB) bsr 4,
    {T_fine, Temperature} = calibrate_temp(Cal, RawTemp),
    RawPressure = ((Press_MSB bsl 16) bor (Press_LSB bsl 8) bor Press_XLSB) bsr 4,
    Pressure = calibrate_pressure(Cal, T_fine, RawPressure),
    RawHumidity = (Hum_MSB bsl 8) bor Hum_LSB,
    Humidity = calibrate_humidity(Cal, T_fine, RawHumidity),
    %%
    %% Normalize into {integer, fractional} values.
    %%
    Reading = {
        normalize_reading(Temperature, TempOverSampling, 100),
        normalize_reading(Pressure, PressureOverSampling, 100),
        normalize_reading(Humidity, HumidityOverSampling, 1024)
    },
    ?TRACE("Reading: ~p", [Reading]),
    Reading.
%% @private
%% Put the sensor into sleep mode (its lowest-power state).
%% Per the BME280 datasheet, sleep mode requires mode[1:0] of the
%% ctrl_meas register to be 2#00. The previous code wrote 16#FF, whose
%% mode bits 2#11 actually select NORMAL (continuously cycling) mode.
%% Writing 16#00 selects sleep mode and sets all oversampling fields to
%% "skipped".
do_sleep(State) ->
    #state{
        i2c_bus = I2CBus
    } = State,
    ?TRACE("Setting BME device to sleep ...", []),
    ok = write_byte(I2CBus, ?BME280_REGISTER_CTL_HUM, 16#00),
    ok = write_byte(I2CBus, ?BME280_REGISTER_CTL_MEAS, 16#00).
%% @private
%% Split a raw compensated value R (in units of 1/D) into an
%% {Integral, Fractional} pair, where Fractional is in hundredths.
%% An oversampling of 0 means the measurement was skipped -> 'undefined'.
normalize_reading(_Raw, 0, _Divisor) ->
    undefined;
normalize_reading(Raw, _OverSampling, Divisor) ->
    Whole = Raw div Divisor,
    Remainder = Raw rem Divisor,
    %% When the divisor is already 100 the remainder IS hundredths;
    %% otherwise rescale: Remainder:Divisor as X:100 => X = 100*Remainder/Divisor.
    Hundredths =
        case Divisor of
            100 -> Remainder;
            _ -> (100 * Remainder) div Divisor
        end,
    {Whole, Hundredths}.
%% See Section 9.1 of the BME280 reference documentation for the maximum
%% measurement time. Returns the worst-case time in microseconds for one
%% reading, summed over the temperature, pressure, and humidity phases.
%% @private
get_max_timing_us(TempOversampling, PressureOversampling, HumidityOversampling) ->
    TempUs = 1250 + 2300 * (1 bsl TempOversampling),
    ComponentUs = fun(Oversampling) -> 2300 * (1 bsl Oversampling) + 575 end,
    TempUs + ComponentUs(PressureOversampling) + ComponentUs(HumidityOversampling).
%% Annotated with the recommended algorithm. See Section 8.2 (32-bit version)
%% of the BME280 reference documentation. This integer fixed-point code
%% mirrors the Bosch C reference implementation, so the shift amounts and
%% statement order are significant and must not be rearranged.
%% Takes the raw 20-bit ADC temperature value and returns {T_fine, T},
%% where T is the temperature in hundredths of a degree (normalized with
%% divisor 100 by the caller) and T_fine is the intermediate value reused
%% by the pressure and humidity compensation.
%% @private
calibrate_temp(Cal, Adc_T) ->
    Dig_T1 = Cal#calibration.dig_T1,
    Dig_T2 = Cal#calibration.dig_T2,
    Dig_T3 = Cal#calibration.dig_T3,
    %% var1 = ((((adc_T >> 3) - (dig_T1 << 1))) * dig_T2) >> 11;
    Var1 = (((Adc_T bsr 3) - (Dig_T1 bsl 1)) * Dig_T2) bsr 11,
    %% var2 = (((((adc_T >> 4) - dig_T1) * ((adc_T >> 4) - dig_T1)) >> 12) * dig_T3) >> 14;
    Var2 = (((((Adc_T bsr 4) - Dig_T1) * ((Adc_T bsr 4) - Dig_T1)) bsr 12) * Dig_T3) bsr 14,
    %% t_fine = var1 + var2;
    T_fine = Var1 + Var2,
    %% T = (t_fine * 5 + 128) >> 8;
    Temperature = (T_fine * 5 + 128) bsr 8,
    {T_fine, Temperature}.
%% Annotated with the recommended algorithm. See Section 8.2 (32-bit version)
%% of the BME280 reference documentation. As with calibrate_temp/2, this is
%% a direct transcription of the Bosch C reference code; the intermediate
%% "varN" values are kept in single-assignment Erlang variables, so the C
%% reassignments of var1/var2 become Var1..Var8 here. Do not reorder.
%% Takes T_fine from calibrate_temp/2 and the raw 20-bit ADC pressure value;
%% the result is normalized with divisor 100 by the caller.
%% Returns 0 when Var6 is 0 to avoid a division-by-zero exception.
%% @private
calibrate_pressure(Cal, T_fine, Adc_P) ->
    Dig_P1 = Cal#calibration.dig_P1,
    Dig_P2 = Cal#calibration.dig_P2,
    Dig_P3 = Cal#calibration.dig_P3,
    Dig_P4 = Cal#calibration.dig_P4,
    Dig_P5 = Cal#calibration.dig_P5,
    Dig_P6 = Cal#calibration.dig_P6,
    Dig_P7 = Cal#calibration.dig_P7,
    Dig_P8 = Cal#calibration.dig_P8,
    Dig_P9 = Cal#calibration.dig_P9,
    %% var1 = (t_fine >> 1) - 64000;
    Var1 = (T_fine bsr 1) - 64000,
    %% var2 = (((var1 >> 2) * (var1 >> 2)) >> 11) * dig_P6;
    Var2 = (((Var1 bsr 2) * (Var1 bsr 2)) bsr 11) * Dig_P6,
    %% var2 = var2 + ((var1 * dig_P5) << 1);
    Var3 = Var2 + ((Var1 * (Dig_P5)) bsl 1),
    %% var2 = (var2 >> 2) + (dig_P4 << 16);
    Var4 = (Var3 bsr 2) + ((Dig_P4) bsl 16),
    %% var1 = (((dig_P3 * (((var1 >> 2) * (var1 >> 2)) >> 13)) >> 3) + ((dig_P2 * var1) >> 1)) >> 18;
    Var5 = (((Dig_P3 * (((Var1 bsr 2) * (Var1 bsr 2)) bsr 13)) bsr 3) + ((Dig_P2 * Var1) bsr 1)) bsr 18,
    %% var1 = ((32768 + var1) * dig_P1) >> 15;
    Var6 = ((32768 + Var5) * Dig_P1) bsr 15,
    case Var6 of
        0 ->
            %% avoid exception caused by division by zero
            0;
        _ ->
            %% p = ((1048576 - adc_P) - (var2 >> 12)) * 3125;
            %% NOTE: 'div 4096' is used instead of 'bsr 12' because Var4
            %% may be negative, and the C reference semantics are wanted.
            P = ((1048576 - Adc_P) - (Var4 div 4096)) * 3125,
            P1 = case P < 16#80000000 of
                true ->
                    %% p = (p << 1) / var1;
                    (P bsl 1) div Var6;
                _ ->
                    %% p = (p / var1) * 2;
                    (P div Var6) bsl 1
            end,
            %% var1 = (dig_P9 * (((p >> 3) * (p >> 3)) >> 13)) >> 12;
            Var7 = (Dig_P9 * (((P1 bsr 3) * (P1 bsr 3)) bsr 13)) bsr 12,
            %% var2 = ((p >> 2) * dig_P8) >> 13;
            Var8 = ((P1 bsr 2) * Dig_P8) bsr 13,
            %% p = p + ((var1 + var2 + dig_P7) >> 4);
            R = P1 + ((Var7 + Var8 + Dig_P7) bsr 4),
            R
    end.
%% Annotated with the recommended algorithm. See Section 4.2.3 of the
%% BME280 reference documentation. This is a direct transcription of the
%% Bosch C reference code; the C variable v_x1_u32r is reassigned several
%% times there, which becomes the single-assignment chain V_x1..V_x5 here.
%% Takes T_fine from calibrate_temp/2 and the raw 16-bit ADC humidity
%% value; the result is clamped to [0, 419430400] before the final shift
%% and normalized with divisor 1024 by the caller.
%% @private
calibrate_humidity(Cal, T_fine, Adc_H) ->
    Dig_H1 = Cal#calibration.dig_H1,
    Dig_H2 = Cal#calibration.dig_H2,
    Dig_H3 = Cal#calibration.dig_H3,
    Dig_H4 = Cal#calibration.dig_H4,
    Dig_H5 = Cal#calibration.dig_H5,
    Dig_H6 = Cal#calibration.dig_H6,
    %% v_x1_u32r = (t_fine - 76800);
    V_x1_u32r = T_fine - 76800,
    %% v_x1_u32r = (((((adc_H << 14) - (dig_H4 << 20) - (dig_H5 * v_x1_u32r)) + 16384) >> 15) *
    %%              (((((((v_x1_u32r * dig_H6) >> 10) * (((v_x1_u32r * dig_H3) >> 11) + 32768)) >> 10) + 2097152) * dig_H2 + 8192) >> 14));
    V_x2_u32r = (((((Adc_H bsl 14) - (Dig_H4 bsl 20) - (Dig_H5 * V_x1_u32r)) + 16384) bsr 15) * (((((((V_x1_u32r * Dig_H6) bsr 10) * (((V_x1_u32r * Dig_H3) bsr 11) + 32768)) bsr 10) + 2097152) * Dig_H2 + 8192) bsr 14)),
    %% v_x1_u32r = (v_x1_u32r - (((((v_x1_u32r >> 15) * (v_x1_u32r >> 15)) >> 7) * dig_H1) >> 4));
    V_x3_u32r = (V_x2_u32r - (((((V_x2_u32r bsr 15) * (V_x2_u32r bsr 15)) bsr 7) * Dig_H1) bsr 4)),
    %% v_x1_u32r = (v_x1_u32r < 0 ? 0 : v_x1_u32r);
    V_x4_u32r = case V_x3_u32r < 0 of true -> 0; _ -> V_x3_u32r end,
    %% v_x1_u32r = (v_x1_u32r > 419430400 ? 419430400 : v_x1_u32r);
    V_x5_u32r = case V_x4_u32r > 419430400 of true -> 419430400; _ -> V_x4_u32r end,
    %% return v_x1_u32r >> 12;
    V_x5_u32r bsr 12.
%%
%% Copyright (c) 2012 - 2013, <NAME>
%% Copyright (c) 2012 - 2013, <NAME>
%% All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% A simple pin-code state machine (based on an example from the OTP system documentation)
%%
-module(pincode).
-behaviour(pipe).
%% public api
-export([start_link/1, digit/1, reset/0, check/0]).
%% pipe call-back
-export([init/1, free/2, locked/3, unlocked/3]).
%%
%% pipe:start_link spawns a new process, registered locally as ?MODULE,
%% using this module as the pipe behavior implementation. Code is the
%% pin-code, given most-significant digit first.
start_link(Code) ->
    pipe:start_link({local, ?MODULE}, ?MODULE, [Code], []).
%%
%% Supplies one digit (0..9) to the state machine (asynchronous).
digit(Digit)
when Digit >= 0, Digit =< 9 ->
    pipe:send(?MODULE, {digit, Digit}).
%%
%% Resets the state machine to its initial (locked, no digits) state.
reset() ->
    pipe:send(?MODULE, reset).
%%
%% Checks the pin-code status; returns 'locked' or 'unlocked' (synchronous).
check() ->
    pipe:call(?MODULE, check).
%%%------------------------------------------------------------------
%%%
%%% behavior
%%%
%%%------------------------------------------------------------------
%%
%% The function is called whenever the state machine process is started
%% using either the start_link or start function. It builds the internal
%% state data structure, defines the initial state transition, etc. The
%% function should return either `{ok, Sid, State}` or `{error, Reason}`.
%% The code is stored reversed because entered digits are accumulated
%% most-recent-first in the locked state.
init([Code]) ->
    {ok, locked, {[], lists:reverse(Code)}}.
%%
%% The function is called to release resources owned by the state machine;
%% it is called when the process is about to terminate.
free(_, _State) ->
    ok.
%%
%% The state transition functions receive any message sent using the pipe
%% interface or any other Erlang message-passing operation. Each function
%% executes the state transition, generates output, or terminates execution.
%%
%%
%% locked state: collect digits (most recent first) until they match the
%% reversed pin-code stored in the state.
locked({digit, Digit}, _Pipe, {Entered, Code}) ->
    Attempt = [Digit | Entered],
    if
        Attempt =:= Code ->
            %% complete, correct pin-code: unlock and arm a 10 second
            %% inactivity timeout
            {next_state, unlocked, {[], Code}, 10000};
        length(Attempt) < length(Code) ->
            %% partial pin-code entered so far; keep collecting digits
            {next_state, locked, {Attempt, Code}};
        true ->
            %% wrong pin-code: discard the attempt and start over
            {next_state, locked, {[], Code}}
    end;
locked(reset, _Pipe, {_, Code}) ->
    %% reset state machine to initial state
    {next_state, locked, {[], Code}};
locked(check, Pipe, State) ->
    %% client process checks the state machine status:
    %% acknowledge message and reply to client
    pipe:ack(Pipe, locked),
    {next_state, locked, State}.
%%
%% unlocked state: relock on an explicit reset or on the 10 second
%% inactivity timeout.
unlocked(Event, _Pipe, {_, Code}) when Event =:= reset; Event =:= timeout ->
    {next_state, locked, {[], Code}};
unlocked(check, Pipe, State) ->
    %% client process checks the state machine status:
    %% acknowledge message, reply to client, and re-arm the 10 second
    %% inactivity timeout
    pipe:ack(Pipe, unlocked),
    {next_state, unlocked, State, 10000}.
%% @doc The payment_stream module provides utilities for dealing with
%% streams of payments.
%%
%% == Payment Streams ==
%%
%% A <i>payment</i> is a tuple `{amount, date}' consisting of
%% an amount (in whatever currency) and a date. The amount can be
%% positive or negative.
%%
%% For example, `{-2000, {2015, 1, 1}}' represents an amount of -2000
%% transferred at Jan 01, 2015.
%%
%% A <i>payment stream</i> is a list of payments
%%
%% == Relative Payment Streams ==
%%
%% Let `[{a_1, t_1}, ..., {a_n, t_n}]' be a payment stream and let
%% `{a_f, t_f}' be the earliest payment in this stream. A <i>relative
%% payment stream</i> is a list `[{a_1, r_1}, ..., {a_n, r_n}]' where
%% `r_k' is the difference of `t_k' and `t_f' "expressed in years".
%%
%% More precisely, `r_k' is computed as follows: Let `t_f' be the
%% `d_f'th day in a year `y_f' and let `t_k' be the `d_k'th day in year
%% `y_k'. (Days are indexed starting at `0'. Jan 01 is day `0'.) Let
%% `D(y)' denote the number of days in a year `y'. For a leap year `y',
%% `D(y)' is 366. Otherwise, `D(y)' is 365. Then
%%
%% ```
%% r_k = (y_k - y_f) + (d_k / D(y_k) - d_f / D(y_f)).
%% '''
%%
%% == The Net Present Value Function ==
%%
%% A relative payment stream `[{a_1, r_1}, ..., {a_n, r_n}]' gives rise
%% to the definition of the net present value function
%%
%% ```
%% npv(x) = a_1 * (1 + x)^(-r_1) + ... + a_n * (1 + x)^(-r_n)
%% '''
%%
%% of single real variable `x'. The internal interest rate of the
%% original payment stream is the root of the `npv' function.
%%
%% In general, there is no closed formula for the computation of the
%% roots of `npv'. However, given a "reasonable" start value, Newton's
%% method converges very fast to the wanted root.
%%
%% Newton's method requires the computation of the derivative `` npv' ''
%% of `npv'. Fortunately, `` npv' '' can be easily written in a
%% closed form:
%%
%% ```
%% npv' = a_1 * (-r_1) * (1 + x)^(-r_1 - 1) + ... + a_n * (-r_n) * (1 + x)^(-r_n - 1)
%% '''
-module(payment_stream).
-export([earliest_payment/1, to_relative_payment_stream/1, net_present_value/1, net_present_value_derivative/1]).
%% @doc Finds the earliest payment in a payment stream.
%%
%% Scans the stream once and keeps the payment with the smallest date,
%% replacing the previous implementation that sorted the entire stream
%% just to take its head (O(n log n) plus `lists:nth(1, ...)').
%% For payments sharing the earliest date, the first one in the stream
%% wins. Crashes with function_clause on an empty stream (as before,
%% which failed inside lists:nth/2).
%%
%% == Examples ==
%%
%% ```
%% 1> payment_stream:earliest_payment([{-1000, {2021, 1, 1}}, {1000, {2020, 1, 1}}]).
%% {1000,{2020,1,1}}
%% '''
earliest_payment([First | Rest]) ->
    lists:foldl(
      fun({_, Date} = Payment, {_, BestDate} = Best) ->
              case Date < BestDate of
                  true -> Payment;
                  false -> Best
              end
      end,
      First,
      Rest).
%% @doc Converts a payment stream to a relative payment stream, where
%% every payment's date is replaced by its offset in (fractional) years
%% from the earliest payment in the stream.
%%
%% == Examples ==
%% ```
%% 1> payment_stream:to_relative_payment_stream([{1000, {2020, 1, 1}}, {1000, {2021, 1, 1}}]).
%% [{1000,0.0},{1000,1.0}]
%% '''
to_relative_payment_stream(PaymentStream) ->
    Earliest = earliest_payment(PaymentStream),
    [to_relative_payment(Earliest, Payment) || Payment <- PaymentStream].
%% @private Turn a single payment into its relative form with respect to
%% the earliest payment {_, T_f}: whole-year difference plus the
%% difference of the fractional positions within each year.
to_relative_payment({_A_f, {Y_f, _, _} = T_f}, {A_k, {Y_k, _, _} = T_k}) ->
    YearDelta = Y_k - Y_f,
    {A_k, YearDelta + relative_day_in_year(T_k) - relative_day_in_year(T_f)}.
%% @private Fraction of the year elapsed at date T: the zero-based day
%% index divided by the number of days in that year (366 for leap years,
%% 365 otherwise).
relative_day_in_year({Year, _, _} = Date) ->
    DaysInYear =
        case calendar:is_leap_year(Year) of
            true -> 366;
            false -> 365
        end,
    day_in_year(Date) / DaysInYear.
%% @private Zero-based day index of a date within its year
%% (Jan 01 is day 0), computed from Gregorian day counts.
day_in_year({Year, _, _} = Date) ->
    calendar:date_to_gregorian_days(Date) - calendar:date_to_gregorian_days({Year, 1, 1}).
%% @doc Computes the net present value function `npv' of a relative
%% payment stream `[{a_1, r_1}, ..., {a_n, r_n}]':
%% ```
%% npv(x) = a_1 * (1 + x)^(-r_1) + ... + a_n * (1 + x)^(-r_n)
%% '''
%% The root of `npv' is the internal interest rate of the original
%% payment stream.
%%
%% == Examples ==
%%
%% For `[{1000, {2021, 1, 1}}, {-1000, {2022, 1, 1}}]' the amount paid
%% is returned exactly one year later, so the internal interest rate is
%% `0' and `npv(0)' evaluates to `0.0':
%% ```
%% 1> PaymentStream = [{1000, {2021, 1, 1}}, {-1000, {2022, 1, 1}}].
%% [{1000,{2021,1,1}},{-1000,{2022,1,1}}]
%% 2> RelativePaymentStream = payment_stream:to_relative_payment_stream(PaymentStream).
%% [{1000,0.0},{-1000,1.0}]
%% 3> Npv = payment_stream:net_present_value(RelativePaymentStream).
%% #Fun<payment_stream.2.28137773>
%% 4> Npv(0).
%% 0.0
%% '''
net_present_value(RelativePaymentStream) ->
    fun(X) ->
            lists:sum([Amount * math:pow(1 + X, -R)
                       || {Amount, R} <- RelativePaymentStream])
    end.
%% @doc Computes the derivative `` npv' '' of the net present value
%% function `npv' of a relative payment stream:
%% ```
%% npv'(x) = a_1 * (-r_1) * (1 + x)^(-r_1 - 1) + ... + a_n * (-r_n) * (1 + x)^(-r_n - 1)
%% '''
%% This closed form is what makes Newton's method practical for finding
%% the internal interest rate (the root of `npv').
%%
%% == Examples ==
%%
%% For the relative payment stream `[{1000, 0.0}, {-1000, 1.0}]':
%% ```
%% npv'(x) = 0.0 * 1000 * (1 + x)^(-1.0) + (-1.0) * (-1000) * (1 + x)^(-2.0)
%%         = 1000 * (1 + x)^(-2.0)
%% '''
%% ```
%% 1> RelativePaymentStream = [{1000, 0.0}, {-1000, 1.0}].
%% [{1000,0.0},{-1000,1.0}]
%% 2> Npvp = payment_stream:net_present_value_derivative(RelativePaymentStream).
%% #Fun<payment_stream.3.28137773>
%% 3> Npvp(0).
%% 1.0e3
%% '''
%% (Fix: removed a duplicated doc block and non-Erlang dataset metadata
%% fused onto the final line.)
net_present_value_derivative(RelativePaymentStream) ->
    fun(X) ->
            lists:foldl(
              fun({A, R}, Sum) -> Sum + A * (-R) * math:pow(1 + X, -R - 1) end,
              0,
              RelativePaymentStream
             )
    end.
% Contains any functions not related to raft or kv store
%
-module(nilva_helper).
-include_lib("eunit/include/eunit.hrl").
-export([getUniformRand/2, getUniformRandInt/2]).
-export([spawn_and_get_result/1]).
%% =========================================================================
%% Public Functions
%% =========================================================================
%% Draw a float uniformly from [Min, Max); crashes with function_clause
%% when Min >= Max (guard), since rand:uniform() is in [0.0, 1.0).
-spec getUniformRand(number(), number()) -> number().
getUniformRand(Min, Max) when Min < Max ->
    Span = Max - Min,
    Min + Span * rand:uniform().
%% Draw an integer uniformly-ish between Min and Max by rounding a
%% uniform float drawn from [Min, Max).
-spec getUniformRandInt(integer(), integer()) -> integer().
getUniformRandInt(Min, Max) ->
    Value = getUniformRand(Min, Max),
    round(Value).
%% Run F/0 in a freshly spawned process and block until its result is
%% sent back. The reply is tagged with the worker pid so the selective
%% receive cannot pick up unrelated messages.
%% NOTE(review): no monitor/timeout — if F crashes, the caller waits
%% forever; behaviour intentionally preserved from the original.
-spec spawn_and_get_result(function()) -> any().
spawn_and_get_result(F) ->
    Parent = self(),
    Worker = spawn(fun() -> Parent ! {self(), F()} end),
    receive
        {Worker, Result} -> Result
    end.
%% =========================================================================
%% Unit Tests
%% =========================================================================
%% getUniformRand/2 must stay within [Min, Max] for valid bounds and
%% crash (guard violation -> function_clause) when Min >= Max.
getUniformRand_test_() ->
    % Valid min & max
    {Min, Max} = {95.8, 209},
    X = getUniformRand(Min, Max),
    % Invalid min & max (Min > Max)
    {Min2, Max2} = {209, 95.8},
    % Check that the random number falls within the valid bounds
    [?_assert((X >= Min) and (X =< Max)),
     % Check that a function_clause error is raised when min > max
     ?_assertException(error, function_clause, getUniformRand(Min2, Max2))].
% Sanity checks on randomness; also exercises spawn_and_get_result/1.
getUniformRand_randomness_test_() ->
    {Min, Max} = {20395, 58649.7},
    F = fun() -> [nilva_helper:getUniformRand(Min, Max) || _ <- lists:seq(1, 10)] end,
    Xs = F(),
    Xs_set = sets:from_list(Xs),
    Xs_different_process = spawn_and_get_result(F),
    % All ten samples should be distinct (set size equals list length)
    [?_assertEqual(length(Xs), sets:size(Xs_set)),
     % A separate process has its own rand state, so the sequence it
     % generates should differ from ours
     ?_assertNotEqual(Xs, Xs_different_process)].
% getUniformRandInt/2 must return an integer within the inclusive
% bounds (rounding a value in [Min, Max) can reach Max).
% (Fix: removed non-Erlang dataset metadata fused onto the final line.)
getUniformRandInt_test_() ->
    {Min, Max} = {1890, 2018},
    I = getUniformRandInt(Min, Max),
    [?_assert(is_integer(I)),
     ?_assert((I >= Min) and (I =< Max))].
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% This sampler makes the decision based on the parent, with the following possibilities:
%% 1) a remote parent that is sampled (by default always_on);
%% 2) a remote parent that is not sampled (by default always_off);
%% 3) a local parent that is sampled (by default always_on);
%% 4) a local parent that is not sampled (by default always_off);
%% 5) no parent (by default always_on).
%%
%% For each of these cases a different sampler can be configured.
%% @end
%%%-------------------------------------------------------------------------
-module(otel_sampler_parent_based).
-behavior(otel_sampler).
-export([description/1, setup/1, should_sample/7]).
-export_type([opts/0]).
-include_lib("kernel/include/logger.hrl").
-include_lib("opentelemetry_api/include/opentelemetry.hrl").
-type opts() :: #{
remote_parent_sampled => otel_sampler:sampler_spec(),
remote_parent_not_sampled => otel_sampler:sampler_spec(),
local_parent_sampled => otel_sampler:sampler_spec(),
local_parent_not_sampled => otel_sampler:sampler_spec(),
root => otel_sampler:sampler_spec()
}.
%% otel_sampler callback: instantiate the five delegate samplers once at
%% setup time and return them keyed by the parent-span case they handle.
setup(Opts = #{root := RootSpec}) ->
    RemoteSampled    = sampler_for_spec(remote_parent_sampled, Opts, always_on),
    RemoteNotSampled = sampler_for_spec(remote_parent_not_sampled, Opts, always_off),
    LocalSampled     = sampler_for_spec(local_parent_sampled, Opts, always_on),
    LocalNotSampled  = sampler_for_spec(local_parent_not_sampled, Opts, always_off),
    RootSampler      = otel_sampler:new(RootSpec),
    #{
        root => RootSampler,
        remote_parent_sampled => RemoteSampled,
        remote_parent_not_sampled => RemoteNotSampled,
        local_parent_sampled => LocalSampled,
        local_parent_not_sampled => LocalNotSampled
    };
setup(Opts) ->
    %% No explicit root sampler configured: log and default to always_on.
    ?LOG_INFO("No sampler spec found for parent_based 'root' option. The 'always_on' sampler will be used for root spans."),
    setup(Opts#{root => always_on}).
%% @private Instantiate the sampler spec stored under Key in Opts,
%% falling back to DefaultModule when the key is absent.
sampler_for_spec(Key, Opts, DefaultModule) ->
    otel_sampler:new(maps:get(Key, Opts, DefaultModule)).
%% otel_sampler callback: render a human-readable description that
%% embeds the descriptions of all five delegate samplers.
description(#{
    root := RootSampler,
    remote_parent_sampled := RemoteParentSampler,
    remote_parent_not_sampled := RemoteParentNotSampler,
    local_parent_sampled := LocalParentSampler,
    local_parent_not_sampled := LocalParentNotSampler
}) ->
    iolist_to_binary(
      ["ParentBased{root:", otel_sampler:description(RootSampler),
       ",remoteParentSampled:", otel_sampler:description(RemoteParentSampler),
       ",remoteParentNotSampled:", otel_sampler:description(RemoteParentNotSampler),
       ",localParentSampled:", otel_sampler:description(LocalParentSampler),
       ",localParentNotSampled:", otel_sampler:description(LocalParentNotSampler),
       "}"]).
%% otel_sampler callback: select the delegate sampler according to the
%% parent span found in Ctx, then forward the full sampling decision to it.
should_sample(Ctx, TraceId, Links, SpanName, SpanKind, Attributes, Config) ->
    ParentSpanCtx = otel_tracer:current_span_ctx(Ctx),
    SamplerKey = parent_based_sampler(ParentSpanCtx),
    %% Each configured sampler is a {Module, Description, Opts} triple —
    %% presumably the representation returned by otel_sampler:new/1;
    %% confirm against the otel_sampler module.
    {Sampler, _Description, SamplerOpts} = maps:get(SamplerKey, Config),
    Sampler:should_sample(Ctx, TraceId, Links, SpanName, SpanKind, Attributes, SamplerOpts).
%% @private Map the parent span context to the key of the delegate
%% sampler that should decide (see should_sample/7). Clause order
%% matters: remote cases precede local ones, and any value that is not
%% a #span_ctx{} (i.e. no current span) falls through to 'root'.
%% (Fix: removed non-Erlang dataset metadata fused onto the final line.)
%% remote parent sampled
parent_based_sampler(#span_ctx{trace_flags = TraceFlags, is_remote = true}) when
    ?IS_SAMPLED(TraceFlags)
->
    remote_parent_sampled;
%% remote parent not sampled
parent_based_sampler(#span_ctx{is_remote = true}) ->
    remote_parent_not_sampled;
%% local parent sampled
parent_based_sampler(#span_ctx{trace_flags = TraceFlags}) when
    ?IS_SAMPLED(TraceFlags)
->
    local_parent_sampled;
%% local parent not sampled
parent_based_sampler(#span_ctx{}) ->
    local_parent_not_sampled;
%% no parent span context: treat as a root span
parent_based_sampler(_SpanCtx) ->
    root.
%%--------------------------------------------------------------------
%% Copyright (c) 2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(ekka_ring).
-include("ekka.hrl").
-export([ find_node/2
, find_nodes/2
, find_nodes/3
]).
-ifdef(TEST).
-compile(export_all).
-compile(nowarn_export_all).
-endif.
-type(key() :: term()).
-type(ring() :: list(member())).
-export_type([key/0, ring/0]).
-define(BASE, 4294967295). %% trunc(math:pow(2, 32) - 1))
%% @doc Return the node owning Key: the first ring member at or after
%% Key's hash on the ring (wrapping to the head when necessary).
-spec(find_node(key(), ring()) -> node()).
find_node(Key, Ring) ->
    Member = next_member(phash(Key), Ring),
    Member#member.node.
%% @private Consistent-hashing successor lookup: return the first ring
%% member whose hash is >= Hash, wrapping around to the head of the
%% ring when no such member exists. Assumes the ring is ordered by
%% member hash — TODO confirm at ring construction.
next_member(Hash, Ring = [Head|_]) ->
    next_member(Hash, Head, Ring).

%% @private Walk the ring carrying Head as the wrap-around fallback.
next_member(_Hash, Head, []) ->
    Head;
next_member(Hash, _Head, [M = #member{hash = MHash}|_])
  when MHash >= Hash ->
    M;
next_member(Hash, Head, [_|Ring]) ->
    next_member(Hash, Head, Ring).
%% @doc Return the nodes responsible for Key, taking quorum-many
%% members (capped at the ring size) starting at Key's hash.
-spec(find_nodes(key(), ring()) -> list(node())).
find_nodes(Key, Ring) ->
    find_nodes(Key, min(quorum(Ring), length(Ring)), Ring).
%% @doc Return the nodes of the Count members that follow Key's hash on
%% the ring (wrapping around), preserving ring order.
-spec(find_nodes(key(), pos_integer(), ring()) -> list(node())).
find_nodes(Key, Count, Ring) ->
    [N || #member{node = N} <- next_members(phash(Key), Count, Ring)].
%% @private Take Count members starting at the first member whose hash
%% is >= Hash, wrapping around to the beginning of the ring when fewer
%% than Count members remain after that point. When Count covers the
%% whole ring, the ring is returned unchanged.
next_members(_Hash, Count, Ring) when Count >= length(Ring) ->
    Ring;
next_members(Hash, Count, Ring) ->
    {Left, Right} = split_ring(Hash, Ring),
    case length(Right) >= Count of
        true ->
            lists:sublist(Right, 1, Count);
        false ->
            %% not enough members at/after Hash: wrap and take the
            %% remainder from the front of the ring
            lists:append(Right, lists:sublist(Left, 1, Count - length(Right)))
    end.
%% @private Partition the ring into {Left, Right} where Right holds the
%% members with hash >= Hash and Left the ones below it, each keeping
%% its original relative order. Note that every element is inspected,
%% so this is a partition rather than a positional split.
split_ring(Hash, Ring) ->
    split_ring(Hash, Ring, [], []).

%% @private Accumulators are built reversed and flipped at the end.
split_ring(_Hash, [], Left, Right) ->
    {lists:reverse(Left), lists:reverse(Right)};
split_ring(Hash, [M = #member{hash = MHash}|Ring], Left, Right) ->
    case Hash =< MHash of
        true -> split_ring(Hash, Ring, Left, [M|Right]);
        false -> split_ring(Hash, Ring, [M|Left], Right)
    end.
%% @private Majority quorum for the ring, capped at 3 replicas.
quorum(Ring) ->
    min(length(Ring) div 2 + 1, 3).
%% @private Hash an arbitrary term onto the ring's key space
%% [0, ?BASE), i.e. the 32-bit range.
%% (Fix: removed non-Erlang dataset metadata fused onto this line.)
phash(Key) -> erlang:phash2(Key, ?BASE).
%%------------------------------------------------------------------------------
%% @doc This OTP application is used for crawling websites while respecting `robots.txt'.
%%
%% This module exposes some high level functions to be able to add new crawlers and start/stop them.
%%
%% While most of the configuration is per crawler, this application is also configurable globally
%% via the following `sys.config' settings:
%%
%% ```
%% {treewalker, [
%% %% The minimum delay to wait before retrying a failed request
%% {min_retry_delay, pos_integer()},
%% %% The maximum delay to wait before retrying a failed request
%% {max_retry_delay, pos_integer()},
%% %% The maximum amount of retries of a failed request
%% {max_retries, pos_integer()},
%% %% The maximum amount of delay before starting a request (in seconds)
%% {max_worker_delay, pos_integer()},
%% %% The maximum amount of concurrent workers making HTTP requests
%% {max_concurrent_worker, pos_integer()},
%% %% The user agent making the HTTP requests
%% {user_agent, binary()}]},
%% '''
%%
%% @copyright 2020 <NAME>
%% @author <NAME> <<EMAIL>>
%% @end
%%------------------------------------------------------------------------------
-module(treewalker).
%% API
-export([add_crawler/2,
add_crawler/3,
remove_crawler/1,
start_crawler/1,
stop_crawler/1]).
-type child() :: treewalker_crawlers_sup:child().
-type options() :: #{scraper => module(),
scraper_options => term(),
fetcher => module(),
fetcher_options => module(),
max_depth => pos_integer(),
store => module(),
store_options => term(),
link_filter => module()}.
-type url() :: treewalker_page:url().
%%%===================================================================
%%% API
%%%===================================================================
%%------------------------------------------------------------------------------
%% @doc
%% Add a new crawler with the default configuration.
%%
%% `Name' identifies the crawler for the other functions in this module;
%% `Url' is the root URL the crawler starts from. Delegates to
%% treewalker_crawlers_sup; use {@link start_crawler/1} to start crawling.
%% @end
%%------------------------------------------------------------------------------
-spec add_crawler(term(), url()) -> {ok, child()} | {ok, child(), term()} | {error, term()}.
add_crawler(Name, Url) ->
    treewalker_crawlers_sup:add_crawler(Name, #{url => Url}).

%%------------------------------------------------------------------------------
%% @doc
%% Remove the specified crawler (looked up by the `Name' it was added with).
%% @end
%%------------------------------------------------------------------------------
-spec remove_crawler(term()) -> ok.
remove_crawler(Name) ->
    treewalker_crawlers_sup:remove_crawler(Name).
%%------------------------------------------------------------------------------
%% @doc
%% Add a new crawler with the specified configuration. The `Url' is
%% merged into the options map under the `url' key before delegation.
%%
%% The available options are as follow:
%%
%% - `scraper': Module implementing the {@link treewalker_scraper} behaviour.
%%
%% - `scraper_options': The options to pass to the module implementing the
%%   {@link treewalker_scraper} behaviour.
%%
%% - `fetcher': Module implementing the {@link treewalker_fetcher} behaviour.
%%
%% - `fetcher_options': The options to pass to the module implementing the
%%   {@link treewalker_fetcher} behaviour.
%%
%% - `max_depth': The max depth that the crawler will crawl.
%%
%% - `store': Module implementing the {@link treewalker_store} behaviour.
%%
%% - `store_options': The options to pass to the module implementing the
%%   {@link treewalker_store} behaviour.
%%
%% - `link_filter': Module implementing the {@link treewalker_link_filter} behaviour.
%% @end
%%------------------------------------------------------------------------------
-spec add_crawler(term(), url(), options()) ->
          {ok, child()} | {ok, child(), term()} | {error, term()}.
add_crawler(Name, Url, Custom) ->
    treewalker_crawlers_sup:add_crawler(Name, Custom#{url => Url}).
%%------------------------------------------------------------------------------
%% @doc
%% Start the specified crawler (previously added with add_crawler/2,3).
%% @end
%%------------------------------------------------------------------------------
-spec start_crawler(term()) -> ok.
start_crawler(Name) ->
    treewalker_crawlers_sup:start_crawler(Name).

%%------------------------------------------------------------------------------
%% @doc
%% Stop the specified crawler without removing it; it can be started
%% again with {@link start_crawler/1}.
%% @end
%%------------------------------------------------------------------------------
-spec stop_crawler(term()) -> ok.
stop_crawler(Name) ->
    treewalker_crawlers_sup:stop_crawler(Name).
%%%===================================================================
%%% Internal functions
%%%===================================================================
-module(slacker_group).
-include("spec.hrl").
-export([archive/2, close/2, create/2, create_child/2, history/3,
info/2, invite/3, kick/3, leave/2, list/2, mark/3, open/2,
rename/3, set_purpose/3, set_topic/3, unarchive/2]).
%% @doc Archives a private group.
%% All functions in this module are thin wrappers that delegate to
%% slacker_request:send/2,3 with the Slack Web API method name and
%% its query parameters.
-spec archive(Token :: string(), Channel :: string()) -> http_response().
archive(Token, Channel) ->
    slacker_request:send("groups.archive", [{"token", Token},{"channel", Channel}]).

%% @doc Closes a private group.
-spec close(Token :: string(), Channel :: string()) -> http_response().
close(Token, Channel) ->
    slacker_request:send("groups.close", [{"token", Token},{"channel", Channel}]).

%% @doc Creates a private group with the given name.
-spec create(Token :: string(), Name :: string()) -> http_response().
create(Token, Name) ->
    slacker_request:send("groups.create", [{"token", Token},{"name", Name}]).

%% @doc Clones and archives a private group.
-spec create_child(Token :: string(), Channel :: string()) -> http_response().
create_child(Token, Channel) ->
    slacker_request:send("groups.createChild", [{"token", Token},{"channel", Channel}]).
%% @doc Fetch history of messages and events from a given private group.
%%
%% Options can be:
%%   latest: end of time range of messages to include in results
%%   oldest: start of time range of messages to include in results
%%   inclusive: include messages with latest or oldest timestamp in results (default: 0)
%%   count: number of messages to return, between 1 and 1000 (default: 100)
%%   unreads: include unread_count_display in the output (default: 0)
%%
-spec history(Token :: string(), Channel :: string(), Options :: list()) -> http_response().
history(Token, Channel, Options) ->
    slacker_request:send("groups.history", [{"token", Token},{"channel", Channel}], Options).

%% @doc Gets information about a private group.
-spec info(Token :: string(), Channel :: string()) -> http_response().
info(Token, Channel) ->
    slacker_request:send("groups.info", [{"token", Token},{"channel", Channel}]).

%% @doc Invites a user to a private group.
-spec invite(Token :: string(), Channel :: string(), User :: string()) -> http_response().
invite(Token, Channel, User) ->
    slacker_request:send("groups.invite", [{"token", Token},{"channel", Channel},{"user", User}]).

%% @doc Removes a user from a private group.
-spec kick(Token :: string(), Channel :: string(), User :: string()) -> http_response().
kick(Token, Channel, User) ->
    slacker_request:send("groups.kick", [{"token", Token},{"channel", Channel},{"user", User}]).

%% @doc Leaves a private group.
-spec leave(Token :: string(), Channel :: string()) -> http_response().
leave(Token, Channel) ->
    slacker_request:send("groups.leave", [{"token", Token},{"channel", Channel}]).

%% @doc List of groups in the team that the calling user has access to.
%%
%% Options can be:
%%   exclude_archived: do not return archived private channels (default: 0)
%%
-spec list(Token :: string(), Options :: list()) -> http_response().
list(Token, Options) ->
    slacker_request:send("groups.list", [{"token", Token}], Options).
%% @doc Sets the read cursor in a private group to the given timestamp.
%%
%% Fix: this wrapper previously called the `groups.leave' endpoint (a
%% copy/paste error from leave/2), so marking a group as read actually
%% made the caller leave it. It now calls `groups.mark' as documented.
-spec mark(Token :: string(), Channel :: string(), Timestamp :: string()) -> http_response().
mark(Token, Channel, Timestamp) ->
    slacker_request:send("groups.mark", [{"token", Token},{"channel", Channel},{"ts", Timestamp}]).
%% @doc Opens a private group.
-spec open(Token :: string(), Channel :: string()) -> http_response().
open(Token, Channel) ->
    slacker_request:send("groups.open", [{"token", Token},{"channel", Channel}]).

%% @doc Renames a private group.
-spec rename(Token :: string(), Channel :: string(), Name :: string()) -> http_response().
rename(Token, Channel, Name) ->
    slacker_request:send("groups.rename", [{"token", Token},{"channel", Channel},{"name", Name}]).

%% @doc Sets the purpose for a private group.
-spec set_purpose(Token :: string(), Channel :: string(), Purpose :: string()) -> http_response().
set_purpose(Token, Channel, Purpose) ->
    slacker_request:send("groups.setPurpose", [{"token", Token},{"channel", Channel},{"purpose", Purpose}]).

%% @doc Sets the topic for a private group.
-spec set_topic(Token :: string(), Channel :: string(), Topic :: string()) -> http_response().
set_topic(Token, Channel, Topic) ->
    slacker_request:send("groups.setTopic", [{"token", Token},{"channel", Channel},{"topic", Topic}]).
%% @doc Unarchives a private group.
%% (Fix: removed non-Erlang dataset metadata fused onto the final line.)
-spec unarchive(Token :: string(), Channel :: string()) -> http_response().
unarchive(Token, Channel) ->
    slacker_request:send("groups.unarchive", [{"token", Token},{"channel", Channel}]).
%%%
%%% Copyright (c) 2016 The Talla Authors. All rights reserved.
%%% Use of this source code is governed by a BSD-style
%%% license that can be found in the LICENSE file.
%%%
%%% -----------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @doc Math API.
%%% @end
%%% -----------------------------------------------------------
-module(onion_math).
%% API.
-export([ceil/1,
floor/1,
pow/2,
mod/2,
mod_pow/3
]).
-include("onion_test.hrl").
%% @doc Round a given number upwards to the nearest integer (toward
%% +infinity). Negative inputs simply truncate toward zero; non-negative
%% inputs bump the truncation by one when a fractional part remains.
-spec ceil(Value) -> integer()
      when
      Value :: number().
ceil(X) when X < 0 ->
    trunc(X);
ceil(X) ->
    Truncated = trunc(X),
    if
        X > Truncated -> Truncated + 1;
        true -> Truncated
    end.
%% @doc Round a given number downwards to the nearest integer (toward
%% -infinity). Non-negative inputs simply truncate; negative inputs
%% subtract one from the truncation when a fractional part remains.
-spec floor(Value) -> integer()
      when
      Value :: number().
floor(X) when X < 0 ->
    Truncated = trunc(X),
    if
        X < Truncated -> Truncated - 1;
        true -> Truncated
    end;
floor(X) ->
    trunc(X).
%% @doc Return X^N as an exact integer.
%%
%% For integer X and non-negative integer N the result is now computed
%% with exact integer exponentiation-by-squaring. The previous
%% implementation went through math:pow/2, whose 53-bit float mantissa
%% silently rounds any result above 2^53 that is not a power of two
%% (e.g. pow(3, 40) came back off by tens of units). Other argument
%% combinations keep the original trunc(math:pow/2) behaviour.
-spec pow(X, N) -> integer()
      when
      X :: integer(),
      N :: integer().
pow(X, N) when is_integer(X), is_integer(N), N >= 0 ->
    ipow(X, N, 1);
pow(X, N) ->
    trunc(math:pow(X, N)).

%% @private Exact exponentiation by squaring: computes Acc * X^N.
ipow(_X, 0, Acc) ->
    Acc;
ipow(X, N, Acc) when N band 1 =:= 1 ->
    ipow(X * X, N bsr 1, Acc * X);
ipow(X, N, Acc) ->
    ipow(X * X, N bsr 1, Acc).
%% @doc Return X rem Y.
%%
%% NOTE: despite the name this is the remainder, taking the sign of X
%% (like `rem'), not the always-non-negative mathematical modulus.
%% Fix: dropped a redundant trunc/1 — `rem' on integers already yields
%% an integer, and non-integer arguments raise badarith before trunc
%% would ever run, so the call never had any effect.
-spec mod(X, Y) -> integer()
      when
      X :: integer(),
      Y :: integer().
mod(X, Y) ->
    X rem Y.
%% @doc Return B^E mod M, computed by right-to-left binary
%% (square-and-multiply) exponentiation so intermediate values stay
%% bounded by the modulus instead of materialising B^E.
-spec mod_pow(Base, Exponent, Modulus) -> Result
      when
      Base :: number(),
      Exponent :: number(),
      Modulus :: number(),
      Result :: number().
mod_pow(_, _, 1) ->
    %% every integer is congruent to 0 modulo 1
    0;
mod_pow(B, E, M) ->
    Result = 1,
    Base = mod(B, M),
    do_mod_pow(Base, E, M, Result).

%% @private One square-and-multiply step: B is squared (mod M) each
%% round, E is shifted right one bit, and Result picks up the current
%% B whenever the low bit of E is set. Terminates when E reaches 0.
-spec do_mod_pow(Base, Exponent, Modulus, Result) -> Result
      when
      Base :: number(),
      Exponent :: number(),
      Modulus :: number(),
      Result :: number().
do_mod_pow(B, E, M, Result) when E > 0 ->
    NewB = mod(B * B, M),
    NewE = E bsr 1,
    NewResult = case mod(E, 2) of
                    1 ->
                        %% low bit set: fold the current base into the result
                        mod(Result * B, M);
                    _ ->
                        Result
                end,
    do_mod_pow(NewB, NewE, M, NewResult);
do_mod_pow(_, _, _, Result) ->
    Result.
-ifdef(TEST).

%% (Fix for this section: removed non-Erlang dataset metadata fused
%% onto the closing -endif. line.)

%% ceil/1 rounds toward +infinity; negative values truncate toward zero.
ceil_test() ->
    [
     ?assertEqual(ceil(-100.231), -100),
     ?assertEqual(ceil(-1.0001), -1),
     ?assertEqual(ceil(0.5), 1),
     ?assertEqual(ceil(2.00), 2),
     ?assertEqual(ceil(2.5), 3),
     ?assertEqual(ceil(0.1), 1)
    ].

%% floor/1 rounds toward -infinity; fractional negatives go down.
floor_test() ->
    [
     ?assertEqual(floor(-100.231), -101),
     ?assertEqual(floor(-1.0001), -2),
     ?assertEqual(floor(0.5), 0),
     ?assertEqual(floor(2.00), 2),
     ?assertEqual(floor(2.5), 2),
     ?assertEqual(floor(0.1), 0)
    ].

%% pow/2 must be exact even for results far beyond float precision.
pow_test() ->
    [
     ?assertEqual(pow(1, 0), 1),
     ?assertEqual(pow(2, 2), 4),
     ?assertEqual(pow(2, 255), 57896044618658097711785492504343953926634992332820282019728792003956564819968)
    ].

%% mod/2 follows `rem' semantics (result takes the sign of X).
mod_test() ->
    [
     ?assertEqual(mod(2, 2), 0),
     ?assertEqual(mod(2, 4), 2),
     ?assertEqual(mod(2, 3), 2),
     ?assertEqual(mod(3, 9), 3)
    ].

-endif.
%
% This file is part of AtomVM.
%
% Copyright 2019-2022 <NAME> <<EMAIL>>
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
% SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
%
%
% This file is part of AtomVM.
%
% Copyright 2019 <NAME> <<EMAIL>>
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
% SPDX-License-Identifier: Apache-2.0 OR LGPL-2.1-or-later
%
-module(io_lib).
-export([format/2]).
%%-----------------------------------------------------------------------------
%% @param Format format string
%% @param Args format argument
%% @returns string
%% @doc Format string and data to a string.
%% Approximates features of OTP io_lib:format/2, but
%% only supports ~p and ~n format specifiers.
%% Raises bad_format error if the number of format specifiers
%% does not match the length of the Args.
%% @end
%%-----------------------------------------------------------------------------
-spec format(Format :: string(), Args :: list()) -> string().
format(Format, Args) ->
{FormatTokens, Instr} = split(Format),
case length(FormatTokens) == length(Args) + 1 of
true ->
StringList = interleave(FormatTokens, Instr, Args, []),
lists:flatten(StringList);
false ->
throw(bad_format)
end.
%%
%% internal operations
%%
%% @private
split(Format) ->
split(Format, [], [], []).
%% @private
split([], Cur, Accum, Instr) ->
{lists:reverse([lists:reverse(Cur) | Accum]), lists:reverse(Instr)};
split([$~, $p | Rest], Cur, Accum, Instr) ->
split(Rest, [], [lists:reverse(Cur) | Accum], [quote | Instr]);
split([$~, $s | Rest], Cur, Accum, Instr) ->
split(Rest, [], [lists:reverse(Cur) | Accum], [literal | Instr]);
split([$~, $n | Rest], Cur, Accum, Instr) ->
split(Rest, [$\n | Cur], Accum, Instr);
split([$~, $~ | Rest], Cur, Accum, Instr) ->
split(Rest, [$~ | Cur], Accum, Instr);
split([Char | Rest], Cur, Accum, Instr) ->
split(Rest, [Char | Cur], Accum, Instr).
%% @private
interleave([LastToken], _Instr, [], Accum) ->
lists:reverse([LastToken | Accum]);
interleave([Token | Tokens], [Q | Instr], [Arg | Args], Accum) ->
interleave(Tokens, Instr, Args, [to_string(Arg, Q), Token | Accum]).
%% @private
to_string(T, _Q) when is_atom(T) ->
erlang:atom_to_list(T);
to_string(T, _Q) when is_integer(T) ->
erlang:integer_to_list(T);
to_string(T, _Q) when is_float(T) ->
erlang:float_to_list(T);
to_string(T, _Q) when is_pid(T) ->
erlang:pid_to_list(T);
to_string(T, _Q) when is_reference(T) ->
erlang:ref_to_list(T);
to_string(T, _Q) when is_function(T) ->
erlang:fun_to_list(T);
to_string(T, Q) when is_list(T) ->
case is_printable_ascii(T) of
true ->
case Q of
quote -> [$"] ++ T ++ [$"];
_ -> T
end;
_ ->
"[" ++ lists:join(",", [to_string(E, quote) || E <- T]) ++ "]"
end;
to_string(T, Q) when is_binary(T) ->
BinList = erlang:binary_to_list(T),
Data =
case is_printable_ascii(BinList) of
true ->
case Q of
quote -> [$"] ++ BinList ++ [$"];
_ -> BinList
end;
_ ->
lists:join(",", [erlang:integer_to_list(E) || E <- BinList])
end,
"<<" ++ Data ++ ">>";
to_string(T, _Q) when is_tuple(T) ->
"{" ++
lists:flatten(lists:join(",", [to_string(E, quote) || E <- erlang:tuple_to_list(T)])) ++
"}";
to_string(T, _Q) when is_map(T) ->
"#{" ++
lists:flatten(
lists:join(",", [
to_string(K, quote) ++ " => " ++ to_string(V, quote)
|| {K, V} <- maps:to_list(T)
])
) ++ "}";
to_string(_T, _Q) ->
"unknown".
%% @private
is_printable_ascii([]) ->
true;
is_printable_ascii([E | R]) when is_integer(E) andalso 32 =< E andalso E < 127 ->
is_printable_ascii(R);
is_printable_ascii(_) ->
false. | libs/estdlib/src/io_lib.erl | 0.619817 | 0.42925 | io_lib.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% merkerl: simple in-memory Merkle Trees
%%
%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc An implementation of Merkle Trees for anti-entropy.
%%
%% Intended use is for synchronizing two key/value stores with similar but
%% potentially-divergent content.
%%
%% Typical usage is when a pair (or more) of nodes or systems have views of a
%% set of key/value objects which can change independently. Whenever a new
%% object is created or an existing one is modified (there is no difference from
%% the merkle point of view) the node seeing the change performs an insert/2 to
%% record the change. At any time, one node can send a representation of its
%% tree to another node. The receiving node can diff/2 the trees to see which
%% objects differ on the two systems. From this information, a system knows
%% exactly which objects to send or request in order to converge toward a common
%% view of the world. Of course, if the objects contain versioning information
%% it will be much easier to resolve which node's view for any given object is
%% newer.
%%
%% See the code of merkle_test/0 for trivial example usage.
%%
%% Application usage note: the 'crypto' OTP application must be started before
%% any of this module's functions will work.
%%
%% @reference R. C. Merkle, A Digital Signature Based on a Conventional
%% Encryption Function, A Conference on the Theory and Applications of
%% Cryptographic Techniques on Advances in Cryptology, p.369-378, August 16-20,
%% 1987
-module(merkerl).
-export([insert/2,delete/2,build_tree/1,diff/2,allkeys/1]).
% TODO: fix doc, userdata is the ONLY user-exposed key
-record(merk, {nodetype, % atom: expected values are 'leaf' or 'inner'
key=undefined, % if nodetype=leaf, then this is binary/160
% (keys are 160b binaries)
userdata=undefined, % (if user specified a non-binary key)
hashval, % hash of value if leaf, of children if inner
offset=undefined, % if inner, then offset to reach here
children=undefined % if nodetype=inner, then this is orddict
}).
% TODO in doc: note that these are an internal-only form
-record(merkitem, {userdata=undefined, % for non-binary "keys"
hkey, % SHA-1 of userdata
hval % SHA-1 of value (user-supplied)
}).
% @type tree() = treeleaf() | treeinner() | undefined.
% The tree() type here is used as the internal representation of
% a Merkle tree. It can be used locally with insert/2 or pickled
% via term_to_binary and inverse for use remotely in diff/2.
% @type treeleaf() = term().
% Not externally useful, this is one of two record types making up tree().
% @type treeinner() = term().
% Not externally useful, this is one of two record types making up tree().
% (NEED TO EDOC THE RECORD TYPES)
% The merkitem records...
% These make up the "real" leaves in the Merkle tree.
%
% This is the input that most clients of the library will need to provide.
% @type key() = binary().
% This is the key, or "name" for an object tracked by a Merkle tree.
% It should remain constant through changes to the object it references.
% It is expected to be a 160b binary, as produced by
% crypto:sha/1 -- if the natural names of objects are not such values,
%% then simply use crypto:sha(term_to_binary(TheName)).
% @type hash() = binary().
% This is a hash representing a unique content value for an object
% tracked by a Merkle tree.
% It should change if the object it references changes in value.
% It is expected to be a 160b binary, as produced by
% crypto:sha/1 -- crypto:sha(term_to_binary(value)) is the canonical
% way to produce a hash().
%% @spec build_tree([kh()]) -> tree()
%% @doc Build a Merkle tree from a list of {Key, Hash} pairs of objects.
%% Folding from the right inserts elements in the same order as the
%% original recursive construction, so duplicate keys resolve the same
%% way as before.
build_tree(KHs) ->
    lists:foldr(fun insert/2, undefined, KHs).
%% @doc Remove the object named by Key (userdata form) from Tree.
%% Returns the updated tree; deleting a tree's only leaf yields
%% 'undefined'.
delete(Key, Tree) when is_record(Tree, merk) ->
    mi_delete({0, #merkitem{userdata=Key, hkey=sha(Key), hval=undefined}}, Tree).

%% @private
%% @doc Recursive worker for delete/2. Offset is the bit position into
%% the 160-bit hashed key used to select the child slot at this depth
%% (advances 8 bits per level).
mi_delete({Offset, MI}, Tree) ->
    HKey = MI#merkitem.hkey,
    case Tree#merk.nodetype of
        leaf ->
            %% A leaf either is the target (drop it) or is untouched.
            case Tree#merk.key of
                HKey ->
                    undefined;
                _ ->
                    Tree
            end;
        inner ->
            Kids = Tree#merk.children,
            OKey = offset_key(Offset, HKey),
            %% Recurse into the child slot for this key byte, if present.
            NewKids = case orddict:is_key(OKey, Kids) of
                          false ->
                              Kids;
                          true ->
                              SubTree = orddict:fetch(OKey, Kids),
                              orddict:store(OKey,
                                            mi_delete({Offset+8, MI}, SubTree), Kids)
                      end,
            %% mkinner/2 rebuilds the node and prunes 'undefined'
            %% children left behind by a successful delete.
            mkinner(Offset, NewKids)
    end.
%% @spec insert(KH :: {term(), binary()}, T :: tree()) -> tree()
%% @doc Insert the {Userdata, Hashval} pair for a new or changed object
%% into T.
%%
%% This is used much like a typical tree-insert function.
%% To create a new tree, this can be called with T set to the atom
%% 'undefined'.
insert({Userdata, Hashval}, T) ->
    mi_insert(#merkitem{userdata=Userdata, hkey=sha(Userdata), hval=Hashval}, T).

%% @private
%% @doc Recursive worker for insert/2. Offset is the bit depth into the
%% 160-bit hashed key, advancing 8 bits per tree level.
mi_insert(MI, T) when is_record(MI, merkitem) ->
    mi_insert({0, MI}, T);
mi_insert({_Offset, MI}, undefined) ->
    %% Empty (sub)tree: the item becomes a leaf here.
    mkleaf(MI);
mi_insert({160, MI}, _Tree) ->
    % we're all the way deep! replace.
    mkleaf(MI);
mi_insert({Offset, MI}, Tree) ->
    Key = MI#merkitem.hkey,
    case Tree#merk.nodetype of
        leaf ->
            case Tree#merk.key of
                Key -> % replacing!
                    mkleaf(MI);
                _ -> % turning a leaf into an inner
                    Kid = orddict:store(offset_key(Offset, Tree#merk.key),
                                        Tree, orddict:new()),
                    NewInner = mkinner(Offset, Kid),
                    mi_insert1({Offset, MI}, NewInner)
            end;
        inner ->
            mi_insert1({Offset, MI}, Tree)
    end.

%% @private
%% @doc Insert MI below an inner node: descend into (or create) the
%% child slot selected by the key byte at Offset.
mi_insert1({Offset, MI}, Tree) ->
    Kids = Tree#merk.children,
    OKey = offset_key(Offset, MI#merkitem.hkey),
    NewKids = case orddict:is_key(OKey, Kids) of
                  false ->
                      orddict:store(OKey, mkleaf(MI), Kids);
                  true ->
                      SubTree = orddict:fetch(OKey, Kids),
                      orddict:store(OKey,
                                    mi_insert({Offset+8, MI}, SubTree), Kids)
              end,
    mkinner(Offset, NewKids).
%% @private
%% @doc Wrap a merkitem as a leaf node.
mkleaf(#merkitem{userdata = UserData, hkey = HKey, hval = HVal}) ->
    #merk{nodetype = leaf,
          key = HKey,
          userdata = UserData,
          hashval = HVal}.

%% @private
%% @doc Build an inner node over the given child orddict, dropping any
%% 'undefined' children (left behind by deletes).
%% NOTE(review): the node hash is computed over the unfiltered child
%% list, exactly as before -- confirm that is intentional.
mkinner(Offset, Kids) ->
    Children = lists:filter(fun({_OKey, Child}) -> Child =/= undefined end, Kids),
    #merk{nodetype = inner,
          hashval = sha(Kids),
          offset = Offset,
          children = Children}.
%% @private
%% @doc Extract the byte of Key found Offset bits in.
%% Offset is an 8-divisible integer from 0 to 152 inclusive; Key is a
%% 160-bit binary. A single bit-syntax match skips Offset bits and
%% captures the next 8 as an integer.
offset_key(Offset, Key) ->
    <<_Skip:Offset, OKey:8, _Rest/binary>> = Key,
    OKey.
%% @spec diff(tree(), tree()) -> [userdata()]
%% @doc Find the names (userdata) of objects which differ between the
%% two trees.
%%
%% For this purpose, "differ" means that an object either exists in
%% only one of the two trees or it exists in both but with different
%% hash() values.
%%
%% No information about the differing objects is provided except their
%% userdata names. (Objects with vector-clock versioning are helpful
%% here.)
diff(undefined, X) -> allkeys(X);
diff(X, undefined) -> allkeys(X);
diff(TreeA, TreeB) when is_record(TreeA, merk), is_record(TreeB, merk) ->
    %% usort both sorts and removes duplicates that the recursive walk
    %% can produce.
    lists:usort(diff1(TreeA, TreeB)).
%% @private
%% @doc Compare two subtrees rooted at the same offset. Equal hash
%% values mean equal content, so nothing below this point differs.
diff1(#merk{hashval = HashA}, #merk{hashval = HashB}) when HashA == HashB ->
    [];
diff1(TreeA, TreeB) ->
    diff2(TreeA, TreeB).
%% @private
%% @doc Both subtrees have differing hashes. When both are leaves they
%% name the two differing objects directly; otherwise keep descending.
diff2(#merk{nodetype = leaf, userdata = DataA},
      #merk{nodetype = leaf, userdata = DataB}) ->
    [DataA, DataB];
diff2(TreeA, TreeB) ->
    diff3(TreeA, TreeB).
%% @private
%% @doc Hashes differ and at least one side is an inner node. A leaf on
%% either side is compared against the whole opposite subtree; two
%% inner nodes are compared child slot by child slot.
diff3(#merk{nodetype = leaf} = LeafA, TreeB) ->
    allbutmaybe(TreeB, LeafA);
diff3(TreeA, #merk{nodetype = leaf} = LeafB) ->
    allbutmaybe(TreeA, LeafB);
diff3(TreeA, TreeB) ->
    diff4(TreeA, TreeB).
%% @private
%% @doc Both sides are inner nodes with differing hashes: walk their
%% child slot lists in parallel over the 256 possible slot indexes.
diff4(#merk{children = KidsA}, #merk{children = KidsB}) ->
    diff4a(KidsA, KidsB, 0, []).
%% @private
%% @doc Parallel walk over two orddicts of child slots (indexes
%% 0..255). When one side is exhausted, every key in the other side's
%% remaining subtrees differs; otherwise step both lists via diff4b/4.
%%
%% Fix: the KidsA-exhausted branch previously used lists:flatten/1,
%% which splices list-valued userdata (e.g. string keys, as used in
%% merkle_test/0) character-by-character into the result. The symmetric
%% KidsB-exhausted branch already used lists:append/1; both now do.
diff4a(KidsA, KidsB, Idx, Acc) ->
    if
        Idx > 255 ->
            %% All slot indexes visited.
            Acc;
        KidsA =:= [] ->
            %% A exhausted: everything remaining in B differs.
            lists:append(Acc, lists:append([allkeys(X) ||
                                               {_Okey, X} <- KidsB]));
        KidsB =:= [] ->
            %% B exhausted: everything remaining in A differs.
            lists:append(Acc, lists:append([allkeys(X) ||
                                               {_Okey, X} <- KidsA]));
        true ->
            diff4b(KidsA, KidsB, Idx, Acc)
    end.
%% @private
%% @doc Step the two (ascending, orddict-ordered) child lists past slot
%% index Idx. A slot present on only one side contributes all of that
%% subtree's keys; a slot present on both sides is recursed into via
%% diff1/2.
diff4b(KidsA, KidsB, Idx, Acc) ->
    % precondition: neither KidsA nor KidsB is empty
    [{OkeyA, NodeA} | RestA] = KidsA,
    [{OkeyB, NodeB} | RestB] = KidsB,
    case OkeyA == Idx of
        true ->
            case OkeyB == Idx of
                true ->
                    %% Slot occupied on both sides: compare subtrees.
                    diff4a(RestA, RestB, Idx+1,
                           lists:append(Acc, diff1(
                                               NodeA, NodeB)));
                false ->
                    %% Only A has this slot: all of its keys differ.
                    diff4a(RestA, KidsB, Idx+1,
                           lists:append(Acc, allkeys(
                                               NodeA)))
            end;
        false ->
            case OkeyB == Idx of
                true ->
                    %% Only B has this slot: all of its keys differ.
                    diff4a(KidsA, RestB, Idx+1,
                           lists:append(Acc, allkeys(
                                               NodeB)));
                false ->
                    %% Neither side uses this slot index; advance.
                    diff4a(KidsA, KidsB, Idx+1, Acc)
            end
    end.
%% @doc Return the userdata names of every object in the tree.
allkeys(undefined) ->
    [];
allkeys(#merk{nodetype = leaf, userdata = UserData}) ->
    [UserData];
allkeys(#merk{} = Tree) ->
    lists:append([allkeys(Kid) || Kid <- getkids(Tree)]).
%% @private
%% @doc All keys in Tree, plus Leaf's key -- unless a node with Leaf's
%% value hash already occurs in Tree, in which case Leaf's key is
%% removed from the result instead.
allbutmaybe(Tree, Leaf) when is_record(Tree, merk), is_record(Leaf, merk) ->
    TreeKeys = allkeys(Tree),
    case contains_node(Tree, Leaf) of
        true ->
            lists:delete(Leaf#merk.userdata, TreeKeys);
        false ->
            [Leaf#merk.userdata | TreeKeys]
    end.
%% @private
%% @doc True when a leaf with Node's value hash occurs anywhere in Tree.
contains_node(#merk{nodetype = leaf, hashval = Hash}, Node) ->
    Hash == Node#merk.hashval;
contains_node(Tree, Node) ->
    lists:any(fun(Kid) -> contains_node(Kid, Node) end, getkids(Tree)).

%% @private
%% @doc The child subtrees of an inner node, in slot order.
getkids(#merk{children = Children}) ->
    [Kid || {_OKey, Kid} <- orddict:to_list(Children)].
%% @private
%% @doc SHA-1 of the external term format of X. The old_hash macro
%% selects the legacy crypto:sha/1 API on older OTP releases.
-ifndef(old_hash).
sha(X) ->
    crypto:hash(sha, term_to_binary(X)).
-else.
sha(X) ->
    crypto:sha(term_to_binary(X)).
-endif.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% A test function and example code.
%%
%% This should be changed into a proper unit test suite.
%% (Fix: stray dataset-table text appended after -endif. broke
%% compilation and has been removed.)
merkle_test() ->
    %% The crypto application must be running for sha/1 to work.
    case lists:keymember(crypto, 1, application:loaded_applications()) of
        true -> ok;
        false -> ok = application:start(crypto)
    end,
    A = [{one,"one data"},{two,"two data"},{three,"three data"},
         {four,"four data"},{five,"five data"}],
    B = [{one,"one data"},{two,"other two"},{three,"three data"},
         {four,"other four"},{five,"five data"}],
    A2 = build_tree(A),
    B2 = build_tree(B),
    ?assertEqual(lists:usort([two, four]), diff(A2,B2)),
    C = [{one,"one data"}],
    C2 = build_tree(C),
    ?assertEqual(lists:usort([two, three, four, five]), diff(A2,C2)),
    %% Re-inserting a key with a new hash registers as a difference.
    D = insert({four, sha("changed!")}, A2),
    ?assertEqual([four], diff(A2,D)),
    E = insert({five, sha("changed more!")}, D),
    ?assertEqual([five], diff(D,E)),
    ?assertEqual(lists:usort([four, five]), diff(A2,E)),
    %% Deleting the same key from both sides removes the difference.
    F = delete(five,D),
    G = delete(five,E),
    ?assertEqual([], diff(F,G)),
    H = delete(two,A2),
    ?assertEqual([two], diff(A2,H)),
    ?assertEqual([one], diff(C2,undefined)),
    %% String (list) userdata must survive diffing intact.
    STree1 = build_tree([{"hello", "hi"},{"and", "what"}]),
    STree2 = build_tree([{"hello", "hi"},{"goodbye", "bye"}]),
    ?assertEqual(lists:usort(["and", "goodbye"]), diff(STree1, STree2)),
    %% Distinct keys sharing a prefix must not clobber each other.
    I = [{<<"riak.com42">>,sha("should not")},{<<"riak.com452">>,sha("clobber")}],
    I2 = build_tree(I),
    ?assertEqual(2, length(allkeys(I2))).
-endif.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at http://mozilla.org/MPL/2.0/.
%%
%% -------------------------------------------------------------------
%% @doc
%%
%% == Introduction ==
%% This module defines a sliding time-window histogram with execution
%% cost control.
%%
%% The problem with traditional histograms is that every sample is
%% stored and processed, no matter what the desired resolution is.
%%
%% If a histogram has a sliding window of 100 seconds, and we have a
%% sample rate of 100Hz, we will have 10000 elements to process every time
%% we want to calculate that average, which is expensive.
%% The same goes for min/max finds, percentile calculations, etc.
%%
%% The solution is to introduce cost-control, where we can balance
%% execution time against sample resolution.
%%
%% The obvious implementation is to lower the sample rate by throwing
%% away samples and just store every Nth sample. However, this will
%% mean potentially missed min/max values, and other extreme data
%% that is seen by the edge code but is thrown away.
%%
%% A slotted slide histogram will define a number of time slots, each
%% spanning a fixed number of milliseconds. The slider then stores
%% slots that cover a given timespan into the past (a rolling
%% histogram). All slots older than the timespan are discarded.
%%
%% The "current" slot is defined as the time period between the time
%% stamp of the last stored slot and the time when it is time to store
%% the next slot. If the slot period (size) is 100ms, and the last
%% slot was stored in the histogram at msec 1200, the current slot
%% period ends at msec 1300.
%%
%% All samples received during the current slot are processed by a
%% low-cost fun that updates the current slot state. When the current
%% slot ends, another fun is used to transform the current slot state
%% to a value that is stored in the histogram.
%%
%% If a simple average is to be calculated for all samples received,
%% the sample-processing fun will add to the sum of the received
%% samples, and increment sample counter. When the current slot
%% expires, the result of SampleSum / SampleCount is stored in the
%% slot.
%%
%% If min/max are to be stored by the slotted histograms, the current
%% slot state would have a { Min, Max } tuple that is updated with the
%% smallest and largest values received during the period. At slot
%% expiration the min/max tuple is simply stored, by the
%% transformation fun, in the histogram slots.
%%
%% By adjusting the slot period and the total histogram duration, the
%% cost of analysing the entire histogram can be balanced against
%% the resolution of that analysis.
%%
%%
%% == SLOT HISTOGRAM MANAGEMENT ==
%%
%% The slide state maintains a list of { TimeStamp, SlotElem }
%% slot tuples. TimeStamp is the time period (in monotonic ms),
%% rounded down to the resolution of the slot period, and SlotElem is
%% the tuple generated by the current slot transformation MFA. The
%% list is sorted on descending time stamps (newest slot first).
%%
%% Normally each element in the list has a timestamp that is
%% SlotPeriod milliseconds newer than the next slot in the
%% list. However, if no samples are received during the current slot,
%% no slot for that time stamp will be stored, leaving a hole in the
%% list. Normally, the slot list would look like this (with a 100
%% msec slot period and a simple average value):
%%
%% <pre lang="erlang">
%% [ { 1400, 23.2 }, { 1300, 23.1 }, { 1200, 22.8 }, { 1100, 23.0 } ]
%% </pre>
%%
%% If no samples were received during the period between 1200 and 1300
%% (ms), no slot would be stored at that time stamp, yielding the
%% following list:
%%
%% <pre lang="erlang">
%% [ { 1400, 23.2 }, { 1300, 23.1 }, { 1100, 23.0 } ]
%% </pre>
%%
%% This means that the total length of the slot list may vary, even
%% if it always covers the same time span into the past.
%%
%% == SLOT LISTS ==
%%
%% The slotted slider stores its slots in two lists, list1, and list2.
%% list1 contains the newest slots. Once the oldest element in list1
%% is older than the time span covered by the histogram, the entire
%% content of list1 is shifted into list2, and list1 is set to [].
%% The old content of list2, if any, is discarded during the shift.
%%
%% When the content of the histogram is to be retrieved (through
%% fold{l,r}(), or to_list()), the entire content of list1 is prepended to
%% the part of list2 that is within than the time span covered by the
%% histogram.
%%
%% If the time span of the histogram is 5 seconds, with a 1 second
%% slot period, list1 can look like this :
%%
%% <pre lang="erlang">
%% list1 = [ {5000, 1.2}, {4000, 2.1}, {3000, 2.0}, {2000, 2.3}, {1000, 2.8} ]
%% </pre>
%%
%% When the next slot is stored in the list, add_slot() will detect
%% that the list is full since the oldest element ({1000, 20.8}) will
%% fall outside the time span covered by the histogram. List1 will
%% shifted to List2, and List1 will be set to the single new slot that
%% is to be stored:
%%
%% <pre lang="erlang">
%% list1 = [ {6000, 1.8} ]
%% list2 = [ {5000, 1.2}, {4000, 2.1}, {3000, 2.0}, {2000, 2.3}, {1000, 2.8} ]
%% </pre>
%%
%% To_list() and fold{l,r}() will return list1, and the first four elements
%% of list2 in order to get a complete histogram covering the entire
%% time span:
%%
%% <pre lang="erlang">
%% [ {6000, 1.8}, {5000, 1.2}, {4000, 2.1}, {3000, 2.0}, {2000, 2.3} ]
%% </pre>
%%
%%
%% == SAMPLE PROCESSING AND TRANSFORMATION FUN ==
%%
%% Two funs are provided to the new() function of the slotted slide
%% histogram. The processing function is called by add_element() and
%% will take the same sample value provided to that function together
%% with the current timestamp and slot state as arguments. The
%% function will return the new current slot state.
%%
%% <pre lang="erlang">
%% M:F(TimeStamp, Value, State) -> NewState
%% </pre>
%%
%% The first call to the sample processing fun when the current slot
%% is newly reset (just after a slot has been added to the histogram),
%% state will be set to 'undefined'
%%
%% <pre lang="erlang">
%% M:F(TimeStamp, Value, undefined) -> NewState
%% </pre>
%%
%% The transformation fun is called when the current slot has expired
%% and is to be stored in the histogram. It will receive the current
%% timestamp and slot state as arguments and returns the element to
%% be stored (together with a slot timestamp) in the slot histogram.
%%
%% <pre lang="erlang">
%% M:F(TimeStamp, State) -> Element
%% </pre>
%%
%% Element will present in the lists returned by to_list() and fold{l,r}().
%% If the transformation MFA cannot do its job, for example because
%% no samples have been processed by the sample processing fun,
%% the transformation fun should return 'undefined'
%%
%% See new/2 and its avg_sample() and avg_transform() functions for an
%% example of a simple average value implementation.
%%
%% @end
-module(exometer_slot_slide).
-export([new/2, new/4, new/5,
add_element/2,
add_element/3,
add_element/4,
reset/1,
to_list/1,
foldl/3,
foldl/4,
foldr/3,
foldr/4]).
-compile(inline).
-type(timestamp() :: integer()).
-type(value() :: any()).
-type(cur_state() :: any()).
-type(sample_fun() :: fun((timestamp(), value(), cur_state()) -> cur_state())).
-type transform_fun() :: fun((timestamp(), cur_state()) -> cur_state()).
%% Fixed size event buffer
%% A slot is indexed by taking the current timestamp (in ms) divided by the slot_period
%%
-record(slide, {timespan = 0 :: integer(), % How far back in time do we go, in slot period increments.
sample_fun :: sample_fun(),
transform_fun :: transform_fun(),
slot_period :: integer(), % Period, in ms, of each slot
cur_slot = 0 :: integer(), % Current slot as in
cur_state = undefined :: any(), % Total for the current slot
list1_start_slot = 0 ::integer(), % Slot of the first list1 element
list1 = [] :: list(),
list2 = [] :: list()}).
%% @doc Create a slotted-slide histogram covering HistogramTimeSpan ms
%% of history in SlotPeriod ms slots, with the built-in average
%% sample/transform funs.
-spec new(integer(), integer()) -> #slide{}.
new(HistogramTimeSpan, SlotPeriod) ->
    new(HistogramTimeSpan, SlotPeriod, fun avg_sample/3, fun avg_transform/2).

%% @doc As new/5 with an empty option list.
-spec new(integer(), integer(), sample_fun(), transform_fun()) -> #slide{}.
new(HistogramTimeSpan, SlotPeriod, SampleF, TransformF) ->
    new(HistogramTimeSpan, SlotPeriod, SampleF, TransformF, []).

%% @doc Create a histogram with explicit sample and transform funs.
%% SampleF folds each incoming sample into the current slot state;
%% TransformF converts that state into the element stored when the slot
%% expires. Options are currently ignored.
-spec new(integer(), integer(), sample_fun(), transform_fun(), list()) -> #slide{}.
new(HistogramTimeSpan, SlotPeriod, SampleF, TransformF, _Options)
  when is_function(SampleF, 3), is_function(TransformF, 2) ->
    %% timespan and cur_slot are kept in slot units, not milliseconds.
    #slide{timespan = trunc(HistogramTimeSpan / SlotPeriod),
           sample_fun = SampleF,
           transform_fun = TransformF,
           slot_period = SlotPeriod,
           cur_slot = trunc(timestamp() / SlotPeriod),
           cur_state = undefined,
           list1_start_slot = 0,
           list1 = [],
           list2 = []}.
%% @doc Record sample Val at the current time.
-spec add_element(any(), #slide{}) -> #slide{}.
add_element(Val, Slide) ->
    add_element(timestamp(), Val, Slide, false).

%% @doc Record sample Val at an explicit timestamp TS (ms).
add_element(TS, Val, Slide) ->
    add_element(TS, Val, Slide, false).

%% @doc Record a sample. When Wrap is true the result is {Flag, Slide}
%% where Flag is true iff storing the sample caused list1 to be shifted
%% into list2; when Wrap is false only the updated slide is returned.
add_element(TS, Val, #slide{cur_slot = CurrentSlot,
                            sample_fun = SampleF} = Slide, Wrap) ->
    TSSlot = get_slot(TS, Slide),
    %% We have two options here:
    %% 1. We have moved into a new slot since last call to add_element().
    %%    In this case, we invoke the transform MFA for the now completed slot
    %%    and add the returned element to the slider using the add_slot() function.
    %%    Once that is done, we start with fresh 'undefined' current slot state
    %%    that the next call to the sample MFA will receive.
    %%
    %% 2. We are in the same slot as during the last call to add_element().
    %%    In this case, we will simply call the sample MFA to update the
    %%    current slot state.
    %%
    {Flag, Slide1} =
        if TSSlot =/= CurrentSlot ->
                add_slot(TS, Slide);
           true ->
                {false, Slide}
        end,
    %%
    %% Invoke the sample MFA to get a new state to work with
    %%
    ret(Wrap, Flag,
        Slide1#slide{cur_state = SampleF(TS, Val, Slide1#slide.cur_state)}).

%% @private
%% @doc Shape the add_element/4 return value according to Wrap.
ret(false, _, Slide) ->
    Slide;
ret(true, Flag, Slide) ->
    {Flag, Slide}.
%% @doc Convert the whole histogram, in both buffers, to a list of
%% {TimestampMs, Element} pairs in ascending timestamp order.
-spec to_list(#slide{}) -> list().
to_list(#slide{timespan = 0}) ->
    %% A zero-width histogram can never hold data.
    [];
to_list(#slide{timespan = TimeSpan} = Slide) ->
    Oldest = get_slot(Slide) - TimeSpan,
    take_since(Oldest, Slide).

%% @doc Drop all recorded data, keeping only the configuration.
-spec reset(#slide{}) -> #slide{}.
reset(#slide{} = Slide) ->
    Slide#slide{cur_slot = 0,
                cur_state = undefined,
                list1_start_slot = 0,
                list1 = [],
                list2 = []}.
%% @doc Left-fold Fun over the {TimestampMs, Element} pairs recorded in
%% the window ending at explicit timestamp TS.
foldl(TS, Fun, Acc, #slide{timespan = TimeSpan} = Slide) ->
    Oldest = get_slot(TS, Slide) - TimeSpan,
    lists:foldl(Fun, Acc, take_since(Oldest, Slide)).

%% @doc As foldl/4, with the window ending now.
foldl(Fun, Acc, Slide) ->
    foldl(timestamp(), Fun, Acc, Slide).

%% @doc Right-fold Fun over the recorded pairs in the window ending at
%% explicit timestamp TS.
foldr(TS, Fun, Acc, #slide{timespan = TimeSpan} = Slide) ->
    Oldest = get_slot(TS, Slide) - TimeSpan,
    lists:foldr(Fun, Acc, take_since(Oldest, Slide)).

%% @doc As foldr/4, with the window ending now.
foldr(Fun, Acc, Slide) ->
    foldr(timestamp(), Fun, Acc, Slide).
%% Collect all elements in the histogram with a timestamp that falls
%% within the timespan ranging from Oldest up to the current time.
%%
%% @private
%% @doc Oldest is a slot index; the result is a list of
%% {TimestampMs, Element} pairs, oldest first. The live current slot,
%% if it holds samples, is flushed into the histogram first so its data
%% is included.
take_since(Oldest, #slide{%% cur_slot = CurrentSlot,
                          cur_state = CurrentState,
                          slot_period = SlotPeriod} = Slide) ->
    %% Check if we need to add a slot for the current time period
    %% before we start to grab data.
    %% We add the current slot if it has a sample (cur_state =/= undefined)
    %% and the slot period has expired.
    TS = timestamp(),
    %% TSSlot = get_slot(TS, Slide),
    #slide{list1 = List1,
           list2 = List2} =
        %% if TSSlot =/= CurrentSlot, CurrentState =/= undefined ->
        if CurrentState =/= undefined ->
                {_, Sl} = add_slot(TS, Slide),
                Sl;
           true ->
                Slide
        end,
    %% list1 holds the newest slots; fold it first, then list2.
    take_since(List2, Oldest, SlotPeriod, take_since(List1, Oldest, SlotPeriod, [])).

%% @private
%% @doc Prepend the in-window elements of a descending slot list onto
%% Acc, converting slot indexes back to milliseconds. Stops at the
%% first too-old slot; prepending a descending list yields an
%% ascending (oldest-first) result.
take_since([{TS, Element} | T], Oldest, SlotPeriod, Acc) when TS >= Oldest ->
    take_since(T, Oldest, SlotPeriod, [{TS * SlotPeriod, Element} | Acc]);
take_since(_, _, _, Acc) ->
    %% Don't reverse; already the wanted order.
    Acc.
%%
%% @private
%% @doc Slot index for the current time.
get_slot(Slide) ->
    get_slot(timestamp(), Slide).

%% @private
%% @doc Slot index for an explicit timestamp (ms): the timestamp
%% divided by the slot period, truncated.
get_slot(TS, #slide{slot_period = Period}) ->
    trunc(TS / Period).
%%
%% Calculate the data sampled during the current slot period
%% and push the transformed element into the histogram.
%%
%% @private
%% @doc Close the current slot: run the transform fun on its state,
%% store the produced element (tagged with the closed slot's index) at
%% the head of list1, and reset the current slot to the one containing
%% TS. Returns {ShiftedToList2, NewSlide}.
add_slot(TS, #slide{timespan = TimeSpan,
                    slot_period = SlotPeriod,
                    cur_slot = CurrentSlot,
                    cur_state = CurrentState,
                    transform_fun = TransformF,
                    list1 = List1,
                    list1_start_slot = StartSlot} = Slide) ->
    %% Transform current slot state to an element to be deposited
    %% in the histogram list
    TSSlot = trunc(TS / SlotPeriod),
    case TransformF(TS, CurrentState) of
        undefined -> %% Transformation function could not produce an element
            %% Reset the time slot to the current slot. Reset state.
            {false, Slide#slide{cur_slot = TSSlot,
                                cur_state = undefined}};
        %% The transform function produced an element to store
        Element ->
            %%
            %% Check if it is time to do a buffer swap, i.e. the oldest
            %% element of list1 has fallen outside the covered timespan.
            %%
            if StartSlot < TSSlot - TimeSpan ->
                    %% Shift list1 into list2 (discarding the old list2).
                    %% Add the new slot as the initial element of list1
                    {true, Slide#slide{list1 = [{CurrentSlot, Element}],
                                       list2 = List1,
                                       list1_start_slot = CurrentSlot,
                                       cur_slot = TSSlot,
                                       cur_state = undefined}};
               true ->
                    %% No shift necessary. Tack on the new slot to list1.
                    {false,
                     Slide#slide{list1 = [{CurrentSlot, Element} | List1],
                                 cur_slot = TSSlot,
                                 cur_state = undefined}}
            end
    end.
%% @private
%% @doc Default sample fun: accumulate {Count, Total} for the samples
%% received during the current slot. The first sample of a slot sees an
%% 'undefined' state.
avg_sample(_TS, Value, undefined) ->
    {1, Value};
avg_sample(_TS, Value, {Count, Sum}) ->
    {Count + 1, Sum + Value}.

%% @private
%% @doc Default transform fun: average of the samples accumulated by
%% avg_sample/3, or 'undefined' when no samples arrived during the slot.
avg_transform(_TS, undefined) ->
    undefined;
avg_transform(_TS, {Count, Sum}) ->
    Sum / Count.
%% @private
%% @doc Milliseconds since the module's invented epoch of {1258,0,0}
%% (2009-11-12 04:26:40), derived from os:timestamp/0.
%% (Fix: stray dataset-table text appended after the final expression
%% broke compilation and has been removed.)
timestamp() ->
    {MegaSecs, Secs, MicroSecs} = os:timestamp(),
    (MegaSecs - 1258) * 1000000000 + Secs * 1000 + MicroSecs div 1000.
%%% @author <NAME>
%%% @doc Lazily-evaluated iterables for Erlang
%%% @end
-module(streams).
-compile([{inline, [{lazily, 2}, {id, 1}]}]).
%% API exports
%% Stream transformers
-export([
append/2,
chunk/2,
cycle/1,
drop/2,
drop_while/2,
filter/2,
filter_map/2,
flat_map/2,
group_by/2,
lazily/2,
map/2,
split/2,
split_while/2,
take/2,
take_while/2,
transform/3,
uniq/1,
uniq/2,
with_index/1,
with_index/2,
zip/2,
zip/3
]).
%% Stream generators
-export([
iterate/2,
naturals/0,
repeatedly/1,
unfold/2
]).
%% Stream reducers
-export([
count/1,
fold/3,
sum/1,
to_list/1,
to_map/1
]).
-type stream(A) :: fun(() -> halt | {A, stream(A)}).
%% @doc
%% Attempts to retrieve the next element of `Stream'.
%%
%% Returns `{Element, Rest}' for the next element and the updated
%% stream, or `halt' when the stream is exhausted. Thunks are unwrapped
%% repeatedly until they produce a yield tuple or `halt'; plain lists
%% and maps are also accepted as streams.
%% @end
-spec yield(Stream :: stream(A)) -> halt | {A, stream(A)}.
yield(Thunk) when is_function(Thunk) ->
    case Thunk() of
        Next when is_function(Next) -> yield(Next);
        Result -> Result
    end;
yield([Head | Tail]) ->
    {Head, Tail};
yield([]) ->
    halt;
yield(Map) when is_map(Map) ->
    yield(maps:to_list(Map)).
%% @doc
%% Lazily applies a function to a stream. Somewhat equivalent to list
%% comprehensions.
%%
%% Expects a fun `F' of arity 2 which takes the head and tail of the
%% given stream, returning either `halt' or a new head/tail tuple pair.
%% @end
%% Fix: the spec previously declared F as an arity-1 fun taking a
%% {A, stream(A)} tuple, but the body calls F(X, Xs) with two
%% arguments -- the contract was wrong and would trip Dialyzer.
-spec lazily(fun((A, stream(A)) -> halt | {B, stream(B)}), stream(A)) -> stream(B).
lazily(F, Stream) ->
    fun() ->
        case yield(Stream) of
            {X, Xs} -> F(X, Xs);
            halt -> halt
        end
    end.

%% @private Identity function, used as the default key fun in uniq/1.
id(A) -> A.
%% @doc
%% Returns the stream of all natural numbers.
%% @end
-spec naturals() -> stream(integer()).
naturals() ->
    iterate(fun(N) -> N + 1 end, 0).

%% @doc
%% Creates a stream that first emits `Acc' and then the infinite
%% sequence of values obtained by repeatedly applying `F' to the
%% previous value.
%% @end
-spec iterate(fun((A) -> A), A) -> stream(A).
iterate(F, Seed) ->
    fun() ->
        {Seed, do_iterate(F, Seed)}
    end.

%% @private Tail of iterate/2: each force applies F once more.
do_iterate(F, Prev) ->
    fun() ->
        Next = F(Prev),
        {Next, do_iterate(F, Next)}
    end.
%% @doc
%% Creates a stream that yields every value of `StreamA' followed by
%% every value of `StreamB'.
%% @end
-spec append(stream(A), stream(A)) -> stream(A) when A :: any().
append(StreamA, StreamB) ->
    fun() ->
        case yield(StreamA) of
            halt -> yield(StreamB);
            {Head, Tail} -> {Head, append(Tail, StreamB)}
        end
    end.
%% @doc
%% Creates a stream by applying `F' to each element of `Stream',
%% concatenating the resulting streams.
%% @end
-spec flat_map(fun((A) -> stream(B)), stream(A)) -> stream(B).
flat_map(F, Stream) ->
    %% transform/3 (exported; defined elsewhere in this module)
    %% presumably treats the stream returned from the callback as a
    %% sequence of elements to emit; the accumulator is unused here.
    transform(fun(X, UnusedAcc) ->
                  {F(X), UnusedAcc}
              end, undefined, Stream).
%% @doc
%% Creates a stream that yields every unique value of `Stream' exactly
%% once.
%%
%% Note that this needs to keep a map to keep track of previously seen
%% values and can use quite a bit of memory for large streams.
%% @end
-spec uniq(stream(A)) -> stream(A).
uniq(Stream) ->
    uniq(fun id/1, Stream).

%% @doc
%% Creates a stream that yields every unique value of `Stream' exactly
%% once according to the key function `F'.
%%
%% Note that this needs to keep a map to keep track of previously seen
%% values and can use quite a bit of memory for large streams.
%% @end
uniq(F, Stream) ->
    do_uniq(F, Stream, #{}).

%% @private
%% @doc Seen maps each key already emitted to 'true'.
do_uniq(F, Stream, Seen) ->
    lazily(fun(X, Xs) ->
               Key = F(X),
               case maps:is_key(Key, Seen) of
                   true ->
                       %% Duplicate key: return a stream (not a yield
                       %% tuple) so yield/1 keeps skipping forward.
                       do_uniq(F, Xs, Seen);
                   false ->
                       %% First occurrence: emit and remember the key.
                       Rest = do_uniq(F, Xs, Seen#{Key => true}),
                       {X, Rest}
               end
           end, Stream).
%% @doc
%% Applies `Fun' to each element of `Stream', yielding a new stream of
%% the transformed elements.
%% @end
-spec map(fun((A) -> B), stream(A)) -> stream(B).
map(Fun, Stream) ->
    lazily(fun(X, Xs) ->
               {Fun(X), map(Fun, Xs)}
           end, Stream).

%% @doc
%% Creates a stream consisting of all the elements in `Stream' for
%% which `F' returns `true'.
%% @end
-spec filter(fun((A) -> boolean()), stream(A)) -> stream(A)
        when A :: any().
filter(F, Stream) ->
    lazily(fun(X, Xs) ->
               case F(X) of
                   true -> {X, filter(F, Xs)};
                   %% Returning a stream (rather than a yield tuple)
                   %% makes yield/1 keep forcing until a match appears.
                   false -> filter(F, Xs)
               end
           end, Stream).

%% @doc
%% Maps and filters a stream in a single pass. The usage is identical
%% to `lists:filtermap/2'.
%% @end
-spec filter_map(fun((A) -> boolean()), stream(A)) -> stream(A)
        when A :: any().
filter_map(F, Stream) ->
    lazily(fun(X, Xs) ->
               case F(X) of
                   {true, Y} -> {Y, filter_map(F, Xs)};
                   true -> {X, filter_map(F, Xs)};
                   false -> filter_map(F, Xs)
               end
           end, Stream).
%% @doc
%% Evaluates `Stream', collecting its elements into a list.
%%
%% Warning: If `Stream' is infinite this function will loop
%% indefinitely.
%% @end
-spec to_list(stream(A)) -> list(A).
to_list(Stream) ->
    %% fold/3 (defined later in this module) builds the list with each
    %% element prepended, so reverse to restore stream order.
    Rev = fold(fun(A, Acc) ->
                   [A|Acc]
               end, [], Stream),
    lists:reverse(Rev).
%% @doc
%% Evaluates `Stream', collecting its elements into a map. The stream must
%% emit tuples.
%%
%% Warning: If `Stream' is infinite this function will loop
%% indefinitely.
%% @end
-spec to_map(stream({K, V})) -> map() when K :: any(), V :: any().
to_map(Stream) ->
    %% Assuming fold/3 (defined later in this module) folds oldest
    %% first, later occurrences of a key overwrite earlier ones --
    %% TODO confirm against fold's definition.
    fold(fun({K, V}, Acc) ->
             Acc#{K => V}
         end, #{}, Stream).
%% @doc
%% Creates a stream by pairing up each element of `StreamA' and
%% `StreamB' into tuples.
%%
%% The resulting stream has the length of the shortest input stream.
%% @end
-spec zip(stream(A), stream(B)) -> stream({A, B}).
zip(StreamA, StreamB) ->
    Pair = fun(A, B) -> {A, B} end,
    zip(Pair, StreamA, StreamB).

%% @doc
%% Creates a stream by combining each element of `StreamA' and
%% `StreamB' through the function `F'.
%%
%% The resulting stream has the length of the shortest input stream.
%% @end
-spec zip(fun((A, B) -> C), stream(A), stream(B)) -> stream(C).
zip(F, StreamA, StreamB) ->
    fun() ->
        %% Both sides are forced unconditionally (as before), so any
        %% per-element side effects of either stream are preserved even
        %% when the other stream has already halted.
        YieldA = yield(StreamA),
        YieldB = yield(StreamB),
        case {YieldA, YieldB} of
            {{A, As}, {B, Bs}} -> {F(A, B), zip(F, As, Bs)};
            _ -> halt
        end
    end.
%% @doc
%% Creates a stream of each element in `Stream' along with its
%% 0-indexed position.
%% @end
-spec with_index(stream(A)) -> stream({integer(), A}).
with_index(Stream) ->
    with_index(0, Stream).

%% @doc
%% Creates a stream of each element in `Stream' along with its
%% position, counting upwards from `Offset'.
%% @end
-spec with_index(integer(), stream(A)) -> stream({integer(), A}).
with_index(Offset, Stream) ->
    lazily(fun(Head, Tail) ->
               {{Offset, Head}, with_index(Offset + 1, Tail)}
           end, Stream).
%% @doc
%% Creates a new stream consisting of the first `N' elements of
%% `Stream'.
%% @end
-spec take(integer(), stream(A)) -> stream(A).
take(0, _Stream) ->
    %% Emit nothing without forcing the source: the previous version
    %% routed N = 0 through lazily/2, which evaluated (and discarded)
    %% one element of the source stream -- observable with
    %% side-effecting streams such as repeatedly/1.
    fun() -> halt end;
take(N, Stream) when N > 0 ->
    lazily(fun(X, Xs) ->
               {X, take(N - 1, Xs)}
           end, Stream).
%% @doc
%% Creates a stream that emits `N' element lists of contiguous elements
%% from `Stream'.
%% @end
-spec chunk(integer(), stream(A)) -> stream([A]).
chunk(N, Stream) ->
    fun() ->
        do_chunk(0, N, Stream, [])
    end.

%% @private
%% @doc Accumulate M of N elements (reversed) in Acc. Emits a full
%% chunk and recurses; when the source halts early, a final short chunk
%% is emitted (or 'halt' if nothing was accumulated).
do_chunk(N, N, Stream, Acc) ->
    {lists:reverse(Acc), chunk(N, Stream)};
do_chunk(M, N, Stream, Acc) ->
    case yield(Stream) of
        {X, Xs} ->
            do_chunk(M + 1, N, Xs, [X|Acc]);
        halt ->
            case Acc of
                [] -> halt;
                _ ->
                    %% Trailing partial chunk; [] ends the stream after.
                    {lists:reverse(Acc), []}
            end
    end.
%% @doc
%% Takes elements from `Stream' until `F' returns `false'.
%% @end
-spec take_while(fun((A) -> boolean()), stream(A)) -> stream(A).
take_while(Pred, Stream) ->
    lazily(fun(X, Rest) ->
        case Pred(X) of
            false -> halt;
            true -> {X, take_while(Pred, Rest)}
        end
    end, Stream).
%% @doc
%% Creates a stream that drops the first `N' elements of `Stream'.
%% @end
-spec drop(non_neg_integer(), stream(A)) -> stream(A).
drop(0, Stream) ->
    Stream;
drop(N, Stream) ->
    lazily(fun(_Dropped, Rest) -> drop(N - 1, Rest) end, Stream).
%% @doc
%% Drops elements from `Stream' until `F' returns `false'.
%% @end
-spec drop_while(fun((A) -> boolean()), stream(A)) -> stream(A).
drop_while(Pred, Stream) ->
    lazily(fun(X, Rest) ->
        case Pred(X) of
            false -> {X, Rest};
            true -> drop_while(Pred, Rest)
        end
    end, Stream).
%% @doc
%% Creates an infinite stream whose elements are produced by calling `F'
%% once per yield.
%% @end
-spec repeatedly(fun(() -> A)) -> stream(A).
repeatedly(F) ->
    fun() -> {F(), repeatedly(F)} end.
%% @doc
%% Creates a stream by repeatedly going through `Stream', looping around
%% when it is exhausted.
%%
%% Note that if `Stream' is infinite this effectively returns the
%% same stream.
%%
%% Warning: If `Stream' is empty this function will loop indefinitely.
%% @end
-spec cycle(stream(A)) -> stream(A).
cycle(Stream) ->
    do_cycle(Stream, Stream).
%% Walks Current, restarting from Original whenever Current halts.
do_cycle(Current, Original) ->
    fun() ->
        case yield(Current) of
            {X, Rest} -> {X, do_cycle(Rest, Original)};
            halt -> do_cycle(Original, Original)
        end
    end.
%% @doc
%% Counts the number of elements in `Stream'.
%%
%% Warning: If `Stream' is infinite this function will loop indefinitely.
%% @end
-spec count(stream(any())) -> non_neg_integer().
count(Stream) ->
    fold(fun(_Elem, Total) -> Total + 1 end, 0, Stream).
%% @doc
%% Returns the sum of elements in `Stream'.
%%
%% Warning: If `Stream' is infinite this function will loop indefinitely.
%% @end
-spec sum(stream(any())) -> number().
sum(Stream) ->
    fold(fun(Elem, Total) -> Total + Elem end, 0, Stream).
%% @doc
%% Unfolds a stream from a seed value `Init'.
%%
%% The stream first yields `Init' itself, and then calls `F' with it as
%% the initial accumulator to generate subsequent values.
%%
%% `F' is expected to either return `{NextItem, NextAcc}' or `halt'.
%% @end
%% NOTE: the previous spec, fun(({A, B}) -> {A, B}), wrongly claimed
%% that F takes a tuple and can never halt; F actually takes the bare
%% accumulator and may return halt.
-spec unfold(fun((A) -> {A, A} | halt), A) -> stream(A).
unfold(F, Init) ->
    fun() ->
        {Init, do_unfold(F, Init)}
    end.
%% Drives the unfold after the seed has been emitted.
do_unfold(F, Acc) ->
    fun() ->
        case F(Acc) of
            halt -> halt;
            {X, Next} -> {X, do_unfold(F, Next)}
        end
    end.
%% @doc
%% Creates a stream that groups contiguous sequences of elements for which
%% `F' returns the same value. Each emitted group is a list in input
%% order.
%% @end
-spec group_by(fun((A) -> B), stream(A)) -> stream([A]) when B :: any().
group_by(F, Stream) ->
    group_by(F, Stream, undefined, []).
%% Key is the group key of the elements accumulated (newest first) in
%% Group.
group_by(F, Stream, Key, Group) ->
    fun() ->
        case yield(Stream) of
            {X, Xs} ->
                case F(X) of
                    Key -> group_by(F, Xs, Key, [X|Group]);
                    NewKey ->
                        case Group of
                            [] -> group_by(F, Xs, NewKey, [X]);
                            [_|_] ->
                                %% Key changed: emit the finished group in
                                %% input order. (Previously this branch
                                %% emitted the raw accumulator, i.e. the
                                %% group in *reverse* order, while the
                                %% final group below was reversed.)
                                {lists:reverse(Group), group_by(F, Xs, NewKey, [X])}
                        end
                end;
            halt ->
                case Group of
                    [] -> halt;
                    _ -> {lists:reverse(Group), []}
                end
        end
    end.
%% @doc
%% Transforms an existing `Stream'.
%%
%% Transform expects a function that takes the next element of `Stream'
%% and the current accumulator, returning either `{Iterable, NewAccumulator}'
%% or `halt', in which case the resulting stream halts.
%% @end
-spec transform(fun((A, B) -> {stream(C), B} | halt), B, stream(A)) -> stream(C).
transform(F, Acc, Stream) ->
    lazily(fun(X, Xs) ->
        case F(X, Acc) of
            halt -> halt;
            {[], Next} ->
                %% No output for this element: keep transforming.
                transform(F, Next, Xs);
            {[Y], Next} ->
                %% Single output: yield it directly (avoids append/2).
                Ys = transform(F, Next, Xs),
                {Y, Ys};
            {Y, Next} ->
                %% Multiple outputs: splice the iterable in front of the
                %% rest of the transformed stream.
                Ys = transform(F, Next, Xs),
                append(Y, Ys)
        end
    end, Stream).
%% @doc
%% Splits off the first `N' elements of `Stream', returning these elements
%% in a list as well as the remaining stream.
%% @end
-spec split(non_neg_integer(), stream(A)) -> {[A], stream(A)}.
split(N, Stream) ->
    split(N, Stream, []).
split(0, Stream, Acc) ->
    {lists:reverse(Acc), Stream};
split(N, Stream, Acc) ->
    case yield(Stream) of
        halt -> {lists:reverse(Acc), []};
        {X, Rest} -> split(N - 1, Rest, [X|Acc])
    end.
%% @doc
%% Collects elements from `Stream' into a list until `F' returns false.
%%
%% Returns the collected elements as well as the remaining stream, which
%% still contains the element that failed the predicate.
%% @end
-spec split_while(fun((A) -> boolean()), stream(A)) -> {[A], stream(A)}.
split_while(Pred, Stream) ->
    split_while(Pred, Stream, []).
split_while(Pred, Stream, Acc) ->
    case yield(Stream) of
        halt ->
            {lists:reverse(Acc), []};
        {X, Rest} ->
            case Pred(X) of
                true -> split_while(Pred, Rest, [X|Acc]);
                false -> {lists:reverse(Acc), Stream}
            end
    end.
%% @doc
%% Folds a stream into a single value by applying `F' to each element and
%% the current accumulator to compute the next accumulator.
%%
%% Returns the final accumulator.
%%
%% Warning: If `Stream' is infinite this function will loop indefinitely.
%% @end
-spec fold(fun((A, B) -> B), B, stream(A)) -> B.
fold(F, Init, Stream) ->
    fold_priv(F, Init, yield(Stream)).
fold_priv(_F, Acc, halt) ->
    Acc;
fold_priv(F, Acc, {X, Rest}) ->
    fold_priv(F, F(X, Acc), yield(Rest)).
%% A "Final" higher order abstract syntax representation in Erlang.
%% http://okmij.org/ftp/tagless-final/index.html
%% https://en.wikipedia.org/wiki/Higher-order_abstract_syntax
%%
%% The main purpose of this abstraction is to implement the compiler
%% for a pure dataflow language.
%%
%% The side effect needed to thread compilation state through the
%% evaluation of the abstract syntax is implemented using a second
%% process. The individual operators that implement language
%% semantics resemble operators in a State Monad. The state
%% process is implicitly linked to the evaluator process through the
%% process dictionary, and state-threading operations are implemented
%% using RPC. Note that this is impure code, but the side effect is
%% only observable during the extent of eval/3, which is reasonable.
-module(dsl).
-export([eval/3,
op/2,
compile_dataflow/3,
example/0]).
%% Evaluates apply(Function, Arguments) with a fresh state process seeded
%% from InitState. Returns {ok, #{value => V, state => S}} where V is the
%% evaluation result and S is the final state dumped from the state
%% process. InitState must contain a `bind' fun of arity 3.
eval(InitState, Function, Arguments) ->
    StatePid =
        %% Compiler is a separate process to isolate side effects.
        serv:start(
            {handler,
             fun() -> InitState end,
             %% Keep this really simple: a dump RPC plus the `op' RPC,
             %% which threads state through the configured `bind' fun.
             fun({_, dump}=Msg, State) ->
                     obj:handle(Msg, State);
                ({Pid, {op, Op, Arg}}, State = #{ bind := Bind }) ->
                     {Val, State1} = Bind(Op, Arg, State),
                     obj:reply(Pid, Val),
                     State1
             end}),
    Ref = erlang:make_ref(),
    Pid = self(),
    _EvalPid =
        %% Isolate it in a separate process, because we use the
        %% process dictionary (see op/2, which reads `dsl_state').
        spawn_link(
            fun() ->
                    put(dsl_state, StatePid),
                    Value = apply(Function, Arguments),
                    State = obj:call(StatePid, dump),
                    Pid ! {Ref, Value, State}
            end),
    receive
        {Ref, Value, State} ->
            %% NOTE(review): exit/2 with reason `normal' only terminates
            %% a process that traps exits — confirm serv:start handlers
            %% do, otherwise the state process may linger.
            exit(StatePid, normal),
            {ok, #{ value => Value, state => State}}
    end.
%% Issues operation Op with arguments Args against the state process set
%% up by eval/3. Must be called from within the evaluator process, which
%% holds the state process pid under `dsl_state' in its process
%% dictionary.
op(Op, Args) ->
    obj:call(get(dsl_state), {op, Op, Args}).
%% EXAMPLES
%% Note that epid_app doesn't use any of these. It uses raw bind,
%% relying on memoized instantiation.
%% Dataflow language with instance ID allocator.
%% Config entries override the defaults, so callers may also replace the
%% `bind' fun entirely; bind_alloc/3 in turn requires a `bind_instance'
%% fun in Config.
compile_dataflow(Config, Program, ProgramArgs) ->
    DefaultState = #{ bind => fun bind_alloc/3 },
    InitState = maps:merge(DefaultState, Config),
    eval(InitState, Program, ProgramArgs).
%% Perform instance ID allocation, and call into specialized
%% binding/instantiation operation.
%% Instance ids are {OpType, N}, where N is a per-OpType counter kept in
%% the compiler state under the key {count, OpType}.
bind_alloc(OpType, Args, State = #{ bind_instance := Bind }) ->
    N = maps:get({count, OpType}, State, 0),
    InstanceId = {OpType, N},
    Bind(InstanceId, Args,
         maps:put({count, OpType}, N + 1, State)).
%% ... specialized to compile to concrete syntax.
compile_dataflow(Program, ProgramArgs) ->
    Config = #{ env => [], bind_instance => fun bind_compile/3 },
    compile_dataflow(Config, Program, ProgramArgs).
%% Binder that accumulates {Node, {op, {OpType, Args}}} bindings (newest
%% first) in the `env' list of the compiler state, returning the node
%% reference as the operator's value.
bind_compile({OpType, N}, Args, State = #{ env := Env }) ->
    Node = {node, {OpType, N}},
    Binding = {Node, {op, {OpType, Args}}},
    Env1 = [Binding|Env],
    {Node, maps:put(env, Env1, State)}.
%% Example: compile a small dataflow program (a*a + b*b) to a syntax
%% data structure using the bind_compile/3 binder.
example() ->
    Program =
        fun(A, B) ->
            C = op(mul, [A, A]),
            D = op(mul, [B, B]),
            op(add, [C, D])
        end,
    compile_dataflow(Program, [a, b]).
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
-module(subrect).
-export([parse/1, annotate/1, validate/2, normalize/2, to_parts/2, extract/3]).
-type constraint_expr() :: string() | binary().
%% type of constraint expression. You may use a string or a binary as constraint expression.
-type slice() :: non_neg_integer()
| {non_neg_integer(), pos_integer(), non_neg_integer()}
| {non_neg_integer(), pos_integer()}.
-type constraint() :: binary()
| [slice()]
| {constraint(), [constraint()]}.
-type elem() :: pos_integer()
| [{binary(), field()}].
-type field() :: {elem(), [pos_integer()]}.
-type schema() :: #{binary() => field()}.
%% @doc parse the `Constraint' expression to internal representation
-spec parse(constraint_expr()) -> Result
    when Result :: {ok, [constraint()]} | Error,
         Error :: {error, {Line, module(), Reason}},
         Line :: pos_integer(),
         Reason :: term().
parse(Constraint) ->
    %% Lex then parse; the lexer's trailing end-line is dropped and its
    %% error tuple is passed through unchanged.
    case subrect_lexer:string(unicode:characters_to_list(Constraint)) of
        {ok, Tokens, _} ->
            subrect_parser:parse(Tokens);
        {error, Error, _} ->
            {error, Error}
    end.
-type annotated_elem() :: pos_integer()
| annotated_schema().
-type annotated_field() :: {{non_neg_integer(), pos_integer()}, {annotated_elem(), [pos_integer()]}}.
-type annotated_schema() :: #{binary() => annotated_field()}.
%% Annotates every field of a schema with its {Offset, Size} extent so
%% later passes (to_parts/2, extract/3) can address flat binary data.
-spec annotate(schema()) -> annotated_schema().
annotate(Schema) ->
    %% Each top-level variable starts at offset 0.
    maps:map(fun(_, V) -> annotate_field(0, V) end, Schema).
%% Computes {{Offset, TotalSize}, {AnnotatedElem, Strides}} for a field.
%% Strides are the suffix products of the shape times the element size,
%% i.e. the flat size of one slice along each dimension.
annotate_field(Offset, {Elem, Shape}) ->
    {Elem1, N} = annotate_elem(Elem),
    {Shape1, Size} =
        lists:mapfoldr(
            fun (E, Acc) ->
                    {Acc, E*Acc}
            end,
            N,
            Shape),
    {{Offset, Size}, {Elem1, Shape1}}.
%% A scalar element of size N annotates to itself; a record element is
%% annotated field by field, assigning consecutive offsets, and returns
%% the total record size.
annotate_elem(N) when is_integer(N) ->
    {N, N};
annotate_elem(List) ->
    {List1, N} =
        lists:mapfoldl(
            fun({K,V}, Acc) ->
                    {{Offset, Size}, _} = Field = annotate_field(Acc, V),
                    {{K,Field}, Offset+Size}
            end,
            0,
            List),
    {maps:from_list(List1), N}.
%% Checks that every constraint refers to existing fields and that all
%% slices stay within the declared dimension bounds.
-spec validate([constraint()], schema()) -> boolean().
validate([], _Schema) ->
    true;
validate([H|T], Schema) ->
    validate_constraint(H, Schema) and validate(T, Schema).
%% A field with an empty shape is validated against its element type.
validate_constraint(Constraint, {Elem, []}) ->
    validate_constraint(Constraint, Elem);
%% Bare field name against a map-shaped record.
validate_constraint(Name, Schema) when is_binary(Name), is_map(Schema) ->
    maps:is_key(Name, Schema);
%% Field name with sub-constraints against a map-shaped record.
validate_constraint({Name, Constraints}, Schema) when is_binary(Name), is_map(Schema) ->
    case maps:find(Name, Schema) of
        error ->
            false;
        {ok, Field} ->
            validate(Constraints, Field)
    end;
%% Same two cases against a proplist-shaped record.
validate_constraint(Name, Fields) when is_binary(Name), is_list(Fields) ->
    lists:keymember(Name, 1, Fields);
validate_constraint({Name, Constraints}, Fields) when is_binary(Name), is_list(Fields) ->
    case lists:keyfind(Name, 1, Fields) of
        false ->
            false;
        {Name, Field} ->
            validate(Constraints, Field)
    end;
%% Slices are validated against the extent of the leading dimension.
validate_constraint(Slices, {_, [H|_]}) when is_list(Slices) ->
    validate_slices(Slices, H);
%% Slices with sub-constraints: sub-constraints apply one dimension in.
validate_constraint({Slices, Constraints}, {Elem, [H|T]}) when is_list(Slices) ->
    validate_slices(Slices, H) and validate(Constraints, {Elem, T});
validate_constraint(_, _) ->
    false.
validate_slices([], _) ->
    true;
validate_slices([H|T], Size) ->
    validate_slice(H, Size) and validate_slices(T, Size).
%% Single index must be in [0, N).
validate_slice(I, N) when is_integer(I), I >= 0, I < N ->
    true;
%% {Start, Stride, Stop}: the last index actually reached must be < N.
validate_slice({Start, Stride, Stop}, N) when Start >= 0, Start < N, Stop >= Start ->
    Max = Start + (Stop - Start) div Stride * Stride,
    Max < N;
%% {Start, Stride}: open-ended, clamped later by normalize/2.
validate_slice({Start, _}, N) when Start >= 0, Start < N ->
    true;
validate_slice(_, _) ->
    false.
-type normalized_slice() :: {non_neg_integer(), pos_integer(), non_neg_integer()}.
-type normalized_constraint() :: binary()
| [normalized_slice()]
| {normalized_constraint(), [normalized_constraint()]}.
%% Rewrites every constraint so that all slices take the canonical
%% {Start, Stride, Stop} form.
-spec normalize([constraint()], schema()) -> [normalized_constraint()].
normalize(Constraints, Schema) ->
    lists:map(fun(C) -> normalize_constraint(C, Schema) end, Constraints).
%% A field with an empty shape behaves like its element type.
normalize_constraint(Constraint, {Elem, []}) ->
    normalize_constraint(Constraint, Elem);
normalize_constraint({Name, Constraints}, Schema) when is_binary(Name), is_map(Schema) ->
    {Name, normalize(Constraints, maps:get(Name, Schema))};
normalize_constraint({Name, Constraints}, Fields) when is_binary(Name), is_list(Fields) ->
    {Name, Field} = lists:keyfind(Name, 1, Fields),
    {Name, normalize(Constraints, Field)};
normalize_constraint(Slices, {_, [Extent|_]}) when is_list(Slices) ->
    [normalize_slice(S, Extent) || S <- Slices];
normalize_constraint({Slices, Constraints}, {Elem, [Extent|Rest]}) when is_list(Slices) ->
    {normalize_constraint(Slices, {Elem, [Extent|Rest]}),
     normalize(Constraints, {Elem, Rest})};
normalize_constraint(Constraint, _) ->
    Constraint.
%% Index -> degenerate range; open range -> closed at the dimension end.
normalize_slice(Index, _) when is_integer(Index) ->
    {Index, 1, Index};
normalize_slice({Start, Stride}, Extent) ->
    {Start, Stride, Extent - 1};
normalize_slice(Slice, _) ->
    Slice.
-type part() :: {binary(), {non_neg_integer(), pos_integer()}}.
-type parts() :: part()
               | [part()].
%% Translates normalized constraints into {Var, {Offset, Size}} byte
%% ranges relative to each variable's data, using the offsets/sizes
%% computed by annotate/1.
-spec to_parts([normalized_constraint()], annotated_schema()) -> parts().
to_parts(Constraints, Schema) when is_list(Constraints), is_map(Schema) ->
    [to_parts_var(C, Schema)
     || C <- Constraints ].
%% A bare variable name selects its whole extent.
to_parts_var(Name, Schema) when is_binary(Name), is_map(Schema) ->
    {Part, _}= maps:get(Name, Schema),
    {Name, Part};
to_parts_var({Name, Constraints}, Schema) when is_binary(Name), is_map(Schema) ->
    {_, Field}= maps:get(Name, Schema),
    [to_parts(C, Name, 0, Field) || C <- Constraints ].
%% Offset is the accumulated byte offset within variable Var.
to_parts(Constraint, Var, Offset, {Elem, []}) ->
    to_parts(Constraint, Var, Offset, Elem);
to_parts(Name, Var, Offset, Fields) when is_binary(Name), is_map(Fields) ->
    {{Offset1, Size}, _} = maps:get(Name, Fields),
    {Var, {Offset + Offset1, Size}};
to_parts(Slices, Var, Offset, {_, [H|_]}) when is_list(Slices) ->
    %% H is the flat size of one slice along the leading dimension.
    [to_parts_slice(S, Var, Offset, H) || S <- Slices ];
to_parts({Name, Constraints}, Var, Offset, Fields) when is_binary(Name), is_map(Fields) ->
    {{Offset1, _}, Field} = maps:get(Name, Fields),
    [to_parts(C, Var, Offset+Offset1, Field) || C <- Constraints];
to_parts({Slices, Constraints}, Var, Offset, {Elem, [H|T]}) when is_list(Slices) ->
    %% Expand the slice indices and recurse one dimension deeper.
    [ to_parts(C, Var, Offset + I * H, {Elem, T})
      || {Start, Stride, Stop} <- Slices,
         I <- lists:seq(Start, Stop, Stride),
         C <- Constraints ].
%% A unit-stride slice collapses into one contiguous range; otherwise
%% one range per selected index.
to_parts_slice({Start, 1, Stop}, Var, Offset, Size) ->
    {Var, {Offset + Start * Size, (Stop + 1 - Start) * Size}};
to_parts_slice({Start, Stride, Stop}, Var, Offset, Size) ->
    [{Var, {Offset + I * Size, Size}}
     || I <- lists:seq(Start, Stop, Stride) ].
%% Extracts the bytes selected by normalized constraints from per-variable
%% binaries in Data, structured as nested iodata.
-spec extract([normalized_constraint()], #{binary() => binary()}, annotated_schema()) -> iodata().
extract(Constraints, Data, Schema) when is_list(Constraints), is_map(Schema) ->
    [extract_var(C, Data, Schema)
     || C <- Constraints ].
%% A bare variable name selects its whole binary.
extract_var(Name, Data, Schema) when is_binary(Name), is_map(Schema) ->
    maps:get(Name, Data);
extract_var({Name, Constraints}, Data, Schema) when is_binary(Name), is_map(Schema) ->
    Part = maps:get(Name, Data),
    {_, Field}= maps:get(Name, Schema),
    [extract_constraint(C, Part, Field) || C <- Constraints ].
%% Data here is already narrowed to the current field's bytes.
extract_constraint(Constraint, Data, {Elem, []}) ->
    extract_constraint(Constraint, Data, Elem);
extract_constraint(Name, Data, Fields) when is_binary(Name), is_map(Fields) ->
    {Part, _} = maps:get(Name, Fields),
    binary:part(Data, Part);
extract_constraint(Slices, Data, {_, [H|_]}) when is_list(Slices) ->
    %% H is the flat size of one slice along the leading dimension.
    [ extract_slice(S, Data, H) || S <- Slices ];
extract_constraint({Name, Constraints}, Data, Fields) when is_binary(Name), is_map(Fields) ->
    {Part, Field} = maps:get(Name, Fields),
    Data1 = binary:part(Data, Part),
    [extract_constraint(C, Data1, Field) || C <- Constraints];
extract_constraint({Slices, Constraints}, Data, {Elem, [H|T]}) when is_list(Slices) ->
    [extract_constraints(
         Start, Stride, Stop,
         H,
         Data,
         Constraints,
         {Elem, T})
     || {Start, Stride, Stop} <- Slices].
%% Walks the indices Start, Start+Stride, ... =< Stop, narrowing Data to
%% one slice of Size bytes per index and applying the sub-constraints.
extract_constraints(Start, Stride, Stop, Size, Data, Constraints, Field) when Start =< Stop ->
    Data1 = binary:part(Data, Start * Size, Size),
    [[extract_constraint(C, Data1, Field)
      || C <- Constraints ]
     |extract_constraints(Start+Stride, Stride, Stop, Size, Data, Constraints, Field)];
extract_constraints(_, _, _, _, _, _, _) ->
    [].
%% A unit-stride slice is one contiguous binary part; other strides
%% produce a list of Size-byte parts.
extract_slice({Start, 1, Stop}, Data, Size) ->
    binary:part(Data, Start * Size, (Stop + 1 - Start) * Size);
extract_slice({Start, Stride, Stop}, Data, Size) when Start =< Stop ->
    [binary:part(Data, Start * Size, Size)|extract_slice({Start+Stride, Stride, Stop}, Data, Size)];
%% Recursion end (Start > Stop). Underscore-prefix the bindings: the
%% original bound five variables it never used, producing compiler
%% warnings.
extract_slice({_Start, _Stride, _Stop}, _Data, _Size) ->
    [].
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couchdb_uuids).
% -include_lib("couch/include/couch_db.hrl").
-behaviour(gen_server).
-vsn(3).
% -behaviour(config_listener).
-include("../dev.hrl").
-export([
new/0
,set_algorithm/1
,random/0
,start/0
,stop/0
,init/1
,terminate/2
,code_change/3
,handle_call/3
,handle_cast/2
,handle_info/2
]).
% config_listener api
% -export([handle_config_change/5, handle_config_terminate/3]).
-define(b2l(V), binary_to_list(V)).
-define(l2b(V), list_to_binary(V)).
%
% API
%
%% @doc Generate a new uuid.
%%
%% At start time the algorithm defaults to `random'; should you require
%% a different algorithm, use the function `set_algorithm/1' first.
-spec(new() -> binary()).
new() -> gen_server:call(?MODULE, create).
%% @doc Set the algorithm used for uuid generation
%%
%% Available algorithms:
%%
%% random: 128 bits of random awesome.
%%
%% sequential: Monotonically increasing ids with random increments. The first 26 hex characters are random, the last 6 increment in random amounts until an overflow occurs. On overflow, the random prefix is regenerated and the process starts over.
%%
%% utc_random: The time since Jan 1, 1970 UTC, in microseconds. The first 14 characters are the time in hex. The last 18 are random.
%%
%% utc_id: The time since Jan 1, 1970 UTC, in microseconds, plus the utc_id_suffix string. The first 14 characters are the time in hex. The uuids/utc_id_suffix string value is appended to these.
%%
%% Usage:
%% ```
%% set_algorithm(random).
%% set_algorithm(utc_random).
%% set_algorithm(sequential).
%% set_algorithm({utc_id, <<"suffix">>}).
%% '''
%% The previous spec, {ok, term() | {error, term()}}, nested the error
%% union inside the ok tuple; handle_call replies {ok, Algo}, and an
%% error is a top-level alternative.
-spec(set_algorithm(Algorithm::term())-> {ok, term()} | {error, term()}).
set_algorithm(Algo) ->
    gen_server:call(?MODULE, {algorithm_change, Algo}).
% @private
% 16 random bytes rendered as 32 lowercase hex characters.
random() -> list_to_binary(to_hex(crypto:strong_rand_bytes(16))).
%
% Internal
%
% @private
start() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
% @private
stop() ->
    gen_server:cast(?MODULE, stop).
% @private
% The server state encodes the active algorithm; see state/1.
init([]) -> {ok, state()}.
% @private
terminate(_Reason, _State) ->
    ok.
% @private
% Switching algorithm rebuilds the state from scratch via state/1.
handle_call({algorithm_change, Algo}, _From, _State) ->
    {reply, {ok, Algo}, state(Algo)};
handle_call(create, _From, random) ->
    {reply, random(), random};
handle_call(create, _From, {utc_random, ClockSeq}) ->
    {UtcRandom, NewClockSeq} = utc_random(ClockSeq),
    {reply, UtcRandom, {utc_random, NewClockSeq}};
handle_call(create, _From, {utc_id, UtcIdSuffix, ClockSeq}) ->
    Now = os:timestamp(),
    {UtcId, NewClockSeq} = utc_suffix(UtcIdSuffix, ClockSeq, Now),
    {reply, UtcId, {utc_id, UtcIdSuffix, NewClockSeq}};
handle_call(create, _From, {sequential, Pref, Seq}) ->
    %% 26 random hex chars plus a 6-hex-digit counter.
    Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])),
    case Seq >= 16#fff000 of
        true ->
            %% Counter close to overflow: regenerate prefix and restart.
            {reply, Result, {sequential, new_prefix(), inc()}};
        _ ->
            {reply, Result, {sequential, Pref, Seq + inc()}}
    end.
% @private
% Only `stop' is a meaningful cast; everything else is ignored.
handle_cast(stop, State) ->
    {stop, normal, State};
handle_cast(_Msg, State) ->
    {noreply, State}.
% @private
handle_info(_Info, State) ->
    {noreply, State}.
% @private
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
% @private
% 13 random bytes -> the 26 hex character prefix of sequential ids.
new_prefix() ->
    to_hex(crypto:strong_rand_bytes(13)).
% @private
% Random counter increment in 1..16#ffd for sequential ids.
inc() ->
    rand:uniform(16#ffd).
% @private
% Builds the gen_server state for the requested algorithm. Time-based
% algorithms seed their clock sequence from the current time.
state() -> state(random).
state(Uuid_algorithm) ->
    case Uuid_algorithm of
        random -> random;
        utc_random ->
            ClockSeq = micros_since_epoch(os:timestamp()),
            {utc_random, ClockSeq};
        sequential -> {sequential, new_prefix(), inc()};
        {utc_id, Suffix} when is_binary(Suffix) ->
            ClockSeq = micros_since_epoch(os:timestamp()),
            UtcIdSuffix = Suffix,
            {utc_id, UtcIdSuffix, ClockSeq};
        Unknown -> throw({unknown_uuid_algorithm, Unknown})
    end.
% @private
% Microseconds elapsed since the Unix epoch for an os:timestamp() triple.
micros_since_epoch({_, _, Micros} = Now) ->
    UtcNow = calendar:now_to_universal_time(Now),
    NowSecs = calendar:datetime_to_gregorian_seconds(UtcNow),
    EpochSecs = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
    (NowSecs - EpochSecs) * 1000000 + Micros.
% @private
% utc_random id: 14 hex chars of clock followed by 18 random hex chars.
utc_random(ClockSeq) ->
    RandomSuffix = to_hex(crypto:strong_rand_bytes(9)),
    utc_suffix(RandomSuffix, ClockSeq, os:timestamp()).
% @private
% Prepends a 14 hex digit timestamp to Suffix, keeping the clock
% sequence strictly increasing across calls.
utc_suffix(Suffix, ClockSeq, Now) ->
    OsMicros = micros_since_epoch(Now),
    NewClockSeq =
        case OsMicros > ClockSeq of
            true ->
                %% Timestamp advanced: adopt it and reset the sequence.
                OsMicros;
            false ->
                %% Wall clock lagging (or equal): bump to stay monotonic.
                ClockSeq + 1
        end,
    Prefix = io_lib:format("~14.16.0b", [NewClockSeq]),
    {list_to_binary(Prefix ++ Suffix), NewClockSeq}.
%%
%% Hex rendering helpers, adapted from couch_util:
%% Original: https://github.com/apache/couchdb/blob/master/src/couch/src/couch_util.erl
%%
% @private
% Renders a binary (or byte list) as a lowercase hex string (flat list
% of characters).
to_hex(<<High:4, Low:4, Rest/binary>>) ->
    [nibble_to_hex(High), nibble_to_hex(Low) | to_hex(Rest)];
to_hex(<<>>) ->
    [];
to_hex(List) when is_list(List) ->
    to_hex(list_to_binary(List)).
% @private
% Maps a nibble (0..15) to its lowercase hex character; anything else
% is a function_clause error, as before.
nibble_to_hex(N) when N >= 0, N =< 9 ->
    $0 + N;
nibble_to_hex(N) when N >= 10, N =< 15 ->
    $a + (N - 10).
%%
%% Copyright (c) 2015-2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc GMap CRDT: grow only map.
%% Modeled as a dictionary where keys can be anything and the
%% values are join-semilattices.
%%
%% @reference <NAME>
%% delta-enabled-crdts C++ library
%% [https://github.com/CBaquero/delta-enabled-crdts]
-module(state_gmap).
-author("<NAME> <<EMAIL>>").
-include("state_type.hrl").
-behaviour(type).
-behaviour(state_type).
-define(TYPE, ?MODULE).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/0, new/1]).
-export([mutate/3, delta_mutate/3, merge/2]).
-export([query/1, equal/2, is_bottom/1, is_inflation/2, is_strict_inflation/2, irreducible_is_strict_inflation/2]).
-export([join_decomposition/1, delta/3]).
-export([encode/2, decode/2]).
-export_type([state_gmap/0, state_gmap_op/0]).
-opaque state_gmap() :: {?TYPE, payload()}.
-type ctype() :: state_type:state_type() | {state_type:state_type(), [term()]}.
-type payload() :: {ctype(), orddict:orddict()}.
-type key() :: term().
-type key_op() :: term().
-type state_gmap_op() :: {apply, key(), key_op()}.
%% @doc Create a new, empty `state_gmap()'.
%%      By default the values are a MaxInt CRDT.
-spec new() -> state_gmap().
new() ->
    new([?MAX_INT_TYPE]).
%% @doc Create a new, empty `state_gmap()'.
%%      CType is either a value type module or {Module, Args}.
-spec new([term()]) -> state_gmap().
new([CType]) ->
    {?TYPE, {CType, orddict:new()}}.
%% @doc Mutate a `state_gmap()'.
%%      Delegates to the generic state_type implementation.
-spec mutate(state_gmap_op(), type:id(), state_gmap()) ->
    {ok, state_gmap()}.
mutate(Op, Actor, {?TYPE, _}=CRDT) ->
    state_type:mutate(Op, Actor, CRDT).
%% @doc Delta-mutate a `state_gmap()'.
%%      The first argument can only be a triple where the first
%%      component is `apply', the second is a key, and the third is the
%%      operation to be performed on the correspondent value of that
%%      key.
-spec delta_mutate(state_gmap_op(), type:id(), state_gmap()) ->
    {ok, state_gmap()}.
delta_mutate({apply, Key, Op}, Actor, {?TYPE, {CType, GMap}}) ->
    {Type, Args} = state_type:extract_args(CType),
    %% Current value for Key, or a fresh bottom value when absent.
    Current = case orddict:find(Key, GMap) of
        {ok, Value} ->
            Value;
        error ->
            Type:new(Args)
    end,
    {ok, {Type, KeyDelta}} = Type:delta_mutate(Op, Actor, Current),
    %% The resulting delta carries only the mutated key.
    Delta = orddict:store(Key, {Type, KeyDelta}, orddict:new()),
    {ok, {?TYPE, {CType, Delta}}}.
%% @doc Returns the value of the `state_gmap()'.
%%      This value is an orddict (key-sorted list of pairs) where each
%%      key maps to the result of `query/1' over its current value.
-spec query(state_gmap()) -> term().
query({?TYPE, {CType, GMap}}) ->
    {Type, _Args} = state_type:extract_args(CType),
    [{Key, Type:query(Value)} || {Key, Value} <- GMap].
%% @doc Merge two `state_gmap()'.
%%      The keys of the resulting `state_gmap()' are the union of the
%%      keys of both `state_gmap()' passed as input.
%%      If a key is only present on one of the `state_gmap()',
%%      its correspondent value is preserved.
%%      If a key is present in both `state_gmap()', the new value
%%      will be the `merge/2' of both values.
-spec merge(state_gmap(), state_gmap()) -> state_gmap().
merge({?TYPE, _}=CRDT1, {?TYPE, _}=CRDT2) ->
    %% Both operands must share the same value CType (matched in the
    %% fun head below).
    MergeFun = fun({?TYPE, {CType, GMap1}}, {?TYPE, {CType, GMap2}}) ->
        {Type, _Args} = state_type:extract_args(CType),
        GMap = orddict:merge(
            fun(_, Value1, Value2) ->
                Type:merge(Value1, Value2)
            end,
            GMap1,
            GMap2
        ),
        {?TYPE, {CType, GMap}}
    end,
    state_type:merge(CRDT1, CRDT2, MergeFun).
%% @doc Equality for `state_gmap()'.
%%      Two `state_gmap()' are equal if they have the same keys
%%      and for each key, their values are also `equal/2'.
-spec equal(state_gmap(), state_gmap()) -> boolean().
equal({?TYPE, {CType, GMap1}}, {?TYPE, {CType, GMap2}}) ->
    {Type, _Args} = state_type:extract_args(CType),
    %% Value-level equality is delegated to the value type.
    Fun = fun(Value1, Value2) ->
        Type:equal(Value1, Value2)
    end,
    orddict_ext:equal(GMap1, GMap2, Fun).
%% @doc Check if a `state_gmap()' is bottom, i.e. it holds no entries.
-spec is_bottom(state_gmap()) -> boolean().
is_bottom({?TYPE, {_CType, GMap}}) ->
    GMap =:= orddict:new().
%% @doc Given two `state_gmap()', check if the second is an inflation
%%      of the first.
%%      Two conditions should be met:
%%          - each key in the first `state_gmap()' is also in
%%          the second `state_gmap()'
%%          - for each key in the first `state_gmap()',
%%          the correspondent value in the second `state_gmap()'
%%          should be an inflation of the value in the first.
-spec is_inflation(state_gmap(), state_gmap()) -> boolean().
is_inflation({?TYPE, {CType, GMap1}}, {?TYPE, {CType, GMap2}}) ->
    {Type, _Args} = state_type:extract_args(CType),
    %% Short-circuits on the first key that violates either condition.
    lists_ext:iterate_until(
        fun({Key, Value1}) ->
            case orddict:find(Key, GMap2) of
                {ok, Value2} ->
                    Type:is_inflation(Value1, Value2);
                error ->
                    false
            end
        end,
        GMap1
    ).
%% @doc Check for strict inflation (inflation and not equal).
-spec is_strict_inflation(state_gmap(), state_gmap()) -> boolean().
is_strict_inflation({?TYPE, _}=CRDT1, {?TYPE, _}=CRDT2) ->
    state_type:is_strict_inflation(CRDT1, CRDT2).
%% @doc Check for irreducible strict inflation.
-spec irreducible_is_strict_inflation(state_gmap(), state_gmap()) ->
    boolean().
irreducible_is_strict_inflation({?TYPE, _}=Irreducible, {?TYPE, _}=CRDT) ->
    state_type:irreducible_is_strict_inflation(Irreducible, CRDT).
%% @doc Join decomposition for `state_gmap()'.
%%      @todo Currently not decomposed: the CRDT is returned whole as a
%%      single (trivial) decomposition.
-spec join_decomposition(state_gmap()) -> [state_gmap()].
join_decomposition({?TYPE, _}=CRDT) ->
    [CRDT].
%% @doc Delta calculation for `state_gmap()'.
-spec delta(state_type:delta_method(), state_gmap(), state_gmap()) ->
    state_gmap().
delta(Method, {?TYPE, _}=A, {?TYPE, _}=B) ->
    state_type:delta(Method, A, B).
%% Serialization; only the erlang term format is supported.
-spec encode(state_type:format(), state_gmap()) -> binary().
encode(erlang, {?TYPE, _}=CRDT) ->
    erlang:term_to_binary(CRDT).
-spec decode(state_type:format(), binary()) -> state_gmap().
decode(erlang, Binary) ->
    {?TYPE, _} = CRDT = erlang:binary_to_term(Binary),
    CRDT.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
new_test() ->
?assertEqual({?TYPE, {?MAX_INT_TYPE, []}}, new()),
?assertEqual({?TYPE, {?GCOUNTER_TYPE, []}}, new([?GCOUNTER_TYPE])).
query_test() ->
Counter1 = {?GCOUNTER_TYPE, [{1, 1}, {2, 13}, {3, 1}]},
Counter2 = {?GCOUNTER_TYPE, [{2, 2}, {3, 13}, {5, 2}]},
Map0 = new([?GCOUNTER_TYPE]),
Map1 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, Counter1}, {<<"key2">>, Counter2}]}},
?assertEqual([], query(Map0)),
?assertEqual([{<<"key1">>, 15}, {<<"key2">>, 17}], query(Map1)).
delta_apply_test() ->
Map0 = new([?GCOUNTER_TYPE]),
{ok, {?TYPE, Delta1}} = delta_mutate({apply, <<"key1">>, increment}, 1, Map0),
Map1 = merge({?TYPE, Delta1}, Map0),
{ok, {?TYPE, Delta2}} = delta_mutate({apply, <<"key1">>, increment}, 2, Map1),
Map2 = merge({?TYPE, Delta2}, Map1),
{ok, {?TYPE, Delta3}} = delta_mutate({apply, <<"key2">>, increment}, 1, Map2),
Map3 = merge({?TYPE, Delta3}, Map2),
?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}}, {?TYPE, Delta1}),
?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}}, Map1),
?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{2, 1}]}}]}}, {?TYPE, Delta2}),
?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}, {2, 1}]}}]}}, Map2),
?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key2">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}}, {?TYPE, Delta3}),
?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}, {2, 1}]}},
{<<"key2">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}}, Map3).
%% Applying increments through the map must delegate to the embedded
%% ?GCOUNTER_TYPE: two increments on <<"key1">> by different actors merge
%% into one per-actor entry each, and <<"key2">> gets its own counter.
apply_test() ->
    Map0 = new([?GCOUNTER_TYPE]),
    {ok, Map1} = mutate({apply, <<"key1">>, increment}, 1, Map0),
    {ok, Map2} = mutate({apply, <<"key1">>, increment}, 2, Map1),
    {ok, Map3} = mutate({apply, <<"key2">>, increment}, 1, Map2),
    ?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}}, Map1),
    ?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}, {2, 1}]}}]}}, Map2),
    ?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}, {2, 1}]}},
                                           {<<"key2">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}}, Map3).

%% Merge must be commutative over full states and deltas: merging a delta
%% into a map (either argument order) takes the per-key join, and merging two
%% deltas for different keys produces a delta group containing both keys.
merge_deltas_test() ->
    Map1 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    Delta1 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 17}]}}]}},
    Delta2 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key2">>, {?GCOUNTER_TYPE, [{1, 17}]}}]}},
    Map2 = merge(Delta1, Map1),
    Map3 = merge(Map1, Delta1),
    DeltaGroup = merge(Delta1, Delta2),
    ?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 17}]}}]}}, Map2),
    ?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 17}]}}]}}, Map3),
    ?assertEqual({?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 17}]}},
                                           {<<"key2">>, {?GCOUNTER_TYPE, [{1, 17}]}}]}}, DeltaGroup).
%% Equality is structural: maps differ when a counter value differs or when
%% the key set differs.
equal_test() ->
    Map1 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    Map2 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 2}]}}]}},
    Map3 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key2">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    ?assert(equal(Map1, Map1)),
    ?assertNot(equal(Map1, Map2)),
    ?assertNot(equal(Map1, Map3)).

%% A freshly created map is bottom; any map with an entry is not.
is_bottom_test() ->
    Map0 = new(),
    Map1 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    ?assert(is_bottom(Map0)),
    ?assertNot(is_bottom(Map1)).
%% Inflation is reflexive, holds when a counter grows, and fails when the
%% second state lacks a key the first one has. The state_type variants are
%% checked as well since they verify inflation via merge.
is_inflation_test() ->
    Map1 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    Map2 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 2}]}}]}},
    Map3 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key2">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    ?assert(is_inflation(Map1, Map1)),
    ?assert(is_inflation(Map1, Map2)),
    ?assertNot(is_inflation(Map1, Map3)),
    %% check inflation with merge
    ?assert(state_type:is_inflation(Map1, Map1)),
    ?assert(state_type:is_inflation(Map1, Map2)),
    ?assertNot(state_type:is_inflation(Map1, Map3)).

%% Strict inflation additionally requires the states to differ, so the
%% reflexive case must now be false.
is_strict_inflation_test() ->
    Map1 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    Map2 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 2}]}}]}},
    Map3 = {?TYPE, {?GCOUNTER_TYPE, [{<<"key2">>, {?GCOUNTER_TYPE, [{1, 1}]}}]}},
    ?assertNot(is_strict_inflation(Map1, Map1)),
    ?assert(is_strict_inflation(Map1, Map2)),
    ?assertNot(is_strict_inflation(Map1, Map3)).
%% Placeholder: join decomposition for the map type is not tested yet.
join_decomposition_test() ->
    %% @todo
    ok.

%% Round-tripping through the erlang codec must be lossless.
encode_decode_test() ->
    Map = {?TYPE, {?GCOUNTER_TYPE, [{<<"key1">>, {?GCOUNTER_TYPE, [{1, 2}]}}]}},
    Binary = encode(erlang, Map),
    EMap = decode(erlang, Binary),
    ?assertEqual(Map, EMap).

%% A map of ?MAX_INT_TYPE keyed by actor behaves like a G-Counter: the sum of
%% the per-actor values equals the G-Counter's query after the same sequence
%% of increments.
equivalent_with_gcounter_test() ->
    Actor1 = 1,
    Actor2 = 2,
    Map0 = new([?MAX_INT_TYPE]),
    {ok, Map1} = mutate({apply, Actor1, increment}, undefined, Map0),
    {ok, Map2} = mutate({apply, Actor1, increment}, undefined, Map1),
    {ok, Map3} = mutate({apply, Actor2, increment}, undefined, Map2),
    [{Actor1, Value1}, {Actor2, Value2}] = query(Map3),
    GCounter0 = ?GCOUNTER_TYPE:new(),
    {ok, GCounter1} = ?GCOUNTER_TYPE:mutate(increment, Actor1, GCounter0),
    {ok, GCounter2} = ?GCOUNTER_TYPE:mutate(increment, Actor1, GCounter1),
    {ok, GCounter3} = ?GCOUNTER_TYPE:mutate(increment, Actor2, GCounter2),
    ?assertEqual(Value1 + Value2, ?GCOUNTER_TYPE:query(GCounter3)).
-endif.
% License: Apache License, Version 2.0
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
%% @author <NAME> <<EMAIL>>
%% @copyright Copyright 2015 <NAME>
%%
%% @doc Extensions of the ej module (http://github.com/seth/ej) to provide
%% support for JSON Merge Patch (RFC 7396) and JSON Patch (RFC 6902).
%%
%% @end
-module(ej_merge).
-author('<NAME> <<EMAIL>>').
-export([
mergepatch/2,
patch/2
]).
-include_lib("ej/include/ej.hrl").
%% @doc RFC 7396 - JSON Merge Patch Function
%%
%% This function implements Merge Patch processing as specified in RFC
%% 7396 Section 2. The processing specified by the Patch document is
%% applied to the Target Document. The result is returned.
%%
%% 'Target' is a JSON document compliant with RFC 7159 that has been
%% decoded by a parser supported by mochijson2.
%% 'Patch' is a JSON Merge Patch document as defined in RFC 7396 that
%% has been decoded by a parser supported by mochijson2.
%%
%% NOTES: The result is NOT a JSON Document (string) but a mochijson2
%% data structure. See ej (https://github.com/seth/ej) for the spec
%% types.
%% @end
%%
%% RFC 7396 section 2: an object patch is applied attribute by attribute
%% (treating a non-object target as an empty object); any non-object patch
%% replaces the target wholesale.
-spec mergepatch(json_term(), json_term()) -> json_term().
mergepatch(Target, Patch) ->
    case is_object(Patch) of
        false ->
            Patch;
        true ->
            Base =
                case is_object(Target) of
                    true  -> Target;
                    false -> {struct, []}
                end,
            processAttributes(get_attribute_names(Patch), Patch, Base)
    end.
%% Returns the top-level keys of a decoded JSON object (either the
%% bare-tuple or the {struct, _} mochijson2 representation); any other
%% term has no attributes.
get_attribute_names({Props}) when is_list(Props) ->
    proplists:get_keys(Props);
get_attribute_names({struct, Props}) when is_list(Props) ->
    proplists:get_keys(Props);
get_attribute_names(_Other) ->
    [].
%% True for either decoded-JSON object representation: {Proplist} or
%% {struct, Proplist}; false for every other term.
is_object({Props}) when is_list(Props) -> true;
is_object({struct, Props}) when is_list(Props) -> true;
is_object(_Other) -> false.
%% RFC 7396 recursion step: for each attribute H of the patch, a null value
%% deletes H from Target, any other value is merge-patched against the
%% current value of H and stored back.
processAttributes([], _, Target) ->
    Target;
processAttributes([H|T], Patch, Target) ->
    Value = ej:get({H},Patch),
    Target0 = case null =:= Value of
        true -> %% delete value
            ej:delete({H},Target);
        false -> %% set value (recurses so nested objects merge rather than replace)
            ej:set( {H}, Target, mergepatch( ej:get({H},Target), Value ) )
    end,
    processAttributes(T, Patch, Target0).
%% Converts an RFC 6901 JSON Pointer to {RefType, Segments}: Segments is a
%% list of strings with the escape sequences decoded, and RefType is
%% unassigned_index when the pointer ends in "-" (append position) or
%% direct_reference otherwise.
%% NOTE: This will leave the '-' in the list - not every JSONPointer is ej friendly.
to_ej_path(JSONPointer) when is_list(JSONPointer) ->
    to_ej_path(list_to_binary(JSONPointer));
to_ej_path(JSONPointer) when is_binary(JSONPointer) ->
    Segments = binary:split(JSONPointer, <<"/">>, [global]),
    %% Accumulated in reverse; the head of Reversed is the LAST segment.
    Reversed = lists:foldl(fun process_segment/2, [], Segments),
    case Reversed of
        [] ->
            %% The empty pointer ("") refers to the whole document (RFC 6901);
            %% previously this crashed in lists:last/1.
            {direct_reference, []};
        ["-" | _] ->
            {unassigned_index, lists:reverse(Reversed)};
        _ ->
            {direct_reference, lists:reverse(Reversed)}
    end.

%% Decode one pointer segment, dropping empty segments.
process_segment(Segment, Acc) ->
    %% RFC 6901 section 4: "~1" MUST be decoded before "~0", otherwise the
    %% escaped sequence "~01" would incorrectly decode to "/" instead of "~1".
    Unslashed = binary:replace(Segment, <<"~1">>, <<"/">>, [global]),
    Decoded = binary:replace(Unslashed, <<"~0">>, <<"~">>, [global]),
    case Decoded of
        <<>> -> Acc;
        _ -> [binary_to_list(Decoded) | Acc]
    end.
%% @doc Executes RFC 6902 JSON Patch function
%%
%% This function implements the operations and process identified in
%% IETF RFC 6902 (see NOTE). It uses JSONPointer (RFC 6901) for path
%% references.
%%
%% 'Target' is a JSON document compliant with RFC 7159 that has been
%% decoded by a parser supported by mochijson2.
%% 'Commands' is a list of JSON Patch operations as defined in RFC 6902
%% that has been decoded by a parser supported by mochijson2.
%%
%% NOTES: The result is NOT a JSON Document (string) but a mochijson2
%% data structure. See ej (https://github.com/seth/ej) for the spec
%% types.
%%
%% NOTE WELL: The "test" operation relies on equality which in this
%% module relies on erlang lists:sort and erlang sorting mechanisms.
%% @end
%%
%% Applies each RFC 6902 operation in order, threading the patched document
%% through; throws failed_test when a "test" op does not match and badarg for
%% an unknown op.
-spec patch(json_term(), json_array()) -> json_term().
patch(Target, []) ->
    Target;
patch(Target, [Command | Remaining]) ->
    Path = to_ej_path(ej:get({"path"}, Command)),
    Patched =
        case ej:get({"op"}, Command) of
            <<"add">> ->
                ej:set(extract_path(Path), Target, ej:get({"value"}, Command));
            <<"remove">> ->
                ej:delete(extract_direct_ref(Path), Target);
            <<"replace">> ->
                ej:set(extract_direct_ref(Path), Target, ej:get({"value"}, Command));
            <<"move">> ->
                From = extract_direct_ref(to_ej_path(ej:get({"from"}, Command))),
                Value = ej:get(From, Target),
                Cleared = ej:delete(From, Target),
                ej:set(extract_direct_ref(Path), Cleared, Value);
            <<"copy">> ->
                From = extract_direct_ref(to_ej_path(ej:get({"from"}, Command))),
                ej:set(extract_direct_ref(Path), Target, ej:get(From, Target));
            <<"test">> ->
                Actual = ej:get(extract_direct_ref(Path), Target),
                Expected = ej:get({"value"}, Command),
                case equivalent(Actual, Expected) of
                    true -> Target;
                    false -> throw(failed_test)
                end;
            _ ->
                throw(badarg)
        end,
    patch(Patched, Remaining).
%% For "add": a trailing "-" (unassigned index) means append, so the "-"
%% segment is dropped and ej receives the parent path.
extract_path({direct_reference, Segments}) ->
    Segments;
extract_path({unassigned_index, Segments}) ->
    lists:droplast(Segments);
extract_path(_) ->
    throw(badarg).

%% All other operations require a path to an existing element; an
%% unassigned-index pointer is rejected.
extract_direct_ref({direct_reference, Segments}) ->
    Segments;
extract_direct_ref(_) ->
    throw(badarg).
%% Order-insensitive equality for the "test" op: tuples and lists are
%% compared through their to_list/1 normal form, scalars directly.
equivalent(A, B) when is_tuple(A), is_tuple(B) ->
    to_list(A) =:= to_list(B);
equivalent(A, B) when is_list(A), is_list(B) ->
    to_list(A) =:= to_list(B);
equivalent(A, B) ->
    A =:= B.
%% Normalizes a term for order-insensitive comparison: tuples become their
%% element lists, lists are normalized element-wise and sorted, scalars are
%% returned unchanged.
%% Fix: the previous accumulator used `to_list(E) ++ Acc', which raises
%% badarg for any scalar element (e.g. `1 ++ []') and conflated nested lists
%% with their flattened form; building with cons handles both correctly.
to_list(Item) when is_tuple(Item) ->
    to_list(tuple_to_list(Item));
to_list(Item) when is_list(Item) ->
    lists:sort([to_list(E) || E <- Item]);
to_list(Item) ->
    Item.
%% Use this module to run some basic query against the chain
%% For example:
%%
%% - Lookup last 500 blocks for payment_v2 transactions:
%% lookup_txns_by_type(500, blockchain_txn_payment_v2).
%%
%% - Lookup last 500 blocks for particular txn hash:
%% lookup_txns_by_type(500, <<some_txn_hash>>).
-module(miner_query).
-export([poc_analyze/2,
txns/2, txns/3,
blocks/2,
lookup_txns_by_hash/2,
lookup_txns_by_type/2]).
%% Placeholder for proof-of-coverage analysis over a height range;
%% currently a no-op that always returns ok.
poc_analyze(_Start, _End) ->
    ok.
%% Collects all transactions of the given type(s) from blocks at heights
%% Start..End-1 (End is exclusive, see fold_blocks/5). A single atom type
%% is normalized to a one-element list.
txns(Type, Start, End) when is_atom(Type) ->
    txns([Type], Start, End);
txns(Types, Start, End) ->
    Chain = blockchain_worker:blockchain(),
    Collect =
        fun(Block, Acc) ->
                Matching = [T || T <- blockchain_block:transactions(Block),
                                 lists:member(blockchain_txn:type(T), Types)],
                Matching ++ Acc
        end,
    fold_blocks(Collect, Start, End, Chain, []).
%% Collects every transaction from blocks at heights Start..End-1.
txns(Start, End) ->
    Chain = blockchain_worker:blockchain(),
    Collect = fun(Block, Acc) -> blockchain_block:transactions(Block) ++ Acc end,
    fold_blocks(Collect, Start, End, Chain, []).
%% Fetches the blocks at heights Start..End inclusive; crashes (badmatch)
%% if any block in the range is missing.
blocks(Start, End) ->
    Chain = blockchain_worker:blockchain(),
    lists:map(
      fun(Height) ->
              {ok, Block} = blockchain:get_block(Height, Chain),
              Block
      end,
      lists:seq(Start, End)).
%% Folds Fun over the blocks at heights Start..End-1; note End is EXCLUSIVE
%% (the clause below stops before fetching block End). Crashes (badmatch)
%% if any block in the range cannot be fetched.
fold_blocks(_Fun, End, End, _Chain, Acc) ->
    Acc;
fold_blocks(Fun, Start, End, Chain, Acc) ->
    {ok, B} = blockchain:get_block(Start, Chain),
    fold_blocks(Fun, Start + 1, End, Chain, Fun(B, Acc)).
%% slightly different collection semantics, tags by height
%% Scans the last LastXBlocks blocks (exclusive of the current tip, since
%% fold_blocks treats End as exclusive) and returns [{Height, Txns}] in
%% ascending height order for blocks containing transactions of TxnType.
lookup_txns_by_type(LastXBlocks, TxnType) ->
    Chain = blockchain_worker:blockchain(),
    {ok, Current} = blockchain:height(Chain),
    Collect =
        fun(Block, Acc) ->
                Height = blockchain_block:height(Block),
                case [T || T <- blockchain_block:transactions(Block),
                           blockchain_txn:type(T) =:= TxnType] of
                    [] -> Acc;
                    Matches -> [{Height, Matches} | Acc]
                end
        end,
    lists:reverse(fold_blocks(Collect, Current - LastXBlocks, Current, Chain, [])).
%% Like lookup_txns_by_type/2 but selects transactions whose hash matches
%% TxnHash, tagging results by block height.
lookup_txns_by_hash(LastXBlocks, TxnHash) ->
    Chain = blockchain_worker:blockchain(),
    {ok, Current} = blockchain:height(Chain),
    Collect =
        fun(Block, Acc) ->
                Height = blockchain_block:height(Block),
                case [T || T <- blockchain_block:transactions(Block),
                           blockchain_txn:hash(T) =:= TxnHash] of
                    [] -> Acc;
                    Matches -> [{Height, Matches} | Acc]
                end
        end,
    lists:reverse(fold_blocks(Collect, Current - LastXBlocks, Current, Chain, [])).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 SyncFree Consortium. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(log_recovery_SUITE).
-compile({parse_transform, lager_transform}).
%% common_test callbacks
-export([
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2,
all/0]).
%% tests
-export([read_pncounter_log_recovery_test/1]).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("kernel/include/inet.hrl").
%% Suite setup: raise lager to debug, initialize the shared test
%% infrastructure, and build the node clusters; the first cluster's nodes
%% are stashed in the CT config under the key 'nodes'.
init_per_suite(Config) ->
    lager_common_test_backend:bounce(debug),
    test_utils:at_init_testsuite(),
    Clusters = test_utils:set_up_clusters_common(Config),
    Nodes = hd(Clusters),
    [{nodes, Nodes}|Config].

%% No suite-level teardown needed; clusters are managed by test_utils.
end_per_suite(Config) ->
    Config.

init_per_testcase(_Case, Config) ->
    Config.

end_per_testcase(_, _) ->
    ok.

%% The suite runs a single case.
all() -> [read_pncounter_log_recovery_test].
%% First we remember the initial time of the counter (with value 0).
%% After 15 updates, we kill the nodes
%% We then restart the nodes, and read the value
%% being sure that all 15 updates were loaded from the log
read_pncounter_log_recovery_test(Config) ->
    Nodes = proplists:get_value(nodes, Config),
    FirstNode = hd(Nodes),
    %% Skip the whole case when logging is disabled: without a log there is
    %% nothing to recover from.
    case rpc:call(FirstNode, application, get_env, [antidote, enable_logging]) of
        {ok, false} ->
            pass;
        _ ->
            Type = antidote_crdt_counter_pn,
            Key = log_value_test,
            Obj = {Key, Type, bucket},
            %% Open a transaction BEFORE the updates so its snapshot still
            %% sees the counter at 0.
            {ok, TxId} = rpc:call(FirstNode, antidote, start_transaction, [ignore, []]),
            increment_counter(FirstNode, Obj, 15),
            %% value from old snapshot is 0
            {ok, [ReadResult1]} = rpc:call(FirstNode,
                antidote, read_objects, [[Obj], TxId]),
            ?assertEqual(0, ReadResult1),
            %% a fresh read (outside the old snapshot) sees all 15 increments;
            %% CommitTime is kept to re-read at this point after recovery
            {ok, [ReadResult2], CommitTime} = rpc:call(FirstNode,
                antidote, read_objects, [ignore, [], [Obj]]),
            ?assertEqual(15, ReadResult2),
            lager:info("Killing and restarting the nodes"),
            %% Shut down the nodes
            %% NOTE(review): Nodes is already bound, so this match asserts
            %% that kill_and_restart_nodes returns the exact same node list;
            %% confirm that is the intent rather than a shadowing mistake.
            Nodes = test_utils:kill_and_restart_nodes(Nodes, Config),
            lager:info("Vnodes are started up"),
            lager:info("Nodes: ~p", [Nodes]),
            %% Read the value again
            {ok, [ReadResult3], _CT} = rpc:call(FirstNode, antidote, read_objects,
                [CommitTime, [], [Obj]]),
            ?assertEqual(15, ReadResult3),
            lager:info("read_pncounter_log_recovery_test finished"),
            pass
    end.
%% Auxiliary: increment the counter object N times, one update transaction
%% per increment, asserting each update succeeds.
increment_counter(_FirstNode, _Obj, 0) ->
    ok;
increment_counter(FirstNode, Obj, N) ->
    Result = rpc:call(FirstNode, antidote, update_objects,
                      [ignore, [], [{Obj, increment, 1}]]),
    ?assertMatch({ok, _}, Result),
    increment_counter(FirstNode, Obj, N - 1).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Digraph data type. Similar to the digraph module, but provides a
%% functional API. The functional API allows us to revert to a
%% previous version of the digraph when an optimization that may have
%% damaged the digraph has failed.
%%
-module(beam_digraph).
-export([new/0,
add_vertex/2, add_vertex/3, add_edge/3, add_edge/4,
del_edge/2, del_edges/2,
has_vertex/2,
is_path/3,
in_degree/2, in_edges/2, in_neighbours/2,
out_degree/2, out_edges/2, out_neighbours/2,
vertex/2, vertices/1,
reverse_postorder/2,
roots/1,
topsort/1,
strong_components/2]).
%% Debugging.
-define(DEBUG, false).
-if(?DEBUG).
-export([dump/1,dump/2,dump/3]).
-endif.
-import(lists, [foldl/3, reverse/1]).
-type edge_map() :: #{ vertex() => ordsets:ordset(vertex()) }.
-type vertice_map() :: #{ vertex() => label() }.
-record(dg, {vs = #{} :: vertice_map(),
in_es = #{} :: edge_map(),
out_es = #{} :: edge_map()}).
-type graph() :: #dg{}.
-type vertex() :: term().
-type label() :: term().
-type edge() :: {vertex(), vertex(), label()}.
%% @doc Returns an empty digraph.
-spec new() -> graph().
new() -> #dg{}.
%% @doc Adds vertex V with the default label 'vertex'.
-spec add_vertex(graph(), vertex()) -> graph().
add_vertex(Dg, V) ->
    add_vertex(Dg, V, vertex).

%% @doc Adds (or relabels) vertex V. Existing edges of V are preserved.
-spec add_vertex(graph(), vertex(), label()) -> graph().
add_vertex(#dg{in_es=InEs0, out_es=OutEs0, vs=Vs0}=Dg, V, Label) ->
    Dg#dg{vs=Vs0#{V => Label},
          in_es=init_edge_map(V, InEs0),
          out_es=init_edge_map(V, OutEs0)}.

%% Ensure V has an (initially empty) edge set without clobbering any
%% edges it already has.
init_edge_map(V, EsMap) when is_map_key(V, EsMap) ->
    EsMap;
init_edge_map(V, EsMap) ->
    EsMap#{V => ordsets:new()}.
%% @doc Adds an edge From -> To with the default label 'edge'.
-spec add_edge(graph(), vertex(), vertex()) -> graph().
add_edge(Dg, From, To) ->
    add_edge(Dg, From, To, edge).

%% @doc Adds the labelled edge From -> To. Both vertices must already have
%% been added with add_vertex/2,3.
-spec add_edge(graph(), vertex(), vertex(), label()) -> graph().
add_edge(#dg{in_es=InEs0, out_es=OutEs0}=Dg, From, To, Label) ->
    Edge = {From, To, Label},
    Dg#dg{in_es=edge_map_add(To, Edge, InEs0),
          out_es=edge_map_add(From, Edge, OutEs0)}.

%% Insert Edge into V's ordset of edges (idempotent for duplicates).
edge_map_add(V, Edge, EsMap) ->
    EsMap#{V := ordsets:add_element(Edge, map_get(V, EsMap))}.
%% @doc Removes the edge from both the in- and out-edge maps.
-spec del_edge(graph(), edge()) -> graph().
del_edge(#dg{in_es=InEs0, out_es=OutEs0}=Dg, {From, To, _}=Edge) ->
    Dg#dg{in_es=edge_map_del(To, Edge, InEs0),
          out_es=edge_map_del(From, Edge, OutEs0)}.

%% Remove Edge from V's edge list; a no-op when the edge is absent.
edge_map_del(V, Edge, EsMap) ->
    EsMap#{V := map_get(V, EsMap) -- [Edge]}.

%% @doc Removes each edge in Es.
-spec del_edges(graph(), [edge()]) -> graph().
del_edges(G, Es) when is_list(Es) ->
    lists:foldl(fun(E, Acc) -> del_edge(Acc, E) end, G, Es).
%% @doc True if V is a vertex of the graph.
-spec has_vertex(graph(), vertex()) -> boolean().
has_vertex(#dg{vs=Vs}, V) ->
    is_map_key(V, Vs).

%% @doc Number of edges terminating at V. V must exist in the graph.
-spec in_degree(graph(), vertex()) -> non_neg_integer().
in_degree(#dg{in_es=InEsMap}, V) ->
    length(map_get(V, InEsMap)).

%% @doc All {From, To, Label} edges terminating at V.
-spec in_edges(graph(), vertex()) -> [edge()].
in_edges(#dg{in_es=InEsMap}, V) ->
    map_get(V, InEsMap).

%% @doc The source vertices of all edges terminating at V.
-spec in_neighbours(graph(), vertex()) -> [vertex()].
in_neighbours(#dg{in_es=InEsMap}, V) ->
    [From || {From,_,_} <- map_get(V, InEsMap)].
%% @doc True if there is a path from From to To. Note the check is
%% reflexive: is_path(G, V, V) is always true because the DFS tests the
%% target before expanding. Uses throw(true) as an early exit from the
%% depth-first search; falling through means no path was found.
-spec is_path(graph(), vertex(), vertex()) -> boolean().
is_path(G, From, To) ->
    Seen = sets:new([{version, 2}]),
    try
        _ = is_path_1([From], To, G, Seen),
        false
    catch
        throw:true ->
            true
    end.

%% DFS worklist. Throws 'true' as soon as To is reached; otherwise returns
%% the accumulated seen-set so sibling branches skip visited vertices.
is_path_1([To|_], To, _G, _Seen) ->
    throw(true);
is_path_1([V|Vs], To, G, Seen0) ->
    case sets:is_element(V, Seen0) of
        true ->
            is_path_1(Vs, To, G, Seen0);
        false ->
            Seen1 = sets:add_element(V, Seen0),
            Successors = out_neighbours(G, V),
            %% Descend first; the returned set carries everything visited below V.
            Seen = is_path_1(Successors, To, G, Seen1),
            is_path_1(Vs, To, G, Seen)
    end;
is_path_1([], _To, _G, Seen) ->
    Seen.
%% @doc Number of edges originating at V. V must exist in the graph.
-spec out_degree(graph(), vertex()) -> non_neg_integer().
out_degree(#dg{out_es=OutEsMap}, V) ->
    length(map_get(V, OutEsMap)).

%% @doc All {From, To, Label} edges originating at V.
-spec out_edges(graph(), vertex()) -> [edge()].
out_edges(#dg{out_es=OutEsMap}, V) ->
    map_get(V, OutEsMap).

%% @doc The destination vertices of all edges originating at V.
-spec out_neighbours(graph(), vertex()) -> [vertex()].
out_neighbours(#dg{out_es=OutEsMap}, V) ->
    [To || {_,To,_} <- map_get(V, OutEsMap)].

%% @doc The label of vertex V. V must exist in the graph.
-spec vertex(graph(), vertex()) -> label().
vertex(#dg{vs=Vs}, V) ->
    map_get(V, Vs).

%% @doc All vertices with their labels, in map iteration order.
-spec vertices(graph()) -> [{vertex(), label()}].
vertices(#dg{vs=Vs}) ->
    maps:to_list(Vs).
%% @doc Returns the vertices reachable from Vs in reverse postorder: every
%% vertex appears before all of its (unvisited) successors.
-spec reverse_postorder(graph(), [vertex()]) -> [vertex()].
reverse_postorder(G, Vs) ->
    {Order, _Visited} = rpo(Vs, G, sets:new([{version, 2}]), []),
    Order.

%% DFS that prepends each vertex after all of its successors have been
%% handled, which yields reverse postorder directly.
rpo([V | Vs], G, Visited0, Acc0) ->
    case sets:is_element(V, Visited0) of
        true ->
            rpo(Vs, G, Visited0, Acc0);
        false ->
            Visited1 = sets:add_element(V, Visited0),
            {Acc1, Visited2} = rpo(out_neighbours(G, V), G, Visited1, Acc0),
            rpo(Vs, G, Visited2, [V | Acc1])
    end;
rpo([], _G, Visited, Acc) ->
    {Acc, Visited}.
%% @doc Returns the vertices with no incoming edges, in vertices/1 order.
-spec roots(graph()) -> [vertex()].
roots(G) ->
    [V || {V, _Label} <- vertices(G), in_degree(G, V) =:= 0].

%% @doc Topological sort: reverse postorder of a DFS started from the roots.
-spec topsort(graph()) -> [vertex()].
topsort(G) ->
    reverse_postorder(G, roots(G)).
%%
%% Kosaraju's algorithm
%%
%% Visit each node in reverse post order. If the node has not been assigned to
%% a component yet, start a new component and add all of its in-neighbors to it
%% if they don't yet belong to one. Keep going until all nodes have been
%% visited.
%%
%% https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm
%%
%% NOTE: Vs must be in reverse postorder (see reverse_postorder/2) for the
%% components to be correct.
-spec strong_components(graph(), [vertex()]) -> ComponentMap when
      %% Vertices together with their components.
      ComponentMap :: #{ vertex() => [vertex()] }.
strong_components(G, Vs) ->
    %% Roots maps each vertex to its component's root; Components maps each
    %% vertex to the full member list of its component.
    sc_1(Vs, G, #{}, #{}).

sc_1([V | Vs], G, Roots0, Components) when not is_map_key(V, Roots0) ->
    %% V has not been assigned to a component, start a new one with this one as
    %% the root.
    {Roots, Component} = sc_2([V], G, V, Roots0, []),
    sc_1(Vs, G, Roots, Components#{ V => Component });
sc_1([V | Vs], G, Roots, Components0) ->
    %% V is already part of a component, copy it over.
    Root = map_get(V, Roots),
    Components = Components0#{ V => map_get(Root, Components0) },
    sc_1(Vs, G, Roots, Components);
sc_1([], _G, _Roots, Components) ->
    Components.

%% Flood-fill along in-edges, claiming every unassigned vertex for Root.
sc_2([V | Vs], G, Root, Roots, Acc) when not is_map_key(V, Roots) ->
    %% V has not been assigned to a component, so assign it to the current one.
    sc_2(in_neighbours(G, V) ++ Vs, G, Root, Roots#{ V => Root }, [V | Acc]);
sc_2([_V | Vs], G, Root, Roots, Acc) ->
    %% V is already part of a component, skip it.
    sc_2(Vs, G, Root, Roots, Acc);
sc_2([], _G, _Root, Roots, Acc) ->
    {Roots, reverse(Acc)}.
-if(?DEBUG).
%%
%% Dumps the graph as a string in dot (graphviz) format.
%%
%% Use dot(1) to convert to an image:
%%
%% dot [input] -T[format]
%% dot graph_file -Tsvg > graph.svg

%% @doc Prints the dot representation to stdout using ~p for vertex labels.
-spec dump(any()) -> any().
dump(G) ->
    Formatter = fun(Node) -> io_lib:format("~p", [Node]) end,
    io:format("~s", [dump_1(G, Formatter)]).

%% @doc Writes the dot representation to FileName using ~p for vertex labels.
-spec dump(any(), any()) -> any().
dump(G, FileName) ->
    Formatter = fun(Node) -> io_lib:format("~p", [Node]) end,
    dump(G, FileName, Formatter).

%% @doc Writes the dot representation to FileName; Formatter renders each
%% vertex to iodata for the node label.
-spec dump(any(), any(), any()) -> any().
dump(G, FileName, Formatter) ->
    {ok, Fd} = file:open(FileName, [write]),
    io:fwrite(Fd, "~s", [dump_1(G, Formatter)]),
    file:close(Fd).

%% Build the full "digraph g { ... }" document as iodata.
dump_1(G, Formatter) ->
    Vs = maps:keys(G#dg.vs),
    %% Map carries vertex -> numeric dot node id assigned by dump_vertices.
    {Map, Vertices} = dump_vertices(Vs, 0, Formatter, #{}, []),
    Edges = dump_edges(Vs, G, Map, []),
    io_lib:format("digraph g {~n~s~n~s~n}~n", [Vertices, Edges]).

%% Emit one "<id> [label=...]" line per vertex, numbering them from 0.
dump_vertices([V | Vs], Counter, Formatter, Map, Acc) ->
    VerticeSlug = io_lib:format(" ~p [label=\"~s\"]~n",
                                [Counter, Formatter(V)]),
    dump_vertices(Vs, Counter + 1, Formatter,
                  Map#{ V => Counter }, [VerticeSlug | Acc]);
dump_vertices([], _Counter, _Formatter, Map, Acc) ->
    {Map, Acc}.

%% Emit one "<from> -> <to>" line per out-edge, using the ids from Map.
dump_edges([V | Vs], G, Map, Acc) ->
    SelfId = map_get(V, Map),
    EdgeSlug = [io_lib:format(" ~p -> ~p~n", [SelfId, map_get(To, Map)]) ||
                   {_, To, _} <- out_edges(G, V)],
    dump_edges(Vs, G, Map, [EdgeSlug | Acc]);
dump_edges([], _G, _Map, Acc) ->
    Acc.
-endif.
%% @author <NAME> <<EMAIL>>
%% @copyright 2009-2010 <NAME>
%%
%% @doc Module for rendering and caching scomps. Scomps can be caching and
%% non caching, depending on the passed arguments and the results of the
%% scomp's varies/2 function.
%% Copyright 2009-2010 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(z_scomp).
-author("<NAME> <<EMAIL>>").
-export([render/4, render_all/4, render_optional/4]).
-include_lib("zotonic.hrl").
%% @spec render(ScompName, Args, Vars, Context) -> {ok, Context} | {ok, io_list} | {error, Reason}
%% @doc Render the named scomp; Args are the scomp arguments and Vars are the
%% variables given to the template. Renders nothing (with an info log) when
%% no module implements the scomp.
render(ScompName, Args, Vars, Context) ->
    case z_module_indexer:find(scomp, ScompName, Context) of
        {ok, #module_index{erlang_module=Module}} ->
            render_scomp_module(Module, Args, Vars,
                                z_context:prune_for_scomp(Context), Context);
        {error, enoent} ->
            %% No such scomp; functionality can be switched on/off per module,
            %% so skip quietly instead of failing the render.
            lager:info("No scomp enabled for \"~p\"", [ScompName]),
            <<>>
    end.
%% @doc Like render/4, but marks the call '$optional' and renders nothing
%% (without logging) when the scomp does not exist.
render_optional(ScompName, Args, Vars, Context) ->
    case z_module_indexer:find(scomp, ScompName, Context) of
        {ok, #module_index{erlang_module=Module}} ->
            render_scomp_module(Module, ['$optional' | Args], Vars,
                                z_context:prune_for_scomp(Context), Context);
        {error, enoent} ->
            <<>>
    end.

%% @doc Like render/4, but passes {'$all', true} so every implementation of
%% the scomp is rendered, not just the highest-priority one.
render_all(ScompName, Args, Vars, Context) ->
    case z_module_indexer:find(scomp, ScompName, Context) of
        {ok, #module_index{erlang_module=Module}} ->
            render_scomp_module(Module, [{'$all', true} | Args], Vars,
                                z_context:prune_for_scomp(Context), Context);
        {error, enoent} ->
            <<>>
    end.
%% Render via the scomp module, either directly (nocache) or memoized in the
%% depcache keyed on the cache-relevant args, ACL cache key and language.
render_scomp_module(ModuleName, Args, Vars, ScompContext, Context) ->
    %% The pruned scomp context lacks the request; re-attach it for rendering.
    ScompContextWM = ScompContext#context{req=Context#context.req},
    case vary(ModuleName, Args, ScompContext) of
        nocache ->
            case ModuleName:render(Args, Vars, ScompContextWM) of
                {ok, Result} -> z_context:prune_for_template(Result);
                {error, Reason} -> throw({error, Reason})
            end;
        {CachKeyArgs, MaxAge, Varies} ->
            Key = key(ModuleName, CachKeyArgs, ScompContextWM),
            %% Only evaluated on a cache miss; memoized for MaxAge seconds,
            %% invalidated when any of the Varies dependencies change.
            RenderFun = fun() ->
                case ModuleName:render(Args, Vars, ScompContextWM) of
                    {ok, Result} -> z_context:prune_for_template(Result);
                    {error, Reason} -> throw({error, Reason})
                end
            end,
            z_depcache:memo(RenderFun, Key, MaxAge, Varies, Context)
    end.
%% @doc Create an unique key for the scomp and the visibility level it is rendered for.
%% The key includes the ACL cache key and language so cached output is never
%% shared across access levels or translations.
%% @spec key(atom(), proplist(), context()) -> term()
key(ScompName, EssentialParams, Context) ->
    {ScompName, EssentialParams, z_acl:cache_key(Context), z_context:language(Context)}.
%% @doc Check how and if the scomp wants to be cached. The scomp's own
%% vary/2 wins; 'default' falls back to the max_age/vary template arguments,
%% yielding nocache or {CacheKeyArgs, MaxAgeSeconds, VaryDependencies}.
vary(ModuleName, Args, ScompContext) ->
    case ModuleName:vary(Args, ScompContext) of
        default ->
            case z_convert:to_integer(proplists:get_value(max_age, Args)) of
                undefined ->
                    %% Without a max_age argument there is nothing to cache.
                    nocache;
                Max ->
                    Vary = proplists:get_all_values(vary, Args),
                    %% The caching args themselves must not be part of the key.
                    CacheArgs = proplists:delete(vary, proplists:delete(max_age, Args)),
                    {CacheArgs, Max, Vary}
            end;
        Other ->
            Other
    end.
%%%------------------------------------------------------------------------
%% Copyright 2018, OpenCensus Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% Measure represents a type of metric to be tracked and recorded.
%% For example, latency, request Mb/s, and response Mb/s are measures
%% to collect from a server.
%%
%% Measure is a generic interface for recording values in aggregations
%% via subscribed views.
%% When recording a value, we have to obtain the list of all subscribed views
%% and call respective aggregations. We use code generation to optimize this.
%% When a view subscribed or unsubscribed we regenerate unrolled loop in a
%% special module (one for each measure). Module names generated from measurement
%% names (1-to-1). If we know a measure name at the compile time, we can eliminate
%% the module name lookup and inject remote call directly, replacing `oc_stat:record'
%% with `<GENERATED_MEASURE_MODULE>:record'.
%% For that {parse_transform, oc_stat_measure} option must be used.
%% @end
%%%-----------------------------------------------------------------------
-module(oc_stat_measure).
%% user api
-export([new/3,
exists/1]).
%% codegen
-export([measure_module/1,
module_name/1,
maybe_module_name/1,
regen_record/2,
delete_measure/1]).
%% unsafe api, needs snychronization
-export([register_/1,
add_subscription_/2,
remove_subscription_/2,
terminate_/0]).
-export(['__init_backend__'/0]).
-export([parse_transform/2]).
-export_types([name/0,
description/0,
unit/0,
measure/0]).
-record(measure, {name :: name(),
module :: module(),
description :: description(),
unit :: unit()}).
-type name() :: atom() | binary() | string().
-type description() :: binary() | string().
-type unit() :: atom().
-type measure() :: #measure{}.
-define(MEASURES_TABLE, ?MODULE).
%% @doc
%% Creates and registers a measure. If a measure with the same name
%% already exists, old measure returned.
%% Registration is serialized through the oc_stat gen_server, which also
%% derives the per-measure codegen module name here.
%% NOTE(review): the spec's return type is oc_stat_view:measure(), while the
%% value registered is this module's #measure{} record - confirm the remote
%% type alias matches.
%% @end
-spec new(name(), description(), unit()) -> oc_stat_view:measure().
new(Name, Description, Unit) ->
    gen_server:call(oc_stat, {measure_register,
                              #measure{name=Name,
                                       module=oc_stat_measure:module_name(Name),
                                       description=Description,
                                       unit=Unit}}).
%% @doc
%% Returns the measure registered under `Name', or `false' when there is
%% none. The measures table is a set keyed on the name (keypos 2), so the
%% lookup yields at most one row.
%% @end
-spec exists(name()) -> measure() | false.
exists(Name) ->
    case ets:lookup(?MEASURES_TABLE, Name) of
        [Measure] -> Measure;
        [] -> false
    end.
%% =============================================================================
%% internal
%% =============================================================================

%% @private
%% Registers Measure unless one with the same name already exists, in which
%% case the existing measure is returned unchanged. Unsafe: callers must
%% serialize (done by the oc_stat gen_server).
register_(#measure{name=Name}=Measure) ->
    case exists(Name) of
        false -> insert_measure_(Measure);
        Existing -> Existing
    end.

%% @private
%% Store the measure and generate its dispatch module with no subscriptions.
insert_measure_(#measure{module=Module}=Measure) ->
    ets:insert(?MEASURES_TABLE, Measure),
    regen_record(Module, []),
    Measure.
%% @private
%% Subscribe view VS to measure Name by regenerating the measure's dispatch
%% module with VS prepended to its current subscription list.
add_subscription_(Name, VS) ->
    case exists(Name) of
        #measure{module=Module} ->
            regen_record(Module, [VS | Module:subs()]),
            ok;
        false ->
            {error, {unknown_measure, Name}}
    end.

%% @private
%% Unsubscribe view VS from measure Name; unknown measures are silently ok.
remove_subscription_(Name, VS) ->
    case exists(Name) of
        #measure{module=Module} ->
            regen_record(Module, lists:delete(VS, Module:subs())),
            ok;
        false ->
            ok
    end.
%% @private
%% Tears down every registered measure (unloading its generated
%% module).  Returns the list of delete_measure/1 results — callers
%% may rely on the comprehension's return value, so this intentionally
%% is not lists:foreach/2.
terminate_() ->
    [delete_measure(M) || M <- ets:tab2list(?MEASURES_TABLE)].
%% @private
%% Creates the public, read-optimized ETS registry for measures.
%% keypos 2 indexes on #measure.name (element 1 is the record tag).
%% The `?MEASURES_TABLE = ets:new(...)' match asserts creation succeeded
%% (ets:new/2 returns the name for named tables).
'__init_backend__'() ->
    ?MEASURES_TABLE = ets:new(?MEASURES_TABLE, [set, named_table, public, {keypos, 2}, {read_concurrency, true}]),
    ok.
%% =============================================================================
%% codegen
%% =============================================================================
%% @private
%% Maps a measure name to its generated recorder module, failing with
%% `{unknown_measure, Name}' when the measure was never registered.
measure_module(Name) ->
    Found = ets:lookup(?MEASURES_TABLE, Name),
    case Found of
        [#measure{module=Module}] -> Module;
        _ -> erlang:error({unknown_measure, Name})
    end.
%% @private
%% Derives the generated recorder module's name for a measure name.
%% NOTE(review): list_to_atom/1 on caller-supplied names — fine for
%% application-defined metric names, but untrusted input would grow the
%% (non-GC'd) atom table; confirm names always come from app code.
-spec module_name(name()) -> module().
module_name(Name) ->
    list_to_atom(module_name_str(Name)).

%% Normalize any accepted name form (atom | binary | iodata) into a
%% flat character list, then apply the module-name template.
module_name_str(Name) when is_atom(Name) ->
    name_template(atom_to_list(Name));
module_name_str(Name) when is_binary(Name) ->
    name_template(binary_to_list(Name));
module_name_str(Name) when is_list(Name) ->
    name_template(binary_to_list(iolist_to_binary(Name))).

%% Prefix with "$_MEASURE_" so generated modules cannot collide with
%% ordinary application modules.  Input is always a flat string here,
%% so plain concatenation replaces the old lists:flatten/1 call.
name_template(Name) ->
    "$_MEASURE_" ++ Name.
%% @private
%% Like module_name/1 but uses list_to_existing_atom/1, so it crashes
%% (badarg) instead of creating a new atom when the measure's module
%% was never generated — safe for lookups on arbitrary names.
maybe_module_name(Name) ->
    list_to_existing_atom(module_name_str(Name)).
%% @private
%% Regenerates the measure's module so that record/2 forwards samples
%% to each view subscription in VSs, and subs/0 returns VSs as a
%% literal (erl_parse:abstract/1 turns the term into AST).
regen_record(ModuleName, VSs) ->
    regen_module(ModuleName, gen_add_sample_calls(VSs), erl_parse:abstract(VSs)).
%% @private
%% "Deletes" a measure by regenerating its module into a tombstone:
%% both record/2 and subs/0 raise {unknown_measure, Name}.  The dummy
%% bindings from gen_add_sample_calls([]) keep the generated record/2
%% free of unused-variable warnings before the error call.
delete_measure(#measure{name=Name, module=Module}) ->
    ErrorA = erl_parse:abstract({unknown_measure, Name}),
    regen_module(Module,
                 gen_add_sample_calls([])
                 ++ [{call, 1,
                      {remote, 1, {atom, 1, erlang}, {atom, 1, error}},
                      [ErrorA]}],
                 {call, 1,
                  {remote, 1, {atom, 1, erlang}, {atom, 1, error}},
                  [ErrorA]}).
%% @private
%% Compiles and hot-loads a fresh version of the per-measure module:
%%   record(ContextTags, Value) -> <RecordBody>, ok.
%%   subs() -> <Subs>.
%% RecordBody is a list of expression ASTs; Subs is a single expression
%% AST.  Both {ok, _, _} = compile:forms and {module, _} = load_binary
%% are assertive matches — any codegen failure crashes the caller.
regen_module(ModuleName, RecordBody, Subs) ->
    ModuleNameStr = atom_to_list(ModuleName),
    {ok, Module, Binary} =
        compile:forms(
          [{attribute, 1, file,
            {ModuleNameStr,
             1}},
           {attribute, 1, module, ModuleName},
           {attribute, 1, export,
            [{record, 2}]},
           {attribute, 1, export,
            [{subs, 0}]},
           {function, 1, record, 2,
            [{clause, 1, [{var, 1, 'ContextTags'}, {var, 1, 'Value'}], [],
              %% Always terminate record/2 with the literal `ok'.
              RecordBody ++ [{atom, 1, ok}]
             }]},
           {function, 1, subs, 0,
            [{clause, 1, [], [],
              [Subs]
             }]},
           {eof, 2}]),
    {module, Module} = code:load_binary(Module, ModuleNameStr, Binary).
%% Builds the AST statements that forward a sample to each view
%% subscription.  With no subscriptions, emit two dummy `_ = Var'
%% bindings so the generated record/2 does not trigger
%% unused-variable warnings for ContextTags/Value.
gen_add_sample_calls([]) ->
    [{match, 1, {var, 1, '_'}, {var, 1, 'ContextTags'}},
     {match, 1, {var, 1, '_'}, {var, 1, 'Value'}}];
gen_add_sample_calls(Subscriptions) ->
    [oc_stat_view:gen_add_sample_(VS) || VS <- Subscriptions].
%% @doc
%% Parse-transform entry point: rewrites `oc_stat:record' calls that
%% use constant measure names into direct calls on the generated
%% measure modules, skipping the runtime name lookup.  If the measure
%% does not exist at call time, an `{unknown_measure, Name}' error is
%% raised instead.
%% @end
parse_transform(Forms, _Options) ->
    [walk_ast(Form) || Form <- Forms].
%% Descend into function forms (the only forms that can contain
%% oc_stat:record calls); every other top-level form passes through.
walk_ast({function, Line, Name, Args, Clauses}) ->
    {function, Line, Name, Args, walk_clauses([], Clauses)};
walk_ast(Form) ->
    Form.
%% Rewrite each clause body, accumulating processed clauses in reverse.
%% The gensym counter is reset per clause so generated variable names
%% restart at 1 within each clause scope.
walk_clauses(Acc, []) ->
    lists:reverse(Acc);
walk_clauses(Acc, [{clause, Line, Arguments, Guards, Body}|Rest]) ->
    reset_gensym(),
    walk_clauses([{clause, Line, Arguments, Guards, walk_body([], Body)}|Acc], Rest).
%% Transform every statement in a clause body.  `Acc' holds any
%% already-processed statements in reverse order; lists:reverse/2
%% prepends them (restored to order) to the transformed remainder.
walk_body(Acc, Statements) ->
    lists:reverse(Acc, [transform_statement(S) || S <- Statements]).
%% Rewrite `oc_stat:record(Tags, [{M, V} | ...])' (list form) into a
%% block of direct measure-module calls.
transform_statement({call, Line,
                     {remote, _, {atom, _, oc_stat}, {atom, _, record}},
                     [Tags, {cons, _, _, _} = Measurements]}=_Stmt) ->
    gen_record_calls(Line, Tags, erl_syntax:list_elements(Measurements));
%% Rewrite the 3-argument form `oc_stat:record(Tags, Measure, Value)'
%% when the measure is a literal node ({atom,_,_}, {bin,_,_}, ...);
%% the guard inspects the AST node's tag.
transform_statement({call, Line,
                     {remote, _, {atom, _, oc_stat}, {atom, _, record}},
                     [Tags, {MType, _, _}=Measurement, Value]}=_Stmt)
  when is_atom(MType) orelse is_binary(MType) orelse is_list(MType) ->
    gen_record_calls(Line, Tags, [{tuple, Line, [Measurement, Value]}]);
%% Generic traversal: recurse through arbitrary tuples and lists so
%% every nested expression in the AST is visited.
transform_statement(Stmt) when is_tuple(Stmt) ->
    list_to_tuple(transform_statement(tuple_to_list(Stmt)));
transform_statement(Stmt) when is_list(Stmt) ->
    [transform_statement(S) || S <- Stmt];
transform_statement(Stmt) ->
    Stmt.
%% =============================================================================
%% private
%% =============================================================================
%% Builds the replacement AST for one oc_stat:record call: bind the
%% tags expression once to a gensym'd variable, normalize it with
%% gen_prepare_tags/2, then emit one measure-module call per
%% {MeasureName, Value} measurement tuple.  Only tuples whose first
%% element is a literal (third AST field = MeasureName) are matched by
%% the comprehension; anything else is silently dropped here.
gen_record_calls(Line, Tags, Measurements) ->
    CTags = {var, Line, gensym("CTags")},
    GTags = {var, Line, gensym("GTags")},
    {block, Line,
     [{match, Line, CTags, Tags},
      {match, Line, GTags, gen_prepare_tags(Line, CTags)}]
     ++
     [measure_module_record_call(Line, MeasureName, GTags, Value)
      || {tuple, _, [{_, _, MeasureName}, Value]} <- Measurements]}.
%% @private
%% Builds the AST for:
%%   try '$_MEASURE_Name':record(GTags, Value) of _ -> ok
%%   catch error:undef -> erlang:error({unknown_measure, Name})
%%   end
%% An undef means the measure module was never generated, i.e. the
%% measure is unknown.
measure_module_record_call(Line, MeasureName, GTags, Value) ->
    {'try', Line,
     [{call, Line,
       {remote, Line, {atom, Line, module_name(MeasureName)}, {atom, Line, record}},
       [GTags, Value]}],
     %% `of' clause: discard the call result, return `ok'.  This node
     %% previously carried a hard-coded line number 279 left over from
     %% codegen; use the caller's Line so diagnostics point at the
     %% original source location.
     [{clause, Line, [{var, Line, '_'}], [], [{atom, Line, 'ok'}]}],
     [{clause, Line,
       [{tuple, Line,
         [{atom, Line, error}, {atom, Line, undef}, {var, Line, '_'}]}],
       [],
       [{call, Line,
         {remote, Line, {atom, Line, erlang}, {atom, Line, error}},
         [{tuple, Line,
           [{atom, Line, unknown_measure}, erl_parse:abstract(MeasureName)]}]}]}],
     []}.
%% Builds the AST for normalizing a tags value:
%%   case CTags of
%%       _ when is_map(CTags) -> CTags;
%%       _ -> oc_tags:from_ctx(CTags)
%%   end
%% i.e. a map is used as-is, anything else is assumed to be a context
%% and converted.
gen_prepare_tags(Line, CTagsVar) ->
    MapClause =
        {clause, Line, [{var, Line, '_'}],
         [[{call, Line, {atom, Line, is_map}, [CTagsVar]}]],
         [CTagsVar]},
    CtxClause =
        {clause, Line, [{var, Line, '_'}], [],
         [{call, Line,
           {remote, Line, {atom, Line, oc_tags}, {atom, Line, from_ctx}},
           [CTagsVar]}]},
    {'case', Line, CTagsVar, [MapClause, CtxClause]}.
%% Generates a fresh variable name like '$oc_gen_CTags_3$' using a
%% per-process counter in the process dictionary.  The counter now
%% defaults to 0 when unset, so gensym/1 no longer crashes with
%% badarith if called before reset_gensym/0 (previously
%% `get(...) + 1' on `undefined' blew up).
gensym(Name) ->
    Next = case get(oc_gensym_counter) of
               undefined -> 1;
               N -> N + 1
           end,
    put(oc_gensym_counter, Next),
    list_to_atom(
      lists:flatten(
        io_lib:format("$oc_gen_~s_~B$", [Name, Next]))).
reset_gensym() ->
put(oc_gensym_counter, 0). | src/oc_stat_measure.erl | 0.756897 | 0.512998 | oc_stat_measure.erl | starcoder |
%% @doc Edmonds-Karp / Ford-Fulkerson Algorithms
%%
%% <p>Calculates the Maximum Flow in a Network (Directed Graph)</p>
%%
%% <p>For examples you can check the <code>flow_demo</code> module.</p>
%%
-module(edmonds_karp).
-export([run/4]).
-type mode() :: bfs | dfs.
%% ==========================================================
%% Exported Functions
%% ==========================================================
%% @doc Runs a max-flow algorithm on the directed graph <code>G</code>
%% from source <code>S</code> to sink <code>T</code>.
%%
%% <p>With <code>Mode =:= bfs</code> the augmenting paths are found by
%% breadth-first search (Edmonds-Karp); with <code>Mode =:= dfs</code>
%% by depth-first search (Ford-Fulkerson).</p>
%%
%% <p>Returns <code>{error, not_network}</code> for undirected graphs,
%% since a flow network must be directed.</p>
%%
-spec run(graph:graph(), graph:vertex(), graph:vertex(), mode()) -> graph_lib:flow() | {'error', 'not_network'}.
run(G, S, T, Mode) when Mode =:= bfs; Mode =:= dfs ->
    case graph:graph_type(G) of
        undirected ->
            {error, not_network};
        directed ->
            {FlowTab, Residual} = init_residual_network(G),
            ok = edmonds_karp_step(G, Residual, FlowTab, S, T, Mode),
            MaxFlow = graph_lib:reconstruct_flow(ets:tab2list(FlowTab)),
            %% Release the residual graph and the ETS flow table before
            %% handing back the result.
            graph:del_graph(Residual),
            ets:delete(FlowTab),
            MaxFlow
    end.
%% ==========================================================
%% Edmonds-Karp / Ford-Fulkerson Functions
%% ==========================================================
%% Main augmenting loop: repeatedly find an augmenting path in the
%% residual network and push flow along it; terminate when no path
%% from S to T remains.
-spec edmonds_karp_step(graph:graph(), graph:graph(), ets:tid(), graph:vertex(), graph:vertex(), mode()) -> 'ok'.
edmonds_karp_step(G, RN, Flow, S, T, Mode) ->
    case augmenting_path(RN, S, T, Mode) of
        no_path ->
            ok;
        Path ->
            UpdatedRN = update_residual_network(G, RN, Flow, Path),
            edmonds_karp_step(G, UpdatedRN, Flow, S, T, Mode)
    end.
%% Build the initial residual network: a fresh directed graph with the
%% same vertices and (weighted) edges as G, plus an ETS table holding a
%% zero flow per edge and the running total under the key `flow'.
-spec init_residual_network(graph:graph()) -> {ets:tid(), graph:graph()}.
init_residual_network(G) ->
    Residual = graph:empty(directed),
    FlowTab = ets:new(f, [ordered_set]),
    ets:insert(FlowTab, {flow, 0}),
    WEdges = graph:edges_with_weights(G),
    [ets:insert(FlowTab, {Edge, 0}) || {Edge, _W} <- WEdges],
    [graph:add_vertex(Residual, V) || V <- graph:vertices(G)],
    [graph:add_edge(Residual, U, V, W) || {{U, V}, W} <- WEdges],
    {FlowTab, Residual}.
%% Find an augmenting path from S to T in the residual network, using
%% DFS (Ford-Fulkerson) or BFS (Edmonds-Karp).  The traversal returns
%% a proplist keyed by vertex; a reachable sink maps to {Cost, VPath}.
-spec augmenting_path(graph:graph(), graph:vertex(), graph:vertex(), mode()) -> graph_lib:epath_weighted() | 'no_path'.
augmenting_path(RN, S, T, M) ->
    Traversal =
        case M of
            dfs -> dfs:run(RN, S);
            bfs -> bfs:run(RN, S)
        end,
    case proplists:get_value(T, Traversal) of
        unreachable -> no_path;
        %% Convert the vertex path into weighted edges for the caller.
        {_Cost, VPath} -> path_edges(RN, VPath)
    end.
%% Return the edges (with their residual weights) visited along a
%% vertex path.
-spec path_edges(graph:graph(), graph_lib:vpath()) -> graph_lib:epath_weighted().
path_edges(RN, Path) -> path_edges(RN, Path, []).

%% Helper function for path_edges/2: walk consecutive vertex pairs,
%% accumulating {{From, To}, Weight} tuples in reverse.
-spec path_edges(graph:graph(), graph_lib:vpath(), graph_lib:epath_weighted()) -> graph_lib:epath_weighted().
path_edges(_RN, [_Last], Edges) ->
    lists:reverse(Edges);
path_edges(RN, [From, To | Rest], Edges) ->
    Edge = {From, To},
    Weight = graph:edge_weight(RN, Edge),
    path_edges(RN, [To | Rest], [{Edge, Weight} | Edges]).
%% Update the residual network (and the flow table) with the
%% information of an augmenting path.
-spec update_residual_network(graph:graph(), graph:graph(), ets:tid(), graph_lib:epath_weighted()) -> graph:graph().
update_residual_network(G, RN, Flow, EPath) ->
    %% The flow increase Cf is the bottleneck (minimum residual weight)
    %% on the path; found by sorting on weight and taking the head.
    [{_, Cf}|_] = lists:sort(fun({_E1, W1}, {_E2, W2}) -> W1 < W2 end, EPath),
    %% Bump the total flow by Cf.
    [{flow, CurrFlow}] = ets:lookup(Flow, flow),
    ets:insert(Flow, {flow, CurrFlow + Cf}),
    %% For each path edge, re-derive the original edge direction from G
    %% (the path may traverse a reverse/residual edge), then rebuild the
    %% forward residual edge (capacity W - NF) and the backward edge
    %% (current flow NF) as appropriate.
    %% NOTE(review): the case below assumes G never contains both
    %% {U,V} and {V,U}; antiparallel edges would hit no clause — confirm.
    lists:foreach(
      fun({{U, V}, _W}) ->
              {{From, To}=E, W} =
                  case {graph:edge_weight(G, {U,V}), graph:edge_weight(G, {V,U})} of
                      {false, C} -> {{V, U}, C};
                      {C, false} -> {{U, V}, C}
                  end,
              %% Drop both directions before re-adding with new weights.
              graph:del_edge(RN, {U, V}),
              graph:del_edge(RN, {V, U}),
              [{E, F}] = ets:lookup(Flow, E),
              NF = F + Cf,
              ets:insert(Flow, {E, NF}),
              %% Remaining capacity (if any) keeps a forward edge.
              _ = case W-NF > 0 of
                      true -> graph:add_edge(RN, From, To, W-NF);
                      false -> ok
                  end,
              %% Positive flow adds a backward (cancellation) edge.
              _ = case NF > 0 of
                      true -> graph:add_edge(RN, To, From, NF);
                      false -> ok
                  end
      end,
      EPath
    ),
RN. | src/edmonds_karp.erl | 0.605682 | 0.479077 | edmonds_karp.erl | starcoder |
% Copyright 2011, Dell
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%
% Author: RobHirschfeld
%
-module(bdd_selftest).
-export([test/1, source/0, get_data/2]).
-import(bdd_webrat).
-import(bdd_utils).
-import(bdd).
%% @doc True when every element of Bools is the atom `true'.
assert(Bools) ->
    assert(Bools, true).
%% @doc True when every element of Bools is exactly equal to Test.
%% (An empty list vacuously passes.)
assert(Bools, Test) ->
    lists:all(fun(Value) -> Value =:= Test end, Bools).
%% True when, for every {Name, Expected, Actual} triple in Set, the
%% expected and actual values are exactly equal.
assert_sets(Set) ->
    Checks = [Expected =:= Actual || {_Name, Expected, Actual} <- Set],
    assert(Checks).
% set format should be {name, expected value, value}
%% Prints a pass/fail line for the named Test and returns `true' on
%% success or the test's name on failure (so callers can collect the
%% names of failed tests).
assert_result(Test, Set) ->
    case assert_sets(Set) of
        true ->
            io:format("\tPass \"~p\"~n", [Test]),
            true;
        false ->
            io:format("\t*** FAIL \"~p\"! results: ~p~n", [Test, Set]),
            Test
    end.
%% Loads the selftest fixture data (a list of Erlang terms) from
%% bdd_selftest.data in the current directory; crashes on read error.
source() ->
    {ok, Result} = file:consult("bdd_selftest.data"),
    Result.
%% Runs all selftests (reading the fixture once); returns `pass' when
%% everything succeeds, otherwise the list of failed test names.
test(all) ->
    io:format("Running all selftests:~n"),
    All = [assert, html_peek, html_link, html_link_complex, html_bus, uri, webrat_noop, http_post_params],
    Source = source(),
    Results = [test(Test, Source) || Test <- All],
    %% assert_result/2 yields `true' or the failing test's name.
    case assert(Results) of
        true -> io:format("Completed, All Pass.~n"), pass;
        _ -> io:format("Completed, FAIL.~n"), [Failed || Failed <- Results, Failed =/= true]
    end;
%% Single-test entry points (list-wrapped or bare test name).
test([Test]) -> test(Test);
test(Test) ->
    test(Test, source()).
%% Fetches the value stored under Key in the fixture proplist.
%% Intentionally crashes (badmatch on `false') if Key is absent.
get_data(Data, Key) ->
    Found = lists:keyfind(Key, 1, Data),
    {Key, Value} = Found,
    Value.
%% Each test/2 clause pulls its cases from the fixture, exercises one
%% bdd_utils (or bdd_catchall) function, and reports via
%% assert_result/2 with {Name, Expected, Actual} triples.
test(assert, Source) ->
    Cases = [{true, assert_true}, {true, assert_1true}, {true, assert_mixed}, {true, assert_truefirst}, {true, assert_truelast}, {false, assert_1false}, {false, assert_false}],
    Result = [{Test, Expected, bdd_utils:assert(get_data(Source, Test))} || {Expected, Test} <- Cases],
    assert_result(assert, Result);
%% URL building from {Base, Path} pairs; all cases share one Expected.
test(uri, Source) ->
    {Expected, Cases} = get_data(Source, uri),
    Result = [{Expected, Expected, bdd_utils:uri([{url, B}] ,P)} || {B, P} <- Cases],
    assert_result(uri, Result);
test(http_post_params, Source) ->
    Cases = get_data(Source, http_post_params),
    Result = [{Expected, Expected, bdd_utils:http_post_params(Params)} || {Expected, Params} <- Cases],
    assert_result(http_post_params, Result);
%% Exercises the catchall "I do nothing to" step through apply/3.
test(webrat_noop, Source) ->
    Cases = get_data(Source, webrat_noop),
    Result = [{In++Out, Expected, string:equal(Out, apply(bdd_catchall, step, [[], [], {step_given, 1, ["I do nothing to", In]}] ))} || {Expected, Out, In} <- Cases],
    bdd_utils:debug("result: ~p~n", [Result]),
    assert_result(webrat_noop, Result);
%% HTML helpers: peek for a string, find links in simple/complex markup.
test(html_peek, Source) ->
    {LookFor, HTML} = get_data(Source, html_simple),
    Result1 = {result1, true, bdd_utils:html_peek(LookFor, HTML)},
    Result2 = {result2, nomatch, bdd_utils:html_peek(LookFor++"not findable", HTML)},
    assert_result(html_peek, [Result1, Result2]);
test(html_bus, Source) ->
    {LookFor, HTML} = get_data(Source, html_bus),
    Result1 = {result1, true, bdd_utils:html_peek(LookFor, HTML)},
    Result2 = {result2, "/map/bus/xref_5", bdd_utils:html_find_link(LookFor, HTML)},
    assert_result(html_bus, [Result1, Result2]);
test(html_link, Source) ->
    {LookFor, HTML} = get_data(Source, html_simple_link),
    Result1 = {result1, "url", bdd_utils:html_find_link(LookFor, HTML)},
    Result2 = {result2, [], bdd_utils:html_find_link(LookFor++"not findable", HTML)},
    assert_result(html_link, [Result1, Result2]);
test(html_link_complex, Source) ->
    {LookFor, HTML} = get_data(Source, html_complex_link),
    Result1 = {result1, "url", bdd_utils:html_find_link(LookFor, HTML)},
    Result2 = {result2, [], bdd_utils:html_find_link(LookFor++"not findable", HTML)},
    assert_result(html_link_complex, [Result1, Result2]);
test(Test, _Source) ->
io:format("ERROR: Could not match ~p test atom!~n", [Test]),
throw("no such test"),
false. | crowbar_framework/BDD/bdd_selftest.erl | 0.558327 | 0.533701 | bdd_selftest.erl | starcoder |
% @doc OTPCL interpreter/evaluator.
%
% OTPCL's interpreter effectively revolves around repeatedly calling
% 2-arity functions ("commands"), the first argument being the actual
% list of arguments for that function/command, and the second being
% the current interpretation state (expressed as a tuple of two maps,
% one with all command definitions and one with all variable
% definitions). Each command-backing function in turn returns a tuple
% with a return value and an updated state.
%
% To illustrate: when OTPCL's parser encounters the command invocation
% `foo bar baz' and sends the corresponding parse tree to the
% interpreter, the interpreter in turn calls `{Result, NewState} =
% Fun([bar, baz], State)' (where `Fun' is the value of the `foo' key
% in the first element of the `State' tuple, `Result' is the return
% value for that command, and `NewState' is the updated state).
%
% This means it's pretty straightforward to define an OTPCL command
% yourself from within Erlang: simply define a 2-arity function where
% the first argument is a list and the second argument is a 2-element
% tuple of maps. A module that defines OTPCL commands can/should
% specify which functions in that module are "OTPCL-aware" in this
% fashion like so:
%
% ```
% -module(my_otpcl_cmds).
% -export([foo/2, bar/2, baz/2]).
% -otpcl_cmds([foo, bar, baz]). % OTPCL-aware funs in the module
%
% foo([Thing], State) ->
% {ok, State}.
% bar([Thing1, Thing2], State) ->
% {{Thing1, Thing2}, State}.
% baz([Name, Val], State) ->
% otpcl_stdlib:set([Name, Val], State).
% '''
%
% The interpreter itself is also an OTPCL-aware function in this sense
% (albeit with a simplification in that it does not <em>require</em>
% its first argument to be a list; it can take a parse tree directly).
% It can thus be invoked from within OTPCL:
%
% ```
% otpcl> import otpcl_eval
% {ok,otpcl_eval}
% otpcl> eval {set foo "howdy~n"}
% ok
% otpcl> print $foo
% howdy
% ok
% otpcl> import otpcl_env
% ok
% otpcl> eval {
% ...> set foo "aloha~n"
% ...> print $foo
% ...> } [default_state]
% aloha
% [ ... pile of interpreter state ... ]
% otpcl> print $foo
% howdy
% ok
% '''
%
% In fact, most OTPCL features are in turn implemented as OTPCL-aware
% command-backing functions; that is: OTPCL exposes its own
% functionality as OTPCL commands wherever it's possible/practical to
% do so.
%
% Of course, one may also do this from any OTP application that uses
% OTPCL, e.g. one written in Erlang:
%
% ```
% erl> State0 = otpcl_env:default_state().
% [ ... pile of interpreter state ... ]
% erl> {ok, State1} = otpcl_stdlib:set([foo, <<"howdy~n">>], State0).
% [ ... pile of interpreter state ... ]
% erl> {ok, State2} = otpcl_eval:eval("print $foo", State1).
% howdy
% [ ... pile of interpreter state ... ]
% '''
-module(otpcl_eval).
-include("otpcl.hrl").
-export(['CMD_interpret'/2, interpret/1, interpret/2, 'CMD_eval'/2, eval/1,
eval/2, 'CMD_eval_file'/2, eval_file/1, eval_file/2, make_charstring/1,
make_binstring/1, make_atomic/1, make_atom/1]).
% @doc OTPCL command wrapper for interpret/2.  With one argument the
% AST runs in the caller's state; with an explicit inner state the AST
% runs there instead and the caller's (outer) state is left untouched.
'CMD_interpret'([AST], State) ->
    interpret(AST, State);
'CMD_interpret'([AST, InnerState], OuterState) ->
    {interpret(AST, InnerState), OuterState}.
% @doc OTPCL command wrapper for eval/2; same state semantics as
% 'CMD_interpret'/2 (optional sandboxed inner state).
'CMD_eval'([Text], State) ->
    eval(Text, State);
'CMD_eval'([Text, InnerState], OuterState) ->
    {eval(Text, InnerState), OuterState}.
% @doc OTPCL command wrapper for eval_file/2; same state semantics as
% 'CMD_interpret'/2 (optional sandboxed inner state).
'CMD_eval_file'([Path], State) ->
    eval_file(Path, State);
'CMD_eval_file'([Path, InnerState], OuterState) ->
    {eval_file(Path, InnerState), OuterState}.
-ifdef(DEBUG).
-define(DEBUG_PRINT(Msg, Args), io:format(Msg, Args)).
-else.
-define(DEBUG_PRINT(Msg, Args), ok).
-endif.
% Build stuff out of tokens
-spec make_charstring([token()]) -> string().
% @doc Extract a character string from a token string: every token is
% a {Char, Position} pair and only the characters are kept.
make_charstring(Tokens) ->
    lists:map(fun({Char, _Pos}) -> Char end, Tokens).

% 2-arity variant so it can serve as an OTPCL-aware command; the state
% argument is accepted and ignored.
make_charstring(Tokens, _State) ->
    make_charstring(Tokens).
-spec make_binstring([token()]) -> binary().
% @doc Extract a binary string from a token string.
% NOTE(review): list_to_binary/1 is byte-oriented; tokens holding code
% points > 255 would make this crash with badarg — confirm whether the
% tokenizer can produce such characters.
make_binstring(Tokens) ->
    list_to_binary(make_charstring(Tokens)).

% State-ignoring 2-arity variant (OTPCL-aware command shape).
make_binstring(Tokens, _State) ->
    make_binstring(Tokens).
-spec make_atomic([token()]) -> atom() | integer() | float().
% @doc Extract a float, integer, or atom (in order of preference) from
% a token string.
make_atomic(Tokens) ->
    make_atomic(Tokens, otpcl_env:minimal_state()).

% In a "stringy" interpreter, unquoted words stay binaries instead of
% being converted to atoms/numbers.
make_atomic(Tokens, State) ->
    Text = make_charstring(Tokens),
    case interpreter_is_stringy(State) of
        true ->
            list_to_binary(Text);
        false ->
            make_atomic(Text, float, string:to_float(Text), State)
    end.

% Floats: string:to_float succeeded with no trailing characters.
make_atomic(_, float, {Float, []}, _State) ->
    Float;
make_atomic(Text, float, _, State) ->
    make_atomic(Text, integer, string:to_integer(Text), State);
% Integers (if this conversion attempt fails, then we just treat it as
% an ordinary atom)
make_atomic(_, integer, {Int, []}, _State) ->
    Int;
% NOTE(review): list_to_atom/1 on parsed script text — atoms are never
% garbage collected, so evaluating untrusted scripts can exhaust the
% atom table.  Consider a stringy interpreter for untrusted input.
make_atomic(Text, integer, _, _State) ->
    list_to_atom(Text).
-spec make_atom([token()]) -> atom().
% @doc Extract an atom from a token string. This skips any attempt to
% check if an atom is a number (which means single-quoted atoms might
% technically be more efficient than unquoted atoms at the moment...).
make_atom(Tokens) ->
    make_atom(Tokens, otpcl_env:minimal_state()).

% Stringy interpreters keep the binary; otherwise convert to an atom.
% NOTE(review): binary_to_atom/1 on parsed script text can exhaust the
% non-GC'd atom table when evaluating untrusted input.
make_atom(Tokens, State) ->
    Text = make_binstring(Tokens),
    case interpreter_is_stringy(State) of
        true ->
            Text;
        false ->
            binary_to_atom(Text)
    end.
% @doc Determines if the interpreter is "stringy" (i.e. it emits
% binstrings instead of atoms).  Stringiness is signalled simply by
% the STRINGY_INTERPRETER variable being set to anything at all; only
% a lookup error means "not stringy".
interpreter_is_stringy(State) ->
    case otpcl_meta:get(<<"STRINGY_INTERPRETER">>, State) of
        {error, _, _} ->
            false;
        _ ->
            true
    end.
% Here's the meat of the interpreter.
-spec interpret(tree() | [tree()]) -> eval_success() | eval_error().
% @doc Interpret the parse nodes with the default OTPCL starting state.
% Convenience wrapper around interpret/2.
interpret(Nodes) ->
    interpret(Nodes, otpcl_env:default_state()).
-spec interpret(tree() | [tree()], state()) -> eval_success() | eval_error().
% @doc Interpret the parse nodes with a custom starting state.  Leaf
% nodes (words/strings) evaluate to plain values; command/program
% nodes evaluate to {Result, NewState} tuples.
interpret({parsed, unquoted, Tokens}, State) ->
    make_atomic(Tokens, State);
interpret({parsed, single_quoted, Tokens}, State) ->
    make_atom(Tokens, State);
interpret({parsed, double_quoted, Tokens}, _State) ->
    make_binstring(Tokens); % TODO: allow var/funcall substitution (maybe?)
interpret({parsed, braced, Tokens}, _State) ->
    make_binstring(Tokens);
interpret({parsed, backquoted, Tokens}, _State) ->
    make_charstring(Tokens);
% Both variable syntaxes funnel into the common `var' clause.
interpret({parsed, var_unquoted, Tokens}, State) ->
    interpret({parsed, var, Tokens}, State);
interpret({parsed, var_braced, Tokens}, State) ->
    interpret({parsed, var, Tokens}, State);
% The repeated `State' in the match asserts that a variable lookup
% never changes the state.
interpret({parsed, var, Tokens}, State) ->
    {Val, State} = otpcl_meta:get(make_binstring(Tokens), State),
    Val;
% FIXME: any state changes here (new/modified functions and variables,
% for example) won't actually persist beyond a list/tuple/funcall
% literal until I define some better logic here.  This might end up
% being a "feature", though.
interpret({parsed, list, Items}, State) ->
    [interpret(I, State) || I <- Items];
interpret({parsed, tuple, Items}, State) ->
    list_to_tuple([interpret(I, State) || I <- Items]);
% A funcall literal yields only the command's result; its state
% changes are discarded (see FIXME above).
interpret({parsed, funcall, Words}, State) ->
    [Cmd|Args] = [interpret(I, State) || I <- Words],
    {Res, _} = otpcl_meta:apply(Cmd, Args, State),
    Res;
% An empty command evaluates to the current RETVAL.
interpret({parsed, command, []}, State) ->
    otpcl_meta:get(<<"RETVAL">>, State);
interpret({parsed, command, Words}, State) ->
    [Cmd|Args] = [interpret(I, State) || I <- Words],
    otpcl_meta:apply(Cmd, Args, State);
% Comments are no-ops; keep the current RETVAL.
interpret({parsed, comment, _}, State) ->
    otpcl_meta:get(<<"RETVAL">>, State);
% Programs thread state through each command, stashing each result in
% RETVAL so the final command's value is the program's value.
interpret({parsed, program, [Cmd|Rest]}, State) ->
    {RetVal, NewState} = interpret(Cmd, State),
    {ok, RetState} = otpcl_meta:set(<<"RETVAL">>, RetVal, NewState),
    interpret({parsed, program, Rest}, RetState);
interpret({parsed, program, []}, State) ->
    otpcl_meta:get(<<"RETVAL">>, State);
% Potential FIXME: if a program ends with a pipe with no arguments, should the
% parser really be sending a raw pipe token up to the interpreter? Or should
% that properly be a {parsed, command, [Cmd]}? Handling it here for now.
interpret({parsed, pipe, [Tokens]}, State) ->
    Cmd = interpret(Tokens, State),
    otpcl_meta:apply(Cmd, [], State);
% Tagged errors for unknown node types vs. non-parse-node input.
interpret({parsed, Type, Data}, State) ->
    {error, {unknown_node_type, Type, Data}, State};
interpret(InvalidNode, State) ->
    {error, {not_an_otpcl_parse_node, InvalidNode}, State}.
% And some nice friendly wrappers around that interpreter
-spec eval(eval_input()) -> eval_success() | eval_error().
% @doc Evaluate a string with the default OTPCL starting state.
% Convenience wrapper around eval/2.
eval(Src) ->
    eval(Src, otpcl_env:default_state()).
-spec eval(eval_input(), state()) -> eval_success() | eval_error().
% @doc Evaluate a string with a custom starting state.
% (Commented-out clauses below are an abandoned list-wrapping calling
% convention, kept for reference.)
%% eval(Src = [Char|_], State) when is_integer(Char) ->
%%     eval([Src], State);
%% eval([Src, SubState], State) ->
%%     {eval([Src], SubState), State};
eval(Src, State) ->
    %% The `{ok, Tree, []}' match asserts a complete parse with no
    %% leftover tokens; anything else crashes with badmatch.
    {ok, Tree, []} = otpcl_parse:parse(Src),
    interpret(Tree, State).
%% eval(Src, State) ->
%%     eval([Src], State).
-spec eval_file(filename()) -> eval_success() | eval_error().
% @doc Evaluate the named file with the default OTPCL starting state.
% Convenience wrapper around eval_file/2.
eval_file(Filename) ->
    eval_file(Filename, otpcl_env:default_state()).
-spec eval_file(filename(), state()) -> eval_success() | eval_error().
% @doc Evaluate the named file with a custom starting state.
% A bare character list (filename) is wrapped into a one-element list
% so both calling conventions land on the clause below.
eval_file(Filename = [Char|_], State) when is_integer(Char) ->
    eval_file([Filename], State);
eval_file([Filename], State) ->
    {ok, Src} = file:read_file(Filename),
    %% initpos/1 seeds token positions with the filename so parse
    %% errors can point back at the file.
    Tokens = otpcl_parse:scan(Src, otpcl_parse:initpos(Filename)),
    {ok, Tree, []} = otpcl_parse:parse(Tokens),
    interpret(Tree, State);
eval_file(Filename, State) ->
eval_file([Filename], State). | src/otpcl_eval.erl | 0.703651 | 0.615131 | otpcl_eval.erl | starcoder |
%% vi:ts=4 sw=4 et
%% @copyright <NAME>
%% @doc Format dates in erlang
%%
%% Licensed under the MIT license
%%
%% This module formats erlang dates in the form {{Year, Month, Day},
%% {Hour, Minute, Second}} to printable strings, using (almost)
%% equivalent formatting rules as http://uk.php.net/date, US vs
%% European dates are disambiguated in the same way as
%% http://uk.php.net/manual/en/function.strtotime.php That is, Dates
%% in the m/d/y or d-m-y formats are disambiguated by looking at the
%% separator between the various components: if the separator is a
%% slash (/), then the American m/d/y is assumed; whereas if the
%% separator is a dash (-) or a dot (.), then the European d-m-y
%% format is assumed. To avoid potential ambiguity, it's best to use
%% ISO 8601 (YYYY-MM-DD) dates.
%%
%% erlang has no concept of timezone so the following
%% formats are not implemented: B e I O P T Z
%% formats c and r will also differ slightly
%%
%% See tests at bottom for examples
-module(ec_date).
-author("<NAME> <<EMAIL>>").
-export([format/1, format/2]).
-export([format_iso8601/1]).
-export([parse/1, parse/2]).
-export([nparse/1]).
-export([tokenise/2]).
%% These are used exclusively as guards and so the function like
%% defines make sense
-define(is_num(X), (X >= $0 andalso X =< $9)).
-define(is_meridian(X), (X == [] orelse X == [am] orelse X == [pm])).
-define(is_us_sep(X), (X == $/)).
-define(is_world_sep(X), (X == $-)).
-define(MONTH_TAG, month).
-define(is_year(X), (is_integer(X) andalso X > 31)).
-define(is_day(X), (is_integer(X) andalso X =< 31)).
-define(is_hinted_month(X), (is_tuple(X) andalso size(X) =:= 2 andalso element(1, X) =:= ?MONTH_TAG)).
-define(is_month(X), ((is_integer(X) andalso X =< 12) orelse ?is_hinted_month(X))).
-define(is_tz_offset(H1, H2, M1, M2), (?is_num(H1) andalso ?is_num(H2) andalso ?is_num(M1) andalso ?is_num(M2))).
-define(GREGORIAN_SECONDS_1970, 62167219200).
-define(ISO_8601_DATETIME_FORMAT, "Y-m-dTH:i:sZ").
-define(ISO_8601_DATETIME_WITH_MS_FORMAT, "Y-m-dTH:i:s.fZ").
-type year() :: non_neg_integer().
-type month() :: 1..12 | {?MONTH_TAG, 1..12}.
-type day() :: 1..31.
-type hour() :: 0..23.
-type minute() :: 0..59.
-type second() :: 0..59.
-type microsecond() :: 0..999999.
-type daynum() :: 1..7.
-type date() :: {year(), month(), day()}.
-type time() :: {hour(), minute(), second()} | {hour(), minute(), second(), microsecond()}.
-type datetime() :: {date(), time()}.
-type now() :: {integer(), integer(), integer()}.
%%
%% EXPORTS
%%
-spec format(string()) -> string().
%% @doc format the current time as Format.  Note: this uses
%% calendar:universal_time(), i.e. UTC — not local time.
format(Format) ->
    format(Format, calendar:universal_time(), []).
-spec format(string(), datetime() | now()) -> string().
%% @doc format Date as Format.  A now()-style {Mega, Sec, Micro} tuple
%% is first converted to a datetime, carrying the microseconds along
%% as a fourth time element so `f' formatting can use them.
format(Format, {_, _, Ms} = Now) ->
    {Date, {H, M, S}} = calendar:now_to_datetime(Now),
    format(Format, {Date, {H, M, S, Ms}}, []);
format(Format, Date) ->
    format(Format, Date, []).
-spec format_iso8601(datetime()) -> string().
%% @doc format date in the ISO8601 format
%% This always puts 'Z' as time zone, since we have no notion of timezone.
%% The millisecond variant is chosen when the time tuple carries a
%% fourth (microsecond) element.
format_iso8601({{_, _, _}, {_, _, _}} = Date) ->
    format(?ISO_8601_DATETIME_FORMAT, Date);
format_iso8601({{_, _, _}, {_, _, _, _}} = Date) ->
    format(?ISO_8601_DATETIME_WITH_MS_FORMAT, Date).
-spec parse(string()) -> datetime().
%% @doc parses the datetime from a string, defaulting missing parts
%% from the current UTC time.  Throws {ec_date, {bad_date, _}} on
%% unparseable input.
parse(Date) ->
    do_parse(Date, calendar:universal_time(), []).
-spec parse(string(), datetime() | now()) -> datetime().
%% @doc parses the datetime from a string, defaulting missing parts
%% from the supplied reference time (datetime or now() tuple).
parse(Date, {_, _, _} = Now) ->
    do_parse(Date, calendar:now_to_datetime(Now), []);
parse(Date, Now) ->
    do_parse(Date, Now, []).
%% Tokenise the (upper-cased) input, run the token parser, strip any
%% month hints, then validate.  Results come back either as a plain
%% {Date, Time} or as {Date, Time, {Microseconds}}; the latter is
%% folded into a 4-element time tuple.  All failure paths throw
%% {?MODULE, {bad_date, ...}}.
do_parse(Date, Now, Opts) ->
    case filter_hints(parse(tokenise(uppercase(Date), []), Now, Opts)) of
        {error, bad_date} ->
            erlang:throw({?MODULE, {bad_date, Date}});
        %% Whole-second result: all six fields must be numeric and the
        %% date must exist on the calendar.
        {D1, T1} = {{Y, M, D}, {H, M1, S}}
          when is_number(Y), is_number(M),
               is_number(D), is_number(H),
               is_number(M1), is_number(S) ->
            case calendar:valid_date(D1) of
                true -> {D1, T1};
                false -> erlang:throw({?MODULE, {bad_date, Date}})
            end;
        %% Sub-second result: microseconds are appended as a fourth
        %% time element.
        {D1, _T1, {Ms}} = {{Y, M, D}, {H, M1, S}, {Ms}}
          when is_number(Y), is_number(M),
               is_number(D), is_number(H),
               is_number(M1), is_number(S),
               is_number(Ms) ->
            case calendar:valid_date(D1) of
                true -> {D1, {H, M1, S, Ms}};
                false -> erlang:throw({?MODULE, {bad_date, Date}})
            end;
        Unknown -> erlang:throw({?MODULE, {bad_date, Date, Unknown}})
    end.
%% Unwrap {month, M} tagging left by the tokenizer on named months
%% ("Jan", "aug", ...), for both the plain and microsecond result
%% shapes; everything else passes through untouched.
filter_hints({{Y, {?MONTH_TAG, M}, D}, {H, M1, S}}) ->
    filter_hints({{Y, M, D}, {H, M1, S}});
filter_hints({{Y, {?MONTH_TAG, M}, D}, {H, M1, S}, {Ms}}) ->
    filter_hints({{Y, M, D}, {H, M1, S}, {Ms}});
filter_hints(Other) ->
    Other.
-spec nparse(string()) -> now().
%% @doc parses the datetime from a string into erlang `now()' format:
%% {MegaSeconds, Seconds, MicroSeconds} since the Unix epoch.
nparse(Date) ->
    {DateS, Time} = parse(Date),
    %% Normalize the time to four elements; microseconds default to 0.
    {H, M, S, Micros} =
        case Time of
            {H0, M0, S0, Ms0} -> {H0, M0, S0, Ms0};
            {H0, M0, S0} -> {H0, M0, S0, 0}
        end,
    Gregorian = calendar:datetime_to_gregorian_seconds({DateS, {H, M, S}}),
    Epoch = Gregorian - ?GREGORIAN_SECONDS_1970,
    {Epoch div 1000000, Epoch rem 1000000, Micros}.
%%
%% LOCAL FUNCTIONS
%%
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $., Micros, $Z], _Now, _Opts)
when ?is_world_sep(X)
andalso (Micros >= 0 andalso Micros < 1000000)
andalso Year > 31 ->
{{Year, Month, Day}, {hour(Hour, []), Min, Sec}, {Micros}};
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $Z], _Now, _Opts)
when (?is_us_sep(X) orelse ?is_world_sep(X))
andalso Year > 31 ->
{{Year, Month, Day}, {hour(Hour, []), Min, Sec}};
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $., Micros, $+, Off | _Rest], _Now, _Opts)
when (?is_us_sep(X) orelse ?is_world_sep(X))
andalso (Micros >= 0 andalso Micros < 1000000)
andalso Year > 31 ->
{{Year, Month, Day}, {hour(Hour, []) - Off, Min, Sec}, {Micros}};
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $+, Off | _Rest], _Now, _Opts)
when (?is_us_sep(X) orelse ?is_world_sep(X))
andalso Year > 31 ->
{{Year, Month, Day}, {hour(Hour, []) - Off, Min, Sec}, {0}};
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $., Micros, $-, Off | _Rest], _Now, _Opts)
when (?is_us_sep(X) orelse ?is_world_sep(X))
andalso (Micros >= 0 andalso Micros < 1000000)
andalso Year > 31 ->
{{Year, Month, Day}, {hour(Hour, []) + Off, Min, Sec}, {Micros}};
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $-, Off | _Rest], _Now, _Opts)
when (?is_us_sep(X) orelse ?is_world_sep(X))
andalso Year > 31 ->
{{Year, Month, Day}, {hour(Hour, []) + Off, Min, Sec}, {0}};
%% NOTE(review): continuation clauses of parse/3 (earlier clauses are not
%% shown here). The token list comes from tokenise/2: integers, separator
%% characters, $:, $., and an optional meridian tail PAM = [am] | [pm] | [].
%% Ms is already scaled to microseconds by tokenise/2. Clause order is
%% significant — tightly-guarded shapes must stay ahead of looser ones.
%% Date/Times 22 Aug 2008 6:35.0001 PM
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $., Ms | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso
(?is_us_sep(X) orelse ?is_world_sep(X))
andalso ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}, {Ms}};
parse([Month, X, Day, X, Year, Hour, $:, Min, $:, Sec, $., Ms | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso ?is_us_sep(X)
andalso ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}, {Ms}};
parse([Day, X, Month, X, Year, Hour, $:, Min, $:, Sec, $., Ms | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso ?is_world_sep(X)
andalso ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}, {Ms}};
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec, $., Ms], _Now, _Opts)
when (?is_us_sep(X) orelse ?is_world_sep(X))
andalso ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, []), Min, Sec}, {Ms}};
parse([Month, X, Day, X, Year, Hour, $:, Min, $:, Sec, $., Ms], _Now, _Opts)
when ?is_us_sep(X) andalso ?is_month(Month) ->
{{Year, Month, Day}, {hour(Hour, []), Min, Sec}, {Ms}};
parse([Day, X, Month, X, Year, Hour, $:, Min, $:, Sec, $., Ms], _Now, _Opts)
when ?is_world_sep(X) andalso ?is_month(Month) ->
{{Year, Month, Day}, {hour(Hour, []), Min, Sec}, {Ms}};
%% Date/Times Dec 1st, 2012 6:25 PM
parse([Month, Day, Year, Hour, $:, Min, $:, Sec | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso ?is_hinted_month(Month) andalso ?is_day(Day) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}};
parse([Month, Day, Year, Hour, $:, Min | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso ?is_hinted_month(Month) andalso ?is_day(Day) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, 0}};
parse([Month, Day, Year, Hour | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso ?is_hinted_month(Month) andalso ?is_day(Day) ->
{{Year, Month, Day}, {hour(Hour, PAM), 0, 0}};
%% Date/Times Dec 1st, 2012 18:25:15 (no AM/PM)
parse([Month, Day, Year, Hour, $:, Min, $:, Sec], _Now, _Opts)
when ?is_hinted_month(Month) andalso ?is_day(Day) ->
{{Year, Month, Day}, {hour(Hour, []), Min, Sec}};
parse([Month, Day, Year, Hour, $:, Min], _Now, _Opts)
when ?is_hinted_month(Month) andalso ?is_day(Day) ->
{{Year, Month, Day}, {hour(Hour, []), Min, 0}};
%% Date/Times Fri Nov 21 14:55:26 +0000 2014 (Twitter format)
parse([Month, Day, Hour, $:, Min, $:, Sec, Year], _Now, _Opts)
when ?is_hinted_month(Month), ?is_day(Day), ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, []), Min, Sec}};
%% Times - 21:45, 13:45:54, 13:15PM etc
%% Time-only inputs keep the date part of `Now`.
parse([Hour, $:, Min, $:, Sec | PAM], {Date, _Time}, _O) when ?is_meridian(PAM) ->
{Date, {hour(Hour, PAM), Min, Sec}};
parse([Hour, $:, Min | PAM], {Date, _Time}, _Opts) when ?is_meridian(PAM) ->
{Date, {hour(Hour, PAM), Min, 0}};
parse([Hour | PAM], {Date, _Time}, _Opts) when ?is_meridian(PAM) ->
{Date, {hour(Hour, PAM), 0, 0}};
%% Dates (Any combination with word month "aug 8th, 2008", "8 aug 2008", "2008 aug 21" "2008 5 aug" )
%% Will work because of the "Hinted month"
%% Date-only inputs keep the time part of `Now`.
parse([Day, Month, Year], {_Date, Time}, _Opts)
when ?is_day(Day) andalso ?is_hinted_month(Month) andalso ?is_year(Year) ->
{{Year, Month, Day}, Time};
parse([Month, Day, Year], {_Date, Time}, _Opts)
when ?is_day(Day) andalso ?is_hinted_month(Month) andalso ?is_year(Year) ->
{{Year, Month, Day}, Time};
parse([Year, Day, Month], {_Date, Time}, _Opts)
when ?is_day(Day) andalso ?is_hinted_month(Month) andalso ?is_year(Year) ->
{{Year, Month, Day}, Time};
parse([Year, Month, Day], {_Date, Time}, _Opts)
when ?is_day(Day) andalso ?is_hinted_month(Month) andalso ?is_year(Year) ->
{{Year, Month, Day}, Time};
%% Dates 23/april/1963
parse([Day, Month, Year], {_Date, Time}, _Opts) ->
{{Year, Month, Day}, Time};
parse([Year, X, Month, X, Day], {_Date, Time}, _Opts)
when (?is_us_sep(X) orelse ?is_world_sep(X))
andalso ?is_year(Year) ->
{{Year, Month, Day}, Time};
parse([Month, X, Day, X, Year], {_Date, Time}, _Opts) when ?is_us_sep(X) ->
{{Year, Month, Day}, Time};
parse([Day, X, Month, X, Year], {_Date, Time}, _Opts) when ?is_world_sep(X) ->
{{Year, Month, Day}, Time};
%% Date/Times 22 Aug 2008 6:35 PM
%% Time is "7 PM"
parse([Year, X, Month, X, Day, Hour | PAM], _Date, _Opts)
when ?is_meridian(PAM) andalso
(?is_us_sep(X) orelse ?is_world_sep(X))
andalso ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, PAM), 0, 0}};
parse([Day, X, Month, X, Year, Hour | PAM], _Date, _Opts)
when ?is_meridian(PAM) andalso ?is_world_sep(X) ->
{{Year, Month, Day}, {hour(Hour, PAM), 0, 0}};
parse([Month, X, Day, X, Year, Hour | PAM], _Date, _Opts)
when ?is_meridian(PAM) andalso ?is_us_sep(X) ->
{{Year, Month, Day}, {hour(Hour, PAM), 0, 0}};
%% Time is "6:35 PM" ms return
parse([Year, X, Month, X, Day, Hour, $:, Min | PAM], _Date, _Opts)
when ?is_meridian(PAM) andalso
(?is_us_sep(X) orelse ?is_world_sep(X))
andalso ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, 0}};
parse([Day, X, Month, X, Year, Hour, $:, Min | PAM], _Date, _Opts)
when ?is_meridian(PAM) andalso ?is_world_sep(X) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, 0}};
parse([Month, X, Day, X, Year, Hour, $:, Min | PAM], _Date, _Opts)
when ?is_meridian(PAM) andalso ?is_us_sep(X) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, 0}};
%% Time is "6:35:15 PM"
parse([Year, X, Month, X, Day, Hour, $:, Min, $:, Sec | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso
(?is_us_sep(X) orelse ?is_world_sep(X))
andalso ?is_year(Year) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}};
parse([Month, X, Day, X, Year, Hour, $:, Min, $:, Sec | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso ?is_us_sep(X) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}};
parse([Day, X, Month, X, Year, Hour, $:, Min, $:, Sec | PAM], _Now, _Opts)
when ?is_meridian(PAM) andalso ?is_world_sep(X) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}};
%% Loosely-guarded fallbacks: rely on the more specific clauses above
%% having already claimed ambiguous token shapes.
parse([Day, Month, Year, Hour | PAM], _Now, _Opts)
when ?is_meridian(PAM) ->
{{Year, Month, Day}, {hour(Hour, PAM), 0, 0}};
parse([Day, Month, Year, Hour, $:, Min | PAM], _Now, _Opts)
when ?is_meridian(PAM) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, 0}};
parse([Day, Month, Year, Hour, $:, Min, $:, Sec | PAM], _Now, _Opts)
when ?is_meridian(PAM) ->
{{Year, Month, Day}, {hour(Hour, PAM), Min, Sec}};
%% Catch-all: token list matched no known date/time shape.
parse(_Tokens, _Now, _Opts) ->
{error, bad_date}.
%% @doc Turn an (uppercased) date string into a token list for parse/3:
%% integers, month hints {?MONTH_TAG, N}, am/pm atoms, and the literal
%% separator characters $:, $/, $-, $., $Z, $+. Clause order is
%% load-bearing: longer literals must precede their prefixes
%% (e.g. "SEPT" before "SEP" before "ST", "TH" before "T").
tokenise([], Acc) ->
lists:reverse(Acc);
%% ISO 8601 fractions of a second
%% Fractions are normalised to microseconds: fewer digits are scaled up
%% (".1" -> 100000, ".12345" -> 123450, etc.).
tokenise([$., N1, N2, N3, N4, N5, N6 | Rest], Acc)
when ?is_num(N1), ?is_num(N2), ?is_num(N3), ?is_num(N4), ?is_num(N5), ?is_num(N6) ->
tokenise(Rest, [ltoi([N1, N2, N3, N4, N5, N6]), $. | Acc]);
tokenise([$., N1, N2, N3, N4, N5 | Rest], Acc)
when ?is_num(N1), ?is_num(N2), ?is_num(N3), ?is_num(N4), ?is_num(N5) ->
tokenise(Rest, [ltoi([N1, N2, N3, N4, N5]) * 10, $. | Acc]);
tokenise([$., N1, N2, N3, N4 | Rest], Acc)
when ?is_num(N1), ?is_num(N2), ?is_num(N3), ?is_num(N4) ->
tokenise(Rest, [ltoi([N1, N2, N3, N4]) * 100, $. | Acc]);
tokenise([$., N1, N2, N3 | Rest], Acc) when ?is_num(N1), ?is_num(N2), ?is_num(N3) ->
tokenise(Rest, [ltoi([N1, N2, N3]) * 1000, $. | Acc]);
tokenise([$., N1, N2 | Rest], Acc) when ?is_num(N1), ?is_num(N2) ->
tokenise(Rest, [ltoi([N1, N2]) * 10000, $. | Acc]);
tokenise([$., N1 | Rest], Acc) when ?is_num(N1) ->
tokenise(Rest, [ltoi([N1]) * 100000, $. | Acc]);
%% Greedy digit runs (up to six digits) become single integer tokens.
tokenise([N1, N2, N3, N4, N5, N6 | Rest], Acc)
when ?is_num(N1), ?is_num(N2), ?is_num(N3), ?is_num(N4), ?is_num(N5), ?is_num(N6) ->
tokenise(Rest, [ltoi([N1, N2, N3, N4, N5, N6]) | Acc]);
tokenise([N1, N2, N3, N4, N5 | Rest], Acc)
when ?is_num(N1), ?is_num(N2), ?is_num(N3), ?is_num(N4), ?is_num(N5) ->
tokenise(Rest, [ltoi([N1, N2, N3, N4, N5]) | Acc]);
tokenise([N1, N2, N3, N4 | Rest], Acc)
when ?is_num(N1), ?is_num(N2), ?is_num(N3), ?is_num(N4) ->
tokenise(Rest, [ltoi([N1, N2, N3, N4]) | Acc]);
tokenise([N1, N2, N3 | Rest], Acc)
when ?is_num(N1), ?is_num(N2), ?is_num(N3) ->
tokenise(Rest, [ltoi([N1, N2, N3]) | Acc]);
tokenise([N1, N2 | Rest], Acc)
when ?is_num(N1), ?is_num(N2) ->
tokenise(Rest, [ltoi([N1, N2]) | Acc]);
tokenise([N1 | Rest], Acc)
when ?is_num(N1) ->
tokenise(Rest, [ltoi([N1]) | Acc]);
%% Worded Months get tagged with ?MONTH_TAG to let the parser know that these
%% are unambiguously declared to be months. This way there's no confusion
%% between, for example: "Aug 12" and "12 Aug"
%% These hint tags are filtered in filter_hints/1 above.
tokenise("JANUARY" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 1} | Acc]);
tokenise("JAN" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 1} | Acc]);
tokenise("FEBRUARY" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 2} | Acc]);
tokenise("FEB" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 2} | Acc]);
tokenise("MARCH" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 3} | Acc]);
tokenise("MAR" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 3} | Acc]);
tokenise("APRIL" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 4} | Acc]);
tokenise("APR" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 4} | Acc]);
tokenise("MAY" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 5} | Acc]);
tokenise("JUNE" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 6} | Acc]);
tokenise("JUN" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 6} | Acc]);
tokenise("JULY" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 7} | Acc]);
tokenise("JUL" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 7} | Acc]);
tokenise("AUGUST" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 8} | Acc]);
tokenise("AUG" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 8} | Acc]);
tokenise("SEPTEMBER" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 9} | Acc]);
tokenise("SEPT" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 9} | Acc]);
tokenise("SEP" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 9} | Acc]);
tokenise("OCTOBER" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 10} | Acc]);
tokenise("OCT" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 10} | Acc]);
tokenise("NOVEMBER" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 11} | Acc]);
tokenise("NOVEM" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 11} | Acc]);
tokenise("NOV" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 11} | Acc]);
tokenise("DECEMBER" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 12} | Acc]);
tokenise("DECEM" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 12} | Acc]);
tokenise("DEC" ++ Rest, Acc) -> tokenise(Rest, [{?MONTH_TAG, 12} | Acc]);
tokenise([$: | Rest], Acc) -> tokenise(Rest, [$: | Acc]);
tokenise([$/ | Rest], Acc) -> tokenise(Rest, [$/ | Acc]);
tokenise([$- | Rest], Acc) -> tokenise(Rest, [$- | Acc]);
tokenise("AM" ++ Rest, Acc) -> tokenise(Rest, [am | Acc]);
tokenise("PM" ++ Rest, Acc) -> tokenise(Rest, [pm | Acc]);
tokenise("A" ++ Rest, Acc) -> tokenise(Rest, [am | Acc]);
tokenise("P" ++ Rest, Acc) -> tokenise(Rest, [pm | Acc]);
%% Postel's Law
%%
%% be conservative in what you do,
%% be liberal in what you accept from others.
%%
%% See RFC 793 Section 2.10 http://tools.ietf.org/html/rfc793
%%
%% Mebbies folk want to include Saturday etc in a date, nae borra
tokenise("MONDAY" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("MON" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("TUESDAY" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("TUES" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("TUE" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("WEDNESDAY" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("WEDS" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("WED" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("THURSDAY" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("THURS" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("THUR" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("THU" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("FRIDAY" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("FRI" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("SATURDAY" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("SAT" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("SUNDAY" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("SUN" ++ Rest, Acc) -> tokenise(Rest, Acc);
%% Hmm Excel reports GMT in times so nuke that too
tokenise("GMT" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("UTC" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("DST" ++ Rest, Acc) -> tokenise(Rest, Acc); % daylight saving time
tokenise([$, | Rest], Acc) -> tokenise(Rest, Acc);
tokenise([32 | Rest], Acc) -> tokenise(Rest, Acc); % Spaces
%% Ordinal suffixes ("4TH", "2ND", "1ST") and filler words are dropped.
tokenise("TH" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("ND" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("ST" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("OF" ++ Rest, Acc) -> tokenise(Rest, Acc);
tokenise("T" ++ Rest, Acc) -> tokenise(Rest, Acc); % 2012-12-12T12:12:12 ISO formatting.
tokenise([$Z | Rest], Acc) -> tokenise(Rest, [$Z | Acc]); % 2012-12-12T12:12:12Zulu
tokenise([$+, H1, H2, M1, M2 | Rest], Acc) when ?is_tz_offset(H1, H2, M1, M2) ->
tokenise(Rest, Acc); % Tue Nov 11 15:03:18 +0000 2014 Twitter format
tokenise([$+ | Rest], Acc) -> tokenise(Rest, [$+ | Acc]); % 2012-12-12T12:12:12.xxxx+ ISO formatting.
%% Anything else is preserved as a tagged bad token so parse/3 fails.
tokenise([Else | Rest], Acc) ->
tokenise(Rest, [{bad_token, Else} | Acc]).
%% @doc Convert a 12-hour clock value to 24-hour time, given the
%% meridian tail produced by tokenise/2 ([] means already 24-hour).
hour(H, []) -> H;
hour(12, [am]) -> 0;
hour(12, [pm]) -> 12;
hour(H, [am]) -> H;
hour(H, [pm]) -> H + 12.
-spec format(string(), datetime(), list()) -> string().
%% @doc Render a PHP date()-style format string against a datetime.
%% Dt is either {{Y,M,D},{H,Min,S}} or, for microsecond precision,
%% {{Y,M,D},{H,Min,S,Micros}}. The 3-tuple time clauses come first so
%% plain datetimes match them before the 4-tuple ("ms") clauses.
%% Finished, return
format([], _Date, Acc) ->
    lists:flatten(lists:reverse(Acc));
%% Escape backslashes
format([$\\, H | T], Dt, Acc) ->
    format(T, Dt, [H | Acc]);
%% Year Formats
format([$Y | T], {{Y, _, _}, _} = Dt, Acc) ->
    format(T, Dt, [itol(Y) | Acc]);
format([$y | T], {{Y, _, _}, _} = Dt, Acc) ->
    %% NOTE(review): assumes a 4-digit year; years < 1000 crash this match.
    [_, _, Y3, Y4] = itol(Y),
    format(T, Dt, [[Y3, Y4] | Acc]);
format([$L | T], {{Y, _, _}, _} = Dt, Acc) ->
    format(T, Dt, [itol(is_leap(Y)) | Acc]);
format([$o | T], {Date, _} = Dt, Acc) ->
    format(T, Dt, [itol(iso_year(Date)) | Acc]);
%% Month Formats
format([$n | T], {{_, M, _}, _} = Dt, Acc) ->
    format(T, Dt, [itol(M) | Acc]);
format([$m | T], {{_, M, _}, _} = Dt, Acc) ->
    format(T, Dt, [pad2(M) | Acc]);
format([$M | T], {{_, M, _}, _} = Dt, Acc) ->
    format(T, Dt, [smonth(M) | Acc]);
format([$F | T], {{_, M, _}, _} = Dt, Acc) ->
    format(T, Dt, [month(M) | Acc]);
format([$t | T], {{Y, M, _}, _} = Dt, Acc) ->
    format(T, Dt, [itol(calendar:last_day_of_the_month(Y, M)) | Acc]);
%% Week Formats
format([$W | T], {Date, _} = Dt, Acc) ->
    format(T, Dt, [pad2(iso_week(Date)) | Acc]);
%% Day Formats
format([$j | T], {{_, _, D}, _} = Dt, Acc) ->
    format(T, Dt, [itol(D) | Acc]);
format([$S | T], {{_, _, D}, _} = Dt, Acc) ->
    format(T, Dt, [suffix(D) | Acc]);
format([$d | T], {{_, _, D}, _} = Dt, Acc) ->
    format(T, Dt, [pad2(D) | Acc]);
format([$D | T], {Date, _} = Dt, Acc) ->
    format(T, Dt, [sdayd(Date) | Acc]);
format([$l | T], {Date, _} = Dt, Acc) ->
    format(T, Dt, [day(calendar:day_of_the_week(Date)) | Acc]);
format([$N | T], {Date, _} = Dt, Acc) ->
    format(T, Dt, [itol(calendar:day_of_the_week(Date)) | Acc]);
format([$w | T], {Date, _} = Dt, Acc) ->
    format(T, Dt, [itol(to_w(calendar:day_of_the_week(Date))) | Acc]);
format([$z | T], {Date, _} = Dt, Acc) ->
    format(T, Dt, [itol(days_in_year(Date)) | Acc]);
%% Time Formats
format([$a | T], Dt = {_, {H, _, _}}, Acc) when H >= 12 ->
    format(T, Dt, ["pm" | Acc]);
format([$a | T], Dt = {_, {_, _, _}}, Acc) ->
    format(T, Dt, ["am" | Acc]);
format([$A | T], {_, {H, _, _}} = Dt, Acc) when H >= 12 ->
    format(T, Dt, ["PM" | Acc]);
format([$A | T], Dt = {_, {_, _, _}}, Acc) ->
    format(T, Dt, ["AM" | Acc]);
format([$g | T], {_, {H, _, _}} = Dt, Acc) when H == 12; H == 0 ->
    format(T, Dt, ["12" | Acc]);
format([$g | T], {_, {H, _, _}} = Dt, Acc) when H > 12 ->
    format(T, Dt, [itol(H - 12) | Acc]);
format([$g | T], {_, {H, _, _}} = Dt, Acc) ->
    format(T, Dt, [itol(H) | Acc]);
format([$G | T], {_, {H, _, _}} = Dt, Acc) ->
    format(T, Dt, [itol(H) | Acc]);
format([$h | T], {_, {H, _, _}} = Dt, Acc) when H > 12 ->
    format(T, Dt, [pad2(H - 12) | Acc]);
format([$h | T], {_, {H, _, _}} = Dt, Acc) ->
    format(T, Dt, [pad2(H) | Acc]);
format([$H | T], {_, {H, _, _}} = Dt, Acc) ->
    format(T, Dt, [pad2(H) | Acc]);
format([$i | T], {_, {_, M, _}} = Dt, Acc) ->
    format(T, Dt, [pad2(M) | Acc]);
format([$s | T], {_, {_, _, S}} = Dt, Acc) ->
    format(T, Dt, [pad2(S) | Acc]);
format([$f | T], {_, {_, _, _}} = Dt, Acc) ->
    format(T, Dt, [itol(0) | Acc]);
%% Time Formats ms
format([$a | T], Dt = {_, {H, _, _, _}}, Acc) when H >= 12 ->
    %% Fixed: was `H > 12`, which labelled 12:xx (noon) as "am" —
    %% inconsistent with the 3-tuple clause above.
    format(T, Dt, ["pm" | Acc]);
format([$a | T], Dt = {_, {_, _, _, _}}, Acc) ->
    format(T, Dt, ["am" | Acc]);
format([$A | T], {_, {H, _, _, _}} = Dt, Acc) when H >= 12 ->
    %% Fixed: was `H > 12` (same noon bug as $a).
    format(T, Dt, ["PM" | Acc]);
format([$A | T], Dt = {_, {_, _, _, _}}, Acc) ->
    format(T, Dt, ["AM" | Acc]);
format([$g | T], {_, {H, _, _, _}} = Dt, Acc) when H == 12; H == 0 ->
    format(T, Dt, ["12" | Acc]);
format([$g | T], {_, {H, _, _, _}} = Dt, Acc) when H > 12 ->
    format(T, Dt, [itol(H - 12) | Acc]);
format([$g | T], {_, {H, _, _, _}} = Dt, Acc) ->
    format(T, Dt, [itol(H) | Acc]);
format([$G | T], {_, {H, _, _, _}} = Dt, Acc) ->
    %% Fixed: was pad2/1 — 'G' is 24-hour *without* leading zeros,
    %% matching the 3-tuple clause above.
    format(T, Dt, [itol(H) | Acc]);
format([$h | T], {_, {H, _, _, _}} = Dt, Acc) when H > 12 ->
    format(T, Dt, [pad2(H - 12) | Acc]);
format([$h | T], {_, {H, _, _, _}} = Dt, Acc) ->
    format(T, Dt, [pad2(H) | Acc]);
format([$H | T], {_, {H, _, _, _}} = Dt, Acc) ->
    format(T, Dt, [pad2(H) | Acc]);
format([$i | T], {_, {_, M, _, _}} = Dt, Acc) ->
    format(T, Dt, [pad2(M) | Acc]);
format([$s | T], {_, {_, _, S, _}} = Dt, Acc) ->
    format(T, Dt, [pad2(S) | Acc]);
format([$f | T], {_, {_, _, _, Ms}} = Dt, Acc) ->
    format(T, Dt, [pad6(Ms) | Acc]);
%% Whole Dates
format([$c | T], {{Y, M, D}, {H, Min, S}} = Dt, Acc) ->
    Format = "~4.10.0B-~2.10.0B-~2.10.0B"
    ++ " ~2.10.0B:~2.10.0B:~2.10.0B",
    Date = io_lib:format(Format, [Y, M, D, H, Min, S]),
    format(T, Dt, [Date | Acc]);
format([$r | T], {{Y, M, D}, {H, Min, S}} = Dt, Acc) ->
    Format = "~s, ~p ~s ~p ~2.10.0B:~2.10.0B:~2.10.0B",
    Args = [sdayd({Y, M, D}), D, smonth(M), Y, H, Min, S],
    format(T, Dt, [io_lib:format(Format, Args) | Acc]);
format([$U | T], Dt, Acc) ->
    %% Seconds since the Unix epoch (only valid for 3-tuple times).
    Epoch = {{1970, 1, 1}, {0, 0, 0}},
    Time = calendar:datetime_to_gregorian_seconds(Dt) -
    calendar:datetime_to_gregorian_seconds(Epoch),
    format(T, Dt, [itol(Time) | Acc]);
%% Unrecognised, print as is
format([H | T], Date, Acc) ->
    format(T, Date, [H | Acc]).
-spec days_in_year(date()) -> integer().
%% @doc Zero-based day-of-year for a date (Jan 1st -> 0).
days_in_year({Year, _, _} = Date) ->
    Jan1 = calendar:date_to_gregorian_days({Year, 1, 1}),
    calendar:date_to_gregorian_days(Date) - Jan1.
-spec is_leap(year()) -> 1|0.
%% @doc Return 1 when Year is a leap year, 0 otherwise (numeric so the
%% result can be emitted directly by format/3's $L specifier).
is_leap(Year) ->
    case calendar:is_leap_year(Year) of
        true  -> 1;
        false -> 0
    end.
-spec to_w(daynum()) -> integer().
%% @doc Remap calendar:day_of_the_week/1 output (1=Mon..7=Sun) onto a
%% Sunday-based index (0=Sunday .. 6=Saturday).
to_w(DayNum) ->
    case DayNum of
        7 -> 0;
        _ -> DayNum
    end.
-spec suffix(day()) -> string().
%% @doc English ordinal suffix ("st"/"nd"/"rd"/"th") for a day of the
%% month; 11th/12th/13th correctly fall through to "th".
suffix(D) when D =:= 1; D =:= 21; D =:= 31 -> "st";
suffix(D) when D =:= 2; D =:= 22 -> "nd";
suffix(D) when D =:= 3; D =:= 23 -> "rd";
suffix(_) -> "th".
-spec sdayd(date()) -> string().
%% @doc Three-letter weekday name for a calendar date.
sdayd({_, _, _} = Date) ->
    sday(calendar:day_of_the_week(Date)).
-spec sday(daynum()) -> string().
%% @doc Three-letter weekday name, 1 = Monday .. 7 = Sunday.
sday(N) ->
    element(N, {"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}).
-spec day(daynum()) -> string().
%% @doc Full weekday name, 1 = Monday .. 7 = Sunday.
day(N) ->
    element(N, {"Monday", "Tuesday", "Wednesday", "Thursday",
                "Friday", "Saturday", "Sunday"}).
-spec smonth(month()) -> string().
%% @doc Three-letter month name, 1 = Jan .. 12 = Dec.
smonth(M) ->
    element(M, {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
                "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}).
-spec month(month()) -> string().
%% @doc Full month name, 1 = January .. 12 = December.
month(M) ->
    element(M, {"January", "February", "March", "April", "May", "June",
                "July", "August", "September", "October", "November",
                "December"}).
-spec iso_week(date()) -> integer().
%% @doc The week of the year as defined in ISO 8601
%% http://en.wikipedia.org/wiki/ISO_week_date
iso_week(Date) ->
%% Monday that starts ISO week 1 of Date's ISO year.
Week = iso_week_one(iso_year(Date)),
Days = calendar:date_to_gregorian_days(Date) -
calendar:date_to_gregorian_days(Week),
%% Whole weeks elapsed since that Monday, 1-based.
trunc((Days / 7) + 1).
-spec iso_year(date()) -> integer().
%% @doc The year number as defined in ISO 8601
%% http://en.wikipedia.org/wiki/ISO_week_date
%% Note: `Dt >= {Y, 12, 29}` is an ordinary term comparison on the
%% {Y, M, D} tuple, i.e. "on or after Dec 29 of the same year".
iso_year({Y, _M, _D} = Dt) ->
case Dt >= {Y, 12, 29} of
true ->
%% Late December may already belong to next year's ISO week 1.
case Dt < iso_week_one(Y + 1) of
true -> Y;
false -> Y + 1
end;
false ->
%% Early January may still belong to the previous ISO year.
case Dt < iso_week_one(Y) of
true -> Y - 1;
false -> Y
end
end.
-spec iso_week_one(year()) -> date().
%% @doc Date of the first day (Monday) of ISO week 1 for Y. ISO week 1
%% always contains 4 January, so back up from Jan 4 to its Monday.
iso_week_one(Y) ->
    Jan4 = {Y, 1, 4},
    Weekday = calendar:day_of_the_week(Jan4),
    calendar:gregorian_days_to_date(
        calendar:date_to_gregorian_days(Jan4) - (Weekday - 1)).
-spec itol(integer()) -> list().
%% @doc Shorthand for integer_to_list/1.
itol(Int) ->
    integer_to_list(Int).
-spec pad2(integer() | float()) -> list().
%% @doc Zero-pad a number to at least two decimal digits; floats are
%% truncated towards zero first.
pad2(N) when is_integer(N) ->
    io_lib:format("~2.10.0B", [N]);
pad2(N) when is_float(N) ->
    pad2(trunc(N)).
-spec pad6(integer()) -> list().
%% @doc Zero-pad a microsecond count to six decimal digits.
pad6(Micros) when is_integer(Micros) ->
    io_lib:format("~6.10.0B", [Micros]).
%% @doc Shorthand for list_to_integer/1.
ltoi(Digits) ->
    list_to_integer(Digits).
-ifdef(unicode_str).
%% string:uppercase/1 is the Unicode-aware API (OTP 20+); `unicode_str`
%% is presumably set by the build config on those releases — TODO confirm.
uppercase(Str) -> string:uppercase(Str).
-else.
%% Fallback for older releases; string:to_upper/1 is Latin-1 only.
uppercase(Str) -> string:to_upper(Str).
-endif.
%%%===================================================================
%%% Tests
%%%===================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-define(DATE, {{2001, 3, 10}, {17, 16, 17}}).
-define(DATEMS, {{2001, 3, 10}, {17, 16, 17, 123456}}).
-define(DATE_NOON, {{2001, 3, 10}, {12, 0, 0}}).
-define(DATE_MIDNIGHT, {{2001, 3, 10}, {0, 0, 0}}).
-define(ISO, "o \\WW").
%% format/2 smoke tests covering the common PHP-style specifiers,
%% padding, ordinal suffixes, am/pm at noon/midnight, and backslash
%% escaping of literal characters.
basic_format_test_() ->
[
?_assertEqual(format("F j, Y, g:i a", ?DATE), "March 10, 2001, 5:16 pm"),
?_assertEqual(format("F jS, Y, g:i a", ?DATE), "March 10th, 2001, 5:16 pm"),
?_assertEqual(format("F jS", {{2011, 3, 21}, {0, 0, 0}}), "March 21st"),
?_assertEqual(format("F jS", {{2011, 3, 22}, {0, 0, 0}}), "March 22nd"),
?_assertEqual(format("F jS", {{2011, 3, 23}, {0, 0, 0}}), "March 23rd"),
?_assertEqual(format("F jS", {{2011, 3, 31}, {0, 0, 0}}), "March 31st"),
?_assertEqual(format("m.d.y", ?DATE), "03.10.01"),
?_assertEqual(format("j, n, Y", ?DATE), "10, 3, 2001"),
?_assertEqual(format("Ymd", ?DATE), "20010310"),
?_assertEqual(format("H:i:s", ?DATE), "17:16:17"),
?_assertEqual(format("z", ?DATE), "68"),
?_assertEqual(format("D M j G:i:s Y", ?DATE), "Sat Mar 10 17:16:17 2001"),
?_assertEqual(format("D M j G:i:s Y", {{2001, 3, 10}, {5, 16, 17}}), "Sat Mar 10 5:16:17 2001"),
?_assertEqual(format("D M j H:i:s Y", {{2001, 3, 10}, {5, 16, 17}}), "Sat Mar 10 05:16:17 2001"),
?_assertEqual(format("ga", ?DATE_NOON), "12pm"),
?_assertEqual(format("gA", ?DATE_NOON), "12PM"),
?_assertEqual(format("ga", ?DATE_MIDNIGHT), "12am"),
?_assertEqual(format("gA", ?DATE_MIDNIGHT), "12AM"),
?_assertEqual(format("h-i-s, j-m-y, it is w Day", ?DATE),
"05-16-17, 10-03-01, 1631 1617 6 Satpm01"),
?_assertEqual(format("\\i\\t \\i\\s \\t\\h\\e\\ jS \\d\\a\\y.", ?DATE),
"it is the 10th day."),
?_assertEqual(format("H:m:s \\m \\i\\s \\m\\o\\n\\t\\h", ?DATE),
"17:03:17 m is month")
].
%% parse/2 tests: word months, numeric separators (US "/" and world "-"),
%% am/pm handling, time-only and date-only inputs (which inherit the
%% missing half from ?DATE), and a bad-date failure case.
basic_parse_test_() ->
[
?_assertEqual({{2008, 8, 22}, {17, 16, 17}},
parse("22nd of August 2008", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 0, 0}},
parse("22-Aug-2008 6 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("22-Aug-2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 12}},
parse("22-Aug-2008 6:35:12 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 0, 0}},
parse("August/22/2008 6 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("August/22/2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("22 August 2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 0, 0}},
parse("22 Aug 2008 6AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("22 Aug 2008 6:35AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("22 Aug 2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 0, 0}},
parse("22 Aug 2008 6", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("22 Aug 2008 6:35", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 35, 0}},
parse("22 Aug 2008 6:35 PM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 0, 0}},
parse("22 Aug 2008 6 PM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 0, 0}},
parse("Aug 22, 2008 6 PM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 0, 0}},
parse("August 22nd, 2008 6:00 PM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 15, 15}},
parse("August 22nd 2008, 6:15:15pm", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 15, 15}},
parse("August 22nd, 2008, 6:15:15pm", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 15, 0}},
parse("Aug 22nd 2008, 18:15", ?DATE)),
?_assertEqual({{2008, 8, 2}, {17, 16, 17}},
parse("2nd of August 2008", ?DATE)),
?_assertEqual({{2008, 8, 2}, {17, 16, 17}},
parse("August 2nd, 2008", ?DATE)),
?_assertEqual({{2008, 8, 2}, {17, 16, 17}},
parse("2nd August, 2008", ?DATE)),
?_assertEqual({{2008, 8, 2}, {17, 16, 17}},
parse("2008 August 2nd", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 0, 0}},
parse("2-Aug-2008 6 AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 35, 0}},
parse("2-Aug-2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 35, 12}},
parse("2-Aug-2008 6:35:12 AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 0, 0}},
parse("August/2/2008 6 AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 35, 0}},
parse("August/2/2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 35, 0}},
parse("2 August 2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 0, 0}},
parse("2 Aug 2008 6AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 35, 0}},
parse("2 Aug 2008 6:35AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 35, 0}},
parse("2 Aug 2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 0, 0}},
parse("2 Aug 2008 6", ?DATE)),
?_assertEqual({{2008, 8, 2}, {6, 35, 0}},
parse("2 Aug 2008 6:35", ?DATE)),
?_assertEqual({{2008, 8, 2}, {18, 35, 0}},
parse("2 Aug 2008 6:35 PM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {18, 0, 0}},
parse("2 Aug 2008 6 PM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {18, 0, 0}},
parse("Aug 2, 2008 6 PM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {18, 0, 0}},
parse("August 2nd, 2008 6:00 PM", ?DATE)),
?_assertEqual({{2008, 8, 2}, {18, 15, 15}},
parse("August 2nd 2008, 6:15:15pm", ?DATE)),
?_assertEqual({{2008, 8, 2}, {18, 15, 15}},
parse("August 2nd, 2008, 6:15:15pm", ?DATE)),
?_assertEqual({{2008, 8, 2}, {18, 15, 0}},
parse("Aug 2nd 2008, 18:15", ?DATE)),
?_assertEqual({{2012, 12, 10}, {0, 0, 0}},
parse("Dec 10th, 2012, 12:00 AM", ?DATE)),
?_assertEqual({{2012, 12, 10}, {0, 0, 0}},
parse("10 Dec 2012 12:00 AM", ?DATE)),
?_assertEqual({{2001, 3, 10}, {11, 15, 0}},
parse("11:15", ?DATE)),
?_assertEqual({{2001, 3, 10}, {1, 15, 0}},
parse("1:15", ?DATE)),
?_assertEqual({{2001, 3, 10}, {1, 15, 0}},
parse("1:15 am", ?DATE)),
?_assertEqual({{2001, 3, 10}, {0, 15, 0}},
parse("12:15 am", ?DATE)),
?_assertEqual({{2001, 3, 10}, {12, 15, 0}},
parse("12:15 pm", ?DATE)),
?_assertEqual({{2001, 3, 10}, {3, 45, 39}},
parse("3:45:39", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("23-4-1963", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("23-april-1963", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("23-apr-1963", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("4/23/1963", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("april/23/1963", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("apr/23/1963", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("1963/4/23", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("1963/april/23", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("1963/apr/23", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("1963-4-23", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("1963-4-23", ?DATE)),
?_assertEqual({{1963, 4, 23}, {17, 16, 17}},
parse("1963-apr-23", ?DATE)),
?_assertThrow({?MODULE, {bad_date, "23/ap/195"}},
parse("23/ap/195", ?DATE)),
?_assertEqual({{2001, 3, 10}, {6, 45, 0}},
parse("6:45 am", ?DATE)),
?_assertEqual({{2001, 3, 10}, {18, 45, 0}},
parse("6:45 PM", ?DATE)),
?_assertEqual({{2001, 3, 10}, {18, 45, 0}},
parse("6:45 PM ", ?DATE))
].
%% Weekday words (full names, abbreviations, mixed case) must be
%% tolerated and ignored by tokenise/2, including Twitter's
%% "+0000" timezone-offset format.
parse_with_days_test_() ->
[
?_assertEqual({{2008, 8, 22}, {17, 16, 17}},
parse("Sat 22nd of August 2008", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("Sat, 22-Aug-2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 12}},
parse("Sunday 22-Aug-2008 6:35:12 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("Sun 22-Aug-2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("THURSDAY, 22-August-2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 0, 0}},
parse("THURSDAY, 22-August-2008 6 pM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("THU 22 August 2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("FRi 22 Aug 2008 6:35AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 0, 0}},
parse("FRi 22 Aug 2008 6AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("Wednesday 22 Aug 2008 6:35 AM", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 35, 0}},
parse("Monday 22 Aug 2008 6:35", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 0, 0}},
parse("Monday 22 Aug 2008 6", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 0, 0}},
parse("Monday 22 Aug 2008 6p", ?DATE)),
?_assertEqual({{2008, 8, 22}, {6, 0, 0}},
parse("Monday 22 Aug 2008 6a", ?DATE)),
?_assertEqual({{2008, 8, 22}, {18, 35, 0}},
parse("Mon, 22 Aug 2008 6:35 PM", ?DATE)),
% Twitter style
?_assertEqual({{2008, 8, 22}, {06, 35, 04}},
parse("Mon Aug 22 06:35:04 +0000 2008", ?DATE)),
?_assertEqual({{2008, 8, 22}, {06, 35, 04}},
parse("Mon Aug 22 06:35:04 +0500 2008", ?DATE))
].
%% GMT/UTC/DST timezone labels are stripped by tokenise/2 and must not
%% affect the parsed result.
parse_with_TZ_test_() ->
[
?_assertEqual({{2008, 8, 22}, {17, 16, 17}},
parse("Sat 22nd of August 2008 GMT", ?DATE)),
?_assertEqual({{2008, 8, 22}, {17, 16, 17}},
parse("Sat 22nd of August 2008 UTC", ?DATE)),
?_assertEqual({{2008, 8, 22}, {17, 16, 17}},
parse("Sat 22nd of August 2008 DST", ?DATE))
].
%% ISO 8601 week-numbering edge cases around year boundaries, where the
%% ISO year ($o) differs from the calendar year.
iso_test_() ->
[
?_assertEqual("2004 W53", format(?ISO, {{2005, 1, 1}, {1, 1, 1}})),
?_assertEqual("2004 W53", format(?ISO, {{2005, 1, 2}, {1, 1, 1}})),
?_assertEqual("2005 W52", format(?ISO, {{2005, 12, 31}, {1, 1, 1}})),
?_assertEqual("2007 W01", format(?ISO, {{2007, 1, 1}, {1, 1, 1}})),
?_assertEqual("2007 W52", format(?ISO, {{2007, 12, 30}, {1, 1, 1}})),
?_assertEqual("2008 W01", format(?ISO, {{2007, 12, 31}, {1, 1, 1}})),
?_assertEqual("2008 W01", format(?ISO, {{2008, 1, 1}, {1, 1, 1}})),
?_assertEqual("2009 W01", format(?ISO, {{2008, 12, 29}, {1, 1, 1}})),
?_assertEqual("2009 W01", format(?ISO, {{2008, 12, 31}, {1, 1, 1}})),
?_assertEqual("2009 W01", format(?ISO, {{2009, 1, 1}, {1, 1, 1}})),
?_assertEqual("2009 W53", format(?ISO, {{2009, 12, 31}, {1, 1, 1}})),
?_assertEqual("2009 W53", format(?ISO, {{2010, 1, 3}, {1, 1, 1}}))
].
%% Microsecond round-trips: parse/1 scales fractional seconds to
%% microseconds and format/2's $f specifier renders them zero-padded.
ms_test_() ->
Now = os:timestamp(),
[
?_assertEqual({{2012, 12, 12}, {12, 12, 12, 1234}}, parse("2012-12-12T12:12:12.001234")),
?_assertEqual({{2012, 12, 12}, {12, 12, 12, 123000}}, parse("2012-12-12T12:12:12.123")),
?_assertEqual(format("H:m:s.f \\m \\i\\s \\m\\o\\n\\t\\h", ?DATEMS),
"17:03:17.123456 m is month"),
?_assertEqual(format("Y-m-d\\TH:i:s.f", ?DATEMS),
"2001-03-10T17:16:17.123456"),
?_assertEqual(format("Y-m-d\\TH:i:s.f", nparse("2001-03-10T05:16:17.123456")),
"2001-03-10T05:16:17.123456"),
?_assertEqual(format("Y-m-d\\TH:i:s.f", nparse("2001-03-10T05:16:17.123456")),
"2001-03-10T05:16:17.123456"),
?_assertEqual(format("Y-m-d\\TH:i:s.f", nparse("2001-03-10T15:16:17.123456")),
"2001-03-10T15:16:17.123456"),
?_assertEqual(format("Y-m-d\\TH:i:s.f", nparse("2001-03-10T15:16:17.000123")),
"2001-03-10T15:16:17.000123"),
?_assertEqual(Now, nparse(format("Y-m-d\\TH:i:s.f", Now)))
].
%% Zulu suffix and numeric UTC offsets: offsets are applied to the
%% parsed time (the result is normalised to UTC).
zulu_test_() ->
[
?_assertEqual(format("Y-m-d\\TH:i:sZ", nparse("2001-03-10T15:16:17.123456")),
"2001-03-10T15:16:17Z"),
?_assertEqual(format("Y-m-d\\TH:i:s", nparse("2001-03-10T15:16:17Z")),
"2001-03-10T15:16:17"),
?_assertEqual(format("Y-m-d\\TH:i:s", nparse("2001-03-10T15:16:17+04")),
"2001-03-10T11:16:17"),
?_assertEqual(format("Y-m-d\\TH:i:s", nparse("2001-03-10T15:16:17+04:00")),
"2001-03-10T11:16:17"),
?_assertEqual(format("Y-m-d\\TH:i:s", nparse("2001-03-10T15:16:17-04")),
"2001-03-10T19:16:17"),
?_assertEqual(format("Y-m-d\\TH:i:s", nparse("2001-03-10T15:16:17-04:00")),
"2001-03-10T19:16:17")
].
%% format_iso8601/1 must zero-pad microseconds to six digits and omit
%% the fractional part entirely for 3-tuple times.
format_iso8601_test_() ->
[
?_assertEqual("2001-03-10T17:16:17Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17}})),
?_assertEqual("2001-03-10T17:16:17.000000Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 0}})),
?_assertEqual("2001-03-10T17:16:17.100000Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 100000}})),
?_assertEqual("2001-03-10T17:16:17.120000Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 120000}})),
?_assertEqual("2001-03-10T17:16:17.123000Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 123000}})),
?_assertEqual("2001-03-10T17:16:17.123400Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 123400}})),
?_assertEqual("2001-03-10T17:16:17.123450Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 123450}})),
?_assertEqual("2001-03-10T17:16:17.123456Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 123456}})),
?_assertEqual("2001-03-10T17:16:17.023456Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 23456}})),
?_assertEqual("2001-03-10T17:16:17.003456Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 3456}})),
?_assertEqual("2001-03-10T17:16:17.000456Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 456}})),
?_assertEqual("2001-03-10T17:16:17.000056Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 56}})),
?_assertEqual("2001-03-10T17:16:17.000006Z",
format_iso8601({{2001, 3, 10}, {17, 16, 17, 6}})),
?_assertEqual("2001-03-10T07:16:17Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17}})),
?_assertEqual("2001-03-10T07:16:17.000000Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 0}})),
?_assertEqual("2001-03-10T07:16:17.100000Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 100000}})),
?_assertEqual("2001-03-10T07:16:17.120000Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 120000}})),
?_assertEqual("2001-03-10T07:16:17.123000Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 123000}})),
?_assertEqual("2001-03-10T07:16:17.123400Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 123400}})),
?_assertEqual("2001-03-10T07:16:17.123450Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 123450}})),
?_assertEqual("2001-03-10T07:16:17.123456Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 123456}})),
?_assertEqual("2001-03-10T07:16:17.023456Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 23456}})),
?_assertEqual("2001-03-10T07:16:17.003456Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 3456}})),
?_assertEqual("2001-03-10T07:16:17.000456Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 456}})),
?_assertEqual("2001-03-10T07:16:17.000056Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 56}})),
?_assertEqual("2001-03-10T07:16:17.000006Z",
format_iso8601({{2001, 3, 10}, {07, 16, 17, 6}}))
].
%% ISO 8601 parsing: variable-width fractional seconds scale up to
%% microseconds, and +/-HH:MM offsets shift the result to UTC.
parse_iso8601_test_() ->
[
?_assertEqual({{2001, 3, 10}, {17, 16, 17}},
parse("2001-03-10T17:16:17Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 0}},
parse("2001-03-10T17:16:17.000Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 0}},
parse("2001-03-10T17:16:17.000000Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 100000}},
parse("2001-03-10T17:16:17.1Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 120000}},
parse("2001-03-10T17:16:17.12Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 123000}},
parse("2001-03-10T17:16:17.123Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 123400}},
parse("2001-03-10T17:16:17.1234Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 123450}},
parse("2001-03-10T17:16:17.12345Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 123456}},
parse("2001-03-10T17:16:17.123456Z")),
?_assertEqual({{2001, 3, 10}, {15, 16, 17, 100000}},
parse("2001-03-10T16:16:17.1+01:00")),
?_assertEqual({{2001, 3, 10}, {15, 16, 17, 123456}},
parse("2001-03-10T16:16:17.123456+01:00")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 100000}},
parse("2001-03-10T16:16:17.1-01:00")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 123456}},
parse("2001-03-10T16:16:17.123456-01:00")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 456}},
parse("2001-03-10T17:16:17.000456Z")),
?_assertEqual({{2001, 3, 10}, {17, 16, 17, 123000}},
parse("2001-03-10T17:16:17.123000Z"))
].
-endif. | src/ec_date.erl | 0.526099 | 0.452475 | ec_date.erl | starcoder |
-module(fd_sched_rapos).
%% This is the algorithm from paper:
%% Effective random testing of concurrent programs. In proceeding ASE '07. <NAME>.
%% The original RAPOS schedules a program in batch, which is tricky to
%% implement in our framework. Instead, our implementation generatively
%% schedules the `batch` by picking one at a time.
-include("firedrill.hrl").
%% scheduler callbacks
-export([init/1, enqueue_req/2, dequeue_req/1, handle_call/3, handle_cast/2, to_req_list/1]).
-record(state, { rng :: rand:state()
, may_skip :: boolean()
, cand :: array:array()
, next :: array:array()
, skipped :: array:array()
}).
%% @doc Build the initial scheduler state from the option proplist.
%% If no `seed` option is supplied, a fresh seed is generated and
%% printed so the run can be reproduced by passing it back in Opts.
init(Opts) ->
Seed =
case proplists:get_value(seed, Opts, undefined) of
undefined ->
%% Export the freshly drawn seed in a reusable form.
R = rand:export_seed_s(rand:seed_s(exrop)),
io:format(user, "seed = ~p~n", [R]),
R;
V -> V
end,
%% All request partitions start empty; `may_skip` stays false until
%% the first request has been picked.
#state{
rng = rand:seed_s(Seed),
may_skip = false,
cand = array:new(),
next = array:new(),
skipped = array:new()
}.
%% @doc Accept a new delay request. Newly arriving operations are always
%% treated as "affected" (enabled by previously scheduled requests), so
%% they are appended to the `next` queue.
enqueue_req(ReqInfo, #state{next = Next0} = State) ->
    Next1 = array:set(array:size(Next0), ReqInfo, Next0),
    {ok, State#state{next = Next1}}.
%% @doc Pick the next request to dispatch, RAPOS style.
%%
%% `cand` is the current candidate set (the paper's batch), `next`
%% accumulates requests considered enabled/affected by what has been
%% scheduled so far, and `skipped` holds candidates passed over in the
%% current round. After the first pick of a round (may_skip = true) each
%% candidate is skipped with probability 1/2; a scheduled request sends
%% every request racing with it to `next`.
dequeue_req(#state{may_skip = MaySkip, cand = Cand, rng = Rng, next = Next, skipped = Skipped} = State) ->
    case array:size(Cand) of
        0 ->
            %% At this moment, `next` is `schedulable` in the paper.
            case array:size(Next) of
                0 ->
                    %% According to the paper, select a random enabled
                    %% request, which must be in `skipped`.
                    {R, NewRng} = rand:uniform_s(array:size(Skipped), Rng), I = R - 1,
                    Req = array:get(I, Skipped),
                    %% Remove entry I from `skipped`.
                    NewSkipped = array:from_list(
                        array:foldr(fun (Idx, _, Acc) when Idx =:= I ->
                                            Acc;
                                        (_, ReqB, SkippedList) ->
                                            [ReqB | SkippedList]
                                    end, [], Skipped)),
                    dequeue_req(State#state{may_skip = false, cand = array:from_list([Req]), rng = NewRng, skipped = NewSkipped});
                _ ->
                    %% Promote the affected requests to be the new batch.
                    dequeue_req(State#state{may_skip = false, cand = Next, next = array:new()})
            end;
        S ->
            %% Pick a random candidate; P decides whether it is skipped
            %% (only possible after the first pick of the round).
            {R, NewRng0} = rand:uniform_s(S, Rng), I = R - 1,
            {P, NewRng} =
                case MaySkip of
                    true -> rand:uniform_s(NewRng0);
                    false -> {0, NewRng0}
                end,
            Req = array:get(I, Cand),
            case P < 0.5 of
                true ->
                    %% Schedule Req: move all racing requests in `cand`
                    %% and `skipped` to `next`.
                    {NewCandList, NewNextList0} =
                        array:foldr(fun (Idx, _, Acc) when Idx =:= I ->
                                            Acc;
                                        (_, ReqB, {CandList, NextList}) ->
                                            case is_racing(Req, ReqB) of
                                                true ->
                                                    {CandList, [ReqB | NextList]};
                                                false ->
                                                    {[ReqB | CandList], NextList}
                                            end
                                    end, {[], array:to_list(Next)}, Cand),
                    {NewSkippedList, NewNextList} =
                        array:foldr(fun (_, ReqB, {SkippedList, NextList}) ->
                                            case is_racing(Req, ReqB) of
                                                true ->
                                                    {SkippedList, [ReqB | NextList]};
                                                false ->
                                                    {[ReqB | SkippedList], NextList}
                                            end
                                    end, {[], NewNextList0}, Skipped),
                    NewCand = array:from_list(NewCandList),
                    NewNext = array:from_list(NewNextList),
                    NewSkipped = array:from_list(NewSkippedList),
                    {ok, Req, undefined, State#state{may_skip = true, rng = NewRng, cand = NewCand, next = NewNext, skipped = NewSkipped}};
                false ->
                    %% Skip the current request: move it to `skipped`.
                    NewCand = array:from_list(
                        array:foldr(fun (Idx, _, Acc) when Idx =:= I ->
                                            Acc;
                                        (_, ReqB, CandList) ->
                                            [ReqB | CandList]
                                    end, [], Cand)),
                    NewSkipped = array:set(array:size(Skipped), Req, Skipped),
                    dequeue_req(State#state{may_skip = true, cand = NewCand, rng = NewRng, skipped = NewSkipped})
            end
    end.
%% @doc This scheduler serves no calls; any request is answered with
%% `ignored` and the state is left untouched.
handle_call(_Request, _From, State) ->
    {reply, ignored, State}.
%% @doc Casts are ignored; the state is returned unchanged.
handle_cast(_Msg, State) ->
    State.
%% @doc Flatten the three internal queues into a single request list:
%% candidates first, then affected (`next`), then skipped.
to_req_list(#state{cand = Cand, next = Next, skipped = Skipped}) ->
    lists:append([array:to_list(Cand),
                  array:to_list(Next),
                  array:to_list(Skipped)]).
%% @doc Two delay requests race iff they target the same, non-global
%% destination. Requests addressed to `global` never race with anything,
%% and arguments that are not #fd_delay_req{} records are trivially
%% non-racing.
is_racing(#fd_delay_req{to = ToA}, #fd_delay_req{to = ToB}) ->
    ToA =/= global andalso ToB =/= global andalso ToA =:= ToB;
is_racing(_, _) ->
    false.
% @copyright 2010-2014 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Unit tests for src/vivaldi.erl
%% @end
%% @version $Id$
-module(gossip_vivaldi_SUITE).
-author('<EMAIL>').
-vsn('$Id$').
-compile(export_all).
-include("scalaris.hrl").
-include("unittest.hrl").
%% @doc Test cases run by this suite.
all() ->
    [test_init,
     test_get_coordinate,
     test_select_node,
     test_select_data,
     test_select_reply_data,
     test_update_coordinate].
%% @doc Common Test options: each test case may run for at most 10s.
suite() ->
    [{timetrap, {seconds, 10}}].
%% Suite-level setup: delegate to the shared unittest helper.
init_per_suite(Config) ->
    unittest_helper:init_per_suite(Config).

%% Suite-level teardown: delegate to the shared unittest helper.
end_per_suite(Config) ->
    unittest_helper:end_per_suite(Config).

%% Per-testcase setup. The cases listed in NeedsRing exercise a live
%% gossip process and therefore need a small (2 node) ring; all other
%% cases only need the minimal process environment.
init_per_testcase(TestCase, Config) ->
    Config2 = unittest_helper:init_per_suite(Config),
    NeedsRing = [test_get_coordinate, test_select_reply_data],
    case lists:member(TestCase, NeedsRing) of
        true ->
            unittest_helper:make_ring(2, [{config, [{monitor_perf_interval, 0}, % deactivate monitor_perf
                                                    {gossip_vivaldi_interval, 100}
                                                   ]}]),
            unittest_helper:wait_for_stable_ring_deep(),
            Config2;
        false ->
            unittest_helper:start_minimal_procs(Config2, [], true)
    end.

%% Per-testcase teardown: stop both possible setups (ring and minimal
%% procs) regardless of which one init_per_testcase created.
end_per_testcase(_TestCase, Config) ->
    unittest_helper:stop_minimal_procs(Config),
    unittest_helper:stop_ring(),
    Config.
%% get_coordinate/0 must answer with the current coordinate/confidence.
test_get_coordinate(_Config) ->
    pid_groups:join_as(pid_groups:group_with(gossip), ?MODULE),
    gossip_vivaldi:get_coordinate(),
    ?expect_message({vivaldi_get_coordinate_response, _Coordinate, _Confidence}).

%% init/1 must yield a coordinate with confidence 1.0 and request the
%% first trigger, sending nothing else.
test_init(_Config) ->
    pid_groups:join_as(atom_to_list(?MODULE), gossip),
    ?expect_no_message(),
    Ret = gossip_vivaldi:init([]),
    ?equals_pattern(Ret, {ok, {_RandomCoordinate, 1.0}}),
    ?expect_message({trigger_action, {gossip_vivaldi, default}}),
    ?expect_no_message().

%% select_node/1 must decline (false) and keep the state unchanged.
test_select_node(_Config) ->
    Ret = gossip_vivaldi:select_node({[0.5, 0.5], 0.5}),
    ?equals(Ret, {false, {[0.5, 0.5], 0.5}}).

%% select_data/1 must hand the own coordinate to the gossip process.
test_select_data(_Config) ->
    pid_groups:join_as(atom_to_list(?MODULE), gossip),
    Ret = gossip_vivaldi:select_data({[0.1, 0.1], 0.2}),
    ?equals(Ret, {ok, {[0.1, 0.1], 0.2}}),
    This = comm:this(),
    ?expect_message({selected_data, {gossip_vivaldi, default},
                     {This, [0.1, 0.1], 0.2}}).

%% select_reply_data/4 must start a latency measurement: we answer the
%% ping ourselves and then expect the measured latency plus the remote
%% coordinate to be forwarded to the gossip process.
test_select_reply_data(_Config) ->
    config:write(gossip_vivaldi_count_measurements, 1),
    config:write(gossip_vivaldi_measurements_delay, 0),
    Data = {comm:this(), [0.0, 0.0], 0.77},
    Ret = gossip_vivaldi:select_reply_data(Data, 0, 0, {[1.0, 1.0], 1.0}),
    ?equals(Ret, {ok, {[1.0, 1.0], 1.0}}),
    receive
        {ping, SourcePid} ->
            comm:send(SourcePid, {pong, gossip}, [{channel, prio}])
    end,
    ?expect_message({cb_msg, {gossip_vivaldi,default},
                     {update_vivaldi_coordinate, _Latency, {[0.0, 0.0], 0.77}}}),
    %% reset measurement settings changed above
    config:write(gossip_vivaldi_count_measurements, 10),
    config:write(gossip_vivaldi_measurements_delay, 1000).
%% Property-style test: run update_coordinate/7 against 250 randomly
%% generated argument tuples via the tester framework.
test_update_coordinate(_Config) ->
    tester:test(?MODULE, update_coordinate, 7, 250, []).

-spec update_coordinate(Float, Float, Float, number(), Float, Float, Float) -> true when
      is_subtype(Float, float()).
update_coordinate(Coord1x, Coord1y, Conf1, Latency, Coord2x, Coord2y, Conf2) ->
    Coord1 = [Coord1x, Coord1y], Coord2 = [Coord2x, Coord2y],
    config:write(gossip_vivaldi_dimensions, 2),
    pid_groups:join_as(atom_to_list(?MODULE), gossip),
    Ret = gossip_vivaldi:handle_msg({update_vivaldi_coordinate,
                                     Latency, {Coord1, Conf1}}, {Coord2, Conf2}),
    ?expect_message({integrated_data, {gossip_vivaldi, default}, cur_round}),
    %% A zero latency, or both confidences zero, makes the update divide
    %% by zero, so a badarith error is expected unless the coordinates
    %% are already equal.
    case Latency == 0 orelse (Conf1 == 0 andalso Conf2 == 0) of
        true when Coord1 =/= Coord2 ->
            ?expect_exception(gossip_vivaldi:update_coordinate(Coord1, Conf1, Latency, Coord2, Conf2),
                              error, badarith);
        _ ->
            NewState = gossip_vivaldi:update_coordinate(Coord1, Conf1, Latency, Coord2, Conf2),
            ?equals(Ret, {ok, NewState})
    end.
%%
%% Copyright (c) 2018 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(exp_kube_orchestration).
-author("<NAME> <<EMAIL>").
-include("exp.hrl").
-behaviour(exp_orchestration).
-export([get_tasks/3,
stop_tasks/1]).
%% @doc Query the kubernetes API for all pods labelled with Tag (and,
%% when FilterByTimestamp is true, also with the current experiment
%% timestamp), returning one node spec per pod that already has an IP.
%% An API failure yields the empty list.
-spec get_tasks(atom(), node_port(), boolean()) -> [node_spec()].
get_tasks(Tag, Port, FilterByTimestamp) ->
    Path = pods_path() ++ selector(Tag, FilterByTimestamp),
    case http(get, Path) of
        {ok, Nodes} ->
            generate_nodes(Nodes, Port);
        {error, invalid} ->
            []
    end.
%% @doc Delete the deployments for all given tags, one at a time; each
%% deletion is retried internally until it succeeds (see delete_task/1).
-spec stop_tasks([atom()]) -> ok.
stop_tasks(Tags) ->
    _ = [ok = delete_task(Tag) || Tag <- Tags],
    ok.
%% @private
%% Delete the deployment named after Tag: fetch its description (GET),
%% scale it to zero replicas (PUT), then remove it (DELETE). If any of
%% the three steps fails the whole sequence is retried after one second,
%% indefinitely, until it succeeds.
delete_task(Tag) ->
    Path = deploy_path() ++ "/" ++ name(Tag),
    Result = case http(get, Path) of
                 {ok, Body0} ->
                     Body1 = set_replicas_as_zero(Body0),
                     PR = http(put, Path, Body1),
                     DR = http(delete, Path),
                     case {PR, DR} of
                         {{ok, _}, {ok, _}} ->
                             ok;
                         _ ->
                             error
                     end;
                 {error, invalid} ->
                     error
             end,
    case Result of
        ok ->
            ok;
        error ->
            lager:info("Delete failed. Trying again in 1 second"),
            timer:sleep(1000),
            delete_task(Tag)
    end.
%% @private
%% Issue a body-less request (e.g. get/delete) against the configured
%% API server, with the bearer-token auth header attached.
http(Method, Path) ->
    URL = server() ++ Path,
    Headers = headers(),
    run_http(Method, {URL, Headers}).

%% @private
%% Issue a request carrying a JSON-encoded body (e.g. put).
http(Method, Path, Body0) ->
    URL = server() ++ Path,
    Headers = headers(),
    ContentType = "application/json",
    Body1 = binary_to_list(ldb_json:encode(Body0)),
    run_http(Method, {URL, Headers, ContentType, Body1}).
%% @private
%% Execute a request assembled by http/2,3. A 200 reply has its JSON
%% body decoded and returned as {ok, Map}; any other outcome — a
%% transport error or a non-200 status — is logged and collapsed to
%% {error, invalid} so callers can retry uniformly. (Previously a
%% non-200 status fell through the case and crashed with case_clause.)
run_http(Method, Request) ->
    Options = [{body_format, binary}],
    case httpc:request(Method, Request, [], Options) of
        {ok, {{_, 200, _}, _, Body}} ->
            {ok, ldb_json:decode(Body)};
        {ok, {{_, Status, _}, _, _Body}} ->
            lager:info("~p request failed with status ~p",
                       [Method, Status]),
            {error, invalid};
        {error, Reason} ->
            lager:info("Couldn't process ~p request. Reason ~p",
                       [Method, Reason]),
            {error, invalid}
    end.
%% @private
%% Authorization header with the bearer token from the configuration.
headers() ->
    Token = exp_config:get(exp_token),
    [{"Authorization", "Bearer " ++ Token}].

%% @private
%% Base URL of the kubernetes API server.
server() ->
    exp_config:get(exp_api_server).

%% @private
%% Current experiment timestamp, rendered as a string.
timestamp() ->
    integer_to_list(exp_config:get(exp_timestamp)).

%% @private
%% Pod listing endpoint.
pods_path() ->
    "/api/v1/pods".
%% @private
%% Build the label-selector query string for Tag; when the second
%% argument is true the selection is additionally restricted to the
%% current experiment timestamp.
selector(Tag, false) ->
    "?labelSelector=tag%3D" ++ atom_to_list(Tag);
selector(Tag, true) ->
    selector(Tag, false) ++ ",timestamp%3D" ++ timestamp().
%% @private
%% Deployment name for Tag: "<tag>-<timestamp>".
name(Tag) ->
    atom_to_list(Tag) ++ "-" ++ timestamp().

%% @private
%% Common prefix for namespaced extensions/v1beta1 resources.
prefix() ->
    "/apis/extensions/v1beta1/namespaces/default".

%% @private
%% Deployments endpoint.
deploy_path() ->
    prefix() ++ "/deployments".
%% @private
%% Turn a pod-list response Map into node specs: one spec per item
%% whose status already carries a podIP; pods still waiting for an IP
%% are silently dropped.
generate_nodes(Map, Port) ->
    Items = maps:get(items, Map),
    lists:foldl(
        fun(Item, Nodes) ->
            %% find ip
            Status = maps:get(status, Item),
            case maps:is_key(podIP, Status) of
                true ->
                    IP = binary_to_list(
                        maps:get(podIP, Status)
                    ),
                    Node = exp_util:generate_spec(IP, Port),
                    [Node | Nodes];
                false ->
                    Nodes
            end
        end,
        [],
        Items
    ).
%% @private
%% Return the deployment description Map with spec.replicas forced to
%% 0, so that kubernetes scales the pods down before the deployment is
%% deleted.
set_replicas_as_zero(Map) ->
    Spec0 = maps:get(spec, Map),
    Spec1 = maps:put(replicas, 0, Spec0),
    maps:put(spec, Spec1, Map).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2018 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(tricks_driver_SUITE).
-author("<NAME> <<EMAIL>>").
-include("tricks.hrl").
%% common_test callbacks
-export([suite/0,
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2,
all/0]).
-compile([nowarn_export_all, export_all]).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
%% @doc Common Test options: generous 1 hour timetrap per test case.
suite() ->
    [{timetrap, {hours, 1}}].
%% @doc Suite-level setup: nothing to do, the config passes through.
init_per_suite(Config) ->
    Config.

%% @doc Suite-level teardown: nothing to do.
end_per_suite(Config) ->
    Config.
%% Per-testcase setup: start the tricks application and connect the
%% test driver to it.
init_per_testcase(Case, Config) ->
    ct:pal("Beginning test case: ~p", [Case]),
    %% start
    ok = test_util:start(),
    %% connect to tricks
    ok = test_util:driver_connect(),
    Config.
%% Per-testcase teardown: disconnect the driver and stop tricks,
%% mirroring init_per_testcase/2.
end_per_testcase(Case, Config) ->
    ct:pal("Ending test case: ~p", [Case]),
    %% disconnect from tricks
    ok = test_util:driver_disconnect(),
    %% stop
    ok = test_util:stop(),
    Config.
%% @doc Test cases run by this suite.
all() ->
    [register_event_test, subscribe_event_test, discovery_test].
%% ===================================================================
%% tests
%% ===================================================================
%% Events registered through the driver must reach subscriptions made
%% through the regular API, with per-name occurrence counting.
register_event_test(_Config) ->
    %% subscribe to an event
    test_util:event_subscribe(17, {event, 1}),
    test_util:event_subscribe(17, {event, 2}),
    %% register an event using the driver
    test_util:driver_event_register(17, event),
    test_util:event_expect(17, {event, 1}),
    %% register another event
    test_util:driver_event_register(17, event),
    test_util:event_expect(17, {event, 2}).

%% Symmetric case: subscriptions made through the driver must see
%% events registered through the regular API.
subscribe_event_test(_Config) ->
    %% subscribe to an event using the driver
    test_util:driver_event_subscribe(17, {event, 1}),
    test_util:driver_event_subscribe(17, {event, 2}),
    %% register an event
    test_util:event_register(17, event),
    test_util:driver_event_expect(17, {event, 1}),
    %% register another event
    test_util:event_register(17, event),
    test_util:driver_event_expect(17, {event, 2}).
%% The driver's discovery queries must reflect register/unregister
%% operations done through the regular API, isolated per experiment id
%% and per tag.
discovery_test(_Config) ->
    %% expect nothing
    test_util:driver_discovery_expect(17, server, []),
    %% register and expect
    test_util:discovery_register(17, server, {1, "127.0.0.1"}),
    test_util:driver_discovery_expect(17, server, [1]),
    %% register and expect
    test_util:discovery_register(18, client, {2, "127.0.0.2"}),
    test_util:driver_discovery_expect(18, client, [2]),
    test_util:discovery_register(18, client, {1, "127.0.0.12"}),
    test_util:driver_discovery_expect(18, client, [1, 2]),
    %% unregister and expect
    test_util:discovery_unregister(18, client, {1, "127.0.0.12"}),
    test_util:driver_discovery_expect(18, client, [2]),
    test_util:driver_discovery_expect(17, server, [1]).
%% Copyright (c) 2008-2013 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : erlog_int.erl
%% Author : <NAME>
%% Purpose : Basic interpreter of a Prolog sub-set.
%%
%% This is the basic Prolog interpreter.
%% The internal data structures used are very direct and basic:
%%
%% Structures - {Functor,arg1, Arg2,...} where Functor is an atom
%% Variables - {Name} where Name is an atom or integer
%% Lists - Erlang lists
%% Atomic - Erlang constants
%%
%% There is no problem with the representation of variables as Prolog
%% functors of arity 0 are atoms. This representation is much easier
%% to test for, and create new variables with than using funny atom
%% names like '$1' (yuch!), and we need LOTS of variables.
%%
%% All information about the state of an evaluation is held in the
%% variables:
%%
%% [CurrentGoal,] NextGoal, ChoicePoints, Bindings, VarNum, Database
%%
%% Proving a goal succeeds when we have reached the end of the goal
%% list, i.e. NextGoal is empty (true). Proving goal fails when there
%% are no more choice points left to backtrack into. The evaluation
%% is completely flat as all back track information is held in
%% ChoicePoints. Choice points are added going forwards and removed
%% by backtracking and cuts.
%%
%% Internal goals all have the format {{Name},...} as this is an
%% illegal Erlog structure which can never be generated in (legal)
%% code.
%%
%% Proving a top-level goal will return:
%%
%% {succeed,ChoicePoints,Bindings,VarNum,Database} - the
%% goal succeeded and these are the
%% choicepoints/bindings/varnum/database to continue with.
%%
%% {fail,Database} - the goal failed and this is the current database.
%%
%% When a goal has succeeded back tracking is initiated by calling
%% fail(ChoicePoints, Database) which has the same return values as
%% proving the goal.
%%
%% When the interpreter detects an error it builds an error term
%%
%% {erlog_error,ErrorDescriptor,Database}
%%
%% and throws it. The ErrorDescriptor is a valid Erlog term.
%%
%% Database
%%
%% We use a dictionary for the database. All data for a procedure are
%% kept in the database with the functor as key. Interpreted clauses
%% are kept in a list, each clause has a unique (for that functor)
%% tag. Functions which traverse clauses, clause/retract/goals, get
%% the whole list to use. Any database operations can they be done
%% directly on the database. Retract uses the tag to remove the
%% correct clause. This preserves the logical database view. It is
%% possible to use ETS instead if a dictionary, define macro ETS, but
%% the logical database view makes it difficult to directly use ETS
%% efficiently.
%%
%% Interpreted Code
%%
%% Code, interpreted clause bodies, are not stored directly as Erlog
%% terms. Before being added to the database they are checked that
%% they are well-formed, control structures are recognised, cuts
%% augmented with status and sequences of conjunctions are converted
%% to lists. When code is used a new instance is made with fresh
%% variables, correct cut labels, and bodies directly linked to
%% following code to remove the need of later appending.
%%
%% The following functions convert code:
%%
%% well_form_body/4 - converts an Erlog term to database code body
%% format checking that it is well formed.
%% well_form_goal/4 - converts an Erlog term directly to a code body
%% checking that it is well formed.
%% unify_head/4 - unify a goal directly with head without creating a
%% new instance of the head. Saves creating local variables and
%% MANY bindings. This is a BIG WIN!
%% body_instance/5 - creates a new code body instance from the
%% database format.
%% term_instance/2/3 - creates a new instance of a term with new
%% variables.
%% body_term/3 - creates a copy of a body as a legal Erlog term.
%%
%% Choicepoints/Cuts
%%
%% Choicepoints and cuts are kept on the same stack/list. There are
%% different types of cps depending on their context. Failure pops
%% the first cp off the stack, passing over cuts and resumes
%% execution from that cp. A cut has a label and a flag indicating if
%% this is the last cut with this label. Cut steps over cps/cuts
%% until a cut the same label is reached and execution is resumed
%% with that stack. Unless this is the last cut with a label a new
%% cut is pushed on the stack. For efficiency some cps also act as
%% cuts.
%%
%% It is possible to reuse cut labels for different markers as long
%% the areas the cuts are valid don't overlap, though one may be
%% contained within the other, and the cuts correctly indicate when
%% they are the last cut. This is used for ->; and once/1 where we
%% KNOW the last cut of the internal section.
%%
%% It would be better if the cut marker was the actual cps/cut stack
%% to go back to but this would entail a more interactive
%% body_instance.
-module(erlog_int).
%% Main interface.
-export([new/2,prove_goal/2,fail/1]).
%% Main execution functions.
-export([prove_body/2]).
-export([unify_prove_body/4,unify_prove_body/6]).
%% Bindings, unification and dereferncing.
-export([new_bindings/0,get_binding/2,add_binding/3,make_var_list/2]).
-export([deref/2,deref_list/2,dderef/2,dderef_list/2,partial_list/2]).
-export([unify/3,functor/1]).
%% Creating term and body instances.
-export([well_form_body/3,well_form_goal/4,term_instance/2]).
%% Working with the database.
-export([add_built_in/2,add_compiled_proc/4]).
-export([asserta_clause/2,assertz_clause/2]).
-export([retract_clause/3,abolish_clauses/2]).
%% Error types.
-export([erlog_error/1,erlog_error/2,type_error/2,type_error/3,
instantiation_error/0,instantiation_error/1,
permission_error/3,permission_error/4,
existence_error/3,domain_error/3]).
%%-compile(export_all).
-import(lists, [map/2,foldl/3,foldr/3,mapfoldr/3]).
-include("erlog_int.hrl").
%% main interface.
%% new(DbModule, DbArg) -> {ok,State}.
%%  Create a fresh interpreter state: initialise the clause database
%%  via the pluggable DbModule, load the built-in predicates and the
%%  default prolog flags.
new(DbMod, DbArg) ->
    DbRef = DbMod:new(DbArg),                   %Initialise the database
    Db0 = #db{mod=DbMod,ref=DbRef,loc=[]},
    Db1 = built_in_db(Db0),                     %Add these builtins
    Fs = ?PROLOG_FLAGS,
    St = #est{cps=[],bs=[],vn=0,db=Db1,fs=Fs},
    {ok,St}.

%% prove_goal(Goal, State) -> Succeed | Fail.
%%  This is the main entry point into the interpreter. Check that
%%  everything is consistent then prove the goal as a call. The
%%  choicepoint stack, bindings and variable counter are reset; only
%%  the database carries over between top-level goals.
prove_goal(Goal0, St0) ->
    %% put(erlog_cut, orddict:new()),
    %% put(erlog_cps, orddict:new()),
    %% put(erlog_var, orddict:new()),
    %% Check term and build new instance of term with bindings.
    {Goal1,Bs,Vn} = initial_goal(Goal0),
    St1 = St0#est{cps=[],bs=Bs,vn=Vn},          %Update state
    prove_body([{call,Goal1}], St1).
%% built_in_db(Database) -> Database.
%%  Create an initial clause database containing the built-in
%%  predicates and predefined library predicates. Each {Name,Arity}
%%  pair below is marked as a built-in so that it dispatches to Erlang
%%  code in prove_goal/3 instead of interpreted clauses.
built_in_db(Db0) ->
    %% Add the Erlang built-ins.
    Db1 = foldl(fun (Head, Db) -> add_built_in(Head, Db) end, Db0,
                [
                 %% Logic and control.
                 {call,1},
                 {',',2},
                 {'!',0},
                 {';',2},
                 {fail,0},
                 {false,0},
                 {'->',2},
                 {'\\+',1},
                 {once,1},
                 {repeat,0},
                 {true,0},
                 %% Clause creation and destruction.
                 {abolish,1},
                 {assert,1},
                 {asserta,1},
                 {assertz,1},
                 {retract,1},
                 {retractall,1},
                 %% Clause retrieval and information.
                 {clause,2},
                 {current_predicate,1},
                 {predicate_property,2},
                 %% process controll,
                 {halt, 1},
                 %% All solutions
                 {findall,3},
                 %% Prolog flags
                 {current_prolog_flag,2},
                 {set_prolog_flag,2},
                 %% External interface
                 {ecall,2},
                 %% Non-standard but useful
                 {display,1}
                ]),
    Db1.
%% The first (instrumented) FAIL macro records choicepoint-stack depth
%% and binding-table size statistics in the process dictionary before
%% backtracking; it is kept for profiling but immediately undefined in
%% favour of the plain definition below.
-define(FAIL(St),
        begin
            (fun (#est{cps=Cps,bs=Bs,db=Db}) ->
                     put(erlog_cps, orddict:update_counter(length(Cps), 1, get(erlog_cps))),
                     put(erlog_var, orddict:update_counter(dict:size(Bs), 1, get(erlog_var)))
             end)(St),
            fail(St)
        end).
-undef(FAIL).
%% The active definition: ?FAIL(St) simply backtracks via fail/1.
-define(FAIL(St), fail(St)).
%% prove_goal(Goal, NextGoal, State) ->
%%     {succeed,State} | {fail,State}.
%%  Prove one goal. We seldom return succeed here but usually go directly to
%%  to NextGoal.
%%  Handle built-in predicates here. RTFM for a description of the
%%  built-ins. Hopefully we do the same.
%%  Goals of the form {{Name},...} are internal goals which can never
%%  occur in legal user code.
%% Logic and control. Conjunctions are handled in prove_body and true
%% has been compiled away.
prove_goal({call,G}, Next0, #est{cps=Cps,vn=Vn}=St0) ->
    %% Only add cut CP to Cps if goal contains a cut.
    Label = Vn,
    case check_goal(G, Next0, St0, false, Label) of
        {Next1,true} ->
            %% Must increment Vn to avoid clashes!!!
            Cut = #cut{label=Label},
            St1 = St0#est{cps=[Cut|Cps],vn=Vn+1},
            prove_body(Next1, St1);
        {Next1,false} ->
            St1 = St0#est{vn=Vn+1},
            prove_body(Next1, St1)
    end;
prove_goal({{cut},Label,Last}, Next, St) ->
    %% Internal cut goal: discard choicepoints back to Label.
    cut(Label, Last, Next, St);
prove_goal({{disj},R}, Next, #est{cps=Cps,bs=Bs,vn=Vn}=St) ->
    %% Try the left branch now, save the right branch R in a
    %% choicepoint for backtracking.
    Cp = #cp{type=disjunction,next=R,bs=Bs,vn=Vn},
    prove_body(Next, St#est{cps=[Cp|Cps]});
prove_goal(fail, _, St) ->
    ?FAIL(St);
prove_goal(false, _, St) ->                     %Synonym of fail/0
    ?FAIL(St);
prove_goal({{if_then},Label}, Next, #est{cps=Cps}=St) ->
    %% We effetively implement ( C -> T ) with ( C, !, T ) but cuts in
    %% C are local to C.
    %% There is no ( C, !, T ) here, it has already been prepended to Next.
    %%io:fwrite("PG(->): ~p\n", [{Next}]),
    Cut = #cut{label=Label},
    prove_body(Next, St#est{cps=[Cut|Cps]});
prove_goal({{if_then_else},Else,Label}, Next, #est{cps=Cps,bs=Bs,vn=Vn}=St) ->
    %% Need to push a choicepoint to fail back to inside Cond and a cut
    %% to cut back to before Then when Cond succeeds. #cp{type=if_then_else}
    %% functions as both as is always removed whatever the outcome.
    %% There is no ( C, !, T ) here, it has already been prepended to Next.
    Cp = #cp{type=if_then_else,label=Label,next=Else,bs=Bs,vn=Vn},
    %%io:fwrite("PG(->;): ~p\n", [{Next,Else,[Cp|Cps]}]),
    prove_body(Next, St#est{cps=[Cp|Cps]});
prove_goal({'\\+',G}, Next0, #est{cps=Cps,bs=Bs,vn=Vn}=St0) ->
    %% We effectively implementing \+ G with ( G -> fail ; true ).
    Label = Vn,
    {Next1,_} = check_goal(G, [{{cut},Label,true},fail], St0, true, Label),
    Cp = #cp{type=if_then_else,label=Label,next=Next0,bs=Bs,vn=Vn},
    %%io:fwrite("PG(\\+): ~p\n", [{G1,[Cp|Cps]]),
    %% Must increment Vn to avoid clashes!!!
    St1 = St0#est{cps=[Cp|Cps],vn=Vn+1},
    prove_body(Next1, St1);
prove_goal({{once},Label}, Next, #est{cps=Cps}=St) ->
    %% We effetively implement once(G) with ( G, ! ) but cuts in
    %% G are local to G.
    %% There is no ( G, ! ) here, it has already been prepended to Next.
    Cut = #cut{label=Label},
    prove_body(Next, St#est{cps=[Cut|Cps]});
prove_goal(repeat, Next, #est{cps=Cps,bs=Bs,vn=Vn}=St) ->
    %% Infinite choicepoint: backtracking re-proves repeat itself.
    Cp = #cp{type=disjunction,next=[repeat|Next],bs=Bs,vn=Vn},
    prove_body(Next, St#est{cps=[Cp|Cps]});
%% Clause creation and destruction.
prove_goal({abolish,Pi0}, Next, #est{bs=Bs,db=Db0}=St) ->
    case dderef(Pi0, Bs) of
        {'/',N,A} when is_atom(N), is_integer(A), A > 0 ->
            Db1 = abolish_clauses({N,A}, Db0),
            prove_body(Next, St#est{db=Db1});
        Pi -> type_error(predicate_indicator, Pi, St)
    end;
prove_goal({assert,C0}, Next, #est{bs=Bs,db=Db0}=St) ->
    C = dderef(C0, Bs),
    Db1 = assertz_clause(C, Db0),
    prove_body(Next, St#est{db=Db1});
prove_goal({asserta,C0}, Next, #est{bs=Bs,db=Db0}=St) ->
    C = dderef(C0, Bs),
    Db1 = asserta_clause(C, Db0),
    prove_body(Next, St#est{db=Db1});
prove_goal({assertz,C0}, Next, #est{bs=Bs,db=Db0}=St) ->
    C = dderef(C0, Bs),
    Db1 = assertz_clause(C, Db0),
    prove_body(Next, St#est{db=Db1});
prove_goal({retract,C0}, Next, #est{bs=Bs}=St) ->
    C = dderef(C0, Bs),
    prove_retract(C, Next, St);
%% Process controll
prove_goal({halt,C0}, _Next, #est{bs=Bs}) ->
    C = dderef(C0, Bs),
    erlang:exit(self(), C);
%% Clause retrieval and information
prove_goal({clause,H0,B}, Next, #est{bs=Bs}=St) ->
    H1 = dderef(H0, Bs),
    prove_clause(H1, B, Next, St);
prove_goal({current_predicate,Pi0}, Next, #est{bs=Bs}=St) ->
    Pi = dderef(Pi0, Bs),
    prove_current_predicate(Pi, Next, St);
prove_goal({predicate_property,H0,P}, Next, #est{bs=Bs,db=Db}=St) ->
    H = dderef(H0, Bs),
    try get_procedure_type(functor(H), Db) of
        built_in -> unify_prove_body(P, built_in, Next, St);
        compiled -> unify_prove_body(P, compiled, Next, St);
        interpreted -> unify_prove_body(P, interpreted, Next, St);
        undefined -> ?FAIL(St)
    catch
        throw:{erlog_error,E} ->
            erlog_error(E, St)                  %Add state to error
    end;
%% All solutions.
prove_goal({findall,T,G,L}, Next, St) ->
    prove_findall(T, G, L, Next, St);
prove_goal({{findall},T0}, _Next, #est{bs=Bs,db=Db0}=St) ->
    %% Internal helper: collect one findall solution on the top local
    %% list in the database, then fail to find the next solution.
    T1 = dderef(T0, Bs),
    [Loc|Locs] = Db0#db.loc,                    %Add it to the top local list
    Db1 = Db0#db{loc=[[T1|Loc]|Locs]},
    ?FAIL(St#est{db=Db1});
%% Prolog flags.
prove_goal({current_prolog_flag,F,V}, Next, St) ->
    prove_current_prolog_flag(F, V, Next, St);
prove_goal({set_prolog_flag,F,V}, Next, St) ->
    prove_set_prolog_flag(F, V, Next, St);
%% External interface.
prove_goal({ecall,C0,Val}, Next, #est{bs=Bs}=St) ->
    %% Build the initial call.
    %%io:fwrite("PG(ecall): ~p\n   ~p\n   ~p\n", [dderef(C0, Bs),Next,Cps]),
    Efun = case dderef(C0, Bs) of
               {':',M,F} when is_atom(M), is_atom(F) ->
                   fun () -> M:F() end;
               {':',M,{F,A}} when is_atom(M), is_atom(F) ->
                   fun () -> M:F(A) end;
               {':',M,{F,A1,A2}} when is_atom(M), is_atom(F) ->
                   fun () -> M:F(A1, A2) end;
               {':',M,T} when is_atom(M), ?IS_FUNCTOR(T) ->
                   L = tuple_to_list(T),
                   fun () -> apply(M, hd(L), tl(L)) end;
               Fun when is_function(Fun) -> Fun;
               Other -> type_error(callable, Other, St)
           end,
    prove_ecall(Efun, Val, Next, St);
%% Non-standard but useful.
prove_goal({display,T}, Next, #est{bs=Bs}=St) ->
    %% A very simple display procedure.
    io:fwrite("~p\n", [dderef(T, Bs)]),
    prove_body(Next, St);
%% Now look up the database.
prove_goal(G0, Next, #est{bs=Bs,db=Db}=St) ->
    G = dderef(G0, Bs),
    %%io:fwrite("PG: ~p\n    ~p\n    ~p\n", [dderef(G, Bs),Next,Cps]),
    try get_procedure(functor(G), Db) of
        built_in -> erlog_bips:prove_goal(G, Next, St);
        {code,{Mod,Func}} -> Mod:Func(G, Next, St);
        {clauses,Cs} -> prove_goal_clauses(G, Cs, Next, St);
        undefined ->
            %% The `unknown` flag decides whether an undefined
            %% procedure throws or just fails.
            case get_prolog_flag(unknown, St) of
                error ->                        %Throw error
                    existence_error(procedure, pred_ind(functor(G)), St);
                _ ->                            %Fail or warning
                    ?FAIL(St)
            end
    catch
        throw:{erlog_error,E} ->
            erlog_error(E, St)                  %Add state to error
    end.
%% Resume a disjunction choicepoint: restore the saved bindings and
%% variable counter, then prove the saved alternative branch.
fail_disjunction(#cp{next=Next,bs=Bs,vn=Vn}, Cps, St) ->
    prove_body(Next, St#est{cps=Cps,bs=Bs,vn=Vn}).

%% Resume an if-then-else choicepoint the same way; the saved Next here
%% is the Else branch.
fail_if_then_else(#cp{next=Next,bs=Bs,vn=Vn}, Cps, St) ->
    prove_body(Next, St#est{cps=Cps,bs=Bs,vn=Vn}).
%% fail(State) -> {fail,State}.
%% cut(Label, Last, Next, State) -> void.
%%
%%  The functions which manipulate the choice point stack.  fail
%%  backtracks to next choicepoint skipping cut labels cut steps
%%  backwards over choice points until matching cut.

fail(#est{cps=Cps}=St) ->
    fail(Cps, St).

%% Dispatch on the type of the topmost choicepoint; each fail_* helper
%% restores the saved state and resumes that choicepoint's alternative.
%% Plain cut markers are stepped over. No choicepoints left means the
%% whole goal fails.
fail([#cp{type=goal_clauses}=Cp|Cps], St) ->
    fail_goal_clauses(Cp, Cps, St);
fail([#cp{type=disjunction}=Cp|Cps], St) ->
    fail_disjunction(Cp, Cps, St);
fail([#cp{type=if_then_else}=Cp|Cps], St) ->
    fail_if_then_else(Cp, Cps, St);
fail([#cp{type=clause}=Cp|Cps], St) ->
    fail_clause(Cp, Cps, St);
fail([#cp{type=retract}=Cp|Cps], St) ->
    fail_retract(Cp, Cps, St);
fail([#cp{type=current_predicate}=Cp|Cps], St) ->
    fail_current_predicate(Cp, Cps, St);
fail([#cp{type=findall}=Cp|Cps], St) ->
    fail_findall(Cp, Cps, St);
fail([#cp{type=current_prolog_flag}=Cp|Cps], St) ->
    fail_current_prolog_flag(Cp, Cps, St);
fail([#cp{type=ecall}=Cp|Cps], St) ->
    fail_ecall(Cp, Cps, St);
fail([#cp{type=compiled,data=F}=Cp|Cps], St) ->
    F(Cp, Cps, St);
fail([#cut{}|Cps], St) ->                       %Fail over cut points.
    fail(Cps, St);
fail([], St) -> {fail,St}.
cut(Label, Last, Next, #est{cps=Cps}=St) ->
    cut(Label, Last, Next, Cps, St).

%% Step backwards over choicepoints until a cut marker (or a cp that
%% doubles as one) carrying our Label is found; when Last is true the
%% marker itself is popped as well, otherwise it stays for later cuts
%% with the same label.
cut(Label, Last, Next, [#cut{label=Label}|Cps]=Cps0, St) ->
    if Last -> prove_body(Next, St#est{cps=Cps});
       true -> prove_body(Next, St#est{cps=Cps0})
    end;
cut(Label, Last, Next, [#cp{type=if_then_else,label=Label}|Cps]=Cps0, St) ->
    if Last -> prove_body(Next, St#est{cps=Cps});
       true -> prove_body(Next, St#est{cps=Cps0})
    end;
cut(Label, Last, Next, [#cp{type=goal_clauses,label=Label}=Cp|Cps], St) ->
    cut_goal_clauses(Last, Next, Cp, St#est{cps=Cps});
cut(Label, Last, Next, [_Cp|Cps], St) ->
    cut(Label, Last, Next, Cps, St).
%% cut(Label, Last, Next, #est{cps=Cpd}=St) ->
%% cut(Label, Last, Next, Cps, St, 1).
%% cut(Label, Last, Next, [#cut{label=Label}|Cps]=Cps0, St, Cn) ->
%% put(erlog_cut, orddict:update_counter(Cn, 1, get(erlog_cut))),
%% if Last -> prove_body(Next, St#est{cps=Cps});
%% true -> prove_body(Next, St#est{cps=Cps0})
%% end;
%% cut(Label, Last, Next, [#cp{type=if_then_else,label=Label}|Cps]=Cps0, St, Cn) ->
%% put(erlog_cut, orddict:update_counter(Cn, 1, get(erlog_cut))),
%% if Last -> prove_body(Next, St#est{cps=Cps});
%% true -> prove_body(Next, St#est{cps=Cps0})
%% end;
%% cut(Label, Last, Next, [#cp{type=goal_clauses,label=Label}=Cp|Cps], St, Cn) ->
%% put(erlog_cut, orddict:update_counter(Cn, 1, get(erlog_cut))),
%% cut_goal_clauses(Last, Next, Cp, St#est{cps=Cps});
%% cut(Label, Last, Next, [_Cp|Cps], St, Cn) ->
%% cut(Label, Last, Next, Cps, St, Cn+1).
%% check_goal(Goal, Next, St, CutAfter, CutLabel) ->
%%      {WellFormedBody,HasCut}.
%% Check to see that Goal is bound and ensure that it is well-formed.
%% An unbound goal is an instantiation error; a malformed goal has its
%% erlog_error rethrown with the current state attached.
check_goal(G0, Next, #est{bs=Bs}=St, Cut, Label) ->
    case dderef(G0, Bs) of
        {_} -> instantiation_error(St);         %Must have something to call
        G1 ->
            try
                well_form_goal(G1, Next, Cut, Label)
            catch
                throw:{erlog_error,E} ->
                    erlog_error(E, St)          %Add state to error
            end
    end.
%% unify_prove_body(Term1, Term2, Next, State) ->
%%      void.
%% Unify Term1 = Term2, on success prove body Next else fail.
unify_prove_body(T1, T2, Next, #est{bs=Bs0}=St) ->
    case unify(T1, T2, Bs0) of
        {succeed,Bs1} -> prove_body(Next, St#est{bs=Bs1});
        fail -> ?FAIL(St)
    end.

%% unify_prove_body(A1, A2, B1, B2, Next, State) ->
%%      void.
%% Unify A1 = A2, B1 = B2, on success prove body Next else fail.
%% The second pair is only attempted if the first unification succeeds.
unify_prove_body(A1, A2, B1, B2, Next, #est{bs=Bs0}=St) ->
    case unify(A1, A2, Bs0) of
        {succeed,Bs1} -> unify_prove_body(B1, B2, Next, St#est{bs=Bs1});
        fail -> ?FAIL(St)
    end.
%% prove_clause(Head, Body, Next, State) ->
%% void.
%% Unify clauses matching with functor from Head with both Head and Body.
%% Implements clause/2: only interpreted (user) predicates may be
%% inspected; compiled and built-in procedures raise permission errors,
%% and an unknown functor simply fails.
prove_clause(H, B, Next, #est{db=Db}=St) ->
Functor = functor(H),
case get_procedure(Functor, Db) of
{clauses,Cs} -> unify_clauses(H, B, Cs, Next, St);
{code,_} ->
permission_error(access, private_procedure, pred_ind(Functor), St);
built_in ->
permission_error(access, private_procedure, pred_ind(Functor), St);
undefined -> ?FAIL(St)
end.
%% unify_clauses(Head, Body, Clauses, Next, State) ->
%% void.
%% Try to unify Head and Body using Clauses which all have the same functor.
unify_clauses(Ch, Cb, [C], Next, #est{bs=Bs0,vn=Vn0}=St) ->
%% No choice point on last clause.
case unify_clause(Ch, Cb, C, Bs0, Vn0) of
{succeed,Bs1,Vn1} -> prove_body(Next, St#est{bs=Bs1,vn=Vn1});
fail -> ?FAIL(St)
end;
unify_clauses(Ch, Cb, [C|Cs], Next, #est{bs=Bs0,vn=Vn0}=St) ->
case unify_clause(Ch, Cb, C, Bs0, Vn0) of
{succeed,Bs1,Vn1} ->
%% Save a choice point over the remaining clauses. The old
%% bindings Bs0 are restored on backtracking.
%% NOTE(review): vn=Vn1 is saved here while retract_clauses
%% below saves the pre-instance Vn0 — confirm the asymmetry is
%% intentional (either is safe; Vn1 merely skips some numbers).
Cp = #cp{type=clause,data={Ch,Cb,Cs},next=Next,bs=Bs0,vn=Vn1},
Cps = St#est.cps,
prove_body(Next, St#est{cps=[Cp|Cps],bs=Bs1,vn=Vn1});
fail -> unify_clauses(Ch, Cb, Cs, Next, St)
end;
unify_clauses(_Ch, _Cb, [], _Next, St) -> ?FAIL(St).
%% unify_clause(ClauseHead, ClauseBody, StoredClause, Bindings, VarNum) ->
%% {succeed,NewBindings,NewVarNum} | fail.
%% Instantiate the head first and only build the (potentially large)
%% body term if the head actually unified.
unify_clause(Ch, Cb, {_Tag,H0,{B0,_}}, Bs0, Vn0) ->
{H1,Rs1,Vn1} = term_instance(H0, Vn0), %Unique vars on head first
case unify(Ch, H1, Bs0) of
{succeed,Bs1} ->
{B1,_Rs2,Vn2} = body_term(B0, Rs1, Vn1), %Now we need the rest
case unify(Cb, B1, Bs1) of
{succeed,Bs2} -> {succeed,Bs2,Vn2};
fail -> fail
end;
fail -> fail
end.
%% Backtracking entry: restore the saved state and retry the remaining
%% clauses from the choice point.
fail_clause(#cp{data={Ch,Cb,Cs},next=Next,bs=Bs,vn=Vn}, Cps, St) ->
unify_clauses(Ch, Cb, Cs, Next, St#est{cps=Cps,bs=Bs,vn=Vn}).
%% prove_current_predicate(PredInd, Next, State) ->
%% void.
%% Match functors of existing user (interpreted) predicate with PredInd.
%% PredInd must be Name/Arity or an unbound variable; anything else is
%% a type error. We then backtrack through all interpreted functors.
prove_current_predicate(Pi, Next, #est{db=Db}=St) ->
case Pi of
{'/',_,_} -> ok;
{_} -> ok;
Other -> type_error(predicate_indicator, Other, St)
end,
Fs = get_interp_functors(Db),
prove_predicates(Pi, Fs, Next, St).
%% One choice point per remaining functor so each solution can be
%% retried on backtracking.
prove_predicates(Pi, [F|Fs], Next, #est{cps=Cps,bs=Bs,vn=Vn}=St) ->
Cp = #cp{type=current_predicate,data={Pi,Fs},next=Next,bs=Bs,vn=Vn},
unify_prove_body(Pi, pred_ind(F), Next, St#est{cps=[Cp|Cps]});
prove_predicates(_Pi, [], _Next, St) -> ?FAIL(St).
fail_current_predicate(#cp{data={Pi,Fs},next=Next,bs=Bs,vn=Vn}, Cps, St) ->
prove_predicates(Pi, Fs, Next, St#est{cps=Cps,bs=Bs,vn=Vn}).
%% prove_goal_clauses(Goal, Clauses, Next, State) ->
%% void.
%% Try to prove Goal using Clauses which all have the same functor.
prove_goal_clauses(G, [C], Next, #est{cps=Cps,vn=Vn}=St) ->
%% Must be smart here and test whether we need to add a cut point.
%% C has the structure {Tag,Head,{Body,BodyHasCut}}.
%% A lone clause needs no choice point, but if its body contains a
%% cut we still push a #cut{} barrier so the cut has a label to find.
case element(2, element(3, C)) of
true ->
Cut = #cut{label=Vn},
prove_goal_clause(G, C, Next, St#est{cps=[Cut|Cps]});
false ->
prove_goal_clause(G, C, Next, St)
end;
%% prove_goal_clause(G, C, Next, Cps, Bs, Vn, Db);
prove_goal_clauses(G, [C|Cs], Next, #est{cps=Cps,bs=Bs,vn=Vn}=St) ->
%% The choice point label is the current variable counter, which is
%% also used as the cut label inside the clause body below.
Cp = #cp{type=goal_clauses,label=Vn,data={G,Cs},next=Next,bs=Bs,vn=Vn},
prove_goal_clause(G, C, Next, St#est{cps=[Cp|Cps]});
prove_goal_clauses(_G, [], _Next, St) -> ?FAIL(St).
%% Unify the goal against the stored head without instantiating it
%% (see unify_head/4), then instantiate the body ready for execution.
prove_goal_clause(G, {_Tag,H0,{B0,_}}, Next, #est{bs=Bs0,vn=Vn0}=St) ->
%% io:fwrite("PGC1: ~p\n", [{G,H0,B0}]),
Label = Vn0,
case unify_head(G, H0, Bs0, Vn0+1) of
{succeed,Rs0,Bs1,Vn1} ->
%% io:fwrite("PGC2: ~p\n", [{Rs0}]),
{B1,_Rs2,Vn2} = body_instance(B0, Next, Rs0, Vn1, Label),
%% io:fwrite("PGC3: ~p\n", [{B1,Next,Cps}]),
prove_body(B1, St#est{bs=Bs1,vn=Vn2});
fail -> ?FAIL(St)
end.
fail_goal_clauses(#cp{data={G,Cs},next=Next,bs=Bs,vn=Vn}, Cps, St) ->
prove_goal_clauses(G, Cs, Next, St#est{cps=Cps,bs=Bs,vn=Vn}).
%% cut_goal_clauses(Last, Next, Cp, St).
%% Handle a cut reaching a goal_clauses choice point.
cut_goal_clauses(true, Next, #cp{label=_}, St) ->
%% Just remove the choice point completely and continue.
prove_body(Next, St);
cut_goal_clauses(false, Next, #cp{label=L}, #est{cps=Cps}=St) ->
%% Replace choice point with cut point then continue.
Cut = #cut{label=L},
prove_body(Next, St#est{cps=[Cut|Cps]}).
%% prove_retract(Clause, Next, State) ->
%% void.
%% Retract clauses in database matching Clause.
%% A bare head H is treated as the clause H :- true.
prove_retract({':-',H,B}, Next, St) ->
prove_retract(H, B, Next, St);
prove_retract(H, Next, St) ->
prove_retract(H, true, Next, St).
%% Only interpreted procedures may be retracted; compiled and built-in
%% ones raise permission errors, unknown functors fail.
prove_retract(H, B, Next, #est{db=Db}=St) ->
Functor = functor(H),
case get_procedure(Functor, Db) of
{clauses,Cs} -> retract_clauses(H, B, Cs, Next, St);
{code,_} ->
permission_error(modify, static_procedure, pred_ind(Functor), St);
built_in ->
permission_error(modify, static_procedure, pred_ind(Functor), St);
undefined -> ?FAIL(St)
end.
%% retract_clauses(Head, Body, Clauses, Next, State) ->
%% void.
%% Try to retract Head and Body using Clauses which all have the same functor.
retract_clauses(Ch, Cb, [C|Cs], Next, #est{cps=Cps,bs=Bs0,vn=Vn0,db=Db0}=St) ->
case unify_clause(Ch, Cb, C, Bs0, Vn0) of
{succeed,Bs1,Vn1} ->
%% We have found a right clause so now retract it.
%% element(1, C) is the clause tag used to identify it in the db.
%% The choice point keeps the pre-unification Bs0/Vn0 so
%% backtracking can try the remaining clauses cleanly.
Db1 = retract_clause(functor(Ch), element(1, C), Db0),
Cp = #cp{type=retract,data={Ch,Cb,Cs},next=Next,bs=Bs0,vn=Vn0},
prove_body(Next, St#est{cps=[Cp|Cps],bs=Bs1,vn=Vn1,db=Db1});
fail -> retract_clauses(Ch, Cb, Cs, Next, St)
end;
retract_clauses(_Ch, _Cb, [], _Next, St) -> ?FAIL(St).
%% Note that the retraction itself is NOT undone on backtracking.
fail_retract(#cp{data={Ch,Cb,Cs},next=Next,bs=Bs,vn=Vn}, Cps, St) ->
retract_clauses(Ch, Cb, Cs, Next, St#est{cps=Cps,bs=Bs,vn=Vn}).
%% prove_findall(Term, Goal, List, Next, State) ->
%% void.
%% Do findall on Goal and return list of each Term in List. We keep a
%% list of lists of local values in the database structure and for
%% each findall we push a new list for that findall and pop it when
%% we are done. This allows nested findalls. Each {findall} adds its
%% value to the top list. Then when findall finally fails we catch it
%% in fail_findall which cleans up by removing the top list and
%% unifying it with the output list value.
prove_findall(T, G, L0, Next, #est{cps=Cps,bs=Bs,vn=Vn,db=Db0}=St) ->
L1 = partial_list(L0, Bs), %Check for partial list
Label = Vn,
%% Append a {{findall},T} pseudo-goal so every solution of G pushes
%% an instance of T onto the top local list before failing onwards.
{Body,_} = check_goal(G, [{{findall},T}], St, false, Label),
Cp = #cp{type=findall,data=L1,next=Next,bs=Bs,vn=Vn},
Locs = Db0#db.loc, %Add a new local list
Db1 = Db0#db{loc=[[]|Locs]},
%% Db1 = Db0#db{loc=[[]|Db0#db.loc]]},
%% Catch case where an erlog error occurs and cleanup local lists.
try
prove_body(Body, St#est{cps=[Cp|Cps],vn=Vn+1,db=Db1})
catch
throw:{erlog_error,E,#est{db=Dba}=Sta} ->
case Dba#db.loc of %Pop the local list
[_|Locsa] ->
Dbb = Dba#db{loc=Locsa},
%% Dbb = Dba#db{loc=tl(Db#db.loc)},
erlog_error(E, Sta#est{db=Dbb});
_ ->
erlog_error(E, Sta)
end
end.
%% Reached when the findall goal has exhausted all solutions: pop the
%% accumulated (reversed) local list, renumber its variables and unify
%% it with the caller's output list.
fail_findall(#cp{next=Next,data=List,bs=Bs,vn=Vn0}, Cps, #est{db=Db0}=St) ->
[Loc0|Locs] = Db0#db.loc,
Db1 = Db0#db{loc=Locs},
{Loc1,Vn1} = findall_list(Loc0, Vn0, Bs, []),
%% Make sure to drop all new bindings and revert to old bindings.
unify_prove_body(List, Loc1, Next, St#est{cps=Cps,bs=Bs,vn=Vn1,db=Db1}).
%% Instantiate each collected solution with fresh variables; the input
%% was accumulated in reverse, so building onto Acc restores order.
findall_list([X0|Xs], Vn0, Bs, Acc) ->
{X1,_,Vn1} = term_instance(dderef(X0, Bs), Vn0),
findall_list(Xs, Vn1, Bs, [X1|Acc]);
findall_list([], Vn, _, Acc) -> {Acc,Vn}.
%% prove_current_prolog_flag(Flag, Value, Next, State) ->
%% void.
%% prove_set_prolog_flag(Flag, Value, Next, State) ->
%% void.
%% Flags live in the #est.fs field as {Name,Value,SettableValues}
%% triples; SettableValues is 'none' for read-only flags.
prove_current_prolog_flag(F, V, Next, #est{fs=Fs}=St) ->
prove_prolog_flags(F, V, Fs, Next, St).
%% Backtrack through the flag list, one choice point per remaining flag.
prove_prolog_flags(F, V, [{Pf,Pv,_}|Fs], Next, #est{cps=Cps,bs=Bs,vn=Vn}=St) ->
Cp = #cp{type=current_prolog_flag,data={F,V,Fs},next=Next,bs=Bs,vn=Vn},
unify_prove_body(F, Pf, V, Pv, Next, St#est{cps=[Cp|Cps]});
prove_prolog_flags(_F, _V, [], _Next, St) -> ?FAIL(St).
fail_current_prolog_flag(#cp{data={F,V,Fs},next=Next,bs=Bs,vn=Vn}, Cps, St) ->
prove_prolog_flags(F, V, Fs, Next, St#est{cps=Cps,bs=Bs,vn=Vn}).
%% Both flag name and value must be bound before setting.
prove_set_prolog_flag(F, V, Next, #est{bs=Bs,fs=Fs}=St) ->
prove_set_prolog_flag(deref(F, Bs), deref(V, Bs), Next, Fs, St).
prove_set_prolog_flag({_}, _V, _Next, _Fs, St) ->
instantiation_error(St);
prove_set_prolog_flag(_F, {_}, _Next, _Fs, St) ->
instantiation_error(St);
prove_set_prolog_flag(F, V, Next, Fs, St) ->
case lists:keyfind(F, 1, Fs) of
{_,_,Pvs} when Pvs =/= none -> %Settable flag
prove_set_prolog_flag(F, V, Pvs, Next, Fs, St);
_ ->
domain_error(prolog_flag, F, St)
end.
%% Validate the new value against the flag's allowed values, then
%% store the updated triple back into the flag list.
prove_set_prolog_flag(F, V, Pvs, Next, Fs0, St) ->
case lists:member(V, Pvs) of %Valid value
true ->
Fs1 = lists:keyreplace(F, 1, Fs0, {F,V,Pvs}),
prove_body(Next, St#est{fs=Fs1});
false ->
domain_error(flag_value, {'+',F,V}, St)
end.
%% Crashes with a badmatch if F is not a known flag (deliberate).
get_prolog_flag(F, #est{fs=Fs}) -> %We should know the flags
{_,V,_} = lists:keyfind(F, 1, Fs),
V.
%% prove_ecall(Generator, Value, Next, St) ->
%% void.
%% Call an external (Erlang) generator and handle return value,
%% either succeed or fail.
%% The generator is a 0-arity fun returning {succeed,Ret,Cont} where
%% Cont is the next generator (a choice point is pushed so it is
%% called on backtracking), {succeed_last,Ret} for a final solution,
%% or fail.
prove_ecall(Efun, Val, Next, #est{cps=Cps,bs=Bs,vn=Vn}=St) ->
case Efun() of
{succeed,Ret,Cont} -> %Succeed and more choices
Cp = #cp{type=ecall,data={Cont,Val},next=Next,bs=Bs,vn=Vn},
unify_prove_body(Val, Ret, Next, St#est{cps=[Cp|Cps]});
{succeed_last,Ret} -> %Succeed but last choice
unify_prove_body(Val, Ret, Next, St);
fail -> ?FAIL(St) %No more
end.
fail_ecall(#cp{data={Efun,Val},next=Next,bs=Bs,vn=Vn}, Cps, St) ->
prove_ecall(Efun, Val, Next, St#est{cps=Cps,bs=Bs,vn=Vn}).
%% prove_body(Body, State) -> {succeed,State}.
%% Prove the goals in a body. Remove the first goal and try to prove
%% it. Return when there are no more goals. This is how proving a
%% goal/body succeeds.
prove_body([G|Gs], St) ->
%%io:fwrite("PB: ~p\n", [{G,Gs,St#est.cps}]),
prove_body(G, Gs, St);
prove_body([], St) -> %No more body
%%io:fwrite("Cps: ~p\nCut: ~p\nVar: ~p\nVar: ~p\n",
%% [get(erlog_cps),get(erlog_cut),get(erlog_var),dict:size(Bs)]),
%%io:fwrite("PB: ~p\n", [Cps]),
{succeed,St}.
%% unify(Term, Term, Bindings) -> {succeed,NewBindings} | fail.
%% Unify two terms with a set of bindings.
%% Both terms are dereferenced first. Variable-variable, variable-term
%% and term-variable cases extend the bindings; lists and tuples with
%% equal size and functor unify element-wise. No occurs check is done
%% here (standard Prolog behaviour).
unify(T10, T20, Bs0) ->
case {deref(T10, Bs0),deref(T20, Bs0)} of
{T1,T2} when ?IS_CONSTANT(T1), T1 == T2 ->
{succeed,Bs0};
{{V},{V}} -> {succeed,Bs0}; %Same unbound variable
{{_}=Var,T2} -> {succeed,add_binding(Var, T2, Bs0)};
{T1,{_}=Var} -> {succeed,add_binding(Var, T1, Bs0)};
{[H1|T1],[H2|T2]} ->
case unify(H1, H2, Bs0) of
{succeed,Bs1} -> unify(T1, T2, Bs1);
fail -> fail
end;
{[],[]} -> {succeed,Bs0};
{T1,T2} when tuple_size(T1) == tuple_size(T2),
element(1, T1) == element(1, T2) ->
unify_args(T1, T2, Bs0, 2, tuple_size(T1));
_Other -> fail
end.
%% Unify tuple arguments pairwise from index I up to and including S.
unify_args(_, _, Bs, I, S) when I > S -> {succeed,Bs};
unify_args(S1, S2, Bs0, I, S) ->
case unify(element(I, S1), element(I, S2), Bs0) of
{succeed,Bs1} -> unify_args(S1, S2, Bs1, I+1, S);
fail -> fail
end.
%% make_var_list(Count, VarNum) -> [Var].
%% Make a list of Count fresh variables numbered consecutively from
%% VarNum upwards, e.g. make_var_list(3, 5) -> [{5},{6},{7}].
make_var_list(Count, Vn) ->
[{V} || V <- lists:seq(Vn, Vn + Count - 1)].
%% Errors
%% To keep dialyzer quiet.
-spec type_error(_, _) -> no_return().
-spec type_error(_, _, _) -> no_return().
-spec instantiation_error() -> no_return().
-spec instantiation_error(_) -> no_return().
-spec permission_error(_, _, _) -> no_return().
-spec permission_error(_, _, _, _) -> no_return().
-spec existence_error(_, _, _) -> no_return().
-spec domain_error(_, _, _) -> no_return().
-spec erlog_error(_) -> no_return().
-spec erlog_error(_, _) -> no_return().
%% Builders for ISO-style error terms. Each variant that takes a final
%% St argument throws {erlog_error,E,St} so the catcher also gets the
%% interpreter state; the state-less variants throw {erlog_error,E}
%% and are used where no state is available. None of these return.
type_error(Type, Value) -> erlog_error({type_error,Type,Value}).
type_error(Type, Value, St) -> erlog_error({type_error,Type,Value}, St).
instantiation_error() -> erlog_error(instantiation_error).
instantiation_error(St) -> erlog_error(instantiation_error, St).
permission_error(Op, Type, Value) ->
erlog_error({permission_error,Op,Type,Value}).
permission_error(Op, Type, Value, St) ->
erlog_error({permission_error,Op,Type,Value}, St).
existence_error(Type, PI, St) ->
erlog_error({existence_error,Type,PI}, St).
domain_error(Domain, Value, St) ->
erlog_error({domain_error,Domain,Value}, St).
erlog_error(E) -> throw({erlog_error,E}).
erlog_error(E, St) -> throw({erlog_error,E,St}).
%% Database
%% The database is a dict where the key is the functor pair {Name,Arity}.
%% The value is: built_in |
%% {clauses,NextTag,[{Tag,Head,Body}]} |
%% {code,{Module,Function}}.
%% Built-ins are defined by the system and cannot be manipulated by user
%% code.
%% We are a little paranoid here and do our best to ensure consistency
%% in the database by checking input arguments even if we know they
%% come from "good" code.
%% All database operations are delegated to the pluggable storage
%% module Dm held in #db.mod, with its opaque handle in #db.ref.
%% add_built_in(Functor, Database) -> NewDatabase.
%% Add Functor as a built-in in the database.
add_built_in(Functor, #db{mod=Dm,ref=Dr0}=Db) ->
Dr1 = Dm:add_built_in(Dr0, Functor),
Db#db{ref=Dr1}.
%% add_compiled_proc(Functor, Module, Function, Database) -> NewDatabase.
%% Add Functor as a compiled procedure with code in
%% Module:Function. No checking.
add_compiled_proc(Functor, M, F, #db{mod=Dm,ref=Dr0}=Db) ->
case Dm:add_compiled_proc(Dr0, Functor, M, F) of
{ok,Dr1} -> Db#db{ref=Dr1};
error ->
permission_error(modify, static_procedure, pred_ind(Functor))
end.
%% asserta_clause(Clause, Database) -> NewDatabase.
%% assertz_clause(Clause, Database) -> NewDatabase.
%% Assert a clause into the database first checking that it is well
%% formed. A bare head H is asserted as H :- true. asserta adds at
%% the front of the procedure, assertz at the back.
asserta_clause({':-',H,B}, Db) -> asserta_clause(H, B, Db);
asserta_clause(H, Db) -> asserta_clause(H, true, Db).
asserta_clause(Head, B, #db{mod=Dm,ref=Dr0}=Db) ->
{Functor,Body} = well_formed_clause(Head, B, Db),
case Dm:asserta_clause(Dr0, Functor, Head, Body) of
{ok,Dr1} -> Db#db{ref=Dr1};
error ->
permission_error(modify, static_procedure, pred_ind(Functor))
end.
assertz_clause({':-',H,B}, Db) -> assertz_clause(H, B, Db);
assertz_clause(H, Db) -> assertz_clause(H, true, Db).
assertz_clause(Head, B, #db{mod=Dm,ref=Dr0}=Db) ->
{Functor,Body} = well_formed_clause(Head, B, Db),
case Dm:assertz_clause(Dr0, Functor, Head, Body) of
{ok,Dr1} -> Db#db{ref=Dr1};
error ->
permission_error(modify, static_procedure, pred_ind(Functor))
end.
%% Check head and body shapes; also throws if the head is not callable.
well_formed_clause(Head, Body, _Db) ->
%% No need to catch error as we can't add state to it.
%% NOTE(review): 'sture' is a dummy cut label; asserted bodies get
%% their real label when instantiated — confirm against upstream.
{functor(Head),well_form_body(Body, false, sture)}.
%% retract_clause(Functor, ClauseTag, Database) -> NewDatabase.
%% Retract (remove) the clause with tag ClauseTag from the list of
%% clauses of Functor.
retract_clause(Functor, Ct, #db{mod=Dm,ref=Dr0}=Db) ->
case Dm:retract_clause(Dr0, Functor, Ct) of
{ok,Dr1} -> Db#db{ref=Dr1};
error ->
permission_error(modify, static_procedure, pred_ind(Functor))
end.
%% abolish_clauses(Functor, Database) -> NewDatabase.
%% Remove the whole interpreted procedure for Functor.
abolish_clauses(Functor, #db{mod=Dm,ref=Dr0}=Db) ->
case Dm:abolish_clauses(Dr0, Functor) of
{ok,Dr1} -> Db#db{ref=Dr1};
error ->
permission_error(modify, static_procedure, pred_ind(Functor))
end.
%% get_procedure(Functor, Database) ->
%% built_in | {code,{Mod,Func}} | {clauses,[Clause]} | undefined.
%% Return the procedure type and data for a functor.
get_procedure(Functor, #db{mod=Dm,ref=Dr}) ->
Dm:get_procedure(Dr, Functor).
%% get_procedure_type(Functor, Database) ->
%% built_in | compiled | interpreted | undefined.
%% Return the procedure type for a functor.
get_procedure_type(Functor, #db{mod=Dm,ref=Dr}) ->
Dm:get_procedure_type(Dr, Functor).
%% get_interp_functors(Database) -> [Functor].
%% List the functors of all interpreted (user-defined) procedures.
get_interp_functors(#db{mod=Dm,ref=Dr}) ->
Dm:get_interpreted_functors(Dr).
%% functor(Goal) -> {Name,Arity}.
%% An atom is a 0-arity functor; a compound term's name is its first
%% element. Anything else (including variables) is not callable and
%% raises a type error.
functor(T) when ?IS_FUNCTOR(T) ->
{element(1, T),tuple_size(T)-1};
functor(T) when is_atom(T) -> {T,0};
functor(T) -> type_error(callable, T).
%% well_form_body(Body, HasCutAfter, CutLabel) -> {Body,HasCut}.
%% well_form_body(Body, Tail, HasCutAfter, CutLabel) -> {Body,HasCut}.
%% Check that Body is well-formed, flatten conjunctions, fix cuts and
%% add explicit call to top-level variables.
%% The result is a flat goal list built onto Tail, with control
%% constructs rewritten to internal {{if_then_else},...}, {{disj},...},
%% {{if_then},...}, {{once},...} and {{cut},...} forms. HasCut reports
%% whether the processed body contains a cut.
well_form_body(Body, Cut, Label) -> well_form_body(Body, [], Cut, Label).
%% Conjunction: process the right side first so its result becomes the
%% tail for the left side, flattening into one list.
well_form_body({',',L,R}, Tail0, Cut0, Label) ->
{Tail1,Cut1} = well_form_body(R, Tail0, Cut0, Label),
well_form_body(L, Tail1, Cut1, Label);
well_form_body({';',{'->',C0,T0},E0}, Tail, Cut0, Label) ->
{T1,Tc} = well_form_body(T0, Cut0, Label),
{E1,Ec} = well_form_body(E0, Cut0, Label),
%% N.B. an extra cut will be added at run-time!
{C1,_} = well_form_body(C0, true, Label),
{[{{if_then_else},C1,T1,E1,Label}|Tail],Tc or Ec};
well_form_body({';',L0,R0}, Tail, Cut0, Label) ->
{L1,Lc} = well_form_body(L0, Cut0, Label),
{R1,Rc} = well_form_body(R0, Cut0, Label),
{[{{disj},L1,R1}|Tail],Lc or Rc};
well_form_body({'->',C0,T0}, Tail, Cut0, Label) ->
{T1,Cut1} = well_form_body(T0, Cut0, Label),
%% N.B. an extra cut will be added at run-time!
{C1,_} = well_form_body(C0, true, Label),
{[{{if_then},C1,T1,Label}|Tail],Cut1};
well_form_body({once,G}, Tail, Cut, Label) ->
%% N.B. an extra cut is added at run-time!
{G1,_} = well_form_body(G, true, Label),
{[{{once},G1,Label}|Tail],Cut};
%% A variable goal becomes an explicit call/1.
well_form_body({V}, Tail, Cut, _Label) ->
{[{call,{V}}|Tail],Cut};
well_form_body(true, Tail, Cut, _Label) -> {Tail,Cut}; %No-op
well_form_body(fail, _Tail, _Cut, _Label) -> {[fail],false}; %No further
well_form_body('!', Tail, Cut, Label) ->
{[{{cut},Label,not Cut}|Tail],true};
well_form_body(Goal, Tail, Cut, _Label) ->
functor(Goal), %Check goal
{[Goal|Tail],Cut}.
%% well_form_goal(Goal, Tail, HasCutAfter, CutLabel) -> {Body,HasCut}.
%% Check that Goal is well-formed, flatten conjunctions, fix cuts and
%% add explicit call to top-level variables.
%% Unlike well_form_body this produces a directly executable goal list:
%% sub-goals of control constructs are spliced inline with the
%% continuation Tail already appended, so no later instantiation pass
%% is needed.
well_form_goal({',',L,R}, Tail0, Cut0, Label) ->
{Tail1,Cut1} = well_form_goal(R, Tail0, Cut0, Label),
well_form_goal(L, Tail1, Cut1, Label);
well_form_goal({';',{'->',C0,T0},E0}, Tail, Cut0, Label) ->
{T1,Tc} = well_form_goal(T0, Tail, Cut0, Label),
%% The condition ends in a cut committing to the then-branch.
{C1,_} = well_form_goal(C0, [{{cut},Label,true}|T1], true, Label),
{E1,Ec} = well_form_goal(E0, Tail, Cut0, Label),
{[{{if_then_else},E1,Label}|C1],Tc or Ec};
well_form_goal({';',L0,R0}, Tail, Cut0, Label) ->
{L1,Lc} = well_form_goal(L0, Tail, Cut0, Label),
{R1,Rc} = well_form_goal(R0, Tail, Cut0, Label),
{[{{disj},R1}|L1],Lc or Rc};
well_form_goal({'->',C0,T0}, Tail, Cut0, Label) ->
{T1,Cut1} = well_form_goal(T0, Tail, Cut0, Label),
%% N.B. an extra cut will be added at run-time!
{C1,_} = well_form_goal(C0, [{{cut},Label,true}|T1], true, Label),
{[{{if_then},Label}|C1],Cut1};
well_form_goal({once,G}, Tail, Cut, Label) ->
{G1,_} = well_form_goal(G, [{{cut},Label,true}|Tail], true, Label),
{[{{once},Label}|G1],Cut};
%% A variable goal becomes an explicit call/1.
well_form_goal({V}, Tail, Cut, _Label) ->
{[{call,{V}}|Tail],Cut};
well_form_goal(true, Tail, Cut, _Label) -> {Tail,Cut}; %No-op
well_form_goal(fail, _Tail, _Cut, _Label) -> {[fail],false}; %No further
well_form_goal('!', Tail, Cut, Label) ->
{[{{cut},Label,not Cut}|Tail],true};
well_form_goal(Goal, Tail, Cut, _Label) ->
functor(Goal), %Check goal
{[Goal|Tail],Cut}.
%% term_instance(Term, VarNum) -> {Term,NewRepls,NewVarNum}.
%% term_instance(Term, Repls, VarNum) -> {Term,NewRepls,NewVarNum}.
%% Generate a copy of a term with new, fresh unused variables. No
%% bindings from original variables to new variables. It can handle
%% replacing integer variables with overlapping integer ranges. Don't
%% check Term as it should already be checked. Use orddict as there
%% will seldom be many variables and it is fast to setup.
term_instance(A, Vn) -> term_instance(A, orddict:new(), Vn).
term_instance([], Rs, Vn) -> {[],Rs,Vn};
term_instance([H0|T0], Rs0, Vn0) ->
{H,Rs1,Vn1} = term_instance(H0, Rs0, Vn0),
{T,Rs2,Vn2} = term_instance(T0, Rs1, Vn1),
{[H|T],Rs2,Vn2};
%% Each '_' always gets a brand new variable; named variables are
%% replaced consistently through the Rs replacement map.
term_instance({'_'}, Rs, Vn) -> {{Vn},Rs,Vn+1}; %Unique variable
term_instance({V0}, Rs0, Vn0) -> %Other variables
case orddict:find(V0, Rs0) of
{ok,V1} -> {V1,Rs0,Vn0};
error ->
V1 = {Vn0},
{V1,orddict:store(V0, V1, Rs0),Vn0+1}
end;
%% Special case some smaller structures.
term_instance({Atom,Arg}, Rs0, Vn0) ->
{CopyArg,Rs1,Vn1} = term_instance(Arg, Rs0, Vn0),
{{Atom,CopyArg},Rs1,Vn1};
term_instance({Atom,A1,A2}, Rs0, Vn0) ->
{CopyA1,Rs1,Vn1} = term_instance(A1, Rs0, Vn0),
{CopyA2,Rs2,Vn2} = term_instance(A2, Rs1, Vn1),
{{Atom,CopyA1,CopyA2},Rs2,Vn2};
%% General tuple: copy all arguments, keep the functor name.
term_instance(T, Rs0, Vn0) when is_tuple(T) ->
As0 = tl(tuple_to_list(T)),
{As1,Rs1,Vn1} = term_instance(As0, Rs0, Vn0),
{list_to_tuple([element(1, T)|As1]),Rs1,Vn1};
term_instance(A, Rs, Vn) -> {A,Rs,Vn}. %Constant
%% unify_head(Goal, Head, Bindings, VarNum) ->
%% {succeed,Repls,NewBindings,NewVarNum} | fail
%% Unify a goal with a head without creating an instance of the
%% head. This saves us creating many variables which are local to the
%% clause and saves many variable bindings.
%% Head variables are mapped to goal sub-terms through the Rs
%% replacement dict instead of being freshly numbered and bound.
unify_head(Goal, Head, Bs, Vn) ->
unify_head(deref(Goal, Bs), Head, orddict:new(), Bs, Vn).
unify_head(G, H, Rs, Bs, Vn) when ?IS_CONSTANT(G), G == H ->
{succeed,Rs,Bs,Vn};
unify_head(_T, {'_'}, Rs, Bs, Vn) -> {succeed,Rs,Bs,Vn};
unify_head(T, {V0}, Rs, Bs0, Vn) ->
%% Now for the tricky bit!
case orddict:find(V0, Rs) of
{ok,V1} -> %Already have a replacement
case unify(T, V1, Bs0) of
{succeed,Bs1} -> {succeed,Rs,Bs1,Vn};
fail -> fail
end;
error -> %Add a replacement
{succeed,orddict:store(V0, T, Rs),Bs0,Vn}
end;
unify_head({_}=Var, H0, Rs0, Bs, Vn0) ->
%% Must have an instance here.
%% The goal side is an unbound variable, so the head sub-term must
%% be properly instantiated before it can be bound to it.
{H1,Rs1,Vn1} = term_instance(H0, Rs0, Vn0),
{succeed,Rs1,add_binding(Var, H1, Bs),Vn1};
unify_head([GH|GT], [HH|HT], Rs0, Bs0, Vn0) ->
case unify_head(deref(GH, Bs0), HH, Rs0, Bs0, Vn0) of
{succeed,Rs1,Bs1,Vn1} -> unify_head(deref(GT, Bs1), HT, Rs1, Bs1, Vn1);
fail -> fail
end;
unify_head([], [], Rs, Bs, Vn) -> {succeed,Rs,Bs,Vn};
unify_head(G, H, Rs, Bs, Vn) when tuple_size(G) == tuple_size(H),
element(1, G) == element(1, H) ->
unify_head_args(G, H, Rs, Bs, Vn, 2, tuple_size(G));
unify_head(_G, _H, _Rs, _Bs, _Vn) -> fail.
%% Pairwise head-unify tuple arguments from index I through S.
unify_head_args(_G, _H, Rs, Bs, Vn, I, S) when I > S ->
{succeed,Rs,Bs,Vn};
unify_head_args(G, H, Rs0, Bs0, Vn0, I, S) ->
case unify_head(deref(element(I, G), Bs0), element(I, H), Rs0, Bs0, Vn0) of
{succeed,Rs1,Bs1,Vn1} -> unify_head_args(G, H, Rs1, Bs1, Vn1, I+1, S);
fail -> fail
end.
%% body_instance(Body, Tail, Repls, VarNum, Label) ->
%% {Body,NewRepls,NewVarNum}.
%% Generate a copy of a body in a form ready to be interpreted. No
%% bindings from original variables to new variables. It can handle
%% replacing integer variables with overlapping integer ranges. Don't
%% check Term as it should already be checked. Use term_instance to
%% handle goals. N.B. We have to be VERY careful never to go into the
%% original tail as this will cause havoc.
%% Each clause first instantiates the rest of the goal list (Gs0 onto
%% Tail), then splices the construct's sub-bodies directly onto that
%% continuation. All stored cut labels are replaced by Label.
body_instance([{{cut}=Cut,_,Last}|Gs0], Tail, Rs0, Vn0, Label) ->
{Gs1,Rs1,Vn1} = body_instance(Gs0, Tail, Rs0, Vn0, Label),
{[{Cut,Label,Last}|Gs1],Rs1,Vn1};
body_instance([{{disj}=Disj,L0,R0}|Gs0], Tail, Rs0, Vn0, Label) ->
{Gs1,Rs1,Vn1} = body_instance(Gs0, Tail, Rs0, Vn0, Label),
%% Append Gs1 directly to L and R.
{L1,Rs2,Vn2} = body_instance(L0, Gs1, Rs1, Vn1, Label),
{R1,Rs3,Vn3} = body_instance(R0, Gs1, Rs2, Vn2, Label),
{[{Disj,R1}|L1],Rs3,Vn3};
body_instance([{{if_then}=IT,C0,T0,_}|Gs0], Tail, Rs0, Vn0, Label) ->
{Gs1,Rs1,Vn1} = body_instance(Gs0, Tail, Rs0, Vn0, Label),
{T1,Rs2,Vn2} = body_instance(T0, Gs1, Rs1, Vn1, Label),
%% The condition ends in a cut committing to the then-branch.
{C1,Rs3,Vn3} = body_instance(C0, [{{cut},Label,true}|T1], Rs2, Vn2, Label),
%% Append Gs1 directly to T1 to C1.
{[{IT,Label}|C1],Rs3,Vn3};
body_instance([{{if_then_else}=ITE,C0,T0,E0,_}|Gs0], Tail, Rs0, Vn0, Label) ->
{Gs1,Rs1,Vn1} = body_instance(Gs0, Tail, Rs0, Vn0, Label),
{T1,Rs2,Vn2} = body_instance(T0, Gs1, Rs1, Vn1, Label),
{C1,Rs3,Vn3} = body_instance(C0, [{{cut},Label,true}|T1], Rs2, Vn2, Label),
{E1,Rs4,Vn4} = body_instance(E0, Gs1, Rs3, Vn3, Label),
{[{ITE,E1,Label}|C1],Rs4,Vn4};
body_instance([{{once}=Once,G0,_}|Gs0], Tail, Rs0, Vn0, Label) ->
{Gs1,Rs1,Vn1} = body_instance(Gs0, Tail, Rs0, Vn0, Label),
{G1,Rs2,Vn2} = body_instance(G0, [{{cut},Label,true}|Gs1], Rs1, Vn1, Label),
{[{Once,Label}|G1],Rs2,Vn2};
%% Plain goal: copy it with fresh variables.
body_instance([G0|Gs0], Tail, Rs0, Vn0, Label) ->
{Gs1,Rs1,Vn1} = body_instance(Gs0, Tail, Rs0, Vn0, Label),
{G1,Rs2,Vn2} = term_instance(G0, Rs1, Vn1),
{[G1|Gs1],Rs2,Vn2};
body_instance([], Tail, Rs, Vn, _Label) -> {Tail,Rs,Vn}.
%% body_term(Body, Repls, VarNum) -> {Term,NewRepls,NewVarNum}.
%% Generate a copy of a body as a term with new, fresh unused
%% variables. No bindings from original variables to new
%% variables. It can handle replacing integer variables with
%% overlapping integer ranges. Don't check Term as it should already
%% be checked. Use orddict as there will seldom be many variables and
%% it is fast to setup.
%% This is the inverse of well-forming: the flat internal goal list is
%% turned back into a user-level ','/';'/'->'/once/'!' term.
body_term([{{cut},_,_}|Gs0], Rs0, Vn0) ->
{Gs1,Rs1,Vn1} = body_term(Gs0, Rs0, Vn0),
{body_conj('!', Gs1),Rs1,Vn1};
body_term([{{disj},L0,R0}|Gs0], Rs0, Vn0) ->
{Gs1,Rs1,Vn1} = body_term(Gs0, Rs0, Vn0),
{L1,Rs2,Vn2} = body_term(L0, Rs1, Vn1),
{R1,Rs3,Vn3} = body_term(R0, Rs2, Vn2),
{body_conj({';',L1,R1}, Gs1),Rs3,Vn3};
body_term([{{if_then},C0,T0,_}|Gs0], Rs0, Vn0) ->
{Gs1,Rs1,Vn1} = body_term(Gs0, Rs0, Vn0),
{C1,Rs2,Vn2} = body_term(C0, Rs1, Vn1),
{T1,Rs3,Vn3} = body_term(T0, Rs2, Vn2),
{body_conj({'->',C1,T1}, Gs1),Rs3,Vn3};
body_term([{{if_then_else},C0,T0,E0,_}|Gs0], Rs0, Vn0) ->
{Gs1,Rs1,Vn1} = body_term(Gs0, Rs0, Vn0),
{C1,Rs2,Vn2} = body_term(C0, Rs1, Vn1),
{T1,Rs3,Vn3} = body_term(T0, Rs2, Vn2),
{E1,Rs4,Vn4} = body_term(E0, Rs3, Vn3),
{body_conj({';',{'->',C1,T1},E1}, Gs1),Rs4,Vn4};
body_term([{{once},G0,_}|Gs0], Rs0, Vn0) ->
{Gs1,Rs1,Vn1} = body_term(Gs0, Rs0, Vn0),
{G1,Rs2,Vn2} = body_term(G0, Rs1, Vn1),
{body_conj({once,G1}, Gs1),Rs2,Vn2};
body_term([G0|Gs0], Rs0, Vn0) ->
{Gs1,Rs1,Vn1} = body_term(Gs0, Rs0, Vn0),
{G1,Rs2,Vn2} = term_instance(G0, Rs1, Vn1),
{body_conj(G1, Gs1),Rs2,Vn2};
body_term([], Rs, Vn) -> {true,Rs,Vn}.
%% Build a conjunction, dropping a trailing 'true' so we do not emit
%% (G , true) for the last goal.
body_conj(L, true) -> L;
body_conj(L, R) -> {',',L,R}.
%% pred_ind({Name,Arity}) -> Name/Arity predicate indicator term.
pred_ind({N,A}) -> {'/',N,A}.
pred_ind(N, A) -> {'/',N,A}.
%% Bindings
%% Bindings are kept in a map/dict where the key is the variable name.
%% ?NEW_BINDINGS/?ADD_BINDING/?GET_BINDING abstract over the two
%% representations so the rest of the module does not care which one
%% was selected at compile time.
-ifdef(HAS_MAPS).
-define(NEW_BINDINGS(), maps:new()).
-define(ADD_BINDING(V, Val, Bs), maps:put(V, Val, Bs)).
%%-define(ADD_BINDING(V, Val, Bs),
%% begin is_integer(V) orelse io:write(V), maps:put(V, Val, Bs) end).
%% The bindings parameter must have the same name in the parameter
%% list and the macro body. It was previously declared as BS but the
%% body used Bs, so the second argument was never substituted and the
%% macro silently captured whatever variable happened to be named Bs
%% at each call site (it only worked because all call sites used Bs).
-define(GET_BINDING(V, Bs), maps:find(V, Bs)).
-else.
%%-define(BIND, orddict).
-define(BIND, dict).
-define(NEW_BINDINGS(), ?BIND:new()).
-define(ADD_BINDING(V, Val, Bs), ?BIND:store(V, Val, Bs)).
-define(GET_BINDING(V, Bs), ?BIND:find(V, Bs)).
-endif.
%% Thin functional wrappers over the binding-store macros. A variable
%% is always represented as a 1-tuple {Name}.
new_bindings() -> ?NEW_BINDINGS().
add_binding({V}, Val, Bs) ->
?ADD_BINDING(V, Val, Bs).
get_binding({V}, Bs) ->
?GET_BINDING(V, Bs).
%% deref(Term, Bindings) -> Term.
%% Dereference a variable, else just return the term.
%% Follows variable-to-variable chains until an unbound variable or a
%% non-variable term is reached; only the top level is dereferenced.
deref({V}=T0, Bs) ->
case ?GET_BINDING(V, Bs) of
{ok,T1} -> deref(T1, Bs);
error -> T0
end;
deref(T, _) -> T. %Not a variable, return it.
%% deref_list(List, Bindings) -> List.
%% Dereference the top-level checking that it is a list.
%% An unbound variable is an instantiation error and any other
%% non-list term is a type error.
deref_list([], _) -> []; %It already is a list
deref_list([_|_]=L, _) -> L;
deref_list({V}, Bs) ->
case ?GET_BINDING(V, Bs) of
{ok,L} -> deref_list(L, Bs);
error -> instantiation_error()
end;
deref_list(Other, _) -> type_error(list, Other).
%% dderef(Term, Bindings) -> Term.
%% Do a deep dereference. Completely dereference all the variables
%% occurring in a term, even those occurring in a variable's value.
%% Unbound variables are left in place.
dderef(A, _) when ?IS_CONSTANT(A) -> A;
dderef([], _) -> [];
dderef([H0|T0], Bs) ->
[dderef(H0, Bs)|dderef(T0, Bs)];
dderef({V}=Var, Bs) ->
case ?GET_BINDING(V, Bs) of
{ok,T} -> dderef(T, Bs);
error -> Var
end;
%% Compound term: rebuild the tuple with all elements dereferenced.
dderef(T, Bs) when is_tuple(T) ->
Es0 = tuple_to_list(T),
Es1 = dderef(Es0, Bs),
list_to_tuple(Es1).
%% dderef_list(List, Bindings) -> List.
%% Dereference all variables to any depth but check that the
%% top-level is a list.
dderef_list([], _Bs) -> [];
dderef_list([H|T], Bs) ->
[dderef(H, Bs)|dderef_list(T, Bs)];
dderef_list({V}, Bs) ->
case ?GET_BINDING(V, Bs) of
{ok,L} -> dderef_list(L, Bs);
error -> instantiation_error()
end;
dderef_list(Other, _Bs) -> type_error(list, Other).
%% partial_list(Term, Bindings) -> Term.
%% Dereference the list spine and check the term is a (possibly
%% partial) list, i.e. a proper list or one ending in an unbound
%% variable. Note that only the spine is walked; the elements
%% themselves are NOT dereferenced.
partial_list([], _) -> [];
partial_list([H|T0], Bs) ->
T1 = partial_list(T0, Bs),
[H|T1];
partial_list({V}=Var, Bs) ->
case ?GET_BINDING(V, Bs) of
{ok,T} -> partial_list(T, Bs);
error -> Var
end;
partial_list(Other, _) -> type_error(list, Other).
%% initial_goal(Goal) -> {Goal,Bindings,NewVarNum}.
%% initial_goal(Goal, Bindings, VarNum) -> {Goal,NewBindings,NewVarNum}.
%% Check term for well-formedness as an Erlog term and replace '_'
%% variables with unique numbered variables. Error on non-well-formed
%% goals. Named variables are numbered consistently via the bindings,
%% which also let the caller map user names back to variable numbers.
initial_goal(Goal) -> initial_goal(Goal, new_bindings(), 0).
initial_goal({'_'}, Bs, Vn) -> {{Vn},Bs,Vn+1}; %Anonymous variable
initial_goal({Name}=Var0, Bs, Vn) when is_atom(Name) ->
case get_binding(Var0, Bs) of
{ok,Var1} -> {Var1,Bs,Vn};
error ->
Var1 = {Vn},
{Var1,add_binding(Var0, Var1, Bs),Vn+1}
end;
initial_goal([H0|T0], Bs0, Vn0) ->
{H1,Bs1,Vn1} = initial_goal(H0, Bs0, Vn0),
{T1,Bs2,Vn2} = initial_goal(T0, Bs1, Vn1),
{[H1|T1],Bs2,Vn2};
initial_goal([], Bs, Vn) -> {[],Bs,Vn};
%% Compound term: renumber all arguments, keep the functor name.
initial_goal(S, Bs0, Vn0) when ?IS_FUNCTOR(S) ->
As0 = tl(tuple_to_list(S)),
{As1,Bs1,Vn1} = initial_goal(As0, Bs0, Vn0),
{list_to_tuple([element(1, S)|As1]),Bs1,Vn1};
initial_goal(T, Bs, Vn) when ?IS_ATOMIC(T) -> {T,Bs,Vn};
initial_goal(T, _Bs, _Vn) -> type_error(callable, T).
-module(problem2015_03).
-export([solve1/1, solve2/1]).
-type move() :: $^ | $< | $> | $v.
-type moves() :: [ move() ].
-type coord() :: { integer(), integer() }.
-type visited_houses() :: sets:set( coord() ).
-type visitation_strategy() :: fun( ( string(), coord(), visited_houses() ) -> visited_houses() ).
%%% COMMON
%% Wrappers around the sets module so the visited-house bookkeeping
%% reads in domain terms; a house is identified by its coord().
-spec get_number_of_visited_houses( visited_houses() ) -> non_neg_integer().
get_number_of_visited_houses( VisitedHouses ) ->
sets:size( VisitedHouses ).
%% Mark the house at Coord as visited (idempotent — it is a set).
-spec visit_house( coord(), visited_houses() ) -> visited_houses().
visit_house( Coord, VisitedHouses ) ->
sets:add_element( Coord, VisitedHouses ).
-spec no_visited_houses() -> visited_houses().
no_visited_houses() ->
sets:new().
%% Advance one grid step in the direction named by the move character:
%% $^ is north (Y decreases), $v is south, $< is west, $> is east.
-spec next_coord( move(), coord() ) -> coord().
next_coord( Move, { X, Y } ) ->
    { DX, DY } = case Move of
                     $^ -> { 0, -1 };
                     $< -> { -1, 0 };
                     $> -> { 1, 0 };
                     $v -> { 0, 1 }
                 end,
    { X + DX, Y + DY }.
%% Take one step from the current position and record the house at the
%% destination as visited. Shaped for use as a lists:foldl/3 fun.
-spec move_santa( move(), { coord(), visited_houses() } ) -> { coord(), visited_houses() }.
move_santa( Move, { CurrentCoord, VisitedSoFar } ) ->
    Destination = next_coord( Move, CurrentCoord ),
    { Destination, visit_house( Destination, VisitedSoFar ) }.
%% Run the whole move sequence under the given visitation strategy,
%% starting at {0,0} (which itself counts as visited), and return the
%% number of distinct houses visited.
-spec solve( moves(), visitation_strategy() ) -> non_neg_integer().
solve( Input, VisitStrategy ) ->
InitialCoord = { 0, 0 },
InitialVisitedHouses = visit_house( InitialCoord, no_visited_houses() ),
TotalVisitedHouses = VisitStrategy( Input, InitialCoord, InitialVisitedHouses ),
get_number_of_visited_houses( TotalVisitedHouses ).
%%% PART 1
%% Strategy for part 1: a single santa performs every move in order.
-spec visit_strategy1( string(), coord(), visited_houses() ) -> visited_houses().
visit_strategy1( Input, InitialCoord, InitialVisitedHouses ) ->
{ _, TotalVisitedHouses } =
lists:foldl( fun move_santa/2,
{ InitialCoord, InitialVisitedHouses },
Input ),
TotalVisitedHouses.
-spec solve1( [ move() ] ) -> non_neg_integer().
solve1( Input ) ->
solve( Input, fun visit_strategy1/3 ).
%%% PART 2
%% Strategy for part 2: santa and robo-santa alternate moves, both
%% starting at InitialCoord. The fold state holds both positions and
%% swaps them each step, so this step's mover becomes next step's
%% skipper.
-spec visit_strategy2( string(), coord(), visited_houses() ) -> visited_houses().
visit_strategy2( Input, InitialCoord, InitialVisitedHouses ) ->
{ _, _, TotalVisitedHouses } =
lists:foldl( fun( Move, { CoordToMove, CoordToSkip, VisitedHouses } ) ->
{ NextCoord, UpdatedVisitedHouses } = move_santa( Move, { CoordToMove, VisitedHouses } ),
{ CoordToSkip, NextCoord, UpdatedVisitedHouses }
end,
{ InitialCoord, InitialCoord, InitialVisitedHouses },
Input ),
TotalVisitedHouses.
-spec solve2( [ move() ] ) -> non_neg_integer().
solve2( Input ) ->
solve( Input, fun visit_strategy2/3 ).
%%% TESTS
-include_lib("eunit/include/eunit.hrl").
%% Worked examples from the Advent of Code 2015 day 3 puzzle text.
solve1_test_() ->
[ ?_assertEqual( 2, solve1( ">" ) ),
?_assertEqual( 4, solve1( "^>v<" ) ),
?_assertEqual( 2, solve1( "^v^v^v^v^v" ) ) ].
solve2_test_() ->
[ ?_assertEqual( 3, solve2( "^v" ) ),
?_assertEqual( 3, solve2( "^>v<" ) ),
?_assertEqual( 11, solve2( "^v^v^v^v^v" ) ) ].
% @author <NAME>
% @copyright 2007 <NAME> freeyourmind ++ [$@|gmail.com]
% @doc plists is a drop-in replacement for module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>,
% making most list operations parallel. It can operate on each element in
% parallel, for IO-bound operations, on sublists in parallel, for
% taking advantage of multi-core machines with CPU-bound operations, and
% across Erlang nodes, for parallelizing inside a cluster. It handles
% errors and node failures. It can be configured, tuned, and tweaked to
% get optimal performance while minimizing overhead.
%
% Almost all the functions are
% identical to equivalent functions in lists, returning exactly the same
% result, and having both a form with an identical syntax that operates on
% each element in parallel and a form which takes an optional "malt",
% a specification for how to parallize the operation.
%
% fold is the one exception, parallel fold is different from linear fold.
% This module also include a simple mapreduce implementation, and the
% function runmany. All the other functions are implemented with runmany,
% which is as a generalization of parallel list operations.
%
% == Malts ==
% A malt specifies how to break a list into sublists, and can optionally
% specify a timeout, which nodes to run on, and how many processes to start
% per node.
%
% Malt = MaltComponent | [MaltComponent]<br/>
% MaltComponent = SubListSize::integer() | {processes, integer()} |
% {processes, schedulers} |
% {timeout, Milliseconds::integer()} | {nodes, [NodeSpec]}<br/>
% NodeSpec = Node::atom() | {Node::atom(), NumProcesses::integer()} |
% {Node::atom(), schedulers}
%
% An integer can be given to specify the exact size for
% sublists. 1 is a good choice for IO-bound operations and when
% the operation on each list element is expensive. Larger numbers
% minimize overhead and are faster for cheap operations.
%
% If the integer is omitted, and
% you have specified a {processes, X}, the list is
% split into X sublists. This is only
% useful when the time to process each element is close to identical and you
% know exactly how many lines of execution are available to you.
%
% If neither of the above applies, the sublist size defaults to 1.
%
% You can use {processes, X} to have the list processed
% by X processes on the local machine. A good choice for X is the number of
% lines of execution (cores) the machine provides. This can be done
% automatically with {processes, schedulers}, which sets
% the number of processes to the number of schedulers in the erlang virtual
% machine (probably equal to the number of cores).
%
% {timeout, Milliseconds} specifies a timeout. This is a timeout for the entire
% operation, both operating on the sublists and combining the results.
% exit(timeout) is evaluated if the timeout is exceeded.
%
% {nodes, NodeList} specifies that the operation should be done across nodes.
% Every element of NodeList is of the form {NodeName, NumProcesses} or
% NodeName, which means the same as {NodeName, 1}. plists runs
% NumProcesses processes on NodeName concurrently. A good choice for
% NumProcesses is the number of lines of execution (cores) a node provides
% plus one. This ensures the node is completely busy even when
% fetching a new sublist. This can be done automatically with
% {NodeName, schedulers}, in which case
% plists uses a cached value if it has one, and otherwise finds the number of
% schedulers in the remote node and adds one. This will ensure at least one
% busy process per core (assuming the node has a scheduler for each core).
%
% plists is able to recover if a node goes down.
% If all nodes go down, exit(allnodescrashed) is evaluated.
%
% Any of the above may be used as a malt, or may be combined into a list.
% {nodes, NodeList} and {processes, X} may not be combined.
%
% === Examples ===
% % start a process for each element (1-element sublists)<br/>
% 1
%
% % start a process for each ten elements (10-element sublists)<br/>
% 10
%
% % split the list into two sublists and process in two processes<br/>
% {processes, 2}
%
% % split the list into X sublists and process in X processes,<br/>
% % where X is the number of cores in the machine<br/>
% {processes, schedulers}
%
% % split the list into 10-element sublists and process in two processes<br/>
% [10, {processes, 2}]
%
% % timeout after one second. Assumes that a process should be started<br/>
% % for each element.<br/>
% {timeout, 1000}
%
% % Runs 3 processes at a time on apple@desktop,
% and 2 on orange@laptop<br/>
% % This is the best way to utilize all the CPU-power of a dual-core<br/>
% % desktop and a single-core laptop. Assumes that the list should be<br/>
% % split into 1-element sublists.<br/>
% {nodes, [{apple@desktop, 3}, {orange@laptop, 2}]}
%
% Like above, but makes plists figure out how many processes to use.
% {nodes, [{apple@desktop, schedulers}, {orange@laptop, schedulers}]}
%
% % Gives apple and orange three seconds to process the list as<br/>
% % 100-element sublists.<br/>
% [100, {timeout, 3000}, {nodes, [{apple@desktop, 3}, {orange@laptop, 2}]}]
%
% === Aside: Why Malt? ===
% I needed a word for this concept, so maybe my subconsciousness gave me one by
% making me misspell multiply. Maybe it is an acronym for Malt is A List
% Tearing Specification. Maybe it is a beer metaphor, suggesting that code
% only runs in parallel if bribed with spirits. It's jargon, learn it
% or you can't be part of the in-group.
%
% == Messages and Errors ==
% plists assures that no extraneous messages are left in or will later
% enter the message queue. This is guaranteed even in the event of an error.
%
% Errors in spawned processes are caught and propagated to the calling
% process. If you invoke
%
% plists:map(fun (X) -> 1/X end, [1, 2, 3, 0]).
%
% you get a badarith error, exactly like when you use lists:map.
%
% plists uses monitors to watch the processes it spawns. It is not a good idea
% to invoke plists when you are already monitoring processes. If one of them
% does a non-normal exit, plists receives the 'DOWN' message believing it to be
% from one of its own processes. The error propagation system goes into
% effect, which results in the error occurring in the calling process.
%
% == License ==
% The MIT License
%
% Copyright (c) 2007 <NAME>
%
% Permission is hereby granted, free of charge, to any person obtaining a copy
% of this software and associated documentation files (the "Software"), to deal
% in the Software without restriction, including without limitation the rights
% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
% copies of the Software, and to permit persons to whom the Software is
% furnished to do so, subject to the following conditions:
%
% The above copyright notice and this permission notice shall be included in
% all copies or substantial portions of the Software.
%
% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
% THE SOFTWARE.
-module(plists).
-include_lib("otp_vsn/include/otp_vsn.hrl").
-export([all/2, all/3, any/2, any/3, filter/2, filter/3,
fold/3, fold/4, fold/5, foreach/2, foreach/3, map/2, map/3,
partition/2, partition/3, sort/1, sort/2, sort/3,
usort/1, usort/2, usort/3, mapreduce/2, mapreduce/3, mapreduce/5,
runmany/3, runmany/4]).
% Everything here is defined in terms of runmany.
% The following methods are convenient interfaces to runmany.
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Defaults to a malt of 1: one process per list element.
% @spec (Fun, List) -> bool()
all(Fun, List) ->
all(Fun, List, 1).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Each sublist is scanned with lists:all/2; the first sublist failing the
% predicate aborts the whole run by exiting with 'notall', which is caught
% here and reported as false. If no worker exits, every element passed.
% @spec (Fun, List, Malt) -> bool()
all(Fun, List, Malt) ->
    PerSublist = fun (Sublist) ->
                         case lists:all(Fun, Sublist) of
                             true -> nil;
                             false -> exit(notall)
                         end
                 end,
    NoopFuse = fun (_, _) -> nil end,
    try runmany(PerSublist, NoopFuse, List, Malt) of
        _ -> true
    catch
        exit:notall -> false
    end.
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Defaults to a malt of 1: one process per list element.
% @spec (Fun, List) -> bool()
any(Fun, List) ->
any(Fun, List, 1).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Mirror image of all/3: the first sublist containing a matching element
% aborts the run with exit(any), which is caught and reported as true.
% @spec (Fun, List, Malt) -> bool()
any(Fun, List, Malt) ->
    PerSublist = fun (Sublist) ->
                         case lists:any(Fun, Sublist) of
                             true -> exit(any);
                             false -> nil
                         end
                 end,
    NoopFuse = fun (_, _) -> nil end,
    try runmany(PerSublist, NoopFuse, List, Malt) of
        _ -> false
    catch
        exit:any -> true
    end.
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Defaults to a malt of 1: one process per list element.
% @spec (Fun, List) -> list()
filter(Fun, List) ->
filter(Fun, List, 1).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Sublists are filtered independently; the survivors are stitched back
% together with a right-to-left (reverse) append fuse, preserving order.
% @spec (Fun, List, Malt) -> list()
filter(Fun, List, Malt) ->
    Keep = fun (Sublist) -> lists:filter(Fun, Sublist) end,
    runmany(Keep, {reverse, fun erlang:'++'/2}, List, Malt).
% Note that with parallel fold there is not foldl and foldr,
% instead just one fold that can fuse Accumulators.
% @doc Like below, but assumes 1 as the Malt. This function is almost useless,
% and is intended only to aid converting code from using lists to plists.
% @spec (Fun, InitAcc, List) -> term()
fold(Fun, InitAcc, List) ->
fold(Fun, Fun, InitAcc, List, 1).
% @doc Like below, but uses the Fun as the Fuse by default.
% @spec (Fun, InitAcc, List, Malt) -> term()
fold(Fun, InitAcc, List, Malt) ->
fold(Fun, Fun, InitAcc, List, Malt).
% @doc fold is more complex when made parallel. There is no foldl and foldr;
% accumulators are not passed in any defined order. The list is split into
% sublists which are folded together. Fun is identical to the function passed
% to lists:fold[lr]: it takes (an element, the accumulator) and returns a new
% accumulator. It performs the initial folding of each sublist, each sublist
% starting from InitAcc. Fuse fuses the sublist results together: it takes
% (Result1, Result2) and returns a new result. By default sublist results are
% fused left to right, each fuse result being fed into the first argument of
% the next fuse; the result of the last fuse is the overall result.
%
% Fusing may also run in parallel using a recursive algorithm,
% by specifying the fuse as {recursive, Fuse}. See
% the discussion in {@link runmany/4}.
%
% Malt is the malt for the initial folding of sublists, and for the
% possible recursive fuse.
% @spec (Fun, Fuse, InitAcc, List, Malt) -> term()
fold(Fun, Fuse, InitAcc, List, Malt) ->
    runmany(fun (Sublist) -> lists:foldl(Fun, InitAcc, Sublist) end,
            Fuse, List, Malt).
% @doc Similar to foreach in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>
% except it makes no guarantee about the order it processes list elements.
% Defaults to a malt of 1: one process per list element.
% @spec (Fun, List) -> void()
foreach(Fun, List) ->
foreach(Fun, List, 1).
% @doc Similar to foreach in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>
% except it makes no guarantee about the order it processes list elements.
% Runs for side effects only; all sublist results fuse to the atom ok.
% @spec (Fun, List, Malt) -> void()
foreach(Fun, List, Malt) ->
    runmany(fun (Sublist) -> lists:foreach(Fun, Sublist) end,
            fun (_, _) -> ok end,
            List, Malt).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Defaults to a malt of 1: one process per list element.
% @spec (Fun, List) -> list()
map(Fun, List) ->
map(Fun, List, 1).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Each sublist is mapped independently; the mapped sublists are appended
% back together right-to-left (reverse fuse), preserving element order.
% @spec (Fun, List, Malt) -> list()
map(Fun, List, Malt) ->
    MapSublist = fun (Sublist) -> lists:map(Fun, Sublist) end,
    runmany(MapSublist, {reverse, fun erlang:'++'/2}, List, Malt).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Defaults to a malt of 1: one process per list element.
% @spec (Fun, List) -> {list(), list()}
partition(Fun, List) ->
partition(Fun, List, 1).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Each sublist is partitioned on its own; the {Satisfying, NotSatisfying}
% pairs are appended right-to-left so both output lists keep their order.
% @spec (Fun, List, Malt) -> {list(), list()}
partition(Fun, List, Malt) ->
    Split = fun (Sublist) -> lists:partition(Fun, Sublist) end,
    Fuse = {reverse,
            fun ({Yes1, No1}, {Yes2, No2}) -> {Yes1 ++ Yes2, No1 ++ No2} end},
    runmany(Split, Fuse, List, Malt).
% SORTMALT needs to be tuned
% (default sublist size for sort/2 and usort/2; see sort/3).
-define(SORTMALT, 100).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Sorts with the default =< ordering.
% @spec (List) -> list()
sort(List) ->
sort(fun (A, B) ->
A =< B
end,
List).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Uses the default sort malt (?SORTMALT-element sublists).
% @spec (Fun, List) -> list()
sort(Fun, List) ->
sort(Fun, List, ?SORTMALT).
% @doc This version lets you specify your own malt for sort.
%
% sort splits the list into sublists and sorts them, and it merges the
% sorted lists together. These are done in parallel. Each sublist is
% sorted in a separate process, and each merging of results is done in a
% separate process (recursive fuse). Malt defaults to 100, causing the list
% to be split into 100-element sublists.
% @spec (Fun, List, Malt) -> list()
sort(Fun, List, Malt) ->
    SortSublist = fun (Sublist) -> lists:sort(Fun, Sublist) end,
    MergePair = fun (Sorted1, Sorted2) -> lists:merge(Fun, Sorted1, Sorted2) end,
    runmany(SortSublist, {recursive, MergePair}, List, Malt).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Sorts with the default =< ordering, removing duplicates.
% @spec (List) -> list()
usort(List) ->
usort(fun (A, B) ->
A =< B
end,
List).
% @doc Same semantics as in module
% <a href="http://www.erlang.org/doc/man/lists.html">lists</a>.
% Uses the default sort malt (?SORTMALT-element sublists).
% @spec (Fun, List) -> list()
usort(Fun, List) ->
usort(Fun, List, ?SORTMALT).
% @doc This version lets you specify your own malt for usort.
%
% usort splits the list into sublists and sorts them, and it merges the
% sorted lists together. These are done in parallel. Each sublist is
% sorted in a separate process, and each merging of results is done in a
% separate process (recursive fuse). Malt defaults to 100, causing the list
% to be split into 100-element sublists.
%
% usort removes duplicate elements while it sorts.
% @spec (Fun, List, Malt) -> list()
usort(Fun, List, Malt) ->
    SortSublist = fun (Sublist) -> lists:usort(Fun, Sublist) end,
    MergePair = fun (Sorted1, Sorted2) -> lists:umerge(Fun, Sorted1, Sorted2) end,
    runmany(SortSublist, {recursive, MergePair}, List, Malt).
% @doc Like below, assumes default MapMalt of 1.
% @spec (MapFunc, List) -> Dict
% MapFunc = (term()) -> DeepListOfKeyValuePairs
% DeepListOfKeyValuePairs = [DeepListOfKeyValuePairs] | {Key, Value}
mapreduce(MapFunc, List) ->
mapreduce(MapFunc, List, 1).
% @doc Like below, but uses a default reducer that collects all
% {Key, Value} pairs into a
% <a href="http://www.erlang.org/doc/man/dict.html">dict</a>,
% with values {Key, [Value1, Value2...]}.
% This dict is returned as the result.
mapreduce(MapFunc, List, MapMalt) ->
mapreduce(MapFunc, List, dict:new(), fun add_key/3, MapMalt).
% @doc This is a very basic mapreduce. You won't write a Google-rivaling
% search engine with it. It has no equivalent in lists. Each
% element in the list is run through the MapFunc, which produces either
% a {Key, Value} pair, or a lists of key value pairs, or a list of lists of
% key value pairs...etc. A reducer process runs in parallel with the mapping
% processes, collecting the key value pairs. It starts with a state given by
% InitState, and for each {Key, Value} pair that it receives it invokes
% ReduceFunc(OldState, Key, Value) to compute its new state. mapreduce returns
% the reducer's final state.
%
% MapMalt is the malt for the mapping operation, with a default value of 1,
% meaning each element of the list is mapped by a separate process.
%
% mapreduce requires OTP R11B, or it may leave monitoring messages in the
% message queue.
% @spec (MapFunc, List, InitState, ReduceFunc, MapMalt) -> Dict
% MapFunc = (term()) -> DeepListOfKeyValuePairs
% DeepListOfKeyValuePairs = [DeepListOfKeyValuePairs] | {Key, Value}
% ReduceFunc = (OldState::term(), Key::term(), Value::term() -> NewState::term()
mapreduce(MapFunc, List, InitState, ReduceFunc, MapMalt) ->
Parent = self(),
% Monitor the reducer so a crash inside ReduceFunc propagates to the
% caller instead of deadlocking the receive below.
{Reducer, ReducerRef} =
erlang:spawn_monitor(fun () ->
reducer(Parent, 0, InitState, ReduceFunc)
end),
% Each mapper sends its mapped sublist to the reducer and yields 1, so
% runmany's sum fuse counts exactly how many messages were sent.
MapFunc2 = fun (L) ->
Reducer ! lists:map(MapFunc, L),
1
end,
SentMessages = try runmany(MapFunc2, fun (A, B) -> A+B end, List, MapMalt)
catch
exit:Reason ->
% Mapping failed: detach from and stop the reducer, then re-raise.
erlang:demonitor(ReducerRef, [flush]),
Reducer ! die,
exit(Reason)
end,
Reducer ! {mappers, done, SentMessages},
Results = receive
{Reducer, Results2} ->
Results2;
{'DOWN', _, _, Reducer, Reason2} ->
exit(Reason2)
end,
% Drain the reducer's normal 'DOWN' message so nothing is left behind
% in the caller's mailbox.
receive
{'DOWN', _, _, Reducer, normal} ->
nil
end,
Results.
% Collects mapper messages, folding each batch of {Key, Value} pairs into
% State via Func. Terminates when it has received exactly the number of
% messages announced by {mappers, done, N}, then reports State to Parent.
% 'die' aborts without reporting (used when mapping failed).
% NOTE(review): the final catch-all clause would also consume a
% {mappers, done, N} message whose count does not yet match NumReceived;
% this assumes the done message always arrives after all mapper messages --
% confirm this holds in the distributed case.
reducer(Parent, NumReceived, State, Func) ->
receive
die ->
nil;
{mappers, done, NumReceived} ->
Parent ! {self (), State};
Keys ->
reducer(Parent, NumReceived + 1, each_key(State, Func, Keys), Func)
end.
% Folds Func over an arbitrarily nested list of {Key, Value} pairs,
% threading the reducer state through every pair encountered, in
% left-to-right order.
each_key(State, Func, {Key, Value}) ->
    Func(State, Key, Value);
each_key(State, Func, Nested) when is_list(Nested) ->
    lists:foldl(fun (Item, Acc) -> each_key(Acc, Func, Item) end,
                State, Nested).
% Default reducer for mapreduce/3: collects values into a dict of
% Key -> [Value1, Value2, ...], preserving per-key insertion order.
% dict:append/3 already creates the entry with [Value] when Key is absent,
% so the previous is_key/2 check performed a redundant second lookup.
add_key(Dict, Key, Value) ->
    dict:append(Key, Value, Dict).
% @doc Like below, but assumes a Malt of 1,
% meaning each element of the list is processed by a separate process.
% @spec (Fun, Fuse, List) -> term()
runmany(Fun, Fuse, List) ->
runmany(Fun, Fuse, List, 1).
% Begin internal stuff (though runmany/4 is exported).
% @doc All of the other functions are implemented with runmany. runmany
% takes a List, splits it into sublists, and starts processes to operate on
% each sublist, all done according to Malt. Each process passes its sublist
% into Fun and sends the result back.
%
% The results are then fused together to get the final result. There are two
% ways this can operate, linearly and recursively. If Fuse is a function,
% a fuse is done linearly left-to-right on the sublists, the results
% of processing the first and second sublists being passed to Fuse, then
% the result of the first fuse and processing the third sublist, and so on. If
% Fuse is {reverse, FuseFunc}, then a fuse is done right-to-left, the results
% of processing the second-to-last and last sublists being passed to FuseFunc,
% then the results of processing the third-to-last sublist and
% the results of the first fuse, and so forth.
% Both methods preserve the original order of the lists elements.
%
% To do a recursive fuse, pass Fuse as {recursive, FuseFunc}.
% The recursive fuse makes no guarantee about the order the results of
% sublists, or the results of fuses are passed to FuseFunc. It
% continues fusing pairs of results until it is down to one.
%
% Recursive fuse is done in parallel with processing the sublists, and a
% process is spawned to fuse each pair of results. It is a parallelized
% algorithm. Linear fuse is done after all results of processing sublists
% have been collected, and can only run in a single process.
%
% Even if you pass {recursive, FuseFunc}, a recursive fuse is only done if
% the malt contains {nodes, NodeList} or {processes, X}. If this is not the
% case, a linear fuse is done.
% @spec (Fun, Fuse, List, Malt) -> term()
% Fun = (list()) -> term()
% Fuse = FuseFunc | {recursive, FuseFunc}
% FuseFunc = (term(), term()) -> term()
% A malt given as a list is processed term by term by runmany/6.
runmany(Fun, Fuse, List, Malt) when is_list(Malt) ->
runmany(Fun, Fuse, List, local, no_split, Malt);
% A single malt component is wrapped into a list and retried.
runmany(Fun, Fuse, List, Malt) ->
runmany(Fun, Fuse, List, [Malt]).
% runmany/6 interprets the malt term by term: arguments are
% (Fun, Fuse, List, Nodes | local, SublistSize | no_split, RemainingMalt).
% An integer malt component fixes the sublist size.
runmany(Fun, Fuse, List, Nodes, no_split, [MaltTerm|Malt]) when is_integer(MaltTerm) ->
runmany(Fun, Fuse, List, Nodes, MaltTerm, Malt);
% run a process for each scheduler
runmany(Fun, Fuse, List, local, Split, [{processes, schedulers}|Malt]) ->
S = erlang:system_info(schedulers),
runmany(Fun, Fuse, List, local, Split, [{processes, S}|Malt]);
% Split the list into X sublists, where X is the number of processes
% (rounding the sublist size up when the length is not divisible by X).
runmany(Fun, Fuse, List, local, no_split, [{processes, X}|_]=Malt) ->
L = length(List),
case L rem X of
0 ->
runmany(Fun, Fuse, List, local, L div X, Malt);
_ ->
runmany(Fun, Fuse, List, local, L div X + 1, Malt)
end;
% run X process on local machine
runmany(Fun, Fuse, List, local, Split, [{processes, X}|Malt]) ->
Nodes = lists:duplicate(X, node()),
runmany(Fun, Fuse, List, Nodes, Split, Malt);
% A timeout wraps the rest of the run in a watchdog timer process; the
% timer messages are consumed in receivefrom/1 and cluster_runmany/6, and
% always cleaned up afterwards.
runmany(Fun, Fuse, List, Nodes, Split, [{timeout, X}|Malt]) ->
Parent = self(),
Timer = spawn(fun () ->
receive
stoptimer ->
Parent ! {timerstopped, self()}
after X ->
Parent ! {timerrang, self()},
receive
stoptimer ->
Parent ! {timerstopped, self()}
end
end
end),
Ans = try runmany(Fun, Fuse, List, Nodes, Split, Malt)
catch
% we really just want the after block, the syntax
% makes this catch necessary.
willneverhappen ->
nil
after
Timer ! stoptimer,
cleanup_timer(Timer)
end,
Ans;
% Expand {nodes, NodeList} into one node-name entry per worker process.
runmany(Fun, Fuse, List, local, Split, [{nodes, NodeList}|Malt]) ->
Nodes = lists:foldl(fun ({Node, schedulers}, A) ->
X = schedulers_on_node(Node) + 1,
lists:reverse(lists:duplicate(X, Node), A);
({Node, X}, A) ->
lists:reverse(lists:duplicate(X, Node), A);
(Node, A) ->
[Node|A]
end,
[], NodeList),
runmany(Fun, Fuse, List, Nodes, Split, Malt);
% local recursive fuse, for when we weren't invoked with {processes, X}
% or {nodes, NodeList}. Degenerates recursive fuse into linear fuse.
runmany(Fun, {recursive, Fuse}, List, local, Split, []) ->
runmany(Fun, Fuse, List, local, Split, []);
% by default, operate on each element separately
runmany(Fun, Fuse, List, Nodes, no_split, []) ->
runmany(Fun, Fuse, List, Nodes, 1, []);
runmany(Fun, Fuse, List, local, Split, []) ->
List2 = splitmany(List, Split),
local_runmany(Fun, Fuse, List2);
runmany(Fun, Fuse, List, Nodes, Split, []) ->
List2 = splitmany(List, Split),
cluster_runmany(Fun, Fuse, List2, Nodes).
% Waits for the timeout timer's final {timerstopped, _} acknowledgement,
% discarding any {timerrang, _} message, so neither is left in the
% caller's mailbox.
cleanup_timer(Timer) ->
receive
{timerrang, Timer} ->
cleanup_timer(Timer);
{timerstopped, Timer} ->
nil
end.
% Returns the scheduler count for Node, caching it in the calling process's
% process dictionary under 'plists_schedulers_on_nodes' so remote nodes are
% only probed once per calling process. The original did a dict:is_key/2
% check followed by dict:fetch/2 and duplicated the miss handling across
% two branches; dict:find/2 does it with a single lookup.
schedulers_on_node(Node) ->
    Cache = case get(plists_schedulers_on_nodes) of
                undefined -> dict:new();
                Dict -> Dict
            end,
    case dict:find(Node, Cache) of
        {ok, X} ->
            X;
        error ->
            X = determine_schedulers(Node),
            put(plists_schedulers_on_nodes, dict:store(Node, X, Cache)),
            X
    end.
% Spawns a probe process on Node that reports erlang:system_info(schedulers)
% back. Returns 0 if the probe dies abnormally (e.g. the node is
% unreachable). The probe is monitored and its 'DOWN' message is always
% consumed before returning, keeping the mailbox clean.
determine_schedulers(Node) ->
Parent = self(),
Child = spawn(Node, fun () ->
Parent ! {self(), erlang:system_info(schedulers)}
end),
erlang:monitor(process, Child),
receive
{Child, X} ->
receive
{'DOWN', _, _, Child, _Reason} ->
nil
end,
X;
{'DOWN', _, _, Child, Reason} when Reason =/= normal ->
0
end.
% local runmany, for when we weren't invoked with {processes, X}
% or {nodes, NodeList}. Every sublist is processed in parallel.
% Spawns one monitored process per sublist, collects the results in sublist
% order, consumes each worker's normal 'DOWN' message, and applies the fuse.
% Any abnormal worker exit (or timer ring) is thrown by receivefrom/1 and
% turned into an error by handle_error/3, which also kills the siblings.
local_runmany(Fun, Fuse, List) ->
Parent = self (),
Pids = lists:map(fun (L) ->
F = fun () ->
Parent !
{self (), Fun(L)}
end,
{Pid, _} = erlang:spawn_monitor(F),
Pid
end,
List),
Answers = try lists:map(fun receivefrom/1, Pids)
catch throw:Message ->
{BadPid, Reason} = Message,
handle_error(BadPid, Reason, Pids)
end,
lists:foreach(fun (Pid) ->
normal_cleanup(Pid)
end, Pids),
fuse(Fuse, Answers).
% Waits for the result of one worker. Throws {BadPid, Reason} if any
% monitored process exits abnormally, and {nil, timeout} when the malt's
% timeout timer rings; both are caught in local_runmany/3.
receivefrom(Pid) ->
receive
{Pid, R} ->
R;
{'DOWN', _, _, BadPid, Reason} when Reason =/= normal ->
throw({BadPid, Reason});
{timerrang, _} ->
throw({nil, timeout})
end.
% Convert List into [{Number, Sublist}]: each sublist is tagged with its
% position so results can be re-ordered after the (unordered) cluster run.
% The task list is produced highest-number-first, exactly like the fold in
% the original implementation.
cluster_runmany(Fun, Fuse, List, Nodes) ->
    Numbered = lists:zip(lists:seq(0, length(List) - 1), List),
    cluster_runmany(Fun, Fuse, lists:reverse(Numbered), Nodes, [], []).
% cluster_runmany/6 is the cluster scheduler. Arguments:
% (Fun, Fuse, TaskList, IdleNodes, Running, Results) where Running is a list
% of {Pid, Node, Task} for in-flight work and Results is [{Num, Result}].
% Add a pair of results into the TaskList as a fusing task
cluster_runmany(Fun, {recursive, Fuse}, [], Nodes, Running,
[{_, R1}, {_, R2}|Results]) ->
cluster_runmany(Fun, {recursive, Fuse}, [{fuse, R1, R2}], Nodes,
Running, Results);
% recursive fuse done, return result
cluster_runmany(_, {recursive, _Fuse}, [], _Nodes, [], [{_, Result}]) ->
Result;
% edge case where we are asked to do nothing
cluster_runmany(_, {recursive, _Fuse}, [], _Nodes, [], []) ->
[];
% We're done, now we just have to [linear] fuse the results
% (sorted back into sublist order using the Num tags).
cluster_runmany(_, Fuse, [], _Nodes, [], Results) ->
fuse(Fuse, lists:map(fun ({_, R}) -> R end,
lists:sort(fun ({A, _}, {B, _}) ->
A =< B
end,
lists:reverse(Results))));
% We have a ready node and a sublist or fuse to be processed, so we start
% a new process
cluster_runmany(Fun, Fuse, [Task|TaskList], [N|Nodes], Running, Results) ->
Parent = self(),
case Task of
{Num, L2} ->
Fun2 = fun () ->
Parent ! {self(), Num, Fun(L2)}
end;
{fuse, R1, R2} ->
{recursive, FuseFunc} = Fuse,
Fun2 = fun () ->
Parent ! {self(), fuse, FuseFunc(R1, R2)}
end
end,
% Wrap the task so failures are reported to Parent as messages instead
% of relying on 'DOWN' reasons; siblingdied kills are silenced.
Fun3 = fun () ->
try Fun2()
catch
exit:siblingdied ->
ok;
exit:Reason ->
Parent ! {self(), error, Reason};
?OTP_VSN_IF_HAS_ST_MATCHING(
error:R:ST ->
Parent ! {self(), error, {R, ST}};
,error:R ->
Parent ! {self(), error, {R, erlang:get_stacktrace()}};
)
?OTP_VSN_IF_HAS_ST_MATCHING(
throw:R:ST ->
Parent ! {self(), error, {{nocatch, R}, ST}}
,throw:R ->
Parent ! {self(), error, {{nocatch, R}, erlang:get_stacktrace()}}
)
end
end,
Pid = spawn(N, Fun3),
erlang:monitor(process, Pid),
cluster_runmany(Fun, Fuse, TaskList, Nodes, [{Pid, N, Task}|Running], Results);
% We can't start a new process, but can watch over already running ones
cluster_runmany(Fun, Fuse, TaskList, Nodes, Running, Results) when length(Running) > 0 ->
receive
{_Pid, error, Reason} ->
RunningPids = lists:map(fun ({Pid, _, _}) ->
Pid
end,
Running),
handle_error(junkvalue, Reason, RunningPids);
{Pid, Num, Result} ->
% throw out the exit message, Reason should be
% normal, noproc, or noconnection
receive {'DOWN', _, _, Pid, _Reason} ->
nil
end,
{Running2, FinishedNode, _} = delete_running(Pid, Running, []),
cluster_runmany(Fun, Fuse, TaskList,
[FinishedNode|Nodes], Running2, [{Num, Result}|Results]);
{timerrang, _} ->
RunningPids = lists:map(fun ({Pid, _, _}) ->
Pid
end,
Running),
handle_error(nil, timeout, RunningPids);
% node failure: requeue the lost task; the dead node is NOT returned
% to the idle pool.
{'DOWN', _, _, Pid, noconnection} ->
{Running2, _DeadNode, Task} = delete_running(Pid, Running, []),
cluster_runmany(Fun, Fuse, [Task|TaskList], Nodes,
Running2, Results);
% could a noproc exit message come before the message from
% the process? we are assuming it can't.
% this clause is unlikely to get invoked due to cluster_runmany's
% spawned processes. It will still catch errors in mapreduce's
% reduce process, however.
{'DOWN', _, _, BadPid, Reason} when Reason =/= normal ->
RunningPids = lists:map(fun ({Pid, _, _}) ->
Pid
end,
Running),
handle_error(BadPid, Reason, RunningPids)
end;
% We have data, but no nodes either available or occupied
cluster_runmany(_, _, [_Non|_Empty], []=_Nodes, []=_Running, _) ->
exit(allnodescrashed).
% Removes the {Pid, Node, Task} entry for Pid from the running-task list.
% Returns {RemainingEntries, Node, Task}; the remaining entries come back
% as the unscanned suffix followed by the reversed scanned prefix, exactly
% as the original clause-per-case formulation produced.
delete_running(Pid, [Head | Tail], Seen) ->
    case Head of
        {Pid, Node, Task} ->
            {Tail ++ Seen, Node, Task};
        _ ->
            delete_running(Pid, Tail, [Head | Seen])
    end.
% Propagates a worker failure: kills every listed worker with reason
% 'siblingdied', drains any result messages and 'DOWN' notifications they
% produced, and finally exits the calling process with the original Reason.
% BadPid (the already-dead worker) is skipped during cleanup.
handle_error(BadPid, Reason, Pids) ->
lists:foreach(fun (Pid) ->
exit(Pid, siblingdied)
end, Pids),
lists:foreach(fun (Pid) ->
error_cleanup(Pid, BadPid)
end, Pids),
exit(Reason).
% Drains any pending result messages and the final 'DOWN' notification of
% Pid. The failed worker BadPid already delivered its 'DOWN' message, so it
% is skipped (first clause).
error_cleanup(BadPid, BadPid) ->
ok;
error_cleanup(Pid, BadPid) ->
receive
{Pid, _} ->
error_cleanup(Pid, BadPid);
{Pid, _, _} ->
error_cleanup(Pid, BadPid);
{'DOWN', _, _, Pid, _Reason} ->
ok
end.
% Consumes the 'DOWN' notification of a worker that finished, keeping the
% caller's mailbox clean.
normal_cleanup(Pid) ->
receive
{'DOWN', _, _, Pid, _Reason} ->
ok
end.
% edge case
fuse(_, []) ->
[];
% {reverse, F}: fuse right-to-left. Start from the last result and fold
% the remaining results (walked in reverse) in as F(Earlier, Accumulated).
fuse({reverse, _}=Fuse, Results) ->
[RL|ResultsR] = lists:reverse(Results),
fuse(Fuse, ResultsR, RL);
% Plain function: fuse left-to-right as F(Accumulated, Next).
fuse(Fuse, [R1|Results]) ->
fuse(Fuse, Results, R1).
fuse({reverse, FuseFunc}=Fuse, [R2|Results], R1) ->
fuse(Fuse, Results, FuseFunc(R2, R1));
fuse(Fuse, [R2|Results], R1) ->
fuse(Fuse, Results, Fuse(R1, R2));
fuse(_, [], R) ->
R.
% Splits a list into a list of sublists, each of size Size,
% except for the last element which is less if the original list
% could not be evenly divided into Size-sized lists.
splitmany(List, Size) ->
    splitmany(List, [], Size).

splitmany([], Acc, _Size) ->
    lists:reverse(Acc);
splitmany(List, Acc, Size) ->
    {Chunk, Rest} = split(Size, List),
    splitmany(Rest, [Chunk | Acc], Size).

% Like lists:split/2, except a list shorter than Size yields the whole
% list with an empty remainder instead of failing.
split(Size, List) ->
    split(Size, List, []).

split(0, Rest, Acc) ->
    {lists:reverse(Acc), Rest};
split(_Size, [], Acc) ->
    {lists:reverse(Acc), []};
split(Size, [Head | Rest], Acc) ->
    split(Size - 1, Rest, [Head | Acc]).
%%%-----------------------------------------------------------------------------
%%% @doc Mempool process group consists of `N` memcell instances `M`. They have
%%% all the same capacity `C`. We follow a simple rules for scaling and deciding
%%% which memcell should handle the next call.
%%%
%%% To pick a memcell which should handle the next call, we pick a random
%%% number in the interval <0;`N`). The memcell at that index will be routed
%%% the next request.
%%%
%%% To scale the mempool, we use probability. To fill the first instance, we
%%% need `C` insert messages. To fill the second instance, we need 2 * `C`
%%% insert messages as approximately half of those insert messages are going to
%%% be routed to the first cell (based on chance). Generally:
%%%
%%% NewInsertsUntilNewCell = `C` * `N`
%%%
%%% If a cell does not include requested number of messages, we query a random
%%% cell again until
%%% a) The number of messages hasn't changed
%%% b) We collected requested number of messages
%%% This is a mechanism on handling the issue of querying newly spawned node.
%%% Each mempool should have a reasonable env var which limits the number of
%%% messages a client can query in one request.
%%%
%%% For this module, we picked behaviour of `gen_server`. Gen server provides
%%% useful functionality for handling async requests. Insert requests might
%%% scale the memtissue, which is unnecessary to wait for from client side of
%%% view.
%%%
%%% @end
%%%-----------------------------------------------------------------------------
-module(memtissue).
-behavior(gen_server).
-include("../prelude.hrl").
-export([insert/1, get/1, start_link/1, count_cells/0]).
-export([flush/0]).
-export([init/1, handle_call/3, handle_cast/2]).
%%------------------------------------------------------------------------------
%% @doc Holds state for this gen server implementation.
%%
%% @end
%%------------------------------------------------------------------------------
-record(memtissue, {
%% Pid of the memcell supervisor owning all cell workers.
supervisor :: pid(),
%% Cached pids of the running memcells; refreshed by flush/0.
memcells :: array(),
%% How many inserts until next cell.
until_next_cell :: integer(),
%% How many elements does a single cell store at most.
memcell_capacity :: integer()
}).
%%%
%%% Exported functions
%%%
%%------------------------------------------------------------------------------
%% @doc Initializes new memory tissue with given capacity and registers the
%% gen_server locally under the name 'memtissue'.
%%
%% The body asserts and returns {ok, Pid} from gen_server:start_link/4, so
%% the previous `-> ok` spec was wrong and would be flagged by dialyzer.
%% @end
%%------------------------------------------------------------------------------
-spec start_link(capacity()) -> {ok, pid()}.
start_link(Capacity) ->
    %% TODO: Save statistics/trace/log to file based on environment.
    %% See http://erlang.org/doc/man/gen_server.html#start_link-2 for more
    %% information on how this can be achieved with debug options.
    {ok, _} = gen_server:start_link({local, memtissue}, ?MODULE, Capacity, []).
%%------------------------------------------------------------------------------
%% @doc Inserts a new element to one of the memcells. Every now and then this
%% all results in scaling the memtissue.
%%
%% Asynchronous: the caller does not wait for the insert to be handled.
%% NOTE(review): gen_server:cast/2 always returns ok, so the {error, term()}
%% alternative in the spec looks unreachable -- confirm.
%% @end
%%------------------------------------------------------------------------------
-spec insert(element()) -> ok | {error, term()}.
insert(Element) -> gen_server:cast(memtissue, {insert, Element}).
%%------------------------------------------------------------------------------
%% @doc Returns a callback function which can be used as callback to get N
%% messages. The main reason for this function to be a callback is to avoid
%% blocking the memtissue process.
%%
%% Example:
%% ```
%% N = 10,
%% {ok, Callback} = memtissue:get(N),
%% {Len, Messages} = Callback().
%% ```
%%
%% Synchronous gen_server call; the returned fun performs the actual
%% collection when invoked by the caller.
%% @end
%%------------------------------------------------------------------------------
-spec get(number_of_elements()) -> {ok, fun(() -> [element()])}.
get(N) -> gen_server:call(memtissue, {get, N}).
%%------------------------------------------------------------------------------
%% @doc Counts the memcells currently tracked by the tissue (running or in
%% the middle of a supervisor restart).
%%
%% @end
%%------------------------------------------------------------------------------
-spec count_cells() -> {ok, integer()}.
count_cells() ->
    gen_server:call(memtissue, count_cells).
%%------------------------------------------------------------------------------
%% @doc Asks the memtissue to rebuild its cached array of memcell pids.
%%
%% The cache can go stale: a cell captured while its supervisor was
%% restarting it shows up as unusable and gets skipped. Re-reading the
%% supervisor's children every now and then works around that.
%%
%% @end
%%------------------------------------------------------------------------------
-spec flush() -> ok.
flush() ->
    gen_server:cast(memtissue, flush).
%%%
%%% Callback functions from gen_server
%%%
%% Boots the memcell supervisor with one initial cell and caches the
%% resulting child pids in an array.
init(Capacity) ->
    {ok, Sup} = memcell_sup:start_link(Capacity),
    %% Supervisor starts with a single child.
    {ok, _} = supervisor:start_child(Sup, []),
    CellPids = memcell_sup:which_children(Sup),
    {ok, #memtissue{
        supervisor = Sup,
        memcells = array:from_list(CellPids),
        until_next_cell = Capacity,
        memcell_capacity = Capacity
    }}.
%% Scales the mempool: when the insert countdown hits 1, try to grow the
%% tissue by one memcell before handling the insert itself.
handle_cast(
    Call = {insert, _},
    State = #memtissue {until_next_cell=1, memcells=Cells, memcell_capacity=Cap}
) ->
    %% NOTE(review): with `=<' the pool can grow to ?MAX_MEMCELLS + 1 cells,
    %% because a child is still started while size =:= ?MAX_MEMCELLS —
    %% confirm whether `<' was intended.
    case array:size(Cells) =< ?MAX_MEMCELLS of
        true ->
            %% The ID of the new worker is going to be N as workers IDs start at 0.
            N = array:size(State#memtissue.memcells),
            supervisor:start_child(State#memtissue.supervisor, []),
            %% How many insert requests until a next new worker.
            UntilNextCell = (N + 1) * Cap,
            %% flush/0 is a cast to ourselves, so it cannot deadlock; the
            %% cached pid array is rebuilt once that message is processed.
            ok = flush(),
            %% Re-dispatch the pending insert with the refreshed countdown;
            %% it now matches the countdown-decrementing clause below.
            handle_cast(Call, State#memtissue { until_next_cell=UntilNextCell });
        false ->
            %% Growth is capped: pin the countdown to 0 so the "no scaling"
            %% clause handles this and every further insert.
            handle_cast(Call, State#memtissue { until_next_cell=0 })
    end;
%% If the tissue reached maximum number of cells, we don't do anything special
%% on inserts.
handle_cast({insert, Element}, State = #memtissue {until_next_cell=0}) ->
    Pid = random_cell(State),
    ok = memcell:insert(Element, Pid),
    {noreply, State};
%% We decrement the state pointer for insert by 1.
handle_cast({insert, Element}, State = #memtissue {until_next_cell=N}) ->
    Pid = random_cell(State),
    ok = memcell:insert(Element, Pid),
    {noreply, State#memtissue {until_next_cell=N - 1}};
%% Reloads memcells by reading supervisor's children and loading the pids into
%% an array.
handle_cast(flush, State) ->
    Pids = memcell_sup:which_children(State#memtissue.supervisor),
    {noreply, State#memtissue { memcells=array:from_list(Pids) }}.
%% Picks a random cell and hands back a closure so the consumer can fetch
%% N messages from it without blocking the memtissue process.
handle_call({get, N}, _From, State) ->
    Pid = random_cell(State),
    {reply, {ok, fun() -> memcell:get(N, Pid) end}, State};
%% Reports the current size of the cached memcell array.
handle_call(count_cells, _From, State = #memtissue{memcells = Cells}) ->
    {reply, {ok, array:size(Cells)}, State}.
%%%
%%% Local functions
%%%
%% Picks a live memcell pid uniformly at random from the cached array.
%%
%% The previous implementation indexed with
%% `erlang:system_time(millisecond) rem Size', which is deterministic
%% within a millisecond: when the chosen slot was `restarting' or dead,
%% the retry recursion re-picked the SAME slot in a tight loop until the
%% clock ticked. Using rand:uniform/1 makes every retry independent.
%%
%% NOTE(review): if every cached cell is dead/restarting this still
%% recurses forever, same as before — presumably flush/scaling keeps at
%% least one live cell; confirm.
-spec random_cell(#memtissue{}) -> pid().
random_cell(State = #memtissue{memcells = Cells}) ->
    Size = array:size(Cells),
    %% rand:uniform/1 returns 1..Size; array indices are 0-based.
    Index = rand:uniform(Size) - 1,
    case array:get(Index, Cells) of
        %% Slot holds a child the supervisor is currently restarting.
        restarting -> random_cell(State);
        Pid ->
            %% The cached pid may belong to a cell that has since died.
            case is_process_alive(Pid) of
                false -> random_cell(State);
                true -> Pid
            end
    end.
%/--------------------------------------------------------------------
%| Copyright 2019 Erisata, UAB (Ltd.)
%|
%| Licensed under the Apache License, Version 2.0 (the "License");
%| you may not use this file except in compliance with the License.
%| You may obtain a copy of the License at
%|
%| http://www.apache.org/licenses/LICENSE-2.0
%|
%| Unless required by applicable law or agreed to in writing, software
%| distributed under the License is distributed on an "AS IS" BASIS,
%| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%| See the License for the specific language governing permissions and
%| limitations under the License.
%\--------------------------------------------------------------------
%%% @doc
%%% Callback module for `paxoid', storing all the state in memory.
%%% Most likely you will use this module as an example on how
%%% to implement own callback.
%%%
-module(paxoid_cb_mem).
-behaviour(paxoid).
-export([extract_data/1]).
-export([
init/3,
describe/1,
handle_new_id/2,
handle_new_map/3,
handle_new_max/2,
handle_changed_cluster/3,
handle_changed_partition/3,
handle_select/4,
handle_check/2
]).
%%% ============================================================================
%%% Internal state.
%%% ============================================================================
%%
%% The state for this callback module.
%%
-record(state, {
    %% Largest id known so far (raised in handle_new_id/handle_new_map,
    %% overwritten by handle_new_max).
    max :: paxoid:num(),
    %% Ids owned by this node (extended by handle_new_id).
    ids :: [paxoid:num()],
    %% Duplicated ids that were relocated: old id => its replacement.
    map :: #{Old :: paxoid:num() => New :: paxoid:num()}
}).
%% @doc
%% Exposes the raw in-memory data (known maximum, owned ids and the id
%% relocation map) so other callback implementations can embed and reuse
%% this module's state model.
%%
extract_data(State) ->
    #state{max = Max, ids = Ids, map = Map} = State,
    {ok, Max, Ids, Map}.
%%% ============================================================================
%%% Callbacks for `paxoid'.
%%% ============================================================================
%% @doc
%% Initializes this callback from the `Args' map, falling back to an
%% empty model for any missing key.
%%
init(_Name, Node, Args) ->
    Max = maps:get(max, Args, 0),
    State = #state{
        max = Max,
        ids = maps:get(ids, Args, []),
        map = maps:get(map, Args, #{})
    },
    {ok, Max, [Node], State}.
%% @doc
%% Describes this node: reports the ids it owns.
%%
describe(#state{ids = Ids} = State) ->
    {ok, #{ids => Ids}, State}.
%% @doc
%% Records an ID newly allocated to this node, raising the known maximum
%% when the new id exceeds it.
%%
handle_new_id(NewId, #state{max = Max, ids = Ids} = State) ->
    {ok, State#state{
        max = erlang:max(NewId, Max),
        ids = [NewId | Ids]
    }}.
%% @doc
%% Replaces a duplicated ID with its newly assigned one, remembers the
%% mapping, and raises the known maximum when needed.
%%
handle_new_map(OldId, NewId, #state{max = Max, ids = Ids, map = Map} = State) ->
    Remapped = [
        if
            Id =:= OldId -> NewId;
            true -> Id
        end
        || Id <- Ids
    ],
    {ok, State#state{
        max = erlang:max(NewId, Max),
        ids = Remapped,
        map = maps:put(OldId, NewId, Map)
    }}.
%% @doc
%% Updates maximal known ID in the scope of the entire cluster.
%%
handle_new_max(NewMax, State) ->
    {ok, State#state{max = NewMax}}.
%% @doc
%% Called when new nodes are added/discovered in the cluster; they can be
%% known before they become reachable.
%%
%% The in-memory model keeps no per-node data, so nothing is updated.
%%
handle_changed_cluster(_OldNodes, _NewNodes, State) ->
    {ok, State}.
%% @doc
%% Called when our view of the partition changes.
%%
%% The in-memory model keeps no partition data, so nothing is updated.
%%
handle_changed_partition(_OldNodes, _NewNodes, State) ->
    {ok, State}.
%% @doc
%% Returns a requested range of IDs owned by this node.
%%
%% Walks the node's ids in ascending order (usort below) collecting those
%% inside [From, Till], up to MaxCount of them. The second element of the
%% result is the id up to which the range was fully covered, so a caller
%% can resume from there.
%%
%% NOTE(review): if MaxCount is 0 and the first in-range id exists, the
%% overflow branch calls hd/1 on an empty accumulator and crashes —
%% presumably callers always pass MaxCount >= 1; confirm.
%%
handle_select(From, Till, MaxCount, State = #state{ids = Ids}) ->
    SelectIds = fun
        SelectIds([Id | Other], Count, AccIds) ->
            if Count =< 0 -> {ok, hd(AccIds), lists:reverse(AccIds), State}; % Overflow by size.
               Id > Till -> {ok, Till, lists:reverse(AccIds), State}; % Range scanned.
               Id < From -> SelectIds(Other, Count, AccIds); % Skip the first ids.
               true -> SelectIds(Other, Count - 1, [Id | AccIds]) % Collect them.
            end;
        SelectIds([], _Count, AccIds) ->
            {ok, Till, lists:reverse(AccIds), State} % Covered all the requested range.
    end,
    SelectIds(lists:usort(Ids), MaxCount, []).
%% @doc
%% Checks which of the peer's ids clash with ids owned by this node and
%% reports them back.
%%
handle_check(PeerIds, #state{ids = Ids} = State) ->
    Duplicated = [Id || Id <- PeerIds, lists:member(Id, Ids)],
    {ok, Duplicated, State}.
-module(bingo_random).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([shuffle/1]).
%% Randomly shuffle the specified List according to uniform distribution.
%%
%% Pairs every element with a random key, sorts by the key (keysort/2 is
%% stable and compares only the key), then strips the keys. This runs in
%% O(n log n), replacing the previous approach of repeatedly deleting a
%% random position, which was O(n^2) (length/1 + lists:split/2 per pick).
-spec shuffle([T]) -> [T].
shuffle(List) ->
    Decorated = [{rand:uniform(), Elem} || Elem <- List],
    [Elem || {_Key, Elem} <- lists:keysort(1, Decorated)].
%% Deletes and returns the element at the specified position from the
%% given List. Positions are 1-based; any out-of-range position or
%% non-integer/non-list input yields `error`.
-spec delete_at(pos_integer(), [T]) -> {T, [T]} | error.
delete_at(N, List) when is_integer(N), is_list(List) ->
    case is_valid_position(N, List) of
        true ->
            {Prefix, [Elem | Suffix]} = lists:split(N - 1, List),
            {Elem, Prefix ++ Suffix};
        false ->
            error
    end;
delete_at(_, _) ->
    error.

%% Returns `true` only when `N` is a position inside List, i.e.
%% 1 =< N =< length(List).
-spec is_valid_position(pos_integer(), list()) -> boolean().
is_valid_position(N, List) when is_integer(N), N >= 1, is_list(List) ->
    length(List) >= N;
is_valid_position(_, _) ->
    false.
%%
%% TESTS.
%%
-ifdef(TEST).
%% shuffle/1: empty and singleton lists are fixed points; for longer lists
%% only the length (not the order) of the result is deterministic.
shuffle_test_() ->
    [?_assertEqual([], shuffle([])),
     ?_assertEqual([a], shuffle([a])),
     ?_assertEqual(2, length(shuffle([a, b]))),
     ?_assertEqual(3, length(shuffle([a, b, c])))
    ].
%% delete_at/2: every valid 1-based position returns {Element, Rest};
%% out-of-range positions (0, negative, > length) return `error`.
delete_at_test_() ->
    [?_assertEqual({a, [b, c]}, delete_at(1, [a, b, c])),
     ?_assertEqual({b, [a, c]}, delete_at(2, [a, b, c])),
     ?_assertEqual({c, [a, b]}, delete_at(3, [a, b, c])),
     ?_assertEqual({a, [b]}, delete_at(1, [a, b])),
     ?_assertEqual({b, [a]}, delete_at(2, [a, b])),
     ?_assertEqual({a, []}, delete_at(1, [a])),
     ?_assertEqual(error, delete_at(4, [a, b, c])),
     ?_assertEqual(error, delete_at(0, [a, b, c])),
     ?_assertEqual(error, delete_at(-1, [a, b, c])),
     ?_assertEqual(error, delete_at(3, [a, b])),
     ?_assertEqual(error, delete_at(0, [a, b])),
     ?_assertEqual(error, delete_at(-1, [a, b])),
     ?_assertEqual(error, delete_at(2, [a])),
     ?_assertEqual(error, delete_at(0, [a])),
     ?_assertEqual(error, delete_at(-1, [a])),
     ?_assertEqual(error, delete_at(1, [])),
     ?_assertEqual(error, delete_at(0, [])),
     ?_assertEqual(error, delete_at(-1, []))
    ].
%% is_valid_position/2: true only for 1 =< N =< length(List).
is_valid_position_test_() ->
    [?_assert(is_valid_position(1, [a, b, c])),
     ?_assert(is_valid_position(2, [a, b, c])),
     ?_assert(is_valid_position(3, [a, b, c])),
     ?_assert(is_valid_position(1, [a])),
     ?_assertNot(is_valid_position(0, [a, b, c])),
     ?_assertNot(is_valid_position(4, [a, b, c])),
     ?_assertNot(is_valid_position(-1, [a, b, c])),
     ?_assertNot(is_valid_position(2, [a])),
     ?_assertNot(is_valid_position(0, [a])),
     ?_assertNot(is_valid_position(-1, [a])),
     ?_assertNot(is_valid_position(1, [])),
     ?_assertNot(is_valid_position(0, [])),
     ?_assertNot(is_valid_position(-1, []))
    ].
-endif.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.