%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2020 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% This module MUST be used universally for checking the absolute global
%%% "wall time" (which is identical to Onezone's cluster time). It contains
%%% procedures for clock synchronization between different nodes/clusters.
%%%
%%% The timestamp_*/0 and monotonic_timestamp_*/1 functions are the only
%%% recommended way of acquiring an absolute timestamp across Onedata services.
%%%
%%% Note that the global time IS NOT MONOTONIC. It may warp forwards and
%%% backwards due to imperfect clocks that may be readjusted in random moments
%%% on local or remote nodes. DO NOT USE the global time for measuring time
%%% elapsing between specific moments in time within a single node. However,
%%% this module must be used for time dependent logic that is distributed
%%% between two or more nodes, or assumes possible node restarts (stores the
%%% timestamps in persistence). In such case, the logic must be secured against
%%% time warps (especially backward, as this may yield negative time
%%% differences). To that end, monotonic_timestamp_*/1 can be used, which will
%%% "freeze" time reads until the time warp is compensated. NOTE HOWEVER, that
%%% this may yield ZERO TIME DIFFERENCES and cause possible division by zero.
%%%
%%% Clocks can be synchronized by calling the corresponding procedures:
%%% * the local clock with a remote clock
%%% (can be any service providing timestamps)
%%% * the clock on a remote node in the cluster with the local clock
%%% (requires that the other node has the same version of this module)
%%%
%%% Clock synchronization performs a series of requests to fetch a remote
%%% timestamp. It calculates the approximate communication delay with the remote
%%% node/server and difference of the clocks (called "bias" in this module).
%%% Finally, it stores the bias on the local or remote node (depending which
%%% clock is being synchronized). All consecutive timestamps are adjusted using
%%% the bias so that they return measurements as close as possible to the target
%%% clock's time. It is recommended to periodically repeat the synchronization
%%% procedure to ensure that the clocks don't become desynchronized over a
%%% longer period. If synchronization is not performed, the local system clock
%%% is used for timestamps.
%%%
%%% The typical synchronization error can be expected to be below a second,
%%% but for highly utilized machines it can grow to a couple of seconds.
%%% Synchronization is discarded if the communication delay exceeds
%%% ?MAX_ALLOWED_SYNC_DELAY_MILLIS or is high compared to the bias.
%%%
%%% Every time a synchronization succeeds, the measured bias is stored in a
%%% file on disk, along with the information when it was measured. It can be
%%% later used to restore the previous synchronization, given that it is not
%%% outdated. This procedure is dedicated for nodes recovering after a failure.
%%% @end
%%%-------------------------------------------------------------------
-module(global_clock).
-author("<NAME>").
-include("logging.hrl").
-type fetch_remote_timestamp_fun() :: fun(() -> {ok, time:millis()} | {error, term()}).
% difference between readings of two clocks
-type bias() :: time:millis().
% communication delay with a remote server/node (round trip time)
-type delay() :: time:millis().
% Specific clock, as seen by the current erlang node:
% * system_clock - the native clock on the machine
% * local_clock - the clock on this node used to get timestamps,
% essentially the system_clock adjusted with bias
% * remote_clock - the local_clock on a remote node
% (remote node's system clock adjusted with bias)
-type clock_type() :: system_clock | local_clock | {remote_clock, node()}.
-export([timestamp_hours/0, timestamp_seconds/0, timestamp_millis/0]).
-export([monotonic_timestamp_seconds/1, monotonic_timestamp_millis/1]).
-export([synchronize_local_with_remote_server/1]).
-export([synchronize_remote_with_local/1]).
-export([is_synchronized/0]).
-export([reset_to_system_time/0]).
-export([try_to_restore_previous_synchronization/0]).
% internal RPC
-export([store_bias/2]).
-export([read_clock_time/1]).
%% The clock's bias is stored in a node-wide cache and defaults to 0 unless a
%% synchronization is done. The bias is measured in milliseconds, as finer
%% resolution does not make sense in environments based on network communication.
-define(CLOCK_BIAS_CACHE, clock_bias_cache).
%% If a backward time warp greater than the threshold is detected, a warning
%% is logged, but not more often than the backoff.
-define(BACKWARD_TIME_WARP_WARN_THRESHOLD_SECONDS, 60).
-define(BACKWARD_TIME_WARP_WARN_BACKOFF_SECONDS, 60).
-define(SYNC_REQUEST_REPEATS, ctool:get_env(clock_sync_request_repeats, 5)).
%% see examine_delay/2 for information how these env variables are used
-define(SATISFYING_SYNC_DELAY_MILLIS, ctool:get_env(clock_sync_satisfying_delay, 2000)).
-define(MAX_ALLOWED_SYNC_DELAY_MILLIS, ctool:get_env(clock_sync_max_allowed_delay, 10000)).
-define(BIAS_BACKUP_FILE, ctool:get_env(clock_sync_backup_file)).
-define(BIAS_BACKUP_VALIDITY_MILLIS, timer:seconds(ctool:get_env(clock_sync_backup_validity_secs, 900))).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Current global time, truncated to full hours since the epoch.
-spec timestamp_hours() -> time:hours().
timestamp_hours() ->
    % 3_600_000 ms per hour
    timestamp_millis() div 3600000.

%% @doc Current global time, truncated to full seconds since the epoch.
-spec timestamp_seconds() -> time:seconds().
timestamp_seconds() ->
    timestamp_millis() div 1000.

%% @doc Current global time in milliseconds - the base for all other reads.
%% Calls through ?MODULE so eunit tests can intercept read_clock_time/1
%% (see the comment above read_clock_time/1).
-spec timestamp_millis() -> time:millis().
timestamp_millis() ->
    ?MODULE:read_clock_time(local_clock).
%%--------------------------------------------------------------------
%% @doc
%% Returns the current global wall time ensuring that the read is not lower than
%% the previous - which guarantees non-strict monotonicity for consecutive reads.
%% NOTE: if the global clock warps backwards, this may cause the monotonic time to
%% freeze for a significant amount of time, until the difference is compensated.
%% NOTE: during that time, the difference of consecutive reads will be zero.
%% @end
%%--------------------------------------------------------------------
-spec monotonic_timestamp_seconds(Previous :: time:seconds()) -> time:seconds().
monotonic_timestamp_seconds(Previous) ->
    TimestampSeconds = timestamp_seconds(),
    % a negative difference means the clock warped backwards since the previous read
    warn_upon_backward_time_warp(TimestampSeconds - Previous),
    % never return less than the previous read (non-strict monotonicity)
    max(TimestampSeconds, Previous).

%% @see monotonic_timestamp_seconds/1
-spec monotonic_timestamp_millis(Previous :: time:millis()) -> time:millis().
monotonic_timestamp_millis(Previous) ->
    TimestampMillis = timestamp_millis(),
    % the warning helper expects a difference in seconds, hence the division
    warn_upon_backward_time_warp((TimestampMillis - Previous) div 1000),
    max(TimestampMillis, Previous).
%% @doc Measures the bias between the local system clock and a remote clock
%% (sampled via the provided fun) and, if the communication delay is
%% acceptable, stores the bias so that subsequent local_clock reads are
%% adjusted towards the remote clock.
-spec synchronize_local_with_remote_server(fetch_remote_timestamp_fun()) -> ok | error.
synchronize_local_with_remote_server(FetchRemoteTimestamp) ->
    try
        % use system_clock as reference, as it will be adjusted by measured bias
        case estimate_bias_and_delay(FetchRemoteTimestamp, system_clock) of
            {delay_ok, AverageBias, _} ->
                store_bias(local_clock, AverageBias);
            {delay_too_high, AverageBias, AverageDelay} ->
                ?error("Failed to synchronize with remote clock - delay too high (~Bms at bias=~Bms)", [
                    AverageDelay, AverageBias
                ]),
                error
        end
    catch
        % thrown by estimate_bias_and_delay/2 when the fetch fun returns an error
        throw:{error, _} = Error ->
            ?error("Failed to synchronize with remote clock due to ~w", [Error]),
            error;
        Class:Reason:Stacktrace ->
            ?error_stacktrace("Failed to synchronize with remote clock - ~w:~p", [Class, Reason], Stacktrace),
            error
    end.
%% @doc Synchronizes the clock on the given cluster node with this node's
%% local_clock: samples the remote node's system clock over RPC, estimates
%% the bias and, if the delay is acceptable, stores the negated bias on the
%% remote node (it is the remote clock that gets adjusted).
%% Requires the remote node to run the same version of this module.
-spec synchronize_remote_with_local(node()) -> ok | error.
synchronize_remote_with_local(Node) ->
    try
        FetchRemoteTimestamp = fun() ->
            case rpc:call(Node, ?MODULE, read_clock_time, [system_clock]) of
                Millis when is_integer(Millis) -> {ok, Millis};
                % rpc failures are rethrown and reported below
                {badrpc, Reason} -> throw({error, {badrpc, Reason}})
            end
        end,
        % use local_clock as reference to adjust the remote clock (remote node's local_clock)
        case estimate_bias_and_delay(FetchRemoteTimestamp, local_clock) of
            {delay_ok, AverageBias, _} ->
                % bias was measured as remote-minus-local, so the remote node
                % must apply the opposite correction
                store_bias({remote_clock, Node}, -AverageBias);
            {delay_too_high, AverageBias, AverageDelay} ->
                ?error("Failed to synchronize node's clock (~p) with local - delay too high (~Bms at bias=~Bms)", [
                    Node, AverageDelay, AverageBias
                ]),
                error
        end
    catch
        throw:{error, _} = Error ->
            ?error("Failed to synchronize node's clock (~p) with local due to ~w", [Node, Error]),
            error;
        Class:Reason:Stacktrace ->
            ?error_stacktrace("Failed to synchronize node's clock (~p) with local - ~w:~p", [Node, Class, Reason], Stacktrace),
            error
    end.
%% @doc Whether a synchronization bias is currently stored in the node-wide
%% cache (i.e. a synchronization was performed and not reset).
-spec is_synchronized() -> boolean().
is_synchronized() ->
    case node_cache:get(?CLOCK_BIAS_CACHE, undefined) of
        Bias when is_integer(Bias) -> true;
        _ -> false
    end.
%%--------------------------------------------------------------------
%% @doc
%% Resets the clock bias caused by synchronization, making the timestamps return
%% local system time. If the synchronization has not been performed beforehand,
%% it has no effect.
%% @end
%%--------------------------------------------------------------------
-spec reset_to_system_time() -> ok.
reset_to_system_time() ->
    % dropping the cached bias makes read_clock_time(local_clock) fall back
    % to the raw system clock (the bias defaults to 0)
    node_cache:clear(?CLOCK_BIAS_CACHE).
%%--------------------------------------------------------------------
%% @doc
%% Attempts to restore the bias that was previously stored on disk,
%% returns a boolean indicating success. See the module's description for more.
%% @end
%%--------------------------------------------------------------------
-spec try_to_restore_previous_synchronization() -> boolean().
try_to_restore_previous_synchronization() ->
    case recover_bias_from_disk() of
        {up_to_date, Bias} ->
            ?info("Restored the previous time synchronization from backup"),
            % only the in-memory cache is updated; the backup file stays as-is
            store_bias_in_cache(Bias),
            true;
        stale ->
            ?info("Discarded a stale time synchronization backup - defaulting to the system clock"),
            false;
        not_found ->
            ?info("Time synchronization backup not found - defaulting to the system clock"),
            false
    end.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @private
%% exported for internal RPC
%% exported for eunit tests - called by ?MODULE for that reason
%% Reads the given clock, in milliseconds:
%%   system_clock - raw machine time,
%%   local_clock  - machine time plus the cached synchronization bias.
-spec read_clock_time(clock_type()) -> time:millis().
read_clock_time(system_clock) ->
    native_node_clock:system_time_millis();
read_clock_time(local_clock) ->
    native_node_clock:system_time_millis() + get_bias_from_cache().
%% @private
%% Performs ?SYNC_REQUEST_REPEATS measurement rounds and averages the results.
%% Each round fetches a remote timestamp while a stopwatch measures the round
%% trip; the remote reading is assumed to correspond to the moment halfway
%% through the round trip, and the bias is the remote reading minus the
%% reference clock's reading at that moment. Any {error, _} from the fetch
%% fun is rethrown (hence no_return in the spec).
-spec estimate_bias_and_delay(fetch_remote_timestamp_fun(), clock_type()) ->
    {delay_ok | delay_too_high, bias(), delay()} | no_return().
estimate_bias_and_delay(FetchRemoteTimestamp, ReferenceClock) ->
    {BiasSum, DelaySum} = lists:foldl(fun(_, {BiasAcc, DelayAcc}) ->
        Stopwatch = stopwatch:start(),
        RemoteTimestamp = case FetchRemoteTimestamp() of
            {ok, Timestamp} -> Timestamp;
            {error, _} = Error -> throw(Error)
        end,
        Delay = stopwatch:read_millis(Stopwatch),
        TimestampAfter = ?MODULE:read_clock_time(ReferenceClock),
        % assume the remote read happened in the middle of the round trip
        EstimatedMeasurementMoment = TimestampAfter - (Delay div 2),
        Bias = RemoteTimestamp - EstimatedMeasurementMoment,
        {BiasAcc + Bias, DelayAcc + Delay}
    end, {0, 0}, lists:seq(1, ?SYNC_REQUEST_REPEATS)),
    AvgBias = round(BiasSum / ?SYNC_REQUEST_REPEATS),
    AvgDelay = round(DelaySum / ?SYNC_REQUEST_REPEATS),
    {examine_delay(AvgDelay, AvgBias), AvgBias, AvgDelay}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Decides if the given communication delay is acceptable. A delay lower than
%% or equal to ?SATISFYING_SYNC_DELAY_MILLIS is always accepted. Beyond that,
%% it is accepted only when it is lower than half the (absolute) bias and does
%% not exceed ?MAX_ALLOWED_SYNC_DELAY_MILLIS.
%% @end
%%--------------------------------------------------------------------
-spec examine_delay(delay(), bias()) -> delay_ok | delay_too_high.
examine_delay(Delay, Bias) ->
    SatisfyingDelay = ?SATISFYING_SYNC_DELAY_MILLIS,
    MaxAllowedDelay = ?MAX_ALLOWED_SYNC_DELAY_MILLIS,
    Acceptable = Delay =< SatisfyingDelay
        orelse (Delay =< MaxAllowedDelay andalso Delay < abs(Bias) / 2),
    case Acceptable of
        true -> delay_ok;
        false -> delay_too_high
    end.
%% @private
%% exported for internal RPC
%% Stores the measured bias for the given clock. For a remote clock, the call
%% is forwarded over RPC so the bias lands in the remote node's cache/disk.
-spec store_bias(clock_type(), bias()) -> ok.
store_bias({remote_clock, Node}, Bias) ->
    ok = rpc:call(Node, ?MODULE, ?FUNCTION_NAME, [local_clock, Bias]);
store_bias(local_clock, Bias) ->
    % the env variable allows operators to force-disable bias corrections
    case ctool:get_env(clock_sync_ignore_bias_corrections, false) of
        true ->
            ?warning("Ignoring clock bias correction (forced in config)");
        false ->
            % log on info level upon the first synchronization
            case is_synchronized() of
                false -> ?info("Local clock has been synchronized, current bias: ~Bms", [Bias]);
                true -> ?debug("Local clock has been synchronized, current bias: ~Bms", [Bias])
            end,
            store_bias_in_cache(Bias),
            store_bias_on_disk(Bias)
    end.
%% @private
%% Puts the bias into the node-wide cache; crashes (badmatch) on cache failure.
-spec store_bias_in_cache(bias()) -> ok | no_return().
store_bias_in_cache(Bias) ->
    ok = node_cache:put(?CLOCK_BIAS_CACHE, Bias).

%% @private
%% Defaults to 0 (no adjustment) when no synchronization has been done.
-spec get_bias_from_cache() -> bias().
get_bias_from_cache() ->
    node_cache:get(?CLOCK_BIAS_CACHE, 0).
%% @private
%% Persists the bias as JSON, together with the system-clock time at which it
%% was stored, so a restarting node can restore a recent synchronization.
-spec store_bias_on_disk(bias()) -> ok | no_return().
store_bias_on_disk(Bias) ->
    ok = file:write_file(?BIAS_BACKUP_FILE, json_utils:encode(#{
        <<"biasMilliseconds">> => Bias,
        <<"backupTimestampMilliseconds">> => ?MODULE:read_clock_time(system_clock)
    })).
%% @private
%% Reads the bias backup written by store_bias_on_disk/1. The backup counts
%% as up to date only while it is younger than ?BIAS_BACKUP_VALIDITY_MILLIS
%% (judged against the system clock); unreadable or unparsable files are
%% reported as not_found.
-spec recover_bias_from_disk() -> {up_to_date, bias()} | stale | not_found.
recover_bias_from_disk() ->
    case file:read_file(?BIAS_BACKUP_FILE) of
        {ok, Binary} ->
            try
                #{
                    <<"biasMilliseconds">> := Bias,
                    <<"backupTimestampMilliseconds">> := BackupTimestampMillis
                } = json_utils:decode(Binary),
                MaxValidity = BackupTimestampMillis + ?BIAS_BACKUP_VALIDITY_MILLIS,
                case MaxValidity > ?MODULE:read_clock_time(system_clock) of
                    true -> {up_to_date, Bias};
                    false -> stale
                end
            catch Class:Reason:Stacktrace ->
                % malformed JSON / missing keys end up here
                ?debug_stacktrace("Cannot parse the time synchronization backup file - ~w:~p", [Class, Reason], Stacktrace),
                not_found
            end;
        Other ->
            ?debug("Cannot read the time synchronization backup file - ~p", [Other]),
            not_found
    end.
%% @private
%% Logs a (throttled) warning when the difference between consecutive reads
%% indicates a backward time warp larger than the configured threshold.
-spec warn_upon_backward_time_warp(time:seconds()) -> ok.
warn_upon_backward_time_warp(TimeDiffSeconds) when TimeDiffSeconds > -?BACKWARD_TIME_WARP_WARN_THRESHOLD_SECONDS ->
    ok;
warn_upon_backward_time_warp(TimeDiffSeconds) ->
    % backoff for some time between warning logs to avoid flooding
    utils:throttle(?BACKWARD_TIME_WARP_WARN_BACKOFF_SECONDS, fun() ->
        ?warning(
            "Detected a major backward time warp in the global clock - ~B seconds. "
            "Time-triggered events as well as statistics and other information "
            "based on absolute time may be temporarily distorted.",
            [-TimeDiffSeconds]
        )
    end).
-module(socket_example).
-export([nano_get_url/0]).
%% Why is programming with sockets fun? Because it allows applications to interact with other machines on the Internet, which has far more potential than just performing local operations.
%% In this chapter, we'll see how to program clients and servers using TCP and UDP sockets.
%% We'll go through the different forms of servers that are possible (parallel, sequential, blocking, and nonblocking) and see how to program traffic-shaping applications that can control the flow of data to the application.
%% @doc Fetches the front page of www.google.de (default host).
nano_get_url() ->
    nano_get_url("www.google.de").

%% @doc Performs a minimal HTTP/1.0 GET against port 80 of Host and returns
%% the entire response (headers and body) as a single binary.
nano_get_url(Host) ->
    %% Open a TCP socket to port 80 of Host by calling gen_tcp:connect.
    %% The 'binary' option opens the socket in binary mode, so all data is
    %% delivered to the application as binaries.
    %% {packet,0} means the TCP data is delivered to the application in an
    %% unmodified form.
    {ok, Socket} = gen_tcp:connect(Host, 80, [binary, {packet, 0}]),
    %% Send the request "GET / HTTP/1.0\r\n\r\n" over the socket.
    ok = gen_tcp:send(Socket, "GET / HTTP/1.0\r\n\r\n"),
    receive_data(Socket, []).
%% @doc Accumulates TCP data fragments until the peer closes the connection,
%% then returns them concatenated as one binary.
%% NOTE(review): there is no 'after' clause, so this blocks forever if the
%% server neither sends data nor closes the socket - consider a timeout.
receive_data(Socket, SoFar) ->
    %% The reply usually does not arrive in one packet but fragmented, a bit
    %% at a time. The fragments are delivered as messages to the process that
    %% opened (or controls) the socket.
    receive
        {tcp, Socket, Bin} ->
            %% One more fragment: prepend it (cheap) and keep receiving; the
            %% accumulator is reversed once the transfer completes.
            receive_data(Socket, [Bin|SoFar]);
        {tcp_closed, Socket} ->
            %% The server has finished sending data.
            list_to_binary(reverse(SoFar))
    end.
%% @doc Reverses a list. Delegates to the BIF-backed stdlib implementation
%% instead of the previous hand-rolled accumulator recursion (same O(n)
%% behavior, less code to maintain).
reverse(L) ->
    lists:reverse(L).
%% -----------------------------------------------------------------------------
%%
%% The MIT License (MIT)
%%
%% Copyright (c) 2017 <NAME>
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in all
%% copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
%% SOFTWARE.
%%
%% -----------------------------------------------------------------------------
%%
%% @author <NAME> <<EMAIL>>
%% @doc Example digraph implementation as list of edges
%%
%% The example module shows how to implement custom digraph implementation.
%% It should be simplest possible digraph module.
%%
%% @copyright 2017 <NAME>
%% @end
%%
%% -----------------------------------------------------------------------------
-module(map_digraph).
-behaviour(gen_digraph).
-export([new/0]).
-export([ from_list/1
, to_list/1
, edges/1
, no_edges/1
, vertices/1
, no_vertices/1
, in_neighbours/2
, out_neighbours/2
, in_degree/2
, out_degree/2
, sources/1
, sinks/1
, delete/1
, is_edge/3
, is_path/2
, is_vertex/2
, get_path/3
, get_cycle/2
, get_short_path/3
, get_short_cycle/2
, has_path/3
, has_cycle/2
, reachable/2
, reachable_neighbours/2
, reaching/2
, reaching_neighbours/2
, components/1
, strong_components/1
, preorder/1
, is_acyclic/1
, postorder/1
, topsort/1
, condensation/1
, add_vertex/2
, add_edge/3
]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% -----------------------------------------------------------------------------
%% API
%% -----------------------------------------------------------------------------

%% @doc Creates an empty graph, tagged with this callback module:
%% {?MODULE, {EdgeMapFromTo, EdgeMapToFrom, VertexMap}} - the first nested map
%% feeds out_neighbours/2, the second in_neighbours/2.
new() -> {?MODULE, {maps:new(), maps:new(), maps:new()}}.
%% -----------------------------------------------------------------------------
%% Callbacks
%% -----------------------------------------------------------------------------

%% @doc Builds a graph from a list of {Vertex} (isolated vertex) and
%% {From, To} (edge) tuples; any other element shape raises badarg.
%% NOTE(review): "Pre" is keyed From -> To and feeds out_neighbours/2, while
%% "Suc" is keyed To -> From and feeds in_neighbours/2 - the names read as if
%% they were swapped; verify against gen_digraph's conventions.
from_list(L) ->
    % collect every mentioned vertex (isolated or edge endpoint)
    Vs = [ V || E <- L, V <- case E of
                                 {V} -> [{V,{}}];
                                 {V1, V2} -> [{V1,{}}, {V2,{}}];
                                 _ -> error(badarg)
                             end ],
    % keep only the 2-tuples, i.e. the edges
    Es = [ E || {_, _} = E <- L ],
    Pre = lists:foldl(fun({K,V},A) -> put([K,V], {}, A) end, #{}, Es),
    Suc = lists:foldl(fun({K,V},A) -> put([V,K], {}, A) end, #{}, Es),
    {?MODULE, {Pre, Suc, maps:from_list(Vs)}}.
%% @doc Adds (or re-adds) an isolated vertex; edges are unaffected.
add_vertex({Mod, {Pre,Suc, Nodes}}, V) ->
    {Mod, {Pre,Suc, maps:put(V, {}, Nodes)}}.

%% @doc Adds the edge V1 -> V2, implicitly adding both endpoints as vertices.
add_edge({Mod, {Pre,Suc, Nodes}}, V1, V2) ->
    Nodes1 = maps:put(V1, {}, Nodes),
    Nodes2 = maps:put(V2, {}, Nodes1),
    % record the edge in both directions: Pre[V1][V2] and Suc[V2][V1]
    Pre1 = put([V1, V2], {}, Pre),
    Suc1 = put([V2, V1], {}, Suc),
    {Mod, {Pre1,Suc1, Nodes2}}.
to_list(G) -> gen_digraph:gen_to_list(G).

%% @doc Every edge as a {From, To} pair, enumerated from the nested edge map.
edges({_, {Pre, _, _}}) ->
    [{V1, V2} || V1 <- keys([], Pre), V2 <- keys([V1], Pre)].

no_edges(G) -> gen_digraph:gen_no_edges(G).

%% @doc All vertices, taken straight from the vertex map.
vertices({_, {_, _, Nodes}}) -> maps:keys(Nodes).

no_vertices(G) -> gen_digraph:gen_no_vertices(G).

%% @doc Vertices that have an edge pointing at V.
in_neighbours({_, {_, Suc, _}}, V) -> keys([V], Suc).

%% @doc Vertices that V points at.
out_neighbours({_, {Pre, _, _}}, V) -> keys([V], Pre).

in_degree(G, V) -> gen_digraph:gen_in_degree(G, V).

out_degree(G, V) -> gen_digraph:gen_out_degree(G, V).

sources(G) -> gen_digraph:gen_sources(G).

sinks(G) -> gen_digraph:gen_sinks(G).
delete(_) -> true.

%% @doc Membership test for a vertex.
%% Fixed: the graph comes first (is_vertex(G, V)), consistent with every other
%% callback in this module (is_edge(G, V1, V2), is_path(G, P), ...); the
%% previous clause had the arguments reversed, so the graph tuple was matched
%% against the vertex position.
is_vertex({_, {_, _, Nodes}}, V) -> maps:is_key(V, Nodes).

is_edge(G, V1, V2) -> gen_digraph:gen_is_edge(G, V1, V2).
%% The remaining callbacks have no map-specific implementation - they all
%% delegate to the generic algorithms in gen_digraph, which operate through
%% the primitive callbacks above (vertices/edges/neighbours).
is_path(G, P) -> gen_digraph:gen_is_path(G, P).

get_path(G, V1, V2) -> gen_digraph:gen_get_path(G, V1, V2).

get_cycle(G, V) -> gen_digraph:gen_get_cycle(G, V).

get_short_path(G, V1, V2) -> gen_digraph:gen_get_short_path(G, V1, V2).

get_short_cycle(G, V) -> gen_digraph:gen_get_short_cycle(G, V).

has_path(G, V1, V2) -> gen_digraph:gen_has_path(G, V1, V2).

has_cycle(G, V) -> gen_digraph:gen_has_cycle(G, V).

reachable(G, Vs) -> gen_digraph:gen_reachable(G, Vs).

reachable_neighbours(G, Vs) -> gen_digraph:gen_reachable_neighbours(G, Vs).

reaching(G, Vs) -> gen_digraph:gen_reaching(G, Vs).

reaching_neighbours(G, Vs) -> gen_digraph:gen_reaching_neighbours(G, Vs).

components(G) -> gen_digraph:gen_components(G).

strong_components(G) -> gen_digraph:gen_strong_components(G).

preorder(G) -> gen_digraph:gen_preorder(G).

is_acyclic(G) -> gen_digraph:gen_is_acyclic(G).

postorder(G) -> gen_digraph:gen_postorder(G).

topsort(G) -> gen_digraph:gen_topsort(G).

condensation(G) -> gen_digraph:gen_condensation(G).
%% -----------------------------------------------------------------------------
%% Inspired by https://github.com/odo/nested/blob/master/src/nested.erl
%% -----------------------------------------------------------------------------

%% @doc Stores Value in a nested map under the given key path, creating
%% intermediate maps as needed; a non-map value found along the path is
%% replaced by a fresh map. An empty path yields Value itself.
put([], Value, _Map) ->
    Value;
put([Key | Rest], Value, Map) ->
    Inner = case maps:find(Key, Map) of
        {ok, Sub} when is_map(Sub) -> Sub;
        _ -> #{}
    end,
    Map#{Key => put(Rest, Value, Inner)}.
%% @doc Lists the keys of the map found under the given key path; returns []
%% as soon as a path element is missing. An empty path lists the keys of Map.
keys([], Map) ->
    maps:keys(Map);
keys([Key | Rest], Map) ->
    case maps:find(Key, Map) of
        {ok, Inner} -> keys(Rest, Inner);
        error -> []
    end.
%% -----------------------------------------------------------------------------
%% Tests
%% -----------------------------------------------------------------------------
-ifdef(TEST).

%% Property-based conformance tests supplied by the behaviour.
gen_properties_test_() ->
    gen_digraph:gen_properties_tests(?MODULE).

%% Example-based conformance tests supplied by the behaviour.
gen_tests_test_() ->
    gen_digraph:gen_tests(?MODULE).

-endif. %% TEST
%%% @doc
%%% Format date time values according to a Format Specification string
%%% Format specifications are modeled after the .NET DateTime formats
%%% See:
%%% https://docs.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings
%%% https://docs.microsoft.com/en-us/dotnet/standard/base-types/custom-date-and-time-format-strings
%%% https://msdn.microsoft.com/en-us/library/hc4ky857(v=vs.71).aspx
%%% https://msdn.microsoft.com/en-us/library/8kb3ddd4(v=vs.71).aspx
%%%
%%% @end
-module(time_utils).
-author("<NAME>").
-export([
format_local/1,
format_utc/1,
format_time/3,
get_format_defn/1
]).
%%
%% Format the local system time, given either a raw Format specification or a
%% pre-parsed {Format string, Parameter definitions} pair.
%%
-spec format_local(FormatStrParamDefs :: {string(), [atom()]}) -> string();
                  (FormatSpec :: string()) -> string().
format_local({FormatStr, ParamDefs}) ->
    % capture the microsecond part separately - the calendar conversion below
    % only preserves whole seconds
    TimeStamp = {_, _, MicroSec} = os:timestamp(),
    format_time({FormatStr, ParamDefs}, calendar:now_to_local_time(TimeStamp), MicroSec);
format_local(FormatSpec) ->
    case get_format_defn(FormatSpec) of
        % an invalid spec is reported in-band, as a printable string
        {error, Reason} -> "Error: " ++ atom_to_list(Reason);
        {FormatStr, ParamDefs} -> format_local({FormatStr, ParamDefs})
    end.
%%
%% Format the UTC time, given either a raw Format specification or a
%% pre-parsed {Format string, Parameter definitions} pair.
%%
-spec format_utc(FormatStrParamDefs :: {string(), [atom()]}) -> string();
                (FormatSpec :: string()) -> string().
format_utc({FormatStr, ParamDefs}) ->
    % capture the microsecond part separately - the calendar conversion below
    % only preserves whole seconds
    TimeStamp = {_, _, MicroSec} = os:timestamp(),
    format_time({FormatStr, ParamDefs}, calendar:now_to_universal_time(TimeStamp), MicroSec);
format_utc(FormatSpec) ->
    case get_format_defn(FormatSpec) of
        % an invalid spec is reported in-band, as a printable string
        {error, Reason} -> "Error: " ++ atom_to_list(Reason);
        {FormatStr, ParamDefs} -> format_utc({FormatStr, ParamDefs})
    end.
%%
%% Format the given date and time, given either a raw Format specification or
%% a pre-parsed {Format string, Parameter definitions} pair.
%%
-spec format_time(FormatStrParamDefs :: {string(), [atom()]},
                  DateTime :: {{pos_integer(), pos_integer(), pos_integer()},
                               {non_neg_integer(), non_neg_integer(), non_neg_integer()}},
                  MicroS :: non_neg_integer()) -> string();
                 (FormatSpec :: string(),
                  DateTime :: {{pos_integer(), pos_integer(), pos_integer()},
                               {non_neg_integer(), non_neg_integer(), non_neg_integer()}},
                  MicroS :: non_neg_integer()) -> string().
format_time({FormatStr, ParamDefs}, DateTime, MicroS) ->
    % resolve the parameter atoms to concrete values for the current locale
    ParamVals = get_param_values(ParamDefs, ui_utils:get_calendar_locale(), DateTime, MicroS),
    lists:flatten(io_lib:format(FormatStr, ParamVals));
format_time(FormatSpec, DateTime, MicroS) ->
    case get_format_defn(FormatSpec) of
        {error, Reason} -> "Error: " ++ atom_to_list(Reason);
        {FormatStr, ParamDefs} ->
            format_time({FormatStr, ParamDefs}, DateTime, MicroS)
    end.
%%
%% Translate a list of date/time Parameter definitions (atoms)
%% into a list of actual date/time values.
%% Used to create a list of values to be inserted into a date/time format string.
%%
-spec get_param_values(ParamDefs :: [atom()],
                       CalendarStrs :: [{atom(), list()}],
                       DateTime :: {{pos_integer(), pos_integer(), pos_integer()},
                                    {non_neg_integer(), non_neg_integer(), non_neg_integer()}},
                       MicroS :: non_neg_integer()) -> [term()].
get_param_values(ParamDefs, CalendarStrs, {{Year, Month, Day}, {Hour, Minute, Second}}, MicroS) ->
    lists:map(fun(ParamDef) ->
        case ParamDef of
            % numeric year fields (2-digit, 3-digit, full)
            year2 -> Year rem 100;
            year3 -> Year rem 1000;
            year -> Year;
            month -> Month;
            % locale-dependent month/day names and abbreviations
            month_str ->
                get_calendar_string(CalendarStrs, months_strs, Month);
            month_abbr ->
                get_calendar_string(CalendarStrs, months_abbr, Month);
            day -> Day;
            day_str ->
                get_calendar_string(CalendarStrs, days_strs,
                                    calendar:day_of_the_week(Year, Month, Day));
            day_abbr ->
                get_calendar_string(CalendarStrs, days_abbr,
                                    calendar:day_of_the_week(Year, Month, Day));
            hour -> Hour;
            % 12-hour clock: midnight reads as 12, hours 13..23 map to 1..11
            hour12 when Hour == 0 -> 12;
            hour12 when Hour > 12 -> Hour - 12;
            hour12 -> Hour;
            am_pm when Hour >= 12 -> get_calendar_string(CalendarStrs, pm_str);
            am_pm -> get_calendar_string(CalendarStrs, am_str);
            % single-letter AM/PM designator: first char of the locale string
            am_pm1 when Hour >= 12 ->
                [FirstChar | _Rest] = get_calendar_string(CalendarStrs, pm_str),
                [FirstChar];
            am_pm1 ->
                [FirstChar | _Rest] = get_calendar_string(CalendarStrs, am_str),
                [FirstChar];
            minute -> Minute;
            second -> Second;
            % fractional seconds, truncated to 1..6 digits
            fract_1 -> MicroS div 100000;
            fract_2 -> MicroS div 10000;
            fract_3 -> MicroS div 1000;
            fract_4 -> MicroS div 100;
            fract_5 -> MicroS div 10;
            fract_6 -> MicroS;
            % fractional seconds that render as nothing when zero
            fract_nz_1 -> non_zero_or_no_string(MicroS, 100000);
            fract_nz_2 -> non_zero_or_no_string(MicroS, 10000);
            fract_nz_3 -> non_zero_or_no_string(MicroS, 1000);
            fract_nz_4 -> non_zero_or_no_string(MicroS, 100);
            fract_nz_5 -> non_zero_or_no_string(MicroS, 10);
            fract_nz_6 -> non_zero_or_no_string(MicroS, 1);
            % locale-dependent separators
            date_sep -> get_calendar_string(CalendarStrs, date_sep_str);
            time_sep -> get_calendar_string(CalendarStrs, time_sep_str);
            % time-zone fields are resolved by dedicated helpers
            tz_offset_1 -> get_tz_offset_1();
            tz_offset_2 -> get_tz_offset_2();
            tz_offset_3 -> get_tz_offset_3();
            tz_name -> get_tz_name();
            era -> get_calendar_string(CalendarStrs, era_str);
            % unknown tokens are rendered in-band instead of crashing
            Undefined -> "Undef token: " ++ atom_to_list(Undefined)
        end end, ParamDefs).
%%
%% Create an Erlang format time string and parameter definition list,
%% following the C# .NET time formatting standard: an empty spec yields the
%% default ('G') format, a single character selects a standard format, and
%% anything longer is parsed as a custom format pattern.
%%
-spec get_format_defn(FormatSpec :: string()) -> {string(), [atom()]} | {error, atom()}.
% Default format, same as 'G' format
get_format_defn([]) ->
    CalendarStrs = ui_utils:get_calendar_locale(),
    {DateFormatStr, DateParamDefs} = get_calendar_string(CalendarStrs, short_date_format),
    {TimeFormatStr, TimeParamDefs} = get_calendar_string(CalendarStrs, long_time_format),
    {DateFormatStr ++ " " ++ TimeFormatStr, DateParamDefs ++ TimeParamDefs};
% Standard formats (One character format definition)
get_format_defn(FormatSpec) when length(FormatSpec) == 1 ->
    CalendarStrs = ui_utils:get_calendar_locale(),
    case FormatSpec of
        "d" -> % Date, short date
            get_calendar_string(CalendarStrs, short_date_format);
        "D" -> % Date, long date
            get_calendar_string(CalendarStrs, long_date_format);
        "f" -> % Full, long date / short time
            {DateFormatStr, DateParamDefs} = get_calendar_string(CalendarStrs, long_date_format),
            {TimeFormatStr, TimeParamDefs} = get_calendar_string(CalendarStrs, short_time_format),
            {DateFormatStr ++ " " ++ TimeFormatStr, DateParamDefs ++ TimeParamDefs};
        "F" -> % Full, long date / long time
            {DateFormatStr, DateParamDefs} = get_calendar_string(CalendarStrs, long_date_format),
            {TimeFormatStr, TimeParamDefs} = get_calendar_string(CalendarStrs, long_time_format),
            {DateFormatStr ++ " " ++ TimeFormatStr, DateParamDefs ++ TimeParamDefs};
        "g" -> % General, short date / short time
            {DateFormatStr, DateParamDefs} = get_calendar_string(CalendarStrs, short_date_format),
            {TimeFormatStr, TimeParamDefs} = get_calendar_string(CalendarStrs, short_time_format),
            {DateFormatStr ++ " " ++ TimeFormatStr, DateParamDefs ++ TimeParamDefs};
        "G" -> % General, short date / long time
            {DateFormatStr, DateParamDefs} = get_calendar_string(CalendarStrs, short_date_format),
            {TimeFormatStr, TimeParamDefs} = get_calendar_string(CalendarStrs, long_time_format),
            {DateFormatStr ++ " " ++ TimeFormatStr, DateParamDefs ++ TimeParamDefs};
        "m" -> % Month Day
            get_calendar_string(CalendarStrs, month_day_format);
        "M" -> % Month Day
            get_calendar_string(CalendarStrs, month_day_format);
        "o" -> % Round Trip
            round_trip_format();
        "O" -> % Round Trip
            round_trip_format();
        "r" -> % RFC1123
            rfc1123_format();
        "R" -> % RFC1123
            rfc1123_format();
        "s" -> % Sortable
            % NOTE(review): the day field uses ~2..0w while sibling formats use
            % ~2..0b - output is the same for non-negative integers, but the
            % inconsistency looks accidental; confirm intent.
            {"~4b-~2..0b-~2..0wT~2..0b:~2..0b:~2..0b", [year, month, day, hour, minute, second]};
        "t" -> % Short time
            get_calendar_string(CalendarStrs, short_time_format);
        "T" -> % Long time
            get_calendar_string(CalendarStrs, long_time_format);
        "u" -> % Universal sortable
            {"~4b-~2..0b-~2..0w ~2..0b:~2..0b:~2..0bZ", [year, month, day, hour, minute, second]};
        "U" -> % Universal full
            get_calendar_string(CalendarStrs, universal_full_format);
        "y" -> % Month year
            get_calendar_string(CalendarStrs, month_year_format);
        "Y" -> % Month year
            get_calendar_string(CalendarStrs, month_year_format);
        _ -> {error, invalid_format}
    end;
% Custom formats (Multi character format definition).
% Folds over the characters of the spec, dispatching each format character to
% its parse_* helper; the accumulator tuple is
% {FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}.
get_format_defn(FormatSpec) ->
    {LastFormatStr, LastFormat, LastParamDefs, LastParamDef, _, _} =
        lists:foldl(
            fun(FormatChar, {FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
                case FormatChar of
                    $y -> parse_year({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $M -> parse_month({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $d -> parse_day({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $h -> parse_12hour({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $H -> parse_24hour({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $m -> parse_minute({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $s -> parse_second({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $t -> parse_am_pm({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $f -> parse_fract({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $F -> parse_fract_nz({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $z -> parse_tz_offset({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $Z -> parse_tz_name({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $/ -> parse_date_sep({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $: -> parse_time_sep({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    $g -> parse_era({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count});
                    % 2nd Backslash char, add literal Backslash
                    $\\ when LastChar == $\\ ->
                        add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $\\);
                    % Backslash char, literal char to follow
                    $\\ ->
                        {FormatStr ++ NextFormat, "", ParamDefs ++ NextParamDef, [], $\\, 1};
                    % Percent char, single char field definition may follow
                    $% ->
                        {FormatStr ++ NextFormat, "", ParamDefs ++ NextParamDef, [], $%, 1};
                    % Treat any character not already consumed, as a literal character
                    LiteralChar ->
                        add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, LiteralChar)
                end
            end,
            init_accum(),
            FormatSpec),
    % flush the trailing in-progress field into the final result
    {LastFormatStr ++ LastFormat, LastParamDefs ++ LastParamDef}.
%% ====================================================================
%% Helper functions for building custom date/time format string and
%% parameter definitions list
%% ====================================================================
% Used more than once
%% Round-trip ("o"/"O") pattern: ISO style with a 6-digit (microsecond)
%% fraction, e.g. "2018-03-22T09:09:09.123456" (see the round-trip tests).
round_trip_format() ->
    {"~4b-~2..0b-~2..0bT~2..0b:~2..0b:~2..0b.~6..0b", [year, month, day, hour, minute, second, fract_6]}.
%% RFC1123 ("r"/"R") pattern, e.g. "Thu, 22 Mar 2018 09:09:09 GMT".
rfc1123_format() ->
    {"~s, ~b ~s ~4b ~2..0b:~2..0b:~2..0b GMT", [day_abbr, day, month_abbr, year, hour, minute, second]}.
%
% Setup the initial accumulator tuple for building
% the Format string, and Parameter definition list
%
%% Build the initial accumulator tuple used while scanning a custom format
%% spec (see get_format_defn/1). Fields, in order: committed format string,
%% pending format for the current field, committed parameter definitions,
%% pending parameter definition, previously consumed character, and the run
%% length of the current character.
init_accum() ->
    {
        "",   % FormatStr
        "",   % NextFormat
        [],   % ParamDefs
        [],   % NextParamDef
        null, % PrevChar
        0     % CharCount
    }.
% Year 'y'
%% Year field 'y': y/yy -> 2-digit year (standard 2-digit rules),
%% yyy -> 3-digit year, yyyy and longer -> full year zero padded to the
%% number of consecutive 'y's.
parse_year({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    case LastChar of
        $y when (Count == 2) ->
            % Third consecutive 'y': upgrade the pending format to 3 digits.
            {FormatStr, "~3..0b", ParamDefs, [year3], LastChar, Count + 1};
        $y when (Count >= 3) ->
            % Fourth or later 'y': pad the full year to the run length, e.g.
            % "~5..0b" for "yyyyy". "~~" emits a literal '~'; the previous
            % "~c~b\..0b" form relied on the non-standard "\." escape, which
            % triggers compiler warnings on modern OTP.
            NewCount = Count + 1,
            NewNextFormat = lists:flatten(io_lib:format("~~~b..0b", [NewCount])),
            {FormatStr, NewNextFormat, ParamDefs, [year], LastChar, NewCount};
        _ -> % Switch to standard 2 digit formatting
            parse_2digit({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}, $y, [year2])
    end.
% Month 'M'
%% Month field 'M': M -> month number, MM -> zero padded number,
%% MMM -> abbreviated name, MMMM (and longer) -> full name.
parse_month({FormatStr, _NextFormat, ParamDefs, _NextParamDef, $M, 2}) ->
    % Third consecutive 'M': pending format becomes the abbreviated name.
    {FormatStr, "~s", ParamDefs, [month_abbr], $M, 3};
parse_month({FormatStr, NextFormat, ParamDefs, _NextParamDef, $M, 3}) ->
    % Fourth consecutive 'M': pending parameter becomes the full name.
    {FormatStr, NextFormat, ParamDefs, [month_str], $M, 4};
parse_month({_FormatStr, _NextFormat, _ParamDefs, _NextParamDef, $M, Count} = Accum) when Count > 3 ->
    % Longer runs change nothing; just consume the characters.
    Accum;
parse_month(Accum) ->
    % One or two 'M's follow the standard 2 digit number rules.
    parse_2digit(Accum, $M, [month]).
% Day 'd'
%% Day field 'd': d -> day number, dd -> zero padded number,
%% ddd -> abbreviated weekday name, dddd (and longer) -> full weekday name.
parse_day({FormatStr, _NextFormat, ParamDefs, _NextParamDef, $d, 2}) ->
    % Third consecutive 'd': pending format becomes the abbreviated name.
    {FormatStr, "~s", ParamDefs, [day_abbr], $d, 3};
parse_day({FormatStr, NextFormat, ParamDefs, _NextParamDef, $d, 3}) ->
    % Fourth consecutive 'd': pending parameter becomes the full name.
    {FormatStr, NextFormat, ParamDefs, [day_str], $d, 4};
parse_day({_FormatStr, _NextFormat, _ParamDefs, _NextParamDef, $d, Count} = Accum) when Count > 3 ->
    % Longer runs change nothing; just consume the characters.
    Accum;
parse_day(Accum) ->
    % One or two 'd's follow the standard 2 digit number rules.
    parse_2digit(Accum, $d, [day]).
% 12 Hour 'h'
%% 12 Hour 'h': h -> "~b", hh (or longer) -> "~2..0b"; delegates to the
%% shared 2 digit rules in parse_2digit/3.
parse_12hour({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    parse_2digit({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}, $h, [hour12]).
% 24 Hour 'H'
parse_24hour({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    parse_2digit({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}, $H, [hour]).
% Minute 'm'
parse_minute({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    parse_2digit({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}, $m, [minute]).
% Second 's'
parse_second({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    parse_2digit({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}, $s, [second]).
% Formatting rules are the same for standard 2 digit numbers:
% 2 digit year, month number, day number, 12/24 hour, minute, and second
%% Shared handling for plain 2 digit numeric fields (2 digit year, month
%% number, day number, 12/24 hour, minute, second):
%%   X -> "~b" (unpadded), XX or longer -> "~2..0b" (zero padded).
%% FormatChar is the field character being processed and ParamDef its
%% one-element parameter list.
parse_2digit({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}, FormatChar, ParamDef) ->
    case LastChar of
        $\\ ->
            % Escaped: the field character becomes a literal.
            add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, FormatChar);
        $% ->
            % "%X": explicit single-character (unpadded) form.
            add_single_char_format(FormatStr, "~b", ParamDefs, ParamDef);
        % NB: FormatChar is already bound, so these two clauses match only a
        % repeat of the same field character.
        FormatChar when (Count == 1) ->
            % Second occurrence: upgrade the pending format to zero padded.
            {FormatStr, "~2..0b", ParamDefs, NextParamDef, LastChar, Count + 1};
        FormatChar when (Count > 1) -> % Format is not changing, just consume the chars
            {FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count};
        _ ->
            % New field run: commit anything pending and start with "~b".
            {FormatStr ++ NextFormat, "~b", ParamDefs ++ NextParamDef, ParamDef, FormatChar, 1}
    end.
% AM/PM 't'
%% AM/PM 't': t -> single-letter designator (am_pm1), tt (or longer) -> full
%% designator (am_pm); see the "t tt" custom-format test ("A AM").
parse_am_pm({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    case LastChar of
        $\\ ->
            % Escaped: emit a literal 't'.
            add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $t);
        $% ->
            % "%t": single-letter form.
            add_single_char_format(FormatStr, "~s", ParamDefs, [am_pm1]);
        $t when (Count == 1) ->
            % Second 't': upgrade the pending parameter to the full designator.
            {FormatStr, NextFormat, ParamDefs, [am_pm], LastChar, Count + 1};
        $t when (Count > 1) -> % Format is not changing, just consume the chars
            {FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count};
        _ ->
            % New run: commit anything pending, start with the one-letter form.
            {FormatStr ++ NextFormat, "~s", ParamDefs ++ NextParamDef, [am_pm1], $t, 1}
    end.
% Fraction of second 'f', with leading zeros
%% Fraction of second 'f', with leading zeros: a run of N 'f's (N =< 6)
%% renders the fraction zero padded to N digits (fract_1..fract_6); a 7th
%% consecutive 'f' commits the 6-digit field and starts a fresh run.
parse_fract({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    case LastChar of
        $\\ ->
            % Escaped: emit a literal 'f'.
            add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $f);
        $% ->
            % "%f": single-digit fraction.
            add_single_char_format(FormatStr, "~b", ParamDefs, [fract_1]);
        $f when (1 =< Count) andalso (Count =< 4) ->
            % Grow the pending zero padded width, e.g. "~3..0b" for "fff".
            % "~~" emits a literal '~'; the previous "~c~b\..0b" form relied
            % on the non-standard "\." escape, which warns on modern OTP.
            NewCount = Count + 1,
            NewNextFormat = lists:flatten(io_lib:format("~~~b..0b", [NewCount])),
            % Bounded atom creation: NewCount is 2..5, so only fract_2..fract_5.
            NewParamDef = list_to_atom(lists:flatten(io_lib:format("fract_~b", [NewCount]))),
            {FormatStr, NewNextFormat, ParamDefs, [NewParamDef], LastChar, NewCount};
        $f -> % Allow maximum of 6 consecutive "f"s, (full microSecond resolution).
            {FormatStr ++ "~6..0b", "", ParamDefs ++ [fract_6], [], null, 0};
        _ ->
            % New run: commit anything pending, start with a 1-digit fraction.
            {FormatStr ++ NextFormat, "~b", ParamDefs ++ NextParamDef, [fract_1], $f, 1}
    end.
% Fraction of second 'F', without leading zeros
%% Fraction of second 'F', without leading zeros: runs of 1..6 'F's select
%% fract_nz_1..fract_nz_6 (rendered as strings so a zero fraction prints as
%% the empty string); a 7th consecutive 'F' starts a fresh run.
parse_fract_nz({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    case LastChar of
        $\\ ->
            % Escaped: emit a literal 'F'.
            add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $F);
        $% ->
            % "%F": single-character form. The parameter must be wrapped in a
            % list -- the previous code passed the bare atom, which made
            % add_single_char_format/4 build an improper list via
            % ParamDefs ++ fract_nz_1.
            add_single_char_format(FormatStr, "~s", ParamDefs, [fract_nz_1]);
        $F when (1 =< Count) andalso (Count =< 4) ->
            % Bounded atom creation: NewCount is 2..5, so only
            % fract_nz_2..fract_nz_5.
            NewCount = Count + 1,
            NewParamDef = list_to_atom(lists:flatten(io_lib:format("fract_nz_~b", [NewCount]))),
            {FormatStr, NextFormat, ParamDefs, [NewParamDef], LastChar, NewCount};
        $F -> % Allow maximum of 6 consecutive "F"s, (full microSecond resolution).
            {FormatStr ++ "~s", "", ParamDefs ++ [fract_nz_6], [], null, 0};
        _ ->
            % New run: commit anything pending, start with the 1-digit form.
            {FormatStr ++ NextFormat, "~s", ParamDefs ++ NextParamDef, [fract_nz_1], $F, 1}
    end.
% Time zone offset 'z'
%% Time zone offset 'z': z -> shortest form (tz_offset_1, e.g. "-5"),
%% zz -> zero padded hours (tz_offset_2, e.g. "-05"), zzz or longer ->
%% hours and minutes (tz_offset_3, e.g. "-05:00").
parse_tz_offset({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    case LastChar of
        $\\ ->
            % Escaped: emit a literal 'z'.
            add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $z);
        $% ->
            % "%z": single-character (shortest) form.
            add_single_char_format(FormatStr, "~s", ParamDefs, [tz_offset_1]);
        $z when (Count == 1) ->
            % Second 'z': upgrade the pending parameter to padded hours.
            {FormatStr, NextFormat, ParamDefs, [tz_offset_2], LastChar, Count + 1};
        $z when (Count == 2) ->
            % Third 'z': upgrade to hours and minutes.
            {FormatStr, NextFormat, ParamDefs, [tz_offset_3], LastChar, Count + 1};
        $z when (Count > 2) -> % Format is not changing, just consume the chars.
            {FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count};
        _ ->
            % New run: commit anything pending, start with the shortest form.
            {FormatStr ++ NextFormat, "~s", ParamDefs ++ NextParamDef, [tz_offset_1], $z, 1}
    end.
% Time zone name 'Z'
%% Time zone name 'Z': always renders as one "~s"/tz_name field; there is no
%% run handling, so every 'Z' adds another field.
parse_tz_name({FormatStr, NextFormat, ParamDefs, NextParamDef, $\\, _Count}) ->
    % Escaped: emit a literal 'Z'.
    add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $Z);
parse_tz_name({FormatStr, _NextFormat, ParamDefs, _NextParamDef, $%, _Count}) ->
    % "%Z" single-character form.
    add_single_char_format(FormatStr, "~s", ParamDefs, [tz_name]);
parse_tz_name({FormatStr, NextFormat, ParamDefs, NextParamDef, _LastChar, _Count}) ->
    % Commit anything pending and append the zone-name field.
    {FormatStr ++ NextFormat ++ "~s", "", ParamDefs ++ NextParamDef ++ [tz_name], [], null, 0}.
% Date Separator '/'
%% Date separator '/': always renders as one "~s"/date_sep field; every '/'
%% adds another field.
parse_date_sep({FormatStr, NextFormat, ParamDefs, NextParamDef, $\\, _Count}) ->
    % Escaped: emit a literal '/'.
    add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $/);
parse_date_sep({FormatStr, _NextFormat, ParamDefs, _NextParamDef, $%, _Count}) ->
    % "%/" single-character form.
    add_single_char_format(FormatStr, "~s", ParamDefs, [date_sep]);
parse_date_sep({FormatStr, NextFormat, ParamDefs, NextParamDef, _LastChar, _Count}) ->
    % Commit anything pending and append the separator field.
    {FormatStr ++ NextFormat ++ "~s", "", ParamDefs ++ NextParamDef ++ [date_sep], [], null, 0}.
% Time Separator ':'
%% Time separator ':': always renders as one "~s"/time_sep field; every ':'
%% adds another field.
parse_time_sep({FormatStr, NextFormat, ParamDefs, NextParamDef, $\\, _Count}) ->
    % Escaped: emit a literal ':'.
    add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $:);
parse_time_sep({FormatStr, _NextFormat, ParamDefs, _NextParamDef, $%, _Count}) ->
    % "%:" single-character form.
    add_single_char_format(FormatStr, "~s", ParamDefs, [time_sep]);
parse_time_sep({FormatStr, NextFormat, ParamDefs, NextParamDef, _LastChar, _Count}) ->
    % Commit anything pending and append the separator field.
    {FormatStr ++ NextFormat ++ "~s", "", ParamDefs ++ NextParamDef ++ [time_sep], [], null, 0}.
% Era (i.e. "A.D.") 'g'
%% Era (i.e. "A.D.") 'g': any run of 'g's renders a single era string.
parse_era({FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count}) ->
    case LastChar of
        $\\ ->
            % Escaped: emit a literal 'g'.
            add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, $g);
        $% ->
            % "%g" single-character form.
            add_single_char_format(FormatStr, "~s", ParamDefs, [era]);
        % This clause (and the LastChar recorded below) must track $g, not $z
        % as before: using $z collided with the time-zone offset field, so a
        % 'g' directly following a 'z' was silently swallowed and a 'z'
        % following a 'g' was misread as a repeated offset character.
        $g when (Count > 0) -> % Format is not changing, just consume the chars.
            {FormatStr, NextFormat, ParamDefs, NextParamDef, LastChar, Count};
        _ ->
            % New run: commit anything pending and append the era field.
            {FormatStr ++ NextFormat, "~s", ParamDefs ++ NextParamDef, [era], $g, 1}
    end.
%
% Add the next Format and the next Parameter definition to the
% current Format String and Parameter definitions list respectively.
% then add a literal character to the Format string.
% The next Format, and next Parameter definition may be blank, that is OK.
% Reset the remaining parameters in the accumulator tuple.
%
%% Commit the pending format and parameter definition, then append Char as a
%% literal. Either pending piece may be empty; the run-tracking fields of the
%% accumulator are reset.
add_next_format_and_char(FormatStr, NextFormat, ParamDefs, NextParamDef, Char) ->
    CommittedFormat = FormatStr ++ NextFormat ++ [Char],
    CommittedParams = ParamDefs ++ NextParamDef,
    {CommittedFormat, "", CommittedParams, [], null, 0}.
%
% Add a format and associated parameter definition to the
% current Format String and Parameter definitions list respectively.
% Do this when we know there is no pending NextFormat/NextParamDef
% and the format is defined by only one character.
% In this case we don't need to wait for the next character.
% Reset the remaining parameters in the accumulator tuple.
%
%% Append a complete single-character field (Format and its parameter
%% definition list) directly; used when nothing is pending, so there is no
%% need to wait for the next character. Resets the run-tracking fields.
add_single_char_format(FormatStr, Format, ParamDefs, ParamDef) ->
    NewFormatStr = FormatStr ++ Format,
    NewParamDefs = ParamDefs ++ ParamDef,
    {NewFormatStr, "", NewParamDefs, [], null, 0}.
%
% Pick a calendar string based on the string group name and index
%
-spec get_calendar_string(CalendarStrs :: [{atom(), [string()] | [tuple()]}],
    Group :: atom()) -> string() | tuple().
%% Convenience form: the first entry of the named group.
get_calendar_string(CalendarStrs, Group) ->
    get_calendar_string(CalendarStrs, Group, 1).

-spec get_calendar_string(CalendarStrs :: [{atom(), [string()] | [tuple()]}],
    Group :: atom(),
    Index :: pos_integer()) -> string() | tuple().
%% Look up Group in the calendar-string proplist and pick the Index-th
%% (1-based) entry. A missing group yields the sentinel string
%% "Invalid calendar strings".
get_calendar_string(CalendarStrs, Group, Index) ->
    case lists:keyfind(Group, 1, CalendarStrs) of
        {Group, GroupEntries} -> lists:nth(Index, GroupEntries);
        false -> "Invalid calendar strings"
    end.
%
% Return a digit string, if the result of the
% integer division > zero
%
%% Render `MicroS div Divisor' as a digit string, or return the empty string
%% when the integer division yields zero (used by the no-leading-zero 'F'
%% fraction fields). integer_to_list/1 replaces the previous
%% io_lib:format + lists:flatten round trip for the same result.
non_zero_or_no_string(MicroS, Divisor) ->
    case MicroS div Divisor of
        0 -> [];
        N -> integer_to_list(N)
    end.
%
% Get Time zone info, via OS command
%
%% Offset for 'z': take the hour-only offset from `date +%:::z' and strip a
%% leading zero from the hour pair (e.g. "-05" -> "-5").
%% NOTE(review): all four helpers shell out via os:cmd/1 and rely on the
%% GNU-style %z/%Z `date' format extensions -- not portable, and the result
%% depends on the host's time zone.
get_tz_offset_1() ->
    TzOffset = string:trim(os:cmd("date +%:::z")),
    % The second character is the first hour digit; lists:delete/2 removes
    % only the first $0 found, leaving e.g. "-10" untouched.
    case lists:nth(2, TzOffset) of
        $0 -> lists:delete($0, TzOffset);
        _ -> TzOffset
    end.
%% Offset for 'zz': zero padded hours (e.g. "-05").
get_tz_offset_2() ->
    string:trim(os:cmd("date +%:::z")).
%% Offset for 'zzz': hours and minutes (e.g. "-05:00").
get_tz_offset_3() ->
    string:trim(os:cmd("date +%:z")).
%% Time zone name for 'Z' (whatever `date +%Z' prints on this host).
get_tz_name() ->
    string:trim(os:cmd("date +%Z")).
%% ====================================================================
%% Tests
%% ====================================================================
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
% ====================================================================
% Test format_time()
% Test local and utc
% Compare the local and UTC hour fields. The two string:to_integer/1 results
% must not share a bound remainder variable: the old code bound `_Rest'
% twice, turning the second binding into an accidental match assertion that
% would badmatch if the remainders ever differed. The wrapped difference is
% now computed as an expression instead of exporting `Result' from `if'.
format_time_local_and_utc_test() ->
    {LocalHr, _} = string:to_integer(format_local("%H")),
    {UtcHr, _} = string:to_integer(format_utc("%H")),
    % When executed in the US Central Time zone,
    % the hour difference should be -5 (or 19) (DST) or -6 (or 18)(not DST)
    Diff = LocalHr - UtcHr,
    Result =
        case Diff < 0 of
            true -> Diff + 24;
            false -> Diff
        end,
    ?assert(((Result == 18) or (Result == 19))).
% Test default
% Each format_time/3 test below feeds a fixed {{Y,M,D},{H,Mi,S}} tuple plus a
% microsecond count, so the expected strings are deterministic.
format_time_default_test() ->
    Value = format_time("", {{2018, 3, 25}, {19,19,19}}, 0),
    Expected = "3/25/2018 7:19:19 PM",
    ?assertEqual(Expected, Value).
% Test short date
format_time_short_date_test() ->
    Value = format_time("d", {{2018, 3, 22}, {9,9,9}}, 0),
    Expected = "3/22/2018",
    ?assertEqual(Expected, Value).
% Test long date
format_time_long_date_test() ->
    Value = format_time("D", {{2018, 3, 22}, {9,9,9}}, 0),
    Expected = "Thursday, March 22, 2018",
    ?assertEqual(Expected, Value).
% Test full date short time
format_time_full_date_short_time_test() ->
    Value = format_time("f", {{2018, 3, 23}, {19,19,19}}, 0),
    Expected = "Friday, March 23, 2018 7:19 PM",
    ?assertEqual(Expected, Value).
% Test full date long time
format_time_full_date_long_time_test() ->
    Value = format_time("F", {{2018, 3, 24}, {19,19,19}}, 0),
    Expected = "Saturday, March 24, 2018 7:19:19 PM",
    ?assertEqual(Expected, Value).
% Test general date short time
format_time_general_date_short_time_test() ->
    Value = format_time("g", {{2018, 3, 23}, {19,19,19}}, 0),
    Expected = "3/23/2018 7:19 PM",
    ?assertEqual(Expected, Value).
% Test general date long time
format_time_general_date_long_time_test() ->
    Value = format_time("G", {{2018, 3, 24}, {19,19,19}}, 0),
    Expected = "3/24/2018 7:19:19 PM",
    ?assertEqual(Expected, Value).
% Test month day 'm'
format_time_month_date_m_test() ->
    Value = format_time("m", {{2018, 3, 23}, {19,19,19}}, 0),
    Expected = "March 23",
    ?assertEqual(Expected, Value).
% Test month day 'M'
format_time_month_day_M_test() ->
    Value = format_time("M", {{2018, 3, 2}, {19,19,19}}, 0),
    Expected = "March 2",
    ?assertEqual(Expected, Value).
% Test round trip 'o'
format_time_round_trip_o_test() ->
    Value = format_time("o", {{2018, 3, 22}, {9,9,9}}, 123456),
    Expected = "2018-03-22T09:09:09.123456",
    ?assertEqual(Expected, Value).
% Test round trip 'O'
format_time_round_trip_O_test() ->
    Value = format_time("O", {{2018, 3, 22}, {9,9,9}}, 123456),
    Expected = "2018-03-22T09:09:09.123456",
    ?assertEqual(Expected, Value).
% Test RFC1123 'r'
format_time_rfc1123_r_test() ->
    Value = format_time("r", {{2018, 3, 22}, {9,9,9}}, 123456),
    Expected = "Thu, 22 Mar 2018 09:09:09 GMT",
    ?assertEqual(Expected, Value).
% Test RFC1123 'R'
format_time_rfc1123_R_test() ->
    Value = format_time("R", {{2018, 3, 22}, {9,9,9}}, 123456),
    Expected = "Thu, 22 Mar 2018 09:09:09 GMT",
    ?assertEqual(Expected, Value).
% Test sortable 's'
format_time_sortable_test() ->
    Value = format_time("s", {{2018, 3, 23}, {19,19,19}}, 0),
    Expected = "2018-03-23T19:19:19",
    ?assertEqual(Expected, Value).
% Test short time 't'
format_time_short_time_test() ->
    Value = format_time("t", {{2018, 3, 22}, {9,9,9}}, 0),
    Expected = "9:09 AM",
    ?assertEqual(Expected, Value).
% Test long time 'T'
format_time_long_time_test() ->
    Value = format_time("T", {{2018, 3, 22}, {9,9,9}}, 0),
    Expected = "9:09:09 AM",
    ?assertEqual(Expected, Value).
% Test universal 'u'
format_time_universal_test() ->
    Value = format_time("u", {{2018, 3, 23}, {19,19,19}}, 0),
    Expected = "2018-03-23 19:19:19Z",
    ?assertEqual(Expected, Value).
% Test universal full 'U'
format_time_universal_full_test() ->
    Value = format_time("U", {{2018, 3, 23}, {19,19,19}}, 0),
    Expected = "Friday, March 23, 2018 7:19:19 PM",
    ?assertEqual(Expected, Value).
% Test custom format
% Exercises escaped literals (e.g. "\\Mon\\t\\h") mixed with repeated field
% characters for year, month, day, hour, minute, second and AM/PM.
format_time_custom_format_test() ->
    Value = format_time("Year: y, yy, yyy, yyyy, yyyyy \\Mon\\t\\h: M, MM, MMM, MMMM Da\\y: d, dd, ddd, dddd \\Hour: h, hh, H, HH \\Minu\\te: m, mm Secon\\d: s, ss t tt",
        {{2108, 3, 18}, {3, 3, 3}}, 0),
    Expected = "Year: 8, 08, 108, 2108, 02108 Month: 3, 03, Mar, March Day: 18, 18, Sun, Sunday Hour: 3, 03, 3, 03 Minute: 3, 03 Second: 3, 03 A AM",
    ?assertEqual(Expected, Value).
% 'f' runs of 1..6 pad with leading zeros; the 7th+ starts a new run.
format_time_custom_format2_test() ->
    Value = format_time("uSec: f, ff, fff, ffff, fffff, ffffff, ffffffffffff",
        {{2108, 3, 18}, {3, 3, 3}}, 123456),
    Expected = "uSec: 1, 12, 123, 1234, 12345, 123456, 123456123456",
    ?assertEqual(Expected, Value).
format_time_custom_format3_test() ->
    Value = format_time("uSec: f, ff, fff, ffff, fffff, ffffff, ffffffffffff",
        {{2108, 3, 18}, {3, 3, 3}}, 6),
    Expected = "uSec: 0, 00, 000, 0000, 00000, 000006, 000006000006",
    ?assertEqual(Expected, Value).
% 'F' suppresses leading zeros entirely (empty string when zero).
format_time_custom_format4_test() ->
    Value = format_time("uSec: F, FF, FFF, FFFF, FFFFF, FFFFFF, FFFFFFFFFFFF",
        {{2108, 3, 18}, {3, 3, 3}}, 6),
    Expected = "uSec: , , , , , 6, 66",
    ?assertEqual(Expected, Value).
% NOTE(review): this test appears to depend on the host's time zone -- the
% expected values match the hardcoded US Central tz tests below; confirm
% before running in any other zone.
format_time_custom_format5_test() ->
    Value = format_time("T\\Z O\\f\\f\\se\\t: z, zz, zzz, %z T\\Z Na\\me: Z, %Z",
        {{2108, 3, 18}, {3, 3, 3}}, 6),
    Expected = "TZ Offset: -5, -05, -05:00, -5 TZ Name: DST, DST",
    ?assertEqual(Expected, Value).
format_time_custom_format6_test() ->
    Value = format_time("Era: g, gg, ggg, %g",
        {{2108, 3, 18}, {3, 3, 3}}, 6),
    Expected = "Era: A.D., A.D., A.D., A.D.",
    ?assertEqual(Expected, Value).
% Hour zero renders as 12 on the 12-hour clock.
format_time_custom_format7_test() ->
    Value = format_time("12 \\Hour: h",
        {{2108, 3, 18}, {0, 3, 3}}, 6),
    Expected = "12 Hour: 12",
    ?assertEqual(Expected, Value).
% ====================================================================
% ====================================================================
% Test get_format_defn()
% get_format_defn/1 returns {FormatString, ParamDefs}: an io:format control
% string plus the list of field atoms to substitute into it.
get_format_str_year_test() ->
    Value = get_format_defn("y, yy, yyy, yyyy, yyyyyyyyyyy"),
    Expected = {"~b, ~2..0b, ~3..0b, ~4..0b, ~11..0b", [year2, year2, year3, year, year]},
    ?assertEqual(Expected, Value).
get_format_str_month_test() ->
    Value = get_format_defn("M, MM, MMM, MMMM, MMMMMMMMMMM"),
    Expected = {"~b, ~2..0b, ~s, ~s, ~s", [month, month, month_abbr, month_str, month_str]},
    ?assertEqual(Expected, Value).
get_format_str_day_test() ->
    Value = get_format_defn("d, dd, ddd, dddd, ddddddddd"),
    Expected = {"~b, ~2..0b, ~s, ~s, ~s", [day, day, day_abbr, day_str, day_str]},
    ?assertEqual(Expected, Value).
get_format_str_hour12_test() ->
    Value = get_format_defn("h, hh, hhhh,"),
    Expected = {"~b, ~2..0b, ~2..0b,", [hour12, hour12, hour12]},
    ?assertEqual(Expected, Value).
get_format_str_hour_test() ->
    Value = get_format_defn("H, HH, HHHHH,"),
    Expected = {"~b, ~2..0b, ~2..0b,", [hour, hour, hour]},
    ?assertEqual(Expected, Value).
get_format_str_am_pm_test() ->
    Value = get_format_defn("t, tt, tttt,"),
    Expected = {"~s, ~s, ~s,", [am_pm1, am_pm, am_pm]},
    ?assertEqual(Expected, Value).
get_format_str_minute_test() ->
    Value = get_format_defn("m, mm, mmmmmmmm,"),
    Expected = {"~b, ~2..0b, ~2..0b,", [minute, minute, minute]},
    ?assertEqual(Expected, Value).
get_format_str_second_test() ->
    Value = get_format_defn("s, ss, sssssssss,"),
    Expected = {"~b, ~2..0b, ~2..0b,", [second, second, second]},
    ?assertEqual(Expected, Value).
% More than 6 consecutive 'f's wrap: the 7th-10th start a fresh fract_* run,
% hence the trailing "~6..0b~4..0b" pair.
get_format_str_fract_test() ->
    Value = get_format_defn("f, ff, fff, ffff, fffff, ffffff, ffffffffff"),
    Expected = {"~b, ~2..0b, ~3..0b, ~4..0b, ~5..0b, ~6..0b, ~6..0b~4..0b", [fract_1, fract_2, fract_3, fract_4, fract_5, fract_6, fract_6, fract_4]},
    ?assertEqual(Expected, Value).
get_format_str_fract_nz_test() ->
    Value = get_format_defn("F, FF, FFF, FFFF, FFFFF, FFFFFF, FFFFFFFFFF"),
    Expected = {"~s, ~s, ~s, ~s, ~s, ~s, ~s~s", [fract_nz_1, fract_nz_2, fract_nz_3, fract_nz_4, fract_nz_5, fract_nz_6, fract_nz_6, fract_nz_4]},
    ?assertEqual(Expected, Value).
get_format_str_tz_name_test() ->
    Value = get_format_defn("Z, %Z, \\Z "),
    Expected = {"~s, ~s, Z ", [tz_name, tz_name]},
    ?assertEqual(Expected, Value).
get_format_str_tz_offset_test() ->
    Value = get_format_defn("z, zz, zzz, zzzz, \\z, %z "),
    Expected = {"~s, ~s, ~s, ~s, z, ~s ", [tz_offset_1, tz_offset_2, tz_offset_3, tz_offset_3, tz_offset_1]},
    ?assertEqual(Expected, Value).
get_format_str_date_time_sep_test() ->
    Value = get_format_defn("//, ::, /:/: "),
    Expected = {"~s~s, ~s~s, ~s~s~s~s ", [date_sep, date_sep, time_sep, time_sep, date_sep, time_sep, date_sep, time_sep]},
    ?assertEqual(Expected, Value).
get_format_str_era_test() ->
    Value = get_format_defn("g, gg, ggg, %g, \\g"),
    Expected = {"~s, ~s, ~s, ~s, g", [era, era, era, era]},
    ?assertEqual(Expected, Value).
% Characters with no field meaning pass straight through as literals.
get_format_str_literal_test() ->
    Value = get_format_defn("abceijklnopqruvwx 1234567890 ABCDEGIJKLNOPQRSTUVWXY `~!@#$^&*()_-+={}[];\"'"),
    Expected = {"abceijklnopqruvwx 1234567890 ABCDEGIJKLNOPQRSTUVWXY `~!@#$^&*()_-+={}[];\"'", []},
    ?assertEqual(Expected, Value).
% A backslash escapes any field character into a literal.
get_format_str_escape_test() ->
    Value = get_format_defn("\\M\\d\\h\\H\\t\\m\\s\\f\\F\\Z\\z\\g\\/\\:"),
    Expected = {"MdhHtmsfFZzg/:", []},
    ?assertEqual(Expected, Value).
% ====================================================================
% ====================================================================
% Test get_tz_offset()
% NOTE(review): these tests shell out to `date' (via the get_tz_* helpers)
% and hardcode a US Central offset ("-5"/"-05"/"-05:00") and zone name
% ("DST"); they will fail in any other time zone -- confirm before running
% elsewhere. ("offest" in the names is a pre-existing typo, kept so callers
% referencing these test names do not break.)
get_tz_offest_z_test() ->
    Value = get_tz_offset_1(),
    Expected = "-5",
    ?assertEqual(Expected, Value).
get_tz_offest_zz_test() ->
    Value = get_tz_offset_2(),
    Expected = "-05",
    ?assertEqual(Expected, Value).
get_tz_offest_zzz_test() ->
    Value = get_tz_offset_3(),
    Expected = "-05:00",
    ?assertEqual(Expected, Value).
get_tz_name_test() ->
    Value = get_tz_name(),
    Expected = "DST",
    ?assertEqual(Expected, Value).
% ====================================================================
-endif. | src/time_utils.erl | 0.58818 | 0.425546 | time_utils.erl | starcoder |
-module(matrix).
-author('<EMAIL>').
% Copyright (c) 2011, <NAME>
% All rights reserved.
% Licensed under BSD license, see LICENSE for details.
-export([tests_good_/0]).
-export([mul/2, mul_vec/2]).
-export([dot/2, blas_0/2, blas_0/3, blas_1/2, blas_1/3, blas_1/5]).
%% Smoke test: exercises mul/2 (matrix product) and mul_vec/2 (matrix-vector
%% product) against hand-computed results. Uses bare `true = (... =:= ...)'
%% matches as assertions (crashes with badmatch on failure) and prints both
%% results for manual inspection.
tests_good_() ->
    M1 = [
        [1.0,2.0,3.0],
        [4.0,5.0,6.0],
        [7.0,8.0,9.0]
    ],
    V2 = [11.0, 12.0, 13.0],
    M1M1 = mul(M1, M1),
    true = ([[30.0, 36.0, 42.0],
        [66.0, 81.0, 96.0],
        [102.0, 126.0, 150.0]] =:= M1M1),
    io:format("~p~n", [M1M1]),
    M1V2 = mul_vec(M1, V2),
    true = ([74.0, 182.0, 290.0] =:= M1V2),
    io:format("~p~n", [M1V2]).
% matrix-matrix multiplication
-spec mul([[number()]], [[number()]]) -> [[number()]].
%% Matrix product A*B: B is transposed first (via the external
%% `transpozycja' module -- presumably transpozycja:tr/1 is a plain
%% transpose; confirm against that module), then every row of A is dotted
%% with every row of transposed B, i.e. every column of B.
mul(A, B) ->
    TB = transpozycja:tr(B),
    comb(A, TB, fun (X, Y) ->
        % comb(X, Y, fun (XX, YY) -> XX*YY end) % Kronecker's product ?
        dot(X, Y)
    end).
%-spec comb(list(), list(), fun(list(), list()) -> any() end) -> any().
%% Cross-combine two lists through F: for every X in the first list, build
%% the row [F(X, Y) || Y in the second list]. Used by mul/2 with F = dot/2.
comb(Rows, Cols, F) ->
    lists:map(
        fun (Row) ->
            lists:map(fun (Col) -> F(Row, Col) end, Cols)
        end,
        Rows).
% matrix-vector multiplication
% matrix-vector multiplication
-spec mul_vec([[number()]], [number()]) -> [number()].
%% Multiply matrix A (list of rows) by vector V: one dot product per row.
%% The previous spec claimed a scalar `number()' return, but the
%% comprehension clearly yields one result per row; the corrected spec keeps
%% Dialyzer honest for callers.
mul_vec(A, V) ->
    [ dot(X, V) || X <- A ].
% dot product
-spec dot([number()], [number()]) -> number().
%% Dot product of two equal-length vectors. The accumulator starts at 0.0,
%% so the result is always a float.
dot(Xs, Ys) ->
    dot(Xs, Ys, 0.0).

-spec dot([number()], [number()], number()) -> number().
%% Tail-recursive worker; both lists must be exhausted together.
dot([X | Xs], [Y | Ys], Sum) ->
    dot(Xs, Ys, Sum + X * Y);
dot([], [], Sum) ->
    Sum.
% blas methods
-spec blas_0([number()], number()) -> [number()].
%% Shift every element of X by the scalar Delta.
blas_0(X, Delta) when is_number(Delta) ->
    lists:map(fun (Elem) -> Elem + Delta end, X).

-spec blas_0(number(), [number()], number()) -> [number()].
%% Scale every element of X by Alpha, then shift by Delta.
blas_0(Alpha, X, Delta) when is_number(Alpha), is_number(Delta) ->
    lists:map(fun (Elem) -> Alpha * Elem + Delta end, X).
-spec blas_1(number(), [number()]) -> [number()].
%% Scale every element of X by Alpha.
blas_1(Alpha, X) when is_number(Alpha) ->
    lists:map(fun (Elem) -> Alpha * Elem end, X).

-spec blas_1(number(), [number()], number()) -> [number()].
%% Scale every element of X by Alpha, then shift by Delta.
blas_1(Alpha, X, Delta) when is_number(Alpha), is_number(Delta) ->
    lists:map(fun (Elem) -> Alpha * Elem + Delta end, X).
-spec blas_1(number(), [number()], number(), [number()], number()) -> [number()].
%% Elementwise Alpha*X + Beta*Y + Delta over two equal-length vectors
%% (axpy-style). The result now preserves the order of the input vectors:
%% the worker accumulates with cons, so the base case must reverse -- the
%% previous version returned the result reversed, inconsistent with every
%% other blas_* function in this module (all list comprehensions).
blas_1(Alpha, X, Beta, Y, Delta) when is_number(Alpha), is_number(Beta), is_number(Delta) ->
    blas_1_(Alpha, X, Beta, Y, Delta, []).

-spec blas_1_(number(), [number()], number(), [number()], number(), [number()]) -> [number()].
%% Tail-recursive worker; both vectors must be exhausted together.
blas_1_(_Alpha, [], _Beta, [], _Delta, Acc) ->
    lists:reverse(Acc);
blas_1_(Alpha, [H1 | T1], Beta, [H2 | T2], Delta, Acc) ->
    blas_1_(Alpha, T1, Beta, T2, Delta, [Alpha*H1 + Beta*H2 + Delta | Acc]).
-module(transducers).
-export([compose/2, drop_while/1, filter/1, list/2, map/1, stateful/3]).
-type reduction(A) :: {ok, A} | {halt, A}.
-type reduction() :: reduction(any()).
-type step(A) :: fun ((reduction(), A) -> reduction()).
-type stateful_step(A) :: fun ((A, reduction(), any()) -> {A, reduction()}).
-type finalizer() :: fun ((reduction()) -> reduction()).
-type stateful_finalizer(A) :: fun ((A, reduction()) -> reduction()).
-type reducer(A) :: {step(A), finalizer()}.
-type reducer() :: reducer(any()).
-type transducer() :: fun ((reducer()) -> reducer()).
-type predicate() :: fun ((any()) -> boolean()).
-spec list(transducer(), list()) -> list().
%% Run the transducer over List and collect the outputs into a list.
%% The base reducer conses inputs and reverses at finalization; the named
%% fun Feed walks the input, stopping early as soon as a step returns a
%% {halt, _} reduction.
list(Transduce, List) ->
    {Step, Finalize} = Transduce({
        fun ({Type, Acc}, Input) -> {Type, [Input | Acc]} end,
        fun ({Type, Acc}) -> {Type, lists:reverse(Acc)} end
    }),
    {_, Result} = fun
        Feed(Acc={halt, _}, _) -> Finalize(Acc);
        Feed(Acc, []) -> Finalize(Acc);
        Feed(Acc, [Input | Rest]) -> Feed(Step(Acc, Input), Rest)
    end({ok, []}, List),
    Result.
-spec compose(transducer(), transducer()) -> transducer().
%% Compose two transducers into one: the resulting transducer applies Inner
%% to the reducer first and then wraps the result with Outer.
compose(Outer, Inner) ->
    fun (Reducer) -> Outer(Inner(Reducer)) end.
-spec filter(predicate()) -> transducer().
%% Transducer that forwards only inputs for which Pred returns true; inputs
%% failing the predicate leave the accumulator untouched. Finalization is
%% passed through unchanged.
filter(Pred) ->
    fun ({Step, Finalize}) ->
        FilteringStep =
            fun (Acc, Input) ->
                case Pred(Input) of
                    true -> Step(Acc, Input);
                    false -> Acc
                end
            end,
        {FilteringStep, Finalize}
    end.
-spec map(fun ((any()) -> any())) -> transducer().
%% Transducer that transforms every input with F before handing it to the
%% downstream step. Finalization is passed through unchanged.
map(F) ->
    fun ({Step, Finalize}) ->
        MappingStep = fun (Acc, Input) -> Step(Acc, F(Input)) end,
        {MappingStep, Finalize}
    end.
-spec stateful(A, stateful_step(A), stateful_finalizer(A)) -> reducer().
%% Wrap a stateful step/finalize pair as a plain reducer by parking the
%% state in a dedicated linked process; each step and the finalize are
%% synchronous round trips tagged with the helper's pid. The helper exits
%% (by falling out of its receive loop) after the finalize message.
%% NOTE(review): a reducer that is never finalized leaves the helper
%% process alive -- confirm callers always drive reduction to completion.
stateful(InitialState, Step, Finalize) ->
    Self = self(),
    P = spawn_link(fun () ->
        fun Remember(State) ->
            receive
                {finalize, Reduction} -> Self ! {self(), Finalize(State, Reduction)};
                {step, Reduction, Input} ->
                    {NewState, NewReduction} = Step(State, Reduction, Input),
                    Self ! {self(), NewReduction},
                    Remember(NewState)
            end
        end(InitialState)
    end),
    {fun (Reduction, Input) ->
        P ! {step, Reduction, Input},
        receive {P, NewReduction} -> NewReduction end
    end,
    fun (Reduction) ->
        P ! {finalize, Reduction},
        receive {P, NewReduction} -> NewReduction end
    end}.
-spec drop_while(predicate()) -> transducer().
%% Skip leading inputs while Pred holds. Once one input fails the predicate,
%% the stored predicate is swapped for a constant-false fun, so every later
%% input is forwarded to the downstream step.
drop_while(Pred) ->
    fun ({Step, Finalize}) ->
        stateful(Pred, fun (CurrentPred, Acc, Input) ->
            case CurrentPred(Input) of
                true -> {Pred, Acc};
                false -> {fun (_) -> false end, Step(Acc, Input)}
            end
        end, fun (_CurrentPred, Acc) -> Finalize(Acc) end)
    end. | transducers.erl | 0.535584 | 0.570451 | transducers.erl | starcoder
%%%----------------------------------------------------------------------------
%%% @author <NAME> <<EMAIL>>
%%% @copyright 2012 University of St Andrews (See LICENCE)
%%% @headerfile "skel.hrl"
%%%
%%% @doc This module contains Reduce skeleton initialization logic.
%%%
%%% A reduce skeleton is an implementation of a parallel treefold.
%%%
%%% === Example ===
%%%
%%% ```skell:run([{reduce, fun?MODULE:reduce/2, fun ?MODULE:id/1}], Inputs)'''
%%%
%%% Here, we call upon the reduce skeleton to reduce a list of inputs,
%%% denoted `Inputs', using the developer-defined functions `reduce' and `id'. In this example, we presume to sum the elements in a list. Hence, `reduce' takes two arguments and returns their total. Whilst, `id' returns its input sans transformation. We receive the answer in the form of a single-element list as a message from the sink process.
%%%
%%% @end
%%%----------------------------------------------------------------------------
-module(sk_reduce).
-export([
make/2
,fold1/2
]).
-include("skel.hrl").
-spec make(decomp_fun(), reduce_fun()) -> fun((pid()) -> pid()).
%% @doc Readies an instance of the Reduce skeleton. Uses the developer-defined
%% decomposition and recomposition functions `Decomp' and `Reduce',
%% respectively. Returns an anonymous function waiting for the sink process
%% `NextPid'.
make(Decomp, Reduce) ->
    fun(NextPid) ->
        % NOTE(review): bare spawn/3 -- the decomp process is neither linked
        % nor monitored here, so a crash in it goes unnoticed by the caller;
        % confirm sk_reduce_decomp handles its own failure reporting.
        spawn(sk_reduce_decomp, start, [Decomp, Reduce, NextPid])
    end.
% Implemented as a treefold underneath
-spec fold1(fun((A, A) -> A), [A,...]) -> A when A :: term().
%% @doc Sequential `reduce' entry-point: a tree-shaped fold over a non-empty
%% list, combining adjacent pairs level by level until one value remains.
%% Primarily for comparison against the parallel skeleton.
fold1(_Combine, [Only]) ->
    Only;
fold1(Combine, [X, Y | Rest]) ->
    fold1(Combine, [Combine(X, Y) | pairs(Combine, Rest)]).

-spec pairs(fun((A, A) -> A), [A]) -> [A] when A :: term().
%% @doc Combine the list two elements at a time; an odd leftover element (or
%% an empty list) is returned unchanged.
pairs(Combine, [X, Y | Rest]) ->
    [Combine(X, Y) | pairs(Combine, Rest)];
pairs(_Combine, Remainder) ->
    Remainder.
-module('tgen_triangle').
-behaviour(tgen).
%% The Erlang track implementation is quite different from what the canonical data suggests, but does not actually run contrary to it.
%% While the canonical data suggests three functions `equilateral`, `isosceles`, and `scalene` that return a boolean value, the Erlang
%% track implementation consists of a single function `kind` that returns either one of the atoms `equilateral`, `isosceles`, or `scalene`,
%% or an error tuple in case of invalid input.
%%
%% This means that the test cases have to be transformed to invariably replace the property with `kind`, and the expected return to the
%% atom representation of the property.
%%
%% Also, as the canonical data does not define any error cases but expects the called function to just return `false`, these tests need to
%% be transformed to expect the appropriate error tuples instead.
%%
%% Apart from the previous case, we can only consider the positive cases, those that expect `true`.
%%
%% Further, the canonical data at the time of this writing contains a test that passes three equal sides to the `isosceles` function and
%% expects it to return true, as equilateral triangles are a subset of isosceles traingles (and isosceles triangles are in turn a subset
%% of scalene triangles), whereas the Erlang track implementation of `kind` will invariably return the smallest subset (`equilateral` in
%% this case). Tests like this are skipped by this test generator.
-define(REPLACEMENT_PROPERTY, <<"kind">>).
-export([
revision/0,
prepare_tests/1,
generate_test/2
]).
revision() -> 1.
%% Transform canonical-data cases into cases for the Erlang track's `kind'
%% API (see the module comment above): every kept case gets its property
%% rewritten to `kind', a description prefixed with the original property,
%% and a boolean expectation replaced by an atom or error tuple.
%% Clause order matters: the two invalid-input clauses must run first so
%% that `false' expectations on invalid input become error tuples rather
%% than being dropped by the `expected := false' clause below them.
prepare_tests(Cases) ->
    lists:filtermap(
        fun
            %% transform invalid input to expect error tuples
            (Case = #{description := Desc, property := Prop, input := #{sides := [A, B, C]}}) when
                A =< 0; B =< 0; C =< 0
            ->
                {true, Case#{
                    description => augment_desc(Prop, Desc),
                    property => ?REPLACEMENT_PROPERTY,
                    expected => {error, "all side lengths must be positive"}
                }};
            (Case = #{description := Desc, property := Prop, input := #{sides := [A, B, C]}}) when
                A + B =< C; A + C =< B; B + C =< A
            ->
                {true, Case#{
                    description => augment_desc(Prop, Desc),
                    property => ?REPLACEMENT_PROPERTY,
                    expected => {error, "side lengths violate triangle inequality"}
                }};
            %% only consider positive cases further down
            (#{expected := false}) ->
                false;
            %% transform equilateral cases
            (Case = #{description := Desc, property := Prop = <<"equilateral">>}) ->
                {true, Case#{
                    description => augment_desc(Prop, Desc),
                    property => ?REPLACEMENT_PROPERTY,
                    expected => prop2exp(Prop)
                }};
            %% transform isosceles cases, skip tests whose input would actually be equilateral
            (#{property := <<"isosceles">>, input := #{sides := [S, S, S]}}) ->
                false;
            (Case = #{description := Desc, property := Prop = <<"isosceles">>}) ->
                {true, Case#{
                    description => augment_desc(Prop, Desc),
                    property => ?REPLACEMENT_PROPERTY,
                    expected => prop2exp(Prop)
                }};
            %% transform scalene cases, skip tests whose input would actually be isosceles or equilateral
            (#{property := <<"scalene">>, input := #{sides := [S, S, _]}}) ->
                false;
            (#{property := <<"scalene">>, input := #{sides := [S, _, S]}}) ->
                false;
            (#{property := <<"scalene">>, input := #{sides := [_, S, S]}}) ->
                false;
            (Case = #{description := Desc, property := Prop = <<"scalene">>}) ->
                {true, Case#{
                    description => augment_desc(Prop, Desc),
                    property => ?REPLACEMENT_PROPERTY,
                    expected => prop2exp(Prop)
                }};
            %% skip anything we didn't provide for
            (_) ->
                false
        end,
        Cases
    ).
%% Builds one EUnit test for a prepared case. The tgen/tgs helpers are
%% project-local; from the calls here they presumably build an abstract
%% syntax tree for a `{Description, ?_assertMatch(Expected,
%% triangle:Property(A, B, C))}' fixture -- TODO confirm against tgen/tgs.
%% Returns {ok, Fn, Stubs} where Stubs lists the function the student must
%% implement with its argument names.
generate_test(N, #{
    description := Desc,
    expected := Exp,
    property := Prop,
    input := #{sides := [A, B, C]}
}) ->
    TestName = tgen:to_test_name(N, Desc),
    Property = tgen:to_property_name(Prop),
    Fn =
        tgs:simple_fun(
            TestName ++ "_",
            [
                erl_syntax:tuple([
                    tgs:string(Desc),
                    tgs:call_macro(
                        "_assertMatch",
                        [
                            tgs:value(Exp),
                            tgs:call_fun(
                                "triangle:" ++ Property,
                                [
                                    tgs:value(A),
                                    tgs:value(B),
                                    tgs:value(C)
                                ]
                            )
                        ]
                    )
                ])
            ]
        ),
    {ok, Fn, [{Property, ["A", "B", "C"]}]}.
%% Prefixes the original case description with the property name,
%% e.g. <<"equilateral_all sides are equal">>.
augment_desc(Prop, Desc) ->
    <<Prop/binary, $_, Desc/binary>>.

%% Maps a property binary to the atom the `kind' implementation is
%% expected to return (e.g. <<"scalene">> -> scalene).
prop2exp(Prop) ->
    binary_to_atom(Prop, latin1). | src/tgen_triangle.erl | 0.826607 | 0.837021 | tgen_triangle.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved.
%%
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at http://mozilla.org/MPL/2.0/.
%%
%% -------------------------------------------------------------------
%% @doc Custom reporting probe for Hosted Graphite.
%%
%% Collectd unix socket integration.
%% All data subscribed to by the plugin (through exosense_report:subscribe())
%% will be reported to collectd.
%% @end
%% We have to do this as a gen server since collectd expects periodical
%% metrics "refreshes", even if the values have not changed. We do this
%% through erlang:send_after() calls with the metrics / value update
%% to emit.
%%
%% Please note that exometer_report_collectd is still also a
%% exometer_report implementation.
-module(exometer_report_tty).
-behaviour(exometer_report).
-export(
[
exometer_init/1,
exometer_info/2,
exometer_cast/2,
exometer_call/3,
exometer_report/5,
exometer_subscribe/5,
exometer_unsubscribe/4,
exometer_newentry/2,
exometer_setopts/4,
exometer_terminate/2
]).
-include_lib("hut/include/hut.hrl").
-include("exometer.hrl").
-define(SERVER, ?MODULE).
%% calendar:datetime_to_gregorian_seconds({{1970,1,1},{0,0,0}}).
-define(UNIX_EPOCH, 62167219200).
-record(st, {type_map = []}).
%%%===================================================================
%%% exometer_report callback API
%%%===================================================================
%% Initializes the reporter. The optional `type_map' option maps metric
%% keys to report types; it is kept in the state for use by exometer_report/5.
exometer_init(Opts) ->
    ?log(info, "~p(~p): Starting~n", [?MODULE, Opts]),
    TypeMap = proplists:get_value(type_map, Opts, []),
    {ok, #st{type_map = TypeMap}}.

%% Subscriptions need no per-reporter bookkeeping; acknowledge unchanged state.
exometer_subscribe(_Metric, _DataPoint, _Interval, _Extra, St) ->
    {ok, St}.

exometer_unsubscribe(_Metric, _DataPoint, _Extra, St) ->
    {ok, St}.

%% Invoked through the remote_exometer() function to
%% send out an update. Prints one line per datapoint to the tty:
%% "<module>: <metric>_<datapoint> <unix-ts>:<value> (<type>)".
exometer_report(Metric, DataPoint, Extra, Value, St) ->
    ?log(debug, "Report metric ~p_~p = ~p~n", [Metric, DataPoint, Value]),
    %% Report the value and setup a new refresh timer.
    Key = Metric ++ [DataPoint],
    %% Type is looked up from the configured type_map; unknown keys are
    %% still printed, tagged as (unknown).
    Type = case exometer_util:report_type(Key, Extra, St#st.type_map) of
        {ok, T} -> T;
        error -> unknown
    end,
    Str = [?MODULE_STRING, ": ", name(Metric, DataPoint), $\s,
        timestamp(), ":", value(Value), io_lib:format(" (~w)", [Type]), $\n],
    io:put_chars(lists:flatten(Str)),
    {ok, St}.

%% Unknown interactions are logged at info level and otherwise ignored.
exometer_call(Unknown, From, St) ->
    ?log(info, "Unknown call ~p from ~p", [Unknown, From]),
    {ok, St}.

exometer_cast(Unknown, St) ->
    ?log(info, "Unknown cast: ~p", [Unknown]),
    {ok, St}.

exometer_info(Unknown, St) ->
    ?log(info, "Unknown info: ~p", [Unknown]),
    {ok, St}.

exometer_newentry(_Entry, St) ->
    {ok, St}.

exometer_setopts(_Metric, _Options, _Status, St) ->
    {ok, St}.

exometer_terminate(_, _) ->
    ignore.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% Joins a metric path and a datapoint into one "_"-separated name, e.g.
%% name([a, b], count) -> "a_b_count". Integer datapoints are rendered
%% in decimal. May return deep character data when the path contains
%% binaries; callers flatten before printing.
name(Metric, DataPoint) when is_integer(DataPoint) ->
    metric_to_string(Metric) ++ "_" ++ integer_to_list(DataPoint);
name(Metric, DataPoint) ->
    metric_to_string(Metric) ++ "_" ++ atom_to_list(DataPoint).

%% Renders a non-empty metric path with "_" between the elements.
%% Crashes with function_clause on an empty path, as before.
metric_to_string([_ | _] = Metric) ->
    lists:append(lists:join("_", [metric_elem_to_list(E) || E <- Metric])).

%% Converts a single path element to (possibly deep) character data.
metric_elem_to_list(Elem) when is_atom(Elem) -> atom_to_list(Elem);
metric_elem_to_list(Elem) when is_list(Elem) -> Elem;
metric_elem_to_list(Elem) when is_binary(Elem) -> [Elem];
metric_elem_to_list(Elem) when is_integer(Elem) -> integer_to_list(Elem).
%% Renders a numeric sample as character data. Integers become decimal
%% strings, floats go through "~f" (an iolist), and any non-numeric
%% sample degrades to "0".
value(N) when is_integer(N) ->
    integer_to_list(N);
value(N) when is_float(N) ->
    io_lib:format("~f", [N]);
value(_Other) ->
    "0".
%% Current wall-clock unix time, rendered as a decimal string.
timestamp() ->
    integer_to_list(unix_time()).

unix_time() ->
    datetime_to_unix_time(erlang:universaltime()).

%% Gregorian seconds minus the 1970 epoch offset (?UNIX_EPOCH above).
datetime_to_unix_time({{_,_,_},{_,_,_}} = DateTime) ->
    calendar:datetime_to_gregorian_seconds(DateTime) - ?UNIX_EPOCH. | src/external/cloudi_x_exometer_core/src/exometer_report_tty.erl | 0.628635 | 0.438364 | exometer_report_tty.erl | starcoder |
%%%-----------------------------------------------------------------------------
%% Evaluate probabilistic accuracy of calculating inverse cumulative distritbution
%% function.
%%
%% - Generate a random list: [{Weight: float, Node: binary}, ...]
%%
%% - Check that the number of times a node gets picked is roughly equal to it's
%% weight of getting picked in the list. Statistically, those count and weights
%% should line up, probably. Maybe.
%%
%% Inverse Cumulative Distribution Function gives the value associated with a
%% _cumulative_ proabability. It ONLY works with cumulative probabilities and that's
%% what makes it magical.
%%%-----------------------------------------------------------------------------
-module(icdf_eqc).
-include_lib("eqc/include/eqc.hrl").
-include_lib("eunit/include/eunit.hrl").
-export([prop_icdf_check/0]).
%% Property: after many icdf_select draws, each node's empirical pick
%% frequency should approximate its normalized weight (within 0.1).
prop_icdf_check() ->
    ?FORALL({Population, Iterations, Hash}, {gen_population(), gen_iterations(), binary(32)},
        begin
            %% Use entropy to generate randval for running iterations
            Entropy = blockchain_utils:rand_state(Hash),
            %% Need this to match counters against assumptions
            CumulativePopulationList = cdf(Population),
            CumulativePopulationMap = maps:from_list(CumulativePopulationList),
            %% Initial acc for the counter, each node starts with a 0 count
            InitAcc = maps:map(fun(_, _) -> 0 end, CumulativePopulationMap),
            %% Track all counts a node gets picked; thread the PRNG state
            %% through the fold so each draw uses fresh entropy.
            {Counter, _} = lists:foldl(fun(_I, {Acc, AccEntropy}) ->
                {RandVal, NewEntropy} = rand:uniform_s(AccEntropy),
                {ok, Node} = blockchain_utils:icdf_select(Population, RandVal),
                {maps:update_with(Node, fun(X) -> X + 1 end, 1, Acc), NewEntropy}
            end,
            {InitAcc, Entropy},
            lists:seq(1, Iterations)),
            %% Check that it's roughly equal or more appropriately within some threshold (0.1 is good enough, probably).
            %% Sampling noise makes an exact comparison impossible.
            CheckCounterLinesUp = lists:all(fun({Node, Count}) ->
                abs(Count/Iterations - maps:get(Node, CumulativePopulationMap)) < 0.1
            end,
            maps:to_list(Counter)),
            ?WHENFAIL(begin
                io:format("Population: ~p~n", [Population]),
                io:format("CDF: ~p~n", [CumulativePopulationList]),
                io:format("Counter: ~p~n", [Counter])
            end,
            noshrink(conjunction(
                [{verify_population_exists, length(Population) > 0},
                 {verify_unique_nodes, length(Population) == length(lists:usort(Population))},
                 {verify_cdf, lists:sum([W || {_, W} <- CumulativePopulationList]) >= 0.99}, %% it's pretty much 1.0 but damn floats
                 {verify_counts_line_up, CheckCounterLinesUp}]
                )
              )
            )
        end).
gen_iterations() ->
    %% Count these many times, lower counts don't quite "work" in the sense that the
    %% error threshold maybe too high for eqc to work with. But given enough iterations
    %% counts _should_ line up with the weights.
    elements([1000, 10000, 100000]).

gen_population() ->
    %% We need unique node names to mimic unique hotspot addresses
    %% (collisions of 32-byte random binaries are astronomically unlikely;
    %% uniqueness is additionally asserted in the property itself).
    %% Also keeps things simple. At least 3 nodes to make the check meaningful.
    ?SUCHTHAT(L, list({gen_node(), gen_weight()}), length(L) >= 3).

gen_node() ->
    %% Some random node name.
    binary(32).

gen_weight() ->
    %% A viable weight between (0, 1), open intervals.
    gen_prob().

gen_prob() ->
    %% Some probability strictly inside (0, 1).
    ?SUCHTHAT(W, real(), W > 0 andalso W < 1).

cdf(PopulationList) ->
    %% This takes the population and converts it to a normalized
    %% distribution whose weights sum to (approximately) 1.0.
    Sum = lists:sum([Weight || {_Node, Weight} <- PopulationList]),
    [{Node, blockchain_utils:normalize_float(Weight/Sum)} || {Node, Weight} <- PopulationList]. | eqc/icdf_eqc.erl | 0.576423 | 0.699921 | icdf_eqc.erl | starcoder |
-module(teal_lists).
-export([is_flat/1, assert_is_flat/1, assert_is_flat/2,
same_members/2, assert_same_members/2, assert_same_members/3,
includes_members/2, assert_includes_members/2, assert_includes_members/3,
assert_include/2, include/2,
order/2, assert_order/2, assert_order/3
]).
%% @doc Returns true when no element of List is itself a list, i.e. the
%% list contains no sublists. Note that strings are lists and therefore
%% count as sublists. The empty list is flat.
%% Spec fixed: the function can return false, so the return type is
%% boolean(), not the singleton true.
-spec is_flat(List :: list()) -> boolean().
is_flat(List) ->
    lists:all(fun(Item) -> not is_list(Item) end, List).
%% @doc Asserts that List contains no sublists; fails with the atom
%% `contains_sublists' by default, or with a caller-supplied message.
-spec assert_is_flat(List :: list()) -> true.
assert_is_flat(List) ->
    assert_is_flat(List, contains_sublists).

-spec assert_is_flat(List :: list(), Msg :: any()) -> true.
assert_is_flat(List, Msg) ->
    teal:assert(true, is_flat(List), Msg).
%% @doc True when both lists contain exactly the same elements with the
%% same multiplicities, in any order (multiset equality). Each element of
%% the first list consumes one matching occurrence from the second; any
%% leftover on either side yields false. Non-list input also yields false.
-spec same_members(List1 :: list(), List2 :: list()) -> true.
same_members([], []) ->
    true;
same_members([Head | Rest], Other) ->
    lists:member(Head, Other) andalso
        same_members(Rest, lists:delete(Head, Other));
same_members(_, _) ->
    false.
%% @doc Asserts multiset equality of the two lists; fails with
%% `not_same_members' by default, or with a caller-supplied message.
-spec assert_same_members(List1 :: list(), List2 :: list()) -> boolean().
assert_same_members(List1, List2) ->
    assert_same_members(List1, List2, not_same_members).

-spec assert_same_members(List1 :: list(), List2 :: list(), Msg :: any()) ->
    boolean().
assert_same_members(List1, List2, Msg) ->
    teal:assert(true, same_members(List1, List2), Msg).
%% @doc Returns true when every element of Members occurs in List.
%% An empty Members list is trivially included.
-spec includes_members(List :: list(), Members :: list()) -> boolean().
includes_members(List, Members) ->
    %% Single pass: lists:all/2 short-circuits on the first missing member
    %% instead of building an intermediate list of boolean results and
    %% scanning it again.
    lists:all(fun(Member) -> lists:member(Member, List) end, Members).
%% @doc Asserts that List contains every element of Members; fails with
%% `members_missing' by default, or with a caller-supplied message.
-spec assert_includes_members(List :: list(), Members :: list()) -> boolean().
assert_includes_members(List, Members) ->
    assert_includes_members(List, Members, members_missing).

-spec assert_includes_members(List :: list(), Members :: list(),
    Msg :: atom()) -> boolean().
assert_includes_members(List, Members, Msg) ->
    teal:assert(true, includes_members(List, Members), Msg).

%% @doc True when Item occurs in List.
-spec include(List :: list(), Item :: any()) -> true.
include(List, Item) ->
    lists:member(Item, List).

%% @doc Asserts that Item occurs in List; fails with `member_missing'.
-spec assert_include(List :: list(), Item :: any()) -> true.
assert_include(List, Item) ->
    teal:assert(true, include(List, Item), member_missing).
%% @doc True when List is already in the order induced by OrderFun
%% (a two-argument ordering predicate as accepted by lists:sort/2),
%% i.e. sorting the list with OrderFun leaves it unchanged.
-spec order(List :: list(), OrderFun :: fun()) -> boolean().
order(List, OrderFun) ->
    Sorted = lists:sort(OrderFun, List),
    Sorted =:= List.
%% Asserts that List is sorted according to OrderFun; fails with
%% `wrong_order' by default, or with a caller-supplied message.
-spec assert_order(List :: list(), OrderFun :: fun()) -> true.
assert_order(List, OrderFun) ->
    teal:assert(true, order(List, OrderFun), wrong_order).

-spec assert_order(List :: list(), OrderFun :: fun(), Msg :: any()) -> true.
assert_order(List, OrderFun, Msg) ->
    teal:assert(true, order(List, OrderFun), Msg). | src/teal_lists.erl | 0.720762 | 0.639272 | teal_lists.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(vtree_delete).
-include("vtree.hrl").
-include("couch_db.hrl").
-export([delete/2]).
-ifdef(makecheck).
-compile(nowarn_export_all).
-compile(export_all).
-endif.
% The nodes that should get deleted don't need to be KV-nodes with every
% record field set. It's enough to have the `key` and the `docid` set.
-spec delete(Vt :: #vtree{}, Nodes :: [#kv_node{}]) -> #vtree{}.
%% Nothing to delete: return the tree unchanged.
delete(Vt, []) ->
    Vt;
%% Empty tree: nothing can be deleted.
delete(#vtree{root=nil}=Vt, _Nodes) ->
    Vt;
delete(Vt, Nodes) ->
    %% monotonic_time is used (rather than wall clock) purely to measure
    %% how long the deletion took for the debug log below.
    T1 = erlang:monotonic_time(),
    Root = Vt#vtree.root,
    PartitionedNodes = [Nodes],
    KpNodes = delete_multiple(Vt, PartitionedNodes, [Root]),
    %% An empty result means every node was removed; the tree becomes empty.
    NewRoot = case KpNodes of
        [] -> nil;
        KpNodes ->
            vtree_modify:write_new_root(Vt, KpNodes)
    end,
    ?LOG_DEBUG("Deletion took: ~ps~n",
        [erlang:convert_time_unit(erlang:monotonic_time() - T1, native, microsecond)/1000000]),
    Vt#vtree{root=NewRoot}.
%% Runs the generic tree-modification machinery with deletion callbacks.
-spec delete_multiple(Vt :: #vtree{}, ToDelete :: [#kv_node{}],
    Existing :: [#kp_node{}]) -> [#kp_node{}].
delete_multiple(Vt, ToDelete, Existing) ->
    ModifyFuns = {fun delete_nodes/2, fun partition_nodes/3},
    vtree_modify:modify_multiple(Vt, ModifyFuns, ToDelete, Existing, []).

%% Keeps only the existing KV-nodes that are NOT scheduled for deletion.
-spec delete_nodes(ToDelete :: [#kv_node{}], Existing :: [#kv_node{}]) ->
    [#kv_node{}].
delete_nodes(ToDelete, Existing) ->
    %% Filter out all children that should be deleted
    [E || E <- Existing, not(member_of_nodes(E, ToDelete))].
% Returns true if a given KV-node is member of a list of KV-nodes.
% `key` and `docid` are the fields that are used to determine whether it is
% a member or not; all other record fields are deliberately ignored.
-spec member_of_nodes(Node :: #kv_node{}, Nodes :: [#kv_node{}]) -> boolean().
member_of_nodes(Node, Nodes) ->
    % lists:any/2 short-circuits on the first key/docid match, exactly like
    % the previous hand-rolled recursion, and returns false for [].
    lists:any(
        fun(Other) ->
            Node#kv_node.key == Other#kv_node.key andalso
                Node#kv_node.docid == Other#kv_node.docid
        end, Nodes).
% Partitions a list of nodes according to a list of MBBs which are given by
% KP-nodes. The nodes are added to those partitions whose MBB fully encloses
% the given node. This means that one node may end up in multiple partitions.
% The reason is that for deletions there could be several search path where
% the node that should be deleted could be.
-spec partition_nodes(ToPartition :: [#kv_node{}], KpNodes :: [#kp_node{}],
    Less :: lessfun()) -> [[#kv_node{}]].
partition_nodes(ToPartition, KpNodes, Less) ->
    % Result is positional: the i-th sublist belongs to the i-th KP-node.
    lists:map(fun(#kp_node{key=PartitionMbb}) ->
        % Filter out all nodes that are not within the MBB of
        % the current node
        [P || #kv_node{key=Mbb}=P <- ToPartition,
            vtree_util:within_mbb(Mbb, PartitionMbb, Less)]
    end, KpNodes). | vtree/src/vtree_delete.erl | 0.682045 | 0.548129 | vtree_delete.erl | starcoder |
-module(parent).
%%%-------------------------------------------------------------------
%% @doc Functions for implementing a parent process.
%%
%% A parent process has the following properties:
%%
%% 1. It traps exits.
%% 2. It tracks its children inside the process dictionary.
%% 3. Before terminating, it stops its children synchronously, in the reverse startup order.
%%
%% In most cases the simplest option is to start a parent process using a higher-level abstraction
%% such as `gen_server_parent`. In this case you will use a subset of the API from this module to
%% start, stop, and enumerate your children.
%%
%% If available parent behaviours doesn't fit your purposes, you can consider building your own
%% behaviour or a concrete process. In this case, the functions of this module will provide the
%% necessary plumbing. To implement a parent process you need to do the following:
%%
%% 1. Invoke `initialize/0` when the process is started.
%% 2. Use functions such as `start_child/1` to work with child processes.
%% 3. When a message is received, invoke `handle_message/1` before handling the message yourself.
%% 4. If you receive a shutdown exit message from your parent, stop the process.
%% 5. Before terminating, invoke `shutdown_all/1` to stop all the children.
%% 6. Use `infinity` as the shutdown strategy for the parent process, and `supervisor` for its type.
%% 7. If the process is a `gen_server`, handle supervisor calls (see `supervisor:which_children/0`
%% and `supervisor:count_children/0`).
%% 8. Implement `format_status/2` (see `gen_server_parent` for details) where applicable.
%%
%% If the parent process is powered by a code that not adhering to the OTP Design Principles
%% (e.g. plain erlang, modules that does not implement gen_* behaviors), make sure
%% to receive messages sent to that process, and handle them properly (see points 3 and 4).
%%
%% You can take a look at the code of `gen_server_parent` for specific details.
%% @end
%%%-------------------------------------------------------------------
-include("parent.hrl").
-include_lib("kernel/include/logger.hrl").
-export_type([child_spec/0, child_id/0, child_meta/0, shutdown/0, start_spec/0,
child/0, handle_message_response/0]).
-export([
child_spec/1, child_spec/2,
parent_spec/0, parent_spec/1,
initialize/0,
is_initialized/0,
start_child/1, start_child/2,
start_all_children/1,
shutdown_child/1,
shutdown_all/0, shutdown_all/1,
handle_message/1,
await_child_termination/2,
children/0,
is_child/1,
supervisor_which_children/0,
supervisor_count_children/0,
supervisor_get_childspec/1,
num_children/0,
child_id/1,
child_pid/1,
child_meta/1,
update_child_meta/2
]).
%% ----------------------------------------------------------------------------
%% @doc
%% Builds and overrides a child specification
%%
%% This operation is similar to
%% [supervisor:child_spec/1]
%% @end
%% ----------------------------------------------------------------------------
-define(default_spec, #{id => undefined, meta => undefined, timeout => infinity}).
-spec child_spec(start_spec(), map() | child_spec()) -> child_spec().
child_spec(StartSpec) ->
    child_spec(StartSpec, #{}).

%% Expands the start spec into a full child spec and lets Overrides win
%% over the expanded defaults.
child_spec(StartSpec, Overrides) ->
    Expanded = expand_child_spec(StartSpec),
    maps:merge(Expanded, Overrides).

-spec parent_spec(map() | child_spec()) -> child_spec().
parent_spec() ->
    parent_spec(#{}).

%% A parent process must be supervised as a supervisor with an infinite
%% shutdown, so it gets the chance to stop its own children first.
parent_spec(Overrides) ->
    Defaults = #{shutdown => infinity, type => supervisor},
    maps:merge(Defaults, Overrides).
%% ----------------------------------------------------------------------------
%% @doc
%% Initializes the state of the parent process.
%%
%% This function should be invoked once inside the parent process before other functions from this
%% module are used. If a parent behaviour, such as `gen_server_parent`, is used, this function must
%% not be invoked.
%% @end
%% ----------------------------------------------------------------------------
-spec initialize() -> ok.
initialize() ->
    %% Parent state lives in the process dictionary under ?MODULE, so a
    %% double initialize in the same process is a programmer error.
    is_initialized() andalso error("Parent state is already initialized"),
    %% A parent must trap exits to receive 'EXIT' messages from its children.
    process_flag(trap_exit, true),
    store(parent_state:initialize()).

%% @doc Returns true if the parent state is initialized.
-spec is_initialized() -> boolean().
is_initialized() -> undefined =/= get(?MODULE).

%% @doc Starts the child described by the specification.
-spec start_child(start_spec()) -> supervisor:startchild_ret().
start_child(StartSpec) ->
    start_child(StartSpec, #{}).

-spec start_child(start_spec(), map() | list({term(), term()})) ->
    supervisor:startchild_ret().
start_child(StartSpec, Overrides) when is_map(Overrides) ->
    State = state(),
    ChildSpec = child_spec(StartSpec, Overrides),
    case start_child_process(State, ChildSpec) of
        {ok, Pid, TRef} ->
            store(parent_state:register_child(State, Pid, ChildSpec, TRef)),
            {ok, Pid};
        %% Note: this branch also passes through {ok, undefined} (the
        %% `ignore' case from start_child_process), which is deliberately
        %% not registered, matching supervisor semantics.
        Error ->
            Error
    end;
%% Proplist overrides are normalized to a map.
start_child(StartSpec, Overrides) when is_list(Overrides) ->
    start_child(StartSpec, maps:from_list(Overrides)).
%% ----------------------------------------------------------------------------
%% @doc
%% Synchronously starts all children.
%%
%% If some child fails to start, all of the children will be taken down and the parent process
%% will exit.
%% @end
%% ----------------------------------------------------------------------------
-spec start_all_children([start_spec()]) -> [pid() | undefined].
start_all_children(ChildSpecs) ->
    lists:map(
        fun(ChildSpec) ->
            FullSpec = child_spec(ChildSpec),
            case start_child(FullSpec) of
                {ok, Pid} -> Pid;
                {error, Error} ->
                    %% One failed start takes down everything already
                    %% started and then exits the parent (see give_up/3).
                    Msg = io_lib:format(
                        "Error starting the child ~p: ~p~n",
                        [maps:get(id, FullSpec), Error]),
                    give_up(state(), start_error, Msg)
            end
        end, ChildSpecs).

%% ----------------------------------------------------------------------------
%% @doc
%% Terminates the child.
%%
%% This function waits for the child to terminate. In the case of explicit
%% termination, `handle_child_terminated/5` will not be invoked.
%% @end
%% ----------------------------------------------------------------------------
-spec shutdown_child(child_ref()) -> ok.
shutdown_child(ChildRef) ->
    case parent_state:pop(state(), ChildRef) of
        {ok, Child, NewState} ->
            do_shutdown_child(Child, shutdown),
            store(NewState);
        %% NOTE(review): returns the atom `error' for an unknown child,
        %% which contradicts the `-> ok.' spec above -- confirm intended.
        error -> error
    end.

%% ----------------------------------------------------------------------------
%% @doc
%% Terminates all running child processes.
%%
%% Children are terminated synchronously, in the reverse order from the order they
%% have been started in.
%% @end
%% ----------------------------------------------------------------------------
shutdown_all() ->
    shutdown_all(shutdown).

-spec shutdown_all(term()) -> ok.
%% `normal' cannot be used as an exit signal to kill a child that traps
%% exits, so it is coerced to `shutdown'.
shutdown_all(normal) ->
    shutdown_all(shutdown);
shutdown_all(Reason) ->
    Children = parent_state:children(state()),
    %% Reverse startup order: most recently started child goes down first.
    SChildren = lists:sort(
        fun(#{startup_index := I1}, #{startup_index := I2}) ->
            I1 >= I2
        end,
        Children
    ),
    lists:foreach(fun(Child) -> do_shutdown_child(Child, Reason) end, SChildren),
    store(parent_state:initialize()).
%% ----------------------------------------------------------------------------
%% @doc
%% Should be invoked by the parent process for each incoming message.
%%
%% If the given message is not handled, this function returns `undefined`. In such cases, the client code
%% should perform standard message handling. Otherwise, the message has been handled by the parent,
%% and the client code doesn't shouldn't treat this message as a standard message (e.g. by calling
%% `handle_info` of the callback module).
%%
%% However, in some cases, a client might want to do some special processing, so the return value
%% will contain information which might be of interest to the client. Possible values are:
%%
%% - `{'EXIT', Pid, Id, ChildMeta, Reason :: term()}` - a child process has terminated
%% - `ignore` - `parent` handled this message, but there's no useful information to return
%%
%% Note that you don't need to invoke this function in a `gen_server_parent` callback module.
%% @end
%% ----------------------------------------------------------------------------
-spec handle_message(term()) -> handle_message_response() | undefined.
%% Remote-client protocol: a caller may invoke a subset of this module's
%% functions inside the parent process via a '$parent_call' message.
handle_message({'$parent_call', Client, {parent_client, Function, Args}}) ->
    gen_server:reply(Client, apply(?MODULE, Function, Args)),
    ignore;
handle_message(Message) ->
    case do_handle_message(state(), Message) of
        {Result, State} ->
            store(State),
            Result;
        %% do_handle_message returns `undefined' for messages it does not
        %% recognize; pass that through unchanged.
        Error ->
            Error
    end.

%% ----------------------------------------------------------------------------
%% @doc
%% Awaits for the child to terminate.
%%
%% If the function succeeds, `handle_child_terminated/5` will not be invoked.
%% @end
%% ----------------------------------------------------------------------------
-spec await_child_termination(child_id(), non_neg_integer() | infinity) ->
    {pid(), child_meta(), Reason :: term()} | timeout.
await_child_termination(ChildId, Timeout) ->
    State = state(),
    case parent_state:child_pid(State, ChildId) of
        error ->
            error("unknown child");
        {ok, Pid} ->
            %% Selective receive: only the 'EXIT' of this particular child
            %% is consumed here; other messages stay in the mailbox.
            receive
                {'EXIT', Pid, Reason} ->
                    {ok, Child, NewState} = parent_state:pop(State, Pid),
                    #{spec := #{id := ChildId}, timer_ref := Tref, meta := Meta} = Child,
                    kill_timer(Tref, Pid),
                    store(NewState),
                    {Pid, Meta, Reason}
            after Timeout -> timeout
            end
    end.
%% @doc Returns the running children as maps with id, pid and meta,
%% ordered by their startup index (earliest-started first).
-spec children() -> list(child()).
children() ->
    ByStartup = lists:sort(
        fun(#{startup_index := A}, #{startup_index := B}) -> A =< B end,
        parent_state:children(state())),
    lists:map(
        fun(#{spec := #{id := Id}, pid := Pid, meta := Meta}) ->
            #{id => Id, pid => Pid, meta => Meta}
        end,
        ByStartup).
%% ----------------------------------------------------------------------------
%% @doc
%% Returns true if the child process is still running, false otherwise.
%%
%% Note that this function might return true even if the child has terminated.
%% This can happen if the corresponding 'EXIT' message still hasn't been
%% processed.
%% @end
%% ----------------------------------------------------------------------------
-spec is_child(child_ref()) -> boolean().
is_child(ChildRef) ->
    error =/= parent_state:child(state(), ChildRef).

%% ----------------------------------------------------------------------------
%% @doc
%% Should be invoked by the behaviour when handling `which_children` gen_server call.
%%
%% You only need to invoke this function if you're implementing a parent process using a behaviour
%% which forwards `gen_server` call messages to the `handle_call` callback. In such cases you need
%% to respond to the client with the result of this function. Note that parent behaviours such as
%% `gen_server_parent` will do this automatically.
%%
%% If no translation of `gen_server` messages is taking place, i.e. if you're handling all messages
%% in their original shape, this function will be invoked through `handle_message/1`.
%% @end
%% ----------------------------------------------------------------------------
-spec supervisor_which_children() ->
    [{term(), pid(), worker | supervisor, [module()] | dynamic}].
supervisor_which_children() ->
    %% Same shape as supervisor:which_children/1 returns.
    lists:map(
        fun(#{pid := Pid, spec := #{id := Id, type := T, modules := Mods}}) ->
            {Id, Pid, T, Mods}
        end, parent_state:children(state())
    ).
%% ----------------------------------------------------------------------------
%% @doc
%% Should be invoked by the behaviour when handling `count_children` gen_server call.
%%
%% See `supervisor:which_children/0` for details.
%% @end
%% ----------------------------------------------------------------------------
-spec supervisor_count_children() ->
    [{specs | active | supervisors | workers, non_neg_integer()}].
%% The macro doubles as both a constructor and a pattern for the
%% accumulator proplist used in the fold below.
-define(count_children_acc(Specs, Active, Supervisors, Workers), [
    {specs, Specs},
    {active, Active},
    {supervisors, Supervisors},
    {workers, Workers}
]).
supervisor_count_children() ->
    %% Every tracked child is counted as active, since the parent only
    %% keeps running children in its state.
    lists:foldl(fun(#{spec := Spec}, ?count_children_acc(S, A, SV, W)) ->
        case maps:get(type, Spec) of
            worker -> ?count_children_acc(S+1, A+1, SV, W + 1);
            supervisor -> ?count_children_acc(S+1, A+1, SV + 1, W)
        end
    end, ?count_children_acc(0,0,0,0), parent_state:children(state())).

%% ----------------------------------------------------------------------------
%% @doc Should be invoked by the behaviour when handling `get_childspec` gen_server call.
%%
%% See `supervisor:get_childspec/2` for details.
%% @end
%% ----------------------------------------------------------------------------
-spec supervisor_get_childspec(child_ref()) ->
    {ok, child_spec()} | {error, not_found}.
supervisor_get_childspec(ChildRef) ->
    case parent_state:child(state(), ChildRef) of
        {ok, #{spec := Spec}} -> {ok, Spec};
        error -> {error, not_found}
    end.

%% @doc "Returns the count of running child processes."
-spec num_children() -> non_neg_integer().
num_children() -> parent_state:num_children(state()).

%% @doc "Returns the id of a child process with the given pid."
-spec child_id(pid()) -> {ok, child_id()} | error.
child_id(Pid) -> parent_state:child_id(state(), Pid).

%% @doc "Returns the pid of a child process with the given id."
-spec child_pid(child_id()) -> {ok, pid()} | error.
child_pid(Id) -> parent_state:child_pid(state(), Id).

%% @doc "Returns the meta associated with the given child id."
-spec child_meta(child_id()) -> {ok, child_meta()} | error.
child_meta(Id) -> parent_state:child_meta(state(), Id).

%% @doc "Updates the meta of the given child process."
-spec update_child_meta(child_ref(), fun((child_meta()) -> child_meta())) ->
    ok | error.
update_child_meta(ChildRef, UpdaterFun) ->
    case parent_state:update_child_meta(state(), ChildRef, UpdaterFun) of
        {ok, NewState} -> store(NewState);
        Error -> Error
    end.
%%%-------------------------------------------------------------------
%%% Internal functions
%%%-------------------------------------------------------------------
%% Normalizes every accepted start-spec shape (module, {module, Arg},
%% or a map with a `start' key) into a fully-populated child spec map.
expand_child_spec(Mod) when is_atom(Mod) ->
    expand_child_spec({Mod, undefined});
expand_child_spec({Mod, Arg}) ->
    %% Delegate to the module's own child_spec/1 and normalize its result.
    expand_child_spec(Mod:child_spec(Arg));
expand_child_spec(#{start := ChildStart} = ChildSpec) ->
    %% Defaults are layered: generic defaults, then type-specific shutdown,
    %% then derived modules, and finally the caller-provided fields win.
    Spec = maps:merge(
        ?default_spec,
        default_type_and_shutdown_spec(maps:get(type, ChildSpec, worker))
    ),
    maps:merge(Spec#{modules => default_modules(ChildStart)}, ChildSpec);
expand_child_spec(_other) ->
    error("invalid child_spec").

%% Same defaults as OTP supervisors: workers get a 5 s shutdown,
%% supervisors get infinity.
default_type_and_shutdown_spec(worker) ->
    #{type => worker, shutdown => timer:seconds(5)};
default_type_and_shutdown_spec(supervisor) ->
    #{type => supervisor, shutdown => infinity}.

%% Derives the `modules' entry from the start function (MFA or fun).
default_modules({Mod, _Fun, _Args}) ->
    [Mod];
default_modules(Fun) when is_function(Fun) ->
    [proplists:get_value(module, erlang:fun_info(Fun))].

%% Rejects pid ids and duplicate ids before a child is started.
validate_spec(State, ChildSpec) ->
    Id = maps:get(id, ChildSpec),
    case check_id_type(Id) of
        ok -> check_id_uniqueness(State, Id);
        Error -> Error
    end.

%% Pids are reserved as child references, so they cannot be used as ids.
check_id_type(Pid) when is_pid(Pid) -> {error, invalid_child_id};
check_id_type(_Other) -> ok.

check_id_uniqueness(State, Id) ->
    case parent_state:child_pid(State, Id) of
        {ok, Pid} -> {error, {already_started, Pid}};
        error -> ok
    end.

%% Start functions are either {M, F, A} or a zero-arity fun.
invoke_start_function({Mod, Func, Args}) -> apply(Mod, Func, Args);
invoke_start_function(Fun) when is_function(Fun, 0) -> Fun().
%% Validates the spec and starts the child process. On success returns
%% {ok, Pid, TRef} where TRef is the per-child lifetime timer (or
%% `undefined' when the spec's timeout is infinity). An `ignore' from the
%% start function is translated to {ok, undefined}, like in supervisors.
start_child_process(State, ChildSpec) ->
    case validate_spec(State, ChildSpec) of
        ok ->
            case invoke_start_function(maps:get(start, ChildSpec)) of
                ignore ->
                    {ok, undefined};
                {ok, Pid} ->
                    TRef =
                        case maps:get(timeout, ChildSpec) of
                            infinity -> undefined;
                            Timeout ->
                                %% When the timer fires, do_handle_message
                                %% forcefully stops the child (see
                                %% {?MODULE, child_timeout, Pid} handling).
                                erlang:send_after(
                                    Timeout,
                                    self(),
                                    {?MODULE, child_timeout, Pid}
                                )
                        end,
                    {ok, Pid, TRef};
                Error ->
                    Error
            end;
        Error -> Error
    end.
%% Child exit: deregister it and surface an enriched 'EXIT' tuple that
%% includes the child's id and meta.
do_handle_message(State, {'EXIT', Pid, Reason}) ->
    case parent_state:pop(State, Pid) of
        {ok, #{spec := Spec, meta := Meta, timer_ref := TRef}, NewState} ->
            kill_timer(TRef, Pid),
            {{'EXIT', Pid, maps:get(id, Spec), Meta, Reason}, NewState};
        %% Not one of our children -- let the caller handle the message.
        error ->
            undefined
    end;
%% Per-child lifetime timer fired: brutally kill the child and report the
%% termination with reason `timeout'.
do_handle_message(State, {?MODULE, child_timeout, Pid}) ->
    {ok, #{spec := Spec, meta := Meta} = Child, NewState} =
        parent_state:pop(State, Pid),
    do_shutdown_child(Child, kill),
    {{'EXIT', Pid, maps:get(id, Spec), Meta, timeout}, NewState};
%% Raw supervisor-protocol calls, for parents that are not gen_servers.
do_handle_message(State, {'$gen_call', Client, which_children}) ->
    gen_server:reply(Client, supervisor_which_children()),
    {ignore, State};
do_handle_message(State, {'$gen_call', Client, count_children}) ->
    gen_server:reply(Client, supervisor_count_children()),
    {ignore, State};
do_handle_message(State, {'$gen_call', Client, {get_childspec, ChildRef}}) ->
    gen_server:reply(Client, supervisor_get_childspec(ChildRef)),
    {ignore, State};
%% Unknown message: signal the caller to process it themselves.
do_handle_message(_State, _Other) ->
    undefined.
%% Synchronously stops a single child according to its shutdown strategy.
do_shutdown_child(Child, Reason) ->
    #{pid := Pid, timer_ref := TRef, spec := #{shutdown := Shutdown}} = Child,
    kill_timer(TRef, Pid),
    %% brutal_kill means "don't ask nicely": send `kill' immediately.
    ExitSignal =
        if
            Shutdown == brutal_kill -> kill;
            true -> Reason
        end,
    %% `kill' cannot be trapped, so the 'EXIT' is guaranteed to arrive and
    %% we can wait for it indefinitely; otherwise wait the configured time.
    WaitTime =
        if
            ExitSignal == kill -> infinity;
            true -> Shutdown
        end,
    sync_stop_process(Pid, ExitSignal, WaitTime).

%% Sends ExitSignal and blocks until the child's 'EXIT' arrives. If it
%% does not terminate within WaitTime, escalates to an untrappable kill.
sync_stop_process(Pid, ExitSignal, WaitTime) ->
    exit(Pid, ExitSignal),
    receive
        {'EXIT', Pid, _Reason} -> ok
    after WaitTime ->
        exit(Pid, kill),
        receive
            {'EXIT', Pid, _Reason} -> ok
        end
    end.
%% Cancels a child's lifetime timer and drains an already-delivered
%% timeout message for that pid, so a stale timeout is never processed
%% after the child has been deregistered.
kill_timer(undefined, _Pid) ->
    ok;
kill_timer(TimerRef, Pid) ->
    erlang:cancel_timer(TimerRef),
    receive
        {_Parent, child_timeout, Pid} -> ok
    after 0 -> ok
    end.

%% Reads the parent state from the process dictionary; crashes if
%% initialize/0 has not been called in this process.
-spec state() -> parent_state:t().
state() ->
    State = get(?MODULE),
    undefined =:= State andalso error("Parent is not initialized"),
    State.

%% Writes the parent state back to the process dictionary.
-spec store(parent_state:t()) -> ok.
store(State) ->
    put(?MODULE, State),
    ok.
%% @doc false
%% Logs the error, stores the latest state so shutdown_all/0 sees every
%% started child, tears all children down, and exits the parent process.
give_up(State, ExitReason, ErrorMsg) ->
    ?LOG(error, ErrorMsg),
    store(State),
    shutdown_all(),
    exit(ExitReason). | src/parent.erl | 0.55917 | 0.657676 | parent.erl | starcoder |
%%%
%%% Copyright (c) 2021 <NAME>
%%% All rights reserved.
%%% Distributed under the terms of the MIT License. See the LICENSE file.
%%%
%%% SIP URI parser auxilary functions
%%%
-module(ersip_uri_parser_aux).
-export([unquote_hex/1,
split_scheme/1]).
%%===================================================================
%% API
%%===================================================================
-spec unquote_hex(binary()) -> binary().
%% Decode percent-encoded (%XX) sequences in Bin. When the input contains
%% no valid escapes the original binary is returned as-is (no copy).
unquote_hex(Bin) ->
    do_unquote_hex(Bin, Bin, {0, 0}, []).
-spec split_scheme(binary()) -> {binary(), binary()}.
%% Split Bin at the first colon into {LowercasedScheme, Rest}. When no
%% colon is present the scheme part is empty and the input is returned
%% whole as the suffix.
split_scheme(Bin) ->
    case binary:split(Bin, <<":">>) of
        [SchemePart, Rest] -> {ersip_bin:to_lower(SchemePart), Rest};
        [NoScheme] -> {<<>>, NoScheme}
    end.
%%===================================================================
%% Implementation
%%===================================================================
-include("ersip_sip_abnf.hrl").
-spec do_unquote_hex(binary(), binary(), {non_neg_integer(), integer()}, iolist()) -> binary().
%% Worker for unquote_hex/1. The first argument is the remaining input;
%% Orig is the complete original binary. {Pos, Len} delimits the run of
%% not-yet-copied literal bytes inside Orig, and Acc collects decoded
%% characters and literal chunks in reverse order.
do_unquote_hex(<<>>, Orig, {_, Len}, []) when Len == byte_size(Orig) ->
    %% Nothing was unquoted: return the original binary without copying.
    Orig;
do_unquote_hex(<<>>, _, {_, 0}, Acc) ->
    iolist_to_binary(lists:reverse(Acc));
do_unquote_hex(<<>>, Orig, Part, Acc) ->
    %% Flush the trailing run of literal bytes before building the result.
    PartBin = binary:part(Orig, Part),
    iolist_to_binary(lists:reverse([PartBin | Acc]));
do_unquote_hex(<<$%, H1:8, H2:8, Rest/binary>>, Orig, {Pos, Len} = Part, Acc) when ?is_HEXDIG(H1) andalso ?is_HEXDIG(H2) ->
    %% Valid %XX escape: decode it to a single character.
    Char = 16 * hex_char_to_num(H1) + hex_char_to_num(H2),
    case Len of
        0 ->
            %% No pending literal bytes; skip the 3-byte escape sequence.
            do_unquote_hex(Rest, Orig, {Pos + 3, 0}, [Char | Acc]);
        _ ->
            %% Flush the pending literal run, then add the decoded char.
            PartBin = binary:part(Orig, Part),
            do_unquote_hex(Rest, Orig, {Pos + Len + 3, 0}, [Char, PartBin | Acc])
    end;
do_unquote_hex(<<_:8, Rest/binary>>, Orig, {Pos, Len}, Acc) ->
    %% Literal byte (or a malformed escape): extend the pending run.
    do_unquote_hex(Rest, Orig, {Pos, Len+1}, Acc).
-spec hex_char_to_num(char()) -> 0..15.
%% Convert a single hexadecimal digit character (0-9, A-F, a-f) to its
%% numeric value. Crashes with function_clause on any other character.
%% (Fix: removed stray non-Erlang text that had been appended after the
%% final clause and would not compile.)
hex_char_to_num(X) when X >= $0 andalso X =< $9 ->
    X - $0;
hex_char_to_num(X) when X >= $A andalso X =< $F ->
    X - $A + 10;
hex_char_to_num(X) when X >= $a andalso X =< $f ->
    X - $a + 10.
%% Copyright (c) 2016-2021 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : lfe_types.erl
%% Author : <NAME>
%% Purpose : Lisp Flavoured Erlang type formatting.
%% Handling types in LFE including functions for converting between
%% Erlang and LFE type syntaxes.
%%
%% We can correctly do most types except for maps where we lose the
%% distinction between assoc and exact pairs.
-module(lfe_types).
-export([is_type_decl/1]).
-export([format_error/1]).
-export([from_type_def/1,from_type_defs/1,to_type_def/2,to_type_defs/2,
check_type_def/3,check_type_defs/3]).
-export([from_func_spec_list/1,to_func_spec_list/2,
check_func_spec_list/3]).
%% -compile(export_all).
-include("lfe.hrl").
%% format_error(Error) -> String.
%%  Render a type/spec checking error as a printable string.
%%  Fixes: the {undefined_record,_} error produced by
%%  undefined_record_error/2 had no clause here (format_error would crash
%%  on it), and one clause used a binary format string unlike the others.
format_error({bad_type,T}) ->
    lfe_io:format1("bad ~w type definition", [T]);
format_error({type_syntax,T}) ->
    lfe_io:format1("bad ~w type syntax", [T]);
format_error({undefined_record,R}) ->
    lfe_io:format1("undefined record: ~w", [R]);
format_error({bad_spec,S}) ->
    lfe_io:format1("bad function spec: ~w", [S]).
%% is_type_decl(Tag) -> boolean().
%%  Is Tag one of the attribute names that declares a type?
is_type_decl(Tag) ->
    lists:member(Tag, [type,opaque]).
%% from_type_def(AST) -> Def.
%%  Translate an Erlang type definition to LFE. This takes the Erlang
%%  AST form of a type definition and translates to the LFE type
%%  syntax. No AST there of course. Clause order matters: the special
%%  forms must be matched before the generic {type,_,Type,Args} clause.

%% Our special cases.
from_type_def({type,_L,union,Types}) ->                 %Special case union
    ['UNION'|from_type_defs(Types)];
from_type_def({type,_L,tuple,any}) ->                   %Special case tuple() -> (tuple)
    [tuple];
from_type_def({type,_L,tuple,Elems}) ->
    list_to_tuple(from_type_defs(Elems));
from_type_def({type,_L,binary,Bits}) when Bits =/= [] ->
    [bitstring|from_type_defs(Bits)];                   %Flip binary<->bitstring here
%% from_type_def({type,_L,bitstring,[]}) -> [bitstring,[]];
from_type_def({type,_L,map,any}) ->                     %Special case map() -> (map)
    [map];
from_type_def({type,_L,map,Pairs}) ->
    maps:from_list(from_map_pairs(Pairs));
%% Fix: the record name atom carries its own line annotation which need
%% not equal the type node's line, so it must not be matched against the
%% repeated variable _L (underscore-prefixed variables still unify when
%% repeated); otherwise this clause silently fell through to the generic
%% {type,...} clause whenever the two annotations differed.
from_type_def({type,_L,record,[{atom,_,Name}|Fields]}) ->
    [record,Name|from_rec_fields(Fields)];
from_type_def({type,_L,'fun',[Args,Ret]}) ->
    [lambda,from_lambda_args(Args),from_type_def(Ret)];
%% The standard erlang types.
from_type_def({type,_L,Type,Args}) when is_list(Args) ->
    [Type|from_type_defs(Args)];
from_type_def({user_type,_L,Type,Args}) when is_list(Args) ->
    [Type|from_type_defs(Args)];
from_type_def({ann_type,_L,[_Var,Type]}) ->             %Annotated types lose variable
    from_type_def(Type);
from_type_def({remote_type,_L,[{atom,_,M},{atom,_,T},Args]}) ->
    %% A remote type mod:type is encoded as the single atom 'mod:type'.
    Type = list_to_atom(lists:concat([M,":",T])),
    [Type|from_type_defs(Args)];
%% Literal values.
from_type_def({var,_L,Var}) -> Var;                     %A type variable
from_type_def({atom,_L,Atom}) -> ?Q(Atom);              %Literal atom
from_type_def({integer,_L,Int}) -> Int;                 %Literal integer
from_type_def({float,_L,Float}) -> Float.               %Literal float

%% Translate a list of Erlang type ASTs.
from_type_defs(Ts) ->
    lists:map(fun from_type_def/1, Ts).
%% Convert Erlang AST map pair types to {KeyDef,ValueDef} tuples.
from_map_pairs(Pairs) ->
    %% Lose distinction between assoc and exact pairs.
    Fun = fun ({type,_L,_P,[Kt,Vt]}) ->
                  {from_type_def(Kt),from_type_def(Vt)}
          end,
    lists:map(Fun, Pairs).

%% Convert record field_type AST nodes to [Name,TypeDef] pairs.
from_rec_fields(Fields) ->
    Fun = fun ({type,_L,field_type,[{atom,_,Name},Type]}) ->
                  [Name,from_type_def(Type)]
          end,
    [ Fun(F) || F <- Fields ].

%% A fun type's argument spec is either 'any' (any arity) or a product
%% of argument types.
from_lambda_args({type,_L,any}) -> any;                 %Any arity
from_lambda_args(Args) -> from_func_prod(Args).
%% to_type_def(Def, Line) -> AST.
%%  Translate a type definition from the LFE type syntax to the Erlang
%%  AST. We must explicitly handle our special cases. Clause order
%%  matters: special forms are tried before the generic [Type|Args]
%%  clause, which itself precedes the literal-value clauses.

%% Our special cases.
to_type_def(['UNION'|Types], Line) ->                   %Union
    {type,Line,union,to_type_defs(Types, Line)};
to_type_def([range,I1,I2], Line) ->
    {type,Line,range,to_type_defs([I1,I2], Line)};
to_type_def([bitstring,I1,I2], Line) ->                 %Flip binary<->bitstring here
    {type,Line,binary,to_type_defs([I1,I2], Line)};
to_type_def([tuple], Line) ->                           %Special case (tuple) -> tuple()
    {type,Line,tuple,any};
to_type_def([tuple|Args], Line) ->                      %Not a user defined type
    {type,Line,tuple,to_type_defs(Args, Line)};
to_type_def([map], Line) ->                             %Special case (map) -> map()
    {type,Line,map,any};
to_type_def([map|Elems], Line) ->
    %% Elems is a flat [K1,V1,K2,V2,...] list; group into pairs first.
    {type,Line,map,to_map_pairs(to_pair_list(Elems), Line)};
to_type_def([record,Name|Fields], Line) ->
    {type,Line,record,[to_lit(Name, Line)|to_rec_fields(Fields, Line)]};
to_type_def([lambda,Args,Ret], Line) ->
    {type,Line,'fun',[to_lambda_args(Args, Line),to_type_def(Ret, Line)]};
to_type_def(?Q(Val), Line) ->                           %Quoted atom literal
    to_lit(Val, Line);
to_type_def([call,?Q(M),?Q(T)|Args], Line) ->
    %% Special case mod:fun expands to (call 'mod 'fun)
    Dargs = to_type_defs(Args, Line),
    {remote_type,Line,[{atom,Line,M},{atom,Line,T},Dargs]};
%% The standard erlang types.
to_type_def([Type|Args], Line) ->
    Dargs = to_type_defs(Args, Line),
    %% An atom containing exactly one colon, e.g. 'mod:type', denotes a
    %% remote type.
    case string:tokens(atom_to_list(Type), ":") of
        [M,T] ->                                        %Remote type
            {remote_type,Line,
             [{atom,Line,list_to_atom(M)},{atom,Line,list_to_atom(T)},Dargs]};
        _ ->                                            %This will also catch a:b:c
            %% Get the right tag here: built-in vs user-defined type.
            Tag = case erl_internal:is_type(Type, length(Args)) of
                      true -> type;
                      false -> user_type
                  end,
            {Tag,Line,Type,Dargs}
    end;
to_type_def(Tup, Line) when is_tuple(Tup) ->
    %% A literal tuple maps to a tuple type of its element types.
    {type,Line,tuple,to_type_defs(tuple_to_list(Tup), Line)};
to_type_def(Map, Line) when ?IS_MAP(Map) ->
    ToPairs = to_map_pairs(maps:to_list(Map), Line),
    {type,Line,map,ToPairs};
to_type_def(Val, Line) when is_integer(Val) ->          %Literal integer value
    to_lit(Val, Line);
to_type_def(Val, Line) when is_float(Val) ->            %Literal float value
    to_lit(Val, Line);
to_type_def(Val, Line) when is_atom(Val) ->             %Variable
    {var,Line,Val}.

%% Translate a list of LFE type definitions, all at the same line.
to_type_defs(Ds, Line) ->
    lists:map(fun (D) -> to_type_def(D, Line) end, Ds).
%% to_lit(Value, Line) -> AST.
%%  Wrap a literal atom, integer or float in its tagged Erlang AST node.
to_lit(Lit, Line) when is_integer(Lit) -> {integer,Line,Lit};
to_lit(Lit, Line) when is_float(Lit) -> {float,Line,Lit};
to_lit(Lit, Line) when is_atom(Lit) -> {atom,Line,Lit}.
%% to_pair_list(KVList) -> [{Key,Val}].
%%  Group a flat [K1,V1,K2,V2,...] list into two-tuples; crashes on an
%%  odd-length list.
to_pair_list([]) -> [];
to_pair_list([Key,Val|More]) ->
    [{Key,Val}|to_pair_list(More)].
%% Convert {Key,Value} type pairs into map field AST nodes.
to_map_pairs(Pairs, Line) ->
    %% Have lost distinction between assoc and exact pairs, so all pairs
    %% are emitted as map_field_assoc.
    Fun = fun ({K,V}) ->
                  {type,Line,map_field_assoc,to_type_defs([K,V], Line)}
          end,
    [ Fun(P) || P <- Pairs ].

%% Convert [Name,Type] record field pairs into field_type AST nodes.
to_rec_fields(Fs, Line) ->
    Fun = fun ([F,Type]) ->
                  {type,Line,field_type,
                   [to_lit(F, Line),to_type_def(Type, Line)]}
          end,
    [ Fun(F) || F <- Fs ].

%% 'any' means any arity; otherwise build a product of argument types.
to_lambda_args(any, Line) -> {type,Line,any};
to_lambda_args(Args, Line) -> to_func_prod(Args, Line).
%% check_type_defs(Defs, KnownRecords, TypeVars) ->
%%     {ok,TypeVars} | {error,Error,TypeVars}.
%% check_type_def(Def, KnownRecords, TypeVars) ->
%%     {ok,TypeVars} | {error,Error,TypeVars}.
%%  Check a type definition. TypeVars is an orddict of variable names
%%  and usage counts. Errors returned are:
%%  {type_syntax,Type} - error in the type syntax
%%  {bad_type,Type} - error in the type definition

%% Our special cases.
check_type_def(['UNION'|Types], Recs, Tvs) ->
    check_type_defs(Types, Recs, Tvs);
check_type_def([range,I1,I2], _Recs, Tvs) ->
    %% Both bounds must be integers with I1 =< I2.
    if is_integer(I1) and is_integer(I2) and (I1 =< I2) ->
            {ok,Tvs};
       true -> type_syntax_error(range, Tvs)
    end;
check_type_def([tuple|Ts], Recs, Tvs) ->
    check_type_defs(Ts, Recs, Tvs);
check_type_def([bitstring,I1,I2], _Recs, Tvs) ->
    %% Both size arguments must be non-negative integers.
    if is_integer(I1) and is_integer(I2) and (I1 >= 0) and (I2 >= 0) ->
            {ok,Tvs};
       true -> type_syntax_error(bitstring, Tvs)
    end;
check_type_def([map|Pairs], Recs, Tvs) ->
    check_map_pairs(Pairs, Recs, Tvs);
check_type_def([record,Name|Fields], Recs, Tvs) ->
    check_record(Name, Fields, Recs, Tvs);
%% if is_atom(Name) -> check_record_fields(Fields, Recs, Tvs);
%%    true -> type_syntax_error(record, Tvs)
%% end;
check_type_def([lambda,Args,Ret], Recs, Tvs0) ->
    %% Check the argument types first, then the return type.
    case check_lambda_args(Args, Recs, Tvs0) of
        {ok,Tvs1} -> check_type_def(Ret, Recs, Tvs1);
        Error -> Error
    end;
check_type_def(?Q(Val), _Recs, Tvs) -> check_type_lit(Val, Tvs);
check_type_def([call,?Q(M),?Q(T)|Args], Recs, Tvs) when is_atom(M), is_atom(T) ->
    %% Remote type call: only the argument types need checking here.
    check_type_defs(Args, Recs, Tvs);
%% The standard Erlang types.
check_type_def([Type|Args], Recs, Tvs0) when is_atom(Type) ->
    check_type_defs(Args, Recs, Tvs0);
%% Only literal tuples, maps, integers and atoms (type variables) left now.
check_type_def(Tup, Recs, Tvs) when is_tuple(Tup) ->
    check_type_defs(tuple_to_list(Tup), Recs, Tvs);
check_type_def(Map, Recs, Tvs) when ?IS_MAP(Map) ->
    ToPairs = fun ({K,V}) -> [K,V] end,                 %Convert to list pairs
    check_map_pairs(lists:flatmap(ToPairs, maps:to_list(Map)), Recs, Tvs);
check_type_def(Val, _Recs, Tvs) when is_integer(Val) -> {ok,Tvs};
check_type_def(Val, _Recs, Tvs) when is_atom(Val) ->
    %% It's a type variable; bump its usage count.
    {ok,orddict:update_counter(Val, 1, Tvs)};
check_type_def(Def, _Recs, Tvs) ->
    bad_type_error(Def, Tvs).
%% Check every definition in a (proper) list of type definitions.
check_type_defs(Defs, Recs, Tvs) ->
    check_type_list(fun check_type_def/3, Defs, Recs, Tvs).

%% Only integers and atoms are valid quoted type literals.
check_type_lit(Val, Tvs) when is_integer(Val) ; is_atom(Val) -> {ok,Tvs};
check_type_lit(Val, Tvs) -> bad_type_error(Val, Tvs).
%% Walk the flat [Key1,Val1,Key2,Val2,...] pair list; an odd-length or
%% improper list falls through to the final syntax-error clause.
check_map_pairs([K,V|Pairs], Recs, Tvs0) ->
    case check_map_pair(K, V, Recs, Tvs0) of
        {ok,Tvs1} ->
            check_map_pairs(Pairs, Recs, Tvs1);
        Error -> Error
    end;
check_map_pairs([], _Recs, Tvs) -> {ok,Tvs};
check_map_pairs(_Other, _Recs, Tvs) ->
    type_syntax_error(map, Tvs).

%% Check the key type, then the value type, threading the type vars.
check_map_pair(K, V, Recs, Tvs0) ->
    case check_type_def(K, Recs, Tvs0) of
        {ok,Tvs1} -> check_type_def(V, Recs, Tvs1);
        Error -> Error
    end.
%% check_record(Record, Fields, KnownRecords, TypeVars) ->
%%     {ok,TypeVars} | {error,Error,TypeVars}.
%%  The record must already be known; only then are its fields checked.

check_record(Name, Fields, Recs, Tvs) ->
    case orddict:is_key(Name, Recs) of
        true ->
            check_record_fields(Fields, Recs, Tvs);
        false ->
            %% An unknown atom is an undefined record; a non-atom name is
            %% a syntax error in the record form itself.
            if is_atom(Name) ->
                    undefined_record_error(Name, Tvs);
               true -> type_syntax_error(record, Tvs)
            end
    end.

%% Check every field of a record type definition.
check_record_fields(Fs, Recs, Tvs) ->
    check_type_list(fun check_record_field/3, Fs, Recs, Tvs).

%% A valid field is a [Name,Type] pair with an atom name.
check_record_field([F,T], Recs, Tvs) when is_atom(F) ->
    check_type_def(T, Recs, Tvs);
check_record_field(Other, _Recs, Tvs) ->
    bad_type_error(Other, Tvs).
%% Apply Check to each element, threading the type variables through and
%% short-circuiting at the first error. A non-proper list is a bad type.
check_type_list(Check, [E|Es], Recs, Tvs0) ->
    case Check(E, Recs, Tvs0) of
        {ok,Tvs1} -> check_type_list(Check, Es, Recs, Tvs1);
        Error -> Error
    end;
check_type_list(_Check, [], _Recs, Tvs) -> {ok,Tvs};
check_type_list(_Check, Other, _Recs, Tvs) ->           %Not a proper list
    bad_type_error(Other, Tvs).
%% from_func_spec_list([FuncType]) -> Type.
%%  Convert Erlang AST function specs to LFE form. Unconstrained funs get
%%  an empty constraint list appended so every spec has the same
%%  [Args,Ret,Constraints] shape.

from_func_spec_list(Ss) ->
    Fun = fun ({type,_L,'fun',_}=Type) ->
                  from_func_spec(Type) ++ [[]];
              ({type,_L,bounded_fun,[Fun,Cs]}) ->
                  from_func_spec(Fun) ++ [from_func_constraints(Cs)]
          end,
    lists:map(Fun, Ss).

%% Convert one 'fun' type node to [Args,ReturnType].
from_func_spec({type,_L,'fun',[Prod,Ret]}) ->
    [from_func_prod(Prod),from_type_def(Ret)].

%% Convert a product node to the list of argument type definitions.
from_func_prod({type,_L,product,Args}) when is_list(Args) ->
    from_type_defs(Args).                               %Function arguments

%% Only is_subtype constraints occur in specs.
from_func_constraint({type,_,constraint,[{atom,_,is_subtype},St]}) ->
    from_subtype(St).

from_func_constraints(Cs) ->
    lists:map(fun from_func_constraint/1, Cs).

%% A subtype constraint becomes a [Var,TypeDef] pair.
from_subtype([{var,_,Var},Type]) -> [Var,from_type_def(Type)].
%% to_func_spec_list(Type, Line) -> AST.
%%  Convert LFE function specs back to Erlang AST form.

to_func_spec_list(Fts, Line) ->
    lists:map(fun (Ft) -> to_func_spec(Ft, Line) end, Fts).

%% A spec without constraints (or with an empty constraint list) becomes
%% a plain 'fun' type; a constrained spec becomes a bounded_fun.
to_func_spec([Prod,Ret], Line) ->
    to_func_spec(Prod, Ret, Line);
to_func_spec([Prod,Ret,[]], Line) ->                    %Future proof
    to_func_spec(Prod, Ret, Line);
to_func_spec([Prod,Ret,Cs], Line) ->
    Fun = to_func_spec(Prod, Ret, Line),
    Constr = to_func_constraints(Cs, Line),
    {type,Line,bounded_fun,[Fun,Constr]}.

%% Build a 'fun' type node from argument and return definitions.
to_func_spec(Prod, Ret, Line) ->
    {type,Line,'fun',[to_func_prod(Prod, Line),to_type_def(Ret, Line)]}.

to_func_prod(Args, Line) ->
    {type,Line,product,to_type_defs(Args, Line)}.

to_func_constraints(Cs, Line) ->
    [ to_func_constraint(C, Line) || C <- Cs ].

%% Each LFE constraint [Var,Type] becomes an is_subtype constraint node.
to_func_constraint([Var,Type], Line) ->
    {type,Line,constraint,[{atom,Line,is_subtype},
                           [{var,Line,Var},to_type_def(Type, Line)]]}.
%% check_func_spec_list([FuncType], Arity, KnownRecords) ->
%%     {ok,[TypeVars]} | {error,Error,[TypeVars]}.
%% check_func_spec(FuncType, Arity, KnownRecords) ->
%%     {ok,TypeVars} | {error,Error,TypeVars}.
%%  Check a list of function specs. TypeVars is an orddict of variable
%%  names and usage counts. Errors returned are:
%%  {bad_spec,Spec} - error in the type definition

check_func_spec_list(Ss, Ar, Recs) ->
    check_spec_list(fun check_func_spec/3, Ss, Ar, Recs).

%% A two-element spec is equivalent to one with no constraints. Each
%% spec starts with a fresh, empty type-variable orddict.
check_func_spec([Prod,Ret], Ar, Recs) ->
    check_func_spec([Prod,Ret,[]], Ar, Recs);
check_func_spec([Prod,Ret,Cs], Ar, Recs) ->
    Tvs0 = [],
    %% Check arguments, then return type, then constraints, threading
    %% the type variables and stopping at the first error.
    case check_func_prod(Prod, Ar, Recs, Tvs0) of
        {ok,Tvs1} ->
            case check_type_def(Ret, Recs, Tvs1) of
                {ok,Tvs2} ->
                    check_func_constraints(Cs, Recs, Tvs2);
                Error -> Error
            end;
        Error -> Error
    end;
check_func_spec(Other, _Ar, _Recs) ->
    bad_spec_error(Other, []).

%% The argument list must type-check and its length must match the
%% declared arity of the function.
check_func_prod(Args, Ar, Recs, Tvs0) ->
    %% This checks both the list and the types.
    case check_type_defs(Args, Recs, Tvs0) of
        {ok,Tvs1} ->
            if length(Args) =:= Ar -> {ok,Tvs1};
               true -> bad_spec_error(Args, Tvs1)
            end;
        Error -> Error
    end.

%% Each constraint is a [Var,Type] pair; the variable counts as a usage.
check_func_constraints([[Var,Type]|Cs], Recs, Tvs0) when is_atom(Var) ->
    Tvs1 = orddict:update_counter(Var, 1, Tvs0),
    case check_type_def(Type, Recs, Tvs1) of
        {ok,Tvs2} -> check_func_constraints(Cs, Recs, Tvs2);
        Error -> Error
    end;
check_func_constraints([], _Recs, Tvs) -> {ok,Tvs};
check_func_constraints(Other, _Recs, Tvs) ->
    bad_spec_error(Other, Tvs).
%% Check every spec in the list with Check/3, collecting the per-spec
%% type-variable orddicts in order. Stops at the first error; a
%% non-proper list yields a bad_spec error.
check_spec_list(Check, Es, Ar, Recs) ->
    check_spec_list(Check, Es, Ar, Recs, []).

%% Fix: accumulate with cons and reverse at the end instead of the
%% quadratic `Tvss ++ [Tvs]' append inside the loop.
check_spec_list(Check, [E|Es], Ar, Recs, Tvss) ->
    case Check(E, Ar, Recs) of
        {ok,Tvs} -> check_spec_list(Check, Es, Ar, Recs, [Tvs|Tvss]);
        Error -> Error
    end;
check_spec_list(_Check, [], _Ar, _Recs, Tvss) -> {ok,lists:reverse(Tvss)};
check_spec_list(_Check, Other, _Ar, _Recs, Tvss) ->
    %% Not a proper list; report with the results gathered so far.
    bad_spec_error(Other, lists:reverse(Tvss)).
%% Return errors.
%%  Small constructors so every error tuple has the uniform
%%  {error,Reason,TypeVars} shape.
%%  (Fix: removed stray non-Erlang text that had been appended after the
%%  final clause and would not compile.)
bad_spec_error(Val, Tvs) -> {error,{bad_spec,Val},Tvs}.

bad_type_error(Type, Tvs) -> {error,{bad_type,Type},Tvs}.

type_syntax_error(Type, Tvs) -> {error,{type_syntax,Type},Tvs}.

undefined_record_error(Rec, Tvs) -> {error,{undefined_record,Rec},Tvs}.
%% Copyright 2014 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% @doc Binomial heap based on Okasaki 6.2.2
-module(binomial_heap).
-export([new/0, insert/2, insert/3, merge/2, delete/1, to_list/1, take/2, size/1]).
-record(node,{
rank = 0 :: non_neg_integer(),
key :: term(),
value :: term(),
children = new() :: binomial_heap()
}).
-export_type([binomial_heap/0, heap_node/0]).
-type binomial_heap() :: [ heap_node() ].
-type heap_node() :: #node{}.
-spec new() -> binomial_heap().
%% Create an empty heap: the heap is represented as a forest (list) of
%% binomial trees kept in increasing rank order.
new() ->
    [].
% Inserts a new pair into the heap (or creates a new heap)
-spec insert(term(), term()) -> binomial_heap().
insert(Key,Value) ->
    insert(Key,Value,[]).

%% Wrap the pair in a rank-0 singleton tree and push it into the forest.
-spec insert(term(), term(), binomial_heap()) -> binomial_heap().
insert(Key,Value,Forest) ->
    insTree(#node{key=Key,value=Value},Forest).
% Merges two heaps
-spec merge(binomial_heap(), binomial_heap()) -> binomial_heap().
merge(TS1,[]) when is_list(TS1) -> TS1;
merge([],TS2) when is_list(TS2) -> TS2;
merge([#node{rank=R1}=T1|TS1]=F1,[#node{rank=R2}=T2|TS2]=F2) ->
    %% Forests are kept in increasing rank order: emit the lower-ranked
    %% tree first; link two equal-ranked trees into one of rank+1 and
    %% carry it, much like binary addition.
    if
        R1 < R2 ->
            [T1 | merge(TS1,F2)];
        R2 < R1 ->
            [T2 | merge(F1, TS2)];
        true ->
            insTree(link(T1,T2),merge(TS1,TS2))
    end.
% Deletes the top entry from the heap and returns it
-spec delete(binomial_heap()) -> {{term(), term()}, binomial_heap()}.
delete(TS) ->
    {#node{key=Key,value=Value,children=TS1},TS2} = getMin(TS),
    %% link/2 prepends children, so they are stored in decreasing rank
    %% order; reverse before merging them back into the remaining forest.
    {{Key,Value},merge(lists:reverse(TS1),TS2)}.
% Turns the heap into list in heap order
-spec to_list(binomial_heap()) -> [{term(), term()}].
to_list([]) -> [];
to_list(List) when is_list(List) ->
    to_list([],List).

%% Repeatedly pop the minimum, accumulating in reverse, then flip once.
to_list(Acc, []) ->
    lists:reverse(Acc);
to_list(Acc,Forest) ->
    {Next, Trees} = delete(Forest),
    to_list([Next|Acc], Trees).
% Take N elements from the top of the heap
-spec take(non_neg_integer(), binomial_heap()) -> [{term(), term()}].
take(N,Trees) when is_integer(N), is_list(Trees) ->
    take(N,Trees,[]).

%% Stop after N pops or when the heap runs empty, whichever comes first.
take(0,_Trees,Acc) ->
    lists:reverse(Acc);
take(_N,[],Acc)->
    lists:reverse(Acc);
take(N,Trees,Acc) ->
    {Top,T2} = delete(Trees),
    take(N-1,T2,[Top|Acc]).
% Get the exact size of the heap: a binomial tree of rank R holds 2^R entries.
-spec size(binomial_heap()) -> non_neg_integer().
size(Forest) ->
    %% Use an integer bit-shift instead of math:pow/2 + trunc/1: the
    %% float round-trip loses precision for ranks >= 53 and is slower.
    lists:sum([1 bsl R || #node{rank=R} <- Forest]).
%% Private API

%% Link two trees of equal rank: the tree with the smaller root key
%% becomes the parent (min-heap order), producing a tree of rank R+1.
-spec link(heap_node(), heap_node()) -> heap_node().
link(#node{rank=R,key=X1,children=C1}=T1,#node{key=X2,children=C2}=T2) ->
    case X1 < X2 of
        true ->
            T1#node{rank=R+1,children=[T2|C1]};
        _ ->
            T2#node{rank=R+1,children=[T1|C2]}
    end.
%% Insert a tree into a forest kept in increasing rank order, linking
%% equal-ranked trees on the way up (binary increment with carry).
insTree(Tree, []) ->
    [Tree];
insTree(#node{rank=R1}=T1, [#node{rank=R2}=T2|Rest] = TS) ->
    case R1 < R2 of
        true ->
            [T1|TS];
        _ ->
            insTree(link(T1,T2),Rest)
    end.
%% Find the tree with the minimum root key, returning
%% {MinTree, RemainingForest}. Crashes on an empty forest.
%% (Fix: removed stray non-Erlang text that had been appended after the
%% final clause and would not compile.)
getMin([T]) ->
    {T,[]};
getMin([#node{key=K} = T|TS]) ->
    {#node{key=K1} = T1,TS1} = getMin(TS),
    case K < K1 of
        true -> {T,TS};
        _ -> {T1,[T|TS1]}
    end.
%% The elephant player remembers every previous move of the opponent and calibrates its draw
%% probabilities accordingly. For example, if the opponent has drawn rock in the past, the
%% probabilities for the elephant to draw spock and paper increase, while the probabilities
%% to draw lizard and scissors decrease.
-module(rpsls_elephant_player).
-author('<NAME> <<EMAIL>>').
-behaviour(rpsls_player).
-export([init/0, play/2]).
%% @private
-spec init() -> State::term().
%% Seed the pseudo-random generator for this process; the player keeps no
%% other state (the empty tuple is its state token).
%% NOTE(review): the `random' module and erlang:now/0 are deprecated since
%% OTP 18; migrating to `rand' would require updating draw/2 as well.
init() -> _ = random:seed(erlang:now()), {}.
%% Boost option Target: it gains 5% of every other option's probability,
%% while each other option is scaled down by 5%, so the total mass stays
%% at 1.00.
increase(Target, Probs) ->
    Bonus = lists:sum([P * 0.05 || {Opt, P} <- Probs, Opt =/= Target]),
    Adjust = fun ({Opt, P}) when Opt =:= Target -> {Opt, P + Bonus};
                 ({Opt, P}) -> {Opt, P * 0.95}
             end,
    lists:map(Adjust, Probs).
%% Penalize option Target: it loses 20% (4 x 5%) of its own probability,
%% handed out as equal 5% shares to the other options. With the game's
%% five options (four others) the total mass stays at 1.00.
decrease(Target, Probs) ->
    {Target, TargetP} = lists:keyfind(Target, 1, Probs),
    Share = TargetP * 0.05,
    Adjust = fun ({Opt, P}) when Opt =:= Target -> {Opt, P - (4 * Share)};
                 ({Opt, P}) -> {Opt, P + Share}
             end,
    lists:map(Adjust, Probs).
%% Updating probabilities according to the rules of the game: given one
%% history entry, look at the opponent's move (second element) and boost
%% the two options that beat it while penalizing the two that lose to it.
%% E.g. after seeing rock, boost paper and spock (both beat rock) and
%% penalize lizard and scissors (rock crushes both).
updateMemory({_, rock}, UpdatingProbabilities) -> increase(spock, increase(paper, decrease(lizard, decrease(scissors, UpdatingProbabilities))));
updateMemory({_, lizard}, UpdatingProbabilities) -> increase(scissors, increase(rock, decrease(spock, decrease(paper, UpdatingProbabilities))));
updateMemory({_, spock}, UpdatingProbabilities) -> increase(lizard, increase(paper, decrease(scissors, decrease(rock, UpdatingProbabilities))));
updateMemory({_, scissors}, UpdatingProbabilities) -> increase(spock, increase(rock, decrease(lizard, decrease(paper, UpdatingProbabilities))));
updateMemory(_, UpdatingProbabilities) -> increase(scissors, increase(lizard, decrease(rock, decrease(spock, UpdatingProbabilities)))). %%Corresponds to paper
%% The history list is structured in a list of tuples such as {my move, opponent move}.
%% This function will go through the history and calibrate the probabilities according
%% to how the opponent has been drawing so far, by folding updateMemory/2
%% over every history entry starting from the base probabilities Calc.
analyseHistory(Logg, Calc) ->
    lists:foldl(fun updateMemory/2, Calc, Logg).
%% LAST STEP: GENERATE RANDOM NUMBER AND ALLOCATE A DRAW TO THE PLAYER
%% Send in the probabilities and send out an atom. The calibrated
%% probabilities are turned into cumulative upper bounds (rock, then
%% paper, scissors, lizard); spock takes the remaining mass, so the
%% five weights need not sum to exactly 1.0.
%% NOTE(review): random:uniform/0 is deprecated; consider rand:uniform/0
%% together with migrating the seeding in init/0.
draw(Logg, Options) ->
    Random = random:uniform(),
    Probabilities = analyseHistory(Logg, Options),
    {rock, LimitRock} = lists:keyfind(rock, 1, Probabilities),
    {paper, ProbPaper} = lists:keyfind(paper, 1, Probabilities),
    LimitPaper = LimitRock + ProbPaper,
    {scissors, ProbScissors} = lists:keyfind(scissors, 1, Probabilities),
    LimitScissors = LimitPaper + ProbScissors,
    {lizard, ProbLizard} = lists:keyfind(lizard, 1, Probabilities),
    LimitLizard = ProbLizard + LimitScissors,
    %% Pick the first cumulative bucket the random draw falls under.
    Choose = fun(R) when R < LimitRock -> rock;
                (R) when R < LimitPaper -> paper;
                (R) when R < LimitScissors -> scissors;
                (R) when R < LimitLizard -> lizard;
                (_) -> spock
             end,
    Choose(Random).
-spec play(History::[{You::rpsls_player:choice(), Rival::rpsls_player:choice()}], State::term()) -> {rpsls_player:choice(), NewState::term()}.
%% Pick the next move: start from a uniform distribution over the five
%% options and let draw/2 recalibrate it from the opponent's history.
%% The state token stays the empty tuple.
%% (Fix: removed stray non-Erlang text that had been appended after the
%% final expression and would not compile.)
play(History, {}) ->
    {draw(History, [{spock, 0.2}, {scissors, 0.2}, {paper, 0.2}, {rock, 0.2}, {lizard, 0.2}]), {}}.
%%%---------------------------------------------------
%% @doc
%% A time-based poller to periodically dispatch Telemetry events.
%%
%% A poller is a process start in your supervision tree with a list
%% of measurements to perform periodically. On start it expects the
%% period in milliseconds and a list of measurements to perform:
%%
%% ```
%% telemetry_poller:start_link([
%% {measurements, Measurements},
%% {period, Period}
%% ])
%% '''
%%
%% The following measurements are supported:
%%
%% * `memory' (default)
%% * `total_run_queue_lengths' (default)
%% * `system_counts' (default)
%% * `{process_info, Proplist}'
%% * `{Module, Function, Args}'
%%
%% We will discuss each measurement in detail. Also note that the
%% telemetry_poller application ships with a built-in poller that
%% measures `memory', `total_run_queue_lengths' and `system_counts'. This takes
%% the VM measurement out of the way so your application can focus
%% on what is specific to its behaviour.
%%
%% == Memory ==
%%
%% An event emitted as `[vm, memory]'. The measurement includes all
%% the key-value pairs returned by {@link erlang:memory/0} function,
%% e.g. `total' for total memory, `processes_used' for memory used by
%% all processes, etc.
%%
%% == Total run queue lengths ==
%%
%% On startup, the Erlang VM starts many schedulers to do both IO and
%% CPU work. If a process needs to do some work or wait on IO, it is
%% allocated to the appropriate scheduler. The run queue is a queue of
%% tasks to be scheduled. A length of a run queue corresponds to the amount
%% of work accumulated in the system. If a run queue length is constantly
%% growing, it means that the BEAM is not keeping up with executing all
%% the tasks.
%%
%% There are several run queue types in the Erlang VM. Each CPU scheduler
%% (usually one per core) has its own run queue, and since Erlang 20.0 there
%% is one dirty CPU run queue, and one dirty IO run queue.
%%
%% The run queue length event is emitted as `[vm, total_run_queue_lengths]'.
%% The event contains no metadata and three measurements:
%%
%% <ul>
%% <li>`total' - a sum of all run queue lengths</li>
%% <li>`cpu' - a sum of CPU schedulers' run queue lengths, including dirty CPU run queue length on Erlang version 20 and greater</li>
%% <li>`io' - length of dirty IO run queue. It's always 0 if running on Erlang versions prior to 20.</li>
%% </ul>
%%
%% Note that the method of making this measurement varies between different
%% Erlang versions: the implementation on versions earlier than Erlang/OTP 20
%% is less efficient.
%%
%% The length of all queues is not gathered atomically, so the event value
%% does not represent a consistent snapshot of the run queues' state.
%% However, the value is accurate enough to help to identify issues in a
%% running system.
%%
%% == System counts ==
%%
%% An event emitted as `[vm, system_counts]'. The event contains no metadata
%% and three measurements:
%%
%% <ul>
%% <li>`process_count' - number of process currently existing at the local node</li>
%% <li>`atom_count' - number of atoms currently existing at the local node</li>
%% <li>`port_count' - number of ports currently existing at the local node</li>
%% </ul>
%%
%% All three measurements are from {@link erlang:system_info/1}.
%%
%% == Process info ==
%%
%% A measurement with information about a given process. It must be specified
%% alongside a proplist with the process name, the event name, and a list of
%% keys to be included:
%%
%% ```
%% {process_info, [
%% {name, my_app_worker},
%% {event, [my_app, worker]},
%% {keys, [message_queue_len, memory]}
%% ]}
%% '''
%%
%% The `keys' is a list of atoms accepted by {@link erlang:process_info/2}.
%%
%% == Custom measurements ==
%%
%% Telemetry poller also allows you to perform custom measurements by passing
%% a module-function-args tuple:
%%
%% {my_app_example, measure, []}
%%
%% The given function will be invoked periodically and it must explicitly invoke
%% the `telemetry:execute/3' function. If the invocation of the MFA fails, the measurement
%% is removed from the Poller.
%%
%% For all options, see {@link start_link/1}. The options listed there can be given
%% to the default poller as well as to custom pollers.
%%
%% == Default poller ==
%%
%% A default poller is started with `telemetry_poller' responsible for emitting
%% measurements for `memory' and `total_run_queue_lengths'. You can customize
%% the behaviour of the default poller by setting the `default' key under the
%% `telemetry_poller' application environment. Setting it to `false' disables
%% the poller.
%%
%% == Example - tracking number of active sessions in web application ==
%%
%% Let's imagine that you have a web application and you would like to periodically
%% measure number of active user sessions.
%%
%% ```
%% -module(example_app).
%%
%% session_count() ->
%% % logic for calculating session count.
%% '''
%%
%% To achieve that, we need a measurement dispatching the value we're interested in:
%%
%% ```
%% -module(example_app_measurements).
%%
%% dispatch_session_count() ->
%% telemetry:execute([example_app, session_count], example_app:session_count()).
%% '''
%%
%% and tell the Poller to invoke it periodically:
%%
%% ```
%% telemetry_poller:start_link([{measurements, [{example_app_measurements, dispatch_session_count, []}]).
%% '''
%%
%% If you find that you need to somehow label the event values, e.g. differentiate between number of
%% sessions of regular and admin users, you could use event metadata:
%%
%% ```
%% -module(example_app_measurements).
%%
%% dispatch_session_count() ->
%% Regulars = example_app:regular_users_session_count(),
%% Admins = example_app:admin_users_session_count(),
%% telemetry:execute([example_app, session_count], #{count => Admins}, #{role => admin}),
%% telemetry:execute([example_app, session_count], #{count => Regulars}, #{role => regular}).
%% '''
%%
%% <blockquote>Note: the other solution would be to dispatch two different events by hooking up
%% `example_app:regular_users_session_count/0' and `example_app:admin_users_session_count/0'
%% functions directly. However, if you add more and more user roles to your app, you'll find
%% yourself creating a new event for each one of them, which will force you to modify existing
%% event handlers. If you can break down event value by some feature, like user role in this
%% example, it's usually better to use event metadata than add new events.
%% </blockquote>
%%
%% This is a perfect use case for poller, because you don't need to write a dedicated process
%% which would call these functions periodically. Additionally, if you find that you need to collect
%% more statistics like this in the future, you can easily hook them up to the same poller process
%% and avoid creating lots of processes which would stay idle most of the time.
%% @end
%%%---------------------------------------------------
-module(telemetry_poller).
-behaviour(gen_server).
%% API
-export([
child_spec/1,
list_measurements/1,
start_link/1
]).
-export([code_change/3, handle_call/3, handle_cast/2,
handle_info/2, init/1, terminate/2]).
-ifdef('OTP_RELEASE').
-include_lib("kernel/include/logger.hrl").
-else.
-define(LOG_ERROR(Msg, Args), error_logger:error_msg(Msg, Args)).
-define(LOG_WARNING(Msg, Args), error_logger:warning_msg(Msg, Args)).
-endif.
-ifdef('OTP_RELEASE').
-define(WITH_STACKTRACE(T, R, S), T:R:S ->).
-else.
-define(WITH_STACKTRACE(T, R, S), T:R -> S = erlang:get_stacktrace(),).
-endif.
-type t() :: gen_server:server().
-type options() :: [option()].
-type option() ::
{name, atom() | {global, atom()}, {via, module(), term()}}
| {period, period()}
| {measurements, [measurement()]}.
-type measurement() ::
memory
| total_run_queue_lengths
| system_counts
| {process_info, [{name, atom()} | {event, [atom()]} | {keys, [atom()]}]}
| {module(), atom(), list()}.
-type period() :: pos_integer().
-type state() :: #{measurements => [measurement()], period => period()}.
%% @doc Starts a poller linked to the calling process.
%%
%% Useful for starting Pollers as a part of a supervision tree.
%%
%% Default options: [{name, telemetry_poller}, {period, timer:seconds(5)}]
-spec start_link(options()) -> gen_server:on_start().
start_link(Opts) when is_list(Opts) ->
    Args = parse_args(Opts),
    %% Register locally when the name is a bare atom, pass {global,_} /
    %% {via,_,_} tuples straight through, and start anonymously when no
    %% name option was given.
    case lists:keyfind(name, 1, Opts) of
        {name, Name} when is_atom(Name) -> gen_server:start_link({local, Name}, ?MODULE, Args, []);
        {name, Name} -> gen_server:start_link(Name, ?MODULE, Args, []);
        false -> gen_server:start_link(?MODULE, Args, [])
    end.
%% @doc
%% Returns a list of measurements used by the poller.
%% Synchronous call; blocks until the poller replies.
-spec list_measurements(t()) -> [measurement()].
list_measurements(Poller) ->
    gen_server:call(Poller, get_measurements).
-spec init(map()) -> {ok, state()}.
%% gen_server callback: schedule an immediate first collection (delay 0)
%% and keep the already-parsed measurements and period as server state.
init(Args) ->
    schedule_measurement(0),
    {ok, #{
        measurements => maps:get(measurements, Args),
        period => maps:get(period, Args)}}.
%% @doc
%% Returns a child spec for the poller for running under a supervisor.
child_spec(Opts) ->
    %% Derive a stable child id from the name option (unwrapping
    %% {global,_} / {via,_,_}), defaulting to the module name.
    Id =
        case proplists:get_value(name, Opts) of
            undefined -> ?MODULE;
            Name when is_atom(Name) -> Name;
            {global, Name} -> Name;
            {via, _, Name} -> Name
        end,
    #{
        id => Id,
        start => {telemetry_poller, start_link, [Opts]}
    }.
%% Parse the user-supplied option list into the init args map, applying
%% the documented defaults ([] measurements, 5 second period) and
%% validating/normalising each part.
parse_args(Args) ->
Measurements = proplists:get_value(measurements, Args, []),
ParsedMeasurements = parse_measurements(Measurements),
Period = proplists:get_value(period, Args, timer:seconds(5)),
validate_period(Period),
#{measurements => ParsedMeasurements, period => Period}.
%% Arrange for a 'collect' message to arrive in the poller's mailbox
%% after the given number of milliseconds.
-spec schedule_measurement(non_neg_integer()) -> ok.
schedule_measurement(CollectInMillis) ->
erlang:send_after(CollectInMillis, self(), collect), ok.
%% Accept only a strictly positive integer polling period; anything else
%% raises a badarg error carrying the offending term.
-spec validate_period(term()) -> ok | no_return().
validate_period(Period) ->
    case is_integer(Period) andalso Period > 0 of
        true ->
            ok;
        false ->
            erlang:error({badarg, "Expected period to be a positive integer"}, [Period])
    end.
%% Normalise every configured measurement into an {M, F, A} triple;
%% a non-list argument is a configuration error and raises badarg.
-spec parse_measurements([measurement()]) -> [{module(), atom(), list()}].
parse_measurements(Measurements) when is_list(Measurements) ->
lists:map(fun parse_measurement/1, Measurements);
parse_measurements(Term) ->
erlang:error({badarg, "Expected measurements to be a list"}, [Term]).
%% Normalise one measurement spec into a {Module, Function, Args} triple.
%% The builtin atoms map onto telemetry_poller_builtin helpers; a
%% process_info spec must carry the mandatory name/event/keys options;
%% a plain MFA tuple is passed through unchanged. Invalid specs raise badarg.
-spec parse_measurement(measurement()) -> {module(), atom(), list()}.
parse_measurement(memory) ->
    {telemetry_poller_builtin, memory, []};
parse_measurement(total_run_queue_lengths) ->
    {telemetry_poller_builtin, total_run_queue_lengths, []};
parse_measurement(system_counts) ->
    {telemetry_poller_builtin, system_counts, []};
parse_measurement({process_info, List}) when is_list(List) ->
    Name = case proplists:get_value(name, List) of
        undefined -> erlang:error({badarg, "Expected `name' key to be given under process_info measurement"});
        PropName when is_atom(PropName) -> PropName;
        PropName -> erlang:error({badarg, "Expected `name' key to be an atom under process_info measurement"}, [PropName])
    end,
    Event = case proplists:get_value(event, List) of
        undefined -> erlang:error({badarg, "Expected `event' key to be given under process_info measurement"});
        PropEvent when is_list(PropEvent) -> PropEvent;
        PropEvent -> erlang:error({badarg, "Expected `event' key to be a list of atoms under process_info measurement"}, [PropEvent])
    end,
    Keys = case proplists:get_value(keys, List) of
        undefined -> erlang:error({badarg, "Expected `keys' key to be given under process_info measurement"});
        PropKeys when is_list(PropKeys) -> PropKeys;
        PropKeys -> erlang:error({badarg, "Expected `keys' key to be a list of atoms under process_info measurement"}, [PropKeys])
    end,
    {telemetry_poller_builtin, process_info, [Event, Name, Keys]};
parse_measurement({M, F, A}) when is_atom(M), is_atom(F), is_list(A) ->
    {M, F, A};
parse_measurement(Term) ->
    %% Fixed error message: was misspelled "total_run_queue_lenths" and
    %% omitted the supported system_counts measurement.
    erlang:error({badarg, "Expected measurement to be memory, total_run_queue_lengths, system_counts, {process_info, list()}, or a {module(), function(), list()} tuple"}, [Term]).
%% Run every measurement once and keep only those that did not crash
%% (make_measurement/1 returns the atom 'error' for a crashing MFA),
%% so misbehaving measurements are dropped from subsequent polls.
-spec make_measurements_and_filter_misbehaving([measurement()]) -> [measurement()].
make_measurements_and_filter_misbehaving(Measurements) ->
[Measurement || Measurement <- Measurements, make_measurement(Measurement) =/= error].
%% Execute a single {M, F, A} measurement. Returns the measurement itself
%% on success (so callers can keep it); if the call raises, the exception
%% is logged and the atom 'error' is returned so the caller can filter
%% the measurement out.
%% Fixed spec: the function never diverges — it returns 'error' on a
%% caught exception, so the previous `| no_return()` was wrong.
-spec make_measurement(measurement()) -> measurement() | error.
make_measurement(Measurement = {M, F, A}) ->
    try erlang:apply(M, F, A) of
        _ -> Measurement
    catch
        ?WITH_STACKTRACE(Class, Reason, Stacktrace)
            ?LOG_ERROR("Error when calling MFA defined by measurement: ~p ~p ~p~n"
                "Class=~p~nReason=~p~nStacktrace=~p~n",
                [M, F, A, Class, Reason, Stacktrace]),
            error
    end.
%% gen_server callbacks ---------------------------------------------------

%% Synchronous API: only get_measurements is part of the protocol; any
%% other call is acknowledged with ok so callers never hang.
handle_call(get_measurements, _From, State = #{measurements := Measurements}) ->
    {reply, Measurements, State};
handle_call(_Request, _From, State) ->
    {reply, ok, State}.
%% No casts are part of the protocol; ignore them.
handle_cast(_Msg, State) -> {noreply, State}.
%% Periodic 'collect' tick: run all measurements, drop those that crashed,
%% and schedule the next tick after the configured period.
handle_info(collect, State) ->
    GoodMeasurements = make_measurements_and_filter_misbehaving(maps:get(measurements, State)),
    schedule_measurement(maps:get(period, State)),
    {noreply, State#{measurements := GoodMeasurements}};
handle_info(_, State) ->
    {noreply, State}.
terminate(_Reason, _State) -> ok.
%% Removed dataset artifact that was fused onto this line and broke the
%% module's syntax.
code_change(_OldVsn, State, _Extra) -> {ok, State}.
%%%------------------------------------------------------------------------
%% Copyright 2021, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc An implementation of {@link otel_propagator_text_map} that injects and
%% extracts baggage using the
%% <a href="https://w3c.github.io/baggage/">W3C Baggage format</a>.
%%
%% This propagator along with {@link otel_propagator_trace_context} are used
%% by default. The global TextMap Propagators can be configured in the
%% application environment:
%%
%% ```
%% {text_map_propagators, [trace_context, baggage]},
%% '''
%%
%% Or by calling {@link opentelemetry:set_text_map_propagator/1}.
%% @end
%%%-----------------------------------------------------------------------
-module(otel_propagator_baggage).
-behaviour(otel_propagator_text_map).
-export([fields/1,
inject/4,
extract/5]).
-include("opentelemetry.hrl").
-define(DEC2HEX(X),
if ((X) >= 0) andalso ((X) =< 9) -> (X) + $0;
((X) >= 10) andalso ((X) =< 15) -> (X) + $A - 10
end).
-define(HEX2DEC(X),
if ((X) >= $0) andalso ((X) =< $9) -> (X) - $0;
((X) >= $A) andalso ((X) =< $F) -> (X) - $A + 10;
((X) >= $a) andalso ((X) =< $f) -> (X) - $a + 10
end).
-define(BAGGAGE_HEADER, <<"baggage">>).
%% @doc Returns the list of carrier keys this propagator writes and reads.
fields(_) ->
[?BAGGAGE_HEADER].
-spec inject(Context, Carrier, CarrierSetFun, Options) -> Carrier
when Context :: otel_ctx:t(),
Carrier :: otel_propagator:carrier(),
CarrierSetFun :: otel_propagator_text_map:carrier_set(),
Options :: otel_propagator_text_map:propagator_options().
%% @doc Serialises the baggage from Ctx into the carrier as a single
%% comma-separated `baggage' header, each entry percent-encoded as
%% key=value. An empty baggage map produces no pairs and the carrier is
%% returned untouched.
inject(Ctx, Carrier, CarrierSet, _Options) ->
Baggage = otel_baggage:get_all(Ctx),
%% The fold prepends ",key=value" per entry; the leading $, of the
%% resulting iolist is stripped by the [$, | List] match below.
case maps:fold(fun(Key, Value, Acc) ->
[$,, [encode_key(Key), "=", encode_value(Value)] | Acc]
end, [], Baggage) of
[$, | List] ->
CarrierSet(?BAGGAGE_HEADER, unicode:characters_to_binary(List), Carrier);
_ ->
Carrier
end.
-spec extract(Context, Carrier, CarrierKeysFun, CarrierGetFun, Options) -> Context
when Context :: otel_ctx:t(),
Carrier :: otel_propagator:carrier(),
CarrierKeysFun :: otel_propagator_text_map:carrier_keys(),
CarrierGetFun :: otel_propagator_text_map:carrier_get(),
Options :: otel_propagator_text_map:propagator_options().
%% @doc Parses the `baggage' header from the carrier (if present) and
%% stores the decoded key/value pairs in the returned context; with no
%% header, the context is returned unchanged.
extract(Ctx, Carrier, _CarrierKeysFun, CarrierGet, _Options) ->
case CarrierGet(?BAGGAGE_HEADER, Carrier) of
undefined ->
Ctx;
String ->
Pairs = string:lexemes(String, [$,]),
DecodedBaggage =
lists:foldl(fun(Pair, Acc) ->
%% NOTE(review): a pair with no "=" makes this [Key, Value]
%% match fail and crashes extraction — presumably incoming
%% headers are assumed well-formed; confirm whether malformed
%% entries should be skipped instead.
[Key, Value] = string:split(Pair, "="),
Acc#{decode_key(Key) => decode_value(Value)}
end, #{}, Pairs),
otel_baggage:set(Ctx, DecodedBaggage)
end.
%% internal encoding helpers
%% Percent-encode a baggage key using form-urlencoding (UTF-8).
encode_key(Key) ->
form_urlencode(Key, [{encoding, utf8}]).
%% Encode a {Value, Metadata} pair as "value;meta;meta=..." — the value is
%% percent-encoded and metadata entries are joined with $;.
encode_value({Value, Metadata}) ->
EncodedMetadata = encode_metadata(Metadata),
EncodedValue = form_urlencode(Value, [{encoding, utf8}]),
unicode:characters_to_binary(lists:join(<<";">>, [EncodedValue | EncodedMetadata])).
%% Keep only well-formed metadata: {K, V} binary pairs become "K=V",
%% bare binaries pass through, anything else is dropped. A non-list
%% argument yields no metadata at all.
encode_metadata(Metadata) when is_list(Metadata) ->
lists:filtermap(fun({MK, MV}) when is_binary(MK) , is_binary(MV) ->
{true, [MK, <<"=">>, MV]};
(M) when is_binary(M) ->
{true, M};
(_) ->
false
end, Metadata);
encode_metadata(_) ->
[].
%% Percent-decode a baggage key, trimming surrounding whitespace first.
decode_key(Key) ->
percent_decode(string:trim(unicode:characters_to_binary(Key))).
%% Split "value;m1;m2=x" on $; into the decoded value and a list of
%% decoded metadata entries.
decode_value(ValueAndMetadata) ->
[Value | MetadataList] = string:lexemes(ValueAndMetadata, [$;]),
{string_decode(Value), lists:filtermap(fun metadata_decode/1, MetadataList)}.
%% A metadata lexeme is either a bare token or a key=value pair.
metadata_decode(Metadata) ->
case string:split(Metadata, "=") of
[MetadataKey] ->
{true, string_decode(MetadataKey)};
[MetadataKey, MetadataValue] ->
{true, {string_decode(MetadataKey), string_decode(MetadataValue)}};
_ ->
false
end.
%% Trim and percent-decode an arbitrary chardata token.
string_decode(S) ->
percent_decode(string:trim(unicode:characters_to_binary(S))).
%% TODO: call `uri_string:percent_decode' and remove this when OTP-23 is
%% the oldest version we maintain support for
-spec percent_decode(URI) -> Result when
URI :: uri_string:uri_string(),
Result :: uri_string:uri_string() |
{error, {invalid, {atom(), {term(), term()}}}}.
percent_decode(URI) when is_list(URI) orelse
is_binary(URI) ->
raw_decode(URI).
%% TODO: call `uri_string:percent_encode' when it is added to OTP and
%% available in the oldest version we support
%% Form-urlencode chardata; only utf8/unicode encodings are accepted.
%% NOTE(review): the guard `is_list(Cs), Encoding =:= utf8; Encoding =:= unicode'
%% groups as (is_list AND utf8) OR (unicode), so a binary with encoding
%% `unicode' also takes the first clause — harmless since the utf8 and
%% unicode conversions coincide, but confirm the grouping is intended.
form_urlencode(Cs, [{encoding, Encoding}])
when is_list(Cs), Encoding =:= utf8; Encoding =:= unicode ->
B = convert_to_binary(Cs, utf8, Encoding),
html5_byte_encode(B);
form_urlencode(Cs, [{encoding, Encoding}])
when is_binary(Cs), Encoding =:= utf8; Encoding =:= unicode ->
html5_byte_encode(Cs);
form_urlencode(Cs, [{encoding, Encoding}]) when is_list(Cs); is_binary(Cs) ->
throw({error,invalid_encoding, Encoding});
form_urlencode(Cs, _) ->
throw({error,invalid_input, Cs}).
%% Percent-encode a UTF-8 binary with HTML5 form-urlencoding rules:
%% spaces become $+, characters allowed by is_url_char/1 pass through,
%% every other byte is emitted as %XY via the ?DEC2HEX macro.
html5_byte_encode(B) ->
html5_byte_encode(B, <<>>).
%%
html5_byte_encode(<<>>, Acc) ->
Acc;
html5_byte_encode(<<$ ,T/binary>>, Acc) ->
html5_byte_encode(T, <<Acc/binary,$+>>);
html5_byte_encode(<<H,T/binary>>, Acc) ->
case is_url_char(H) of
true ->
html5_byte_encode(T, <<Acc/binary,H>>);
false ->
%% split the byte into two nibbles and emit the %XY escape
<<A:4,B:4>> = <<H>>,
html5_byte_encode(T, <<Acc/binary,$%,(?DEC2HEX(A)),(?DEC2HEX(B))>>)
end;
html5_byte_encode(H, _Acc) ->
throw({error,invalid_input, H}).
%% Return true if the character may appear unescaped in a
%% form-urlencoded string. Allowed characters:
%% 0x2A ($*), 0x2D ($-), 0x2E ($.), 0x30-0x39 (digits),
%% 0x41-0x5A (A-Z), 0x5F ($_), 0x61-0x7A (a-z)
is_url_char(C) when $0 =< C, C =< $9 -> true;
is_url_char(C) when $A =< C, C =< $Z -> true;
is_url_char(C) when $a =< C, C =< $z -> true;
is_url_char($*) -> true;
is_url_char($-) -> true;
is_url_char($.) -> true;
is_url_char($_) -> true;
is_url_char(_) -> false.
%% Convert chardata between encodings; a failed or partial conversion is
%% reported by throwing {error, invalid_input, Rest}.
convert_to_binary(Input, InEncoding, OutEncoding) ->
    case unicode:characters_to_binary(Input, InEncoding, OutEncoding) of
        Converted when is_binary(Converted) ->
            Converted;
        {_ErrorOrIncomplete, _Converted, RestData} ->
            throw({error, invalid_input, RestData})
    end.
-spec raw_decode(list()|binary()) -> list() | binary() | uri_string:error().
%% Percent-decode chardata. List input is converted to a binary, decoded,
%% and converted back; thrown {error, Atom, Rest} tuples are turned into
%% return values for list input (for binary input they propagate as
%% throws to the caller).
raw_decode(Cs) ->
raw_decode(Cs, <<>>).
%%
raw_decode(L, Acc) when is_list(L) ->
try
B0 = unicode:characters_to_binary(L),
B1 = raw_decode(B0, Acc),
unicode:characters_to_list(B1)
catch
throw:{error, Atom, RestData} ->
{error, Atom, RestData}
end;
raw_decode(<<$%,C0,C1,Cs/binary>>, Acc) ->
%% a %XY escape: both X and Y must be hex digits
case is_hex_digit(C0) andalso is_hex_digit(C1) of
true ->
B = ?HEX2DEC(C0)*16+?HEX2DEC(C1),
raw_decode(Cs, <<Acc/binary, B>>);
false ->
throw({error,invalid_percent_encoding,<<$%,C0,C1>>})
end;
raw_decode(<<C,Cs/binary>>, Acc) ->
raw_decode(Cs, <<Acc/binary, C>>);
raw_decode(<<>>, Acc) ->
%% the decoded bytes must still form valid UTF-8
check_utf8(Acc).
%% Returns the input binary unchanged when it is valid UTF-8; otherwise
%% throws {error, invalid_utf8, Bin}.
check_utf8(Bin) ->
    case unicode:characters_to_list(Bin) of
        Chars when is_list(Chars) ->
            Bin;
        _ErrorOrIncomplete ->
            throw({error, invalid_utf8, Bin})
    end.
%% True for an ASCII hexadecimal digit (0-9, a-f, A-F).
%% Removed dataset artifact that was fused onto the final line.
-spec is_hex_digit(char()) -> boolean().
is_hex_digit(C)
  when $0 =< C, C =< $9; $a =< C, C =< $f; $A =< C, C =< $F -> true;
is_hex_digit(_) -> false.
%%%----------------------------------------------------------------------------
%%% Copyright Space-Time Insight 2017. All Rights Reserved.
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%----------------------------------------------------------------------------
%%% ---------------------------------------------------------------------------
%%% @doc Each erleans node is responsible for a subset of partitions used to
%%% distribute responsibility for streams and reminders in the cluster.
%%% This server is notified by partisan when the cluster membership
%%% change and recalculates the range of partitions to handle.
%%%
%%% Ranges are closed intervals, [Start, End]. So searching for streams
%%% or reminders within the interval is to include the Start and End
%%% partitions, i.e. Partition >=Start andalso Partition =< End.
%%% @end
%%% ---------------------------------------------------------------------------
-module(erleans_partitions).
-behaviour(gen_server).
-export([start_link/0,
find_node/1,
add_handler/2,
get_range/0]).
-export([init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3]).
-include("erleans.hrl").
-type range() :: {integer(), integer()}.
-record(state, {range :: range() | undefined,
num_partitions :: integer(),
node_ranges :: [{range(), node()}],
to_notify :: #{atom() => pid() | atom()}}).
-define(CH(Item, Partitions), jch:ch(Item, Partitions)).
-spec start_link() -> {ok, pid()} | {error, any()}.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
%% @doc Maps a term onto its partition and the node currently
%% responsible for that partition.
-spec find_node(term()) -> {integer(), node()}.
find_node(Item) ->
gen_server:call(?MODULE, {find_node, Item}).
%% @doc Returns the partition range handled by the local node.
-spec get_range() -> range().
get_range() ->
gen_server:call(?MODULE, get_range).
%% @doc Registers a process to be notified on partition range changes;
%% it will receive {update_streams, Range} messages (see update_ranges/3).
-spec add_handler(atom(), pid() | atom()) -> ok.
add_handler(Name, Pid) ->
gen_server:call(?MODULE, {add_handler, Name, Pid}).
%% gen_server init: subscribes to partisan membership changes (delivered
%% to this server as {update, Membership} messages) and starts with no
%% computed ranges and no notification handlers.
init([]) ->
%% callback for changes to cluster membership
Self = self(),
ok = partisan_peer_service_events:add_sup_callback(fun(Membership) ->
Self ! {update, Membership}
end),
NumPartitions = erleans_config:get(num_partitions),
{ok, #state{num_partitions=NumPartitions,
node_ranges=[],
to_notify=#{}}}.
%% find_node is answered from a spawned helper so the (potentially slow)
%% range scan never blocks the server loop; the helper replies directly
%% via gen_server:reply/2 after mapping the item's hash onto a partition
%% with jump consistent hashing (?CH) and finding the owning range.
handle_call({find_node, Item}, From, State=#state{num_partitions=NumPartitions,
node_ranges=Ranges}) ->
spawn(fun() ->
Partition = ?CH(erlang:phash2(Item), NumPartitions),
{_, Node} = ec_lists:fetch(fun({{Start, End}, _Node}) when Partition >= Start
, Partition =< End ->
true;
(_) ->
false
end, Ranges),
gen_server:reply(From, {Partition, Node})
end),
{noreply, State};
handle_call(get_range, _From, State=#state{range=Range}) ->
{reply, Range, State};
%% Register (or replace) a named notification handler.
handle_call({add_handler, Name, Pid}, _From, State=#state{to_notify=ToNotify}) ->
{reply, ok, State#state{to_notify=ToNotify#{Name => Pid}}}.
handle_cast(_Msg, State) ->
{noreply, State}.
%% Membership changed: recompute the per-node partition ranges and store
%% both the local node's range and the full range table.
handle_info({update, Membership}, State=#state{num_partitions=NumPartitions,
to_notify=ToNotify}) ->
MembersList = lists:usort(sets:to_list(state_orset:query(Membership))),
{Range, NodeRanges} = update_ranges(MembersList, NumPartitions, ToNotify),
{noreply, State#state{range=Range,
node_ranges=NodeRanges}};
handle_info({gen_event_EXIT, _, _}, State) ->
%% there is no reason the event handler should be removed
%% so if we receive this message attempt to add it back
Self = self(),
ok = partisan_peer_service_events:add_sup_callback(fun(Membership) ->
Self ! {update, Membership}
end),
{noreply, State}.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
terminate(_Reason, _State) ->
ok.
%% Internal functions

%% Recompute the per-node partition ranges after a membership change and
%% notify every registered handler about the local node's new range.
%% Returns {LocalRange, NodeRanges}; until the local node shows up in the
%% membership list, {undefined, []} is returned.
update_ranges(MembersList, NumPartitions, ToNotify) ->
    Length = length(MembersList),
    {_, NodeRanges} = lists:foldl(fun(#{name := Node}, {Pos, Acc}) ->
                                          Range = calc_partition_range(Pos, Length, NumPartitions),
                                          {Pos+1, [{Range, Node} | Acc]};
                                     (Node, {Pos, Acc}) ->
                                          Range = calc_partition_range(Pos, Length, NumPartitions),
                                          {Pos+1, [{Range, Node} | Acc]}
                                  end, {0, []}, MembersList),
    case lists:keyfind(node(), 2, NodeRanges) of
        {Range, _} ->
            %% Notification is a pure side effect; previously this used
            %% maps:map/2 and discarded the freshly built map, so use
            %% lists:foreach/2 instead.
            lists:foreach(fun({_, Pid}) ->
                                  Pid ! {update_streams, Range}
                          end, maps:to_list(ToNotify)),
            {Range, NodeRanges};
        false ->
            %% not fully initialized yet
            {undefined, []}
    end.
%% Find the closed interval of partitions owned by the member at position
%% Pos when NumPartitions partitions are divided evenly among NumMembers
%% members; the last member absorbs any remainder.
-spec calc_partition_range(integer(), integer(), integer()) -> {integer(), integer()}.
calc_partition_range(Pos, NumMembers, NumPartitions) ->
    IntervalSize = NumPartitions div NumMembers,
    Start = Pos * IntervalSize,
    End = Start + IntervalSize,
    if
        End + IntervalSize > NumPartitions -> {Start, NumPartitions};
        true -> {Start, End - 1}
    end.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% Sanity checks for the partition-splitting arithmetic above.
%% Removed dataset artifact that was fused onto the -endif. line.
partition_range_test() ->
    ?assertEqual({0,15}, calc_partition_range(0, 4, 64)),
    ?assertEqual({16,31}, calc_partition_range(1, 4, 64)),
    ?assertEqual({32,47}, calc_partition_range(2, 4, 64)),
    ?assertEqual({48,64}, calc_partition_range(3, 4, 64)).
-endif.
-module(pollution).
-author("tgargula").
-export([create_monitor/0, add_station/3, add_value/5, remove_value/4, get_one_value/4, get_station_mean/3, get_daily_mean/3,
get_maximum_variation_station/2, get_station_variation/3, get_stats/1, get_key/2]).
%% A following header includes amongst others a measurement record definition
-include("../include/pollution.hrl").
%% Monitor data structure:
%% It is a map that stores two types of key-value pairs. One of them is K: Station, V: Coordinates
%% and the second is K: Coordinates, V: list of measurements
%% Measurements are represented by a record (that is a tuple with syntactic sugar)
%% Creates a fresh, empty monitor.
create_monitor() -> #{}.
%% Register a station under both its name and its coordinates. A station
%% may be registered only once, so the insert is rejected when either key
%% already exists in the monitor.
add_station(Station, Coordinates, Monitor) ->
    AlreadyKnown =
        maps:is_key(Station, Monitor) orelse maps:is_key(Coordinates, Monitor),
    case AlreadyKnown of
        true -> {error, "Station not unique!"};
        false -> Monitor#{Station => Coordinates, Coordinates => []}
    end.
%% Prepend a measurement to the station's list under its coordinates key;
%% an identical measurement (same time/type/value) is rejected.
add_measurement(Coordinates, Measurement, Monitor) ->
    Recorded = maps:get(Coordinates, Monitor),
    case lists:member(Measurement, Recorded) of
        true -> {error, "Cannot add the same measurement!"};
        false -> Monitor#{Coordinates => [Measurement | Recorded]}
    end.
%% Registers a measurement for the station identified by Id (either a
%% station name or a coordinates tuple). Fails when the station is unknown.
add_value(Id, Time, Type, Value, Monitor) ->
%% Add only if the station exists
case maps:is_key(Id, Monitor) of
true ->
Coordinates = get_key(Id, Monitor),
Measurement = #measurement{time = Time, type = Type, value = Value},
add_measurement(Coordinates, Measurement, Monitor);
_ -> {error, "Station does not exist!"}
end.
%% Removes every measurement matching both Time and Type from the station.
remove_value(Id, Time, Type, Monitor) ->
Coordinates = get_key(Id, Monitor),
Fun = fun(X) -> X#measurement.time =/= Time orelse X#measurement.type =/= Type end,
Monitor#{Coordinates => lists:filter(Fun, maps:get(Coordinates, Monitor))}.
%% Fetches the first measurement matching Time and Type; crashes with
%% badmatch when no such measurement exists.
get_one_value(Id, Time, Type, Monitor) ->
Fun = fun(X) -> X#measurement.time == Time andalso X#measurement.type == Type end,
[Result | _] = lists:filter(Fun, maps:get(get_key(Id, Monitor), Monitor)),
Result.
%% Arithmetic mean of all values of the given Type at one station.
%% NOTE(review): divides by the measurement count — with no measurements
%% of Type this raises badarith; confirm callers guarantee non-empty data.
get_station_mean(Id, Type, Monitor) ->
Measurements = lists:filter(fun(X) -> X#measurement.type == Type end, maps:get(get_key(Id, Monitor), Monitor)),
lists:foldl(fun(X, Y) -> X#measurement.value + Y end, 0, Measurements) / length(Measurements).
%% Mean of all values of the given Type across every station on QueryDate.
%% Measurement lists live under coordinate ({_, _}) keys only, so other
%% (station-name) entries are filtered out first. Same empty-data caveat
%% as get_station_mean/3 applies.
get_daily_mean(Type, QueryDate, Monitor) ->
Pred1 = fun(K, _) -> case K of {_, _} -> true; _ -> false end end,
Pred2 = fun(X) -> {Date, _} = X#measurement.time, Date == QueryDate andalso X#measurement.type == Type end,
Measurements = lists:filter(Pred2, lists:flatten(maps:values(maps:filter(Pred1, Monitor)))),
lists:foldl(fun(X, Y) -> X#measurement.value + Y end, 0, Measurements) / length(Measurements).
%% Returns a station with the highest variation of values of the given type,
%% as a {StationName, Coordinates, Variation} tuple; with no stations the
%% initial accumulator {null, null, -1} is returned.
get_maximum_variation_station(Type, Monitor) ->
%% station-name entries are the ones whose key is NOT a coordinates tuple
Stations = maps:to_list(maps:filter(fun(Key, _) -> case Key of {_, _} -> false; _ -> true end end, Monitor)),
StationsWithVariations = lists:map(
fun({StationName, Coordinates}) -> {StationName, Coordinates, get_station_variation(Coordinates, Type, Monitor)} end,
Stations
),
%% fold keeps the tuple with the largest variation value
Max = fun(Measurement, CurrentMax) ->
{_, _, Value} = Measurement,
{_, _, CurrentMaxValue} = CurrentMax,
case Value > CurrentMaxValue of
true -> Measurement;
_ -> CurrentMax
end end,
lists:foldl(Max, {null, null, -1}, StationsWithVariations).
%% Returns the variation (population variance) of values of the given
%% Type of the station with the given Id; 0 when the station has no
%% measurements of that Type.
%% Bug fix: the mean was previously computed BEFORE the empty-list check,
%% so an empty measurement set crashed with badarith inside
%% get_station_mean/3 and the `length(Measurements) > 0` guard never
%% protected anything. The mean is now computed only on the non-empty path.
get_station_variation(Id, Type, Monitor) ->
    Measurements = lists:filter(fun(X) -> X#measurement.type == Type end, maps:get(get_key(Id, Monitor), Monitor)),
    case Measurements of
        [] ->
            0;
        _ ->
            Mean = get_station_mean(Id, Type, Monitor),
            lists:foldl(fun(X, Y) -> math:pow(X#measurement.value - Mean, 2) + Y end, 0, Measurements) / length(Measurements)
    end.
%% Returns number of stations and all distinct measurement types registered by Monitor
get_stats(Monitor) ->
%% Every station contributes exactly two map entries (name -> coords and
%% coords -> measurements), hence the division by 2.
NumberOfStations = maps:size(Monitor) div 2,
%% measurement lists are the values that are lists; collect their types
Types = [ Measurement#measurement.type || {_, Value} <- maps:to_list(Monitor), is_list(Value), Measurement <- Value],
#{
numberOfStations => NumberOfStations,
types => lists:usort(Types)
}.
%% Resolve a station identifier to its coordinates key: an {X, Y} tuple is
%% already a coordinates key and is returned as-is; any other identifier
%% is treated as a station name and looked up in the monitor (crashes
%% with badkey when the station is unknown).
%% Removed dataset artifact that was fused onto the final line.
get_key(Id, Monitor) ->
    case Id of
        {_, _} -> Id;
        _ -> maps:get(Id, Monitor)
    end.
%% Taken from https://github.com/ninenines/ranch/pull/41/files,
%% with permission from fishcakez
-module(elli_sendfile).
-export([sendfile/5]).
-type sendfile_opts() :: [{chunk_size, non_neg_integer()}].
%% @doc Send part of a file on a socket.
%%
%% Basically, @see file:sendfile/5 but for ssl (i.e. not raw OS sockets).
%% Originally from https://github.com/ninenines/ranch/pull/41/files
%%
%% @end
-spec sendfile(file:fd(), inet:socket() | ssl:sslsocket(),
non_neg_integer(), non_neg_integer(), sendfile_opts())
-> {ok, non_neg_integer()} | {error, atom()}.
sendfile(RawFile, Socket, Offset, Bytes, Opts) ->
ChunkSize = chunk_size(Opts),
%% Remember the descriptor's current position so it can be restored
%% after the transfer; seek to Offset only if not already there.
Initial2 = case file:position(RawFile, {cur, 0}) of
{ok, Offset} ->
Offset;
{ok, Initial} ->
{ok, _} = file:position(RawFile, {bof, Offset}),
Initial
end,
case sendfile_loop(Socket, RawFile, Bytes, 0, ChunkSize) of
{ok, _Sent} = Result ->
%% restore the pre-call file position on success only
{ok, _} = file:position(RawFile, {bof, Initial2}),
Result;
{error, _Reason} = Error ->
Error
end.
%% Resolve the chunk_size option: a positive integer is used as given,
%% while 0 or an absent option falls back to the 16#1FFF (8191 byte)
%% default.
-spec chunk_size(sendfile_opts()) -> pos_integer().
chunk_size(Opts) ->
    Default = 16#1FFF,
    case lists:keyfind(chunk_size, 1, Opts) of
        {chunk_size, Size} when is_integer(Size) andalso Size > 0 ->
            Size;
        {chunk_size, 0} ->
            Default;
        false ->
            Default
    end.
-spec sendfile_loop(inet:socket() | ssl:sslsocket(), file:fd(), non_neg_integer(),
non_neg_integer(), pos_integer())
-> {ok, non_neg_integer()} | {error, term()}.
sendfile_loop(_Socket, _RawFile, Sent, Sent, _ChunkSize)
when Sent =/= 0 ->
%% All requested data has been read and sent, return number of bytes sent.
{ok, Sent};
%% Read-then-send loop; with Bytes =:= 0 it runs until eof (whole file).
sendfile_loop(Socket, RawFile, Bytes, Sent, ChunkSize) ->
ReadSize = read_size(Bytes, Sent, ChunkSize),
case file:read(RawFile, ReadSize) of
{ok, IoData} ->
case ssl:send(Socket, IoData) of
ok ->
Sent2 = iolist_size(IoData) + Sent,
sendfile_loop(Socket, RawFile, Bytes, Sent2,
ChunkSize);
{error, _Reason} = Error ->
Error
end;
eof ->
{ok, Sent};
{error, _Reason} = Error ->
Error
end.
%% Number of bytes to read next: when Bytes =:= 0 ("send whole file")
%% always request a full chunk; otherwise request the remainder, capped
%% at ChunkSize.
%% Removed dataset artifact that was fused onto the final line.
-spec read_size(non_neg_integer(), non_neg_integer(), non_neg_integer()) ->
    non_neg_integer().
read_size(0, _Sent, ChunkSize) ->
    ChunkSize;
read_size(Bytes, Sent, ChunkSize) ->
    min(Bytes - Sent, ChunkSize).
%%
%% Copyright (c) 2018 <NAME>
%% All rights reserved.
%% Distributed under the terms of the MIT License. See the LICENSE file.
%%
%% CANCEL request related functions
%%
-module(ersip_request_cancel).
-export([generate/1
]).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Constructing CANCEL request in accordance with RFC 3261 9.1 Client
%% Behavior (the original comment cited RFC3262, but CANCEL construction
%% is defined by RFC 3261).
%% Removed dataset artifact that was fused onto the final line.
-spec generate(ersip_request:request()) -> ersip_request:request().
generate(InitialRequest) ->
    ReqSipMsg = ersip_request:sipmsg(InitialRequest),
    %% The following procedures are used to construct a CANCEL request.
    %% The Request-URI, Call-ID, To, the numeric part of CSeq, and From
    %% header fields in the CANCEL request MUST be identical to those
    %% in the request being cancelled, including tags. A CANCEL
    %% constructed by a client MUST have only a single Via header field
    %% value matching the top Via value in the request being cancelled.
    %% Using the same values for these header fields allows the CANCEL
    %% to be matched with the request it cancels (Section 9.2 indicates
    %% how such matching occurs). However, the method part of the CSeq
    %% header field MUST have a value of CANCEL.
    RURI = ersip_sipmsg:ruri(ReqSipMsg),
    CANCEL0 = ersip_sipmsg:new_request(ersip_method:cancel(), RURI),
    CANCEL1 = ersip_sipmsg:copy(callid, ReqSipMsg, CANCEL0),
    CANCEL2 = ersip_sipmsg:copy(to, ReqSipMsg, CANCEL1),
    ReqCSeq = ersip_sipmsg:get(cseq, ReqSipMsg),
    CANCELCSeq = ersip_hdr_cseq:set_method(ersip_method:cancel(), ReqCSeq),
    CANCEL3 = ersip_sipmsg:set(cseq, CANCELCSeq, CANCEL2),
    CANCEL4 = ersip_sipmsg:copy(from, ReqSipMsg, CANCEL3),
    %% If the request being cancelled contains a Route header field, the
    %% CANCEL request MUST include that Route header field's values.
    CANCEL5 = ersip_sipmsg:copy(route, ReqSipMsg, CANCEL4),
    %% RFC 3261 says nothing about max-forwards in CANCEL for this case
    %% but following logic of route copy it should be the same as in
    %% INVITE:
    CANCEL6 = ersip_sipmsg:copy(maxforwards, ReqSipMsg, CANCEL5),
    %% Normative:
    %% A CANCEL constructed by a client MUST have only a single Via
    %% header field value matching the top Via value in the request
    %% being cancelled.
    %%
    %% Implementation: we really do not add Via here because it is
    %% automatically added when message is passed via connection. So
    %% what we really do here - we generate ersip_request with the
    %% same parameters as InitialRequest
    ersip_request:set_sipmsg(CANCEL6, InitialRequest).
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2015 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% This library contains load balancing logic.
%%% @end
%%%-------------------------------------------------------------------
-module(load_balancing).
-author("lopiola").
-ifdef(TEST).
-compile(export_all).
-endif.
-include("logging.hrl").
-include("monitoring/monitoring.hrl").
% Both computational and network load of a node will be rounded up to this value
% if they are smaller. This does not impact the load balancing process but ensures
% easier calculations (no danger of divison by zero).
-define(SMALLEST_LOAD, 0.1).
% Record used to express how dispatchers should be rerouting requests.
% Such records are distributed to dispatcher modules,
% and later they evaluate choose_node_for_dispatcher/1 function on this
% record. The record structure shall not be visible outside this module.
-record(dispatcher_lb_advice, {
% Flag meaning if any requests should be rerouted to other nodes.
should_delegate = false,
% List of 3-element tuples {Node, Frequency, RequestCount}.
% Frequency (relative to other nodes) is how often
% should requests be rerouted to given node.
nodes_and_frequency = [] :: [{Node :: node(), Frequency :: float()}],
% List of all nodes (dispatcher needs such knowledge for multicalls)
all_nodes = [] :: [node()],
singleton_modules = [] :: [{Module :: atom(), Node :: node()}]
}).
% This records holds a state that should be passed between each calculation
% of load balancing advices. It contains data from previous calculations
% that is needed in next ones.
-record(load_balancing_state, {
% Contains information how much extra load is expected on given node
% in current load balancing interval. Extra load is estimated load
% caused by delegation.
expected_extra_load = [] :: [{Node :: node(), ExtraLoad :: float()}]
}).
-type dispatcher_lb_advice() :: #dispatcher_lb_advice{}.
-type load_balancing_state() :: #load_balancing_state{}.
-export_type([dispatcher_lb_advice/0, load_balancing_state/0]).
%% API
-export([advices_for_dispatchers/3, choose_node_for_dispatcher/2]).
-export([all_nodes_for_dispatcher/2, initial_advice_for_dispatcher/0]).
%%%===================================================================
%%% API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Returns guidelines that should be used by dispatchers in the cluster,
%% based on node states of all nodes. The NodeStates list must not be empty.
%% Also returns the updated load-balancing state (expected extra loads)
%% to be threaded into the next calculation.
%% @end
%%--------------------------------------------------------------------
-spec advices_for_dispatchers(NodeStates :: [#node_state{}], LBState :: #load_balancing_state{} | undefined,
Singletons :: [{Module :: atom(), Node :: node() | undefined}]) ->
{[{node(), #dispatcher_lb_advice{}}], #load_balancing_state{}}.
advices_for_dispatchers(NodeStates, LBState, Singletons) ->
%% first run has no previous state, hence no expected extra loads
ExtraLoads = case LBState of
undefined ->
[];
#load_balancing_state{expected_extra_load = EEL} ->
EEL
end,
Nodes = [NodeState#node_state.node || NodeState <- NodeStates],
%% Calculate loads on nodes, take into account the expected extra load from delegation
%% i. e. assume the load is lower when deciding where we can delegate (this extra
%% load should dissappear quickly as the delegated requests are processed).
LoadsForDisp = lists:map(
fun(NodeState) ->
L = load_for_dispatcher(NodeState),
ExtraLoad = proplists:get_value(NodeState#node_state.node, ExtraLoads, 0.0),
L * (1.0 - ExtraLoad)
end, NodeStates),
AvgLoadForDisp = utils:average(LoadsForDisp),
MinLoadForDisp = lists:min(LoadsForDisp),
NodesAndLoads = lists:zip(Nodes, LoadsForDisp),
%% Nodes that are loaded less than average
FreeNodes = lists:filter(
fun({_, Load}) ->
Load =< AvgLoadForDisp
end, NodesAndLoads),
%% Nodes that are overloaded
OverloadedNodes = lists:filtermap(
fun({Node, Load}) ->
Overloaded = Load > AvgLoadForDisp andalso
overloaded_for_dispatcher(Load, MinLoadForDisp),
case Overloaded of
false -> false;
true -> {true, Node}
end
end, NodesAndLoads),
OverloadedNodesNum = length(OverloadedNodes),
%% Build one advice per node: overloaded nodes get a frequency table
%% (normalized so the frequencies sum to 1.0) describing how often to
%% keep a request locally vs. delegate it to each free node.
Result = lists:map(
fun({Node, Load}) ->
ShouldDelegate = lists:member(Node, OverloadedNodes),
NodesAndFrequency =
case ShouldDelegate of
true ->
OtherNodes = [{FNode, MinLoadForDisp / FNLoad / OverloadedNodesNum} || {FNode, FNLoad} <- FreeNodes],
NAndF = [{Node, MinLoadForDisp / Load} | OtherNodes],
FrequencySum = lists:foldl(fun({_, Freq}, Acc) -> Acc + Freq end, 0.0, NAndF),
[{NodeName, Freq / FrequencySum} || {NodeName, Freq} <- NAndF];
false ->
[]
end,
{Node, #dispatcher_lb_advice{should_delegate = ShouldDelegate,
nodes_and_frequency = NodesAndFrequency, all_nodes = Nodes, singleton_modules = Singletons}}
end, NodesAndLoads),
%% Calculate expected extra loads on each node.
%% For example:
%% node A is overloaded and will delegate 70% reqs to node B
%% node B in not overloaded
%% expected extra load on each node is:
%% node A = 0.0
%% 0.7
%% node B = ---------
%% 1.0 + 0.7
%%
%% (1.0 is expected normal load on node B, as it does not delegate anything)
%%
ExtraLoadsSum = lists:foldl(
fun({NodeFrom, DispLBAdvice}, ExtraLoadsAcc) ->
#dispatcher_lb_advice{
should_delegate = ShouldDelegate,
nodes_and_frequency = NodesAndFreq} = DispLBAdvice,
case ShouldDelegate of
false ->
ExtraLoadsAcc;
true ->
lists:foldl(
fun({NodeTo, Freq}, Acc) ->
case NodeFrom of
NodeTo ->
Acc;
_ ->
Current = proplists:get_value(NodeTo, Acc, 0.0),
[{NodeTo, Current + Freq} | proplists:delete(NodeTo, Acc)]
end
end, ExtraLoadsAcc, NodesAndFreq)
end
end, [], Result),
%% normalize the summed delegated frequency into an expected load share
NewExtraLoads = lists:map(
fun({Node, ExtraLoadSum}) ->
{Node, ExtraLoadSum / (1.0 + ExtraLoadSum)}
end, ExtraLoadsSum),
{Result, #load_balancing_state{expected_extra_load = NewExtraLoads}}.
%%--------------------------------------------------------------------
%% @doc
%% Picks the node that a dispatcher should route a request for the given
%% worker to. Singleton workers always go to their dedicated node;
%% otherwise the request stays on the local node unless delegation is
%% advised, in which case a node is drawn at random according to the
%% advised frequencies.
%% @end
%%--------------------------------------------------------------------
-spec choose_node_for_dispatcher(DSNAdvice :: #dispatcher_lb_advice{}, WorkerName :: atom()) -> node().
choose_node_for_dispatcher(#dispatcher_lb_advice{should_delegate = Delegate,
    nodes_and_frequency = NodesAndFreq, singleton_modules = Singletons}, WorkerName) ->
    case proplists:get_value(WorkerName, Singletons) of
        undefined when not Delegate ->
            % No delegation advised - handle the request locally.
            node();
        undefined ->
            % Weighted random choice among the advised nodes.
            {ChosenNode, _Freq} = lists:nth(choose_index(NodesAndFreq), NodesAndFreq),
            ChosenNode;
        DedicatedNode ->
            % Singleton module - always runs on its dedicated node.
            DedicatedNode
    end.
%%--------------------------------------------------------------------
%% @doc
%% Returns the candidate nodes for the given worker: the single
%% dedicated node for singleton modules, otherwise all nodes known to
%% the advice. (The previous doc was copy-pasted from
%% initial_advice_for_dispatcher/0 and did not describe this function.)
%% @end
%%--------------------------------------------------------------------
-spec all_nodes_for_dispatcher(Advice :: #dispatcher_lb_advice{}, WorkerName :: atom()) -> [node()].
all_nodes_for_dispatcher(#dispatcher_lb_advice{all_nodes = AllNodes, singleton_modules = SM}, WorkerName) ->
    case proplists:get_value(WorkerName, SM) of
        undefined ->
            AllNodes;
        DedicatedNode ->
            [DedicatedNode]
    end.
%%--------------------------------------------------------------------
%% @doc
%% Returns an initial advice for dispatcher that can be used before cluster manager
%% starts broadcasting advices. All fields keep the record defaults
%% (no delegation, no singleton modules).
%% @end
%%--------------------------------------------------------------------
-spec initial_advice_for_dispatcher() -> #dispatcher_lb_advice{}.
initial_advice_for_dispatcher() ->
    #dispatcher_lb_advice{}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Helper function that returns the index of node which should be chosen based on
%% frequency (weights) of nodes. A uniform random float is drawn once and
%% the weights are subtracted from it one by one; the index at which it
%% drops below zero is selected, so nodes with a higher frequency are
%% proportionally more likely to be picked.
%% @end
%%--------------------------------------------------------------------
-spec choose_index(NodesAndFrequencies :: [{Node :: term(), Frequency :: float()}]) -> integer().
choose_index(NodesAndFrequencies) ->
    %% NOTE: the rand module seeds itself automatically on first use in
    %% every process, so the previous per-call rand:seed(exsplus) was
    %% unnecessary and, being time-based, degraded the randomness when
    %% called in quick succession. It has been removed.
    choose_index(NodesAndFrequencies, 0, rand:uniform()).

%% Walk the weight list, consuming the random value until it goes negative.
choose_index([], CurrIndex, _) ->
    CurrIndex;
choose_index(_, CurrIndex, RandomFloat) when RandomFloat < 0.0 ->
    CurrIndex;
choose_index([{_, Freq} | T], CurrIndex, RandomFloat) ->
    choose_index(T, CurrIndex + 1, RandomFloat - Freq).
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Condenses a node state into a single load value used for comparisons
%% in dispatcher load balancing: a weighted average of CPU usage
%% (weight 3) and memory usage (weight 1), clamped from below so it is
%% never smaller than ?SMALLEST_LOAD.
%% @end
%%--------------------------------------------------------------------
-spec load_for_dispatcher(NodeState :: #node_state{}) -> float().
load_for_dispatcher(#node_state{cpu_usage = CpuUsage, mem_usage = MemUsage}) ->
    max(?SMALLEST_LOAD, (3 * CpuUsage + MemUsage) / 4).
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Decides if the node is overloaded from dispatcher point of view.
%% Takes the load value computed by load_for_dispatcher/1 - NOT the raw
%% #node_state{} record that the previous (incorrect) spec suggested.
%% A node counts as overloaded when its load both exceeds 1.5x the
%% minimal load among all nodes and crosses the absolute threshold 30.0.
%% @end
%%--------------------------------------------------------------------
-spec overloaded_for_dispatcher(LoadForDispatcher :: float(), MinLoadForDisp :: float()) -> boolean().
overloaded_for_dispatcher(LoadForDispatcher, MinLoadForDisp) ->
    LoadForDispatcher > 1.5 * MinLoadForDisp andalso LoadForDispatcher > 30.0.
% Copyright 2011 Couchbase, Inc.
%
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(erlgeom).
-export([disjoint/2, from_geom/1, to_geom/1,
topology_preserve_simplify/2, to_geom_validate/1]).
-on_load(init/0).
%% @private
%% Loads the erlgeom NIF library (registered via -on_load). The shared
%% object is looked up in the application's priv directory, falling back
%% to a relative "priv" path when the code server does not know the
%% application (e.g. during development). Load errors are swallowed by
%% the catch; the Erlang stub bodies then remain in effect.
init() ->
    SoPath = nif_path(),
    (catch erlang:load_nif(SoPath, 0)).

%% @private
%% Resolves the path (without extension) of the NIF shared library.
nif_path() ->
    case code:priv_dir(?MODULE) of
        {error, bad_name} ->
            RelPriv = filename:join(["..", "priv"]),
            case filelib:is_dir(RelPriv) of
                true -> filename:join(RelPriv, "erlgeom");
                false -> filename:join("priv", "erlgeom")
            end;
        PrivDir ->
            filename:join(PrivDir, "erlgeom")
    end.
%% @doc NIF stub: decides whether two geometries are spatially disjoint.
%% This fallback body is only reached when the NIF library failed to
%% load; otherwise it is replaced by the native implementation.
disjoint(_Geom1, _Geom2) ->
    "NIF library not loaded".
%% @doc NIF stub: simplifies a geometry while preserving its topology.
%% Replaced by the native implementation when the NIF library is loaded.
topology_preserve_simplify(_Geom1, _Tolerance) ->
    "NIF library not loaded".
% @doc Convert a GeoCouch geometry to a GEOS geometry, validating the
% structure of the geometry first. Throws the validation failure reason
% (a string) when the structure is invalid.
% NOTE: the previous spec claimed a true|false return, but this function
% actually returns whatever to_geom/1 produces (or throws) - the spec
% now reflects that.
-spec to_geom_validate(Geom :: {atom(), list()}) -> term().
to_geom_validate(Geom) ->
    case is_valid_geometry(Geom) of
        true -> to_geom(Geom);
        {false, Reason} -> throw(Reason)
    end.
% @doc Validate the structure of a geometry tuple
% {GeometryType :: atom(), Coordinates :: list()}. Returns true when the
% coordinate nesting matches the geometry type, otherwise {false, Reason}
% where Reason is a human-readable string.
-spec is_valid_geometry(Geom::{atom(), list()}) -> true|{false, string()}.
is_valid_geometry(Geom) ->
    case Geom of
        {'Point', Coords} ->
            case is_point(Coords) of
                true -> true;
                false -> {false, "Invalid Point"}
            end;
        {'LineString', Coords} ->
            is_linestring(Coords);
        {'Polygon', Coords} ->
            is_polygon(Coords);
        {'MultiPoint', Coords} ->
            case all(fun(Coord) -> is_point(Coord) end, Coords) of
                true ->
                    true;
                false ->
                    {false, "Not every position of the MultiPoint is a valid Point"}
            end;
        {'MultiLineString', Coords} ->
            % A MultiLineString has the same nesting as a Polygon (a list
            % of linestrings), so the Polygon check is reused here. A
            % failure therefore reports a LineString-flavoured reason.
            is_polygon(Coords);
        {'MultiPolygon', Coords} ->
            case all(fun(Coord) -> is_polygon(Coord) end, Coords) of
                true ->
                    true;
                false ->
                    {false, "Not every Polygon is a valid one"}
            end;
        {'GeometryCollection', Coords} ->
            % Recursively validate every member geometry.
            case all(fun(Coord) -> is_valid_geometry(Coord) end, Coords) of
                true ->
                    true;
                false ->
                    {false, "Not every Geometry is a valid one"}
            end;
        {GeomType, _} when is_atom(GeomType) ->
            {false, "Invalid geometry type (" ++ atom_to_list(GeomType) ++ ")"};
        _ ->
            {false, "Invalid geometry"}
    end.
% @doc Validate Polygon (or MultiLineString) coordinates: a list of
% valid linestrings. NOTE: the previous spec claimed true|false, but a
% failure is reported as {false, Reason} - the spec now reflects that.
-spec is_polygon(Coords::[[[number()]]]) -> true | {false, string()}.
is_polygon(Coords) ->
    case all(fun(Coord) -> is_linestring(Coord) end, Coords) of
        true ->
            true;
        false ->
            {false, "Not every LineString is a valid one"}
    end.
% @doc Validate LineString coordinates: a list of at least two positions,
% each a valid point. NOTE: the previous spec claimed true|false, but
% failures are reported as {false, Reason} - the spec now reflects that.
-spec is_linestring(Coords::[[number()]]) -> true | {false, string()}.
is_linestring(Coords) when length(Coords) =< 1 ->
    {false, "LineString must have more than one position"};
is_linestring(Coords) ->
    case all(fun(Coord) -> is_point(Coord) end, Coords) of
        true ->
            true;
        false ->
            {false, "Not every position of the LineString is a valid point"}
    end.
% @doc Tests whether the term is a valid point: either the empty list or
% a pair [X, Y] of two numeric coordinates. Anything else (including
% pairs with non-numeric elements) is rejected.
-spec is_point(Point::[number()]) -> true|false.
is_point([]) ->
    true;
is_point([X, Y]) ->
    is_number(X) andalso is_number(Y);
is_point(_) ->
    false.
% @doc Like lists:all/2, except that anything other than the atom 'true'
% counts as false, an empty list yields false, and a non-list argument
% yields false as well.
all(Fun, [_ | _] = List) ->
    all2(Fun, List);
all(_Fun, _EmptyOrNotAList) ->
    false.
% @private
% Strict conjunction over the list: every element must map to exactly
% the atom 'true' under Fun; any other result short-circuits to false.
all2(Fun, [Head | Tail]) ->
    Fun(Head) =:= true andalso all2(Fun, Tail);
all2(_Fun, []) ->
    true.
% @doc Convert a GeoCouch geometry to a GEOS geometry.
% NIF stub: this body is only reached when the native library failed to
% load; otherwise it is replaced by the NIF implementation.
to_geom(_Geom) ->
    "NIF library not loaded".
% @doc Convert a GEOS geometry to a GeoCouch geometry.
% NIF stub: this body is only reached when the native library failed to
% load; otherwise it is replaced by the NIF implementation.
% (Stray dataset metadata fused onto the final line has been removed.)
from_geom(_Geom) ->
    "NIF library not loaded".
%%%-------------------------------------------------------------------
%%% @doc
%%% This is an Erlang clone of the original `Phoenix.PubSub' module.
%%% Copyright (c) 2014 <NAME>
%%% @reference See
%%% <a href="https://github.com/phoenixframework/phoenix">Phoenix</a>
%%% @end
%%%-------------------------------------------------------------------
-module(ebus_ps).
%% API
-export([
subscribe/3,
subscribe/4,
unsubscribe/3,
broadcast/3,
broadcast_from/4,
subscribers/2,
list/1
]).
%%%===================================================================
%%% Types
%%%===================================================================
-type options() :: ebus_ps_local:options().
%%%===================================================================
%%% API
%%%===================================================================
%% @equiv subscribe(Server, Pid, Topic, [])
subscribe(Server, Pid, Topic) ->
  subscribe(Server, Pid, Topic, []).
%% @doc
%% Subscribes the pid to the PubSub adapter's topic.
%%
%% <ul>
%% <li>`Server': The Pid registered name of the server.</li>
%% <li>`Pid': The subscriber pid to receive pubsub messages.</li>
%% <li>`Topic': The topic to subscribe to, ie: `"users:123"'.</li>
%% <li>`Opts': The optional list of options. See below.</li>
%% </ul>
%%
%% <b>Options:</b>
%% <br/>
%% <ul>
%% <li>`link': links the subscriber to the pubsub adapter.</li>
%% <li>`fastlane': Provides a fastlane path for the broadcasts for
%% `broadcast()' events. The fastlane process is notified of a cached
%% message instead of the normal subscriber. Fastlane handlers must
%% implement `fastlane/1' callbacks which accepts a `broadcast()' struct
%% and returns a fastlaned format for the handler. For example:</li>
%% <br/>
%% ```
%% ebus_ps:subscribe(
%%   my_pubsub_server, self(), <<"topic1">>,
%%   [{fastlane, {FastPid, my_serializer, [<<"event1">>]}}]).
%% '''
%% </ul>
%% @end
-spec subscribe(atom(), pid(), binary(), options()) -> ok | {error, term()}.
subscribe(Server, Pid, Topic, Opts) when is_atom(Server) ->
  call(Server, subscribe, [Pid, Topic, Opts]).
%% @doc
%% Removes the subscription of `Pid' to `Topic' on the given PubSub
%% server.
%% @end
-spec unsubscribe(atom(), pid(), binary()) -> ok | {error, term()}.
unsubscribe(Server, Pid, Topic) when is_atom(Server) ->
  Args = [Pid, Topic],
  call(Server, unsubscribe, Args).
%% @doc
%% Delivers `Msg' to every subscriber of `Topic'. The `none' placeholder
%% means no sender is excluded from delivery.
%% @end
-spec broadcast(atom(), binary(), term()) -> ok | {error, term()}.
broadcast(Server, Topic, Msg) when is_atom(Server) ->
  BroadcastArgs = [none, Topic, Msg],
  call(Server, broadcast, BroadcastArgs).
%% @doc
%% Delivers `Msg' to every subscriber of `Topic' except `FromPid'
%% (typically the sender itself).
%% @end
-spec broadcast_from(atom(), pid(), binary(), term()) -> ok | {error, term()}.
broadcast_from(Server, FromPid, Topic, Msg)
    when is_atom(Server), is_pid(FromPid) ->
  BroadcastArgs = [FromPid, Topic, Msg],
  call(Server, broadcast, BroadcastArgs).
%% @doc
%% Returns a set of subscribers pids for the given topic.
%%
%% <ul>
%% <li>`Server': The registered server name or pid.</li>
%% <li>`Topic': The string topic, for example `<<"users:123">>'.</li>
%% </ul>
%%
%% Example:
%%
%% ```
%% > subscribers(pubsub_server, <<"foo">>).
%% [<0.48.0>, <0.49.0>]
%% '''
%% @end
-spec subscribers(atom(), binary()) -> [pid()].
%% Delegates to the adapter module registered in the server's ETS table.
subscribers(Server, Topic) when is_atom(Server) ->
  call(Server, subscribers, [Topic]).
%% @doc
%% Returns the list of all known topics.
%% This is an expensive and private operation. DO NOT USE IT IN PROD.
%% @end
-spec list(atom()) -> [binary()].
list(Server) when is_atom(Server) ->
  NoArgs = [],
  call(Server, list, NoArgs).
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% @private
%% Looks up, in the server's ETS table, the module registered for the
%% operation `Kind' together with its partially-applied argument list
%% `Head', then invokes Module:Kind(Head ++ Args). Crashes with a
%% badmatch if no single entry is registered for `Kind'.
%% (Stray dataset metadata fused onto the final line has been removed.)
call(Server, Kind, Args) ->
  [{Kind, Module, Head}] = ets:lookup(Server, Kind),
  apply(Module, Kind, Head ++ Args).
%%% @doc Main external API of the Gradualizer
%%%
%%% The functions `type_check(file|module|dir)' accept the following options:
%%% - `{i, Dir}': Include path for `-include' and `-include_lib' when checking
%%% Erlang source files. Specify multiple times for multiple include paths.
%%% - `stop_on_first_error': if `true' stop type checking at the first error,
%%% if `false' continue checking all functions in the given file and all files
%%% in the given directory.
%%% - `crash_on_error': if `true' crash on the first produced error
%%% - `return_errors': if `true', turns off error printing and errors
%%% (in their internal format) are returned in a list instead of being
%%% condensed into a single ok | nok.
%%% - `fmt_location': how to format location when pretty printing errors
%%% - `none': no location for easier comparison
%%% - `brief': for machine processing ("LINE:COLUMN:" before message text)
%%% - `verbose' (default): for human readers
%%% ("on line LINE at column COLUMN" within the message text)
%%% - `fmt_expr_fun': function to pretty print an expression AST
%%% (useful to support other languages)
%%% - `fmt_type_fun': function to pretty print a type AST
%%% (useful to support other languages)
%%% - `{color, always | never | auto}': Use colors when printing fancy messages.
%%% Auto is the default but auto-detection of tty doesn't work when running
%%% as an escript. It works when running from the Erlang shell though.
%%% - `{fancy, boolean()}': Use fancy error messages when possible. True by
%%% default. Doesn't work when a custom `fmt_expr_fun' is used.
-module(gradualizer).
-export([type_check_file/1,
type_check_file/2,
type_check_module/1,
type_check_module/2,
type_check_dir/1,
type_check_dir/2,
type_check_files/1,
type_check_files/2,
type_check_forms/2
]).
-export_type([options/0, top/0]).
-type options() :: proplists:proplist().
%% This type is the top of the subtyping lattice.
-opaque top() :: any().
-include("gradualizer.hrl").
%% API functions

%% @doc Type check a source or beam file using the default (empty)
%% option list. See type_check_file/2 for the recognized options.
-spec type_check_file(file:filename()) -> ok | nok | [{file:filename(), any()}].
type_check_file(File) ->
    type_check_file(File, []).
%% @doc Type check a single source (.erl) or compiled (.beam) file.
%% Source files are lint-checked before type checking; failure to
%% extract forms is thrown as-is. Unknown extensions throw
%% {unknown_file_extension, Ext}.
-spec type_check_file(file:filename(), options()) -> ok | nok | [{file:filename(), any()}].
type_check_file(File, Opts) ->
    Ext = filename:extension(File),
    if
        Ext =:= ".erl" ->
            IncludePaths = proplists:get_all_values(i, Opts),
            case gradualizer_file_utils:get_forms_from_erl(File, IncludePaths) of
                {ok, Forms} -> lint_and_check_forms(Forms, File, Opts);
                Error -> throw(Error)
            end;
        Ext =:= ".beam" ->
            case gradualizer_file_utils:get_forms_from_beam(File) of
                {ok, Forms} -> type_check_forms(File, Forms, Opts);
                Error -> throw(Error)
            end;
        true ->
            throw({unknown_file_extension, Ext})
    end.
%% @doc Runs an erl_lint pass, to check if the forms can be compiled at all,
%% before running the type checker.
-spec lint_and_check_forms([erl_parse:abstract_form()], file:filename(), options()) ->
          ok | nok | [{file:filename(), any()}].
lint_and_check_forms(Forms, File, Opts) ->
    case erl_lint:module(Forms, File, [return_errors]) of
        {ok, _Warnings} ->
            type_check_forms(File, Forms, Opts);
        {error, Errors, _Warnings} ->
            %% If there are lint errors (i.e. compile errors like undefined
            %% variables) we don't even try to type check.
            case proplists:get_bool(return_errors, Opts) of
                true ->
                    %% Flatten {Filename, [ErrorInfo]} pairs into a flat
                    %% [{Filename, ErrorInfo}] list for the caller.
                    [{Filename, ErrorInfo} || {Filename, ErrorInfos} <- Errors,
                                              ErrorInfo <- ErrorInfos];
                false ->
                    %% Print the lint errors per file and report failure.
                    [gradualizer_fmt:print_errors(ErrorInfos,
                                                  [{filename, Filename} | Opts])
                     || {Filename, ErrorInfos} <- Errors],
                    nok
            end
    end.
%% @doc Type check a module
-spec type_check_module(module()) -> ok | nok | [{file:filename(), any()}].
type_check_module(Module) ->
    type_check_module(Module, []).
%% @doc Type check a module. Locates the module's beam file with
%% code:which/1 and type checks it; throws {beam_not_found, Reason}
%% when the code server cannot find it.
-spec type_check_module(module(), options()) ->
          ok | nok | [{file:filename(), any()}].
type_check_module(Module, Opts) when is_atom(Module) ->
    case code:which(Module) of
        File when is_list(File) ->
            type_check_file(File, Opts);
        Error when is_atom(Error) ->
            throw({beam_not_found, Error})
    end.
%% @doc Type check all source or beam files in a directory.
-spec type_check_dir(file:filename()) -> ok | nok | [{file:filename(), any()}].
type_check_dir(Dir) ->
    type_check_dir(Dir, []).
%% @doc Type check all source or beam files in a directory.
%% Throws {dir_not_found, Dir} when the path is not a directory.
-spec type_check_dir(file:filename(), options()) ->
          ok | nok | [{file:filename(), any()}].
type_check_dir(Dir, Opts) ->
    case filelib:is_dir(Dir) of
        true ->
            Pattern = ?assert_type(filename:join(Dir, "*.{erl,beam}"), file:filename()),
            type_check_files(filelib:wildcard(Pattern), Opts);
        false ->
            throw({dir_not_found, Dir})
    end.
%% @doc Type check a source or beam file
-spec type_check_files([file:filename()]) ->
          ok | nok | [{file:filename(), any()}].
type_check_files(Files) ->
    type_check_files(Files, []).
%% @doc Type check a source or beam
%% With return_errors set, errors from all files are accumulated into a
%% list; otherwise results are folded into a single ok | nok. In both
%% modes stop_on_first_error makes the fold skip remaining files once a
%% failure has been seen (the guards below implement the short-circuit).
-spec type_check_files([file:filename()], options()) ->
          ok | nok | [{file:filename(), any()}].
type_check_files(Files, Opts) ->
    StopOnFirstError = proplists:get_bool(stop_on_first_error, Opts),
    ReturnErrors = proplists:get_bool(return_errors, Opts),
    if ReturnErrors ->
            lists:foldl(
              fun(File, Errors) when Errors =:= [];
                                     not StopOnFirstError ->
                      type_check_file_or_dir(File, Opts) ++ Errors;
                 (_, Errors) ->
                      Errors
              end, [], Files);
       true ->
            lists:foldl(
              fun(File, Res) when Res =:= ok;
                                  not StopOnFirstError ->
                      case type_check_file_or_dir(File, Opts) of
                          ok -> Res;
                          nok -> nok
                      end;
                 (_, nok) ->
                      nok
              end, ok, Files)
    end.
%% @private Dispatches a path to the directory or the file checker
%% (directories take precedence); throws {file_not_found, Path} when
%% the path denotes neither.
-spec type_check_file_or_dir(file:filename(), options()) ->
          ok | nok | [{file:filename(), any()}].
type_check_file_or_dir(Path, Opts) ->
    case {filelib:is_dir(Path), filelib:is_regular(Path)} of
        {true, _} -> type_check_dir(Path, Opts);
        {_, true} -> type_check_file(Path, Opts);
        _ -> throw({file_not_found, Path}) % TODO: better errors
    end.
%% @doc Type check an abstract syntax tree of a module. This can be useful
%% for tools where the abstract forms are generated in memory.
%%
%% If the first form is a file attribute (as in forms returned by e.g.
%% epp:parse_file/1,2), that filename will be used in error messages.
%% The second form is typically the module attribute.
-spec type_check_forms([erl_parse:abstract_form()], options()) ->
          ok | nok | [{file:filename(), any()}].
type_check_forms(Forms, Opts) ->
    type_check_forms(filename_from_forms(Forms), Forms, Opts).

%% @private Extracts the filename from a leading file attribute, if any.
filename_from_forms([{attribute, _, file, {File, _}} | _]) -> File;
filename_from_forms(_) -> "no filename".
%% Helper
%% Merges per-module -gradualizer(Opts) attributes with the caller's
%% options, runs the type checker, and renders the result according to
%% the return_errors option.
-spec type_check_forms(file:filename(), Forms, options()) -> R when
      Forms :: gradualizer_file_utils:abstract_forms(),
      R :: ok | nok | [{file:filename(), any()}].
type_check_forms(File, Forms, Opts) ->
    ReturnErrors = proplists:get_bool(return_errors, Opts),
    OptsForModule = options_from_forms(Forms) ++ Opts,
    Errors = typechecker:type_check_forms(Forms, OptsForModule),
    case {ReturnErrors, Errors} of
        {true, _ } ->
            lists:map(fun(Error) -> {File, Error} end, Errors);
        {false, []} ->
            ok;
        {false, [_|_]} ->
            Opts1 = add_source_file_and_forms_to_opts(File, Forms, Opts),
            gradualizer_fmt:print_errors(Errors, Opts1),
            nok
    end.
%% @private Prepends filename and forms to the option list; additionally
%% records the path as source_file when it refers to an existing .erl
%% file on disk. Uses exact comparison (=:=) for the extension check
%% instead of the previous coercing ==.
add_source_file_and_forms_to_opts(File, Forms, Opts) ->
    Opts1 = [{filename, File}, {forms, Forms} | Opts],
    case filename:extension(File) =:= ".erl" andalso filelib:is_file(File) of
        true -> [{source_file, File} | Opts1];
        false -> Opts1
    end.
%% Extract -gradualizer(Options) from AST.
%% A list attribute contributes all of its elements; a bare term counts
%% as a single option. All occurrences are concatenated in source order.
%% (Stray dataset metadata fused onto the final line has been removed.)
-spec options_from_forms(gradualizer_file_utils:abstract_forms()) -> options().
options_from_forms([{attribute, _L, gradualizer, Opts} | Fs]) when is_list(Opts) ->
    Opts ++ options_from_forms(Fs);
options_from_forms([{attribute, _L, gradualizer, Opt} | Fs]) ->
    [Opt | options_from_forms(Fs)];
options_from_forms([_F | Fs]) -> options_from_forms(Fs);
options_from_forms([]) -> [].
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1996-2021. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% This module administers three kinds of log files:
%%
%% 1 The transaction log
%% mnesia_tm appends to the log (via mnesia_log) at the
%% end of each transaction (or dirty write) and
%% mnesia_dumper reads the log and performs the ops in
%% the dat files. The dump_log is done at startup and
%% at intervals controlled by the user.
%%
%% 2 The mnesia_down log
%% mnesia_tm appends to the log (via mnesia_log) when it
%% realizes that mnesia goes up or down on another node.
%% mnesia_init reads the log (via mnesia_log) at startup.
%%
%% 3 The backup log
%% mnesia_schema produces one tiny log when the schema is
%% initially created. mnesia_schema also reads the log
%% when the user wants tables (possibly incl the schema)
%% to be restored. mnesia_log appends to the log when the
%% user wants to produce a real backup.
%%
%% The actual access to the backup media is performed via the
%% mnesia_backup module for both read and write. mnesia_backup
%% uses the disk_log (*), BUT the user may write an own module
%% with the same interface as mnesia_backup and configure
%% Mnesia so the alternate module performs the actual accesses
%% to the backup media. This means that the user may put the
%% backup on media that Mnesia does not know about possibly on
%% hosts where Erlang is not running.
%%
%% All these logs have to some extent a common structure.
%% They are all using the disk_log module (*) for the basic
%% file structure. The disk_log has a repair feature that
%% can be used to skip erroneous log records if one comes to
%% the conclusion that it is more important to reuse some
%% of the log records than the risque of obtaining inconsistent
%% data. If the data becomes inconsistent it is solely up to the
%% application to make it consistent again. The automatic
%% reparation of the disk_log is very powerful, but use it
%% with extreme care.
%%
%% First in all Mnesia's log file is a mnesia log header.
%% It contains a list with a log_header record as single
%% element. The structure of the log_header may never be
%% changed since it may be written to very old backup files.
%% By holding this record definition stable we can be
%% able to comprehend backups from time point zero. It also
%% allows us to use the backup format as an interchange
%% format between Mnesia releases.
%%
%% An op-list is a list of tuples with arity 3. Each tuple
%% has this structure: {Oid, Recs, Op} where Oid is the tuple
%% {Tab, Key}, Recs is a (possibly empty) list of records and
%% Op is an atom.
%%
%% The log file structure for the transaction log is as follows.
%%
%% After the mnesia log section follows an extended record section
%% containing op-lists. There are several values that Op may
%% have, such as write, delete, update_counter, delete_object,
%% and replace. There is no special end of section marker.
%%
%% +-----------------+
%% | mnesia log head |
%% +-----------------+
%% | extended record |
%% | section |
%% +-----------------+
%%
%% The log file structure for the mnesia_down log is as follows.
%%
%% After the mnesia log section follows a mnesia_down section
%% containing lists with yoyo records as single element.
%%
%% +-----------------+
%% | mnesia log head |
%% +-----------------+
%% | mnesia_down |
%% | section |
%% +-----------------+
%%
%% The log file structure for the backup log is as follows.
%%
%% After the mnesia log section follows a schema section
%% containing record lists. A record list is a list of tuples
%% where {schema, Tab} is interpreted as a delete_table(Tab) and
%% {schema, Tab, CreateList} are interpreted as create_table.
%%
%% The record section also contains record lists. In this section
%% {Tab, Key} is interpreted as delete({Tab, Key}) and other tuples
%% as write(Tuple). There is no special end of section marker.
%%
%% +-----------------+
%% | mnesia log head |
%% +-----------------+
%% | schema section |
%% +-----------------+
%% | record section |
%% +-----------------+
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-module(mnesia_log).
-export([
append/2,
backup/1,
backup/2,
backup_checkpoint/2,
backup_checkpoint/3,
backup_log_header/0,
backup_master/2,
chunk_decision_log/1,
chunk_decision_tab/1,
chunk_log/1,
chunk_log/2,
close_decision_log/0,
close_decision_tab/0,
close_log/1,
unsafe_close_log/1,
confirm_log_dump/1,
confirm_decision_log_dump/0,
previous_log_file/0,
previous_decision_log_file/0,
latest_log_file/0,
decision_log_version/0,
decision_log_file/0,
decision_tab_file/0,
decision_tab_version/0,
dcl_version/0,
dcd_version/0,
ets2dcd/1,
ets2dcd/2,
dcd2ets/1,
dcd2ets/2,
init/0,
init_log_dump/0,
log/1,
slog/1,
log_decision/1,
log_files/0,
open_decision_log/0,
trans_log_header/0,
open_decision_tab/0,
dcl_log_header/0,
dcd_log_header/0,
open_log/4,
open_log/6,
prepare_decision_log_dump/0,
prepare_log_dump/1,
save_decision_tab/1,
purge_all_logs/0,
purge_some_logs/0,
stop/0,
tab_copier/3,
version/0,
view/0,
view/1,
write_trans_log_header/0
]).
-compile({no_auto_import,[error/2]}).
-include("mnesia.hrl").
-import(mnesia_lib, [val/1, dir/1]).
-import(mnesia_lib, [exists/1, fatal/2, error/2, dbg_out/2]).
%% Log header constructors - one per log kind, each stamped with the
%% format version string for that kind.
trans_log_header() -> log_header(trans_log, version()).
backup_log_header() -> log_header(backup_log, "1.2").
decision_log_header() -> log_header(decision_log, decision_log_version()).
decision_tab_header() -> log_header(decision_tab, decision_tab_version()).
dcl_log_header() -> log_header(dcl_log, dcl_version()).
dcd_log_header() -> log_header(dcd_log, dcd_version()).

%% Builds the #log_header{} record written first into every log file.
log_header(Kind, Version) ->
    #log_header{log_version=Version,
                log_kind=Kind,
                mnesia_version=mnesia:system_info(version),
                node=node(),
                now=erlang:timestamp()}.

%% Format version strings for the different log kinds.
version() -> "4.3".
decision_log_version() -> "3.0".
decision_tab_version() -> "1.0".
dcl_version() -> "1.0".
dcd_version() -> "1.0".

%% Asynchronous append; binaries bypass term encoding via balog/2.
append(Log, Bin) when is_binary(Bin) ->
    disk_log:balog(Log, Bin);
append(Log, Term) ->
    disk_log:alog(Log, Term).
%% Synced append
sappend(Log, Bin) when is_binary(Bin) ->
    ok = disk_log:blog(Log, Bin);
sappend(Log, Term) ->
    ok = disk_log:log(Log, Term).
%% Write commit records to the latest_log
%% Asynchronous variant: the append is not waited upon.
log(C) ->
    case need_log(C) andalso mnesia_monitor:use_dir() of
        true ->
            if
                is_record(C, commit) ->
                    append(latest_log, strip_snmp(C));
                true ->
                    %% Either a commit record as binary
                    %% or some decision related info
                    append(latest_log, C)
            end,
            mnesia_dumper:incr_log_writes();
        false ->
            ignore
    end.

%% Synced
%% Same as log/1 but uses a synchronous disk_log append.
slog(C) ->
    case need_log(C) andalso mnesia_monitor:use_dir() of
        true ->
            if
                is_record(C, commit) ->
                    sappend(latest_log, strip_snmp(C));
                true ->
                    %% Either a commit record as binary
                    %% or some decision related info
                    sappend(latest_log, C)
            end,
            mnesia_dumper:incr_log_writes();
        false ->
            ignore
    end.

%% A commit with no disc copies and no schema ops only needs logging
%% when it carries ext_copies entries.
need_log(#commit{disc_copies=[], disc_only_copies=[], schema_ops=[], ext=Ext}) ->
    lists:keymember(ext_copies, 1, Ext);
need_log(_) -> true.

%% SNMP updates are not logged; drop them from the ext list.
strip_snmp(#commit{ext=[]}=CR) -> CR;
strip_snmp(#commit{ext=Ext}=CR) ->
    CR#commit{ext=lists:keydelete(snmp, 1, Ext)}.
%% Stuff related to the file LOG
%% Returns a list of logfiles. The oldest is first.
log_files() -> [previous_log_file(),
                latest_log_file(),
                decision_tab_file()
               ].

%% Absolute paths (within the Mnesia directory) of the various logs.
latest_log_file() -> dir(latest_log_name()).
previous_log_file() -> dir("PREVIOUS.LOG").
decision_log_file() -> dir(decision_log_name()).
decision_tab_file() -> dir(decision_tab_name()).
previous_decision_log_file() -> dir("PDECISION.LOG").

%% Plain file names of the currently active logs.
latest_log_name() -> "LATEST.LOG".
decision_log_name() -> "DECISION.LOG".
decision_tab_name() -> "DECISION_TAB.LOG".

%% Opens a fresh transaction log at startup. Both the previous and the
%% latest log must be absent - a leftover file indicates unclean state
%% and is fatal.
init() ->
    case mnesia_monitor:use_dir() of
        true ->
            Prev = previous_log_file(),
            verify_no_exists(Prev),
            Latest = latest_log_file(),
            verify_no_exists(Latest),
            Header = trans_log_header(),
            open_log(latest_log, Header, Latest);
        false ->
            ok
    end.

%% Aborts startup if the given log file already exists.
verify_no_exists(Fname) ->
    case exists(Fname) of
        false ->
            ok;
        true ->
            fatal("Log file exists: ~tp~n", [Fname])
    end.
%% open_log/3..6 - opens (and possibly repairs) a disk_log. A freshly
%% created file gets the given header written first; previous_log is
%% always opened read-only, everything else read-write.
open_log(Name, Header, Fname) ->
    Exists = exists(Fname),
    open_log(Name, Header, Fname, Exists).
open_log(Name, Header, Fname, Exists) ->
    Repair = mnesia_monitor:get_env(auto_repair),
    open_log(Name, Header, Fname, Exists, Repair).
open_log(Name, Header, Fname, Exists, Repair) ->
    case Name == previous_log of
        true ->
            open_log(Name, Header, Fname, Exists, Repair, read_only);
        false ->
            open_log(Name, Header, Fname, Exists, Repair, read_write)
    end.
open_log(Name, Header, Fname, Exists, Repair, Mode) ->
    Args = [{file, Fname}, {name, Name}, {repair, Repair}, {mode, Mode}],
    %% io:format("~p:open_log: ~tp ~tp~n", [?MODULE, Name, Fname]),
    case mnesia_monitor:open_log(Args) of
        {ok, Log} when Exists == true ->
            Log;
        {ok, Log} ->
            write_header(Log, Header),
            Log;
        {repaired, Log, _, {badbytes, 0}} when Exists == true ->
            Log;
        {repaired, Log, _, {badbytes, 0}} ->
            write_header(Log, Header),
            Log;
        {repaired, Log, _Recover, BadBytes} ->
            %% Repair dropped some bytes - warn but keep going.
            mnesia_lib:important("Data may be missing, log ~tp repaired: Lost ~p bytes~n",
                                 [Fname, BadBytes]),
            Log;
        {error, Reason = {file_error, _Fname, emfile}} ->
            %% Out of file descriptors - deleting would not help; give up.
            fatal("Cannot open log file ~tp: ~tp~n", [Fname, Reason]);
        {error, Reason} when Repair == true ->
            %% Corrupt beyond repair: delete and retry once with a new file.
            file:delete(Fname),
            mnesia_lib:important("Data may be missing, Corrupt logfile deleted: ~tp, ~tp ~n",
                                 [Fname, Reason]),
            %% Create a new
            open_log(Name, Header, Fname, false, false, read_write);
        {error, Reason} ->
            fatal("Cannot open log file ~tp: ~tp~n", [Fname, Reason])
    end.

%% Writes a log header as the first record of a freshly created log.
write_header(Log, Header) ->
    append(Log, Header).
write_trans_log_header() ->
    write_header(latest_log, trans_log_header()).
%% Closes the transaction log on shutdown (no-op for disc-less nodes).
stop() ->
    case mnesia_monitor:use_dir() of
        true ->
            close_log(latest_log);
        false ->
            ok
    end.

%% Syncs and closes a log. A read-only log cannot be synced, which is
%% expected and therefore ignored; other sync failures are only logged.
close_log(Log) ->
    %% io:format("mnesia_log:close_log ~p~n", [Log]),
    %% io:format("mnesia_log:close_log ~p~n", [Log]),
    case disk_log:sync(Log) of
        ok -> ok;
        {error, {read_only_mode, Log}} ->
            ok;
        {error, Reason} ->
            mnesia_lib:important("Failed syncing ~tp to_disk reason ~tp ~n",
                                 [Log, Reason])
    end,
    mnesia_monitor:close_log(Log).

%% Closes without syncing first.
unsafe_close_log(Log) ->
    %% io:format("mnesia_log:close_log ~p~n", [Log]),
    mnesia_monitor:unsafe_close_log(Log).

%% Removes the latest log and decision table file (partial cleanup).
purge_some_logs() ->
    mnesia_monitor:unsafe_close_log(latest_log),
    _ = file:delete(latest_log_file()),
    _ = file:delete(decision_tab_file()),
    ok.

%% Removes all transaction-related log files.
purge_all_logs() ->
    _ = file:delete(previous_log_file()),
    _ = file:delete(latest_log_file()),
    _ = file:delete(decision_tab_file()),
    ok.
%% Prepare dump by renaming the open logfile if possible
%% Returns a tuple on the following format: {Res, OpenLog}
%% where OpenLog is the file descriptor to log file, ready for append
%% and Res is one of the following: already_dumped, needs_dump or {error, Reason}
prepare_log_dump(InitBy) ->
    %% Number of log writes since the last completed dump.
    Diff = mnesia_dumper:get_log_writes() -
           mnesia_lib:read_counter(trans_log_writes_prev),
    if
        Diff == 0, InitBy /= startup ->
            already_dumped;
        true ->
            case mnesia_monitor:use_dir() of
                true ->
                    Prev = previous_log_file(),
                    prepare_prev(Diff, InitBy, Prev, exists(Prev));
                false ->
                    already_dumped
            end
    end.

%% Moves LATEST.LOG aside as PREVIOUS.LOG so new writes go to a fresh
%% latest log while the previous one is dumped. If PREVIOUS.LOG already
%% exists, a pending dump just needs to be finished.
prepare_prev(Diff, _, _, true) ->
    {needs_dump, Diff};
prepare_prev(Diff, startup, Prev, false) ->
    %% At startup the latest log is not open yet - a plain rename works.
    Latest = latest_log_file(),
    case exists(Latest) of
        true ->
            case file:rename(Latest, Prev) of
                ok ->
                    {needs_dump, Diff};
                {error, Reason} ->
                    {error, Reason}
            end;
        false ->
            already_dumped
    end;
prepare_prev(Diff, _InitBy, Prev, false) ->
    %% At runtime the open log must be rotated atomically by the monitor.
    Head = trans_log_header(),
    case mnesia_monitor:reopen_log(latest_log, Prev, Head) of
        ok ->
            {needs_dump, Diff};
        {error, Reason} ->
            Latest = latest_log_file(),
            {error, {"Cannot rename log file",
                     [Latest, Prev, Reason]}}
    end.

%% Init dump and return PrevLogFileDesc or exit.
init_log_dump() ->
    Fname = previous_log_file(),
    open_log(previous_log, trans_log_header(), Fname),
    start.
%% Reads the next chunk of the previous transaction log.
chunk_log(Cont) ->
    chunk_log(previous_log, Cont).
chunk_log(_Log, eof) ->
    eof;
chunk_log(Log, Cont) ->
    case disk_log:chunk(Log, Cont) of
        {error, Reason} ->
            fatal("Possibly truncated ~tp file: ~tp~n",
                  [Log, Reason]);
        {C2, Chunk, _BadBytes} ->
            %% Read_only case, should we warn about the bad log file?
            %% BUGBUG Should we crash if Repair == false ??
            %% We got to check this !!
            mnesia_lib:important("~tp repaired, lost ~p bad bytes~n", [Log, _BadBytes]),
            {C2, Chunk};
        Other ->
            Other
    end.

%% Confirms the dump by closing prev log and delete the file
confirm_log_dump(Updates) ->
    case mnesia_monitor:close_log(previous_log) of
        ok ->
            file:delete(previous_log_file()),
            %% Remember how many writes the dump covered.
            mnesia_lib:incr_counter(trans_log_writes_prev, Updates),
            dumped;
        {error, Reason} ->
            {error, Reason}
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Decision log
open_decision_log() ->
    Latest = decision_log_file(),
    open_log(decision_log, decision_log_header(), Latest),
    start.

%% Rotates DECISION.LOG to PDECISION.LOG before dumping it.
prepare_decision_log_dump() ->
    Prev = previous_decision_log_file(),
    prepare_decision_log_dump(exists(Prev), Prev).
prepare_decision_log_dump(false, Prev) ->
    Head = decision_log_header(),
    case mnesia_monitor:reopen_log(decision_log, Prev, Head) of
        ok ->
            prepare_decision_log_dump(true, Prev);
        {error, Reason} ->
            fatal("Cannot rename decision log file ~tp -> ~tp: ~tp~n",
                  [decision_log_file(), Prev, Reason])
    end;
prepare_decision_log_dump(true, Prev) ->
    open_log(previous_decision_log, decision_log_header(), Prev),
    start.
chunk_decision_log(Cont) ->
    %% dbg_out("chunk log ~p~n", [Cont]),
    chunk_log(previous_decision_log, Cont).

%% Confirms dump of the decision log
confirm_decision_log_dump() ->
    case mnesia_monitor:close_log(previous_decision_log) of
        ok ->
            file:delete(previous_decision_log_file());
        {error, Reason} ->
            fatal("Cannot confirm decision log dump: ~tp~n",
                  [Reason])
    end.

%% Atomically replaces the decision table file: the decisions are first
%% written to a temp file which is then renamed into place.
save_decision_tab(Decisions) ->
    Log = decision_tab,
    Tmp = mnesia_lib:dir("DECISION_TAB.TMP"),
    file:delete(Tmp),
    open_log(Log, decision_tab_header(), Tmp),
    append(Log, Decisions),
    close_log(Log),
    TabFile = decision_tab_file(),
    ok = file:rename(Tmp, TabFile).
open_decision_tab() ->
    TabFile = decision_tab_file(),
    open_log(decision_tab, decision_tab_header(), TabFile),
    start.
close_decision_tab() ->
    close_log(decision_tab).
chunk_decision_tab(Cont) ->
    %% dbg_out("chunk tab ~p~n", [Cont]),
    chunk_log(decision_tab, Cont).
close_decision_log() ->
    close_log(decision_log).
log_decision(Decision) ->
    append(decision_log, Decision).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Debug functions

%% Pretty-prints the contents of all known log files.
view() ->
    lists:foreach(fun(F) -> view(F) end, log_files()).

%% Pretty-prints the contents of a single log file (opened read-only).
view(File) ->
    mnesia_lib:show("***** ~tp ***** ~n", [File]),
    case exists(File) of
        false ->
            nolog;
        true ->
            N = view_only,
            Args = [{file, File}, {name, N}, {mode, read_only}],
            case disk_log:open(Args) of
                {ok, N} ->
                    view_file(start, N);
                {repaired, _, _, _} ->
                    view_file(start, N);
                {error, Reason} ->
                    error("Cannot open log ~tp: ~tp~n", [File, Reason])
            end
    end.

%% Prints each term of the log, chunk by chunk, until eof or error.
view_file(C, Log) ->
    case disk_log:chunk(Log, C) of
        {error, Reason} ->
            error("** Possibly truncated FILE ~tp~n", [Reason]),
            error;
        eof ->
            disk_log:close(Log),
            eof;
        {C2, Terms, _BadBytes} ->
            dbg_out("Lost ~p bytes in ~tp ~n", [_BadBytes, Log]),
            lists:foreach(fun(X) -> mnesia_lib:show("~tp~n", [X]) end,
                          Terms),
            view_file(C2, Log);
        {C2, Terms} ->
            lists:foreach(fun(X) -> mnesia_lib:show("~tp~n", [X]) end,
                          Terms),
            view_file(C2, Log)
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Backup

%% Collected settings controlling an ongoing backup.
-record(backup_args, {name, module, opaque, scope, prev_name, tables, cookie}).
backup(Opaque) ->
    backup(Opaque, []).

%% Runs a full backup via a temporary checkpoint covering all tables.
backup(Opaque, Mod) when is_atom(Mod) ->
    backup(Opaque, [{module, Mod}]);
backup(Opaque, Args) when is_list(Args) ->
    %% Backup all tables with max redundancy
    CpArgs = [{ram_overrides_dump, false}, {max, val({schema, tables})}],
    case mnesia_checkpoint:activate(CpArgs) of
        {ok, Name, _Nodes} ->
            Res = backup_checkpoint(Name, Opaque, Args),
            mnesia_checkpoint:deactivate(Name),
            Res;
        {error, Reason} ->
            {error, Reason}
    end.
backup_checkpoint(Name, Opaque) ->
    backup_checkpoint(Name, Opaque, []).

%% Backs up an existing checkpoint; the actual work runs in a spawned
%% worker process (backup_master/2) whose result is awaited here.
backup_checkpoint(Name, Opaque, Mod) when is_atom(Mod) ->
    backup_checkpoint(Name, Opaque, [{module, Mod}]);
backup_checkpoint(Name, Opaque, Args) when is_list(Args) ->
    DefaultMod = mnesia_monitor:get_env(backup_module),
    B = #backup_args{name = Name,
                     module = DefaultMod,
                     opaque = Opaque,
                     scope = global,
                     tables = all,
                     prev_name = Name},
    case check_backup_args(Args, B) of
        {ok, B2} ->
            %% Decentralized backup
            %% Incremental
            Self = self(),
            Pid = spawn_link(?MODULE, backup_master, [Self, B2]),
            receive
                {Pid, Self, Res} -> Res
            end;
        {error, Reason} ->
            {error, Reason}
    end.
%% Folds the user-supplied option list into the #backup_args{} record.
%% Returns {ok, B} or {error, {badarg, Opt}} for the first option that
%% check_backup_arg_type/2 rejects.
check_backup_args([], Acc) ->
    {ok, Acc};
check_backup_args([Opt | Rest], Acc) ->
    try check_backup_arg_type(Opt, Acc) of
        Updated ->
            check_backup_args(Rest, Updated)
    catch
        error:_ ->
            {error, {badarg, Opt}}
    end.
%% Translates one backup option into an update of #backup_args{}.
%% Any unrecognized option falls through the case and raises, which the
%% caller (check_backup_args/2) turns into {error, {badarg, Arg}}.
check_backup_arg_type(Arg, B) ->
    case Arg of
        {scope, global} ->
            B#backup_args{scope = global};
        {scope, local} ->
            B#backup_args{scope = local};
        {module, Mod} ->
            Mod2 = mnesia_monitor:do_check_type(backup_module, Mod),
            B#backup_args{module = Mod2};
        {incremental, Name} ->
            B#backup_args{prev_name = Name};
        {tables, Tabs} when is_list(Tabs) ->
            B#backup_args{tables = Tabs}
    end.
%% Runs the backup in its own process and reports the result back to
%% ClientPid as {self(), ClientPid, Res}. Traps exits so failures in
%% backup callbacks become an {error, {'EXIT', Reason}} reply instead
%% of killing the (linked) caller.
backup_master(ClientPid, B) ->
    process_flag(trap_exit, true),
    try do_backup_master(B) of
        Res ->
            ClientPid ! {self(), ClientPid, Res}
    catch _:Reason ->
            ClientPid ! {self(), ClientPid, {error, {'EXIT', Reason}}}
    end,
    unlink(ClientPid),
    exit(normal).
%% Writes the complete backup: opens the target, writes the backup
%% header, writes the schema first, then every selected table, and
%% finally commits via the callback module. Aborts the backup when the
%% checkpoint cannot report its tables and cookie.
do_backup_master(B) ->
    Name = B#backup_args.name,
    B2 = safe_apply(B, open_write, [B#backup_args.opaque]),
    B3 = safe_write(B2, [backup_log_header()]),
    case mnesia_checkpoint:tables_and_cookie(Name) of
        {ok, AllTabs, Cookie} ->
            Tabs = select_tables(AllTabs, B3),
            B4 = B3#backup_args{cookie = Cookie},
            %% Always put schema first in backup file
            B5 = backup_schema(B4, Tabs),
            B6 = lists:foldl(fun backup_tab/2, B5, Tabs -- [schema]),
            safe_apply(B6, commit_write, [B6#backup_args.opaque]),
            ok;
        {error, Reason} ->
            abort_write(B3, {?MODULE, backup_master}, [B], {error, Reason})
    end.
%% Narrows the checkpoint's tables to the user's selection and, for a
%% local-scope backup, to the tables whose closest checkpoint replica
%% lives on this node.
select_tables(AllTabs, B) ->
    Chosen =
        case B#backup_args.tables of
            all -> AllTabs;
            Some when is_list(Some) -> Some
        end,
    case B#backup_args.scope of
        local ->
            Name = B#backup_args.name,
            IsLocal =
                fun(T) ->
                        mnesia_checkpoint:most_local_node(Name, T) == {ok, node()}
                end,
            lists:filter(IsLocal, Chosen);
        global ->
            Chosen
    end.
%% Writes records through the backup callback module; a no-op for an
%% empty record list.
safe_write(B, []) ->
    B;
safe_write(B, Recs) ->
    safe_apply(B, write, [B#backup_args.opaque, Recs]).

%% Puts the schema into the backup. When schema is not among the
%% selected tables, only the create lists of those tables are written.
backup_schema(B, Tabs) ->
    case lists:member(schema, Tabs) of
        true ->
            backup_tab(schema, B);
        false ->
            Defs = [{schema, T, mnesia_schema:get_create_list(T)} || T <- Tabs],
            safe_write(B, Defs)
    end.
%% Applies a backup-callback function, aborting the whole backup on any
%% failure. Before doing work it polls the mailbox (after 0) for 'EXIT'
%% messages from linked processes; writing an empty record list is a
%% no-op.
safe_apply(B, write, [_, Items]) when Items == [] ->
    B;
safe_apply(B, What, Args) ->
    Abort = abort_write_fun(B, What, Args),
    receive
        {'EXIT', Pid, R} -> Abort({'EXIT', Pid, R})
    after 0 ->
        Mod = B#backup_args.module,
        try apply(Mod, What, Args) of
            {ok, Opaque} -> B#backup_args{opaque=Opaque};
            {error, R} -> Abort(R)
        catch _:R -> Abort(R)
        end
    end.

%% Returns a closure that aborts the backup with the captured context.
-spec abort_write_fun(_, _, _) -> fun((_) -> no_return()).
abort_write_fun(B, What, Args) ->
    fun(R) -> abort_write(B, What, Args, R) end.

%% Tells the callback module to abort the backup, then always throws
%% {error, Reason}; a failing abort_write callback is only reported.
abort_write(B, What, Args, Reason) ->
    Mod = B#backup_args.module,
    Opaque = B#backup_args.opaque,
    dbg_out("Failed to perform backup. M=~p:F=~tp:A=~tp -> ~tp~n",
            [Mod, What, Args, Reason]),
    try {ok, _Res} = apply(Mod, abort_write, [Opaque]) of
        _ -> throw({error, Reason})
    catch _:Other ->
            error("Failed to abort backup. ~p:~tp~tp -> ~tp~n",
                  [Mod, abort_write, [Opaque], Other]),
            throw({error, Reason})
    end.
%% Backs up one table. If the closest checkpoint replica is on this
%% node the records are copied directly; otherwise a tab_copier is
%% spawned on the remote node and its records are streamed back via
%% tab_receiver/5.
backup_tab(Tab, B) ->
    Name = B#backup_args.name,
    case mnesia_checkpoint:most_local_node(Name, Tab) of
        {ok, Node} when Node == node() ->
            tab_copier(self(), B, Tab);
        {ok, Node} ->
            RemoteB = B,
            Pid = spawn_link(Node, ?MODULE, tab_copier, [self(), RemoteB, Tab]),
            RecName = val({Tab, record_name}),
            tab_receiver(Pid, B, Tab, RecName, 0);
        {error, Reason} ->
            abort_write(B, {?MODULE, backup_tab}, [Tab, B], {error, Reason})
    end.
%% Runs on the node holding the table/retainer and feeds the records
%% either directly into the backup (when Pid is this process) or to a
%% remote tab_receiver.
tab_copier(Pid, B, Tab) when is_record(B, backup_args) ->
    %% Intentional crash at exit
    Name = B#backup_args.name,
    PrevName = B#backup_args.prev_name,
    {FirstName, FirstSource} = select_source(Tab, Name, PrevName),
    ?eval_debug_fun({?MODULE, tab_copier, pre}, [{name, Name}, {tab, Tab}]),
    Res = handle_more(Pid, B, Tab, FirstName, FirstSource, Name),
    ?eval_debug_fun({?MODULE, tab_copier, post}, [{name, Name}, {tab, Tab}]),
    handle_last(Pid, Res).
%% Picks the checkpoint and source kind to read the table from.
%% Schema is always backed up in full, as is the case when no previous
%% checkpoint name was supplied. An incremental backup is accepted only
%% when the previous checkpoint's closest replica is on this node;
%% otherwise it is escalated to a full backup.
select_source(schema, Name, _PrevName) ->
    %% Always full backup of schema
    {Name, table};
select_source(_Tab, Name, PrevName) when Name == PrevName ->
    %% Full backup
    {Name, table};
select_source(Tab, Name, PrevName) ->
    %% Wants incremental backup
    case mnesia_checkpoint:most_local_node(PrevName, Tab) of
        {ok, Node} when Node == node() ->
            %% Accept incremental backup
            {PrevName, retainer};
        _ ->
            %% Do a full backup anyway
            dbg_out("Incremental backup escalated to full backup: ~tp~n", [Tab]),
            {Name, table}
    end.
%% Decides how to iterate the checkpoint data for one table. The two
%% checkpoints must agree on their really_retain setting (that is, on
%% whether the dumped file replaces the ram table); otherwise the
%% backup is aborted.
handle_more(Pid, B, Tab, FirstName, FirstSource, Name) ->
    Acc = {0, B},
    case {mnesia_checkpoint:really_retain(Name, Tab),
          mnesia_checkpoint:really_retain(FirstName, Tab)} of
        {true, true} ->
            %% Two passes: the (possibly previous) source, then the
            %% retained changes of the current checkpoint
            Acc2 = iterate(B, FirstName, Tab, Pid, FirstSource, latest, first, Acc),
            iterate(B, Name, Tab, Pid, retainer, checkpoint, last, Acc2),
        {false, false}->
            %% Put the dumped file in the backup
            %% instead of the ram table. Does
            %% only apply to ram_copies.
            iterate(B, Name, Tab, Pid, retainer, checkpoint, last, Acc);
        Bad ->
            Reason = {"Checkpoints for incremental backup must have same "
                      "setting of ram_overrides_dump",
                      Tab, Name, FirstName, Bad},
            abort_write(B, {?MODULE, backup_tab}, [Tab, B], {error, Reason})
    end.
%% Local case: simply return the accumulated #backup_args{}.
handle_last(Pid, {_Count, B}) when Pid == self() ->
    B;
%% Remote case: tell the receiver we are done and terminate.
handle_last(Pid, _Acc) ->
    unlink(Pid),
    Pid ! {self(), {last, {ok, dummy}}},
    exit(normal).

%% Iterates over the checkpoint data of one table. Records are either
%% written directly into the backup (local) or sent to the receiving
%% process (remote). Aborts the backup on iteration failure.
iterate(B, Name, Tab, Pid, Source, Age, Pass, Acc) ->
    Fun =
        if
            Pid == self() ->
                RecName = val({Tab, record_name}),
                fun(Recs, A) -> copy_records(RecName, Tab, Recs, A) end;
            true ->
                fun(Recs, A) -> send_records(Pid, Tab, Recs, Pass, A) end
        end,
    case mnesia_checkpoint:iterate(Name, Tab, Fun, Acc, Source, Age) of
        {ok, Acc2} ->
            Acc2;
        {error, Reason} ->
            R = {error, {"Tab copier iteration failed", Reason}},
            abort_write(B, {?MODULE, iterate}, [self(), B, Tab], R)
    end.

%% Local writer: filter the records and write them to the backup.
copy_records(_RecName, _Tab, [], Acc) ->
    Acc;
copy_records(RecName, Tab, Recs, {Count, B}) ->
    Recs2 = rec_filter(B, Tab, RecName, Recs),
    B2 = safe_write(B, Recs2),
    {Count + 1, B2}.
%% Remote sender side of a simple flow-controlled protocol: wait for
%% the receiver to request batch number Count before sending the next
%% batch as {self(), {more, Next, Recs}}. An empty batch in the last
%% pass ends the exchange. Any other message is a protocol error.
send_records(Pid, Tab, Recs, Pass, {Count, B}) ->
    receive
        {Pid, more, Count} ->
            if
                Pass == last, Recs == [] ->
                    {Count, B};
                true ->
                    Next = Count + 1,
                    Pid ! {self(), {more, Next, Recs}},
                    {Next, B}
            end;
        Msg ->
            exit({send_records_unexpected_msg, Tab, Msg})
    end.
%% Receiver side of the record-streaming protocol: asks the remote
%% tab_copier for the next batch, filters and writes it to the backup,
%% and aborts the backup if the copier crashes or anything unexpected
%% arrives.
tab_receiver(Pid, B, Tab, RecName, Slot) ->
    Pid ! {self(), more, Slot},
    receive
        {Pid, {more, Next, Recs}} ->
            Recs2 = rec_filter(B, Tab, RecName, Recs),
            B2 = safe_write(B, Recs2),
            tab_receiver(Pid, B2, Tab, RecName, Next);
        {Pid, {last, {ok,_}}} ->
            B;
        {'EXIT', Pid, {error, R}} ->
            Reason = {error, {"Tab copier crashed", R}},
            abort_write(B, {?MODULE, remote_tab_sender}, [self(), B, Tab], Reason);
        {'EXIT', Pid, R} ->
            Reason = {error, {"Tab copier crashed", {'EXIT', R}}},
            abort_write(B, {?MODULE, remote_tab_sender}, [self(), B, Tab], Reason);
        Msg ->
            R = {error, {"Tab receiver got unexpected msg", Msg}},
            abort_write(B, {?MODULE, remote_tab_sender}, [self(), B, Tab], R)
    end.
%% Prepares records for the backup: refreshes the cookie in schema
%% records, passes records through untouched when the record name
%% already equals the table name, and otherwise re-tags each record
%% with the table name.
rec_filter(B, schema, _RecName, Recs) ->
    try mnesia_bup:refresh_cookie(Recs, B#backup_args.cookie)
    catch throw:{error, _Reason} ->
            %% No schema table cookie
            Recs
    end;
rec_filter(_B, Tab, Tab, Recs) ->
    Recs;
rec_filter(_B, Tab, _RecName, Recs) ->
    [setelement(1, Rec, Tab) || Rec <- Recs].
%% Dumps a ram_copies table to its .DCD (disc copy data) file.
ets2dcd(Tab) ->
    ets2dcd(Tab, dcd).

%% Dumps Tab into a dcd or dmp file. The table is written to a
%% temporary file first and then renamed into place; the now obsolete
%% .DCL change log is deleted afterwards. The table is fixed during
%% the dump so the chunk traversal is stable.
ets2dcd(Tab, Ftype) ->
    Fname =
        case Ftype of
            dcd -> mnesia_lib:tab2dcd(Tab);
            dmp -> mnesia_lib:tab2dmp(Tab)
        end,
    TmpF = mnesia_lib:tab2tmp(Tab),
    file:delete(TmpF),
    Log = open_log({Tab, ets2dcd}, dcd_log_header(), TmpF, false),
    mnesia_lib:db_fixtable(ram_copies, Tab, true),
    ok = ets2dcd(mnesia_lib:db_init_chunk(ram_copies, Tab, 1000), Tab, Log),
    mnesia_lib:db_fixtable(ram_copies, Tab, false),
    close_log(Log),
    ok = file:rename(TmpF, Fname),
    %% Remove old log data which is now in the new dcd.
    %% No one else should be accessing this file!
    file:delete(mnesia_lib:tab2dcl(Tab)),
    ok.

%% Writes the table chunk by chunk to the open disk_log.
ets2dcd('$end_of_table', _Tab, _Log) ->
    ok;
ets2dcd({Recs, Cont}, Tab, Log) ->
    ok = disk_log:log_terms(Log, Recs),
    ets2dcd(mnesia_lib:db_chunk(ram_copies, Cont), Tab, Log).
%% Loads a table from disc into ets, using the configured auto_repair
%% setting.
dcd2ets(Tab) ->
    dcd2ets(Tab, mnesia_monitor:get_env(auto_repair)).

%% Loads the .DCD file (if present) and then replays the .DCL change
%% log on top of it; returns the number of replayed log records. Falls
%% back to converting an old .DAT dets file (e.g. after a change from
%% disc_only to disc copies), in which case 0 is returned.
dcd2ets(Tab, Rep) ->
    Dcd = mnesia_lib:tab2dcd(Tab),
    case mnesia_lib:exists(Dcd) of
        true ->
            Log = open_log({Tab, dcd2ets}, dcd_log_header(), Dcd,
                           true, Rep, read_only),
            Data = chunk_log(Log, start),
            ok = insert_dcdchunk(Data, Log, Tab),
            close_log(Log),
            load_dcl(Tab, Rep);
        false -> %% Handle old dets files, and conversion from disc_only to disc.
            Fname = mnesia_lib:tab2dat(Tab),
            Type = val({Tab, setorbag}),
            case mnesia_lib:dets_to_ets(Tab, Tab, Fname, Type, Rep, yes) of
                loaded ->
                    ets2dcd(Tab),
                    file:delete(Fname),
                    0;
                {error, Error} ->
                    erlang:error({"Failed to load table from disc", [Tab, Error]})
            end
    end.

%% Inserts chunks from the dcd log into ets, skipping the log header
%% record that starts the file.
insert_dcdchunk({Cont, [LogH | Rest]}, Log, Tab)
  when is_record(LogH, log_header),
       LogH#log_header.log_kind == dcd_log,
       LogH#log_header.log_version >= "1.0" ->
    insert_dcdchunk({Cont, Rest}, Log, Tab);
insert_dcdchunk({Cont, Recs}, Log, Tab) ->
    true = ets:insert(Tab, Recs),
    insert_dcdchunk(chunk_log(Log, Cont), Log, Tab);
insert_dcdchunk(eof, _Log, _Tab) ->
    ok.
%% Replays the table's .DCL (disc change log), if it exists, on top of
%% the already loaded ets table. Returns the number of applied log
%% records (0 when there is no log).
load_dcl(Tab, Rep) ->
    FName = mnesia_lib:tab2dcl(Tab),
    case mnesia_lib:exists(FName) of
        true ->
            Name = {load_dcl,Tab},
            open_log(Name,
                     dcl_log_header(),
                     FName,
                     true,
                     Rep,
                     read_only),
            FirstChunk = chunk_log(Name, start),
            N = insert_logchunk(FirstChunk, Name, 0),
            close_log(Name),
            N;
        false ->
            0
    end.

%% Applies log chunks via add_recs/2, accumulating the record count.
insert_logchunk({C2, Recs}, Tab, C) ->
    N = add_recs(Recs, C),
    insert_logchunk(chunk_log(Tab, C2), Tab, C+N);
insert_logchunk(eof, _Tab, C) ->
    C.
%% Applies one logged operation at a time to the ets table and counts
%% the applied records. Handles write, delete, delete_object,
%% update_counter and clear_table operations; log header records are
%% skipped without being counted.
add_recs([{{Tab, _Key}, Val, write} | Rest], N) ->
    true = ets:insert(Tab, Val),
    add_recs(Rest, N+1);
add_recs([{{Tab, Key}, _Val, delete} | Rest], N) ->
    true = ets:delete(Tab, Key),
    add_recs(Rest, N+1);
add_recs([{{Tab, _Key}, Val, delete_object} | Rest], N) ->
    true = ets:match_delete(Tab, Val),
    add_recs(Rest, N+1);
add_recs([{{Tab, Key}, Val, update_counter} | Rest], N) ->
    {RecName, Incr} = Val,
    try
        CounterVal = ets:update_counter(Tab, Key, Incr),
        true = (CounterVal >= 0)
    catch
        %% Missing counter object or negative result: (re)initialize
        %% the counter; clamp to 0 for a negative increment
        error:_ when Incr < 0 ->
            Zero = {RecName, Key, 0},
            true = ets:insert(Tab, Zero);
        error:_ ->
            Zero = {RecName, Key, Incr},
            true = ets:insert(Tab, Zero)
    end,
    add_recs(Rest, N+1);
add_recs([LogH|Rest], N)
  when is_record(LogH, log_header),
       LogH#log_header.log_kind == dcl_log,
       LogH#log_header.log_version >= "1.0" ->
    add_recs(Rest, N);
add_recs([{{Tab, _Key}, _Val, clear_table} | Rest], N) ->
    Size = ets:info(Tab, size),
    true = ets:delete_all_objects(Tab),
    add_recs(Rest, N+Size);
add_recs([], N) ->
    N.
-module(erlmachine_supervisor_prototype).
%% NOTE: The main purpose of a supervisor prototype is the ability to change the monitoring layer without affecting the credentials layer of an extension
%% NOTE: There are a few examples of monitoring implementations:
%% 1. erlang:monitor/2
%% 2. supervisor2
%% 3. mirrored_supervisor
%% NOTE: There should exist a "mother" or bootloader extension which will load and initiate the transmission
%% NOTE: Supervisor prototype concerns: health check, recovery management
%% NOTE: In comparison to worker prototype a supervisor prototype is state-less
%% TODO:
%% a) To simplify context for network transmission
%% b) To gather statistics into graph
%% TODO https://github.com/rabbitmq/rabbitmq-common/blob/master/src/supervisor2.erl
%% API
-export([is_supervisor_prototype/1]).
-export([startup/2]).
-export([install/2, uninstall/2]).
%% Context API
-export([init/1, start_child/1, terminate_child/1]).
-type context() :: term().
-include("erlmachine_factory.hrl").
-include("erlmachine_assembly.hrl").
-include("erlmachine_graph.hrl").
-include("erlmachine_system.hrl").
-callback prototype_init(SN::serial_no(), Specs::[map()], Context::context(), Opt::map()) ->
success(pid()) | failure(term(), term()).
-callback prototype_start_child(SN::serial_no(), Spec::map(), Context::context()) ->
success(pid()) | failure(term(), term()).
-callback prototype_terminate_child(SN::serial_no(), ID::term(), Context::context()) ->
success().
%% Returns true when Module declares this module among its behaviours.
-spec is_supervisor_prototype(Module::atom()) -> boolean().
is_supervisor_prototype(Module) ->
    lists:member(?MODULE, erlmachine:behaviours(Module)).
%%% Transmission API
%% Starts the supervisor prototype for Assembly with child specs built
%% from Exts, delegating to the prototype module's prototype_init/4.
-spec startup(Assembly::assembly(), Exts::[assembly()]) ->
                     success(pid()) | failure(term(), term()).
startup(Assembly, Exts) ->
    SN = erlmachine_assembly:serial_no(Assembly),
    Prot = erlmachine_assembly:prototype(Assembly),
    Module = erlmachine_prototype:module(Prot), Opt = erlmachine_prototype:options(Prot),
    Specs = [erlmachine_transmission:spec(Ext)|| Ext <- Exts],
    Module:prototype_init(SN, Specs, _Context = [Assembly, Exts], Opt).

%% Starts one child (Ext) under the supervisor of Assembly via the
%% prototype module's prototype_start_child/3.
-spec install(Assembly::assembly(), Ext::assembly()) ->
                     success(pid()) | failure(term(), term()).
install(Assembly, Ext) ->
    SN = erlmachine_assembly:serial_no(Assembly),
    Spec = erlmachine_transmission:spec(Ext),
    Prot = erlmachine_assembly:prototype(Assembly),
    Module = erlmachine_prototype:module(Prot),
    Module:prototype_start_child(SN, Spec, _Context = [Assembly, Ext]).
%% Terminates the child identified by vertex V via the prototype
%% module's prototype_terminate_child/3.
%% Spec fix: the previous spec promised only failure(term(), term()),
%% but the prototype_terminate_child/3 callback is specified to return
%% success(), so the normal result was missing from the spec.
-spec uninstall(Assembly::assembly(), V::vertex()) ->
                       success() | failure(term(), term()).
uninstall(Assembly, V) ->
    SN = erlmachine_assembly:serial_no(Assembly),
    Prot = erlmachine_assembly:prototype(Assembly),
    Module = erlmachine_prototype:module(Prot),
    Module:prototype_terminate_child(SN, V, _Context = [Assembly, V]).
%%% Prototype API
%% Prototype API: these are called back by concrete prototype modules
%% with the context built above, and forward to the supervisor model.

%% Boots the supervisor model with the assembly and its extensions.
-spec init(Context::context()) ->
                  success() | failure(term(), term()).
init(Context) ->
    [Assembly, Exts] = Context,
    erlmachine_supervisor_model:startup(Assembly, Exts).

%% Installs one extension into the supervisor model.
-spec start_child(Context::context()) ->
                         success() | failure(term(), term()).
start_child(Context) ->
    [Assembly, Ext] = Context,
    erlmachine_supervisor_model:install(Assembly, Ext).

%% Uninstalls the extension at vertex V from the supervisor model.
-spec terminate_child(Context::context()) ->
                             success().
terminate_child(Context) ->
    [Assembly, V] = Context,
    erlmachine_supervisor_model:uninstall(Assembly, V).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2020. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Try to calculate the exact size of bitstring produced by a binary
%% comprehension such as:
%%
%% << <<X:16>> || <<X:4>> <= Bs >>
%%
%% For this example, the size of the resulting binary (rounded up to
%% the nearest number of bytes) will be:
%%
%% ((bit_size(Bs) div 4) * 16 + 7) div 8
%%
%% If the exact size can't be determined, such as for this comprehension:
%%
%% << <<X:16>> || <<X:4>> <= Bs, X =/= 15 >>
%%
%% the default size of 256 bytes will be used as starting size of
%% the writable binary.
%%
-module(beam_ssa_bc_size).
-export([opt/1]).
-import(lists, [any/2,member/2,reverse/1,sort/1]).
-include("beam_ssa_opt.hrl").
%% Entry point: tries the binary-comprehension size calculation on
%% every function in the module's st_map.
-spec opt(st_map()) -> st_map().
opt(StMap) ->
    opt(maps:keys(StMap), StMap).
%% Runs opt_function/2 over each function id, threading the st_map
%% through as the accumulator.
opt(Ids, StMap) ->
    lists:foldl(fun opt_function/2, StMap, Ids).
%% Optimizes one function. On an unexpected exception, the function
%% name/arity is printed before the exception is re-raised, to ease
%% debugging of compiler crashes.
opt_function(Id, StMap) ->
    #opt_st{anno=Anno,ssa=Linear0,cnt=Count0} = OptSt0 = map_get(Id, StMap),
    ParamInfo = maps:get(parameter_info, Anno, #{}),
    try opt_blks(Linear0, ParamInfo, StMap, unchanged, Count0, []) of
        {Linear,Count} ->
            OptSt = OptSt0#opt_st{ssa=Linear,cnt=Count},
            StMap#{Id := OptSt};
        none ->
            %% No bs_init_writable could be improved
            StMap
    catch
        Class:Error:Stack ->
            #b_local{name=#b_literal{val=Name},arity=Arity} = Id,
            io:fwrite("Function: ~w/~w\n", [Name,Arity]),
            erlang:raise(Class, Error, Stack)
    end.
%% Scans the blocks for one consisting of a single bs_init_writable
%% instruction and tries to replace its default size with a computed
%% one. A not_possible throw keeps the original block. Returns none
%% when nothing was changed.
opt_blks([{L,#b_blk{is=Is}=Blk}|Blks], ParamInfo, StMap, AnyChange, Count0, Acc0) ->
    case Is of
        [#b_set{op=bs_init_writable,dst=Dst}] ->
            %% Seed the bindings with the empty writable binary
            Bs = #{st_map => StMap, Dst => {writable,#b_literal{val=0}}},
            try opt_writable(Bs, L, Blk, Blks, ParamInfo, Count0, Acc0) of
                {Acc,Count} ->
                    opt_blks(Blks, ParamInfo, StMap, changed, Count, Acc)
            catch
                throw:not_possible ->
                    opt_blks(Blks, ParamInfo, StMap, AnyChange, Count0, [{L,Blk}|Acc0])
            end;
        _ ->
            opt_blks(Blks, ParamInfo, StMap, AnyChange, Count0, [{L,Blk}|Acc0])
    end;
opt_blks([], _ParamInfo, _StMap, changed, Count, Acc) ->
    {reverse(Acc),Count};
opt_blks([], _ParamInfo, _StMap, unchanged, _Count, _Acc) ->
    none.
%% The bs_init_writable block must fall through directly into a call
%% of the comprehension function. That call is analyzed to compute the
%% total size, and size-calculation code is generated before the
%% bs_init_writable instruction.
opt_writable(Bs0, L, Blk, Blks, ParamInfo, Count0, Acc0) ->
    case {Blk,Blks} of
        {#b_blk{last=#b_br{succ=Next,fail=Next}},
         [{Next,#b_blk{is=[#b_set{op=call,args=[_|Args],dst=Dst}=Call|_]}}|_]} ->
            ensure_not_match_context(Call, ParamInfo),
            %% Track the call arguments symbolically
            ArgTypes = maps:from_list([{Arg,{arg,Arg}} || Arg <- Args]),
            Bs = maps:merge(ArgTypes, Bs0),
            Result = map_get(Dst, call_size_func(Call, Bs)),
            {Expr,Annos} = make_expr_tree(Result),
            cg_size_calc(Expr, L, Blk, Annos, Count0, Acc0);
        {_,_} ->
            throw(not_possible)
    end.
%% Aborts the optimization when the generator is (or may be) a match
%% context, either reused directly or passed in from a caller.
ensure_not_match_context(#b_set{anno=Anno,args=[_|Args]}, ParamInfo) ->
    case maps:get(bsm_info, Anno, []) of
        context_reused ->
            %% The generator is a match context. The optimization is
            %% not safe. Example:
            %%
            %%    f(<<B/binary>>) ->
            %%        << <<V>> || <<V>> <= B >>.
            throw(not_possible);
        _ ->
            case any(fun(V) ->
                             member(accepts_match_context,
                                    maps:get(V, ParamInfo, []))
                     end, Args) of
                true ->
                    %% Match context is passed from the calling function. Example:
                    %%   f0(<<B/binary>>) -> f1(B).
                    %%   f1(B) -> << <<V>> || <<V>> <= B >>.
                    throw(not_possible);
                false ->
                    ok
            end
    end.
%%%
%%% Traverse the SSA code of the binary comprehension functions to
%%% figure out the exact size for the writable binary. This algorithm
%%% is similar to how types are determined by beam_ssa_type, but here
%%% we only care about how many bits are matched of from the generators
%%% and how many bits are appended to the writable binary.
%%%
%% Abstractly interprets a call instruction. A call to a function in
%% this module is analyzed only when the writable binary is among the
%% arguments (otherwise it cannot affect the size). A call to
%% erlang:error/1 is used to capture the bad_generator annotation;
%% anything else yields 'any'.
call_size_func(#b_set{anno=Anno,op=call,args=[Name|Args],dst=Dst}, Bs) ->
    StMap = map_get(st_map, Bs),
    case StMap of
        #{Name := #opt_st{ssa=Linear,args=Params}} ->
            NewBs0 = setup_call_bs(Params, Args, Bs, #{}),
            case any(fun({writable,_}) -> true;
                        (_) -> false
                     end, maps:values(NewBs0)) of
                false ->
                    %% Since the writable binary is not passed to the called function,
                    %% it can't have any effect on the size of the writable binary
                    %% and there is no need to analyze it.
                    Bs#{Dst => any};
                true ->
                    NewBs = NewBs0#{Name => self, st_map => StMap},
                    Map0 = #{0 => NewBs},
                    Result = calc_size(Linear, Map0),
                    Bs#{Dst => Result}
            end;
        #{} ->
            case Name of
                #b_remote{mod=#b_literal{val=erlang},
                          name=#b_literal{val=error},
                          arity=1} ->
                    capture_anno(Anno, Dst, Args, Bs#{Dst => exception});
                _ ->
                    Bs#{Dst => any}
            end
    end.
%% Records the generator variable and the call's annotation from the
%% tuple passed to erlang:error/1, so the generated code can raise a
%% proper bad_generator error with correct location information.
capture_anno(Anno, Dst, [ErrorTerm], Bs) ->
    case get_value(ErrorTerm, Bs) of
        {tuple,Elements} ->
            Ts = [get_value(E, Bs) || E <- Elements],
            capture_anno_1(Anno, Dst, Ts, Bs);
        _ ->
            Bs
    end.

%% Finds the first element that traces back to a generator value.
capture_anno_1(Anno, Dst, [{nil_or_bad,Generator}|_], Bs) ->
    Bs#{Dst => {generator_anno,{Generator,Anno}}};
capture_anno_1(Anno, Dst, [{arg,Generator}|_], Bs) ->
    Bs#{Dst => {generator_anno,{Generator,Anno}}};
capture_anno_1(Anno, Dst, [_|T], Bs) ->
    capture_anno_1(Anno, Dst, T, Bs);
capture_anno_1(_, _, [], Bs) ->
    Bs.
%% Builds the initial bindings for analyzing a called function:
%% literals and function arguments are tracked symbolically, the
%% still-empty writable binary is kept, and everything else becomes
%% 'any'.
setup_call_bs([V|Vs], [A0|As], OldBs, NewBs) ->
    A = case get_value(A0, OldBs) of
            #b_literal{}=Lit -> {arg,Lit};
            {writable,#b_literal{val=0}}=Wr -> Wr;
            {arg,_}=Arg -> Arg;
            _ -> any
        end,
    setup_call_bs(Vs, As, OldBs, NewBs#{V => A});
setup_call_bs([], [], #{}, NewBs) -> NewBs.
%% Walks the blocks of a comprehension function, threading bindings
%% through the reachable blocks and recording interesting return
%% values. Succeeds only when exactly one call result and one
%% generator annotation remain; anything else aborts the optimization.
calc_size([{L,#b_blk{is=Is,last=Last}}|Blks], Map0) ->
    case maps:take(L, Map0) of
        {Bs0,Map1} ->
            Bs1 = calc_size_is(Is, Bs0),
            Map2 = update_successors(Last, Bs1, Map1),
            case get_ret(Last, Bs1) of
                none ->
                    calc_size(Blks, Map2);
                Ret ->
                    %% Save information about the function returned.
                    Map = Map2#{L => Ret},
                    calc_size(Blks, Map)
            end;
        error ->
            %% This block is unreachable.
            calc_size(Blks, Map0)
    end;
calc_size([], Map) ->
    case sort(maps:values(Map)) of
        [{call,_}=Call,{generator_anno,GenAnno}] ->
            {Call,GenAnno};
        _ ->
            throw(not_possible)
    end.

%% Extracts an interesting value from a return instruction; exceptions
%% and returning the still-empty writable binary are ignored.
get_ret(#b_ret{arg=Arg}, Bs) ->
    case get_value(Arg, Bs) of
        exception ->
            none;
        {writable,#b_literal{val=0}} ->
            none;
        {generator_anno,_}=GenAnno ->
            GenAnno;
        Ret ->
            Ret
    end;
get_ret(_, _) -> none.
%% Propagates the bindings to the successors of a block. A switch
%% implies a filter, which makes the exact size impossible to
%% calculate, so it aborts the optimization.
update_successors(#b_br{bool=Bool,succ=Succ,fail=Fail}, Bs0, Map0) ->
    case get_value(Bool, Bs0) of
        #b_literal{val=true} ->
            %% Unconditional branch
            update_successor(Succ, Bs0, Map0);
        {succeeded,Var} ->
            Map = update_successor(Succ, Bs0, Map0),
            %% Var did not succeed on the failure path; forget it
            update_successor(Fail, maps:remove(Var, Bs0), Map);
        {'if',Var,TrueType,FalseType} ->
            Bs = maps:remove(Bool, Bs0),
            Map = update_successor(Succ, Bs#{Var => TrueType}, Map0),
            update_successor(Fail, Bs#{Var => FalseType}, Map);
        any ->
            Map = update_successor(Succ, Bs0#{Bool := #b_literal{val=true}}, Map0),
            update_successor(Fail, Bs0#{Bool := #b_literal{val=false}}, Map)
    end;
update_successors(#b_switch{}, _Bs, _Map) ->
    %% A switch implies a filter, which means that we cannot calculate the
    %% exact size.
    throw(not_possible);
update_successors(#b_ret{}, _Bs, Map) -> Map.

%% Nothing is propagated to the exception block; otherwise the new
%% bindings are joined with what is already known for the target.
update_successor(?EXCEPTION_BLOCK, _Bs, Map) ->
    Map;
update_successor(L, Bs, Map) ->
    case Map of
        #{L := OldBs} ->
            Map#{L := join_bs(OldBs, Bs)};
        #{} ->
            Map#{L => Bs}
    end.
%% Threads the bindings through every instruction in a block.
calc_size_is(Is, Bs) ->
    lists:foldl(fun calc_size_instr/2, Bs, Is).
%% Abstract interpretation of a single instruction with respect to
%% sizes: tracks bit-syntax matching, appends to the writable binary,
%% calls, list traversal and tuple building. Any instruction that is
%% not understood binds its destination to 'any'.
calc_size_instr(#b_set{op=bs_add,args=[A,B,U],dst=Dst}, Bs) ->
    %% We must make sure that the value of bs_add only depends on literals
    %% and arguments passed from the function that created the writable
    %% binary.
    case {get_value(A, Bs),get_arg_value(B, Bs)} of
        {#b_literal{}=Lit,Val} ->
            Bs#{Dst => {expr,{{bif,'+'},[Lit,{{bif,'*'},[Val,U]}]}}};
        {{expr,Expr},Val} ->
            Bs#{Dst => {expr,{{bif,'+'},[Expr,{{bif,'*'},[Val,U]}]}}};
        {_,_} ->
            %% The value depends on a variable of which we know nothing.
            Bs#{Dst => any}
    end;
calc_size_instr(#b_set{op=bs_init,args=[#b_literal{val=private_append},
                                        Writable,Size,Unit],
                       dst=Dst}, Bs) ->
    %% An append to the writable binary; record the appended size
    case get_value(Size, Bs) of
        {arg,SizeOrigin} ->
            Expr = {{bif,'*'},[SizeOrigin,Unit]},
            update_writable(Dst, Writable, Expr, Bs);
        #b_literal{} ->
            Expr = {{bif,'*'},[Size,Unit]},
            update_writable(Dst, Writable, Expr, Bs);
        {expr,Expr} ->
            update_writable(Dst, Writable, Expr, Bs);
        _ ->
            Bs#{Dst => any}
    end;
calc_size_instr(#b_set{op=bs_match,args=[_Type,Ctx,_Flags,
                                         Size,Unit],dst=Dst}, Bs) ->
    %% Advance the match context by Size*Unit bits
    case get_arg_value(Size, Bs) of
        none ->
            Bs#{Dst => any};
        Val ->
            update_match(Dst, Ctx, {{safe,{bif,'*'}},[Val,Unit]}, Bs)
    end;
calc_size_instr(#b_set{op=bs_start_match,args=[#b_literal{val=new},Arg],dst=Dst}, Bs) ->
    %% A fresh match context: total bits known, offset 0
    case get_arg_value(Arg, Bs) of
        none ->
            Bs#{Dst => any};
        Val ->
            Bs#{Dst => {match,{{bif,bit_size},[Val]},#b_literal{val=0}}}
    end;
calc_size_instr(#b_set{op=call,args=[Name|Args],dst=Dst}=I, Bs) ->
    if
        is_map_key(Name, Bs) ->
            %% Recursive call to the function being analyzed; record
            %% the (known) argument values
            Result0 = [get_value(A, Bs) || A <- Args],
            Result = [Val || Val <- Result0, Val =/= any],
            Bs#{Dst => {call,Result}};
        true ->
            call_size_func(I, Bs)
    end;
calc_size_instr(#b_set{op=get_tl,args=[Ctx],dst=Dst}, Bs) ->
    %% Stepping to the tail consumes one list element
    update_match(Dst, Ctx, #b_literal{val=1}, Bs);
calc_size_instr(#b_set{op=is_nonempty_list,args=[Arg],dst=Dst}, Bs) ->
    case get_arg_value(Arg, Bs) of
        none ->
            Bs#{Dst => any};
        Val ->
            NumElements = {{bif,length},[Val]},
            Match = {match,NumElements,#b_literal{val=0}},
            NoMatch = {nil_or_bad,Val},
            Bs#{Dst => {'if',Arg,Match,NoMatch}}
    end;
calc_size_instr(#b_set{op=put_tuple,args=Args,dst=Dst}, Bs) ->
    Bs#{Dst => {tuple,Args}};
calc_size_instr(#b_set{op={succeeded,_},args=[Arg],dst=Dst}, Bs) ->
    Bs#{Dst => {succeeded,Arg}};
calc_size_instr(#b_set{dst=Dst}, Bs) ->
    Bs#{Dst => any}.
%% Records a private_append of Expr bits to the still-empty writable
%% binary; any other source makes the size unknown.
update_writable(Dst, Writable, Expr, Bs) ->
    case get_value(Writable, Bs) of
        {writable,#b_literal{val=0}} ->
            Bs#{Dst => {writable,Expr}};
        _ ->
            Bs#{Dst => any}
    end.

%% Advances the position of a tracked match context by Increment.
update_match(Dst, Ctx, Increment, Bs) ->
    case get_value(Ctx, Bs) of
        {match,NumElements,Offset0} ->
            Offset = {{bif,'+'},[Offset0,Increment]},
            Bs#{Dst => {match,NumElements,Offset}};
        _ ->
            Bs#{Dst => any}
    end.

%% Returns the literal or function-argument value behind Name, or none
%% when the value does not come from a literal or an argument.
get_arg_value(#b_literal{}=Lit, _Bs) ->
    Lit;
get_arg_value(Name, Bs) ->
    case Bs of
        #{Name := {arg,Val}} -> Val;
        #{} -> none
    end.
%% Looks up Name in the bindings; an unbound name represents itself.
get_value(Name, Bs) ->
    maps:get(Name, Bs, Name).
%% Joins two binding maps; always iterate over the keys of the smaller
%% map since only common variables can survive the join.
join_bs(LHS, RHS) when map_size(LHS) < map_size(RHS) ->
    join_bs_1(maps:keys(LHS), RHS, LHS);
join_bs(LHS, RHS) ->
    join_bs_1(maps:keys(RHS), LHS, RHS).
%% Joins two maps of bindings, keeping only the variables common to
%% both maps; a variable bound to different values on the two sides is
%% widened to 'any'.
join_bs_1([], _Bigger, Acc) ->
    Acc;
join_bs_1([Key | Rest], Bigger, Acc) ->
    case maps:find(Key, Bigger) of
        {ok, Val} ->
            case Acc of
                #{Key := Val} ->
                    %% Identical on both sides; keep as is
                    join_bs_1(Rest, Bigger, Acc);
                #{} ->
                    %% Different values; widen
                    join_bs_1(Rest, Bigger, Acc#{Key := any})
            end;
        error ->
            %% Not present in the other map; drop it
            join_bs_1(Rest, Bigger, maps:remove(Key, Acc))
    end.
%%%
%%% Turn the result of the traversal of the SSA code into an expression tree.
%%%
%% Converts the analysis result into an expression tree that computes
%% the number of bytes (rounded up) for the writable binary, plus a
%% map of generator annotations keyed by generator variable.
make_expr_tree({{call,Alloc0},GenAnno}) ->
    {Alloc1,Annos} = make_expr_tree_list(Alloc0, none, none, [GenAnno]),
    Alloc2 = opt_expr(Alloc1),
    Alloc = round_up_to_byte_size(Alloc2),
    {Alloc,maps:from_list(Annos)};
make_expr_tree(_) ->
    throw(not_possible).

%% Combines the match information (total items and per-iteration
%% consumption) with the per-item build size into
%%   (NumItems div N) * BuildSize,
%% collecting generator annotations along the way. Nested calls are
%% processed recursively. Returns none if either part is missing.
make_expr_tree_list([{{call,List},GenAnno}|T], Match, none, Annos0) ->
    {BuildSize,Annos} = make_expr_tree_list(List, none, none, [GenAnno|Annos0]),
    make_expr_tree_list(T, Match, BuildSize, Annos);
make_expr_tree_list([{match,NumItems,N}|T], none, BuildSize, Annos) ->
    make_expr_tree_list(T, {NumItems,N}, BuildSize, Annos);
make_expr_tree_list([{writable,BuildSize}|T], Match, none, Annos) ->
    make_expr_tree_list(T, Match, BuildSize, Annos);
make_expr_tree_list([_|T], Match, BuildSize, Annos) ->
    make_expr_tree_list(T, Match, BuildSize, Annos);
make_expr_tree_list([], Match, BuildSize, Annos)
  when Match =/= none, BuildSize =/= none ->
    {NumItems,N} = Match,
    Expr = {{bif,'*'},[{{safe,{bif,'div'}},[NumItems,N]},BuildSize]},
    {Expr,Annos};
make_expr_tree_list([], _, _, Annos) ->
    {none,Annos}.
%% Converts a bit-size expression into a byte-size expression,
%% rounding up: adds 7 before dividing by 8 unless the bit size is
%% already known to be divisible by eight.
round_up_to_byte_size(Alloc0) ->
    Alloc = case divisible_by_eight(Alloc0) of
                true -> Alloc0;
                false -> {{bif,'+'},[Alloc0,#b_literal{val=7}]}
            end,
    opt_expr({{bif,'div'},[Alloc,#b_literal{val=8}]}).

%% True when the expression is provably a multiple of 8: a product
%% with a factor divisible by 8, or such a literal.
divisible_by_eight({{bif,'*'},[Expr1,Expr2]}) ->
    divisible_by_eight(Expr1) orelse divisible_by_eight(Expr2);
divisible_by_eight(#b_literal{val=Val}) when Val rem 8 =:= 0 ->
    true;
divisible_by_eight(_) -> false.
%%%
%%% Optimize an expression tree.
%%%
%% Optimizes an expression tree node: constant-folds the operation
%% when all (recursively optimized) arguments are literals, and
%% otherwise applies the algebraic simplifications in opt_expr_1/2.
%% Folding failures (e.g. division by zero) fall back to the
%% unevaluated form.
opt_expr({Op,Args0}) ->
    Args = opt_expr_args(Args0),
    case literal_expr_args(Args, []) of
        none ->
            opt_expr_1(Op, Args);
        LitArgs ->
            Bif = case Op of
                      {safe,{bif,Bif0}} -> Bif0;
                      {bif,Bif0} -> Bif0
                  end,
            try apply(erlang, Bif, LitArgs) of
                Result ->
                    #b_literal{val=Result}
            catch
                error:_ ->
                    opt_expr_1(Op, Args)
            end
    end;
opt_expr(none) -> none.
%% Algebraic simplifications: drop a division by one, strength-reduce
%% a division by a power of two into a right shift, and simplify
%% multiplication by zero or one.
opt_expr_1({safe,{bif,'div'}}=Op, Args) ->
    case Args of
        [Int,#b_literal{val=1}] ->
            Int;
        [_Int,#b_literal{val=N}] when N > 1 ->
            %% A nonzero literal divisor can never fail; the plain
            %% division simplifications apply
            opt_expr_1({bif,'div'}, Args);
        [_,_] ->
            {Op,Args}
    end;
opt_expr_1({bif,'div'}=Op, [Numerator,#b_literal{val=Denominator}]=Args) ->
    try
        opt_expr_div(Numerator, Denominator)
    catch
        throw:not_possible ->
            case Denominator band (Denominator - 1) of
                0 ->
                    %% The denominator is a power of two.
                    Shift = round(math:log2(Denominator)),
                    {{bif,'bsr'},[Numerator,#b_literal{val=Shift}]};
                _ ->
                    {Op,Args}
            end
    end;
opt_expr_1({bif,'*'}, [{{safe,_},_},#b_literal{val=0}=Zero]) ->
    %% Safe operations have no side effects; the product is simply 0
    Zero;
opt_expr_1({bif,'*'}, [Factor,#b_literal{val=1}]) ->
    Factor;
opt_expr_1(Op, Args) ->
    {Op,Args}.
%% Tries to fold a division into a literal factor of a (nested)
%% multiplication; throws not_possible when no such factor exists.
opt_expr_div({{bif,'*'},[A,B]}, Denominator) ->
    case B of
        #b_literal{val=Factor} when Factor rem Denominator =:= 0 ->
            {{bif,'*'},[A,#b_literal{val=Factor div Denominator}]};
        _ ->
            {{bif,'*'},[A,opt_expr_div(B, Denominator)]}
    end;
opt_expr_div(_, _) ->
    throw(not_possible).
%% Recursively optimizes the non-atomic arguments of an expression.
opt_expr_args([A0|As]) ->
    A = case A0 of
            #b_literal{} -> A0;
            #b_var{} -> A0;
            _ -> opt_expr(A0)
        end,
    [A|opt_expr_args(As)];
opt_expr_args([]) -> [].

%% Returns the literal values when all arguments are literals;
%% otherwise none (constant folding is not possible).
literal_expr_args([#b_literal{val=Val}|As], Acc) ->
    literal_expr_args(As, [Val|Acc]);
literal_expr_args([_|_], _) ->
    none;
literal_expr_args([], Acc) ->
    reverse(Acc).
%%%
%%% Given an expression tree, generate SSA code to calculate the number
%%% bytes to allocate for the writable binary.
%%%
%% Generates the SSA code that evaluates the size expression and feeds
%% the resulting byte count to the bs_init_writable instruction, which
%% ends up in the final block of the generated code.
cg_size_calc(Expr, L, #b_blk{is=Is0}=Blk0, Annos, Count0, Acc0) ->
    [InitWr] = Is0,
    FailBlk0 = [],
    {Acc1,Alloc,NextBlk,FailBlk,Count} = cg_size_calc_1(L, Expr, Annos, FailBlk0, Count0, Acc0),
    Is = [InitWr#b_set{args=[Alloc]}],
    Blk = Blk0#b_blk{is=Is},
    Acc = [{NextBlk,Blk}|FailBlk++Acc1],
    {Acc,Count}.
%% Emits code for one expression node. Literals need no code. For a
%% 'safe' operation a failure yields 0 via a phi node instead of
%% raising; for plain operations failure branches to a shared
%% bad_generator block.
cg_size_calc_1(L, #b_literal{}=Alloc, _Annos, FailBlk, Count, Acc) ->
    {Acc,Alloc,L,FailBlk,Count};
cg_size_calc_1(L0, {Op0,Args0}, Annos, FailBlk0, Count0, Acc0) ->
    {Args,Acc1,L,FailBlk1,Count1} = cg_atomic_args(Args0, L0, Annos, FailBlk0, Count0, Acc0, []),
    {BadGenL,FailBlk,Count2} = cg_bad_generator(Args, Annos, FailBlk1, Count1),
    {Dst,Count3} = new_var('@ssa_tmp', Count2),
    case Op0 of
        {safe,Op} ->
            {OpDst,Count4} = new_var('@ssa_size', Count3),
            {[OpSuccL,OpFailL,PhiL,NextL],Count5} = new_blocks(4, Count4),
            I = #b_set{op=Op,args=Args,dst=OpDst},
            {Blk,Count} = cg_succeeded(I, OpSuccL, OpFailL, Count5),
            JumpBlk = #b_blk{is=[],last=cg_br(PhiL)},
            %% On failure the size defaults to 0
            PhiIs = [#b_set{op=phi,
                            args=[{OpDst,OpSuccL},{#b_literal{val=0},OpFailL}],
                            dst=Dst}],
            PhiBlk = #b_blk{is=PhiIs,last=cg_br(NextL)},
            Acc = [{PhiL,PhiBlk},{OpSuccL,JumpBlk},
                   {OpFailL,JumpBlk},{L,Blk}|Acc1],
            {Acc,Dst,NextL,FailBlk,Count};
        _ ->
            {NextBlkL,Count4} = new_block(Count3),
            I = #b_set{op=Op0,args=Args,dst=Dst},
            {SuccBlk,Count} = cg_succeeded(I, NextBlkL, BadGenL, Count4),
            Acc = [{L,SuccBlk}|Acc1],
            {Acc,Dst,NextBlkL,FailBlk,Count}
    end.
%% Returns the label of a block that raises a bad_generator error for
%% Arg, reusing an already generated failure block when there is no
%% annotation for Arg.
cg_bad_generator([Arg|_], Annos, FailBlk, Count) ->
    case Annos of
        #{Arg := Anno} ->
            cg_bad_generator_1(Anno, Arg, FailBlk, Count);
        #{} ->
            case FailBlk of
                [{L,_}|_] ->
                    %% Reuse the most recently generated failure block
                    {L,FailBlk,Count};
                [] ->
                    cg_bad_generator_1(#{}, Arg, FailBlk, Count)
            end
    end.

%% Builds a block that calls erlang:error({bad_generator, Arg}) with
%% the given annotation (for correct error location information).
cg_bad_generator_1(Anno, Arg, FailBlk, Count0) ->
    {L,Count1} = new_block(Count0),
    {TupleDst,Count2} = new_var('@ssa_tuple', Count1),
    {Ret,Count3} = new_var('@ssa_ret', Count2),
    MFA = #b_remote{mod=#b_literal{val=erlang},
                    name=#b_literal{val=error},
                    arity=1},
    TupleI = #b_set{op=put_tuple,
                    args=[#b_literal{val=bad_generator},Arg],
                    dst=TupleDst},
    CallI = #b_set{anno=Anno,op=call,args=[MFA,TupleDst],dst=Ret},
    Is = [TupleI,CallI],
    Blk = #b_blk{is=Is,last=#b_ret{arg=Ret}},
    {L,[{L,Blk}|FailBlk],Count3}.
%% Wraps instruction I in a {succeeded,guard} test, branching to Succ
%% on success and Fail on failure.
cg_succeeded(#b_set{dst=OpDst}=I, Succ, Fail, Count0) ->
    {Bool,Count} = new_var('@ssa_bool', Count0),
    SuccI = #b_set{op={succeeded,guard},args=[OpDst],dst=Bool},
    Blk = #b_blk{is=[I,SuccI],last=#b_br{bool=Bool,succ=Succ,fail=Fail}},
    {Blk,Count}.

%% An unconditional branch to Target.
cg_br(Target) ->
    #b_br{bool=#b_literal{val=true},succ=Target,fail=Target}.
%% Ensures all arguments of an operation are atomic (literals or
%% variables), emitting size-calculation code for nested expressions.
%% A 'none' argument aborts the whole optimization.
cg_atomic_args([A|As], L, Annos, FailBlk0, Count0, BlkAcc0, Acc) ->
    case A of
        #b_literal{} ->
            cg_atomic_args(As, L, Annos, FailBlk0, Count0, BlkAcc0, [A|Acc]);
        #b_var{} ->
            cg_atomic_args(As, L, Annos, FailBlk0, Count0, BlkAcc0, [A|Acc]);
        none ->
            throw(not_possible);
        _ ->
            {BlkAcc,Var,NextBlk,FailBlk,Count} =
                cg_size_calc_1(L, A, Annos, FailBlk0, Count0, BlkAcc0),
            cg_atomic_args(As, NextBlk, Annos, FailBlk, Count, BlkAcc, [Var|Acc])
    end;
cg_atomic_args([], NextBlk, _Annos, FailBlk, Count, BlkAcc, Acc) ->
    {reverse(Acc),BlkAcc,NextBlk,FailBlk,Count}.
%% Create a fresh SSA variable whose name is the pair {Base, Count},
%% returning the variable and the incremented counter.
new_var(Base, Count) ->
    {#b_var{name={Base,Count}},Count+1}.
%% Allocate N consecutive block labels starting at Count. The labels are
%% returned in descending order (highest first), which is how callers
%% destructure them; the second element is the next free counter value.
new_blocks(N, Count) ->
    new_blocks(N, Count, []).

new_blocks(0, Next, Labels) ->
    {Labels, Next};
new_blocks(Left, Next, Labels) ->
    new_blocks(Left - 1, Next + 1, [Next | Labels]).
%% Allocate a single fresh block label.
new_block(Count) ->
{Count,Count+1}. | lib/compiler/src/beam_ssa_bc_size.erl | 0.564339 | 0.452052 | beam_ssa_bc_size.erl | starcoder
%%%
%%% Copyright 2011, Boundary
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
%%%-------------------------------------------------------------------
%%% File: folsom_metrics.erl
%%% @author <NAME> <<EMAIL>>
%%% @doc
%%% @end
%%%------------------------------------------------------------------
-module(folsom_metrics).
-export([
new_counter/1,
new_gauge/1,
new_histogram/1,
new_histogram/2,
new_histogram/3,
new_histogram/4,
new_history/1,
new_history/2,
new_meter/1,
delete_metric/1,
notify/1,
notify/2,
notify/3,
notify_existing_metric/3,
get_metrics/0,
metric_exists/1,
get_metrics_info/0,
get_metric_info/1,
get_metric_value/1,
get_histogram_statistics/1,
get_histogram_statistics/2,
get_history_values/2,
histogram_timed_update/2,
histogram_timed_update/3,
histogram_timed_update/4
]).
-include("folsom.hrl").
%% Metrics API
%% All of the functions below are thin delegations to folsom_ets, which
%% owns the actual metric storage and event dispatch.

%% @doc Register a counter-type metric named Name.
new_counter(Name) ->
    folsom_ets:add_handler(counter, Name).

%% @doc Register a gauge-type metric named Name.
new_gauge(Name) ->
    folsom_ets:add_handler(gauge, Name).

%% @doc Register a histogram with default sample type, size and alpha.
new_histogram(Name) ->
    folsom_metrics:new_histogram(Name, ?DEFAULT_SAMPLE_TYPE, ?DEFAULT_SIZE, ?DEFAULT_ALPHA).

new_histogram(Name, SampleType) ->
    folsom_metrics:new_histogram(Name, SampleType, ?DEFAULT_SIZE, ?DEFAULT_ALPHA).

new_histogram(Name, SampleType, SampleSize) ->
    folsom_metrics:new_histogram(Name, SampleType, SampleSize, ?DEFAULT_ALPHA).

new_histogram(Name, SampleType, SampleSize, Alpha) ->
    folsom_ets:add_handler(histogram, Name, SampleType, SampleSize, Alpha).

%% @doc Register a history (event log) metric with the default size.
new_history(Name) ->
    folsom_metrics:new_history(Name, ?DEFAULT_SIZE).

new_history(Name, SampleSize) ->
    folsom_ets:add_handler(history, Name, SampleSize).

%% @doc Register a meter-type metric named Name.
new_meter(Name) ->
    folsom_ets:add_handler(meter, Name).

%% @doc Remove the metric named Name.
delete_metric(Name) ->
    folsom_ets:delete_handler(Name).

%% @doc Record an event; the one-argument form takes a {Name, Event} tuple.
notify(Event) ->
    folsom_ets:notify(Event).

notify(Name, Event) ->
    folsom_ets:notify(Name, Event).

%% @doc Record Event against Name; Type handling (presumably creation of
%% a missing metric of that type) is delegated to folsom_ets:notify/3.
notify(Name, Event, Type) ->
    folsom_ets:notify(Name, Event, Type).

%% @doc Record Event against an already-registered metric.
notify_existing_metric(Name, Event, Type) ->
    folsom_ets:notify_existing_metric(Name, Event, Type).

%% @doc List all registered metrics.
get_metrics() ->
    folsom_ets:get_handlers().

%% @doc Whether a metric named Name is registered.
metric_exists(Name) ->
    folsom_ets:handler_exists(Name).

get_metrics_info() ->
    folsom_ets:get_handlers_info().

%% @doc Metadata for one metric, wrapped in a single-element list.
get_metric_info(Name) ->
    [folsom_ets:get_info(Name)].

get_metric_value(Name) ->
    folsom_ets:get_values(Name).

%% @doc Current values of histogram Name reduced to summary statistics.
get_histogram_statistics(Name) ->
    Values = folsom_ets:get_values(Name),
    folsom_statistics:get_statistics(Values).

%% @doc Statistics computed across two histograms' current values.
get_histogram_statistics(Name1, Name2) ->
    Values1 = get_metric_value(Name1),
    Values2 = get_metric_value(Name2),
    folsom_statistics:get_statistics(Values1, Values2).

get_history_values(Name, Count) ->
    folsom_ets:get_history_values(Name, Count).

%% @doc Run Fun/0, record the elapsed time (microseconds, as reported by
%% timer:tc) in histogram Name, and return Fun's result. The ok-match on
%% notify/1 intentionally crashes if the update is rejected.
histogram_timed_update(Name, Fun) ->
    {Time, Value} = timer:tc(Fun),
    ok = notify({Name, Time}),
    Value.

histogram_timed_update(Name, Fun, Args) ->
    {Time, Value} = timer:tc(Fun, Args),
    ok = notify({Name, Time}),
    Value.

histogram_timed_update(Name, Mod, Fun, Args) ->
    {Time, Value} = timer:tc(Mod, Fun, Args),
    ok = notify({Name, Time}),
Value. | src/folsom_metrics.erl | 0.503174 | 0.406567 | folsom_metrics.erl | starcoder
%%--------------------------------------------------------------------
%% Copyright (c) 2021-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% A hierarchical token bucket algorithm
%% Note: this is not the linux HTB algorithm(http://luxik.cdi.cz/~devik/qos/htb/manual/theory.htm)
%% Algorithm:
%% 1. the root node periodically generates tokens and then distributes them
%% just like the oscillation of water waves
%% 2. the leaf node has a counter, which is the place where the token is actually held.
%% 3. other nodes only play the role of transmission, and the rate of the node is like a valve,
%% limiting the oscillation transmitted from the parent node
-module(emqx_limiter_server).
-behaviour(gen_server).
-include_lib("emqx/include/logger.hrl").
%% gen_server callbacks
-export([
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3,
format_status/2
]).
-export([
start_link/1,
connect/2,
info/1,
name/1,
get_initial_val/1,
update_config/1
]).
%% number of tokens generated per period
-type root() :: #{
rate := rate(),
burst := rate(),
%% token generation interval(second)
period := pos_integer(),
consumed := non_neg_integer()
}.
-type bucket() :: #{
name := bucket_name(),
rate := rate(),
obtained := non_neg_integer(),
%% token correction value
correction := emqx_limiter_decimal:zero_or_float(),
capacity := capacity(),
counter := undefined | counters:counters_ref(),
index := undefined | index()
}.
-type state() :: #{
type := limiter_type(),
root := undefined | root(),
buckets := buckets(),
%% current counter to alloc
counter := undefined | counters:counters_ref(),
index := index()
}.
-type buckets() :: #{bucket_name() => bucket()}.
-type limiter_type() :: emqx_limiter_schema:limiter_type().
-type bucket_name() :: emqx_limiter_schema:bucket_name().
-type rate() :: decimal().
-type flow() :: decimal().
-type capacity() :: decimal().
-type decimal() :: emqx_limiter_decimal:decimal().
-type index() :: pos_integer().
-define(CALL(Type), gen_server:call(name(Type), ?FUNCTION_NAME)).
%% minimum coefficient for overloaded limiter
-define(OVERLOAD_MIN_ALLOC, 0.3).
-define(CURRYING(X, F2), fun(Y) -> F2(X, Y) end).
-export_type([index/0]).
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).
-elvis([{elvis_style, no_if_expression, disable}]).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
-spec connect(
    limiter_type(),
    bucket_name() | #{limiter_type() => bucket_name() | undefined}
) ->
    emqx_htb_limiter:limiter().
%% If no bucket path is set in config, there will be no limit
connect(_Type, undefined) ->
    emqx_htb_limiter:make_infinity_limiter();
%% Atom form: look up the named bucket's config and build a limiter
%% whose kind depends on how the bucket is constrained.
connect(Type, BucketName) when is_atom(BucketName) ->
    CfgPath = emqx_limiter_schema:get_bucket_cfg_path(Type, BucketName),
    case emqx:get_config(CfgPath, undefined) of
        undefined ->
            ?SLOG(error, #{msg => "bucket_config_not_found", path => CfgPath}),
            throw("bucket's config not found");
        #{
            rate := AggrRate,
            capacity := AggrSize,
            per_client := #{rate := CliRate, capacity := CliSize} = Cfg
        } ->
            case emqx_limiter_manager:find_bucket(Type, BucketName) of
                {ok, Bucket} ->
                    if
                        %% per-client limits are tighter than the shared
                        %% bucket: track tokens on the client side too
                        CliRate < AggrRate orelse CliSize < AggrSize ->
                            emqx_htb_limiter:make_token_bucket_limiter(Cfg, Bucket);
                        %% NOTE(review): an infinity bucket whose client
                        %% limits are tighter takes the branch above and
                        %% still gets a token-bucket limiter — confirm
                        %% this precedence is intended.
                        Bucket =:= infinity ->
                            emqx_htb_limiter:make_infinity_limiter();
                        true ->
                            emqx_htb_limiter:make_ref_limiter(Cfg, Bucket)
                    end;
                undefined ->
                    ?SLOG(error, #{msg => "bucket_not_found", path => CfgPath}),
                    throw("invalid bucket")
            end
    end;
%% Map form: resolve this limiter type's bucket name (or undefined).
connect(Type, Paths) ->
    connect(Type, maps:get(Type, Paths, undefined)).

-spec info(limiter_type()) -> state().
%% @doc Fetch the full state of the limiter server for Type
%% (synchronous gen_server call).
info(Type) ->
    ?CALL(Type).
-spec name(limiter_type()) -> atom().
%% @doc Registered process name for the limiter server of the given
%% type, e.g. 'emqx_limiter_server_bytes_in'.
%% io_lib:format/2 returns a (possibly deep) iolist; it must be
%% flattened before erlang:list_to_atom/1, which only accepts a flat
%% character list (a deep list raises badarg). Type is a fixed schema
%% atom, so dynamic atom creation here is bounded.
name(Type) ->
    erlang:list_to_atom(lists:flatten(io_lib:format("~s_~s", [?MODULE, Type]))).
-spec update_config(limiter_type()) -> ok.
%% @doc Ask the server for Type to rebuild its token tree from the
%% current configuration (?CALL sends the atom 'update_config' as a
%% synchronous gen_server call).
update_config(Type) ->
    ?CALL(Type).
%%--------------------------------------------------------------------
%% @doc
%% Starts the server
%% @end
%%--------------------------------------------------------------------
-spec start_link(limiter_type()) -> _.
%% The process is registered locally under name(Type), so there is one
%% limiter server per limiter type.
start_link(Type) ->
    gen_server:start_link({local, name(Type)}, ?MODULE, [Type], []).
%%--------------------------------------------------------------------
%%% gen_server callbacks
%%--------------------------------------------------------------------
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Initializes the server
%% @end
%%--------------------------------------------------------------------
-spec init(Args :: term()) ->
    {ok, State :: term()}
    | {ok, State :: term(), Timeout :: timeout()}
    | {ok, State :: term(), hibernate}
    | {stop, Reason :: term()}
    | ignore.
%% Build the token tree for this limiter type and arm the periodic
%% token-generation timer using the root's period.
init([Type]) ->
    #{root := #{period := Period}} = State = init_tree(Type),
    oscillate(Period),
    {ok, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling call messages
%% @end
%%--------------------------------------------------------------------
-spec handle_call(Request :: term(), From :: {pid(), term()}, State :: term()) ->
    {reply, Reply :: term(), NewState :: term()}
    | {reply, Reply :: term(), NewState :: term(), Timeout :: timeout()}
    | {reply, Reply :: term(), NewState :: term(), hibernate}
    | {noreply, NewState :: term()}
    | {noreply, NewState :: term(), Timeout :: timeout()}
    | {noreply, NewState :: term(), hibernate}
    | {stop, Reason :: term(), Reply :: term(), NewState :: term()}
    | {stop, Reason :: term(), NewState :: term()}.
%% info: expose the whole server state for inspection.
handle_call(info, _From, State) ->
    {reply, State, State};
%% update_config: rebuild the token tree from the current config. The
%% previous state is discarded entirely (init_tree/1 allocates a fresh
%% counter array).
handle_call(update_config, _From, #{type := Type}) ->
    NewState = init_tree(Type),
    {reply, ok, NewState};
handle_call(Req, _From, State) ->
    ?SLOG(error, #{msg => "unexpected_call", call => Req}),
    {reply, ignored, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling cast messages
%% @end
%%--------------------------------------------------------------------
-spec handle_cast(Request :: term(), State :: term()) ->
    {noreply, NewState :: term()}
    | {noreply, NewState :: term(), Timeout :: timeout()}
    | {noreply, NewState :: term(), hibernate}
    | {stop, Reason :: term(), NewState :: term()}.
%% No casts are part of this server's protocol; log and ignore.
handle_cast(Req, State) ->
    ?SLOG(error, #{msg => "unexpected_cast", cast => Req}),
    {noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling all non call/cast messages
%% @end
%%--------------------------------------------------------------------
-spec handle_info(Info :: timeout() | term(), State :: term()) ->
    {noreply, NewState :: term()}
    | {noreply, NewState :: term(), Timeout :: timeout()}
    | {noreply, NewState :: term(), hibernate}
    | {stop, Reason :: normal | term(), NewState :: term()}.
%% Periodic tick scheduled by oscillate/1: generate and spread tokens.
handle_info(oscillate, State) ->
    {noreply, oscillation(State)};
handle_info(Info, State) ->
    ?SLOG(error, #{msg => "unexpected_info", info => Info}),
    {noreply, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do any
%% necessary cleaning up. When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%% @end
%%--------------------------------------------------------------------
-spec terminate(
    Reason :: normal | shutdown | {shutdown, term()} | term(),
    State :: term()
) -> any().
%% Nothing to clean up: counters are garbage-collected with the process.
terminate(_Reason, _State) ->
    ok.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% Convert process state when code is changed
%% @end
%%--------------------------------------------------------------------
-spec code_change(
    OldVsn :: term() | {down, term()},
    State :: term(),
    Extra :: term()
) ->
    {ok, NewState :: term()}
    | {error, Reason :: term()}.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called for changing the form and appearance
%% of gen_server status when it is returned from sys:get_status/1,2
%% or when it appears in termination error logs.
%% @end
%%--------------------------------------------------------------------
-spec format_status(
    Opt :: normal | terminate,
    Status :: list()
) -> Status :: term().
format_status(_Opt, Status) ->
    Status.

%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
%% Schedule the next 'oscillate' tick Interval milliseconds from now.
oscillate(Interval) ->
    erlang:send_after(Interval, self(), ?FUNCTION_NAME).
%% @doc generate tokens, and then spread to leaf nodes
-spec oscillation(state()) -> state().
oscillation(
    #{
        root := #{
            rate := Flow,
            period := Interval,
            consumed := Consumed
        } = Root,
        buckets := Buckets
    } = State
) ->
    %% re-arm the timer first, then distribute this period's tokens
    oscillate(Interval),
    %% least-served buckets are refilled first (see get_ordered_buckets/1)
    Ordereds = get_ordered_buckets(Buckets),
    {Alloced, Buckets2} = transverse(Ordereds, Flow, 0, Buckets),
    maybe_burst(State#{
        buckets := Buckets2,
        root := Root#{consumed := Consumed + Alloced}
    }).

%% @doc horizontal spread: hand tokens to each bucket in turn until the
%% inflow for this period is exhausted; returns the total allocated.
-spec transverse(
    list(bucket()),
    flow(),
    non_neg_integer(),
    buckets()
) -> {non_neg_integer(), buckets()}.
transverse([H | T], InFlow, Alloced, Buckets) when InFlow > 0 ->
    {BucketAlloced, Buckets2} = longitudinal(H, InFlow, Buckets),
    InFlow2 = sub(InFlow, BucketAlloced),
    Alloced2 = Alloced + BucketAlloced,
    transverse(T, InFlow2, Alloced2, Buckets2);
%% inflow exhausted or no buckets left
transverse(_, _, Alloced, Buckets) ->
    {Alloced, Buckets}.

%% @doc vertical spread: push tokens from the root into one bucket's
%% counter, limited by the bucket's own rate and remaining capacity.
-spec longitudinal(bucket(), flow(), buckets()) ->
    {non_neg_integer(), buckets()}.
longitudinal(
    #{
        name := Name,
        rate := Rate,
        capacity := Capacity,
        counter := Counter,
        index := Index,
        obtained := Obtained
    } = Bucket,
    InFlow,
    Buckets
) when Counter =/= undefined ->
    Flow = erlang:min(InFlow, Rate),
    ShouldAlloc =
        case counters:get(Counter, Index) of
            Tokens when Tokens < 0 ->
                %% the token count may be negative ("stolen" from the future);
                %% because ∃ x. add(Capacity, x) < 0, we must clamp against the
                %% minimum allocation
                erlang:max(
                    add(Capacity, Tokens),
                    mul(Capacity, ?OVERLOAD_MIN_ALLOC)
                );
            Tokens ->
                %% is it possible that Tokens > Capacity ???
                erlang:max(sub(Capacity, Tokens), 0)
        end,
    case lists:min([ShouldAlloc, Flow, Capacity]) of
        Available when Available > 0 ->
            %% XXX if capacity is infinity, and flow always > 0, the value in
            %% counter will be overflow at some point in the future, do we need
            %% to deal with this situation???
            {Inc, Bucket2} = emqx_limiter_correction:add(Available, Bucket),
            counters:add(Counter, Index, Inc),
            {Inc, Buckets#{Name := Bucket2#{obtained := Obtained + Inc}}};
        _ ->
            {0, Buckets}
    end;
%% bucket without a counter (unlimited bucket): nothing to refill
longitudinal(_, _, Buckets) ->
    {0, Buckets}.
-spec get_ordered_buckets(list(bucket()) | buckets()) -> list(bucket()).
%% Sort buckets ascending by the number of tokens they have obtained so
%% far, so the least-served bucket is refilled first (avoids starvation).
get_ordered_buckets(Buckets) when is_map(Buckets) ->
    get_ordered_buckets(maps:values(Buckets));
get_ordered_buckets(BucketList) ->
    ByObtained = fun(#{obtained := ObtainedA}, #{obtained := ObtainedB}) ->
        ObtainedA < ObtainedB
    end,
    lists:sort(ByObtained, BucketList).
-spec maybe_burst(state()) -> state().
%% When a burst budget is configured, find every bucket whose counter
%% has run dry and share the burst tokens between them.
maybe_burst(
    #{
        buckets := Buckets,
        root := #{burst := Burst}
    } = State
) when Burst > 0 ->
    Fold = fun
        %% collect buckets with an empty (zero or negative) counter
        (_Name, #{counter := Cnt, index := Idx} = Bucket, Acc) when Cnt =/= undefined ->
            case counters:get(Cnt, Idx) > 0 of
                true ->
                    Acc;
                false ->
                    [Bucket | Acc]
            end;
        %% buckets without a counter are unlimited and never starved
        (_Name, _Bucket, Acc) ->
            Acc
    end,
    Empties = maps:fold(Fold, [], Buckets),
    dispatch_burst(Empties, Burst, State);
maybe_burst(State) ->
    State.

-spec dispatch_burst(list(bucket()), non_neg_integer(), state()) -> state().
dispatch_burst([], _, State) ->
    State;
%% Split the burst budget evenly between the starved buckets and book
%% the total actually allocated on the root's consumed figure.
dispatch_burst(
    Empties,
    InFlow,
    #{root := #{consumed := Consumed} = Root, buckets := Buckets} = State
) ->
    EachFlow = InFlow / erlang:length(Empties),
    {Alloced, Buckets2} = dispatch_burst_to_buckets(Empties, EachFlow, 0, Buckets),
    State#{root := Root#{consumed := Consumed + Alloced}, buckets := Buckets2}.

-spec dispatch_burst_to_buckets(
    list(bucket()),
    float(),
    non_neg_integer(),
    buckets()
) -> {non_neg_integer(), buckets()}.
%% Credit each starved bucket with its share of the burst; the
%% fractional remainder is carried by emqx_limiter_correction.
dispatch_burst_to_buckets([Bucket | T], InFlow, Alloced, Buckets) ->
    #{
        name := Name,
        counter := Counter,
        index := Index,
        obtained := Obtained
    } = Bucket,
    {Inc, Bucket2} = emqx_limiter_correction:add(InFlow, Bucket),
    counters:add(Counter, Index, Inc),
    Buckets2 = Buckets#{Name := Bucket2#{obtained := Obtained + Inc}},
    dispatch_burst_to_buckets(T, InFlow, Alloced + Inc, Buckets2);
dispatch_burst_to_buckets([], _, Alloced, Buckets) ->
    {Alloced, Buckets}.

-spec init_tree(emqx_limiter_schema:limiter_type()) -> state().
%% Build the full limiter state for Type from application config: the
%% root node, a single shared counter array, and all configured buckets.
init_tree(Type) ->
    State = #{
        type => Type,
        root => undefined,
        counter => undefined,
        index => 1,
        buckets => #{}
    },
    #{bucket := Buckets} = Cfg = emqx:get_config([limiter, Type]),
    {Factor, Root} = make_root(Cfg),
    {CounterNum, DelayBuckets} = make_bucket(maps:to_list(Buckets), Type, Cfg, Factor, 1, []),
    %% bucket initialization is delayed (closures in DelayBuckets) until
    %% the counter array exists
    State2 = State#{
        root := Root,
        counter := counters:new(CounterNum, [write_concurrency])
    },
    lists:foldl(fun(F, Acc) -> F(Acc) end, State2, DelayBuckets).
-spec make_root(hocons:config()) -> {number(), root()}.
%% Build the root node. Rates of at least one token per minimum period
%% run unchanged (scale factor 1). Slower rates are normalized to
%% exactly one token per period by stretching the period; the returned
%% factor is then used to scale every bucket rate into the same units.
make_root(#{rate := Rate, burst := Burst}) when Rate >= 1 ->
    {1, #{
        rate => Rate,
        burst => Burst,
        period => emqx_limiter_schema:minimum_period(),
        consumed => 0
    }};
make_root(#{rate := Rate, burst := Burst}) ->
    Factor = 1 / Rate,
    BasePeriod = emqx_limiter_schema:minimum_period(),
    {Factor, #{
        rate => 1,
        burst => Burst * Factor,
        period => erlang:floor(Factor * BasePeriod),
        consumed => 0
    }}.
%% Build the static part of each bucket plus a delayed init closure.
%% Buckets with a finite limit claim a slot in the shared counter
%% array; unlimited buckets are registered with no counter at all.
make_bucket([{Name, Conf} | T], Type, GlobalCfg, Factor, CounterNum, DelayBuckets) ->
    Path = emqx_limiter_manager:make_path(Type, Name),
    case get_counter_rate(Conf, GlobalCfg) of
        infinity ->
            Rate = infinity,
            Capacity = infinity,
            Ref = emqx_limiter_bucket_ref:new(undefined, undefined, Rate),
            emqx_limiter_manager:insert_bucket(Path, Ref),
            CounterNum2 = CounterNum,
            InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) ->
                State#{buckets := Buckets#{BucketName => Bucket}}
            end;
        RawRate ->
            #{capacity := Capacity} = Conf,
            Initial = get_initial_val(Conf),
            %% scale the rate into root-period units
            Rate = mul(RawRate, Factor),
            CounterNum2 = CounterNum + 1,
            InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) ->
                {Counter, Idx, State2} = alloc_counter(Path, RawRate, Initial, State),
                Bucket2 = Bucket#{counter := Counter, index := Idx},
                State2#{buckets := Buckets#{BucketName => Bucket2}}
            end
    end,
    Bucket = #{
        name => Name,
        rate => Rate,
        obtained => 0,
        correction => 0,
        capacity => Capacity,
        counter => undefined,
        index => undefined
    },
    DelayInit = ?CURRYING(Bucket, InitFun),
    make_bucket(
        T,
        Type,
        GlobalCfg,
        Factor,
        CounterNum2,
        [DelayInit | DelayBuckets]
    );
make_bucket([], _Type, _Global, _Factor, CounterNum, DelayBuckets) ->
    {CounterNum, DelayBuckets}.

-spec alloc_counter(emqx_limiter_manager:path(), rate(), capacity(), state()) ->
    {counters:counters_ref(), pos_integer(), state()}.
%% Reuse the counter slot already registered for Path when one exists
%% (e.g. after update_config); otherwise take the next free index from
%% this server's counter array and bump the allocation index.
alloc_counter(
    Path,
    Rate,
    Initial,
    #{counter := Counter, index := Index} = State
) ->
    case emqx_limiter_manager:find_bucket(Path) of
        {ok, #{
            counter := ECounter,
            index := EIndex
        }} when ECounter =/= undefined ->
            init_counter(Path, ECounter, EIndex, Rate, Initial, State);
        _ ->
            init_counter(
                Path,
                Counter,
                Index,
                Rate,
                Initial,
                State#{index := Index + 1}
            )
    end.

%% Seed the counter slot with the initial token count and (re)register
%% the bucket reference with the manager.
init_counter(Path, Counter, Index, Rate, Initial, State) ->
    _ = put_to_counter(Counter, Index, Initial),
    Ref = emqx_limiter_bucket_ref:new(Counter, Index, Rate),
    emqx_limiter_manager:insert_bucket(Path, Ref),
    {Counter, Index, State}.
%% @doc Resolve the effective rate for a bucket: use the bucket's own
%% rate when the bucket is limited in any way (finite rate or finite
%% capacity); otherwise fall back to the global rate.
%% TODO: the capacity check may be unnecessary.
get_counter_rate(#{rate := Rate, capacity := Capacity}, _GlobalCfg) when
    Rate =/= infinity; Capacity =/= infinity
->
    Rate;
get_counter_rate(_BucketCfg, #{rate := GlobalRate}) ->
    GlobalRate.
-spec get_initial_val(hocons:config()) -> decimal().
%% Tokens a bucket starts with: an explicit positive 'initial' wins;
%% otherwise derive a default from rate/capacity.
get_initial_val(#{
    initial := Initial,
    rate := Rate,
    capacity := Capacity
}) ->
    %% initial will never be infinity (see the emqx_limiter_schema)
    if
        Initial > 0 ->
            Initial;
        Rate =/= infinity ->
            erlang:min(Rate, Capacity);
        Capacity =/= infinity ->
            Capacity;
        true ->
            0
    end. | apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl | 0.645567 | 0.405272 | emqx_limiter_server.erl | starcoder
%%
%% Copyright (c) 2015-2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(state_type).
-author("<NAME> <<EMAIL>>").
-export([new/1,
mutate/3,
merge/3,
is_inflation/2,
is_strict_inflation/2,
irreducible_is_strict_inflation/2]).
-export([delta/3]).
-export([extract_args/1]).
-export_type([state_type/0,
crdt/0,
format/0,
delta_method/0]).
%% Define some initial types.
-type state_type() :: state_awset |
state_awset_ps |
state_bcounter |
state_boolean |
state_dwflag |
state_ewflag |
state_gcounter |
state_gmap |
state_gset |
state_ivar |
state_lexcounter |
state_lwwregister |
state_max_int |
state_mvregister |
state_mvmap |
state_ormap |
state_orset |
state_pair |
state_pncounter |
state_twopset.
-type crdt() :: {state_type(), type:payload()}.
-type delta_method() :: state_driven | digest_driven.
%% Supported serialization formats.
-type format() :: erlang.
%% Perform a delta mutation.
-callback delta_mutate(type:operation(), type:id(), crdt()) ->
{ok, crdt()} | {error, type:error()}.
%% Merge two replicas.
%% If we merge two CRDTs, the result is a CRDT.
%% If we merge a delta and a CRDT, the result is a CRDT.
%% If we merge two deltas, the result is a delta (delta group).
-callback merge(crdt(), crdt()) -> crdt().
%% Check if a some state is bottom
-callback is_bottom(crdt()) -> boolean().
%% Inflation testing.
-callback is_inflation(crdt(), crdt()) -> boolean().
-callback is_strict_inflation(crdt(), crdt()) -> boolean().
%% Let A be the first argument.
%% Let B be the second argument.
%% A is a join-irreducible state.
%% This functions checks if A will strictly inflate B.
%% "B < A \join B"
-callback irreducible_is_strict_inflation(crdt(), crdt()) -> boolean().
%% Join decomposition.
-callback join_decomposition(crdt()) -> [crdt()].
%% Let A be the second argument.
%% Let B be the third argument.
%% This function returns a ∆ from A that inflates B.
%% "The join of all s in join_decomposition(A) such that s strictly inflates B"
-callback delta(delta_method(), crdt(), crdt()) -> crdt().
%% @todo These should be moved to type.erl
%% Encode and Decode.
-callback encode(format(), crdt()) -> binary().
-callback decode(format(), binary()) -> crdt().
%% @doc Builds a new, empty CRDT of the same type as the given CRDT.
-spec new(crdt()) -> any(). %% @todo Fix this any()
%% NOTE(review): state_type() also lists state_dwflag, state_ewflag,
%% state_lwwregister, state_mvregister, state_mvmap and state_ormap,
%% which have no clause here and would raise function_clause.
new({state_awset, _Payload}) ->
    state_awset:new();
new({state_awset_ps, _Payload}) ->
    state_awset_ps:new();
new({state_bcounter, _Payload}) ->
    state_bcounter:new();
new({state_boolean, _Payload}) ->
    state_boolean:new();
new({state_gcounter, _Payload}) ->
    state_gcounter:new();
%% gmap carries its value type, which must be preserved in the new map
new({state_gmap, {ValuesType, _Payload}}) ->
    state_gmap:new([ValuesType]);
new({state_gset, _Payload}) ->
    state_gset:new();
new({state_ivar, _Payload}) ->
    state_ivar:new();
new({state_lexcounter, _Payload}) ->
    state_lexcounter:new();
new({state_max_int, _Payload}) ->
    state_max_int:new();
new({state_orset, _Payload}) ->
    state_orset:new();
%% pairs are rebuilt recursively, component by component
new({state_pair, {Fst, Snd}}) ->
    {state_pair, {new(Fst), new(Snd)}};
new({state_pncounter, _Payload}) ->
    state_pncounter:new();
new({state_twopset, _Payload}) ->
    state_twopset:new().

%% @doc Generic mutation: apply the type's delta-mutator for Op and
%% join the resulting delta back into the CRDT.
-spec mutate(type:operation(), type:id(), crdt()) ->
    {ok, crdt()} | {error, type:error()}.
mutate(Op, Actor, {Type, _}=CRDT) ->
    case Type:delta_mutate(Op, Actor, CRDT) of
        {ok, {Type, Delta}} ->
            {ok, Type:merge({Type, Delta}, CRDT)};
        Error ->
            Error
    end.

%% @doc Generic Merge: delegates to the supplied merge function after
%% asserting both replicas are of the same type.
-spec merge(crdt(), crdt(), function()) -> crdt().
merge({Type, CRDT1}, {Type, CRDT2}, MergeFun) ->
    MergeFun({Type, CRDT1}, {Type, CRDT2}).

%% @doc Generic check for inflation: CRDT2 inflates CRDT1 iff joining
%% them equals CRDT2.
-spec is_inflation(crdt(), crdt()) -> boolean().
is_inflation({Type, _}=CRDT1, {Type, _}=CRDT2) ->
    Type:equal(Type:merge(CRDT1, CRDT2), CRDT2).

%% @doc Generic check for strict inflation.
%% We have a strict inflation if:
%% - we have an inflation
%% - we have different CRDTs
-spec is_strict_inflation(crdt(), crdt()) -> boolean().
is_strict_inflation({Type, _}=CRDT1, {Type, _}=CRDT2) ->
    Type:is_inflation(CRDT1, CRDT2) andalso
        not Type:equal(CRDT1, CRDT2).

%% @doc Generic check for irreducible strict inflation:
%% "B < A \join B" where A is a join-irreducible state.
-spec irreducible_is_strict_inflation(crdt(), crdt()) -> boolean().
irreducible_is_strict_inflation({Type, _}=Irreducible, {Type, _}=CRDT) ->
    Merged = Type:merge(Irreducible, CRDT),
    Type:is_strict_inflation(CRDT, Merged).
%% @doc Generic delta calculation: the join of every irreducible state
%% in the decomposition of A that strictly inflates B, starting from an
%% empty CRDT of A's type.
-spec delta(delta_method(), crdt(), crdt()) -> crdt().
delta(state_driven, {Type, _}=A, {Type, _}=B) ->
    lists:foldl(
        fun(Irreducible, Acc) ->
            case Type:irreducible_is_strict_inflation(Irreducible, B) of
                true -> Type:merge(Acc, Irreducible);
                false -> Acc
            end
        end,
        new(A),
        Type:join_decomposition(A)
    ).
%% @doc Normalize a type specification: a composite {Type, Args} tuple
%% is returned as-is, while a bare type atom gets an empty argument list.
extract_args({Type, Args}) ->
    {Type, Args};
extract_args(Type) ->
{Type, []}. | _build/default/lib/types/src/state_type.erl | 0.64646 | 0.402157 | state_type.erl | starcoder
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_base32).
-export([encode/1, decode/1]).
-define(SET, <<"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567">>).
-spec encode(binary()) -> binary().
%% Encode a binary as uppercase base32 with '=' padding. The worker
%% accumulates encoded chunks in reverse; bit_size/1 equals
%% byte_size/1 * 8 for a binary.
encode(Plain) when is_binary(Plain) ->
    Reversed = encode(Plain, 0, bit_size(Plain), []),
    iolist_to_binary(lists:reverse(Reversed)).
%% Encode 40-bit (5-byte) groups into 8 characters each. The four
%% "tail" clauses handle a final partial group of 1-4 leftover bytes
%% and emit the matching amount of '=' padding.
encode(_Plain, _ByteOffset, 0, Acc) ->
    Acc;
%% 1 leftover byte -> 2 characters + "======"
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 8 ->
    <<A:5, B:3>> = binary:part(Plain, ByteOffset, 1),
    [<<(binary:at(?SET, A)), (binary:at(?SET, B bsl 2)), "======">> | Acc];
%% 2 leftover bytes -> 4 characters + "===="
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 16 ->
    <<A:5, B:5, C:5, D:1>> = binary:part(Plain, ByteOffset, 2),
    [
        <<
            (binary:at(?SET, A)),
            (binary:at(?SET, B)),
            (binary:at(?SET, C)),
            (binary:at(?SET, D bsl 4)),
            "===="
        >>
        | Acc
    ];
%% 3 leftover bytes -> 5 characters + "==="
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 24 ->
    <<A:5, B:5, C:5, D:5, E:4>> = binary:part(Plain, ByteOffset, 3),
    [
        <<
            (binary:at(?SET, A)),
            (binary:at(?SET, B)),
            (binary:at(?SET, C)),
            (binary:at(?SET, D)),
            (binary:at(?SET, E bsl 1)),
            "==="
        >>
        | Acc
    ];
%% 4 leftover bytes -> 7 characters + "="
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 32 ->
    <<A:5, B:5, C:5, D:5, E:5, F:5, G:2>> = binary:part(Plain, ByteOffset, 4),
    [
        <<
            (binary:at(?SET, A)),
            (binary:at(?SET, B)),
            (binary:at(?SET, C)),
            (binary:at(?SET, D)),
            (binary:at(?SET, E)),
            (binary:at(?SET, F)),
            (binary:at(?SET, G bsl 3)),
            "="
        >>
        | Acc
    ];
%% full 5-byte group -> 8 characters, no padding; recurse on the rest
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining >= 40 ->
    <<A:5, B:5, C:5, D:5, E:5, F:5, G:5, H:5>> =
        binary:part(Plain, ByteOffset, 5),
    Output = <<
        (binary:at(?SET, A)),
        (binary:at(?SET, B)),
        (binary:at(?SET, C)),
        (binary:at(?SET, D)),
        (binary:at(?SET, E)),
        (binary:at(?SET, F)),
        (binary:at(?SET, G)),
        (binary:at(?SET, H))
    >>,
    encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]).
-spec decode(binary()) -> binary().
%% Decode an uppercase, '='-padded base32 binary. The worker collects
%% decoded chunks in reverse order.
decode(Encoded) when is_binary(Encoded) ->
    Reversed = decode(Encoded, 0, []),
    iolist_to_binary(lists:reverse(Reversed)).
%% Decode one 8-character group per step. The padded patterns
%% ("======", "====", "===", "=") can only be the final group, so those
%% clauses return directly instead of recursing; only the unpadded
%% clause continues at ByteOffset + 8.
decode(Encoded, ByteOffset, Acc) when ByteOffset == byte_size(Encoded) ->
    Acc;
decode(Encoded, ByteOffset, Acc) ->
    case binary:part(Encoded, ByteOffset, 8) of
        %% 2 characters + "======" -> 1 decoded byte
        <<A:1/binary, B:1/binary, "======">> ->
            [<<(find_in_set(A)):5, (find_in_set(B) bsr 2):3>> | Acc];
        %% 4 characters + "====" -> 2 decoded bytes
        <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, "====">> ->
            [
                <<
                    (find_in_set(A)):5,
                    (find_in_set(B)):5,
                    (find_in_set(C)):5,
                    (find_in_set(D) bsr 4):1
                >>
                | Acc
            ];
        %% 5 characters + "===" -> 3 decoded bytes
        <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, "===">> ->
            [
                <<
                    (find_in_set(A)):5,
                    (find_in_set(B)):5,
                    (find_in_set(C)):5,
                    (find_in_set(D)):5,
                    (find_in_set(E) bsr 1):4
                >>
                | Acc
            ];
        %% 7 characters + "=" -> 4 decoded bytes
        <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary, "=">> ->
            [
                <<
                    (find_in_set(A)):5,
                    (find_in_set(B)):5,
                    (find_in_set(C)):5,
                    (find_in_set(D)):5,
                    (find_in_set(E)):5,
                    (find_in_set(F)):5,
                    (find_in_set(G) bsr 3):2
                >>
                | Acc
            ];
        %% full unpadded group -> 5 decoded bytes; keep going
        <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary,
            H:1/binary>> ->
            decode(
                Encoded,
                ByteOffset + 8,
                [
                    <<
                        (find_in_set(A)):5,
                        (find_in_set(B)):5,
                        (find_in_set(C)):5,
                        (find_in_set(D)):5,
                        (find_in_set(E)):5,
                        (find_in_set(F)):5,
                        (find_in_set(G)):5,
                        (find_in_set(H)):5
                    >>
                    | Acc
                ]
            )
    end.
%% Index of the single-character binary Char within the base32 alphabet
%% ?SET; errors with not_base32 for characters outside the alphabet.
find_in_set(Char) ->
    case binary:match(?SET, Char) of
        nomatch ->
            erlang:error(not_base32);
        {Offset, _} ->
            Offset
    end. | src/couch/src/couch_base32.erl | 0.518302 | 0.425426 | couch_base32.erl | starcoder
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil; fill-column: 92-*-
%% ex: ts=4 sw=4 et
%% Copyright 2012 Opscode, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% @doc Utility functions to support the construction of stats_hero
%% metric labels.
-module(chef_metrics).
-export([
label/2
]).
-ifdef(TEST).
-compile(export_all).
-compile(nowarn_export_all).
-endif.
%% @doc Generate a label for stats_hero based on an `Upstream' name
%% and a `{Module, Function}' pair.
%%
%% `Upstream' refers to the broad domain of external-service
%% functionality the metric corresponds to (e.g., `rdbms', `solr',
%% etc.), while `{Mod, Fun}' refers to the specific module /
%% function under measurement that implements some aspect of that
%% functionality.
-spec label(Upstream :: atom(),
            {Mod :: atom(), Fun :: atom()}) -> Label :: <<_:16,_:_*8>>.
label(s3=Upstream, {Mod, Fun}) when is_atom(Mod),
                                    is_atom(Fun) ->
    %% S3-related labels are special snowflakes because we want to
    %% incorporate not only the upstream, module, and function, but
    %% also the storage server host and the bucket name.
    %%
    %% The host will show whether we're hitting Bookshelf or actual
    %% S3, and the bucket will allow us to ascertain the impact of
    %% using different S3 regions on latency and performance.
    %%
    %% Because data.
    %% Config reads go through envy with an expected 'string' type.
    Url = envy:get(chef_objects, s3_url, string),
    Bucket = envy:get(chef_objects, s3_platform_bucket_name, string),
    %% These two components need to have '.' characters stripped so
    %% that we don't inadvertently introduce new hierarchy levels into
    %% our Graphite metric labels
    BucketBin = sanitize_label_component(Bucket),
    HostBin = sanitize_label_component(extract_host(Url)),
    %% TODO: In the future, we will likely be refactoring label
    %% generation such that the host and bucket information will be
    %% looked up once in the supervisor, and passed through in a "blob
    %% of static data" down to this function.  This means that we will
    %% no longer need to use application:get_env/2 here.  It also
    %% means that the host-extraction and component sanitization will
    %% take place in the supervisor, as well.
    %%
    %% Just an FYI.
    {UpstreamBin, ModBin, FunBin} = to_binaries(Upstream, Mod, Fun),
    %% e.g. <<s3.chef_mycompany_com.my_bucket.chef_s3.delete_checksums>>
    <<UpstreamBin/binary, ".",
      HostBin/binary, ".",
      BucketBin/binary, ".",
      ModBin/binary, ".",
      FunBin/binary>>;
%% All other upstreams: plain "Upstream.Mod.Fun" concatenation.
label(Upstream, {Mod, Fun}) when is_atom(Upstream),
                                 is_atom(Mod),
                                 is_atom(Fun) ->
    {UpstreamBin, ModBin, FunBin} = to_binaries(Upstream, Mod, Fun),
    <<UpstreamBin/binary, ".", ModBin/binary, ".", FunBin/binary>>.
-spec extract_host(Url :: list()) -> Host :: list().

%% @doc Extract the hostname portion of an HTTP(S) URL string.
%%
%% Matches, e.g.:
%%
%%   http://foo.bar.com
%%   https://foo.bar.com
%%   http://foo.bar.com:1234
%%   https://foo.bar.com:1234
%%   http://foo.bar.com/x/y/z
%%   https://foo.bar.com:1234/x/y/z
%%
%% etc., and captures just the hostname portion. In actual use there
%% probably won't ever be a path component, or even a port number, but
%% it pays to be paranoid.
%%
%% Crashes with a badmatch if the URL does not look like an HTTP(S) URL.
extract_host(Url) when is_list(Url) ->
    %% This is an 'extended' regex: literal whitespace in the pattern is
    %% ignored and '#' starts a comment, so the pattern can be laid out
    %% and annotated across several lines.
    %%
    %% BUGFIX: the digit class must be written "\\d" in Erlang source.
    %% A single "\d" inside an Erlang string literal is the escape for
    %% the DEL character (127), so the port group previously looked for
    %% a colon followed by DEL characters and could never match a port.
    Pattern = "(?:http|https)://  # protocol
               ([^:/]+)           # hostname (the only capturing group)
               (?::\\d+)?         # optional port (colon followed by digits)
               (?:/.*)?           # optional path (everything after a slash)",
    %% 'all_but_first' drops the whole-match entry, leaving only the one
    %% explicitly captured group; 'list' returns it as a string rather
    %% than an index or a binary.
    {match, [Host]} = re:run(Url, Pattern,
                             [extended,
                              {capture, all_but_first, list}]),
    Host.
%% @doc Makes the given string a valid single path component of a
%% Graphite metrics label.
%%
%% Basically, ensures there no '.' characters (which would turn a
%% single component into several). Currently used for sanitizing the
%% hostname and bucket components of the S3-related metrics.
%%
%% E.g., "www.foo.com" becomes "www_foo_com"
%%
%% Returns a binary for ease of inclusion in a (binary) metrics label.
-spec sanitize_label_component(Component :: list()) -> Sanitized :: binary().

%% Replace every literal '.' in Component with '_' and return the result
%% as a binary, so the component cannot add hierarchy levels to a
%% Graphite metric path.
sanitize_label_component(Component) when is_list(Component) ->
    DotPattern = "\\.",
    re:replace(Component, DotPattern, "_", [global, {return, binary}]).
%% @doc Convert the upstream/module/function atoms into a tuple of
%% binaries, ready for concatenation into a metrics label.
%%
%% Strictly speaking these could be sanitized like the S3 host/bucket
%% components, but you would have to configure the system in a rather
%% twisted way for an atom here to contain a '.'.
to_binaries(Upstream, Mod, Fun) when is_atom(Upstream),
                                     is_atom(Mod),
                                     is_atom(Fun) ->
    [UpstreamBin, ModBin, FunBin] =
        [erlang:atom_to_binary(A, utf8) || A <- [Upstream, Mod, Fun]],
    {UpstreamBin, ModBin, FunBin}.
%% ---------------------------------------------------------------------
%%
%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved,
%% 2021 <NAME> All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% ---------------------------------------------------------------------
%% @doc This module implements the two-phase CRDT set.
%% This implementation has one notable difference from
%% the Comprehensive CRDT Paper [1]. The CRDT paper
%% has the precondition that deletes are only allowed
%% if the element is present in the local replica
%% of the set already. This implementation does not
%% have that restriction. This can lead to a situation
%%% where an item is added, but that item has been previously
%%% deleted and is therefore not visible.
%% For Riak CS, we opt for this implementation because
%% we _only_ delete items that we have previously observed
%%% in another replica of the same set. This allows us
%% to be sure that the item is only missing in our
%% local replica because it hasn't been replicated
%% to it yet.
%% [1]: [http://hal.inria.fr/docs/00/55/55/88/PDF/techreport.pdf]
-module(twop_set).
-ifdef(TEST).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% export Public API
-export([
new/0,
size/1,
to_list/1,
is_element/2,
add_element/2,
del_element/2,
resolve/1
]).
-type stdlib_set() :: sets:set().
-type twop_set() :: {stdlib_set(), stdlib_set()}.
-export_type([twop_set/0]).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc Create an empty 2P-set: a pair {Adds, Dels} of empty sets.
new() ->
    Adds = sets:new(),
    Dels = sets:new(),
    {Adds, Dels}.
%% not implementing is_set

%% @doc Number of currently-visible elements (added and not deleted).
size(TwoPSet) ->
    sets:size(minus_deletes(TwoPSet)).

%% @doc The visible elements as a list (order unspecified).
to_list(TwoPSet) ->
    sets:to_list(minus_deletes(TwoPSet)).

%% @doc Is Element visible, i.e. added and never deleted?
is_element(Element, TwoPSet) ->
    sets:is_element(Element, minus_deletes(TwoPSet)).
%% @doc Add Element unless it has already been deleted. A 2P-set never
%% resurrects an element: once something has been added and deleted it
%% cannot come back, so the set is returned unchanged in that case.
add_element(Element, {Adds, Dels} = TwoPSet) ->
    case sets:is_element(Element, Dels) of
        false ->
            {sets:add_element(Element, Adds), Dels};
        true ->
            %% Already added and deleted once; cannot be re-added.
            TwoPSet
    end.
%% @doc Delete Element: record it in the delete-set (tombstone) and drop
%% it from the add-set. Note that unlike the CRDT paper, deletion is
%% allowed even if the element was never observed locally.
del_element(Element, {Adds, Dels}) ->
    NewAdds = sets:del_element(Element, Adds),
    NewDels = sets:add_element(Element, Dels),
    {NewAdds, NewDels}.
%% CRDT Funs =========================================================

%% @doc Merge a list of sibling replicas into a single 2P-set: union
%% the add-sets, union the delete-sets, then remove every deleted
%% element from the merged add-set (deletes always win).
resolve(Siblings) ->
    Merge =
        fun({SibAdds, SibDels}, {AccAdds, AccDels}) ->
                MergedDels = sets:union(SibDels, AccDels),
                MergedAdds =
                    sets:subtract(sets:union(SibAdds, AccAdds), MergedDels),
                {MergedAdds, MergedDels}
        end,
    lists:foldl(Merge, new(), Siblings).
%%%===================================================================
%%% Internal functions
%%%===================================================================

%% The visible view of the set: everything added that has not been
%% deleted.
minus_deletes({AddSet, DelSet}) ->
    sets:subtract(AddSet, DelSet).
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
%% Resolving no siblings yields an empty set.
empty_resolve_test() ->
    ?assertEqual(new(), resolve([])).

%% An added element is visible.
item_shows_up_test() ->
    ?assert(is_element(foo, add_element(foo, new()))).

%% A deleted element stops being visible.
item_is_deleted_test() ->
    WithFoo = add_element(foo, new()),
    WithoutFoo = del_element(foo, WithFoo),
    ?assertNot(is_element(foo, WithoutFoo)).

%% When merging siblings, a delete in any sibling wins over an add in
%% another, while unrelated adds survive.
resolution_test() ->
    Empty = new(),
    HasFoo = add_element(foo, Empty),
    NoFoo = del_element(foo, Empty),
    HasBar = add_element(bar, Empty),
    Merged = resolve([HasBar, HasFoo, NoFoo]),
    ?assert(is_element(bar, Merged)),
    ?assertNot(is_element(foo, Merged)).
%%%===================================================================
%%% Test API
%%%===================================================================

%% Expose the raw add-set for tests.
-spec adds(twop_set()) -> stdlib_set().
adds({AddSet, _DelSet}) ->
    AddSet.

%% Expose the raw delete-set (tombstones) for tests.
-spec dels(twop_set()) -> stdlib_set().
dels({_AddSet, DelSet}) ->
    DelSet.
-endif. | apps/riak_cs/src/twop_set.erl | 0.609408 | 0.413359 | twop_set.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved online at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% %CopyrightEnd%
%%
%% @author <NAME> <<EMAIL>>
%% @copyright 2000-2004 <NAME>
%% @doc Closure conversion of Core Erlang modules. This is done as a
%% step in the translation from Core Erlang down to HiPE Icode, and is
%% very much tied to the calling conventions used in HiPE native code.
%% @see cerl_to_icode
%% Some information about function closures in Beam and HiPE:
%%
%% - In Beam, each fun-expression is lifted to a top-level function such
%% that the arity of the new function is equal to the arity of the fun
%% *plus* the number of free variables. The original fun-expression is
%% replaced by a call to 'make_fun' which takes the *label* of the new
%% function and the number of free variables as arguments (the arity
%% of the fun can be found via the label). When a call is made through
%% the closure, the free variables are extracted from the closure by
%% the 'call_fun' operation and are placed in the X registers
%% following the ones used for the normal parameters; then the call is
%% made to the function label.
%%
%% - In HiPE (when compiling from Beam bytecode), the Beam-to-Icode
%% translation rewrites the fun-functions (those referenced by
%% 'make_fun' operations) so that the code expects only the normal
%% parameters, plus *one* extra parameter containing the closure
%% itself, and then immediately extracts the free variables from the
%% closure - the code knows how many free variables it expects.
%% However, the arity part of the function name is *not* changed;
%% thus, the native code and the Beam code still use the same
%% fun-table entry. The arity value used in native-code 'make_fun'
%% operations should therefore be the same as in Beam, i.e., the sum
%% of the number of parameters and the number of free variables.
-module(cerl_cconv).
-export([transform/2]).
-export([core_transform/2]).
-include("cerl_hipe_primops.hrl").
%% A descriptor for top-level and letrec-bound functions. (Top-level
%% functions always have an empty list of free variables.) The 'name'
%% field is the name of the lifted function, and is thus unique over the
%% whole module.
-record(function, {name :: {atom(), arity()}, free}).
%% A record for holding fun-information (if such information is attached
%% as an annotation on a fun, it should preferably be preserved).
-record(fun_info, {name :: atom(),
id = 0 :: integer(),
hash = 0 :: integer()}).
%% @spec core_transform(Module::cerl_records(), Options::[term()]) ->
%% cerl_records()
%%
%% @doc Transforms a module represented by records. See
%% <code>transform/2</code> for details.
%%
%% <p>Use the compiler option <code>{core_transform, cerl_cconv}</code>
%% to insert this function as a compilation pass.</p>
%%
%% @see transform/2
-spec core_transform(cerl:cerl(), [term()]) -> cerl:cerl().

%% Record-based entry point: convert to the cerl representation, run the
%% closure conversion, and convert back to records.
core_transform(M, Opts) ->
    Tree = cerl:from_records(M),
    Converted = transform(Tree, Opts),
    cerl:to_records(Converted).
%% @spec transform(Module::cerl(), Options::[term()]) -> cerl()
%%
%% cerl() = cerl:cerl()
%%
%% @doc Rewrites a Core Erlang module so that all fun-expressions
%% (lambda expressions) in the code are in top level function
%% definitions, and the operators of all `apply'-expressions are names
%% of such top-level functions. The primitive operations `make_fun' and
%% `call_fun' are inserted in the code to create and apply functional
%% values; this transformation is known as "Closure Conversion"
%%
%% <p>See the module {@link cerl_to_icode} for details.</p>
-spec transform(cerl:c_module(), [term()]) -> cerl:c_module().

%% Closure-convert a whole module: translate every top-level definition,
%% then append the functions lifted during translation. s__get_defs/1
%% and the per-definition accumulator are both built in reverse, so one
%% final reverse restores a stable ordering.
transform(Mod, _Options) ->
    ModName = cerl:module_name(Mod),
    S0 = s__new(cerl:atom_val(ModName)),
    {Defs, S1} = module_defs(cerl:module_defs(Mod), env__new(),
                             ren__new(), S0),
    AllDefs = lists:reverse(s__get_defs(S1) ++ Defs),
    cerl:update_c_module(Mod, ModName, cerl:module_exports(Mod),
                         cerl:module_attrs(Mod), AllDefs).
%% Note that the environment is defined on the renamed variables.

%% Translate a single Core Erlang expression.
%%   E   - the expression node (cerl)
%%   Env - maps (renamed) names to 'variable' or a #function{} descriptor
%%   Ren - maps original variable names to their no-shadowing renames
%%   S0  - translation state (lifted defs, used function names, ...)
%% Returns {TranslatedExpr, NewState}. The state must be threaded in
%% exactly this order, since fresh names are generated from it.
expr(E, Env, Ren, S0) ->
    case cerl:type(E) of
        literal ->
            {E, S0};
        var ->
            %% May eta-expand references to known functions; see var/4.
            var(E, Env, Ren, S0);
        values ->
            {Es, S1} = expr_list(cerl:values_es(E), Env, Ren, S0),
            {cerl:update_c_values(E, Es), S1};
        cons ->
            {E1, S1} = expr(cerl:cons_hd(E), Env, Ren, S0),
            {E2, S2} = expr(cerl:cons_tl(E), Env, Ren, S1),
            {cerl:update_c_cons(E, E1, E2), S2};
        tuple ->
            {Es, S1} = expr_list(cerl:tuple_es(E), Env, Ren, S0),
            {cerl:update_c_tuple(E, Es), S1};
        'let' ->
            %% The argument is visited in the outer environment; only
            %% the body sees the newly bound (possibly renamed) vars.
            {A, S1} = expr(cerl:let_arg(E), Env, Ren, S0),
            Vs = cerl:let_vars(E),
            {Vs1, Env1, Ren1} = bind_vars(Vs, Env, Ren),
            {B, S2} = expr(cerl:let_body(E), Env1, Ren1, S1),
            {cerl:update_c_let(E, Vs1, A, B), S2};
        seq ->
            {A, S1} = expr(cerl:seq_arg(E), Env, Ren, S0),
            {B, S2} = expr(cerl:seq_body(E), Env, Ren, S1),
            {cerl:update_c_seq(E, A, B), S2};
        apply ->
            apply_expr(E, Env, Ren, S0);
        call ->
            {M, S1} = expr(cerl:call_module(E), Env, Ren, S0),
            {N, S2} = expr(cerl:call_name(E), Env, Ren, S1),
            {As, S3} = expr_list(cerl:call_args(E), Env, Ren, S2),
            {cerl:update_c_call(E, M, N, As), S3};
        primop ->
            {As, S1} = expr_list(cerl:primop_args(E), Env, Ren, S0),
            N = cerl:primop_name(E),
            {cerl:update_c_primop(E, N, As), S1};
        'case' ->
            %% The clause nodes are visited via expr_list and handled by
            %% the 'clause' branch below.
            {A, S1} = expr(cerl:case_arg(E), Env, Ren, S0),
            {Cs, S2} = expr_list(cerl:case_clauses(E), Env, Ren, S1),
            {cerl:update_c_case(E, A, Cs), S2};
        clause ->
            %% Bind (and possibly rename) the clause variables first,
            %% so patterns, guard and body all see the same renaming.
            Vs = cerl:clause_vars(E),
            {_, Env1, Ren1} = bind_vars(Vs, Env, Ren),
            %% Visit patterns to rename variables.
            Ps = pattern_list(cerl:clause_pats(E), Env1, Ren1),
            {G, S1} = expr(cerl:clause_guard(E), Env1, Ren1, S0),
            {B, S2} = expr(cerl:clause_body(E), Env1, Ren1, S1),
            {cerl:update_c_clause(E, Ps, G, B), S2};
        'fun' ->
            %% Anonymous fun: lifted to a top-level function; see
            %% fun_expr/4.
            fun_expr(E, Env, Ren, S0);
        'receive' ->
            {Cs, S1} = expr_list(cerl:receive_clauses(E), Env, Ren, S0),
            {T, S2} = expr(cerl:receive_timeout(E), Env, Ren, S1),
            {A, S3} = expr(cerl:receive_action(E), Env, Ren, S2),
            {cerl:update_c_receive(E, Cs, T, A), S3};
        'try' ->
            %% Note: the exception variables (Evs) are bound against the
            %% outer Env/Ren, independently of the success variables.
            {A, S1} = expr(cerl:try_arg(E), Env, Ren, S0),
            Vs = cerl:try_vars(E),
            {Vs1, Env1, Ren1} = bind_vars(Vs, Env, Ren),
            {B, S2} = expr(cerl:try_body(E), Env1, Ren1, S1),
            Evs = cerl:try_evars(E),
            {Evs1, Env2, Ren2} = bind_vars(Evs, Env, Ren),
            {H, S3} = expr(cerl:try_handler(E), Env2, Ren2, S2),
            {cerl:update_c_try(E, A, Vs1, B, Evs1, H), S3};
        'catch' ->
            {B, S1} = expr(cerl:catch_body(E), Env, Ren, S0),
            {cerl:update_c_catch(E, B), S1};
        letrec ->
            %% The letrec node itself disappears: its functions are
            %% lifted to the top level, and only the translated body
            %% remains in place.
            {Env1, Ren1, S1} = letrec_defs(cerl:letrec_defs(E), Env,
                                           Ren, S0),
            expr(cerl:letrec_body(E), Env1, Ren1, S1);
        binary ->
            {Segs, S1} = expr_list(cerl:binary_segments(E), Env, Ren, S0),
            {cerl:update_c_binary(E, Segs), S1};
        bitstr ->
            {E1, S1} = expr(cerl:bitstr_val(E), Env, Ren, S0),
            {E2, S2} = expr(cerl:bitstr_size(E), Env, Ren, S1),
            E3 = cerl:bitstr_unit(E),
            E4 = cerl:bitstr_type(E),
            E5 = cerl:bitstr_flags(E),
            {cerl:update_c_bitstr(E, E1, E2, E3, E4, E5), S2}
    end.
%% Visit a list of expressions left to right, threading the state.
expr_list(Es, Env, Ren, S) ->
    lists:mapfoldl(fun (E, SAcc) -> expr(E, Env, Ren, SAcc) end, S, Es).
%% Rewrite a pattern so that its variables use the renamed names.
%% Patterns bind no new names here — bind_vars/3 has already extended
%% Ren for all clause variables — so this is a pure traversal and needs
%% no state threading.
pattern(E, Env, Ren) ->
    case cerl:type(E) of
        literal ->
            E;
        var ->
            %% Apply the no-shadowing renaming to the bound variable.
            cerl:update_c_var(E, ren__map(cerl:var_name(E), Ren));
        values ->
            Es = pattern_list(cerl:values_es(E), Env, Ren),
            cerl:update_c_values(E, Es);
        cons ->
            E1 = pattern(cerl:cons_hd(E), Env, Ren),
            E2 = pattern(cerl:cons_tl(E), Env, Ren),
            cerl:update_c_cons(E, E1, E2);
        tuple ->
            Es = pattern_list(cerl:tuple_es(E), Env, Ren),
            cerl:update_c_tuple(E, Es);
        binary ->
            Es = pattern_list(cerl:binary_segments(E), Env, Ren),
            cerl:update_c_binary(E, Es);
        bitstr ->
            %% Only the value and size sub-patterns can contain
            %% variables; unit/type/flags are copied through.
            E1 = pattern(cerl:bitstr_val(E), Env, Ren),
            E2 = pattern(cerl:bitstr_size(E), Env, Ren),
            E3 = cerl:bitstr_unit(E),
            E4 = cerl:bitstr_type(E),
            E5 = cerl:bitstr_flags(E),
            cerl:update_c_bitstr(E, E1, E2, E3, E4, E5);
        alias ->
            V = pattern(cerl:alias_var(E), Env, Ren),
            P = pattern(cerl:alias_pat(E), Env, Ren),
            cerl:update_c_alias(E, V, P)
    end.
%% Rename the variables of every pattern in the list.
pattern_list(Ps, Env, Ren) ->
    [pattern(P, Env, Ren) || P <- Ps].
%% Top-level definitions: first make every function name known in the
%% environment (top-level names are never renamed), then translate each
%% body with that complete environment in scope.
module_defs(Ds, Env, Ren, S) ->
    {Env1, S1} = bind_module_defs(Ds, Env, S),
    module_defs_1(Ds, [], Env1, Ren, S1).

%% Bind each top-level name to a #function{} descriptor with an empty
%% free-variable list, rejecting duplicate definitions.
bind_module_defs(Ds, Env, S) ->
    lists:foldl(
      fun ({V, _F}, {EnvAcc, SAcc}) ->
              Name = cerl:var_name(V),
              check_function_name(Name, SAcc),
              SAcc1 = s__add_function_name(Name, SAcc),
              Descr = #function{name = Name, free = []},
              {env__bind(Name, Descr, EnvAcc), SAcc1}
      end, {Env, S}, Ds).
%% Abort the transformation (with a report) if a top-level function name
%% has already been seen.
check_function_name(Name, S) ->
    case s__is_function_name(Name, S) of
        false ->
            ok;
        true ->
            error_msg("multiple definitions of function `~w'.", [Name]),
            exit(error)
    end.
%% Translate each top-level body, tracking the enclosing function in the
%% state (needed for generating names of lifted funs). The accumulator
%% is built in reverse; the caller reverses the final definition list.
module_defs_1(Ds, Acc0, Env, Ren, S0) ->
    lists:foldl(
      fun ({V, F}, {Acc, S}) ->
              S1 = s__enter_function(cerl:var_name(V), S),
              %% The parameters should never need renaming, but going
              %% through bind_vars is the simplest way to scope them.
              {Vs, Env1, Ren1} = bind_vars(cerl:fun_vars(F), Env, Ren),
              {B, S2} = expr(cerl:fun_body(F), Env1, Ren1, S1),
              {[{V, cerl:update_c_fun(F, Vs, B)} | Acc], S2}
      end, {Acc0, S0}, Ds).
%% First we must create the new function names and set up the
%% environment with descriptors for the letrec-bound functions.
%%
%% Since we never shadow variables, the free variables of any
%% letrec-bound fun can always be referenced directly wherever the
%% fun-variable itself is referenced - this is important when we create
%% direct calls to lifted letrec-bound functions, and is the main reason
%% why we do renaming. For example:
%%
%% 'f'/0 = fun () ->
%% let X = 42 in
%% letrec 'g'/1 = fun (Y) -> {X, Y} in
%% let X = 17 in
%% apply 'g'/1(X)
%%
%% will become something like
%%
%% 'f'/0 = fun () ->
%% let X = 42 in
%% let X1 = 17 in
%% apply 'g'/2(X1, X)
%% 'g'/2 = fun (Y, X) -> {X, Y}
%%
%% where the innermost X has been renamed so that the outermost X can be
%% referenced in the call to the lifted function 'g'/2. (Renaming must
%% of course also be applied also to letrec-bound function variables.)
%%
%% Furthermore, if some variable X occurs free in a fun 'f'/N, and 'f'/N
%% it its turn occurs free in a fun 'g'/M, then we transitively count X
%% as free in 'g'/M, even if it has no occurrence there. This allows us
%% to rewrite code such as the following:
%%
%% 'f'/0 = fun () ->
%% let X = 42 in
%% letrec 'g'/1 = fun (Y) -> {X, Y}
%% 'h'/1 = fun (Z) -> {'bar', apply 'g'/1(Z)}
%% in let X = 17 in
%% apply 'h'/1(X)
%%
%% into something like:
%%
%% 'f'/0 = fun () ->
%% let X = 42 in
%% let X1 = 17 in
%% apply 'h'/2(X1, X)
%% 'g'/2 = fun (Y, X) -> {X, Y}
%% 'h'/2 = fun (Z, X) -> {'bar', apply 'g'/2(Z, X)}
%%
%% which uses only direct calls. The drawback is that if the occurrence
%% of 'f'/N in 'g'/M instead would cause a closure to be created, then
%% that closure could have been formed earlier (at the point where 'f'/N
%% was defined), rather than passing on all the free variables of 'f'/N
%% into 'g'/M. Since we must know the interface to 'g'/M (i.e., the
%% total number of parameters) before we begin processing its body, and
%% the interface depends on what we do to the body (and functions can be
%% mutually recursive), this problem can only be solved by finding out
%% _what_ we are going to do before we can even define the interfaces of
%% the functions, by looking at _how_ variables are being referenced
%% when we look for free variables. Currently, we don't do that.
%% Handle a letrec: first extend environment/renaming with descriptors
%% for all the letrec-bound functions, then lift their bodies.
letrec_defs(Ds, Env, Ren, S) ->
    {Env1, Ren1, S1} = bind_letrec_defs(Ds, Env, Ren, S),
    S2 = lift_letrec_defs(Ds, Env1, Ren1, S1),
    {Env1, Ren1, S2}.

%% Note: the *renamed* free variables are stored in each descriptor.
bind_letrec_defs(Ds, Env, Ren, S) ->
    Free = free_in_defs(Ds, Env, Ren),
    bind_letrec_defs(Ds, Free, Env, Ren, S).

bind_letrec_defs(Ds, Free, Env, Ren, S) ->
    lists:foldl(
      fun ({V, _F}, {EnvAcc, RenAcc, SAcc}) ->
              bind_letrec_fun(cerl:var_name(V), Free, EnvAcc, RenAcc,
                              SAcc)
      end, {Env, Ren, S}, Ds).
%% Bind one letrec-defined function: its lifted arity is the source
%% arity plus the number of free variables it will receive as extra
%% parameters.
bind_letrec_fun({_, Arity} = Name, Free, Env, Ren, S) ->
    LiftedArity = Arity + length(Free),
    {Name1, Ren1, S1} = rename_letrec_fun(Name, LiftedArity, Env, Ren, S),
    Descr = #function{name = Name1, free = Free},
    {env__bind(Name1, Descr, Env), Ren1, S1}.
%% Pick an informative name for the lifted function that neither occurs
%% in the environment nor has been used for another lifted function,
%% record the renaming, and reserve the name.
rename_letrec_fun(Name, NewArity, Env, Ren, S) ->
    {New, S1} = new_letrec_fun_name(Name, NewArity, Env, S),
    Ren1 = ren__add(Name, New, Ren),
    {New, Ren1, s__add_function_name(New, S1)}.

new_letrec_fun_name({N, _}, Arity, Env, S) ->
    {FName, FArity} = s__get_function(S),
    Base = fun_name_base(FName, FArity)
        ++ "-letrec-" ++ atom_to_list(N) ++ "-",
    %% Trying the bare base first usually succeeds; only on a clash do
    %% we fall back to numbered names.
    Candidate = {list_to_atom(Base), Arity},
    case env__is_defined(Candidate, Env)
        orelse s__is_function_name(Candidate, S) of
        true ->
            new_fun_name(Base, Arity, Env, S);
        false ->
            {Candidate, S}
    end.
%% Lift the body of every function in the letrec, looking up each
%% function's (already renamed) descriptor in the environment.
lift_letrec_defs(Ds, Env, Ren, S) ->
    lists:foldl(
      fun ({V, F}, SAcc) ->
              Descr = env__get(ren__map(cerl:var_name(V), Ren), Env),
              lift_letrec_fun(F, Descr, Env, Ren, SAcc)
      end, S, Ds).
%% Direct calling convention for letrec-defined functions: the (already
%% renamed) free variables are appended as extra parameters. They are
%% already in the environment here, so appending them to the parameter
%% list just puts them in scope in the lifted function.
%%
%% The original parameters cannot really clash with the free ones (then
%% they would not be free), but running them through bind_vars anyway is
%% the easiest way to scope them.
lift_letrec_fun(F, #function{name = Name, free = Free}, Env, Ren, S) ->
    {Params, Env1, Ren1} = bind_vars(cerl:fun_vars(F), Env, Ren),
    {Body, S1} = expr(cerl:fun_body(F), Env1, Ren1, S),
    FreeVs = [cerl:c_var(V) || V <- Free],
    s__add_def(cerl:c_var(Name), cerl:c_fun(Params ++ FreeVs, Body), S1).
%% Simple treatment of mutual recursion in a letrec group: a variable
%% counts as free in *all* of the functions if it is free in any of
%% them. (Taking the per-function transitive closure would be tighter.)
free_in_defs(Ds, Env, Ren) ->
    {Bound, Free} = free_in_defs(Ds, [], [], Ren),
    closure_vars(ordsets:subtract(Free, Bound), Env, Ren).

%% Collect the (renamed) names bound by the group and all names that
%% occur free in any member.
free_in_defs([{V, F} | Ds], Bound, Free, Ren) ->
    Fs = cerl_trees:free_variables(F),
    BoundName = ren__map(cerl:var_name(V), Ren),
    free_in_defs(Ds, [BoundName | Bound], Fs ++ Free, Ren);
free_in_defs([], Bound, Free, _Ren) ->
    {ordsets:from_list(Bound), ordsets:from_list(Free)}.
%% Replace every function-valued variable (a {Name, Arity} tuple) by the
%% free variables of that function; plain variables pass through.
closure_vars(Vs, Env, Ren) ->
    closure_vars(Vs, [], Env, Ren).

closure_vars([{_, _} = FunVar | Vs], Acc, Env, Ren) ->
    case env__lookup(ren__map(FunVar, Ren), Env) of
        {ok, #function{free = Free}} ->
            closure_vars(Vs, Free ++ Acc, Env, Ren);
        _ ->
            closure_vars(Vs, Acc, Env, Ren)
    end;
closure_vars([V | Vs], Acc, Env, Ren) ->
    closure_vars(Vs, [V | Acc], Env, Ren);
closure_vars([], Acc, _Env, _Ren) ->
    ordsets:from_list(Acc).
%% No-shadowing strategy: bind each variable, renaming it on the fly
%% only when the name is already defined in the environment.
bind_vars(Vs, Env, Ren) ->
    bind_vars(Vs, [], Env, Ren).

bind_vars([V | Vs], Acc, Env, Ren) ->
    {NewName, Ren1} = rename_var(cerl:var_name(V), Env, Ren),
    bind_vars(Vs, [cerl:update_c_var(V, NewName) | Acc],
              env__bind(NewName, variable, Env), Ren1);
bind_vars([], Acc, Env, Ren) ->
    {lists:reverse(Acc), Env, Ren}.
%% Keep the name if it is fresh; otherwise invent a new one and record
%% the renaming.
rename_var(Name, Env, Ren) ->
    case env__is_defined(Name, Env) of
        true ->
            New = env__new_name(Env),
            {New, ren__add(Name, New, Ren)};
        false ->
            {Name, Ren}
    end.
%% This handles variable references *except* in function application
%% operator positions (see apply_expr/4).
%%
%% The Beam compiler annotates function-variable references with 'id'
%% info, eventually transforming a direct reference such as "fun f/2"
%% into a new fun-expression "fun (X1,X2) -> apply f/2(X1,X2)" for which
%% the info is used to create the lifted function as for any other fun.
%% We do the same thing for function-bound variables.

%% A plain variable is just renamed; a reference to a known function is
%% eta-expanded into a wrapper fun and handed to fun_expr_1/5, which
%% lifts it and emits a make_fun primop in its place.
%% NOTE(review): Arity here is taken from the lifted name (original
%% arity plus free count), and fun_expr_1 adds length(Vs) again when
%% forming the wrapper's name — confirm this double counting is the
%% intended Beam-compatible naming for letrec-bound references.
var(V, Env, Ren, S) ->
    Name = ren__map(cerl:var_name(V), Ren),
    case lookup_var(Name, Env) of
        #function{name = F, free = Vs} ->
            {_, Arity} = F,
            Vs1 = make_vars(Arity),
            C = cerl:c_apply(cerl:c_var(F), Vs1),
            E = cerl:ann_c_fun(cerl:get_ann(V), Vs1, C),
            fun_expr_1(E, Vs, Env, Ren, S);
        variable ->
            {cerl:update_c_var(V, Name), S}
    end.
%% Fetch the binding of a (renamed) variable; an unbound variable is a
%% fatal error in the input module.
lookup_var(V, Env) ->
    case env__lookup(V, Env) of
        error ->
            error_msg("unbound variable `~P'.", [V, 5]),
            exit(error);
        {ok, Binding} ->
            Binding
    end.
%% Build N fresh parameter variables [X<N>, ..., X1], highest index
%% first.
make_vars(0) ->
    [];
make_vars(N) when N > 0 ->
    Var = cerl:c_var(list_to_atom("X" ++ integer_to_list(N))),
    [Var | make_vars(N - 1)].
%% Any fun not bound by a module or letrec definition becomes an
%% explicit closure via the make_fun primop. (Ordinary let-bindings of
%% funs, as in "let F = fun ... in ... apply F(...)", are not tracked.)
%%
%% Following the Beam naming convention, the free variables are counted
%% into the arity of the generated name even though the lifted function
%% takes a different number of parameters.
fun_expr(F, Env, Ren, S) ->
    Free = closure_vars(cerl_trees:free_variables(F), Env, Ren),
    fun_expr_1(F, [cerl:c_var(V) || V <- Free], Env, Ren, S).
%% Lift the fun under a Beam-style name (arity includes the free count)
%% and replace the expression with a make_fun primop.
fun_expr_1(F, FreeVs, Env, Ren, S) ->
    NameArity = cerl:fun_arity(F) + length(FreeVs), % for the name only
    {Info, S1} = fun_info(F, Env, S),
    Name = {Info#fun_info.name, NameArity},
    S2 = lift_fun(Name, F, FreeVs, Env, Ren, S1),
    {make_fun_primop(Name, FreeVs, Info, F, S2), S2}.
%% Emit the make_fun primop that creates the closure at runtime:
%% module, lifted name, Beam-style arity, the fun's hash and id, and
%% the list of free variables to capture.
make_fun_primop({Name, Arity}, Free, #fun_info{id = Id, hash = Hash},
                F, S) ->
    Args = [cerl:c_atom(s__get_module_name(S)),
            cerl:c_atom(Name),
            cerl:c_int(Arity),
            cerl:c_int(Hash),
            cerl:c_int(Id),
            cerl:make_list(Free)],
    cerl:update_c_primop(F, cerl:c_atom(?PRIMOP_MAKE_FUN), Args).
%% Use the fun-info attached as an 'id' annotation when present (name,
%% id and hash as assigned by the Beam compiler); otherwise fabricate a
%% fresh name with id and hash 0.
fun_info(E, Env, S) ->
    case lists:keyfind(id, 1, cerl:get_ann(E)) of
        {id, {Id, Hash, AnnName}} ->
            {#fun_info{name = AnnName, id = Id, hash = Hash}, S};
        _ ->
            io:fwrite("Warning - fun not annotated: "
                      "making up new name.\n"), % for now
            {{MadeUp, _Arity}, S1} = new_fun_name(E, Env, S),
            {#fun_info{name = MadeUp, id = 0, hash = 0}, S1}
    end.
%% "-<function>/<arity>" — the common prefix for generated fun names.
fun_name_base(FName, FArity) ->
    lists:concat(["-", FName, "/", FArity]).
%% Generate a name for the lifted fun using the same convention as the
%% Beam compiler: "-<enclosing fun>/<arity>-fun-<N>".
new_fun_name(F, Env, S) ->
    {FName, FArity} = s__get_function(S),
    new_fun_name(fun_name_base(FName, FArity) ++ "-fun-",
                 cerl:fun_arity(F), Env, S).
%% Create a numbered function name that is neither in the environment
%% nor already reserved for another lifted function.
new_fun_name(Base, Arity, Env, S) ->
    MakeName = fun (I) ->
                       {list_to_atom(Base ++ integer_to_list(I)), Arity}
               end,
    new_fun_name(Base, Arity, Env, S, MakeName).

new_fun_name(Base, Arity, Env, S, MakeName) ->
    %% env__new_function_name/2 yields a different candidate on every
    %% call even for the same Env and MakeName, so this loop terminates.
    Candidate = env__new_function_name(MakeName, Env),
    case s__is_function_name(Candidate, S) of
        false ->
            {Candidate, S};
        true ->
            new_fun_name(Base, Arity, Env, S, MakeName)
    end.
%% Lift the fun to a top-level function using the closure calling
%% convention (the closure itself is passed as the final parameter).
%%
%% If the name is already registered we assume the function was lifted
%% before — typically a 'fun f/n' reference that was duplicated before
%% being rewritten into a fun-expression, whose name comes from its
%% annotation — and skip it; otherwise we reserve the name and lift.
lift_fun(Name, F, Free, Env, Ren, S) ->
    case s__is_function_name(Name, S) of
        false ->
            lift_fun_1(Name, F, Free, Env, Ren,
                       s__add_function_name(Name, S));
        true ->
            S
    end.
%% Build the lifted closure-entry function: the original parameters plus
%% one trailing parameter holding the closure, whose free variables are
%% unpacked at entry.
lift_fun_1(Name, F, Free, Env, Ren, S) ->
    %% (The original parameters must be added to the environment before
    %% we generate the new variable for the closure parameter.)
    {Vs, Env1, Ren1} = bind_vars(cerl:fun_vars(F), Env, Ren),
    V = env__new_name(Env1),
    Env2 = env__bind(V, variable, Env1),
    {B, S1} = expr(cerl:fun_body(F), Env2, Ren1, S),
    %% We unpack all free variables from the closure upon entering.
    %% (Adding this to the body before we process it would introduce
    %% unnecessary, although harmless, renaming of the free variables.)
    Es = closure_elements(length(Free), cerl:c_var(V)),
    B1 = cerl:c_let(Free, cerl:c_values(Es), B),
    %% The closure itself is passed as the last argument. The new
    %% function is annotated as being a closure-call entry point, and
    %% records the fun's original (source) arity.
    E = cerl:ann_c_fun([closure, {closure_orig_arity, cerl:fun_arity(F)}], Vs ++ [cerl:c_var(V)], B1),
    s__add_def(cerl:c_var(Name), E, S1).
%% Generate N fun_element primops that read slots 1..N of closure V.
closure_elements(N, V) ->
    closure_elements(N, N + 1, V).

closure_elements(N, M, V) ->
    [cerl:c_primop(cerl:c_atom(?PRIMOP_FUN_ELEMENT),
                   [cerl:c_int(I), V])
     || I <- lists:seq(M - N, M - 1)].
%% Function applications must be rewritten depending on the
%% operator. For a call to a known top-level function or letrec-bound
%% function, we make a direct call, passing the free variables as extra
%% parameters (we know they are in scope, since variables may not be
%% shadowed). Otherwise, we create an "apply fun" primop call that
%% expects a closure.
apply_expr(E, Env, Ren, S) ->
    %% Arguments are translated first, in the current environment.
    {As, S1} = expr_list(cerl:apply_args(E), Env, Ren, S),
    Op = cerl:apply_op(E),
    case cerl:is_c_var(Op) of
        true ->
            Name = ren__map(cerl:var_name(Op), Ren),
            case lookup_var(Name, Env) of
                #function{name = F, free = Vs} ->
                    %% Known function: direct call with the (renamed)
                    %% free variables appended to the argument list.
                    Vs1 = As ++ [cerl:c_var(V) || V <- Vs],
                    {cerl:update_c_apply(E, cerl:c_var(F), Vs1), S1};
                variable ->
                    %% Plain variable holding a closure at runtime.
                    apply_expr_1(E, Op, As, Env, Ren, S1)
            end;
        _ ->
            %% Arbitrary operator expression: evaluate to a closure.
            apply_expr_1(E, Op, As, Env, Ren, S1)
    end.
%% Note that this primop call only communicates the necessary
%% information to the core-to-icode stage, which rewrites it to use the
%% real calling convention for funs.
%% @doc Turns an application of an unknown operator into a primop call
%% carrying the operator and the argument list; the core-to-icode
%% stage rewrites it to the real calling convention for funs.
apply_expr_1(E, Op, As, Env, Ren, S) ->
    {Op1, S1} = expr(Op, Env, Ren, S),
    ArgList = cerl:make_list(As),
    Call = cerl:update_c_primop(E, cerl:c_atom(?PRIMOP_APPLY_FUN), [Op1, ArgList]),
    {Call, S1}.
%% ---------------------------------------------------------------------
%% Environment
%% Thin wrappers around the `rec_env' environment module, kept behind
%% this interface so the representation could be swapped out locally.

%% @doc Returns a fresh, empty environment.
env__new() ->
    rec_env:empty().

%% @doc Binds Key to Value; returns the extended environment.
env__bind(Key, Value, Env) ->
    rec_env:bind(Key, Value, Env).

%% @doc Looks up Key (see rec_env:lookup/2 for the result shape).
env__lookup(Key, Env) ->
    rec_env:lookup(Key, Env).

%% @doc Fetches the value bound to Key; assumes the key is present.
env__get(Key, Env) ->
    rec_env:get(Key, Env).

%% @doc Returns `true' if Key is bound in Env.
env__is_defined(Key, Env) ->
    rec_env:is_defined(Key, Env).

%% @doc Generates a name not already used in Env.
env__new_name(Env) ->
    rec_env:new_key(Env).

%% @doc Generates, via F, a name not already used in Env.
env__new_function_name(F, Env) ->
    rec_env:new_key(F, Env).
%% ---------------------------------------------------------------------
%% Renaming
%% Renaming table: an immutable Key -> NewName mapping backed by `dict'.

%% @doc Returns an empty renaming.
ren__new() ->
    dict:new().

%% @doc Records that Key should be renamed to Value.
ren__add(Key, Value, Ren) ->
    dict:store(Key, Value, Ren).

%% @doc Returns the new name for Key, or Key itself when no renaming
%% has been recorded for it.
ren__map(Key, Ren) ->
    case dict:find(Key, Ren) of
        error ->
            Key;
        {ok, NewName} ->
            NewName
    end.
%% ---------------------------------------------------------------------
%% State
%% Compiler-pass state: the module being compiled, the function under
%% processing, the set of used top-level function names, reference
%% bookkeeping, and the accumulated lifted definitions.
-record(state, {module :: module(), function :: {atom(), arity()},
                names, refs, defs = []}).

%% @doc Returns a fresh state for compiling Module.
s__new(Module) ->
    #state{module = Module, names = sets:new(), refs = dict:new()}.

%% @doc Marks Name as a used top-level function name.
s__add_function_name(Name, S = #state{names = Names}) ->
    S#state{names = sets:add_element(Name, Names)}.

%% @doc Returns `true' if Name is already a used top-level function name.
s__is_function_name(Name, #state{names = Names}) ->
    sets:is_element(Name, Names).

%% @doc Returns the name of the module being compiled.
s__get_module_name(#state{module = Module}) ->
    Module.

%% @doc Records F as the function currently being processed.
s__enter_function(F, S) ->
    S#state{function = F}.

%% @doc Returns the function currently being processed.
s__get_function(#state{function = F}) ->
    F.

%% @doc Prepends the lifted definition {V, F} to the definition list.
s__add_def(V, F, S = #state{defs = Defs}) ->
    S#state{defs = [{V, F} | Defs]}.

%% @doc Returns the accumulated lifted definitions, most recent first.
s__get_defs(#state{defs = Defs}) ->
    Defs.
%% ---------------------------------------------------------------------
%% Reporting
%% internal_error_msg(S) ->
%% internal_error_msg(S, []).
%% internal_error_msg(S, Vs) ->
%% error_msg(lists:concat(["Internal error: ", S]), Vs).
%% error_msg(S) ->
%% error_msg(S, []).
%% @doc Logs S (an io:format-style control string) with arguments Vs
%% through the error logger, prefixed with this module's name.
error_msg(S, Vs) ->
    error_logger:error_msg(lists:concat([?MODULE, ": ", S, "\n"]), Vs).
%% warning_msg(S) ->
%% warning_msg(S, []).
%% warning_msg(S, Vs) ->
%% info_msg(lists:concat(["warning: ", S]), Vs).
%% info_msg(S) ->
%% info_msg(S, []).
%% info_msg(S, Vs) ->
%% error_logger:info_msg(lists:concat([?MODULE, ": ", S, "\n"]), Vs). | dependencies/otp/r15b03-1/lib/hipe/cerl/cerl_cconv.erl | 0.540196 | 0.481576 | cerl_cconv.erl | starcoder |
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2001-2015. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% =====================================================================
%% General Balanced Trees - highly efficient dictionaries.
%%
%% Copyright (C) 1999-2001 <NAME>, <NAME>
%%
%% An efficient implementation of Prof. <NAME>'s General
%% Balanced Trees. These have no storage overhead compared to plain
%% unbalanced binary trees, and their performance is in general better
%% than AVL trees.
%% ---------------------------------------------------------------------
%% Operations:
%%
%% - empty(): returns empty tree.
%%
%% - is_empty(T): returns 'true' if T is an empty tree, and 'false'
%% otherwise.
%%
%% - size(T): returns the number of nodes in the tree as an integer.
%% Returns 0 (zero) if the tree is empty.
%%
%% - lookup(X, T): looks up key X in tree T; returns {value, V}, or
%% `none' if the key is not present.
%%
%% - get(X, T): retrieves the value stored with key X in tree T. Assumes
%% that the key is present in the tree.
%%
%% - insert(X, V, T): inserts key X with value V into tree T; returns
%% the new tree. Assumes that the key is *not* present in the tree.
%%
%% - update(X, V, T): updates key X to value V in tree T; returns the
%% new tree. Assumes that the key is present in the tree.
%%
%% - enter(X, V, T): inserts key X with value V into tree T if the key
%% is not present in the tree, otherwise updates key X to value V in
%% T. Returns the new tree.
%%
%% - delete(X, T): removes key X from tree T; returns new tree. Assumes
%% that the key is present in the tree.
%%
%% - delete_any(X, T): removes key X from tree T if the key is present
%% in the tree, otherwise does nothing; returns new tree.
%%
%% - balance(T): rebalances tree T. Note that this is rarely necessary,
%% but may be motivated when a large number of entries have been
%% deleted from the tree without further insertions. Rebalancing could
%% then be forced in order to minimise lookup times, since deletion
%% only does not rebalance the tree.
%%
%% - is_defined(X, T): returns `true' if key X is present in tree T, and
%% `false' otherwise.
%%
%% - keys(T): returns an ordered list of all keys in tree T.
%%
%% - values(T): returns the list of values for all keys in tree T,
%% sorted by their corresponding keys. Duplicates are not removed.
%%
%% - to_list(T): returns an ordered list of {Key, Value} pairs for all
%% keys in tree T.
%%
%% - from_orddict(L): turns an ordered list L of {Key, Value} pairs into
%% a tree. The list must not contain duplicate keys.
%%
%% - smallest(T): returns {X, V}, where X is the smallest key in tree T,
%% and V is the value associated with X in T. Assumes that the tree T
%% is nonempty.
%%
%% - largest(T): returns {X, V}, where X is the largest key in tree T,
%% and V is the value associated with X in T. Assumes that the tree T
%% is nonempty.
%%
%% - take_smallest(T): returns {X, V, T1}, where X is the smallest key
%% in tree T, V is the value associated with X in T, and T1 is the
%% tree T with key X deleted. Assumes that the tree T is nonempty.
%%
%% - take_largest(T): returns {X, V, T1}, where X is the largest key
%% in tree T, V is the value associated with X in T, and T1 is the
%% tree T with key X deleted. Assumes that the tree T is nonempty.
%%
%% - iterator(T): returns an iterator that can be used for traversing
%% the entries of tree T; see `next'. The implementation of this is
%% very efficient; traversing the whole tree using `next' is only
%% slightly slower than getting the list of all elements using
%% `to_list' and traversing that. The main advantage of the iterator
%% approach is that it does not require the complete list of all
%% elements to be built in memory at one time.
%%
%% - iterator_from(K, T): returns an iterator that can be used for
%% traversing the entries of tree T with key greater than or
%% equal to K; see `next'.
%%
%% - next(S): returns {X, V, S1} where X is the smallest key referred to
%% by the iterator S, and S1 is the new iterator to be used for
%% traversing the remaining entries, or the atom `none' if no entries
%% remain.
%%
%% - map(F, T): maps the function F(K, V) -> V' to all key-value pairs
%% of the tree T and returns a new tree T' with the same set of keys
%% as T and the new set of values V'.
-module(gb_trees).
-export([empty/0, is_empty/1, size/1, lookup/2, get/2, insert/3,
update/3, enter/3, delete/2, delete_any/2, balance/1,
is_defined/2, keys/1, values/1, to_list/1, from_orddict/1,
smallest/1, largest/1, take_smallest/1, take_largest/1,
iterator/1, iterator_from/2, next/1, map/2]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Data structure:
%% - {Size, Tree}, where `Tree' is composed of nodes of the form:
%% - {Key, Value, Smaller, Bigger}, and the "empty tree" node:
%% - nil.
%%
%% I make no attempt to balance trees after deletions. Since deletions
%% don't increase the height of a tree, I figure this is OK.
%%
%% Original balance condition h(T) <= ceil(c * log(|T|)) has been
%% changed to the similar (but not quite equivalent) condition 2 ^ h(T)
%% <= |T| ^ c. I figure this should also be OK.
%%
%% Performance is comparable to the AVL trees in the Erlang book (and
%% faster in general due to less overhead); the difference is that
%% deletion works for my trees, but not for the book's trees. Behaviour
%% is logarithmic (as it should be).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Some macros.
-define(p, 2). % It seems that p = 2 is optimal for sorted keys
-define(pow(A, _), A * A). % correct with exponent as defined above.
-define(div2(X), X bsr 1).
-define(mul2(X), X bsl 1).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Some types.
-export_type([tree/0, tree/2, iter/0, iter/2]).
-type gb_tree_node(K, V) :: 'nil'
| {K, V, gb_tree_node(K, V), gb_tree_node(K, V)}.
-opaque tree(Key, Value) :: {non_neg_integer(), gb_tree_node(Key, Value)}.
-type tree() :: tree(_, _).
-opaque iter(Key, Value) :: [gb_tree_node(Key, Value)].
-type iter() :: iter(_, _).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec empty() -> tree().
%% Returns a new, empty tree: zero nodes and the nil root.
empty() ->
    {0, nil}.
-spec is_empty(Tree) -> boolean() when
Tree :: tree().
%% A tree is empty exactly when it is the canonical empty term.
is_empty(Tree) ->
    Tree =:= {0, nil}.
-spec size(Tree) -> non_neg_integer() when
Tree :: tree().
%% Returns the node count, which is cached in the first element of the
%% pair; the guard also sanity-checks that the argument looks like a tree.
size({Size, _}) when is_integer(Size), Size >= 0 ->
    Size.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec lookup(Key, Tree) -> 'none' | {'value', Value} when
Tree :: tree(Key, Value).
%% Finds Key in the tree; returns {value, V} or `none'.
lookup(Key, {_, Root}) ->
    lookup_1(Key, Root).

%% The term order is an arithmetic total order, so we should not
%% test exact equality for the keys. (If we do, then it becomes
%% possible that neither `>', `<', nor `=:=' matches.) Testing '<'
%% and '>' first is statistically better than testing for
%% equality, and also allows us to skip the test completely in the
%% remaining case.
lookup_1(_Key, nil) ->
    none;
lookup_1(Key, {NodeKey, _, Left, _}) when Key < NodeKey ->
    lookup_1(Key, Left);
lookup_1(Key, {NodeKey, _, _, Right}) when Key > NodeKey ->
    lookup_1(Key, Right);
lookup_1(_Key, {_, Value, _, _}) ->
    {value, Value}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% This is a specialized version of `lookup'.
-spec is_defined(Key, Tree) -> boolean() when
Tree :: tree(Key, Value :: term()).
%% Membership test; a specialized version of `lookup'.
is_defined(Key, {_, Root}) ->
    is_defined_1(Key, Root).

is_defined_1(_Key, nil) ->
    false;
is_defined_1(Key, {NodeKey, _, Left, _}) when Key < NodeKey ->
    is_defined_1(Key, Left);
is_defined_1(Key, {NodeKey, _, _, Right}) when Key > NodeKey ->
    is_defined_1(Key, Right);
is_defined_1(_Key, {_, _, _, _}) ->
    true.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% This is a specialized version of `lookup'.
-spec get(Key, Tree) -> Value when
Tree :: tree(Key, Value).
%% Fetches the value for Key; crashes (function_clause) when the key
%% is absent, as the key is assumed present.
get(Key, {_, Root}) ->
    get_1(Key, Root).

get_1(Key, {NodeKey, _, Left, _}) when Key < NodeKey ->
    get_1(Key, Left);
get_1(Key, {NodeKey, _, _, Right}) when Key > NodeKey ->
    get_1(Key, Right);
get_1(_Key, {_, Value, _, _}) ->
    Value.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec update(Key, Value, Tree1) -> Tree2 when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% Replaces the value stored under an existing Key; size and shape of
%% the tree are unchanged, so only the search path is rebuilt.
update(Key, Val, {Size, Root}) ->
    {Size, update_1(Key, Val, Root)}.

%% See `lookup' for notes on the term comparison order.
update_1(Key, Val, {NodeKey, V, Left, Right}) when Key < NodeKey ->
    {NodeKey, V, update_1(Key, Val, Left), Right};
update_1(Key, Val, {NodeKey, V, Left, Right}) when Key > NodeKey ->
    {NodeKey, V, Left, update_1(Key, Val, Right)};
update_1(Key, Val, {_, _, Left, Right}) ->
    {Key, Val, Left, Right}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec insert(Key, Value, Tree1) -> Tree2 when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% @doc Inserts Key with Val, assuming Key is absent (crashes with
%% {key_exists, Key} otherwise). The new size S1 yields the weight
%% budget ?pow(S1, ?p) that is halved on the way down; it runs out
%% exactly when the insertion point is deeper than the height bound
%% allows (see the balance condition 2^h(T) <= |T|^c in the notes).
insert(Key, Val, {S, T}) when is_integer(S) ->
    S1 = S+1,
    {S1, insert_1(Key, Val, T, ?pow(S1, ?p))}.

%% insert_1 returns either the new subtree alone (no rebalancing
%% pending), or a {Tree, Height, Size} triple while searching upward
%% for the lowest ancestor violating the balance condition. (Here
%% "Height" is the 2^h measure also produced by count/1.)
insert_1(Key, Value, {Key1, V, Smaller, Bigger}, S) when Key < Key1 ->
    case insert_1(Key, Value, Smaller, ?div2(S)) of
        {T1, H1, S1} ->
            %% still checking: recompute height/size of this node
            T = {Key1, V, T1, Bigger},
            {H2, S2} = count(Bigger),
            H = ?mul2(erlang:max(H1, H2)),
            SS = S1 + S2 + 1,
            P = ?pow(SS, ?p),
            if
                H > P ->
                    %% this node violates the balance condition: rebuild it
                    balance(T, SS);
                true ->
                    {T, H, SS}
            end;
        T1 ->
            {Key1, V, T1, Bigger}
    end;
insert_1(Key, Value, {Key1, V, Smaller, Bigger}, S) when Key > Key1 ->
    %% mirror image of the Key < Key1 clause
    case insert_1(Key, Value, Bigger, ?div2(S)) of
        {T1, H1, S1} ->
            T = {Key1, V, Smaller, T1},
            {H2, S2} = count(Smaller),
            H = ?mul2(erlang:max(H1, H2)),
            SS = S1 + S2 + 1,
            P = ?pow(SS, ?p),
            if
                H > P ->
                    balance(T, SS);
                true ->
                    {T, H, SS}
            end;
        T1 ->
            {Key1, V, Smaller, T1}
    end;
insert_1(Key, Value, nil, S) when S =:= 0 ->
    %% budget exhausted: the new leaf is too deep, so switch to the
    %% {Tree, Height, Size} mode that triggers rebalancing above
    {{Key, Value, nil, nil}, 1, 1};
insert_1(Key, Value, nil, _S) ->
    {Key, Value, nil, nil};
insert_1(Key, _, _, _) ->
    %% equal keys fall through the two guarded clauses to here
    erlang:error({key_exists, Key}).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec enter(Key, Value, Tree1) -> Tree2 when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% Inserts or updates, depending on whether Key is already present.
enter(Key, Val, Tree) ->
    case is_defined(Key, Tree) of
        false -> insert(Key, Val, Tree);
        true -> update(Key, Val, Tree)
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Computes {Height, Size} for a bare node structure, where "Height"
%% is the doubled-per-level 2^h measure used by the balance condition.
count(nil) ->
    {1, 0};
count({_, _, nil, nil}) ->
    {1, 1};
count({_, _, Left, Right}) ->
    {LH, LS} = count(Left),
    {RH, RS} = count(Right),
    {?mul2(erlang:max(LH, RH)), LS + RS + 1}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec balance(Tree1) -> Tree2 when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% Rebuilds the whole tree into a perfectly balanced shape; mainly
%% useful after many deletions, since deletion never rebalances.
balance({S, T}) ->
    {S, balance(T, S)}.

%% Rebalances a bare node structure T of S entries by flattening it
%% to an ordered list and rebuilding from that list.
balance(T, S) ->
    balance_list(to_list_1(T), S).
%% Builds a perfectly balanced node structure from the ordered list of
%% Count key-value pairs (the whole list must be consumed).
balance_list(Pairs, Count) ->
    {Tree, []} = balance_list_1(Pairs, Count),
    Tree.

%% Consumes the first N pairs of the list, returning the balanced
%% subtree built from them together with the unconsumed tail; the
%% median element becomes the local root.
balance_list_1(Pairs, N) when N > 1 ->
    Rest = N - 1,
    RightCount = Rest div 2,
    LeftCount = Rest - RightCount,
    {Left, [{RootKey, RootVal} | Tail]} = balance_list_1(Pairs, LeftCount),
    {Right, Remaining} = balance_list_1(Tail, RightCount),
    {{RootKey, RootVal, Left, Right}, Remaining};
balance_list_1([{Key, Val} | Tail], 1) ->
    {{Key, Val, nil, nil}, Tail};
balance_list_1(Pairs, 0) ->
    {nil, Pairs}.
-spec from_orddict(List) -> Tree when
List :: [{Key, Value}],
Tree :: tree(Key, Value).
%% Builds a balanced tree from an ordered {Key, Value} list without
%% duplicate keys; the list length becomes the stored size.
from_orddict(L) ->
    S = length(L),
    {S, balance_list(L, S)}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec delete_any(Key, Tree1) -> Tree2 when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% Deletes Key when present; otherwise returns the tree unchanged.
delete_any(Key, Tree) ->
    case is_defined(Key, Tree) of
        false -> Tree;
        true -> delete(Key, Tree)
    end.
%%% delete. Assumes that key is present.
-spec delete(Key, Tree1) -> Tree2 when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% Removes Key, which must be present; the stored size drops by one.
%% Deletion never rebalances (see the module notes above).
delete(Key, {Size, Root}) when is_integer(Size), Size >= 0 ->
    {Size - 1, delete_1(Key, Root)}.

%% See `lookup' for notes on the term comparison order.
delete_1(Key, {NodeKey, Value, Left, Right}) when Key < NodeKey ->
    {NodeKey, Value, delete_1(Key, Left), Right};
delete_1(Key, {NodeKey, Value, Left, Right}) when Key > NodeKey ->
    {NodeKey, Value, Left, delete_1(Key, Right)};
delete_1(_Key, {_, _, Left, Right}) ->
    merge(Left, Right).

%% Joins two subtrees in which every key of Left precedes every key of
%% Right: the smallest entry of Right becomes the new local root.
merge(Left, nil) ->
    Left;
merge(nil, Right) ->
    Right;
merge(Left, Right) ->
    {Key, Value, Right1} = take_smallest1(Right),
    {Key, Value, Left, Right1}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec take_smallest(Tree1) -> {Key, Value, Tree2} when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% Pops the smallest entry off a nonempty tree, returning it together
%% with the shrunken tree.
take_smallest({Size, Root}) when is_integer(Size), Size >= 0 ->
    {Key, Value, Rest} = take_smallest1(Root),
    {Key, Value, {Size - 1, Rest}}.

%% Walks the leftmost spine, rebuilding it without the minimum node.
take_smallest1({Key, Value, nil, Right}) ->
    {Key, Value, Right};
take_smallest1({Key, Value, Left, Right}) ->
    {MinKey, MinVal, Left1} = take_smallest1(Left),
    {MinKey, MinVal, {Key, Value, Left1, Right}}.
-spec smallest(Tree) -> {Key, Value} when
Tree :: tree(Key, Value).
%% Returns the leftmost (smallest-key) entry; the tree must be nonempty.
smallest({_, Root}) ->
    smallest_1(Root).

smallest_1({Key, Value, nil, _Right}) ->
    {Key, Value};
smallest_1({_Key, _Value, Left, _Right}) ->
    smallest_1(Left).
-spec take_largest(Tree1) -> {Key, Value, Tree2} when
Tree1 :: tree(Key, Value),
Tree2 :: tree(Key, Value).
%% Pops the largest entry off a nonempty tree, returning it together
%% with the shrunken tree.
take_largest({Size, Root}) when is_integer(Size), Size >= 0 ->
    {Key, Value, Rest} = take_largest1(Root),
    {Key, Value, {Size - 1, Rest}}.

%% Walks the rightmost spine, rebuilding it without the maximum node.
take_largest1({Key, Value, Left, nil}) ->
    {Key, Value, Left};
take_largest1({Key, Value, Left, Right}) ->
    {MaxKey, MaxVal, Right1} = take_largest1(Right),
    {MaxKey, MaxVal, {Key, Value, Left, Right1}}.
-spec largest(Tree) -> {Key, Value} when
Tree :: tree(Key, Value).
%% Returns the rightmost (largest-key) entry; the tree must be nonempty.
largest({_, Root}) ->
    largest_1(Root).

largest_1({Key, Value, _Left, nil}) ->
    {Key, Value};
largest_1({_Key, _Value, _Left, Right}) ->
    largest_1(Right).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec to_list(Tree) -> [{Key, Value}] when
Tree :: tree(Key, Value).
%% Flattens the tree to a key-ordered list of {Key, Value} pairs.
to_list({_, Root}) ->
    to_list(Root, []).

%% Same, for a bare node structure (used internally by balancing).
to_list_1(Root) ->
    to_list(Root, []).

%% In-order fold, visiting the right subtree first, so the result
%% comes out in ascending key order without a final reverse.
to_list(nil, Acc) ->
    Acc;
to_list({Key, Value, Left, Right}, Acc) ->
    to_list(Left, [{Key, Value} | to_list(Right, Acc)]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec keys(Tree) -> [Key] when
Tree :: tree(Key, Value :: term()).
%% Returns all keys of the tree in ascending order.
keys({_, Root}) ->
    keys(Root, []).

%% In-order fold: right subtree first, so no reverse is needed.
keys(nil, Acc) ->
    Acc;
keys({Key, _Value, Left, Right}, Acc) ->
    keys(Left, [Key | keys(Right, Acc)]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec values(Tree) -> [Value] when
Tree :: tree(Key :: term(), Value).
%% Returns all values sorted by their keys; duplicates are kept.
values({_, Root}) ->
    values(Root, []).

%% In-order fold: right subtree first, so no reverse is needed.
values(nil, Acc) ->
    Acc;
values({_Key, Value, Left, Right}, Acc) ->
    values(Left, [Value | values(Right, Acc)]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec iterator(Tree) -> Iter when
Tree :: tree(Key, Value),
Iter :: iter(Key, Value).
%% Returns an iterator positioned at the smallest key of the tree.
iterator({_, Root}) ->
    iterator_1(Root).

iterator_1(Root) ->
    iterator(Root, []).

%% The iterator structure is really just a list corresponding to
%% the call stack of an in-order traversal. This is quite fast.
iterator(nil, Stack) ->
    Stack;
iterator({_, _, nil, _} = Node, Stack) ->
    [Node | Stack];
iterator({_, _, Left, _} = Node, Stack) ->
    iterator(Left, [Node | Stack]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec iterator_from(Key, Tree) -> Iter when
Tree :: tree(Key, Value),
Iter :: iter(Key, Value).
%% Returns an iterator positioned at the smallest key that is greater
%% than or equal to the start key.
iterator_from(Start, {_, Root}) ->
    iterator_1_from(Start, Root).

iterator_1_from(Start, Root) ->
    iterator_from(Start, Root, []).

%% Like iterator/2, but prunes every node whose key precedes Start.
iterator_from(Start, {Key, _, _, Right}, Stack) when Key < Start ->
    iterator_from(Start, Right, Stack);
iterator_from(_Start, {_, _, nil, _} = Node, Stack) ->
    [Node | Stack];
iterator_from(Start, {_, _, Left, _} = Node, Stack) ->
    iterator_from(Start, Left, [Node | Stack]);
iterator_from(_Start, nil, Stack) ->
    Stack.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec next(Iter1) -> 'none' | {Key, Value, Iter2} when
Iter1 :: iter(Key, Value),
Iter2 :: iter(Key, Value).
%% Pops the current smallest entry off the traversal stack and
%% descends into its right subtree for the successor.
next([]) ->
    none;
next([{Key, Value, _, Right} | Stack]) ->
    {Key, Value, iterator(Right, Stack)}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-spec map(Function, Tree1) -> Tree2 when
Function :: fun((K :: Key, V1 :: Value1) -> V2 :: Value2),
Tree1 :: tree(Key, Value1),
Tree2 :: tree(Key, Value2).
%% Applies F to every key/value pair, keeping keys and shape intact.
map(F, {Size, Root}) when is_function(F, 2) ->
    {Size, map_1(F, Root)}.

map_1(_F, nil) ->
    nil;
map_1(F, {Key, Value, Left, Right}) ->
    {Key, F(Key, Value), map_1(F, Left), map_1(F, Right)}.
%%
%% Query Node
%%
%% @copyright 2015-2019 UP FAMNIT and Yahoo Japan Corporation
%% @version 0.3
%% @since September, 2015
%% @author <NAME> <<EMAIL>>
%%
%% @see b3s
%% @see tp_query_node
%% @see join_query_node
%% @see query_tree
%%
%% @doc This module provides functions commonly shared by query node
%% modules.
%%
%% == streams ==
%%
%% (LINK: {@section streams})
%%
%% The main function of query_node module is to provide interface to streams of
%% graphs implemented among the pairs of query nodes. Each pair of query nodes
%% is connected by N empty messages where N is defined by environment variable
%% *num_of_empty_msgs*. Empty messages functions as wagons of circular
%% "train" forming the stream between the query nodes. Child query node is allowed
%% to send a block to its parent only if it holds an empty message.
%%
%% The streams are implemented by means of process queues holding messages from
%% other processes. The query node queues have two modes of work. Firstly, they
%% serve as standard queues where one can add the message to the beginning and
%% take it from the end of queue.
%% Secondly, data messages include blocks composed of lists of graphs. To be
%% able to process graphs in iterative manner, either when adding graphs to queue,
%% or, when retrieving graphs from queue, we need graph-based interface.
%%
%% Graphs added to queue are first gathered in buffers which are moved into the queue
%% when full. From the opposite side, when reading from queue, data messages are
%% first unpacked and the block
%% is stored into the buffer. Graphs can then be iteratively pulled from buffer---if
%% buffer is empty then another message is retrieved from queue.
%%
%% == operations select and project ==
%%
%% (LINK: {@section operations select and project})
%%
%% Each query node that is a part of some graph-pattern includes in addition to its
%% primary operation as, for example, join or triple-pattern access methods,
%% also the fuctionality of select and project operations.
%%
%% Module query_node provides the functions for processing operations project and select.
%% The function {@link project_prepare/1} prepares the environment
%% for processing projections using function {@link eval_project/1}. The function
%% {@link eval_select/1} evaluates selection on current graph.
%%
%% @type qn_opsym() = tp | join | leftjoin | union | differ.
%% Atom describing query operation can be one of presented.
%%
%% @type qn_id() = string() | integer().
%% @type qn_subject() = string() | integer().
%% @type qn_predicate() = string() | integer().
%% @type qn_object_value() = integer() | real() | calendar:datetime() | string().
%% @type qn_object_type() = undefined | code | integer | real | datetime | string.
%% @type qn_object() = string() | {qn_object_value(), qn_object_type()}.
%% @type qn_var() = string().
%%
%% @type qn_term() = qn_var() | string().
%% Query term used for expressing selection conditions.
%%
%% @type qn_triple() = {qn_id(), qn_subject(), qn_predicate(), qn_object()}.
%% Every triple has id. Graphs can be represented as sets of triples. Relationship
%% between triples and graphs is therefore expressed by means of reification, i.e., sets
%% of triples relating triple ids with graphs. This model allows for expressing
%% semantically more complex relationships, as, for instance, in CycL.
%%
%% @type qn_triple_pattern() = {qn_id() | qn_var(), qn_subject() | qn_var(), qn_predicate() | qn_var(), qn_object() | qn_var()}.
%% Triple patterns are triples (with id) that include variables. Type of triple
%% pattern follows format of qn_triple().
%%
%% @type qn_graph() = maps:map(). Mapping form {@link qn_id()}
%% to {@link qn_triple()}.
%%
%% @type qn_graph_pattern() = maps:map(). Mapping form {@link
%% qn_id()} to {@link qn_triple_pattern()}.
%%
%% @type qn_attribute() = qn_subject() | qn_predicate() | qn_object().
%% Query node attribute is triple component, S, P or O.
%%
%% @type qn_var_val_map() = [{qn_var(), string()}]. Mapping between
%% variables and their values.
%%
%% @type qn_project_list() = [qn_var()].
%% Project list is a list of variables to be eliminated from graph pattern.
%%
%% @type qn_binary_operation() = equal | less | lesseq | greater | greatereq | land | lor
%%
%% @type qn_unary_operation() = lnot.
%%
%% @type qn_select_predicate() = {qn_unary_operation(), qn_term() | qn_select_predicate()} |
%% {qn_term() | qn_select_predicate(), qn_binary_operation(), qn_term() | qn_select_predicate()}.
%% Selection predicate is an expression composed of pairs of terms connected
%% by some operation.
%%
%% @type qn_side() = outer | inner.
%% The side of query node is its position in relation to parent.
%%
-module(query_node).
-export(
[
queue_prepared/1,
eval_project/1,
project_prepare/1,
difference_lists/2,
eval_attribute/1,
eval_select/1,
queue_block_end/1
]).
-include_lib("eunit/include/eunit.hrl").
%% ======================================================================
%%
%% utility
%%
%%
%% @doc Report an error issue to the error_logger. Delegates to
%% node_state:error_msg/4, tagging the report with this module's name.
%%
%% @spec error_msg(atom(), term(), term()) -> ok
%%
error_msg(FunName, Argument, Result) ->
    node_state:error_msg(?MODULE, FunName, Argument, Result).
%%
%% @doc Report an information issue to the error_logger if current
%% debug level is greater than ThresholdDL. Delegates to
%% node_state:info_msg/5, tagging the report with this module's name.
%%
%% @spec info_msg(atom(), term(), term(), integer()) -> ok
%%
info_msg(FunName, Argument, Result, ThresholdDL) ->
    node_state:info_msg(?MODULE, FunName, Argument, Result, ThresholdDL).
%% ======================================================================
%%
%% api
%%
%% ======================================================================
%%
%% query node queue manipulation
%%
%% @doc Initializes queue Queue of type Type by setting appropriate PD entries.
%%
%% Input queues accept queue_write() to enter complete messages including blocks
%% of elements into the queue, and, queue_get() to retrieve elements that
%% constitute message blocks.
%%
%% Output queues use queue_put() to enter elements into the queue, and,
%% queue_read() to read messages including blocks of elements from queue.
%%
%% Queues of type plain are simple queues that use queue_write() to enter messages
%% and queue_read() to retrieve messages from queues.
%%
%% @spec queue_init(Queue::atom(), Type::(input|output|none), MsgHead::atom()) -> ok|fail
%%
%% Initializes the process-dictionary entries backing queue Queue:
%% the queue itself (queue_<Name>), its graph buffer (buff_<Name>),
%% the message head tag (msg_<Name>), plus a sender-pid slot for input
%% queues or a graph counter for output queues. Plain queues get only
%% the common entries.
queue_init(Queue, Type, MsgHead) when (Type == input) or (Type == output) or (Type == plain) ->
    QL = atom_to_list(Queue),
    Q = list_to_atom("queue_" ++ QL),
    B = list_to_atom("buff_" ++ QL),
    C = list_to_atom("cnt_" ++ QL),
    P = list_to_atom("pid_" ++ QL),
    M = list_to_atom("msg_" ++ QL),
    %% common entries: empty queue, empty buffer, message head tag
    put(Q, queue:new()),
    put(B, []),
    put(M, MsgHead),
    %% type-specific entries
    case Type of
        input -> put(P, undefined);
        output -> put(C, 0);
        plain -> ok
    end,
    info_msg(queue_init, [get(self), {queue,Queue}, {Q,get(Q)}, {B,get(B)}, {C,get(C)}, {M,get(M)}, {P,get(P)}, get(state)], init_done, 50);
queue_init(Queue, Type, MsgHead) ->
    error_msg(queue_init, [get(self), {queue,Queue}, {type,Type}, {msg_head,MsgHead}, {all,get()}, get(state)], wrong_queue),
    fail.
%%
%% @doc Check if (input|output) queue Queue is empty and return results as boolean().
%% Input queue is empty then there are no more triples in the queue.
%% Output queue is empty if there are no complete messages prepared to be sent.
%%
%% @spec queue_empty(Queue::atom()) -> boolean()
%%
%% A queue is empty when both its message queue and its graph buffer
%% hold nothing.
queue_empty(Queue) ->
    QL = atom_to_list(Queue),
    Q = list_to_atom("queue_" ++ QL),
    B = list_to_atom("buff_" ++ QL),
    Res = queue:is_empty(get(Q)) and (get(B) =:= []),
    info_msg(queue_empty, [get(self), {queue,Queue}, {B, get(B)}, {Q, get(Q)}, {return,Res}, get(state)], check_empty, 50),
    Res.
%%
%% @doc Check if queue Queue includes messages prepared to be read from or
%% sent to remote process.
%%
%% @spec queue_prepared(Queue::atom()) -> boolean()
%%
%% A queue is prepared when at least one complete message is waiting
%% in it (the graph buffer is not considered here).
queue_prepared(Queue) ->
    QL = atom_to_list(Queue),
    Q = list_to_atom("queue_" ++ QL),
    B = list_to_atom("buff_" ++ QL),
    Res = not queue:is_empty(get(Q)),
    info_msg(queue_prepared, [get(self), {queue,Queue}, {B, get(B)}, {Q, get(Q)}, {return,Res}, get(state)], check_prepared, 50),
    Res.
%%
%% @doc Write message Msg to queue named Queue. Functions queue_write and
%% queue_read deal with queue of messages solely, and do not touch individual
%% graphs that can be included in messages.
%%
%% @spec queue_write(Queue::atom(), Msg::term()) -> ok
%%
%% Appends Msg to the message queue, creating the queue on demand when
%% its dictionary entry has not been initialized yet.
queue_write(Queue, Msg) ->
    Key = list_to_atom("queue_" ++ atom_to_list(Queue)),
    NewQ = case get(Key) of
               undefined -> queue:in(Msg, queue:new());
               Q -> queue:in(Msg, Q)
           end,
    put(Key, NewQ),
    info_msg(queue_write, [get(self), {queue,Queue}, {msg,Msg}, {Key,get(Key)}, get(state)], write_done, 50),
    ok.
%%
%% @doc Read message from queue Queue. Functions queue_write and
%% queue_read deal with queue of messages solely, and do not touch individual
%% graphs that can be included in messages.
%%
%% @spec queue_read(Queue::atom()) -> Msg::term()
%%
%% Removes and returns the oldest message of the queue; crashes with a
%% badmatch when the queue is empty.
queue_read(Queue) ->
    Key = list_to_atom("queue_" ++ atom_to_list(Queue)),
    info_msg(queue_read, [get(self), {queue,Queue}, {Key,get(Key)}, get(state)], read_enter, 50),
    {{value, Msg}, Rest} = queue:out(get(Key)),
    put(Key, Rest),
    info_msg(queue_read, [get(self), {queue,Queue}, {Key,get(Key)}, {return,Msg}, get(state)], read_done, 50),
    Msg.
%%
%% @doc Determines if Queue buffer holding a block of triples is at the end.
%% Method queue_get() must be called before calling queue_block_end()
%% to initialize the queue buffer.
%%
%% NOTE(review): despite the boolean-looking name, the function
%% returns the atom 'fail' (not 'false') for a non-empty buffer, so
%% callers must match on true | fail.
%%
%% @spec queue_block_end(Queue::atom()) -> true | fail
%%
queue_block_end(Queue) ->
    %% set names of dictionary variables
    QL = atom_to_list(Queue),
    B = list_to_atom(string:concat("buff_",QL)),
    BV = get(B), % get buffer
    %% check buffer if it contains no messages
    case BV of
        [] -> Ret = true;
        _ -> Ret = fail
    end,
    info_msg(queue_block_end, [get(self), {queue,Queue}, {B,get(B)}, {return,Ret}, get(state)], check_block_end_done, 50),
    Ret.
%%
%% @doc Read a triple Triple from input queue Queue. Input queue is a queue
%% that receives data messages from some other process. Data messages include
%% lists of Graphs. Access to graphs received from some other process is
%% provided by using function queue_get() and queue_put(). There are two
%% input queues in join query node: from_outer and from_inner.
%%
%% @spec queue_get(Queue::atom()) -> {From::node_state:ns_pid(), Graph::qn_graph()} | fail
%%
%% Reads the next graph from input queue Queue: serve from the local
%% buffer when it still holds unpacked graphs, otherwise unpack the
%% next data message from the queue into the buffer; fail when both
%% are empty.
queue_get(Queue) ->
    %% set names of dictionary variables
    QL = atom_to_list(Queue),
    Q = list_to_atom(string:concat("queue_",QL)),
    B = list_to_atom(string:concat("buff_",QL)),
    P = list_to_atom(string:concat("pid_",QL)),
    M = list_to_atom(string:concat("msg_",QL)),
    %% read dictionary variables
    BV = get(B), % buffered (already unpacked) graphs
    PV = get(P), % pid of the sender of the last unpacked message
    MV = get(M), % expected message head tag
    EQ = queue:is_empty(get(Q)),
    %% retrieve graph
    case {BV, EQ} of
        {[], true} -> %% no more graphs in queue
            Ret = fail;
        {[], false} -> %% empty buffer but not empty queue
            %% matching the bound MV asserts that the message carries
            %% the head tag this queue was initialized with
            {MV, F, [H|T]} = queue_read(Queue),
            put(P, F),
            put(B, T),
            Ret = {F, H};
        {[H|T], _} -> %% data is in buffer
            put(B, T),
            Ret = {PV, H};
        _ -> Ret = fail
    end,
    info_msg(queue_get, [get(self), {queue,Queue}, {Q,get(Q)}, {B,get(B)}, {M,get(M)}, {P,get(P)}, {return,Ret}, get(state)], get_done, 50),
    Ret.
%%
%% @doc Write graph Graph to output queue Queue. Output queue of query
%% nodes is a queue that gathers graphs from local process and then
%% packs list of graphs into messages to be sent to remote process.
%% There is only one output queue of join query node: to_parent.
%%
%% @spec queue_put(Queue::atom(), Graph::qn_graph()) -> ok|fail
%%
%% Writes graph Graph to output queue Queue: graphs accumulate in the
%% buffer (newest first) until block_size of them are gathered, at
%% which point they are packed into a {MsgHead, Self, Block} message
%% and moved into the queue proper.
queue_put(Queue, Graph) ->
    %% set names of dictionary variables
    QL = atom_to_list(Queue),
    Q = list_to_atom(string:concat("queue_",QL)),
    B = list_to_atom(string:concat("buff_",QL)),
    C = list_to_atom(string:concat("cnt_",QL)),
    M = list_to_atom(string:concat("msg_",QL)),
    %% read dictionary variables
    CS = get(block_size), % number of graphs per message block
    BV = get(B), % buffer contents
    CN = get(C), % number of graphs currently buffered
    MV = get(M), % message head tag
    %% insert graph to queue
    case {BV, (CN >= (CS-1))} of
        {[], _} -> %% buff is empty
            put(B, [Graph|[]]),
            put(C, 1),
            Ret = ok;
        {L, false} -> %% buff is not full
            put(B, [Graph|L]),
            put(C, CN+1),
            Ret = ok;
        {L, true} -> %% buff full, store it to queue
            %% reverse restores insertion order before packing
            queue_write(Queue, {MV, get(self), lists:reverse([Graph|L])}),
            put(B, []),
            put(C, 0),
            Ret = ok;
        _ -> Ret = fail
    end,
    info_msg(queue_put, [get(self), {queue,Queue}, {graph,Graph}, {Q,get(Q)}, {B,get(B)}, {M,get(M)}, {C,get(C)}, {return,Ret}, get(state)], put_done, 50),
    Ret.
%%
%% @doc Flush output queue Queue: pack the buffered graphs into a message
%% and append it to the queue, then reset the buffer to [] and the graph
%% counter to 0. An empty buffer produces no message.
%%
%% @spec queue_flush(Queue::atom()) -> ok|fail
%%
queue_flush(Queue) ->
    %% Derive the process-dictionary key names used for this queue.
    Suffix = atom_to_list(Queue),
    Q = list_to_atom("queue_" ++ Suffix),
    B = list_to_atom("buff_" ++ Suffix),
    C = list_to_atom("cnt_" ++ Suffix),
    M = list_to_atom("msg_" ++ Suffix),
    Buffer = get(B),
    Msg = get(M),
    %% Only a non-empty buffer is packed into a message.
    case Buffer of
        [] -> ok;
        _ -> queue_write(Queue, {Msg, get(self), lists:reverse(Buffer)})
    end,
    put(B, []),
    put(C, 0),
    info_msg(queue_flush, [get(self), {queue,Queue}, {Queue,get(Q)}, {B,get(B)}, {M,get(M)}, {C,get(C)}, get(state)], flush_done, 50),
    ok.
%%
%% @doc Test function for join query node queues.
%%
queue_test_() ->
    %% Restart the b3s application to get a clean state before the
    %% generated queue tests run.
    b3s:start(),
    b3s:stop(),
    b3s:start(),
    b3s:bootstrap(),
    {inorder,
     [
%      ?_assertMatch(ok, b3s:start()),
      {generator, fun()-> queue_t1() end},
%      {generator, fun()-> queue_t2() end},
      ?_assertMatch(ok, b3s:stop())
     ]}.
%% EUnit generator exercising the queue primitives (queue_get/queue_put/
%% queue_flush/queue_read/queue_write) with five sample triples packed
%% into blocks of block_size graphs.
queue_t1() ->
    info_msg(queue_t1, [get(self)], start, 50),
    %% Use the configured block size and a fixed test pid.
    {ok, N1} = application:get_env(b3s, block_size),
    put(block_size, N1),
    put(self, testpid0),
    %% init queues
    queue_init(from_db, input, db_block),
    queue_init(to_parent, output, data_outer),
    queue_init(from_parent, plain, empty),
    %% Sample triples and the single-binding graphs built from them.
    T1 = {triple_store, "id23", "tokyo", "isLocatedIn", "japan"},
    T2 = {triple_store, "id24", "kyoto", "isLocatedIn", "japan"},
    T3 = {triple_store, "id25", "osaka", "isLocatedIn", "japan"},
    T4 = {triple_store, "id26", "koper", "isLocatedIn", "slovenia"},
    T5 = {triple_store, "id27", "ljubljana", "isLocatedIn","slovenia"},
    G1 = maps:put("1", T1, maps:new()),
    G2 = maps:put("1", T2, maps:new()),
    G3 = maps:put("1", T3, maps:new()),
    G4 = maps:put("1", T4, maps:new()),
    G5 = maps:put("1", T5, maps:new()),
    %% Expected message blocks (M*) and queue_get results (R*).
    M1 = {db_block, testpid1, [G1, G2, G3, G4, G5]},
    M2 = {data_outer, testpid0, [G1, G2, G3, G4, G5]},
    M3 = {data_outer, testpid0, [G5, G4, G3, G2, G1]},
    M4 = {data_outer, testpid0, [G1, G2, G3]},
%    M7 = {data_outer, testpid0, [G5]},
    R1 = {testpid1, G1},
    R2 = {testpid1, G2},
    R3 = {testpid1, G3},
    R4 = {testpid1, G4},
    R5 = {testpid1, G5},
%    R6 = {testpid2, G3},
%    R7 = {testpid2, G3},
%    R8 = {testpid2, G4},
%    R9 = {testpid2, G5},
    {inorder,
     [
      %% All queues start out empty.
      ?_assertMatch(true, queue_empty(to_parent)),
      ?_assertMatch(true, queue_empty(from_parent)),
      ?_assertMatch(true, queue_empty(from_db)),
      %
      %% Reading one message block yields its graphs one by one.
      ?_assertMatch(ok, queue_write(from_db, M1)),
      ?_assertMatch(R1, queue_get(from_db)),
      ?_assertMatch(R2, queue_get(from_db)),
      ?_assertMatch(R3, queue_get(from_db)),
      ?_assertMatch(R4, queue_get(from_db)),
      ?_assertMatch(R5, queue_get(from_db)),
      ?_assertMatch(true, queue_empty(from_db)),
      %
      %% Putting block_size graphs packs them into one message block.
      ?_assertMatch(ok, queue_put(to_parent, G1)),
      ?_assertMatch(ok, queue_put(to_parent, G2)),
      ?_assertMatch(ok, queue_put(to_parent, G3)),
      ?_assertMatch(ok, queue_put(to_parent, G4)),
      ?_assertMatch(ok, queue_put(to_parent, G5)),
      ?_assertMatch(M2, queue_read(to_parent)),
      ?_assertMatch(ok, queue_put(to_parent, G1)),
      ?_assertMatch(ok, queue_put(to_parent, G2)),
      ?_assertMatch(ok, queue_put(to_parent, G3)),
      ?_assertMatch(ok, queue_put(to_parent, G4)),
      ?_assertMatch(ok, queue_put(to_parent, G5)),
      ?_assertMatch(ok, queue_put(to_parent, G5)),
      ?_assertMatch(ok, queue_put(to_parent, G4)),
      ?_assertMatch(ok, queue_put(to_parent, G3)),
      ?_assertMatch(ok, queue_put(to_parent, G2)),
      ?_assertMatch(ok, queue_put(to_parent, G1)),
      ?_assertMatch(ok, queue_put(to_parent, G1)),
      ?_assertMatch(ok, queue_put(to_parent, G2)),
      ?_assertMatch(ok, queue_put(to_parent, G3)),
      ?_assertMatch(M2, queue_read(to_parent)),
      ?_assertMatch(M3, queue_read(to_parent)),
      %% A flush packs the partially filled buffer.
      ?_assertMatch(ok, queue_flush(to_parent)),
      ?_assertMatch(M4, queue_read(to_parent))
     ]}.
%% ======================================================================
%%
%% select and project operation can be part of every query node
%%
%% eval_project/1
%%
%% @doc Apply the prepared projection to the current graph (PD entry
%% gp_val): drop every entry whose key appears in PD entry project_out.
%% A 'none' argument means no projection is requested. Returns `ok'.
%%
%% eval_project(PL::query_tree:qn_project_list()) -> ok
%%
eval_project(none) -> ok;
eval_project(_) ->
    DropKeys = get(project_out),
    put(gp_val, maps:without(DropKeys, get(gp_val))),
    ok.
%%
%% project_prepare/0
%%
%% @doc From the list of variables retained in the projected graph,
%% prepare the list of triple-pattern ids to be projected out. Input is
%% the parameter PL; the result is stored as PD entry project_out. A
%% 'none' argument clears the list.
%% NOTE(review): the return value is whatever put/2 yields (the previous
%% dictionary value), not the documented ok|fail — callers should ignore it.
%%
%% project_prepare(PL::query_tree:qn_project_list()) -> ok|fail
%%
project_prepare(PL) ->
    case PL of
        none -> put(project_out, []);
        _ -> %% first get the ids of all triple patterns
            VP = get(vars_pos),
            L = lists:flatten(maps:values(VP)),
            %% get ids of tps from gp including vars from PL
            F2 = fun (V) ->
                     maps:get(V, VP)
                 end,
            %% map list of vars to list of pairs {qn_id,int}
            L1 = lists:flatmap(F2, PL),
            %% now extract qn_ids, ie. the 1st component of each pair
            F3 = fun (P) ->
                     element(1, P)
                 end,
            LA = lists:usort(lists:map(F3, L)),
            L2 = lists:usort(lists:map(F3, L1)),
            L3 = difference_lists(LA, L2),
            %% Log under this function's own tag (was 'hc_eval', which
            %% broke the tag convention used by every other call site here).
            info_msg(project_prepare, [get(self), {project_list,PL}, {all_qids,LA}, {project_qids,L2}, {project_out,L3}, get(state)], comp_project_out, 50),
            put(project_out, L3)
    end.
%%
%% @doc Compute the difference between two lists: the elements of the
%% first list that do not occur in the second, keeping their original
%% order (and duplicates).
%%
%% difference_lists(L1,L2) -> L3
difference_lists(Items, Exclude) ->
    [X || X <- Items, not lists:member(X, Exclude)].
%%
%% @doc Convert an instance of qn_attribute() to a value of a given Erlang
%% type: a {Value, Type} pair carrying a known type tag is unwrapped to
%% its value; any other term is returned unchanged.
%%
%% eval_attribute(Atr::qn_attribute())) -> string() | integer() | real() | timedate()
eval_attribute({Value, Type})
  when Type =:= code; Type =:= integer; Type =:= timedate;
       Type =:= real; Type =:= string ->
    Value;
eval_attribute(Other) ->
    Other.
%%
%% eval_select/1
%%
%% @doc Compute selection predicate on a given graph Graph and return
%% result as boolean(). Predicate is parameter SelectPred.
%% Operands are evaluated recursively; variable strings ("?x") are
%% resolved against the current graph/triple stored in the process
%% dictionary (gp_val/tp_val depending on the query node kind).
%%
%% eval_select(SelectPred::query_tree:qn_select_predicate()) -> any()
%%
%% An atom predicate is satisfied only when it is 'none' (no predicate).
eval_select(S) when is_atom(S) ->
    info_msg(select, [get(self), {expr,S}, {select_pred,get(select_pred)}, get(state)], select_atom_expr, 50),
    S == none;
%% Integer literals evaluate to themselves.
eval_select(S) when is_integer(S) ->
    info_msg(select, [get(self), {expr,S}, {select_pred,get(select_pred)}, get(state)], select_int_expr, 50),
    S;
%% Typed constants {Value, Type} evaluate to their value.
eval_select({V,Type}) when (Type == integer) or (Type == real) or (Type == timedate) or
                           (Type == code) or (Type == string) ->
    info_msg(select, [get(self), {expr,{V,Type}}, {select_pred,get(select_pred)}, get(state)], select_typed_expr, 50),
    V;
%% selection predicate is a string S: either a variable reference
%% (starting with '?') or a string constant.
eval_select(S) when is_list(S) ->
    info_msg(select, [get(self), {expr,S}, {qnode,get(qnode)}, {select_pred,get(select_pred)}, get(state)], select_list_expr, 50),
    %% check if variable
    IsVar = string:chr(S, $?) =:= 1,
    case get(qnode) of
        join -> %% return either var value or constant
            case IsVar of
                true -> %% get position and tuple
                    [{NodeId, Pos}|_] = maps:get(S, get(vars_pos)),
                    Tuple = maps:get(NodeId, get(gp_val)),
                    %% Pos+1 since first component is table-name
                    E = element(Pos+1, Tuple),
                    info_msg(select, [get(self), {select_pred,get(select_pred)}, {variable,S}, {value,E}, get(state)], variable_value, 50),
                    eval_attribute(E);
                %% return string constant if not var
                false -> S
            end;
        tp -> %% return either var value or constant
            case IsVar of
                true -> Pos = maps:get(S, get(vars)),
                    E = element(Pos+1, get(tp_val)),
                    info_msg(select, [get(self), {select_pred,get(select_pred)}, {variable,S}, {value,E}, get(state)], variable_value, 50),
                    eval_attribute(E);
                %% return string constant if not var
                false -> S
            end
    end;
%% selection predicate includes comparison ops
eval_select({S1, equal, S2}) ->
    VS1 = eval_select(S1),
    VS2 = eval_select(S2),
    VS1 == VS2;
eval_select({S1, less, S2}) ->
    VS1 = eval_select(S1),
    VS2 = eval_select(S2),
    VS1 < VS2;
eval_select({S1, lesseq, S2}) ->
    VS1 = eval_select(S1),
    VS2 = eval_select(S2),
    VS1 =< VS2;
eval_select({S1, greater, S2}) ->
    VS1 = eval_select(S1),
    VS2 = eval_select(S2),
    VS1 > VS2;
eval_select({S1, greatereq, S2}) ->
    VS1 = eval_select(S1),
    VS2 = eval_select(S2),
    VS1 >= VS2;
%% Logical connectives: operands must evaluate to booleans.
eval_select({S1, land, S2}) ->
    VS1 = eval_select(S1),
    VS2 = eval_select(S2),
    VS1 and VS2;
eval_select({S1, lor, S2}) ->
    VS1 = eval_select(S1),
    VS2 = eval_select(S2),
    VS1 or VS2;
eval_select({lnot, S1}) ->
    VS1 = eval_select(S1),
    not VS1;
%% Anything else is an illegal predicate expression.
eval_select(Expr) ->
    error_msg(select, [get(self), {expr,Expr}, {all,get()}, get(state)], illegal_select_expression),
    fail.
%% ===================================================================
%%
%% @doc Unit tests.
%%
qn_test_() ->
    %% Load the application environment before dispatching on the
    %% configured test mode.
    application:load(b3s),
    qt_site(b3s_state:get(test_mode)).

%% Site-specific test set: only test mode 'local1' runs the
%% start/bootstrap/stop cycle; any other mode yields no tests.
qt_site(local1) ->
    {inorder,
     [
      ?_assertMatch(ok, b3s:start()),
      ?_assertMatch(ok, b3s:bootstrap()),
      ?_assertMatch(ok, b3s:stop())
     ]};
qt_site(_) ->
    [].
%% ====> END OF LINE <====
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-------------------------------------------------------------------------
-module(ot_tracer).
-export([start_span/3,
start_span/4,
start_inactive_span/3,
start_inactive_span/4,
set_span/2,
with_span/3,
with_span/4,
current_ctx/1,
current_span_ctx/1,
end_span/1,
end_span/2]).
%% tracer access functions
-export([span_module/1]).
-include("opentelemetry.hrl").
-type traced_fun(T) :: fun((opentelemetry:span_ctx()) -> T).
-type tracer_ctx() :: term().
-export_type([traced_fun/1]).
-callback start_span(opentelemetry:tracer(),
opentelemetry:span_name(),
ot_span:start_opts()) -> opentelemetry:span_ctx().
-callback start_span(ot_ctx:ctx(),
opentelemetry:tracer(),
opentelemetry:span_name(),
ot_span:start_opts()) -> {opentelemetry:span_ctx(), ot_ctx:ctx()}.
-callback start_inactive_span(opentelemetry:tracer(),
opentelemetry:span_name(),
ot_span:start_opts()) -> opentelemetry:span_ctx().
-callback start_inactive_span(ot_ctx:ctx(),
opentelemetry:tracer(),
opentelemetry:span_name(),
ot_span:start_opts()) -> {opentelemetry:span_ctx(), ot_ctx:ctx()}.
-callback set_span(opentelemetry:tracer(), opentelemetry:span_ctx()) -> ok.
-callback with_span(opentelemetry:tracer(), opentelemetry:span_name(), traced_fun(T)) -> T.
-callback with_span(opentelemetry:tracer(), opentelemetry:span_name(), ot_span:start_opts(), traced_fun(T)) -> T.
-callback end_span(ot_ctx:ctx() | opentelemetry:tracer(), opentelemetry:tracer() | opentelemetry:span_ctx()) -> boolean() | {error, term()}.
-callback end_span(opentelemetry:tracer()) -> boolean() | {error, term()}.
-callback current_ctx(opentelemetry:tracer()) -> tracer_ctx().
-callback current_span_ctx(opentelemetry:tracer()) -> opentelemetry:span_ctx().
-callback span_module(opentelemetry:tracer()) -> module().
%% @doc Starts a span; delegates to the tracer's implementation module.
-spec start_span(opentelemetry:tracer(), opentelemetry:span_name(), ot_span:start_opts())
    -> opentelemetry:span_ctx().
start_span(Tracer={Module, _}, Name, Opts) ->
    Module:start_span(Tracer, Name, Opts).

%% @doc Starts a span in an explicit context; delegates.
-spec start_span(ot_ctx:ctx(), opentelemetry:tracer(), opentelemetry:span_name(), ot_span:start_opts())
    -> {opentelemetry:span_ctx(), ot_ctx:ctx()}.
start_span(Ctx, Tracer={Module, _}, Name, Opts) ->
    Module:start_span(Ctx, Tracer, Name, Opts).

%% @doc Starts an inactive span; delegates.
-spec start_inactive_span(opentelemetry:tracer(), opentelemetry:span_name(), ot_span:start_opts())
    -> opentelemetry:span_ctx().
start_inactive_span(Tracer={Module, _}, Name, Opts) ->
    Module:start_inactive_span(Tracer, Name, Opts).

%% @doc Starts an inactive span in an explicit context; delegates.
-spec start_inactive_span(ot_ctx:ctx(), opentelemetry:tracer(), opentelemetry:span_name(),
    ot_span:start_opts()) -> {opentelemetry:span_ctx(), ot_ctx:ctx()}.
start_inactive_span(Ctx, Tracer={Module, _}, Name, Opts) ->
    Module:start_inactive_span(Ctx, Tracer, Name, Opts).

%% @doc Sets the given span context; delegates.
%% NOTE(review): this and the with_span wrappers guard on is_atom(Module)
%% while the other wrappers do not — presumably equivalent; confirm.
-spec set_span(opentelemetry:tracer(), opentelemetry:span_ctx()) -> ok.
set_span(Tracer={Module, _}, SpanCtx) when is_atom(Module) ->
    Module:set_span(Tracer, SpanCtx).

%% @doc Runs Fun inside a new span; delegates.
-spec with_span(opentelemetry:tracer(), opentelemetry:span_name(), traced_fun(T)) -> T.
with_span(Tracer={Module, _}, SpanName, Fun) when is_atom(Module) ->
    Module:with_span(Tracer, SpanName, Fun).

%% @doc Runs Fun inside a new span started with Opts; delegates.
-spec with_span(opentelemetry:tracer(), opentelemetry:span_name(), ot_span:start_opts(), traced_fun(T)) -> T.
with_span(Tracer={Module, _}, SpanName, Opts, Fun) when is_atom(Module) ->
    Module:with_span(Tracer, SpanName, Opts, Fun).

%% @doc Ends the current span; delegates.
-spec end_span(opentelemetry:tracer()) -> boolean() | {error, term()}.
end_span(Tracer={Module, _}) ->
    Module:end_span(Tracer).

%% @doc Ends a span. The first clause applies when the second argument is
%% a 2-tuple tracer (so the first is a context); otherwise the first
%% argument is the tracer and the second a span context.
-spec end_span(ot_ctx:ctx() | opentelemetry:tracer(), opentelemetry:tracer() | opentelemetry:span_ctx())
    -> boolean() | {error, term()}.
end_span(Ctx, Tracer={Module, _}) ->
    Module:end_span(Ctx, Tracer);
end_span(Tracer={Module, _}, SpanCtx) ->
    Module:end_span(Tracer, SpanCtx).

%% @doc Returns the tracer's current context; delegates.
-spec current_ctx(opentelemetry:tracer()) -> ot_tracer:tracer_ctx().
current_ctx(Tracer={Module, _}) ->
    Module:current_ctx(Tracer).

%% @doc Returns the current span context; delegates.
-spec current_span_ctx(opentelemetry:tracer()) -> opentelemetry:span_ctx().
current_span_ctx(Tracer={Module, _}) ->
    Module:current_span_ctx(Tracer).
%% tracer access functions

%% @doc Returns the module implementing span operations for this tracer,
%% by delegating to the tracer's implementation module. (Removed dataset
%% metadata residue that was fused onto this line; added the -spec that
%% matches the span_module/1 callback declared above.)
-spec span_module(opentelemetry:tracer()) -> module().
span_module(Tracer={Module, _}) ->
    Module:span_module(Tracer).
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-------------------------------------------------------------------------
-module(otel_tracer).
-export([start_span/3,
start_span/4,
with_span/4,
with_span/5,
non_recording_span/3,
from_remote_span/3,
set_current_span/1,
set_current_span/2,
current_span_ctx/0,
current_span_ctx/1]).
-include("opentelemetry.hrl").
-define(CURRENT_SPAN_CTX, {?MODULE, span_ctx}).
-define(is_recording(SpanCtx), SpanCtx =/= undefined andalso SpanCtx#span_ctx.is_recording =:= true).
-type traced_fun(T) :: fun((opentelemetry:span_ctx()) -> T).
-export_type([traced_fun/1]).
-callback start_span(otel_ctx:t(),
opentelemetry:tracer(),
opentelemetry:span_name(),
otel_span:start_opts()) -> opentelemetry:span_ctx().
-callback with_span(otel_ctx:t(), opentelemetry:tracer(),
opentelemetry:span_name(), otel_span:start_opts(), traced_fun(T)) -> T.
-spec start_span(opentelemetry:tracer(), opentelemetry:span_name(), otel_span:start_opts())
                -> opentelemetry:span_ctx().
%% @doc Starts a span in the implicit (current) context after validating
%% the span name and start options; an invalid name yields a no-op span
%% context instead.
start_span(Tracer = {Module, _}, SpanName, Opts) ->
    case otel_span:is_valid_name(SpanName) of
        false ->
            otel_tracer_noop:noop_span_ctx();
        true ->
            StartOpts = otel_span:validate_start_opts(Opts),
            Module:start_span(otel_ctx:get_current(), Tracer, SpanName, StartOpts)
    end.

-spec start_span(otel_ctx:t(), opentelemetry:tracer(), opentelemetry:span_name(), otel_span:start_opts())
                -> opentelemetry:span_ctx().
%% @doc Same as start_span/3 but uses the explicitly supplied context.
start_span(Ctx, Tracer = {Module, _}, SpanName, Opts) ->
    case otel_span:is_valid_name(SpanName) of
        false ->
            otel_tracer_noop:noop_span_ctx();
        true ->
            StartOpts = otel_span:validate_start_opts(Opts),
            Module:start_span(Ctx, Tracer, SpanName, StartOpts)
    end.
-spec with_span(opentelemetry:tracer(), opentelemetry:span_name(), otel_span:start_opts(), traced_fun(T)) -> T.
%% @doc Runs Fun inside a new span in the implicit (current) context; an
%% invalid span name runs Fun with a no-op span context instead.
with_span(Tracer = {Module, _}, SpanName, Opts, Fun) when is_atom(Module) ->
    case otel_span:is_valid_name(SpanName) of
        false ->
            Fun(otel_tracer_noop:noop_span_ctx());
        true ->
            StartOpts = otel_span:validate_start_opts(Opts),
            Module:with_span(otel_ctx:get_current(), Tracer, SpanName, StartOpts, Fun)
    end.

-spec with_span(otel_ctx:t(), opentelemetry:tracer(), opentelemetry:span_name(), otel_span:start_opts(), traced_fun(T)) -> T.
%% @doc Same as with_span/4 but uses the explicitly supplied context.
with_span(Ctx, Tracer = {Module, _}, SpanName, Opts, Fun) when is_atom(Module) ->
    case otel_span:is_valid_name(SpanName) of
        false ->
            Fun(otel_tracer_noop:noop_span_ctx());
        true ->
            StartOpts = otel_span:validate_start_opts(Opts),
            Module:with_span(Ctx, Tracer, SpanName, StartOpts, Fun)
    end.
%% @doc Returns a `span_ctx' record with `is_recording' set to `false'. This is mainly
%% for use in propagators when they extract a Span to be used as a parent.
%% NOTE(review): unlike from_remote_span/3 this does not set is_valid —
%% confirm whether that asymmetry is intentional.
-spec non_recording_span(opentelemetry:trace_id(), opentelemetry:span_id(), opentelemetry:trace_flags())
                        -> opentelemetry:span_ctx().
non_recording_span(TraceId, SpanId, Traceflags) ->
    #span_ctx{trace_id=TraceId,
              span_id=SpanId,
              is_recording=false,
              trace_flags=Traceflags}.

%% @doc Returns a `span_ctx' record with `is_recording' set to `false' and `is_remote' set to `true'.
%% This is mainly for use in propagators when they extract a Span to be used as a parent.
-spec from_remote_span(opentelemetry:trace_id(), opentelemetry:span_id(), opentelemetry:trace_flags())
                      -> opentelemetry:span_ctx().
from_remote_span(TraceId, SpanId, Traceflags) ->
    #span_ctx{trace_id=TraceId,
              span_id=SpanId,
              is_valid=true,
              is_recording=false,
              is_remote=true,
              trace_flags=Traceflags}.
%% @doc Stores SpanCtx as the current span in the implicit context and
%% returns it.
-spec set_current_span(opentelemetry:span_ctx() | undefined) -> opentelemetry:span_ctx() | undefined.
set_current_span(SpanCtx) ->
    %% set_value's own return is irrelevant here; the caller gets SpanCtx.
    _ = otel_ctx:set_value(?CURRENT_SPAN_CTX, SpanCtx),
    SpanCtx.

%% @doc Returns a copy of Ctx with SpanCtx stored as the current span.
-spec set_current_span(otel_ctx:t(), opentelemetry:span_ctx() | undefined) -> otel_ctx:t() | undefined.
set_current_span(Ctx, SpanCtx) ->
    otel_ctx:set_value(Ctx, ?CURRENT_SPAN_CTX, SpanCtx).

%% @doc Returns the current span context from the implicit context, or
%% `undefined' when none is set.
-spec current_span_ctx() -> opentelemetry:span_ctx() | undefined.
current_span_ctx() ->
    otel_ctx:get_value(?CURRENT_SPAN_CTX).
-spec current_span_ctx(otel_ctx:t()) -> opentelemetry:span_ctx() | undefined.
%% @doc Returns the span context stored in the given context, or
%% `undefined' when none has been set. (Removed dataset metadata residue
%% that was fused onto the final line.)
current_span_ctx(Ctx) ->
    otel_ctx:get_value(Ctx, ?CURRENT_SPAN_CTX, undefined).
-module(ux_uca_extract).
-export([extract/3]).
-include("ux.hrl").
-include("ux_uca.hrl").
-type uca_array() :: ux_uca:uca_array().
-type uca_weight() :: ux_uca:uca_weight().
-type uca_elem() :: ux_uca:uca_elem().
-type result() :: ux_uca:uca_result().
-type ux_ccc() :: ux_types:ux_ccc().
%% @doc MANUAL:
%% S2.1 Find the longest initial substring S at each point
%% that has a match in the table.
%% S2.1.1 If there are any non-starters following S, process each non-starter C.
%% S2.1.2 If C is not blocked from S, find if S + C has a match in the table.
%% S2.1.3 If there is a match, replace S by S + C, and remove C.
%%
%% Returns: {Not reversed list of weight elements, Tail of the string}.
%% @end
%% @private
%% The spec now follows the actual argument order (options, DUCET fun,
%% string); the old spec listed string() first, contradicting the head.
-spec extract(#uca_options{}, fun(), string()) ->
    result().
extract(C=#uca_options{}, D, S) when is_list(S), is_function(D) ->
    do_extract(C, D, S).
%% @doc Extract the next collation element from S and apply the configured
%% post-processing: number/Hangul rewriting (check_mod), the case-first
%% remapping and the case-sensitive level swap.
%% @param C::#uca_options{} Config
%% @param D::fun() DUCET function
%% @param S::string() String
%% The spec now follows the actual argument order (options, DUCET fun,
%% string); the old spec listed string() first, contradicting the head.
-spec do_extract(#uca_options{}, fun(), string()) ->
    result().
do_extract(#uca_options {
        case_sensitive=CS,
        case_first=CF
    } = C, D, S) ->
    {W1, S1} = do_extract0(S, D),
    {W2, S2} = check_mod(C, W1, D, S1),
    %% 'upper' case-first inverts the L3 case ranges so uppercase sorts first.
    W3 = case CF of
             off -> W2;
             lower -> W2;
             upper -> lists:map(fun case_first_hack/1, W2)
         end,
    %% Case-sensitive comparison moves L3 in front of L1.
    W4 = case CS of
             false -> W3;
             true -> lists:map(fun case_sensitive_hack/1, W3)
         end,
    % ok = check_weights(W4),
    {W4, S2}.
%% If post-processing applies to any of the freshly extracted weight
%% elements (digits under natural sort, or Hangul leading jamo), run
%% mod_weights/4 over them through a proxy closure; otherwise pass the
%% weights and string tail through unchanged.
check_mod(#uca_options{natural_sort=NS} = C, W1, D, S1) ->
    case has_mod(W1, NS) of
        false ->
            ?DBG("Skip post processing.~n", []),
            {W1, S1};
        true ->
            ?DBG("Run post processing.~n", []),
            %% The proxy closure hides C, D and the remaining string.
            Proxy = do_proxy(C, D, S1),
            mod_weights(Proxy, W1, NS, [])
    end.
%% Sanity check: every weight element must be a 5-element list tagged
%% 'variable' or 'non_variable'. (Currently only referenced from a
%% commented-out call in do_extract/3.)
check_weights(Weights) -> do_check_weights(Weights).

do_check_weights([]) -> ok;
do_check_weights([[Tag, _, _, _, _] | Rest])
  when Tag =:= variable; Tag =:= non_variable ->
    do_check_weights(Rest).
%% This function hides C,D,S from client.
%% The returned closure implements a small protocol used by the
%% post-processing code:
%%   get_more     -> {NextWeights, NextProxy} | no_more
%%   term         -> the configured Hangul terminator weight element
%%   restart      -> a fun(W, Acc) that re-runs mod_weights on W
%%   mod_continue -> a fun(W, Acc) that re-runs check_mod on W
%%   Result       -> {lists:reverse(Result), RemainingString}
-spec do_proxy(#uca_options{}, fun(), string()) -> fun().
do_proxy(C,D,S) ->
    fun(get_more) ->
        case S of
            [] -> no_more;
            _ ->
                {W, NewS} = do_extract0(S, D),
                F = do_proxy(C,D,NewS),
                {W, F}
        end;
       (term) -> get_terminator(C);
       (restart) ->
            F = do_proxy(C,D,S),
            fun(W, Acc) ->
                do_mod(C,F,W,Acc)
            end;
       %% One hangul sequence was found, restart check_mod
       (mod_continue) ->
            fun(W, Acc) ->
                {W1, S1} = check_mod(C, W, D, S),
                %% lists:reverse(Acc) ++ W1
                {lists:reverse(Acc, W1), S1}
            end;
       (Result) -> {lists:reverse(Result),S}
    end.
%% Entry point used by the proxy's 'restart' closure: re-run the
%% weight post-processing with the configured natural-sort flag.
-spec do_mod(#uca_options{}, fun(), uca_array(), uca_array()) ->
    result().
do_mod(#uca_options {
        natural_sort=NS
    }, F, W, Acc) ->
    mod_weights(F, W, NS, Acc).

%% The internal Hangul syllable terminator as a full weight element.
-spec get_terminator(#uca_options{}) -> uca_weight().
get_terminator(#uca_options {
        hangul_terminator=Term
    }) -> [non_variable, Term, 0,0,0].
%% @doc Remap the level-3 weight so that uppercase sorts before lowercase.
%% @private
-spec case_first_hack(uca_elem()) -> uca_elem().
case_first_hack([Var, L1, L2, L3, L4]) ->
    [Var, L1, L2, case_invert(L3), L4].

%% @doc Swap the two "case" sub-ranges of a level-3 weight: 2..6 and
%% 8..12 are exchanged; any other value is left untouched.
%% @private
-spec case_invert(uca_weight()) -> uca_weight().
case_invert(W) ->
    if
        W >= 2, W =< 6 -> W + 6;
        W >= 8, W =< 12 -> W - 6;
        true -> W
    end.

%% @doc Make comparison case-sensitive by moving L3 in front of L1.
%% @private
-spec case_sensitive_hack(uca_elem()) -> uca_elem().
case_sensitive_hack([Var, L1, L2, L3, L4]) ->
    [Var, L3, L2, L1, L4].
% Hack for numbers.
%% @doc Report whether the weight list needs post-processing: true when it
%% contains a decimal-digit L1 weight (only with natural sort enabled) or
%% a Hangul leading-jamo L1 weight. (The first clause previously bound an
%% unused tail variable T, triggering a compiler warning.)
has_mod([[_Var,L1|_]|_], _NS=true)
    when ?IS_L1_OF_DECIMAL(L1) ->
    true;
has_mod([[_Var,L1|_]|_], _NS)
    when ?IS_L1_OF_HANGUL_L(L1) ->
    true;
has_mod([[_|_]|T], NS) ->
    has_mod(T, NS);
has_mod([], _NS) ->
    false.
% 7.1.5 Hangul Collation
% Interleaving Method
% MANUAL:
% Generate a modified weight table:
% 1. Assign a weight to each precomposed Hangul syllable character,
% with a 1-weight gap between each one.
% (See Section 6.2, Large Weight Values)
% 2. Give each jamo a 1-byte internal weight.
% Also add an internal terminator 1-byte weight (W).
% These are assigned so that all W < T < V < L.
% These weights are separate from the default weights, and are just used
% internally.
% When any string of jamo and/or Hangul syllables is encountered,
% break it into syllables according to the rules of Section 3.12,
% Conjoining Jamo Behavior of [Unicode].
% Process each syllable separately:
% If a syllable is canonically equivalent to one of the precomposed Hangul
% syllables, then just assign the weight as above
% If not, then find the greatest syllable that it is greater than;
% call that the base syllable. Generate a weight sequence corresponding to
% the following gap weight, followed by all the jamo weight bytes,
% followed by the terminator byte.
%
% L1 as an argument is first hangul jamo L.
% L1 as part of ?IS_L1_OF_HANGUL_L is the first level.
%% @private
%% @doc Post-process an extracted weight list: fold runs of decimal-digit
%% elements into number weights (natural sort only) and hand Hangul jamo
%% sequences to do_hangul/4. E is the proxy closure from do_proxy/3; Acc
%% accumulates processed elements head-first (reversed by the proxy).
%% (Dropped the unused `=H' / `Var' bindings that triggered compiler
%% warnings in the first two clauses.)
% Hack for Hangul.
-spec mod_weights(fun(), uca_array(), boolean(), uca_array()) -> result().
% Hack for numbers.
mod_weights(E, [[Var,L1|LOther]|T], _NS=true, Acc)
    when ?IS_L1_OF_DECIMAL(L1) ->
    %% F rebuilds an element carrying this element's L2..L4 weights.
    F = fun(W) -> [Var,W|LOther] end,
    Num = ?COL_WEIGHT_TO_DECIMAL(L1),
    do_decimal(E, F, Num, T, Acc);
mod_weights(E, [[_Var,L1|_]=H|T], _NS, Acc)
    when ?IS_L1_OF_HANGUL_L(L1) ->
    do_hangul(E, l, T, [H|Acc]);
mod_weights(E, [H|T], NS, Acc) ->
    mod_weights(E, T, NS, [H|Acc]);
mod_weights(E, [], _NS, Acc) ->
    E(Acc). % L1 is not found. There is no hangul jamo in this string.
%% @doc Scans the string for the digits.
%% When a non-digit character is extracted, stop extraction and
%% form the weights.
%%
%% @end
%% @param E The proxy function
%% @param F The function which forms a weight element.
%% @param N Number accumulated from the digits read so far
%% @param W The tail of the weights
%% @param Acc Accumulator for the weights
%% (Spec fix: N is an integer, not a boolean; also dropped the unused
%% `=H' binding in the digit clause that triggered a compiler warning.)
-spec do_decimal(fun(), fun(), integer(), uca_array(), uca_array()) -> result().
do_decimal(E, F, N, [[_,0|_]=H|T]=_W, Acc) ->
    do_decimal(E, F, N, T, [H|Acc]); % skip an ignorable element.
do_decimal(E, F, N, [[_,L1|_]|T]=_W, Acc)
    when ?IS_L1_OF_DECIMAL(L1) ->
    %% Fold the next digit into the running number.
    NewN = (N * 10) + ?COL_WEIGHT_TO_DECIMAL(L1),
    ?DBG("old ~w; new ~w~n", [N, NewN]),
    do_decimal(E, F, NewN, T, Acc);
do_decimal(E, F, N, []=_W, Acc) ->
    % We need more gold. Try extract 1 more char. :)
    case E(get_more) of
        {NewW, NewE} ->
            ?DBG("more ~w~n", [NewW]),
            do_decimal(NewE, F, N, NewW, Acc);
        no_more ->
            NewAcc = decimal_result(F, N, Acc),
            E(NewAcc)
    end;
% Bad char. Cancel last extraction.
do_decimal(E, F, N, W, Acc) ->
    NewAcc = decimal_result(F, N, Acc),
    Restarter = E(restart),
    Restarter(W, NewAcc).
-spec decimal_result(fun(), integer(), uca_array()) -> uca_array().
%% @doc Forms the weight elements for an accumulated number N.
%% F is a function which gets an L1 weight and returns the full element.
%% For example:
%% ```
%% > decimal_result(F, 100, []).
%% [[100],[16#FFFE],[1]].
%% '''
%% @end
decimal_result(F, N, Acc) ->
    %% Terminator elements go onto the (head-first) accumulator before the
    %% digit weights, matching the documented example above.
    NewAcc = [F(16#FFFE), F(1)|Acc],
    %% BUG FIX: this previously called do_decimal_result(F, N, Acc),
    %% leaving NewAcc unused and silently dropping the F(16#FFFE)/F(1)
    %% terminator elements.
    do_decimal_result(F, N, NewAcc).

-spec do_decimal_result(fun(), integer(), uca_array()) -> uca_array().
%% Split N into base-16#FFFE chunks, prepending one element per chunk.
do_decimal_result(F, N, Acc) ->
    ?DBG("Res: ~w~n", [N]),
    case N div 16#FFFE of
        0 -> [F(N)|Acc];
        Div ->
            Rem = N rem 16#FFFE,
            do_decimal_result(F, Div, [F(Rem), F(16#FFFE)|Acc])
    end.
%% L1 was found.
%% Mod: l
%% @private
%% @param E Proxy function
%% @param Mod l | lv | ll -- the shape of the syllable seen so far
%% @param Tail of weights
%% @param Accumulator for weights
%% State machine over Hangul jamo weight elements. Completed syllables
%% (LVT/LVV/LLV) are terminated via hangul_result/3.
%% NOTE(review): on those completions the remaining weight tail T is
%% handed to hangul_result/3, which ignores it -- confirm whether trailing
%% weights can legitimately be dropped at that point.
-spec do_hangul(fun(), atom(), uca_array(), uca_array()) -> result().
do_hangul(E, Mod, [[_,0|_]=H|T], Acc) ->
    % skip an ignorable element.
    do_hangul(E, Mod, T, [H|Acc]);
do_hangul(E, l, [[_,L1|_]=H|T], Acc)
    when ?IS_L1_OF_HANGUL_L(L1) -> % L2 is found. LL*
    do_hangul(E, ll, T, [H|Acc]);
do_hangul(E, l, [[_,L1|_] = H|T], Acc)
    when ?IS_L1_OF_HANGUL_V(L1) -> % V1 is found. LV*
    do_hangul(E, lv, T, [H|Acc]);
do_hangul(E, lv, [[_,L1|_]=H|T], Acc)
    when ?IS_L1_OF_HANGUL_T(L1) -> % T1 is found. LVT
    hangul_result(E, T, [H|Acc]);
do_hangul(E, lv, [[_,L1|_]=H|T], Acc)
    when ?IS_L1_OF_HANGUL_V(L1) -> % V2 is found. LVV
    hangul_result(E, T, [H|Acc]);
do_hangul(E, lv, [_|_] = W, Acc) -> % X is found. LVX
    hangul_result_and_continue(E, W, Acc);
do_hangul(E, ll, [[_,L1|_]=H|T], Acc)
    when ?IS_L1_OF_HANGUL_V(L1) -> % V1 is found. LLV
    hangul_result(E, T, [H|Acc]);
do_hangul(E, Mod, [], Acc) ->
    %% Out of weights: ask the proxy for the next extraction batch.
    case E(get_more) of
        {NewW, NewE} ->
            do_hangul(NewE, Mod, NewW, Acc);
        no_more ->
            E(Acc)
    end;
do_hangul(E, _Mod, W, Acc) -> % L
    %% Not a syllable continuation: restart check_mod on the rest.
    Continue = E(mod_continue),
    Continue(W, Acc).
%% @private
%% @doc Emit the internal syllable terminator weight and hand the
%% accumulated elements back to the proxy (which reverses them).
%% NOTE(review): the weight tail passed in by do_hangul/4 is ignored here
%% (previously bound as an unused variable, triggering a warning) --
%% confirm that trailing weights are always empty at this point.
-spec hangul_result(fun(), uca_array(), uca_array()) -> result().
hangul_result(E, _Tail, Acc) ->
    TermWeight = E(term),
    E([TermWeight | Acc]).

%% Like hangul_result/3, but restarts post-processing (via the proxy's
%% mod_continue closure) on the remaining weights W after terminating.
hangul_result_and_continue(E, W, Acc) ->
    TermWeight = E(term),
    Continue = E(mod_continue),
    Continue(W, [TermWeight|Acc]).
%% Step 0: try extract derived weights.
%% @private
%% @param Str:string() String
%% @param D::fun() Ducet Function
-spec do_extract0(string(), fun()) -> result().
do_extract0([], _) -> % No Any Char
    {[], []};
% Try extract from ducet.
%% var DFn:fun() Ducet function
%% var CFn:fun() CCC function
%% var LFn:fun() DUCET lookup function
do_extract0([H], D) -> % Last Char
    %% NOTE(review): a single trailing char with no DUCET match yields
    %% {[], []} rather than falling back to an implicit weight as the
    %% general clause does -- confirm intended.
    case D([H]) of
        [_|_] = W ->
            {W, []};
        _ ->
            {[], []}
    end;
do_extract0([H|T]=S, DFn) ->
    % Max ccc among ccces of skipped chars between the starter char
    % and the processed char. If there are no skipped chars, then
    % Ccc1=false.
    OldCCC = false,
    Key = [],
    Skipped = [],
    LFn = ducet_lookup(DFn),
    CFn = ux_unidata:ccc(skip_check),
    MFn = get_more(LFn, CFn),
    Res = false,
    case do_extract1(S, MFn, Key, OldCCC, Skipped, Res) of
        {result, Key2, T2} ->
            %% A (possibly contracted) key matched: fetch its weights.
            W = DFn(Key2),
            ?DBG("W:~w T: ~w~n", [W, T2]),
            {W, T2};
        not_found ->
            %% No DUCET entry at all: derive an implicit weight for H.
            {do_implicit(H), T}
    end.
%% @param S:string() String
%% Res is a last good Key.
%% Longest-match search with non-starter skipping (UCA rule S2.1): MFn
%% classifies each candidate Key extension as true (still a prefix),
%% maybe (speculative -- explored with Res=more), false (blocked /
%% no extension) or bad_ccc (combining-class order violated).
-spec do_extract1(string(), fun(), string(), ux_ccc()|false,
    string(), uca_array()) ->
    {result,string(),string()}|not_found.
do_extract1([H|T]=S, MFn, Key, OldCCC, Skipped, Res)
    when is_list(Skipped) ->
    NewKey = [H|Key],
    case MFn(NewKey, OldCCC) of
        {false, _NewCCC} when Res =:= more ->
            %% Speculative branch failed: unwind to the caller.
            more_error;
        {false, NewCCC} ->
            %% H does not extend the key: skip it and carry its CCC.
            NewSkipped = [H|Skipped],
            do_extract1(T, MFn, Key, NewCCC, NewSkipped, Res);
        {true, NewCCC} ->
            %% NewKey is a known prefix: remember it as the last good key.
            CCC = select_ccc(OldCCC, NewCCC),
            ?DBG("selected ccc is ~w.~n", [CCC]),
            do_extract1(T, MFn, NewKey, CCC, Skipped, NewKey);
        {maybe, NewCCC} when Res =:= more ->
            CCC = select_ccc(OldCCC, NewCCC),
            do_extract1(T, MFn, NewKey, CCC, Skipped, more);
        {maybe, NewCCC} ->
            %% Try the speculative extension; on failure, skip H instead.
            CCC = select_ccc(OldCCC, NewCCC),
            case do_extract1(T, MFn, NewKey, CCC, Skipped, more) of
                more_error ->
                    NewSkipped = [H|Skipped],
                    do_extract1(T, MFn, Key, NewCCC, NewSkipped, Res);
                Return -> Return
            end;
        bad_ccc when Res =:= more ->
            more_error;
        bad_ccc when Res =:= false ->
            % http://unicode.org/reports/tr10/#Unassigned_And_Other
            not_found;
        bad_ccc ->
            %% Stop with the last good key; skipped chars rejoin the tail.
            {result, do_extract1_return(Res), lists:reverse(Skipped, S)}
    end;
do_extract1([]=_S, _MFn, _Key, _OldCCC, Skipped, _Res=more)
    when is_list(Skipped) ->
    more_error;
do_extract1([]=_S, _MFn, _Key, _OldCCC, Skipped, _Res=false)
    when is_list(Skipped) ->
    not_found;
do_extract1([]=_S, _MFn, _Key, _OldCCC, Skipped, Res)
    when is_list(Skipped) ->
    {result, do_extract1_return(Res), lists:reverse(Skipped)}.
% Table 18. Values for Base
% -----------------------------------------------------------------------------
% Range 1: Unified_Ideograph=True AND
% ((Block=CJK_Unified_Ideograph) OR (Block=CJK_Compatibility_Ideographs))
% Base 1: FB40
% Range 2: Unified_Ideograph=True AND NOT
% ((Block=CJK_Unified_Ideograph) OR (Block=CJK_Compatibility_Ideographs))
% Base 2: FB80
% Base 3: FBC0 Any other code point
% Range 3: Ideographic AND NOT Unified_Ideograph
% -----------------------------------------------------------------------------
%% Pick the implicit-weight BASE for code point H per the table above.
do_implicit(H)
    when ?CHAR_IS_UNIFIED_IDEOGRAPH(H) ->
    if
        (?CHAR_IS_CJK_COMPATIBILITY_IDEOGRAPH(H)
            or ?CHAR_IS_CJK_UNIFIED_IDEOGRAPH(H)) ->
            implicit_weight(H, 16#FB40);
        true ->
            implicit_weight(H, 16#FB80)
    end;
do_implicit(H) ->
    implicit_weight(H, 16#FBC0).
%% After skipping a character the working CCC becomes the skipped
%% character's CCC; before any character has been skipped it stays 'false'.
-spec select_ccc(false|ux_ccc(), ux_ccc()) -> false|ux_ccc().
select_ccc(OldCCC, NewCCC) ->
    case OldCCC of
        false -> false;
        _ -> NewCCC
    end.

%% The accumulated key is built head-first; restore the original order.
-spec do_extract1_return(string()) -> string().
do_extract1_return(Key) -> lists:reverse(Key).
%% @param L::string() List of unicode codepoints
%% Ask the DUCET function for its prefix-membership test function.
-spec ducet_lookup(fun()) -> fun().
ducet_lookup(D) ->
    D(member_function).

%% Build the classifier closure used by do_extract1/6: given a candidate
%% key (head = newest char) and the CCC context, return {Status, NewCCC}
%% when the character is usable, {false, NewCCC} when it is blocked, or
%% bad_ccc when the combining-class ordering is violated.
-spec get_more(fun(), fun()) -> term().
get_more(LFn, CFn) ->
    fun([H|_]=K, OldCCC) when is_integer(H) ->
        case CFn(H) of
            NewCCC when OldCCC =:= false;
                        OldCCC=/=0, OldCCC<NewCCC ->
                ?DBG("ccc is ok. OldCCC is ~w. NewCCC is ~w. ~n",
                    [OldCCC, NewCCC]),
                Status = LFn(K),
                ?DBG("Status is ~w. ~n", [Status]),
                {Status, NewCCC};
            NewCCC when OldCCC =:= NewCCC, OldCCC =/= 0 ->
                ?DBG("Char is blocked. CCC is ~w. ~n", [OldCCC]),
                {false, NewCCC}; % blocked
            NewCCC ->
                ?DBG("Bad CCC. OldCCC is ~w. NewCCC is ~w ~n",
                    [OldCCC, NewCCC]),
                bad_ccc
        end
    end.

% Note: A non-starter in a string is called blocked if there is another
% non-starter of the same canonical combining class or zero between
% it and the last character of canonical combining class 0.
%% @doc 7.1.3 Implicit Weights
%% The result of this process consists of collation elements that are sorted in
%% code point order, that do not collide with any explicit values in the table,
%% and that can be placed anywhere (for example, at BASE) with respect to the
%% explicit collation element mappings. By default, implicit mappings are given
%% higher weights than all explicit collation elements.
%% @end
%% @private
%% (Removed dataset metadata residue that was fused onto the last line.)
implicit_weight(CP, BASE) when is_integer(CP), is_integer(BASE) ->
    %% UTS #10: AAAA = BASE + (CP >> 15); BBBB = (CP band 7FFF) bor 8000.
    AAAA = BASE + (CP bsr 15),
    BBBB = (CP band 16#7FFF) bor 16#8000,
    [[non_variable, AAAA, 32, 2, 0],
     [non_variable, BBBB, 0, 0, 0]]. % reversed later by the caller
%% @author <NAME> <<EMAIL>>
%% @author <NAME> <<EMAIL>>
%% @copyright 2011 <NAME>
%% @doc Stats aggregation process, periodically dumping metrics.
-module (estatsd_server).
%% See estatsd.hrl for a complete list of introduced types.
-include ("estatsd.hrl").
%% This is an OTP gen_server.
-behaviour (gen_server).
%% Client API.
-export ([
start_link/0
]).
%% OTP gen_server callbacks.
-export ([
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).
%% Process state: Settings and the timer metrics.
-record (state, {
timers % Timers stored in a gb_tree
}).
% ====================== \/ CLIENT API =========================================
%% @doc Starts the estatsd statistics server.
%% Registers a process named `estatsd_server` (i.e. ?MODULE) locally;
%% a second start on the same node therefore returns
%% {error, {already_started, Pid}}.
-spec start_link() -> {ok, Pid::pid()} | {error, Reason::term()}.
start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
% ====================== /\ CLIENT API =========================================
% ====================== \/ GEN_SERVER CALLBACKS ===============================
%% @doc Initializes the server's state.
%% Registers a process named `estatsda_manager`.
%% Sets up an event manager and all configured adapters which will forward
%% to some graphing system or log, or whatever you want your adapter to do.
%% Also creates the named ETS table `statsd' (counter storage, owned by
%% this process, so it dies with it) and schedules a periodic `flush'
%% cast to this server.
%% NOTE(review): the timer ref from apply_interval is discarded, so the
%% interval cannot be cancelled later — presumably intentional; confirm.
init([]) ->
% Time between each flush, default to 10 seconds.
FlushInterval = estatsd:env_or_default(flush_interval, 10000),
% Adapters to handle the collected metrics. Defaults to logging only.
DefaultAdapter = {estatsda_logger, []},
Adapters = estatsd:env_or_default(adapters, [DefaultAdapter]),
setup_adapters_(Adapters),
% Initialize the table for counter metrics
ets:new(statsd, [named_table, set]),
% Flush out stats periodically
{ok, _} = timer:apply_interval(
FlushInterval, gen_server, cast, [?MODULE, flush]),
State = #state{timers = gb_trees:empty()},
{ok, State}.
%% @doc Increments or creates the counter.
%% The `statsd' ETS table stores {Key, {Total, Times}}: the
%% sample-rate-corrected sum and the number of increment events.
%% BUG FIX: the guard previously admitted SampleRate == 0, which made
%% `1 / SampleRate' crash the server with badarith; a zero rate is now
%% rejected by the invalid-rate clause below instead.
handle_cast({increment, Key, IncrementBy, SampleRate}, State)
        % Only handle increments with proper (non-zero) sample rates
        when SampleRate > 0 andalso SampleRate =< 1 ->
    % Account for sample rates < 1.0
    Delta = IncrementBy * (1 / SampleRate),
    case ets:lookup(statsd, Key) of
        % Initialize new counters
        % The table holds both the counters value and the number of increments
        [] -> ets:insert(statsd, {Key, {Delta, 1}});
        % Otherwise update the counter
        [{Key, {Total, Times}}] ->
            ets:insert(statsd, {Key, {Total + Delta, Times + 1}})
    end,
    {noreply, State};
%% @doc Drops requests with invalid sample rates (outside (0, 1]).
handle_cast({increment, _, _, _}, State) -> {noreply, State};
%% @doc Inserts or updates the given timing.
%% Durations are accumulated per key in reverse insertion order.
handle_cast({timing, Key, Duration}, State) ->
    Timers = State#state.timers,
    case gb_trees:lookup(Key, Timers) of
        % Initialize a new timer
        none ->
            {noreply, State#state{
                timers = gb_trees:insert(Key, [Duration], Timers)
            }};
        % Otherwise just append the duration
        {value, Val} ->
            {noreply, State#state{
                timers = gb_trees:update(Key, [Duration|Val], Timers)
            }}
    end;
%% @doc Flushes the current set of metrics.
%% Snapshots counters and timers, hands them to a throwaway process (so
%% slow adapters cannot block this server), then resets both stores.
handle_cast(flush, State) ->
    % Retrieve the metrics from the state and the ETS table
    Counters = ets:tab2list(statsd),
    Timers = gb_trees:to_list(State#state.timers),
    % Handle the flush in another process
    spawn(fun() -> flush_metrics_(Counters, Timers) end),
    % Continue with a blank slate
    ets:delete_all_objects(statsd),
    NewState = State#state{timers = gb_trees:empty()},
    {noreply, NewState}.
%% @doc This server exposes no call-based API: any synchronous request
%% is logged as a warning and answered with `ok' so callers never hang.
handle_call(Req, Caller, State) ->
    Format = "[~s] Ignored call '~p' from '~p'~n",
    error_logger:warning_msg(Format, [?MODULE, Req, Caller]),
    {reply, ok, State}.
%% @doc Out-of-band messages are not part of the protocol; log them at
%% info level and leave the state untouched.
handle_info(Msg, State) ->
    Format = "[~s] Ignored info '~p'~n",
    error_logger:info_msg(Format, [?MODULE, Msg]),
    {noreply, State}.
%% @doc Code upgrade hook; keeps the state unchanged.
%% BUG FIX: the gen_server Module:code_change/3 contract requires
%% {ok, NewState} (or {error, Reason}); the previous {noreply, State}
%% return would make a hot code upgrade fail with a bad return value.
code_change(_OldVsn, State, _Extra) -> {ok, State}.
%% @doc Nothing to release on shutdown: the `statsd' ETS table is owned
%% by this process and is reclaimed automatically when it exits.
terminate(_Reason, _State) -> ok.
% ====================== /\ GEN_SERVER CALLBACKS ===============================
% ====================== \/ HELPER FUNCTIONS ===================================
%% @doc Flushes the given metrics to registered adapters via the
%% `estatsda_manager' event manager. Does nothing when both metric sets
%% are empty, so adapters are not woken up by empty flush cycles.
%% Pattern-matching on the empty lists replaces the previous
%% O(n) length/1-based emptiness check.
flush_metrics_([], []) ->
    emptyset; % Do nothing if no metrics were collected
flush_metrics_(Counters, Timers) ->
    Metrics = precompile_metrics_({Counters, Timers}),
    gen_event:notify(estatsda_manager, {metrics, Metrics}).
%% @doc Sets up the metrics event manager and its handlers (adapters).
%% Starts a locally registered gen_event manager `estatsda_manager'
%% (linked to the caller) and registers one estatsda_adapter handler per
%% adapter spec. Returns ok.
%% SPEC FIX: the previous spec claimed no_return(), but the function
%% always returns ok (the result of lists:foreach/2).
-spec setup_adapters_([{Module::atom(), InitArgs::term()}]) -> ok.
setup_adapters_(Adapters) ->
    % Start the metrics event manager
    gen_event:start_link({local, estatsda_manager}),
    % Register the specified adapters with the manager
    lists:foreach(
        fun(AdapterSpec) ->
            % Every adapter implements the estatsda_handler behaviour and is
            % also invoked through an instance of estatsda_adapter, which
            % implements the generic parts of each adapter.
            gen_event:add_handler(estatsda_manager, estatsda_adapter, AdapterSpec)
        end,
        Adapters
    ).
%% @doc Prepares the collected metrics for the adapters by precompiling
%% the counter and the timer halves independently.
-spec precompile_metrics_(metrics()) -> prepared_metrics().
precompile_metrics_({Counters, Timers}) ->
    PreparedCounters = precompile_counters_(Counters),
    PreparedTimers = precompile_timers_(Timers),
    {PreparedCounters, PreparedTimers}.
%% @doc Prepares the given counters to be handled by the adapters:
%% each {Key, {Value, NoIncrements}} entry becomes a flat triple with
%% the key rendered as a binary.
-spec precompile_counters_(counters()) -> prepared_counters().
precompile_counters_(Counters) ->
    [{erlang:list_to_binary(estatsd:key2str(Key)), Value, NoIncrements}
     || {Key, {Value, NoIncrements}} <- Counters].
%% @doc Prepares the given timers to be handled by the adapters.
%% Each {Key, Durations} entry is expanded to
%% {KeyAsBinary, SortedDurations, Count, Min, Max} so adapters can
%% compute percentiles without re-scanning the raw data.
-spec precompile_timers_(timers()) -> prepared_timers().
precompile_timers_(Timers) ->
    lists:map(
        fun({Key, Durations}) ->
            KeyAsBinary = erlang:list_to_binary(estatsd:key2str(Key)),
            DurationsSorted = lists:sort(Durations),
            Count = length(Durations),
            % BUG FIX: Min/Max must come from the SORTED list. The raw
            % list is in reverse insertion order (see handle_cast timing),
            % so hd/last of it were arbitrary samples, not min and max.
            Min = hd(DurationsSorted),
            Max = lists:last(DurationsSorted),
            {KeyAsBinary, DurationsSorted, Count, Min, Max}
        end,
        Timers).
% ====================== /\ HELPER FUNCTIONS =================================== | apps/estatsd/src/estatsd_server.erl | 0.639961 | 0.400749 | estatsd_server.erl | starcoder |
%% @copyright 2011 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Unit tests for src/histogram.erl
%% @end
%% @version $Id$
-module(histogram_SUITE).
-author('<EMAIL>').
-vsn('$Id$').
-include("unittest.hrl").
-compile(export_all).
%% @doc All common_test cases of this suite, in execution order.
all() -> [
add2,
add3,
add2_identical,
add3_identical,
add_integer,
resize,
get_num_inserts,
find_smallest_interval,
merge_interval,
perf_add,
foldl_until,
foldr_until
].
%% @doc Suite options: abort any single test case after 40 seconds.
suite() -> [ {timetrap, {seconds, 40}} ].
%% @doc No suite-wide setup needed; pass the config through unchanged.
init_per_suite(Config) ->
Config.
%% @doc No suite-wide teardown needed.
end_per_suite(_Config) ->
ok.
%% @doc add/2: distinct values inserted one at a time each get their own
%% bin while the capacity (10) is not exceeded; get_data/1 reports them
%% sorted ascending with count 1.
add2(_Config) ->
    Hist0 = histogram:create(10),
    Hist1 = lists:foldl(fun histogram:add/2, Hist0, [3.5, 3.0, 2.0, 1.0]),
    ?equals(histogram:get_data(Hist1), [{1.0,1}, {2.0,1}, {3.0,1}, {3.5,1}]),
    ok.
%% @doc add/3: inserting {Value, Count} pairs records the full count per
%% bin in one call.
add3(_Config) ->
    Hist0 = histogram:create(10),
    AddPair = fun({Value, Count}, Hist) -> histogram:add(Value, Count, Hist) end,
    Hist1 = lists:foldl(AddPair, Hist0, [{3.5, 2}, {3.0, 1}, {2.0, 5}, {1.0, 7}]),
    ?equals(histogram:get_data(Hist1), [{1.0, 7}, {2.0, 5}, {3.0, 1}, {3.5, 2}]),
    ok.
%% @doc add/2 with repeated values: identical values accumulate in the
%% same bin instead of occupying new ones, so capacity 2 suffices.
add2_identical(_Config) ->
    Hist0 = histogram:create(2),
    Values = [1.25, 5.0, 5.0],
    Hist1 = lists:foldl(fun histogram:add/2, Hist0, Values),
    ?equals(histogram:get_data(Hist1), [{1.25, 1}, {5.0, 2}]),
    %% A second pass doubles every count.
    Hist2 = lists:foldl(fun histogram:add/2, Hist1, Values),
    ?equals(histogram:get_data(Hist2), [{1.25, 2}, {5.0, 4}]),
    ok.
%% @doc add/3 with a repeated value: counts for the same value (2.0)
%% are summed within its bin.
add3_identical(_Config) ->
    Hist0 = histogram:create(4),
    AddPair = fun({Value, Count}, Hist) -> histogram:add(Value, Count, Hist) end,
    Hist1 = lists:foldl(AddPair, Hist0,
                        [{2.0, 1}, {3.5, 2}, {3.0, 1}, {2.0, 6}, {1.0, 3}]),
    ?equals(histogram:get_data(Hist1), [{1.0, 3}, {2.0, 7}, {3.0, 1}, {3.5, 2}]),
    ok.
%% @doc add/2 with integer values: once the capacity (5) is exceeded the
%% closest bins are merged (1 and 2 collapse onto 1 here), keeping at
%% most 5 bins.
add_integer(_Config) ->
    Hist0 = histogram:create(5),
    Hist1 = lists:foldl(fun histogram:add/2, Hist0, [1, 2, 3, 4, 5]),
    ?equals(histogram:get_data(Hist1), [{1, 1}, {2, 1}, {3, 1}, {4, 1}, {5, 1}]),
    Hist2 = lists:foldl(fun histogram:add/2, Hist1, [3, 4, 10]),
    ?equals(histogram:get_data(Hist2), [{1, 2}, {3, 2}, {4, 2}, {5, 1}, {10, 1}]),
    ok.
%% @doc Overfilling a capacity-3 histogram merges the two closest bins
%% (3.0 and 3.5) into their weighted midpoint {3.25, 2}.
resize(_Config) ->
    Hist0 = histogram:create(3),
    Hist1 = lists:foldl(fun histogram:add/2, Hist0, [3.5, 3.0, 2.0, 1.0]),
    ?equals(histogram:get_data(Hist1), [{1.0,1}, {2.0,1}, {3.25,2}]),
    ok.
%% @doc get_num_inserts/1 counts every inserted sample: 0 when fresh,
%% +1 per add/2 call, and +Count per add/3 call.
get_num_inserts(_Config) ->
    Hist0 = histogram:create(10),
    ?equals(histogram:get_num_inserts(Hist0), 0),
    Hist1 = lists:foldl(fun histogram:add/2, Hist0, [3.5, 3.0, 2.0, 1.0]),
    ?equals(histogram:get_num_inserts(Hist1), 4),
    AddPair = fun({Value, Count}, Hist) -> histogram:add(Value, Count, Hist) end,
    Hist2 = lists:foldl(AddPair, Hist1,
                        [{2.0, 1}, {3.5, 2}, {3.0, 1}, {2.0, 6}, {1.0, 3}]),
    %% 4 singles + (1 + 2 + 1 + 6 + 3) = 17 samples in total.
    ?equals(histogram:get_num_inserts(Hist2), 17),
    ok.
%% @doc find_smallest_interval/1 returns the left endpoint of the
%% narrowest gap between adjacent bins.
find_smallest_interval(_Config) ->
    Empty = histogram:create(10),
    %% Gaps 1.0|2.0|3.0|3.5: narrowest starts at 3.0.
    HistA = lists:foldl(fun histogram:add/2, Empty, [3.5, 3.0, 2.0, 1.0]),
    ?equals(3.0, histogram:find_smallest_interval(histogram:get_data(HistA))),
    %% Gaps 1.0|2.0|2.5|4.0: narrowest starts at 2.0.
    HistB = lists:foldl(fun histogram:add/2, Empty, [4.0, 2.5, 2.0, 1.0]),
    ?equals(2.0, histogram:find_smallest_interval(histogram:get_data(HistB))),
    ok.
%% @doc merge_interval/2 collapses the narrowest interval (found by
%% find_smallest_interval/1) into a single weighted bin.
merge_interval(_Config) ->
    Empty = histogram:create(10),
    Hist = lists:foldl(fun histogram:add/2, Empty, [3.5, 3.0, 2.0, 1.0]),
    Data = histogram:get_data(Hist),
    SmallestLeft = histogram:find_smallest_interval(Data),
    Merged = histogram:merge_interval(SmallestLeft, Data),
    ?equals(3.0, SmallestLeft),
    %% 3.0 and 3.5 fuse into their midpoint with the combined count.
    ?equals(Merged, [{1.0,1}, {2.0,1}, {3.25,2}]),
    ok.
%% @doc Performance smoke test for histogram:add/2: runs two timed
%% insertion passes via the helpers in performance_SUITE (the second
%% pass starts from the already-populated histogram). Asserts nothing
%% beyond "does not crash"; timing output is labelled by the last
%% argument.
perf_add(_Config) ->
Hist = histogram:create(10),
AddIntFun = fun(I, AccH) -> histogram:add(float(I), AccH) end,
Hist2 = performance_SUITE:iter2_foldl(performance_SUITE:count(), AddIntFun, Hist, "histogram:add (1)"),
_Hist3 = performance_SUITE:iter2_foldl(performance_SUITE:count(), AddIntFun, Hist2, "histogram:add (2)"),
ok.
%% @doc foldl_until/2 accumulates bin counts from the LOW end of the
%% histogram until the requested sum is reached, returning
%% {ok | fail, LastBinValue, AccumulatedSum}; `fail' means the target
%% was never reached, `nil' means no bin was consumed at all.
foldl_until(_Config) ->
H1 = histogram:create(10),
Values = [1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5],
H2 = lists:foldl(fun histogram:add/2, H1, Values),
%% sum found
{ok, Val, Sum} = histogram:foldl_until(6, H2),
?equals(Val, 3),
?equals(Sum, 6),
%% target value too high
{fail, Val2, Sum2} = histogram:foldl_until(1000, H2),
?equals(Val2, 5),
?equals(Sum2, 12),
%% target value too low
{ok, Val3, Sum3} = histogram:foldl_until(0, H2),
?equals(Val3, nil),
?equals(Sum3, 0),
ok.
%% @doc foldr_until/2 accumulates bin counts from the HIGH end of the
%% histogram until the requested sum is reached, returning
%% {ok | fail, LastBinValue, AccumulatedSum}; `fail' means the target
%% was never reached, `nil' means no bin was consumed at all.
%% FIX: removed non-Erlang residue that had been fused onto the final
%% `ok.' line, which made the function a syntax error.
foldr_until(_Config) ->
    H1 = histogram:create(10),
    Values = [1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5],
    H2 = lists:foldl(fun histogram:add/2, H1, Values),
    %% sum found
    {ok, Val, Sum} = histogram:foldr_until(6, H2),
    ?equals(Val, 4),
    ?equals(Sum, 6),
    %% target value too high
    {fail, Val2, Sum2} = histogram:foldr_until(1000, H2),
    ?equals(Val2, 1),
    ?equals(Sum2, 12),
    %% target value too low
    {ok, Val3, Sum3} = histogram:foldr_until(0, H2),
    ?equals(Val3, nil),
    ?equals(Sum3, 0),
    ok.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(lasp_sql_materialized_view).
-author("<NAME> <<EMAIL>>").
-include("lasp.hrl").
-export([create/1,
create/2,
get_value/2,
insert_row/2,
create_empty_table/1,
create_table_with_values/2]).
-ifdef(TEST).
-export([generate_identifier/1]).
-endif.
-record(state, {output_id, set_id, projection_output_id, predicate_output_id}).
-define(DEFAULT, <<"hi">>).
%% @doc Declares a fresh (empty) Lasp set for the given SQL table name
%% and returns its {Name, Type} identifier.
create_empty_table(TableName) when is_atom(TableName) ->
    Identifier = generate_identifier(TableName),
    {Name, Type} = Identifier,
    lasp:declare(Name, Type),
    Identifier.
%% @doc Declares a Lasp set for the SQL table and inserts every given
%% row (a proplist) into it as a map element.
create_table_with_values(TableName, Rows) when is_atom(TableName) ->
    {Name, Type} = generate_identifier(TableName),
    lasp:declare(Name, Type),
    InsertRow =
        fun(Row) ->
            lasp:update({Name, ?SET}, {add, maps:from_list(Row)}, a)
        end,
    lists:foreach(InsertRow, Rows).
%% @doc Adds one row (a proplist, stored as a map) to the table's Lasp
%% set. TableName may be the table atom or an already-built identifier.
insert_row(TableName, Row) ->
    Id = case is_atom(TableName) of
             true -> generate_identifier(TableName);
             false -> TableName
         end,
    lasp:update(Id, {add, maps:from_list(Row)}, a).
%% @doc Given a SQL table and a list of column names, returns for every
%% stored row the values of those columns (in the requested order).
%% TableName may be the table atom or an already-built identifier.
get_value(_TableName, []) -> [];
get_value(TableName, Columns) ->
    Id = case is_atom(TableName) of
             true -> generate_identifier(TableName);
             false -> TableName
         end,
    {ok, Set} = lasp:query(Id),
    [[maps:get(Column, RowMap) || Column <- Columns]
     || RowMap <- sets:to_list(Set)].
%% @doc Create a SQL view from a textual specification, declaring a
%% fresh Lasp set to hold the view's output and delegating to create/2.
create(Specification) when is_list(Specification) ->
%% Create a node for the result of the view's projection.
{ok, {OutputId, _, _, _}} = lasp:declare(?SET),
%% Create using a given output.
create(OutputId, Specification).
%% @doc Create a SQL view from a textual specification, writing the
%% view's result into the given OutputId.
%% NOTE(review): `OutputId = materialize(...)' is a match against the
%% already-bound parameter, i.e. an assertion that materialize/2 returns
%% the same output id it was handed via the state record.
%% NOTE(review): ct:pal/2 only works under common_test — confirm this
%% module is not used outside test runs, or switch to a logger.
create(OutputId, Specification) when is_list(Specification) ->
%% Tokenize the string.
{ok, Tokens, _EndLine} = ?SQL_LEXER:string(Specification),
ct:pal("Tokens: ~p", [Tokens]),
%% Parse the tokens.
{ok, ParseTree} = ?SQL_PARSER:parse(Tokens),
ct:pal("Parse Tree: ~p", [ParseTree]),
%% Create and return identifier.
OutputId = materialize(ParseTree, #state{output_id=OutputId}),
%% Return output identifier.
{ok, OutputId}.
%% Entry point to evaluation of the parse tree.
%% @doc Recursively materializes a Lasp dataflow DAG from the parse
%% tree: predicates become filter nodes over the source collection,
%% AND/OR become intersection/union nodes, and the projection becomes a
%% map node feeding the view's output_id. Each clause threads the
%% #state{} record and returns either the final output id (query clause)
%% or {NodeId, State}.
materialize({query, Projections, {from, Collection}, Predicates}, State0) ->
%% Convert collection identifier to binary.
CollectionId = generate_identifier(Collection),
%% Materialize a dataflow graph for the predicate tree.
{PredicateOutputId, State1} = materialize(Predicates,
State0#state{set_id=CollectionId}),
%% Materialize projections.
{ProjectionOutputId, _State} = materialize(Projections,
State1#state{predicate_output_id=PredicateOutputId}),
%% Return the top node of the DAG.
ProjectionOutputId;
%% Projection: map each surviving row onto the selected columns.
materialize({select, Projections},
#state{output_id=OutputId, predicate_output_id=PredicateOutputId}=State) ->
%% Apply the projection.
lasp:map(PredicateOutputId,
fun(Tuple) -> extract(Projections, Tuple) end,
OutputId),
{OutputId, State};
%% WHERE is just a wrapper around its predicate tree.
materialize({where, Predicates}, State) ->
materialize(Predicates, State);
%% AND of two predicate subtrees -> set intersection node.
materialize({intersection, Left, Right}, State0) ->
%% Build predicates on the left.
{LeftId, State1} = materialize(Left, State0),
%% Build predicates on the right.
{RightId, State} = materialize(Right, State1),
%% Create a node for the result of the predicate.
{ok, {OutputId, _, _, _}} = lasp:declare(?SET),
%% Intersection.
lasp:intersection(LeftId, RightId, OutputId),
{OutputId, State};
%% OR of two predicate subtrees -> set union node.
materialize({union, Left, Right}, State0) ->
%% Build predicates on the left.
{LeftId, State1} = materialize(Left, State0),
%% Build predicates on the right.
{RightId, State} = materialize(Right, State1),
%% Create a node for the result of the predicate.
{ok, {OutputId, _, _, _}} = lasp:declare(?SET),
%% Union.
lasp:union(LeftId, RightId, OutputId),
{OutputId, State};
%% Predicate handling: a leaf comparison becomes a filter node over the
%% source collection (set_id).
materialize({predicate, {var, Variable}, Comparator, Element},
#state{set_id=SetId}=State) ->
%% Create a node for the result of the predicate.
{ok, {OutputId, _, _, _}} = lasp:declare(?SET),
%% Filter the source into the predicate.
lasp:filter(SetId,
fun(Tuple) -> comparator(Tuple, Variable, Comparator, Element) end,
OutputId),
{OutputId, State}.
%% @doc Evaluates one SQL comparison for a row: fetches Variable's value
%% from the row map and compares it to Element with the operator atom
%% produced by the lexer ('=>' denotes >=, '=' exact equality).
%% Crashes with badkey if Variable is not present in the row;
%% exits on an unsupported operator.
comparator(Tuple, Variable, Comparator, Element) ->
    Value = maps:get(Variable, Tuple),
    case Comparator of
        '=>' -> Value >= Element;
        '>'  -> Value >  Element;
        '<'  -> Value <  Element;
        '<=' -> Value =< Element;
        '='  -> Value =:= Element;
        _    -> exit({error, comparator_unsupported})
    end.
%% @doc Projects a row map onto the listed variables, producing a new
%% map; variables missing from the row map to `undefined'.
extract(Variables, Tuple) ->
    Pairs = [{Var, maps:get(Var, Tuple, undefined)} || Var <- Variables],
    maps:from_list(Pairs).
%% @doc Derives the Lasp variable identifier for a SQL table name: the
%% atom's name as a binary, typed as the configured set CRDT (?SET).
%% atom_to_binary/2 with latin1 is the direct equivalent of the previous
%% list_to_binary(atom_to_list(Id)) round-trip (both raise badarg for
%% atoms with code points above 255).
%% FIX: removed non-Erlang residue that had been fused onto the closing
%% line, which made the function a syntax error.
generate_identifier(Id) when is_atom(Id) ->
    {atom_to_binary(Id, latin1), ?SET}.
%% -------------------------------------------------------------------
%%
%% xs_regex - XML Schema regex translation
%%
%% Copyright (c) 2017-2018 <NAME> All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Module for unicode range lookup and combination.
-module(xs_regex_util).
%% ====================================================================
%% API functions
%% ====================================================================
-export([range/1]).
-export([
range_to_set/1,
range_to_regex/1,
decode_string/1
]).
-export([
intersection/2,
union/2,
subtract/2,
symmetric_diff/2
]).
-type range() :: list(
{range, non_neg_integer(), non_neg_integer()}
| {value, non_neg_integer()}
).
-type regex() :: string().
%% @doc Renders a range() as the body of a regex character class, e.g.
%% [{value,$a},{range,$0,$9}] -> "a0-9". Single-point ranges collapse
%% to one character; each code point is rendered by hex/1.
-spec range_to_regex(range()) -> regex().
range_to_regex(Range) ->
    lists:flatmap(
        fun({value, V}) -> hex(V);
           ({range, V, V}) -> hex(V);
           ({range, Lo, Hi}) -> hex(Lo) ++ "-" ++ hex(Hi)
        end,
        Range).
%% @doc Orders a range() by its lower bound — the second tuple element
%% of both {value, V} and {range, Lo, Hi} entries.
-spec range_to_set(range()) -> range().
range_to_set(Entries) ->
    lists:keysort(2, Entries).
% A∩B Intersection of two ranges.
% Both arguments must be normalized range() lists: sorted ascending and
% non-overlapping (as produced by range_to_set/1). Each step compares
% the smallest entries of both lists and drops, emits, or splits the
% one(s) it has fully processed; splitting ranges at the overlap
% boundary keeps the shared edge point in play for the next step.
% Throws {error, Set1, Set2} on input it cannot order.
-spec intersection(range(), range()) -> range().
intersection([], _) ->
[];
intersection(_, []) ->
[];
% simple values
intersection([{value, V1} | T1], [{value, V2} | T2]) when V1 < V2 ->
intersection(T1, [{value, V2} | T2]);
intersection([{value, V1} | T1], [{value, V2} | T2]) when V1 > V2 ->
intersection([{value, V1} | T1], T2);
% ==
intersection([{value, V1} | T1], [{value, _} | T2]) ->
[{value, V1} | intersection(T1, T2)];
% value | range
intersection([{value, V1} | T1], [{range, V2, _} | _] = R2) when V1 < V2 ->
intersection(T1, R2);
intersection([{value, V1} | _] = R1, [{range, _, M2} | T2]) when V1 > M2 ->
intersection(R1, T2);
intersection([{value, V1} | T1], [{range, _, M2} | T2]) when V1 == M2 ->
[{value, V1} | intersection(T1, T2)];
intersection([{value, V1} | T1], [{range, V2, _} | _] = R2) when V1 == V2 ->
[{value, V1} | intersection(T1, R2)];
intersection([{value, V1} | T1], [{range, V2, M2} | T2]) when V1 >= V2, V1 =< M2 ->
[{value, V1} | intersection(T1, [{range, V1, M2} | T2])];
% range | value = switch (handled by the value | range clauses above)
intersection([{range, _, _} | _] = R1, [{value, _} | _] = R2) ->
intersection(R2, R1);
% range | range
intersection([{range, _, M1} | T1], [{range, V2, _} | _] = R2) when M1 < V2 ->
intersection(T1, R2);
intersection([{range, _, M1} | T1], [{range, V2, _} | _] = R2) when M1 == V2 ->
[{value, M1} | intersection(T1, R2)];
intersection([{range, V1, _} | _] = R1, [{range, _, M2} | T2]) when V1 > M2 ->
intersection(R1, T2);
intersection([{range, V1, _} | _] = R1, [{range, _, M2} | T2]) when V1 == M2 ->
[{value, M2} | intersection(R1, T2)];
% containment: emit the inner range, keep the outer one for more overlap
intersection([{range, V1, M1} | T1], [{range, V2, M2} | _] = R2) when V1 >= V2, M1 =< M2 ->
[{range, V1, M1} | intersection(T1, R2)];
intersection([{range, V1, M1} | _] = R1, [{range, V2, M2} | T2]) when V2 >= V1, M2 =< M1 ->
[{range, V2, M2} | intersection(R1, T2)];
% partial overlap: emit the shared part, split at the boundary
intersection([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 < V2, M1 < M2 ->
[{range, V2, M1} | intersection(T1, [{range, M1, M2} | T2])];
intersection([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V2 < V1, M2 < M1 ->
[{range, V1, M2} | intersection([{range, M2, M1} | T1], T2)];
intersection(Set1, Set2) ->
throw({error, Set1, Set2}).
%% A∪B Union of two ranges.
%% Both arguments must be normalized range() lists (sorted ascending,
%% non-overlapping). Adjacent entries (difference of exactly 1) are
%% fused into a single range; overlapping entries are widened step by
%% step until disjoint, then emitted in order.
%% Throws {unknown, R1, R2} on input it cannot order.
-spec union(range(), range()) -> range().
union([], R2) ->
R2;
union(R1, []) ->
R1;
% value | value
% adjacent values fuse into a range
union([{value, V1} | T1], [{value, V2} | T2]) when V1 + 1 == V2 ->
union(T1, [{range, V1, V2} | T2]);
union([{value, V1} | T1], [{value, V2} | T2]) when V1 < V2 ->
[{value, V1} | union(T1, [{value, V2} | T2])];
union([{value, V1} | T1], [{value, V2} | T2]) when V1 == V2 + 1 ->
union([{range, V2, V1} | T1], T2);
union([{value, V1} | T1], [{value, V2} | T2]) when V1 > V2 ->
[{value, V2} | union([{value, V1} | T1], T2)];
% equal values: keep one
union([{value, V1} | T1], [{value, _} | T2]) ->
[{value, V1} | union(T1, T2)];
% value | range
union([{value, V1} | T1], [{range, V2, M2} | T2]) when V1 + 1 == V2 ->
union(T1, [{range, V1, M2} | T2]);
union([{value, V1} | T1], [{range, V2, M2} | T2]) when M2 + 1 == V1 ->
union([{range, V2, V1} | T1], T2);
% value already covered by the range
union([{value, V1} | T1], [{range, V2, M2} | _] = R2) when V1 >= V2, V1 =< M2 ->
union(T1, R2);
union([{value, V1} | T1], [{range, V2, _} | _] = R2) when V1 < V2 ->
[{value, V1} | union(T1, R2)];
union([{value, V1} | _] = R1, [{range, V2, M2} | T2]) when V1 > M2 ->
[{range, V2, M2} | union(R1, T2)];
% range | value = switch (handled by the value | range clauses above)
union([{range, _, _} | _] = R1, [{value, _} | _] = R2) ->
union(R2, R1);
% range | range
union([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when M1 + 1 < V2 ->
[{range, V1, M1} | union(T1, [{range, V2, M2} | T2])];
union([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when M2 + 1 < V1 ->
[{range, V2, M2} | union([{range, V1, M1} | T1], T2)];
% overlapping/adjacent ranges: widen and continue
union([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 < V2, M1 + 1 =< M2 ->
union(T1, [{range, V1, M2} | T2]);
union([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 >= V2 + 1, M1 > M2 ->
union([{range, V2, M1} | T1], T2);
% containment: drop the inner range
union([{range, V1, M1} | T1], [{range, V2, M2} | _T2] = R2) when V1 >= V2, M1 =< M2 ->
union(T1, R2);
union([{range, V1, M1} | _T1] = R1, [{range, V2, M2} | T2]) when V2 >= V1, M2 =< M1 ->
union(R1, T2);
% other
union(R1, R2) ->
throw({unknown, R1, R2}).
%% A-B Range A minus Range B.
%% Both arguments must be normalized range() lists (sorted ascending,
%% non-overlapping). Entries of A that overlap B are trimmed or split:
%% a removed point inside a range splits it in two, and single-point
%% leftovers degrade from {range,..} to {value,..}.
%% Throws {error, {subtract, Set1, Set2}} on input it cannot order.
-spec subtract(range(), range()) -> range().
subtract(R1, []) ->
R1;
subtract([], _) ->
[];
% value - value
subtract([{value, V1} | _] = R1, [{value, V2} | T2]) when V1 > V2 ->
subtract(R1, T2);
subtract([{value, V1} | T1], [{value, V2} | _] = R2) when V1 < V2 ->
[{value, V1} | subtract(T1, R2)];
subtract([{value, _} | T1], [{value, _} | T2]) ->
subtract(T1, T2);
% value - range
subtract([{value, V1} | T1], [{range, V2, _} | _] = R2) when V1 < V2 ->
[{value, V1} | subtract(T1, R2)];
subtract([{value, V1} | _] = R1, [{range, _, M2} | T2]) when V1 > M2 ->
subtract(R1, T2);
% value inside the range: drop it
subtract([{value, V1} | T1], [{range, V2, M2} | _] = R2) when V1 >= V2, V1 =< M2 ->
subtract(T1, R2);
% range - value
subtract([{range, V1, M1} | T1], [{value, V2} | _] = R2) when M1 < V2 ->
[{range, V1, M1} | subtract(T1, R2)];
subtract([{range, V1, _} | _] = R1, [{value, V2} | T2]) when V1 > V2 ->
subtract(R1, T2);
% removing an endpoint or interior point of the range; two-wide ranges
% degrade to a single {value, _}
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when M1 == V2, M1 == V1 + 1 ->
[{value, V1} | subtract(T1, T2)];
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when M1 == V2 ->
[{range, V1, M1 - 1} | subtract(T1, T2)];
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when V1 == V2, M1 == V1 + 1 ->
subtract([{value, M1} | T1], T2);
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when V1 == V2 ->
subtract([{range, V1 + 1, M1} | T1], T2);
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when V2 == V1 + 1, V2 == M1 - 1 ->
[{value, V1} | subtract([{value, M1} | T1], T2)];
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when V2 == V1 + 1, V2 =< M1 ->
[{value, V1} | subtract([{range, V2 + 1, M1} | T1], T2)];
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when V2 > V1, V2 == M1 - 1 ->
[{range, V1, V2 - 1} | subtract([{value, M1} | T1], T2)];
subtract([{range, V1, M1} | T1], [{value, V2} | T2]) when V2 > V1, V2 =< M1 ->
[{range, V1, V2 - 1} | subtract([{range, V2 + 1, M1} | T1], T2)];
% range | range
subtract([{range, V1, M1} | T1], [{range, V2, _} | _] = R2) when M1 < V2 ->
[{range, V1, M1} | subtract(T1, R2)];
subtract([{range, V1, _} | _] = R1, [{range, _, M2} | T2]) when V1 > M2 ->
subtract(R1, T2);
% fully covered: the whole range disappears
subtract([{range, V1, M1} | T1], [{range, V2, M2} | _] = R2) when V1 >= V2, M1 =< M2 ->
subtract(T1, R2);
subtract([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 == V2, M1 == M2 ->
subtract(T1, T2);
subtract([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 == V2, M1 == M2 + 1 ->
subtract([{value, M1} | T1], T2);
subtract([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 == V2, M1 > M2 ->
subtract([{range, M2 + 1, M1} | T1], T2);
subtract([{range, V1, M1} | T1], [{range, V2, M2} | _] = R2) when V1 == V2, M1 < M2 ->
subtract(T1, R2);
subtract([{range, V1, M1} | T1], [{range, V2, M2} | _] = R2) when V1 + 1 == V2, M1 == M2 + 1 ->
[{value, V1} | subtract([{value, M1} | T1], R2)];
subtract([{range, V1, M1} | T1], [{range, V2, M2} | _] = R2) when V1 + 1 == V2, M1 =< M2 ->
[{value, V1} | subtract(T1, R2)];
subtract([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 + 1 == V2 ->
[{value, V1} | subtract([{range, M2 + 1, M1} | T1], T2)];
subtract([{range, V1, M1} | T1], [{range, V2, M2} | _] = R2) when V1 < V2, M1 == M2 + 1 ->
[{range, V1, V2 - 1} | subtract([{value, M1} | T1], R2)];
subtract([{range, V1, M1} | T1], [{range, V2, M2} | _] = R2) when V1 < V2, M1 =< M2 ->
[{range, V1, V2 - 1} | subtract(T1, R2)];
subtract([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 < V2 ->
[{range, V1, V2 - 1} | subtract([{range, M2 + 1, M1} | T1], T2)];
subtract([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 > V2, M1 == M2 + 1 ->
subtract([{value, M1} | T1], T2);
subtract([{range, V1, M1} | T1], [{range, V2, M2} | T2]) when V1 > V2, M1 > M2 ->
subtract([{range, M2 + 1, M1} | T1], T2);
subtract(Set1, Set2) ->
throw({error, {subtract, Set1, Set2}}).
%% A⊖B Symmetric difference of two ranges: everything that appears in
%% exactly one of them, computed as (A∪B) - (A∩B).
-spec symmetric_diff(range(), range()) -> range().
symmetric_diff(RangeA, RangeB) ->
    subtract(union(RangeA, RangeB), intersection(RangeA, RangeB)).
%% @doc Renders one code point for use inside a regex character class:
%% ASCII letters and digits stay literal, everything else becomes an
%% uppercase-hex escape of the form \x{HEX}.
hex(C) when C >= $0, C =< $9 -> [C];
hex(C) when C >= $A, C =< $Z -> [C];
hex(C) when C >= $a, C =< $z -> [C];
hex(CodePoint) ->
    "\\x{" ++ integer_to_list(CodePoint, 16) ++ "}".
%% @doc Replaces XML character and entity references with their code
%% points in a string: the five predefined entities (&apos; &quot;
%% &amp; &gt; &lt;) plus decimal (&#NN;) and hexadecimal (&#xNN;)
%% character references. All other characters pass through unchanged.
%% BUG FIX: the entity-name patterns had been corrupted (the literal
%% "&apos;"/"&quot;"/... strings were reduced to their bare replacement
%% characters, leaving one clause syntactically invalid); restored from
%% the replacement code points and the surviving &#/&#x clauses.
-spec decode_string(string()) -> string().
decode_string(Str) ->
    decode_string(Str, []).

%% Tail-recursive worker; Acc holds the decoded prefix in reverse.
decode_string([], Acc) ->
    lists:reverse(Acc);
decode_string("&apos;" ++ T, Acc) ->
    decode_string(T, [$' | Acc]);
decode_string("&quot;" ++ T, Acc) ->
    decode_string(T, [$" | Acc]);
decode_string("&amp;" ++ T, Acc) ->
    decode_string(T, [$& | Acc]);
decode_string("&gt;" ++ T, Acc) ->
    decode_string(T, [$> | Acc]);
decode_string("&lt;" ++ T, Acc) ->
    decode_string(T, [$< | Acc]);
%% "&#x" must be tried before "&#".
decode_string("&#x" ++ T, Acc) ->
    {CP, T1} = scan_hex_char_ref(T, []),
    decode_string(T1, [CP | Acc]);
decode_string("&#" ++ T, Acc) ->
    {CP, T1} = scan_dec_char_ref(T, []),
    decode_string(T1, [CP | Acc]);
decode_string([H | T], Acc) ->
    decode_string(T, [H | Acc]).

%% Consumes decimal digits up to the terminating ';', returning the
%% code point and the remaining input; crashes on malformed references.
scan_dec_char_ref([H | T], Acc) when H >= $0, H =< $9 ->
    scan_dec_char_ref(T, [H | Acc]);
scan_dec_char_ref([$; | T], Acc) ->
    {list_to_integer(lists:reverse(Acc)), T}.

%% Consumes hexadecimal digits up to the terminating ';'.
scan_hex_char_ref([H | T], Acc) when
    H >= $0, H =< $9;
    H >= $a, H =< $f;
    H >= $A, H =< $F
->
    scan_hex_char_ref(T, [H | Acc]);
scan_hex_char_ref([$; | T], Acc) ->
    {list_to_integer(lists:reverse(Acc), 16), T}.
% http://www.unicode.org/reports/tr18/ "The values for these properties must
% follow the Unicode definitions, and include the property and property value
% aliases from the UCD. Matching of Binary, Enumerated, Catalog, and Name
% values, must follow the Matching Rules from [UAX44] with one exception:
% implementations are not required to ignore an initial prefix string of "is"
% in property values."
% https://www.w3.org/TR/xmlschema-2/#regexs
%% Returns the range() of all unicode character codepoints for a given property.
-spec range(string()) -> range().
range("IsBasicLatin") ->
[{range, 16#0000, 16#007F}];
range("IsLatin-1Supplement") ->
[{range, 16#0080, 16#00FF}];
range("IsLatinExtended-A") ->
[{range, 16#0100, 16#017F}];
range("IsLatinExtended-B") ->
[{range, 16#0180, 16#024F}];
range("IsIPAExtensions") ->
[{range, 16#0250, 16#02AF}];
range("IsSpacingModifierLetters") ->
[{range, 16#02B0, 16#02FF}];
range("IsCombiningDiacriticalMarks") ->
[{range, 16#0300, 16#036F}];
range("IsGreek") ->
[{range, 16#0370, 16#03FF}];
range("IsCyrillic") ->
[{range, 16#0400, 16#04FF}];
range("IsArmenian") ->
[{range, 16#0530, 16#058F}];
range("IsHebrew") ->
[{range, 16#0590, 16#05FF}];
range("IsArabic") ->
[{range, 16#0600, 16#06FF}];
range("IsSyriac") ->
[{range, 16#0700, 16#074F}];
range("IsThaana") ->
[{range, 16#0780, 16#07BF}];
range("IsDevanagari") ->
[{range, 16#0900, 16#097F}];
%% Clauses mapping XML-Schema block-escape names ("IsX", as used in
%% \p{IsX} / \P{IsX} regex character classes) to the codepoint span of
%% the corresponding Unicode block. Each clause returns a list of
%% {range, From, To} and/or {value, Codepoint} tuples (inclusive bounds).
range("IsBengali") ->
[{range, 16#0980, 16#09FF}];
range("IsGurmukhi") ->
[{range, 16#0A00, 16#0A7F}];
range("IsGujarati") ->
[{range, 16#0A80, 16#0AFF}];
range("IsOriya") ->
[{range, 16#0B00, 16#0B7F}];
range("IsTamil") ->
[{range, 16#0B80, 16#0BFF}];
range("IsTelugu") ->
[{range, 16#0C00, 16#0C7F}];
range("IsKannada") ->
[{range, 16#0C80, 16#0CFF}];
range("IsMalayalam") ->
[{range, 16#0D00, 16#0D7F}];
range("IsSinhala") ->
[{range, 16#0D80, 16#0DFF}];
range("IsThai") ->
[{range, 16#0E00, 16#0E7F}];
range("IsLao") ->
[{range, 16#0E80, 16#0EFF}];
range("IsTibetan") ->
[{range, 16#0F00, 16#0FFF}];
range("IsMyanmar") ->
[{range, 16#1000, 16#109F}];
range("IsGeorgian") ->
[{range, 16#10A0, 16#10FF}];
range("IsHangulJamo") ->
[{range, 16#1100, 16#11FF}];
range("IsEthiopic") ->
[{range, 16#1200, 16#137F}];
range("IsCherokee") ->
[{range, 16#13A0, 16#13FF}];
range("IsUnifiedCanadianAboriginalSyllabics") ->
[{range, 16#1400, 16#167F}];
range("IsOgham") ->
[{range, 16#1680, 16#169F}];
range("IsRunic") ->
[{range, 16#16A0, 16#16FF}];
range("IsKhmer") ->
[{range, 16#1780, 16#17FF}];
range("IsMongolian") ->
[{range, 16#1800, 16#18AF}];
range("IsLatinExtendedAdditional") ->
[{range, 16#1E00, 16#1EFF}];
range("IsGreekExtended") ->
[{range, 16#1F00, 16#1FFF}];
range("IsGeneralPunctuation") ->
[{range, 16#2000, 16#206F}];
range("IsSuperscriptsandSubscripts") ->
[{range, 16#2070, 16#209F}];
range("IsCurrencySymbols") ->
[{range, 16#20A0, 16#20CF}];
range("IsCombiningMarksforSymbols") ->
[{range, 16#20D0, 16#20FF}];
range("IsLetterlikeSymbols") ->
[{range, 16#2100, 16#214F}];
range("IsNumberForms") ->
[{range, 16#2150, 16#218F}];
range("IsArrows") ->
[{range, 16#2190, 16#21FF}];
range("IsMathematicalOperators") ->
[{range, 16#2200, 16#22FF}];
range("IsMiscellaneousTechnical") ->
[{range, 16#2300, 16#23FF}];
range("IsControlPictures") ->
[{range, 16#2400, 16#243F}];
range("IsOpticalCharacterRecognition") ->
[{range, 16#2440, 16#245F}];
range("IsEnclosedAlphanumerics") ->
[{range, 16#2460, 16#24FF}];
range("IsBoxDrawing") ->
[{range, 16#2500, 16#257F}];
range("IsBlockElements") ->
[{range, 16#2580, 16#259F}];
range("IsGeometricShapes") ->
[{range, 16#25A0, 16#25FF}];
range("IsMiscellaneousSymbols") ->
[{range, 16#2600, 16#26FF}];
range("IsDingbats") ->
[{range, 16#2700, 16#27BF}];
range("IsBraillePatterns") ->
[{range, 16#2800, 16#28FF}];
range("IsCJKRadicalsSupplement") ->
[{range, 16#2E80, 16#2EFF}];
range("IsKangxiRadicals") ->
[{range, 16#2F00, 16#2FDF}];
range("IsIdeographicDescriptionCharacters") ->
[{range, 16#2FF0, 16#2FFF}];
range("IsCJKSymbolsandPunctuation") ->
[{range, 16#3000, 16#303F}];
range("IsHiragana") ->
[{range, 16#3040, 16#309F}];
range("IsKatakana") ->
[{range, 16#30A0, 16#30FF}];
range("IsBopomofo") ->
[{range, 16#3100, 16#312F}];
range("IsHangulCompatibilityJamo") ->
[{range, 16#3130, 16#318F}];
range("IsKanbun") ->
[{range, 16#3190, 16#319F}];
range("IsBopomofoExtended") ->
[{range, 16#31A0, 16#31BF}];
range("IsEnclosedCJKLettersandMonths") ->
[{range, 16#3200, 16#32FF}];
range("IsCJKCompatibility") ->
[{range, 16#3300, 16#33FF}];
range("IsCJKUnifiedIdeographsExtensionA") ->
[{range, 16#3400, 16#4DB5}];
range("IsCJKUnifiedIdeographs") ->
[{range, 16#4E00, 16#9FFF}];
range("IsYiSyllables") ->
[{range, 16#A000, 16#A48F}];
range("IsYiRadicals") ->
[{range, 16#A490, 16#A4CF}];
range("IsHangulSyllables") ->
[{range, 16#AC00, 16#D7A3}];
%% Three disjoint spans: the BMP private-use area plus both
%% supplementary private-use planes (15 and 16).
range("IsPrivateUse") ->
[
{range, 16#E000, 16#F8FF},
{range, 16#F0000, 16#FFFFD},
{range, 16#100000, 16#10FFFD}
];
range("IsCJKCompatibilityIdeographs") ->
[{range, 16#F900, 16#FAFF}];
range("IsAlphabeticPresentationForms") ->
[{range, 16#FB00, 16#FB4F}];
range("IsArabicPresentationForms-A") ->
[{range, 16#FB50, 16#FDFF}];
range("IsCombiningHalfMarks") ->
[{range, 16#FE20, 16#FE2F}];
range("IsCJKCompatibilityForms") ->
[{range, 16#FE30, 16#FE4F}];
range("IsSmallFormVariants") ->
[{range, 16#FE50, 16#FE6F}];
%% Upper bound is FEFE, not FEFF: U+FEFF belongs to "IsSpecials" below.
range("IsArabicPresentationForms-B") ->
[{range, 16#FE70, 16#FEFE}];
range("IsHalfwidthandFullwidthForms") ->
[{range, 16#FF00, 16#FFEF}];
%% Includes U+FEFF (BOM/ZWNBSP) as a lone value plus FFF0..FFFD.
range("IsSpecials") ->
[{value, 16#FEFF}, {range, 16#FFF0, 16#FFFD}];
% added
% Surrogate code points are illegal characters here, so this class is
% faked with {value, 0} and never matches a real character.
range("IsLowSurrogates") ->
[{value, 16#0}];
% faked with 0 as well, for the same reason (illegal characters)
range("IsHighSurrogates") ->
[{value, 16#0}];
range("IsOldItalic") ->
[{range, 16#10300, 16#1032F}];
range("IsGothic") ->
[{range, 16#10330, 16#1034A}];
range("IsDeseret") ->
[{range, 16#10400, 16#1044F}];
range("IsByzantineMusicalSymbols") ->
[{range, 16#1D000, 16#1D0FF}];
range("IsMusicalSymbols") ->
[{range, 16#1D100, 16#1D1FF}];
range("IsMathematicalAlphanumericSymbols") ->
[{range, 16#1D400, 16#1D7FF}];
range("IsCJKUnifiedIdeographsExtensionB") ->
[{range, 16#20000, 16#2A6DF}];
range("IsCJKCompatibilityIdeographsSupplement") ->
[{range, 16#2F800, 16#2FA1F}];
% Also faked with 0 — NOTE(review): presumably the Tags block
% (E0000..E007F) is deliberately made unmatchable; confirm intent.
range("IsTags") ->
[{value, 16#0}];
%%%
%% Start generated code
%%%
%% General category C ("Other"). Generated data — do not edit by hand.
%% Note the list is the union of the Cc, Cf and Co clauses below
%% (decimal codepoints, ascending, inclusive bounds).
range("C") ->
[
{range, 0, 31},
{range, 127, 159},
{value, 173},
{range, 1536, 1541},
{range, 1564, 1565},
{value, 1757},
{value, 1807},
{value, 2274},
{range, 6158, 6159},
{range, 8203, 8207},
{range, 8234, 8238},
{range, 8288, 8303},
{range, 57344, 63743},
{range, 65279, 65280},
{range, 65529, 65531},
{value, 69821},
{range, 69837, 69839},
{range, 113824, 118783},
{range, 119155, 119162},
{range, 917505, 917759},
{range, 983040, 1114109}
];
%% General category Cc (control characters): C0 and C1 control ranges.
range("Cc") ->
[{range, 0, 31}, {range, 127, 159}];
%% General category Cf (format characters). Generated data.
range("Cf") ->
[
{value, 173},
{range, 1536, 1541},
{range, 1564, 1565},
{value, 1757},
{value, 1807},
{value, 2274},
{range, 6158, 6159},
{range, 8203, 8207},
{range, 8234, 8238},
{range, 8288, 8303},
{range, 65279, 65280},
{range, 65529, 65531},
{value, 69821},
{range, 69837, 69839},
{range, 113824, 118783},
{range, 119155, 119162},
{range, 917505, 917759}
];
%% General category Cn (unassigned) — faked with {value, 0} so it never
%% matches a real character (same device as the surrogate classes above).
range("Cn") ->
[{value, 0}];
%% General category Co (private use): BMP PUA plus planes 15-16.
range("Co") ->
[{range, 57344, 63743}, {range, 983040, 1114109}];
%% General category Cs (surrogates) — surrogates are illegal characters
%% here, so this class is faked with {value, 0} (never matches).
range("Cs") ->
[{value, 0}];
%% General category L ("Letter" — union of Lu/Ll/Lt/Lm/Lo).
%% Generated data — do not edit by hand; regenerate from the UCD instead.
range("L") ->
[
{range, 65, 90},
{range, 97, 122},
{value, 170},
{value, 181},
{value, 186},
{range, 192, 214},
{range, 216, 246},
{range, 248, 705},
{range, 710, 721},
{range, 736, 740},
{value, 748},
{value, 750},
{range, 880, 884},
{range, 886, 893},
{range, 895, 899},
{value, 902},
{range, 904, 1013},
{range, 1015, 1153},
{range, 1162, 1369},
{range, 1376, 1416},
{range, 1488, 1522},
{range, 1568, 1610},
{range, 1646, 1647},
{range, 1649, 1747},
{value, 1749},
{range, 1765, 1766},
{range, 1774, 1775},
{range, 1786, 1788},
{value, 1791},
{value, 1808},
{range, 1810, 1839},
{range, 1869, 1957},
{range, 1969, 1983},
{range, 1994, 2026},
{range, 2036, 2037},
{range, 2042, 2044},
{range, 2048, 2069},
{value, 2074},
{value, 2084},
{value, 2088},
{range, 2112, 2136},
{range, 2144, 2258},
{range, 2308, 2361},
{value, 2365},
{value, 2384},
{range, 2392, 2401},
{range, 2417, 2432},
{range, 2437, 2491},
{value, 2493},
{range, 2510, 2518},
{range, 2524, 2529},
{range, 2544, 2545},
{value, 2556},
{range, 2565, 2619},
{range, 2649, 2661},
{range, 2674, 2676},
{range, 2693, 2747},
{value, 2749},
{range, 2768, 2785},
{value, 2809},
{range, 2821, 2875},
{value, 2877},
{range, 2908, 2913},
{value, 2929},
{range, 2947, 3005},
{range, 3024, 3030},
{range, 3077, 3133},
{range, 3160, 3169},
{value, 3200},
{range, 3205, 3259},
{value, 3261},
{range, 3294, 3297},
{range, 3313, 3327},
{range, 3333, 3386},
{value, 3389},
{value, 3406},
{range, 3412, 3414},
{range, 3423, 3425},
{range, 3450, 3457},
{range, 3461, 3529},
{range, 3585, 3632},
{range, 3634, 3635},
{range, 3648, 3654},
{range, 3713, 3760},
{range, 3762, 3763},
{range, 3773, 3783},
{range, 3804, 3840},
{range, 3904, 3952},
{range, 3976, 3980},
{range, 4096, 4138},
{value, 4159},
{range, 4176, 4181},
{range, 4186, 4189},
{value, 4193},
{range, 4197, 4198},
{range, 4206, 4208},
{range, 4213, 4225},
{value, 4238},
{range, 4256, 4346},
{range, 4348, 4956},
{range, 4992, 5007},
{range, 5024, 5119},
{range, 5121, 5740},
{range, 5743, 5759},
{range, 5761, 5786},
{range, 5792, 5866},
{range, 5873, 5905},
{range, 5920, 5937},
{range, 5952, 5969},
{range, 5984, 6001},
{range, 6016, 6067},
{value, 6103},
{value, 6108},
{range, 6176, 6276},
{range, 6279, 6312},
{range, 6314, 6431},
{range, 6480, 6607},
{range, 6656, 6678},
{range, 6688, 6740},
{value, 6823},
{range, 6917, 6963},
{range, 6981, 6991},
{range, 7043, 7072},
{range, 7086, 7087},
{range, 7098, 7141},
{range, 7168, 7203},
{range, 7245, 7247},
{range, 7258, 7293},
{range, 7296, 7359},
{range, 7401, 7404},
{range, 7406, 7409},
{range, 7413, 7414},
{range, 7424, 7615},
{range, 7680, 8124},
{value, 8126},
{range, 8130, 8140},
{range, 8144, 8156},
{range, 8160, 8172},
{range, 8178, 8188},
{range, 8305, 8307},
{value, 8319},
{range, 8336, 8351},
{value, 8450},
{value, 8455},
{range, 8458, 8467},
{value, 8469},
{range, 8473, 8477},
{value, 8484},
{value, 8486},
{value, 8488},
{range, 8490, 8493},
{range, 8495, 8505},
{range, 8508, 8511},
{range, 8517, 8521},
{value, 8526},
{range, 8579, 8580},
{range, 11264, 11492},
{range, 11499, 11502},
{range, 11506, 11512},
{range, 11520, 11631},
{range, 11648, 11743},
{value, 11823},
{range, 12293, 12294},
{range, 12337, 12341},
{range, 12347, 12348},
{range, 12353, 12440},
{range, 12445, 12447},
{range, 12449, 12538},
{range, 12540, 12687},
{range, 12704, 12735},
{range, 12784, 12799},
{range, 13312, 19903},
{range, 19968, 42127},
{range, 42192, 42237},
{range, 42240, 42508},
{range, 42512, 42527},
{range, 42538, 42606},
{range, 42623, 42653},
{range, 42656, 42725},
{range, 42775, 42783},
{range, 42786, 42888},
{range, 42891, 43009},
{range, 43011, 43013},
{range, 43015, 43018},
{range, 43020, 43042},
{range, 43072, 43123},
{range, 43138, 43187},
{range, 43250, 43255},
{value, 43259},
{range, 43261, 43262},
{range, 43274, 43301},
{range, 43312, 43334},
{range, 43360, 43391},
{range, 43396, 43442},
{value, 43471},
{range, 43488, 43492},
{range, 43494, 43503},
{range, 43514, 43560},
{range, 43584, 43586},
{range, 43588, 43595},
{range, 43616, 43638},
{value, 43642},
{range, 43646, 43695},
{value, 43697},
{range, 43701, 43702},
{range, 43705, 43709},
{value, 43712},
{range, 43714, 43741},
{range, 43744, 43754},
{range, 43762, 43764},
{range, 43777, 43866},
{range, 43868, 44002},
{range, 44032, 55291},
{range, 63744, 64285},
{range, 64287, 64296},
{range, 64298, 64433},
{range, 64467, 64829},
{range, 64848, 65019},
{range, 65136, 65278},
{range, 65313, 65338},
{range, 65345, 65370},
{range, 65382, 65503},
{range, 65536, 65791},
{range, 66176, 66271},
{range, 66304, 66335},
{range, 66349, 66368},
{range, 66370, 66377},
{range, 66384, 66421},
{range, 66432, 66462},
{range, 66464, 66511},
{range, 66560, 66719},
{range, 66736, 66926},
{range, 67072, 67670},
{range, 67680, 67702},
{range, 67712, 67750},
{range, 67808, 67834},
{range, 67840, 67861},
{range, 67872, 67902},
{range, 67968, 68027},
{range, 68030, 68031},
{value, 68096},
{range, 68112, 68151},
{range, 68192, 68220},
{range, 68224, 68252},
{range, 68288, 68295},
{range, 68297, 68324},
{range, 68352, 68408},
{range, 68416, 68439},
{range, 68448, 68471},
{range, 68480, 68504},
{range, 68608, 68857},
{range, 68864, 68899},
{range, 69376, 69404},
{range, 69415, 69445},
{range, 69635, 69687},
{range, 69763, 69807},
{range, 69840, 69871},
{range, 69891, 69926},
{value, 69956},
{range, 69968, 70002},
{range, 70006, 70015},
{range, 70019, 70066},
{range, 70081, 70084},
{value, 70106},
{value, 70108},
{range, 70144, 70187},
{range, 70272, 70312},
{range, 70320, 70366},
{range, 70405, 70458},
{value, 70461},
{range, 70480, 70486},
{range, 70493, 70497},
{range, 70656, 70708},
{range, 70727, 70730},
{range, 70784, 70831},
{range, 70852, 70853},
{range, 70855, 70863},
{range, 71040, 71086},
{range, 71128, 71131},
{range, 71168, 71215},
{range, 71236, 71247},
{range, 71296, 71338},
{range, 71424, 71452},
{range, 71680, 71723},
{range, 71840, 71903},
{range, 71935, 72192},
{range, 72203, 72242},
{value, 72250},
{value, 72272},
{range, 72284, 72329},
{value, 72349},
{range, 72384, 72750},
{value, 72768},
{range, 72818, 72849},
{range, 72960, 73008},
{value, 73030},
{range, 73056, 73097},
{range, 73112, 73119},
{range, 73440, 73458},
{range, 73728, 74751},
{range, 74880, 92767},
{range, 92880, 92911},
{range, 92928, 92975},
{range, 92992, 92995},
{range, 93027, 93823},
{range, 93952, 94032},
{range, 94099, 113819},
{range, 119808, 120512},
{range, 120514, 120538},
{range, 120540, 120570},
{range, 120572, 120596},
{range, 120598, 120628},
{range, 120630, 120654},
{range, 120656, 120686},
{range, 120688, 120712},
{range, 120714, 120744},
{range, 120746, 120770},
{range, 120772, 120781},
{range, 124928, 125126},
{range, 125184, 125251},
{range, 126464, 126703},
{range, 131072, 917504}
];
%% General category Ll (lowercase letters). Generated data — do not edit
%% by hand. The long run of single {value, N} entries with odd N covers
%% the case-paired Latin/Greek/Cyrillic blocks where upper/lower case
%% alternate codepoints.
range("Ll") ->
[
{range, 97, 122},
{value, 181},
{range, 223, 246},
{range, 248, 255},
{value, 257},
{value, 259},
{value, 261},
{value, 263},
{value, 265},
{value, 267},
{value, 269},
{value, 271},
{value, 273},
{value, 275},
{value, 277},
{value, 279},
{value, 281},
{value, 283},
{value, 285},
{value, 287},
{value, 289},
{value, 291},
{value, 293},
{value, 295},
{value, 297},
{value, 299},
{value, 301},
{value, 303},
{value, 305},
{value, 307},
{value, 309},
{range, 311, 312},
{value, 314},
{value, 316},
{value, 318},
{value, 320},
{value, 322},
{value, 324},
{value, 326},
{range, 328, 329},
{value, 331},
{value, 333},
{value, 335},
{value, 337},
{value, 339},
{value, 341},
{value, 343},
{value, 345},
{value, 347},
{value, 349},
{value, 351},
{value, 353},
{value, 355},
{value, 357},
{value, 359},
{value, 361},
{value, 363},
{value, 365},
{value, 367},
{value, 369},
{value, 371},
{value, 373},
{value, 375},
{value, 378},
{value, 380},
{range, 382, 384},
{value, 387},
{value, 389},
{value, 392},
{range, 396, 397},
{value, 402},
{value, 405},
{range, 409, 411},
{value, 414},
{value, 417},
{value, 419},
{value, 421},
{value, 424},
{range, 426, 427},
{value, 429},
{value, 432},
{value, 436},
{value, 438},
{range, 441, 442},
{range, 445, 447},
{value, 454},
{value, 457},
{value, 460},
{value, 462},
{value, 464},
{value, 466},
{value, 468},
{value, 470},
{value, 472},
{value, 474},
{range, 476, 477},
{value, 479},
{value, 481},
{value, 483},
{value, 485},
{value, 487},
{value, 489},
{value, 491},
{value, 493},
{range, 495, 496},
{value, 499},
{value, 501},
{value, 505},
{value, 507},
{value, 509},
{value, 511},
{value, 513},
{value, 515},
{value, 517},
{value, 519},
{value, 521},
{value, 523},
{value, 525},
{value, 527},
{value, 529},
{value, 531},
{value, 533},
{value, 535},
{value, 537},
{value, 539},
{value, 541},
{value, 543},
{value, 545},
{value, 547},
{value, 549},
{value, 551},
{value, 553},
{value, 555},
{value, 557},
{value, 559},
{value, 561},
{range, 563, 569},
{value, 572},
{range, 575, 576},
{value, 578},
{value, 583},
{value, 585},
{value, 587},
{value, 589},
{range, 591, 659},
{range, 661, 687},
{value, 881},
{value, 883},
{range, 887, 889},
{range, 891, 893},
{value, 912},
{range, 940, 974},
{range, 976, 977},
{range, 981, 983},
{value, 985},
{value, 987},
{value, 989},
{value, 991},
{value, 993},
{value, 995},
{value, 997},
{value, 999},
{value, 1001},
{value, 1003},
{value, 1005},
{range, 1007, 1011},
{value, 1013},
{value, 1016},
{range, 1019, 1020},
{range, 1072, 1119},
{value, 1121},
{value, 1123},
{value, 1125},
{value, 1127},
{value, 1129},
{value, 1131},
{value, 1133},
{value, 1135},
{value, 1137},
{value, 1139},
{value, 1141},
{value, 1143},
{value, 1145},
{value, 1147},
{value, 1149},
{value, 1151},
{value, 1153},
{value, 1163},
{value, 1165},
{value, 1167},
{value, 1169},
{value, 1171},
{value, 1173},
{value, 1175},
{value, 1177},
{value, 1179},
{value, 1181},
{value, 1183},
{value, 1185},
{value, 1187},
{value, 1189},
{value, 1191},
{value, 1193},
{value, 1195},
{value, 1197},
{value, 1199},
{value, 1201},
{value, 1203},
{value, 1205},
{value, 1207},
{value, 1209},
{value, 1211},
{value, 1213},
{value, 1215},
{value, 1218},
{value, 1220},
{value, 1222},
{value, 1224},
{value, 1226},
{value, 1228},
{range, 1230, 1231},
{value, 1233},
{value, 1235},
{value, 1237},
{value, 1239},
{value, 1241},
{value, 1243},
{value, 1245},
{value, 1247},
{value, 1249},
{value, 1251},
{value, 1253},
{value, 1255},
{value, 1257},
{value, 1259},
{value, 1261},
{value, 1263},
{value, 1265},
{value, 1267},
{value, 1269},
{value, 1271},
{value, 1273},
{value, 1275},
{value, 1277},
{value, 1279},
{value, 1281},
{value, 1283},
{value, 1285},
{value, 1287},
{value, 1289},
{value, 1291},
{value, 1293},
{value, 1295},
{value, 1297},
{value, 1299},
{value, 1301},
{value, 1303},
{value, 1305},
{value, 1307},
{value, 1309},
{value, 1311},
{value, 1313},
{value, 1315},
{value, 1317},
{value, 1319},
{value, 1321},
{value, 1323},
{value, 1325},
{range, 1327, 1328},
{range, 1376, 1416},
{range, 4304, 4346},
{range, 4349, 4351},
{range, 5112, 5119},
{range, 7296, 7311},
{range, 7424, 7467},
{range, 7531, 7543},
{range, 7545, 7578},
{value, 7681},
{value, 7683},
{value, 7685},
{value, 7687},
{value, 7689},
{value, 7691},
{value, 7693},
{value, 7695},
{value, 7697},
{value, 7699},
{value, 7701},
{value, 7703},
{value, 7705},
{value, 7707},
{value, 7709},
{value, 7711},
{value, 7713},
{value, 7715},
{value, 7717},
{value, 7719},
{value, 7721},
{value, 7723},
{value, 7725},
{value, 7727},
{value, 7729},
{value, 7731},
{value, 7733},
{value, 7735},
{value, 7737},
{value, 7739},
{value, 7741},
{value, 7743},
{value, 7745},
{value, 7747},
{value, 7749},
{value, 7751},
{value, 7753},
{value, 7755},
{value, 7757},
{value, 7759},
{value, 7761},
{value, 7763},
{value, 7765},
{value, 7767},
{value, 7769},
{value, 7771},
{value, 7773},
{value, 7775},
{value, 7777},
{value, 7779},
{value, 7781},
{value, 7783},
{value, 7785},
{value, 7787},
{value, 7789},
{value, 7791},
{value, 7793},
{value, 7795},
{value, 7797},
{value, 7799},
{value, 7801},
{value, 7803},
{value, 7805},
{value, 7807},
{value, 7809},
{value, 7811},
{value, 7813},
{value, 7815},
{value, 7817},
{value, 7819},
{value, 7821},
{value, 7823},
{value, 7825},
{value, 7827},
{range, 7829, 7837},
{value, 7839},
{value, 7841},
{value, 7843},
{value, 7845},
{value, 7847},
{value, 7849},
{value, 7851},
{value, 7853},
{value, 7855},
{value, 7857},
{value, 7859},
{value, 7861},
{value, 7863},
{value, 7865},
{value, 7867},
{value, 7869},
{value, 7871},
{value, 7873},
{value, 7875},
{value, 7877},
{value, 7879},
{value, 7881},
{value, 7883},
{value, 7885},
{value, 7887},
{value, 7889},
{value, 7891},
{value, 7893},
{value, 7895},
{value, 7897},
{value, 7899},
{value, 7901},
{value, 7903},
{value, 7905},
{value, 7907},
{value, 7909},
{value, 7911},
{value, 7913},
{value, 7915},
{value, 7917},
{value, 7919},
{value, 7921},
{value, 7923},
{value, 7925},
{value, 7927},
{value, 7929},
{value, 7931},
{value, 7933},
{range, 7935, 7943},
{range, 7952, 7959},
{range, 7968, 7975},
{range, 7984, 7991},
{range, 8000, 8007},
{range, 8016, 8024},
{range, 8032, 8039},
{range, 8048, 8071},
{range, 8080, 8087},
{range, 8096, 8103},
{range, 8112, 8119},
{value, 8126},
{range, 8130, 8135},
{range, 8144, 8151},
{range, 8160, 8167},
{range, 8178, 8183},
{value, 8458},
{range, 8462, 8463},
{value, 8467},
{value, 8495},
{value, 8500},
{value, 8505},
{range, 8508, 8509},
{range, 8518, 8521},
{value, 8526},
{value, 8580},
{range, 11312, 11359},
{value, 11361},
{range, 11365, 11366},
{value, 11368},
{value, 11370},
{value, 11372},
{value, 11377},
{range, 11379, 11380},
{range, 11382, 11387},
{value, 11393},
{value, 11395},
{value, 11397},
{value, 11399},
{value, 11401},
{value, 11403},
{value, 11405},
{value, 11407},
{value, 11409},
{value, 11411},
{value, 11413},
{value, 11415},
{value, 11417},
{value, 11419},
{value, 11421},
{value, 11423},
{value, 11425},
{value, 11427},
{value, 11429},
{value, 11431},
{value, 11433},
{value, 11435},
{value, 11437},
{value, 11439},
{value, 11441},
{value, 11443},
{value, 11445},
{value, 11447},
{value, 11449},
{value, 11451},
{value, 11453},
{value, 11455},
{value, 11457},
{value, 11459},
{value, 11461},
{value, 11463},
{value, 11465},
{value, 11467},
{value, 11469},
{value, 11471},
{value, 11473},
{value, 11475},
{value, 11477},
{value, 11479},
{value, 11481},
{value, 11483},
{value, 11485},
{value, 11487},
{value, 11489},
{range, 11491, 11492},
{value, 11500},
{value, 11502},
{range, 11507, 11512},
{range, 11520, 11567},
{value, 42561},
{value, 42563},
{value, 42565},
{value, 42567},
{value, 42569},
{value, 42571},
{value, 42573},
{value, 42575},
{value, 42577},
{value, 42579},
{value, 42581},
{value, 42583},
{value, 42585},
{value, 42587},
{value, 42589},
{value, 42591},
{value, 42593},
{value, 42595},
{value, 42597},
{value, 42599},
{value, 42601},
{value, 42603},
{value, 42605},
{value, 42625},
{value, 42627},
{value, 42629},
{value, 42631},
{value, 42633},
{value, 42635},
{value, 42637},
{value, 42639},
{value, 42641},
{value, 42643},
{value, 42645},
{value, 42647},
{value, 42649},
{value, 42651},
{value, 42787},
{value, 42789},
{value, 42791},
{value, 42793},
{value, 42795},
{value, 42797},
{range, 42799, 42801},
{value, 42803},
{value, 42805},
{value, 42807},
{value, 42809},
{value, 42811},
{value, 42813},
{value, 42815},
{value, 42817},
{value, 42819},
{value, 42821},
{value, 42823},
{value, 42825},
{value, 42827},
{value, 42829},
{value, 42831},
{value, 42833},
{value, 42835},
{value, 42837},
{value, 42839},
{value, 42841},
{value, 42843},
{value, 42845},
{value, 42847},
{value, 42849},
{value, 42851},
{value, 42853},
{value, 42855},
{value, 42857},
{value, 42859},
{value, 42861},
{value, 42863},
{range, 42865, 42872},
{value, 42874},
{value, 42876},
{value, 42879},
{value, 42881},
{value, 42883},
{value, 42885},
{value, 42887},
{value, 42892},
{value, 42894},
{value, 42897},
{range, 42899, 42901},
{value, 42903},
{value, 42905},
{value, 42907},
{value, 42909},
{value, 42911},
{value, 42913},
{value, 42915},
{value, 42917},
{value, 42919},
{value, 42921},
{value, 42927},
{value, 42933},
{value, 42935},
{range, 42937, 42998},
{value, 43002},
{range, 43824, 43866},
{range, 43872, 43967},
{range, 64256, 64284},
{range, 65345, 65370},
{range, 66600, 66639},
{range, 66776, 66815},
{range, 68800, 68857},
{range, 71872, 71903},
{range, 93792, 93823},
{range, 119834, 119859},
{range, 119886, 119911},
{range, 119938, 119963},
{range, 119990, 120015},
{range, 120042, 120067},
{range, 120094, 120119},
{range, 120146, 120171},
{range, 120198, 120223},
{range, 120250, 120275},
{range, 120302, 120327},
{range, 120354, 120379},
{range, 120406, 120431},
{range, 120458, 120487},
{range, 120514, 120538},
{range, 120540, 120545},
{range, 120572, 120596},
{range, 120598, 120603},
{range, 120630, 120654},
{range, 120656, 120661},
{range, 120688, 120712},
{range, 120714, 120719},
{range, 120746, 120770},
{range, 120772, 120777},
{range, 120779, 120781},
{range, 125218, 125251}
];
%% General category Lm (modifier letters). Generated data.
range("Lm") ->
[
{range, 688, 705},
{range, 710, 721},
{range, 736, 740},
{value, 748},
{value, 750},
{value, 884},
{value, 890},
{value, 1369},
{value, 1600},
{range, 1765, 1766},
{range, 2036, 2037},
{range, 2042, 2044},
{value, 2074},
{value, 2084},
{value, 2088},
{value, 2417},
{value, 3654},
{range, 3782, 3783},
{value, 4348},
{value, 6103},
{value, 6211},
{value, 6823},
{range, 7288, 7293},
{range, 7468, 7530},
{value, 7544},
{range, 7579, 7615},
{range, 8305, 8307},
{value, 8319},
{range, 8336, 8351},
{range, 11388, 11389},
{value, 11631},
{value, 11823},
{value, 12293},
{range, 12337, 12341},
{value, 12347},
{range, 12445, 12446},
{range, 12540, 12542},
{value, 40981},
{range, 42232, 42237},
{value, 42508},
{value, 42623},
{range, 42652, 42653},
{range, 42775, 42783},
{value, 42864},
{value, 42888},
{range, 43000, 43001},
{value, 43471},
{value, 43494},
{value, 43632},
{value, 43741},
{range, 43763, 43764},
{range, 43868, 43871},
{value, 65392},
{range, 65438, 65439},
{range, 92992, 92995},
{range, 94099, 94207}
];
%% General category Lo (other letters, e.g. uncased scripts and CJK
%% ideographs). Generated data — do not edit by hand.
range("Lo") ->
[
{value, 170},
{value, 186},
{value, 443},
{range, 448, 451},
{value, 660},
{range, 1488, 1522},
{range, 1568, 1599},
{range, 1601, 1610},
{range, 1646, 1647},
{range, 1649, 1747},
{value, 1749},
{range, 1774, 1775},
{range, 1786, 1788},
{value, 1791},
{value, 1808},
{range, 1810, 1839},
{range, 1869, 1957},
{range, 1969, 1983},
{range, 1994, 2026},
{range, 2048, 2069},
{range, 2112, 2136},
{range, 2144, 2258},
{range, 2308, 2361},
{value, 2365},
{value, 2384},
{range, 2392, 2401},
{range, 2418, 2432},
{range, 2437, 2491},
{value, 2493},
{range, 2510, 2518},
{range, 2524, 2529},
{range, 2544, 2545},
{value, 2556},
{range, 2565, 2619},
{range, 2649, 2661},
{range, 2674, 2676},
{range, 2693, 2747},
{value, 2749},
{range, 2768, 2785},
{value, 2809},
{range, 2821, 2875},
{value, 2877},
{range, 2908, 2913},
{value, 2929},
{range, 2947, 3005},
{range, 3024, 3030},
{range, 3077, 3133},
{range, 3160, 3169},
{value, 3200},
{range, 3205, 3259},
{value, 3261},
{range, 3294, 3297},
{range, 3313, 3327},
{range, 3333, 3386},
{value, 3389},
{value, 3406},
{range, 3412, 3414},
{range, 3423, 3425},
{range, 3450, 3457},
{range, 3461, 3529},
{range, 3585, 3632},
{range, 3634, 3635},
{range, 3648, 3653},
{range, 3713, 3760},
{range, 3762, 3763},
{range, 3773, 3781},
{range, 3804, 3840},
{range, 3904, 3952},
{range, 3976, 3980},
{range, 4096, 4138},
{value, 4159},
{range, 4176, 4181},
{range, 4186, 4189},
{value, 4193},
{range, 4197, 4198},
{range, 4206, 4208},
{range, 4213, 4225},
{value, 4238},
{range, 4352, 4956},
{range, 4992, 5007},
{range, 5121, 5740},
{range, 5743, 5759},
{range, 5761, 5786},
{range, 5792, 5866},
{range, 5873, 5905},
{range, 5920, 5937},
{range, 5952, 5969},
{range, 5984, 6001},
{range, 6016, 6067},
{value, 6108},
{range, 6176, 6210},
{range, 6212, 6276},
{range, 6279, 6312},
{range, 6314, 6431},
{range, 6480, 6607},
{range, 6656, 6678},
{range, 6688, 6740},
{range, 6917, 6963},
{range, 6981, 6991},
{range, 7043, 7072},
{range, 7086, 7087},
{range, 7098, 7141},
{range, 7168, 7203},
{range, 7245, 7247},
{range, 7258, 7287},
{range, 7401, 7404},
{range, 7406, 7409},
{range, 7413, 7414},
{range, 8501, 8504},
{range, 11568, 11630},
{range, 11648, 11743},
{value, 12294},
{value, 12348},
{range, 12353, 12440},
{value, 12447},
{range, 12449, 12538},
{range, 12543, 12687},
{range, 12704, 12735},
{range, 12784, 12799},
{range, 13312, 19903},
{range, 19968, 40980},
{range, 40982, 42127},
{range, 42192, 42231},
{range, 42240, 42507},
{range, 42512, 42527},
{range, 42538, 42559},
{value, 42606},
{range, 42656, 42725},
{value, 42895},
{value, 42999},
{range, 43003, 43009},
{range, 43011, 43013},
{range, 43015, 43018},
{range, 43020, 43042},
{range, 43072, 43123},
{range, 43138, 43187},
{range, 43250, 43255},
{value, 43259},
{range, 43261, 43262},
{range, 43274, 43301},
{range, 43312, 43334},
{range, 43360, 43391},
{range, 43396, 43442},
{range, 43488, 43492},
{range, 43495, 43503},
{range, 43514, 43560},
{range, 43584, 43586},
{range, 43588, 43595},
{range, 43616, 43631},
{range, 43633, 43638},
{value, 43642},
{range, 43646, 43695},
{value, 43697},
{range, 43701, 43702},
{range, 43705, 43709},
{value, 43712},
{range, 43714, 43740},
{range, 43744, 43754},
{value, 43762},
{range, 43777, 43823},
{range, 43968, 44002},
{range, 44032, 55291},
{range, 63744, 64255},
{value, 64285},
{range, 64287, 64296},
{range, 64298, 64433},
{range, 64467, 64829},
{range, 64848, 65019},
{range, 65136, 65278},
{range, 65382, 65391},
{range, 65393, 65437},
{range, 65440, 65503},
{range, 65536, 65791},
{range, 66176, 66271},
{range, 66304, 66335},
{range, 66349, 66368},
{range, 66370, 66377},
{range, 66384, 66421},
{range, 66432, 66462},
{range, 66464, 66511},
{range, 66640, 66719},
{range, 66816, 66926},
{range, 67072, 67670},
{range, 67680, 67702},
{range, 67712, 67750},
{range, 67808, 67834},
{range, 67840, 67861},
{range, 67872, 67902},
{range, 67968, 68027},
{range, 68030, 68031},
{value, 68096},
{range, 68112, 68151},
{range, 68192, 68220},
{range, 68224, 68252},
{range, 68288, 68295},
{range, 68297, 68324},
{range, 68352, 68408},
{range, 68416, 68439},
{range, 68448, 68471},
{range, 68480, 68504},
{range, 68608, 68735},
{range, 68864, 68899},
{range, 69376, 69404},
{range, 69415, 69445},
{range, 69635, 69687},
{range, 69763, 69807},
{range, 69840, 69871},
{range, 69891, 69926},
{value, 69956},
{range, 69968, 70002},
{range, 70006, 70015},
{range, 70019, 70066},
{range, 70081, 70084},
{value, 70106},
{value, 70108},
{range, 70144, 70187},
{range, 70272, 70312},
{range, 70320, 70366},
{range, 70405, 70458},
{value, 70461},
{range, 70480, 70486},
{range, 70493, 70497},
{range, 70656, 70708},
{range, 70727, 70730},
{range, 70784, 70831},
{range, 70852, 70853},
{range, 70855, 70863},
{range, 71040, 71086},
{range, 71128, 71131},
{range, 71168, 71215},
{range, 71236, 71247},
{range, 71296, 71338},
{range, 71424, 71452},
{range, 71680, 71723},
{range, 71935, 72192},
{range, 72203, 72242},
{value, 72250},
{value, 72272},
{range, 72284, 72329},
{value, 72349},
{range, 72384, 72750},
{value, 72768},
{range, 72818, 72849},
{range, 72960, 73008},
{value, 73030},
{range, 73056, 73097},
{range, 73112, 73119},
{range, 73440, 73458},
{range, 73728, 74751},
{range, 74880, 92767},
{range, 92880, 92911},
{range, 92928, 92975},
{range, 93027, 93759},
{range, 93952, 94032},
{range, 94208, 113819},
{range, 124928, 125126},
{range, 126464, 126703},
{range, 131072, 917504}
];
%% General category Lt (titlecase letters): the Latin digraphs
%% (Dz, Lj, Nj, Dz-with-caron) and the Greek-extended titlecase forms.
range("Lt") ->
[
{value, 453},
{value, 456},
{value, 459},
{value, 498},
{range, 8072, 8079},
{range, 8088, 8095},
{range, 8104, 8111},
{value, 8124},
{value, 8140},
{value, 8188}
];
range("Lu") ->
[
{range, 65, 90},
{range, 192, 214},
{range, 216, 222},
{value, 256},
{value, 258},
{value, 260},
{value, 262},
{value, 264},
{value, 266},
{value, 268},
{value, 270},
{value, 272},
{value, 274},
{value, 276},
{value, 278},
{value, 280},
{value, 282},
{value, 284},
{value, 286},
{value, 288},
{value, 290},
{value, 292},
{value, 294},
{value, 296},
{value, 298},
{value, 300},
{value, 302},
{value, 304},
{value, 306},
{value, 308},
{value, 310},
{value, 313},
{value, 315},
{value, 317},
{value, 319},
{value, 321},
{value, 323},
{value, 325},
{value, 327},
{value, 330},
{value, 332},
{value, 334},
{value, 336},
{value, 338},
{value, 340},
{value, 342},
{value, 344},
{value, 346},
{value, 348},
{value, 350},
{value, 352},
{value, 354},
{value, 356},
{value, 358},
{value, 360},
{value, 362},
{value, 364},
{value, 366},
{value, 368},
{value, 370},
{value, 372},
{value, 374},
{range, 376, 377},
{value, 379},
{value, 381},
{range, 385, 386},
{value, 388},
{range, 390, 391},
{range, 393, 395},
{range, 398, 401},
{range, 403, 404},
{range, 406, 408},
{range, 412, 413},
{range, 415, 416},
{value, 418},
{value, 420},
{range, 422, 423},
{value, 425},
{value, 428},
{range, 430, 431},
{range, 433, 435},
{value, 437},
{range, 439, 440},
{value, 444},
{value, 452},
{value, 455},
{value, 458},
{value, 461},
{value, 463},
{value, 465},
{value, 467},
{value, 469},
{value, 471},
{value, 473},
{value, 475},
{value, 478},
{value, 480},
{value, 482},
{value, 484},
{value, 486},
{value, 488},
{value, 490},
{value, 492},
{value, 494},
{value, 497},
{value, 500},
{range, 502, 504},
{value, 506},
{value, 508},
{value, 510},
{value, 512},
{value, 514},
{value, 516},
{value, 518},
{value, 520},
{value, 522},
{value, 524},
{value, 526},
{value, 528},
{value, 530},
{value, 532},
{value, 534},
{value, 536},
{value, 538},
{value, 540},
{value, 542},
{value, 544},
{value, 546},
{value, 548},
{value, 550},
{value, 552},
{value, 554},
{value, 556},
{value, 558},
{value, 560},
{value, 562},
{range, 570, 571},
{range, 573, 574},
{value, 577},
{range, 579, 582},
{value, 584},
{value, 586},
{value, 588},
{value, 590},
{value, 880},
{value, 882},
{value, 886},
{range, 895, 899},
{value, 902},
{range, 904, 911},
{range, 913, 939},
{value, 975},
{range, 978, 980},
{value, 984},
{value, 986},
{value, 988},
{value, 990},
{value, 992},
{value, 994},
{value, 996},
{value, 998},
{value, 1000},
{value, 1002},
{value, 1004},
{value, 1006},
{value, 1012},
{value, 1015},
{range, 1017, 1018},
{range, 1021, 1071},
{value, 1120},
{value, 1122},
{value, 1124},
{value, 1126},
{value, 1128},
{value, 1130},
{value, 1132},
{value, 1134},
{value, 1136},
{value, 1138},
{value, 1140},
{value, 1142},
{value, 1144},
{value, 1146},
{value, 1148},
{value, 1150},
{value, 1152},
{value, 1162},
{value, 1164},
{value, 1166},
{value, 1168},
{value, 1170},
{value, 1172},
{value, 1174},
{value, 1176},
{value, 1178},
{value, 1180},
{value, 1182},
{value, 1184},
{value, 1186},
{value, 1188},
{value, 1190},
{value, 1192},
{value, 1194},
{value, 1196},
{value, 1198},
{value, 1200},
{value, 1202},
{value, 1204},
{value, 1206},
{value, 1208},
{value, 1210},
{value, 1212},
{value, 1214},
{range, 1216, 1217},
{value, 1219},
{value, 1221},
{value, 1223},
{value, 1225},
{value, 1227},
{value, 1229},
{value, 1232},
{value, 1234},
{value, 1236},
{value, 1238},
{value, 1240},
{value, 1242},
{value, 1244},
{value, 1246},
{value, 1248},
{value, 1250},
{value, 1252},
{value, 1254},
{value, 1256},
{value, 1258},
{value, 1260},
{value, 1262},
{value, 1264},
{value, 1266},
{value, 1268},
{value, 1270},
{value, 1272},
{value, 1274},
{value, 1276},
{value, 1278},
{value, 1280},
{value, 1282},
{value, 1284},
{value, 1286},
{value, 1288},
{value, 1290},
{value, 1292},
{value, 1294},
{value, 1296},
{value, 1298},
{value, 1300},
{value, 1302},
{value, 1304},
{value, 1306},
{value, 1308},
{value, 1310},
{value, 1312},
{value, 1314},
{value, 1316},
{value, 1318},
{value, 1320},
{value, 1322},
{value, 1324},
{value, 1326},
{range, 1329, 1368},
{range, 4256, 4303},
{range, 5024, 5111},
{range, 7312, 7359},
{value, 7680},
{value, 7682},
{value, 7684},
{value, 7686},
{value, 7688},
{value, 7690},
{value, 7692},
{value, 7694},
{value, 7696},
{value, 7698},
{value, 7700},
{value, 7702},
{value, 7704},
{value, 7706},
{value, 7708},
{value, 7710},
{value, 7712},
{value, 7714},
{value, 7716},
{value, 7718},
{value, 7720},
{value, 7722},
{value, 7724},
{value, 7726},
{value, 7728},
{value, 7730},
{value, 7732},
{value, 7734},
{value, 7736},
{value, 7738},
{value, 7740},
{value, 7742},
{value, 7744},
{value, 7746},
{value, 7748},
{value, 7750},
{value, 7752},
{value, 7754},
{value, 7756},
{value, 7758},
{value, 7760},
{value, 7762},
{value, 7764},
{value, 7766},
{value, 7768},
{value, 7770},
{value, 7772},
{value, 7774},
{value, 7776},
{value, 7778},
{value, 7780},
{value, 7782},
{value, 7784},
{value, 7786},
{value, 7788},
{value, 7790},
{value, 7792},
{value, 7794},
{value, 7796},
{value, 7798},
{value, 7800},
{value, 7802},
{value, 7804},
{value, 7806},
{value, 7808},
{value, 7810},
{value, 7812},
{value, 7814},
{value, 7816},
{value, 7818},
{value, 7820},
{value, 7822},
{value, 7824},
{value, 7826},
{value, 7828},
{value, 7838},
{value, 7840},
{value, 7842},
{value, 7844},
{value, 7846},
{value, 7848},
{value, 7850},
{value, 7852},
{value, 7854},
{value, 7856},
{value, 7858},
{value, 7860},
{value, 7862},
{value, 7864},
{value, 7866},
{value, 7868},
{value, 7870},
{value, 7872},
{value, 7874},
{value, 7876},
{value, 7878},
{value, 7880},
{value, 7882},
{value, 7884},
{value, 7886},
{value, 7888},
{value, 7890},
{value, 7892},
{value, 7894},
{value, 7896},
{value, 7898},
{value, 7900},
{value, 7902},
{value, 7904},
{value, 7906},
{value, 7908},
{value, 7910},
{value, 7912},
{value, 7914},
{value, 7916},
{value, 7918},
{value, 7920},
{value, 7922},
{value, 7924},
{value, 7926},
{value, 7928},
{value, 7930},
{value, 7932},
{value, 7934},
{range, 7944, 7951},
{range, 7960, 7967},
{range, 7976, 7983},
{range, 7992, 7999},
{range, 8008, 8015},
{range, 8025, 8031},
{range, 8040, 8047},
{range, 8120, 8123},
{range, 8136, 8139},
{range, 8152, 8156},
{range, 8168, 8172},
{range, 8184, 8187},
{value, 8450},
{value, 8455},
{range, 8459, 8461},
{range, 8464, 8466},
{value, 8469},
{range, 8473, 8477},
{value, 8484},
{value, 8486},
{value, 8488},
{range, 8490, 8493},
{range, 8496, 8499},
{range, 8510, 8511},
{value, 8517},
{value, 8579},
{range, 11264, 11311},
{value, 11360},
{range, 11362, 11364},
{value, 11367},
{value, 11369},
{value, 11371},
{range, 11373, 11376},
{value, 11378},
{value, 11381},
{range, 11390, 11392},
{value, 11394},
{value, 11396},
{value, 11398},
{value, 11400},
{value, 11402},
{value, 11404},
{value, 11406},
{value, 11408},
{value, 11410},
{value, 11412},
{value, 11414},
{value, 11416},
{value, 11418},
{value, 11420},
{value, 11422},
{value, 11424},
{value, 11426},
{value, 11428},
{value, 11430},
{value, 11432},
{value, 11434},
{value, 11436},
{value, 11438},
{value, 11440},
{value, 11442},
{value, 11444},
{value, 11446},
{value, 11448},
{value, 11450},
{value, 11452},
{value, 11454},
{value, 11456},
{value, 11458},
{value, 11460},
{value, 11462},
{value, 11464},
{value, 11466},
{value, 11468},
{value, 11470},
{value, 11472},
{value, 11474},
{value, 11476},
{value, 11478},
{value, 11480},
{value, 11482},
{value, 11484},
{value, 11486},
{value, 11488},
{value, 11490},
{value, 11499},
{value, 11501},
{value, 11506},
{value, 42560},
{value, 42562},
{value, 42564},
{value, 42566},
{value, 42568},
{value, 42570},
{value, 42572},
{value, 42574},
{value, 42576},
{value, 42578},
{value, 42580},
{value, 42582},
{value, 42584},
{value, 42586},
{value, 42588},
{value, 42590},
{value, 42592},
{value, 42594},
{value, 42596},
{value, 42598},
{value, 42600},
{value, 42602},
{value, 42604},
{value, 42624},
{value, 42626},
{value, 42628},
{value, 42630},
{value, 42632},
{value, 42634},
{value, 42636},
{value, 42638},
{value, 42640},
{value, 42642},
{value, 42644},
{value, 42646},
{value, 42648},
{value, 42650},
{value, 42786},
{value, 42788},
{value, 42790},
{value, 42792},
{value, 42794},
{value, 42796},
{value, 42798},
{value, 42802},
{value, 42804},
{value, 42806},
{value, 42808},
{value, 42810},
{value, 42812},
{value, 42814},
{value, 42816},
{value, 42818},
{value, 42820},
{value, 42822},
{value, 42824},
{value, 42826},
{value, 42828},
{value, 42830},
{value, 42832},
{value, 42834},
{value, 42836},
{value, 42838},
{value, 42840},
{value, 42842},
{value, 42844},
{value, 42846},
{value, 42848},
{value, 42850},
{value, 42852},
{value, 42854},
{value, 42856},
{value, 42858},
{value, 42860},
{value, 42862},
{value, 42873},
{value, 42875},
{range, 42877, 42878},
{value, 42880},
{value, 42882},
{value, 42884},
{value, 42886},
{value, 42891},
{value, 42893},
{value, 42896},
{value, 42898},
{value, 42902},
{value, 42904},
{value, 42906},
{value, 42908},
{value, 42910},
{value, 42912},
{value, 42914},
{value, 42916},
{value, 42918},
{value, 42920},
{range, 42922, 42926},
{range, 42928, 42932},
{value, 42934},
{value, 42936},
{range, 65313, 65338},
{range, 66560, 66599},
{range, 66736, 66775},
{range, 68736, 68799},
{range, 71840, 71871},
{range, 93760, 93791},
{range, 119808, 119833},
{range, 119860, 119885},
{range, 119912, 119937},
{range, 119964, 119989},
{range, 120016, 120041},
{range, 120068, 120093},
{range, 120120, 120145},
{range, 120172, 120197},
{range, 120224, 120249},
{range, 120276, 120301},
{range, 120328, 120353},
{range, 120380, 120405},
{range, 120432, 120457},
{range, 120488, 120512},
{range, 120546, 120570},
{range, 120604, 120628},
{range, 120662, 120686},
{range, 120720, 120744},
{value, 120778},
{range, 125184, 125217}
];
%% Code points in Unicode general category "M" (Mark — all combining marks).
%% Entries are listed in ascending code-point order as
%% {value, CodePoint} | {range, From, To} with inclusive bounds.
%% The single-letter category appears to be a coalesced union of its
%% subcategories Mc/Me/Mn (e.g. 1155-1161 here covers Mn 1155-1159 plus
%% Me 1160-1161) — presumably machine-generated from the Unicode Character
%% Database; do not edit by hand. NOTE(review): Unicode version unknown
%% from this chunk — confirm against the generator before updating.
range("M") ->
[
{range, 768, 879},
{range, 1155, 1161},
{range, 1425, 1469},
{value, 1471},
{range, 1473, 1474},
{range, 1476, 1477},
{range, 1479, 1487},
{range, 1552, 1562},
{range, 1611, 1631},
{value, 1648},
{range, 1750, 1756},
{range, 1759, 1764},
{range, 1767, 1768},
{range, 1770, 1773},
{value, 1809},
{range, 1840, 1868},
{range, 1958, 1968},
{range, 2027, 2035},
{value, 2045},
{range, 2070, 2073},
{range, 2075, 2083},
{range, 2085, 2087},
{range, 2089, 2095},
{range, 2137, 2141},
{range, 2259, 2273},
{range, 2275, 2307},
{range, 2362, 2364},
{range, 2366, 2383},
{range, 2385, 2391},
{range, 2402, 2403},
{range, 2433, 2436},
{value, 2492},
{range, 2494, 2509},
{range, 2519, 2523},
{range, 2530, 2533},
{range, 2558, 2564},
{range, 2620, 2648},
{range, 2672, 2673},
{value, 2677},
{range, 2689, 2692},
{value, 2748},
{range, 2750, 2767},
{range, 2786, 2789},
{range, 2810, 2820},
{value, 2876},
{range, 2878, 2907},
{range, 2914, 2917},
{value, 2946},
{range, 3006, 3023},
{range, 3031, 3045},
{range, 3072, 3076},
{range, 3134, 3159},
{range, 3170, 3173},
{range, 3201, 3203},
{value, 3260},
{range, 3262, 3293},
{range, 3298, 3301},
{range, 3328, 3332},
{range, 3387, 3388},
{range, 3390, 3405},
{value, 3415},
{range, 3426, 3429},
{range, 3458, 3460},
{range, 3530, 3557},
{range, 3570, 3571},
{value, 3633},
{range, 3636, 3646},
{range, 3655, 3662},
{value, 3761},
{range, 3764, 3772},
{range, 3784, 3791},
{range, 3864, 3865},
{value, 3893},
{value, 3895},
{value, 3897},
{range, 3902, 3903},
{range, 3953, 3972},
{range, 3974, 3975},
{range, 3981, 4029},
{value, 4038},
{range, 4139, 4158},
{range, 4182, 4185},
{range, 4190, 4192},
{range, 4194, 4196},
{range, 4199, 4205},
{range, 4209, 4212},
{range, 4226, 4237},
{value, 4239},
{range, 4250, 4253},
{range, 4957, 4959},
{range, 5906, 5919},
{range, 5938, 5940},
{range, 5970, 5983},
{range, 6002, 6015},
{range, 6068, 6099},
{range, 6109, 6111},
{range, 6155, 6157},
{range, 6277, 6278},
{value, 6313},
{range, 6432, 6463},
{range, 6679, 6685},
{range, 6741, 6783},
{range, 6832, 6916},
{range, 6964, 6980},
{range, 7019, 7027},
{range, 7040, 7042},
{range, 7073, 7085},
{range, 7142, 7163},
{range, 7204, 7226},
{range, 7376, 7378},
{range, 7380, 7400},
{value, 7405},
{range, 7410, 7412},
{range, 7415, 7423},
{range, 7616, 7679},
{range, 8400, 8447},
{range, 11503, 11505},
{value, 11647},
{range, 11744, 11775},
{range, 12330, 12335},
{range, 12441, 12442},
{range, 42607, 42610},
{range, 42612, 42621},
{range, 42654, 42655},
{range, 42736, 42737},
{value, 43010},
{value, 43014},
{value, 43019},
{range, 43043, 43047},
{range, 43136, 43137},
{range, 43188, 43213},
{range, 43232, 43249},
{value, 43263},
{range, 43302, 43309},
{range, 43335, 43358},
{range, 43392, 43395},
{range, 43443, 43456},
{value, 43493},
{range, 43561, 43583},
{value, 43587},
{range, 43596, 43599},
{range, 43643, 43645},
{value, 43696},
{range, 43698, 43700},
{range, 43703, 43704},
{range, 43710, 43711},
{value, 43713},
{range, 43755, 43759},
{range, 43765, 43776},
{range, 44003, 44010},
{range, 44012, 44015},
{value, 64286},
{range, 65024, 65039},
{range, 65056, 65071},
{range, 66045, 66175},
{value, 66272},
{range, 66422, 66431},
{range, 68097, 68111},
{range, 68152, 68159},
{range, 68325, 68330},
{range, 68900, 68911},
{range, 69446, 69456},
{range, 69632, 69634},
{range, 69688, 69702},
{range, 69759, 69762},
{range, 69808, 69818},
{range, 69888, 69890},
{range, 69927, 69941},
{range, 69957, 69967},
{value, 70003},
{range, 70016, 70018},
{range, 70067, 70080},
{range, 70089, 70092},
{range, 70188, 70199},
{range, 70206, 70271},
{range, 70367, 70383},
{range, 70400, 70404},
{range, 70459, 70460},
{range, 70462, 70479},
{range, 70487, 70492},
{range, 70498, 70655},
{range, 70709, 70726},
{range, 70750, 70783},
{range, 70832, 70851},
{range, 71087, 71104},
{range, 71132, 71167},
{range, 71216, 71232},
{range, 71339, 71359},
{range, 71453, 71471},
{range, 71724, 71738},
{range, 72193, 72202},
{range, 72243, 72249},
{range, 72251, 72254},
{range, 72263, 72271},
{range, 72273, 72283},
{range, 72330, 72345},
{range, 72751, 72767},
{range, 72850, 72959},
{range, 73009, 73029},
{range, 73031, 73039},
{range, 73098, 73111},
{range, 73459, 73462},
{range, 92912, 92916},
{range, 92976, 92982},
{range, 94033, 94098},
{range, 113821, 113822},
{range, 119141, 119145},
{range, 119149, 119154},
{range, 119163, 119170},
{range, 119173, 119179},
{range, 119210, 119213},
{range, 119362, 119364},
{range, 121344, 121398},
{range, 121403, 121452},
{value, 121461},
{value, 121476},
{range, 121499, 124927},
{range, 125136, 125183},
{range, 125252, 125263},
{range, 917760, 983039}
];
%% Code points in Unicode general category "Mc" (Spacing Combining Mark),
%% e.g. Devanagari matras such as 2366 (U+093E). Entries are ascending
%% {value, CodePoint} | {range, From, To} tuples with inclusive bounds.
%% Presumably generated from the Unicode Character Database — do not edit
%% by hand.
range("Mc") ->
[
{value, 2307},
{value, 2363},
{range, 2366, 2368},
{range, 2377, 2380},
{range, 2382, 2383},
{range, 2434, 2436},
{range, 2494, 2496},
{range, 2503, 2508},
{range, 2519, 2523},
{range, 2563, 2564},
{range, 2622, 2624},
{range, 2691, 2692},
{range, 2750, 2752},
{range, 2761, 2764},
{range, 2818, 2820},
{value, 2878},
{value, 2880},
{range, 2887, 2892},
{range, 2903, 2907},
{range, 3006, 3007},
{range, 3009, 3020},
{range, 3031, 3045},
{range, 3073, 3075},
{range, 3137, 3141},
{range, 3202, 3203},
{value, 3262},
{range, 3264, 3269},
{range, 3271, 3275},
{range, 3285, 3293},
{range, 3330, 3332},
{range, 3390, 3392},
{range, 3398, 3404},
{value, 3415},
{range, 3458, 3460},
{range, 3535, 3537},
{range, 3544, 3557},
{range, 3570, 3571},
{range, 3902, 3903},
{value, 3967},
{range, 4139, 4140},
{value, 4145},
{value, 4152},
{range, 4155, 4156},
{range, 4182, 4183},
{range, 4194, 4196},
{range, 4199, 4205},
{range, 4227, 4228},
{range, 4231, 4236},
{value, 4239},
{range, 4250, 4252},
{value, 6070},
{range, 6078, 6085},
{range, 6087, 6088},
{range, 6435, 6438},
{range, 6441, 6449},
{range, 6451, 6456},
{range, 6681, 6682},
{value, 6741},
{value, 6743},
{value, 6753},
{range, 6755, 6756},
{range, 6765, 6770},
{value, 6916},
{value, 6965},
{value, 6971},
{range, 6973, 6977},
{range, 6979, 6980},
{value, 7042},
{value, 7073},
{range, 7078, 7079},
{value, 7082},
{value, 7143},
{range, 7146, 7148},
{value, 7150},
{range, 7154, 7163},
{range, 7204, 7211},
{range, 7220, 7221},
{value, 7393},
{range, 7410, 7411},
{value, 7415},
{range, 12334, 12335},
{range, 43043, 43044},
{value, 43047},
{range, 43136, 43137},
{range, 43188, 43203},
{range, 43346, 43358},
{value, 43395},
{range, 43444, 43445},
{range, 43450, 43451},
{range, 43453, 43456},
{range, 43567, 43568},
{range, 43571, 43572},
{range, 43597, 43599},
{value, 43643},
{value, 43645},
{value, 43755},
{range, 43758, 43759},
{value, 43765},
{range, 44003, 44004},
{range, 44006, 44007},
{range, 44009, 44010},
{value, 44012},
{value, 69632},
{value, 69634},
{value, 69762},
{range, 69808, 69810},
{range, 69815, 69816},
{value, 69932},
{range, 69957, 69967},
{value, 70018},
{range, 70067, 70069},
{range, 70079, 70080},
{range, 70188, 70190},
{range, 70194, 70195},
{value, 70197},
{range, 70368, 70370},
{range, 70402, 70404},
{range, 70462, 70463},
{range, 70465, 70479},
{range, 70487, 70492},
{range, 70498, 70501},
{range, 70709, 70711},
{range, 70720, 70721},
{value, 70725},
{range, 70832, 70834},
{value, 70841},
{range, 70843, 70846},
{value, 70849},
{range, 71087, 71089},
{range, 71096, 71099},
{value, 71102},
{range, 71216, 71218},
{range, 71227, 71228},
{value, 71230},
{value, 71340},
{range, 71342, 71343},
{value, 71350},
{range, 71456, 71457},
{value, 71462},
{range, 71724, 71726},
{value, 71736},
{value, 72249},
{range, 72279, 72280},
{value, 72343},
{value, 72751},
{value, 72766},
{value, 72873},
{value, 72881},
{value, 72884},
{range, 73098, 73103},
{range, 73107, 73108},
{value, 73110},
{range, 73461, 73462},
{range, 94033, 94094},
{range, 119141, 119142},
{range, 119149, 119154}
];
%% Code points in Unicode general category "Me" (Enclosing Mark), e.g.
%% 8413 (U+20DD COMBINING ENCLOSING CIRCLE). Entries are ascending
%% {range, From, To} tuples with inclusive bounds; presumably generated
%% from the Unicode Character Database — do not edit by hand.
range("Me") ->
[
{range, 1160, 1161},
{range, 6846, 6911},
{range, 8413, 8416},
{range, 8418, 8420},
{range, 42608, 42610}
];
%% Code points in Unicode general category "Mn" (Nonspacing Mark), e.g.
%% 768-879 = Combining Diacritical Marks (U+0300..U+036F). Entries are
%% ascending {value, CodePoint} | {range, From, To} tuples with inclusive
%% bounds. Presumably generated from the Unicode Character Database — do
%% not edit by hand.
range("Mn") ->
[
{range, 768, 879},
{range, 1155, 1159},
{range, 1425, 1469},
{value, 1471},
{range, 1473, 1474},
{range, 1476, 1477},
{range, 1479, 1487},
{range, 1552, 1562},
{range, 1611, 1631},
{value, 1648},
{range, 1750, 1756},
{range, 1759, 1764},
{range, 1767, 1768},
{range, 1770, 1773},
{value, 1809},
{range, 1840, 1868},
{range, 1958, 1968},
{range, 2027, 2035},
{value, 2045},
{range, 2070, 2073},
{range, 2075, 2083},
{range, 2085, 2087},
{range, 2089, 2095},
{range, 2137, 2141},
{range, 2259, 2273},
{range, 2275, 2306},
{value, 2362},
{value, 2364},
{range, 2369, 2376},
{value, 2381},
{range, 2385, 2391},
{range, 2402, 2403},
{value, 2433},
{value, 2492},
{range, 2497, 2502},
{value, 2509},
{range, 2530, 2533},
{range, 2558, 2562},
{range, 2620, 2621},
{range, 2625, 2648},
{range, 2672, 2673},
{value, 2677},
{range, 2689, 2690},
{value, 2748},
{range, 2753, 2760},
{range, 2765, 2767},
{range, 2786, 2789},
{range, 2810, 2817},
{value, 2876},
{value, 2879},
{range, 2881, 2886},
{range, 2893, 2902},
{range, 2914, 2917},
{value, 2946},
{value, 3008},
{range, 3021, 3023},
{value, 3072},
{value, 3076},
{range, 3134, 3136},
{range, 3142, 3159},
{range, 3170, 3173},
{value, 3201},
{value, 3260},
{value, 3263},
{value, 3270},
{range, 3276, 3284},
{range, 3298, 3301},
{range, 3328, 3329},
{range, 3387, 3388},
{range, 3393, 3397},
{value, 3405},
{range, 3426, 3429},
{range, 3530, 3534},
{range, 3538, 3543},
{value, 3633},
{range, 3636, 3646},
{range, 3655, 3662},
{value, 3761},
{range, 3764, 3772},
{range, 3784, 3791},
{range, 3864, 3865},
{value, 3893},
{value, 3895},
{value, 3897},
{range, 3953, 3966},
{range, 3968, 3972},
{range, 3974, 3975},
{range, 3981, 4029},
{value, 4038},
{range, 4141, 4144},
{range, 4146, 4151},
{range, 4153, 4154},
{range, 4157, 4158},
{range, 4184, 4185},
{range, 4190, 4192},
{range, 4209, 4212},
{value, 4226},
{range, 4229, 4230},
{value, 4237},
{value, 4253},
{range, 4957, 4959},
{range, 5906, 5919},
{range, 5938, 5940},
{range, 5970, 5983},
{range, 6002, 6015},
{range, 6068, 6069},
{range, 6071, 6077},
{value, 6086},
{range, 6089, 6099},
{range, 6109, 6111},
{range, 6155, 6157},
{range, 6277, 6278},
{value, 6313},
{range, 6432, 6434},
{range, 6439, 6440},
{value, 6450},
{range, 6457, 6463},
{range, 6679, 6680},
{range, 6683, 6685},
{value, 6742},
{range, 6744, 6752},
{value, 6754},
{range, 6757, 6764},
{range, 6771, 6783},
{range, 6832, 6845},
{range, 6912, 6915},
{value, 6964},
{range, 6966, 6970},
{value, 6972},
{value, 6978},
{range, 7019, 7027},
{range, 7040, 7041},
{range, 7074, 7077},
{range, 7080, 7081},
{range, 7083, 7085},
{value, 7142},
{range, 7144, 7145},
{value, 7149},
{range, 7151, 7153},
{range, 7212, 7219},
{range, 7222, 7226},
{range, 7376, 7378},
{range, 7380, 7392},
{range, 7394, 7400},
{value, 7405},
{value, 7412},
{range, 7416, 7423},
{range, 7616, 7679},
{range, 8400, 8412},
{value, 8417},
{range, 8421, 8447},
{range, 11503, 11505},
{value, 11647},
{range, 11744, 11775},
{range, 12330, 12333},
{range, 12441, 12442},
{value, 42607},
{range, 42612, 42621},
{range, 42654, 42655},
{range, 42736, 42737},
{value, 43010},
{value, 43014},
{value, 43019},
{range, 43045, 43046},
{range, 43204, 43213},
{range, 43232, 43249},
{value, 43263},
{range, 43302, 43309},
{range, 43335, 43345},
{range, 43392, 43394},
{value, 43443},
{range, 43446, 43449},
{value, 43452},
{value, 43493},
{range, 43561, 43566},
{range, 43569, 43570},
{range, 43573, 43583},
{value, 43587},
{value, 43596},
{value, 43644},
{value, 43696},
{range, 43698, 43700},
{range, 43703, 43704},
{range, 43710, 43711},
{value, 43713},
{range, 43756, 43757},
{range, 43766, 43776},
{value, 44005},
{value, 44008},
{range, 44013, 44015},
{value, 64286},
{range, 65024, 65039},
{range, 65056, 65071},
{range, 66045, 66175},
{value, 66272},
{range, 66422, 66431},
{range, 68097, 68111},
{range, 68152, 68159},
{range, 68325, 68330},
{range, 68900, 68911},
{range, 69446, 69456},
{value, 69633},
{range, 69688, 69702},
{range, 69759, 69761},
{range, 69811, 69814},
{range, 69817, 69818},
{range, 69888, 69890},
{range, 69927, 69931},
{range, 69933, 69941},
{value, 70003},
{range, 70016, 70017},
{range, 70070, 70078},
{range, 70089, 70092},
{range, 70191, 70193},
{value, 70196},
{range, 70198, 70199},
{range, 70206, 70271},
{value, 70367},
{range, 70371, 70383},
{range, 70400, 70401},
{range, 70459, 70460},
{value, 70464},
{range, 70502, 70655},
{range, 70712, 70719},
{range, 70722, 70724},
{value, 70726},
{range, 70750, 70783},
{range, 70835, 70840},
{value, 70842},
{range, 70847, 70848},
{range, 70850, 70851},
{range, 71090, 71095},
{range, 71100, 71101},
{range, 71103, 71104},
{range, 71132, 71167},
{range, 71219, 71226},
{value, 71229},
{range, 71231, 71232},
{value, 71339},
{value, 71341},
{range, 71344, 71349},
{range, 71351, 71359},
{range, 71453, 71455},
{range, 71458, 71461},
{range, 71463, 71471},
{range, 71727, 71735},
{range, 71737, 71738},
{range, 72193, 72202},
{range, 72243, 72248},
{range, 72251, 72254},
{range, 72263, 72271},
{range, 72273, 72278},
{range, 72281, 72283},
{range, 72330, 72342},
{range, 72344, 72345},
{range, 72752, 72765},
{value, 72767},
{range, 72850, 72872},
{range, 72874, 72880},
{range, 72882, 72883},
{range, 72885, 72959},
{range, 73009, 73029},
{range, 73031, 73039},
{range, 73104, 73106},
{value, 73109},
{value, 73111},
{range, 73459, 73460},
{range, 92912, 92916},
{range, 92976, 92982},
{range, 94095, 94098},
{range, 113821, 113822},
{range, 119143, 119145},
{range, 119163, 119170},
{range, 119173, 119179},
{range, 119210, 119213},
{range, 119362, 119364},
{range, 121344, 121398},
{range, 121403, 121452},
{value, 121461},
{value, 121476},
{range, 121499, 124927},
{range, 125136, 125183},
{range, 125252, 125263},
{range, 917760, 983039}
];
%% Code points in Unicode general category "N" (Number — union of
%% Nd/Nl/No), e.g. 48-57 = ASCII digits '0'..'9'. Entries are ascending
%% {value, CodePoint} | {range, From, To} tuples with inclusive bounds;
%% adjacent subcategory runs appear coalesced (e.g. 8528-8578 spans both
%% No 8528-8543 and Nl 8544-8578). Presumably generated from the Unicode
%% Character Database — do not edit by hand.
range("N") ->
[
{range, 48, 57},
{range, 178, 179},
{value, 185},
{range, 188, 190},
{range, 1632, 1641},
{range, 1776, 1785},
{range, 1984, 1993},
{range, 2406, 2415},
{range, 2534, 2543},
{range, 2548, 2553},
{range, 2662, 2671},
{range, 2790, 2799},
{range, 2918, 2927},
{range, 2930, 2945},
{range, 3046, 3058},
{range, 3174, 3183},
{range, 3192, 3198},
{range, 3302, 3311},
{range, 3416, 3422},
{range, 3430, 3448},
{range, 3558, 3567},
{range, 3664, 3673},
{range, 3792, 3801},
{range, 3872, 3891},
{range, 4160, 4169},
{range, 4240, 4249},
{range, 4969, 4991},
{range, 5870, 5872},
{range, 6112, 6121},
{range, 6128, 6143},
{range, 6160, 6169},
{range, 6470, 6479},
{range, 6608, 6621},
{range, 6784, 6793},
{range, 6800, 6809},
{range, 6992, 7001},
{range, 7088, 7097},
{range, 7232, 7241},
{range, 7248, 7257},
{value, 8304},
{range, 8308, 8313},
{range, 8320, 8329},
{range, 8528, 8578},
{range, 8581, 8585},
{range, 9312, 9371},
{range, 9450, 9471},
{range, 10102, 10131},
{value, 11517},
{value, 12295},
{range, 12321, 12329},
{range, 12344, 12346},
{range, 12690, 12693},
{range, 12832, 12841},
{range, 12872, 12879},
{range, 12881, 12895},
{range, 12928, 12937},
{range, 12977, 12991},
{range, 42528, 42537},
{range, 42726, 42735},
{range, 43056, 43061},
{range, 43216, 43225},
{range, 43264, 43273},
{range, 43472, 43481},
{range, 43504, 43513},
{range, 43600, 43609},
{range, 44016, 44025},
{range, 65296, 65305},
{range, 65799, 65846},
{range, 65856, 65912},
{range, 65930, 65931},
{range, 66273, 66303},
{range, 66336, 66348},
{value, 66369},
{range, 66378, 66383},
{range, 66513, 66559},
{range, 66720, 66729},
{range, 67672, 67679},
{range, 67705, 67711},
{range, 67751, 67807},
{range, 67835, 67839},
{range, 67862, 67870},
{range, 68028, 68029},
{range, 68032, 68095},
{range, 68160, 68175},
{range, 68221, 68222},
{range, 68253, 68287},
{range, 68331, 68335},
{range, 68440, 68447},
{range, 68472, 68479},
{range, 68521, 68607},
{range, 68858, 68863},
{range, 68912, 68921},
{range, 69216, 69375},
{range, 69405, 69414},
{range, 69457, 69460},
{range, 69714, 69743},
{range, 69872, 69881},
{range, 69942, 69951},
{range, 70096, 70105},
{range, 70113, 70143},
{range, 70384, 70393},
{range, 70736, 70745},
{range, 70864, 70873},
{range, 71248, 71257},
{range, 71360, 71369},
{range, 71472, 71483},
{range, 71904, 71934},
{range, 72784, 72815},
{range, 73040, 73049},
{range, 73120, 73129},
{range, 74752, 74863},
{range, 92768, 92777},
{range, 93008, 93017},
{range, 93019, 93026},
{range, 93824, 93846},
{range, 119520, 119551},
{range, 119648, 119807},
{range, 120782, 120831},
{range, 125127, 125135},
{range, 125264, 125273},
{range, 126065, 126123},
{range, 126125, 126127},
{range, 126129, 126463},
{range, 127232, 127247}
];
%% Code points in Unicode general category "Nd" (Decimal Digit Number),
%% e.g. 48-57 = '0'..'9'; each range is a full run of 10 digits for one
%% script. Entries are ascending {range, From, To} tuples with inclusive
%% bounds. Presumably generated from the Unicode Character Database — do
%% not edit by hand.
range("Nd") ->
[
{range, 48, 57},
{range, 1632, 1641},
{range, 1776, 1785},
{range, 1984, 1993},
{range, 2406, 2415},
{range, 2534, 2543},
{range, 2662, 2671},
{range, 2790, 2799},
{range, 2918, 2927},
{range, 3046, 3055},
{range, 3174, 3183},
{range, 3302, 3311},
{range, 3430, 3439},
{range, 3558, 3567},
{range, 3664, 3673},
{range, 3792, 3801},
{range, 3872, 3881},
{range, 4160, 4169},
{range, 4240, 4249},
{range, 6112, 6121},
{range, 6160, 6169},
{range, 6470, 6479},
{range, 6608, 6617},
{range, 6784, 6793},
{range, 6800, 6809},
{range, 6992, 7001},
{range, 7088, 7097},
{range, 7232, 7241},
{range, 7248, 7257},
{range, 42528, 42537},
{range, 43216, 43225},
{range, 43264, 43273},
{range, 43472, 43481},
{range, 43504, 43513},
{range, 43600, 43609},
{range, 44016, 44025},
{range, 65296, 65305},
{range, 66720, 66729},
{range, 68912, 68921},
{range, 69734, 69743},
{range, 69872, 69881},
{range, 69942, 69951},
{range, 70096, 70105},
{range, 70384, 70393},
{range, 70736, 70745},
{range, 70864, 70873},
{range, 71248, 71257},
{range, 71360, 71369},
{range, 71472, 71481},
{range, 71904, 71913},
{range, 72784, 72793},
{range, 73040, 73049},
{range, 73120, 73129},
{range, 92768, 92777},
{range, 93008, 93017},
{range, 120782, 120831},
{range, 125264, 125273}
];
%% Code points in Unicode general category "Nl" (Letter Number), e.g.
%% 8544-8578 = Roman numerals (U+2160..). Entries are ascending
%% {value, CodePoint} | {range, From, To} tuples with inclusive bounds;
%% presumably generated from the Unicode Character Database — do not edit
%% by hand.
range("Nl") ->
[
{range, 5870, 5872},
{range, 8544, 8578},
{range, 8581, 8584},
{value, 12295},
{range, 12321, 12329},
{range, 12344, 12346},
{range, 42726, 42735},
{range, 65856, 65908},
{value, 66369},
{range, 66378, 66383},
{range, 66513, 66559},
{range, 74752, 74863}
];
%% Code points in Unicode general category "No" (Other Number), e.g.
%% 178-179 = superscript two/three, 188-190 = vulgar fractions. Entries
%% are ascending {value, CodePoint} | {range, From, To} tuples with
%% inclusive bounds; presumably generated from the Unicode Character
%% Database — do not edit by hand.
range("No") ->
[
{range, 178, 179},
{value, 185},
{range, 188, 190},
{range, 2548, 2553},
{range, 2930, 2945},
{range, 3056, 3058},
{range, 3192, 3198},
{range, 3416, 3422},
{range, 3440, 3448},
{range, 3882, 3891},
{range, 4969, 4991},
{range, 6128, 6143},
{range, 6618, 6621},
{value, 8304},
{range, 8308, 8313},
{range, 8320, 8329},
{range, 8528, 8543},
{value, 8585},
{range, 9312, 9371},
{range, 9450, 9471},
{range, 10102, 10131},
{value, 11517},
{range, 12690, 12693},
{range, 12832, 12841},
{range, 12872, 12879},
{range, 12881, 12895},
{range, 12928, 12937},
{range, 12977, 12991},
{range, 43056, 43061},
{range, 65799, 65846},
{range, 65909, 65912},
{range, 65930, 65931},
{range, 66273, 66303},
{range, 66336, 66348},
{range, 67672, 67679},
{range, 67705, 67711},
{range, 67751, 67807},
{range, 67835, 67839},
{range, 67862, 67870},
{range, 68028, 68029},
{range, 68032, 68095},
{range, 68160, 68175},
{range, 68221, 68222},
{range, 68253, 68287},
{range, 68331, 68335},
{range, 68440, 68447},
{range, 68472, 68479},
{range, 68521, 68607},
{range, 68858, 68863},
{range, 69216, 69375},
{range, 69405, 69414},
{range, 69457, 69460},
{range, 69714, 69733},
{range, 70113, 70143},
{range, 71482, 71483},
{range, 71914, 71934},
{range, 72794, 72815},
{range, 93019, 93026},
{range, 93824, 93846},
{range, 119520, 119551},
{range, 119648, 119807},
{range, 125127, 125135},
{range, 126065, 126123},
{range, 126125, 126127},
{range, 126129, 126463},
{range, 127232, 127247}
];
%% Code points in Unicode general category "P" (Punctuation — union of
%% Pc/Pd/Pe/Pf/Pi/Po/Ps), e.g. 33-35 = "!", "#", 95 = "_". Entries are
%% ascending {value, CodePoint} | {range, From, To} tuples with inclusive
%% bounds; adjacent subcategory runs appear coalesced. Presumably
%% generated from the Unicode Character Database — do not edit by hand.
range("P") ->
[
{range, 33, 35},
{range, 37, 42},
{range, 44, 47},
{range, 58, 59},
{range, 63, 64},
{range, 91, 93},
{value, 95},
{value, 123},
{value, 125},
{value, 161},
{value, 167},
{value, 171},
{range, 182, 183},
{value, 187},
{value, 191},
{value, 894},
{value, 903},
{range, 1370, 1375},
{range, 1417, 1420},
{value, 1470},
{value, 1472},
{value, 1475},
{value, 1478},
{range, 1523, 1535},
{range, 1545, 1546},
{range, 1548, 1549},
{value, 1563},
{range, 1566, 1567},
{range, 1642, 1645},
{value, 1748},
{range, 1792, 1806},
{range, 2039, 2041},
{range, 2096, 2111},
{range, 2142, 2143},
{range, 2404, 2405},
{value, 2416},
{value, 2557},
{range, 2678, 2688},
{value, 2800},
{value, 3204},
{range, 3572, 3584},
{value, 3663},
{range, 3674, 3712},
{range, 3844, 3858},
{value, 3860},
{range, 3898, 3901},
{value, 3973},
{range, 4048, 4052},
{range, 4057, 4095},
{range, 4170, 4175},
{value, 4347},
{range, 4960, 4968},
{value, 5120},
{range, 5741, 5742},
{range, 5787, 5791},
{range, 5867, 5869},
{range, 5941, 5951},
{range, 6100, 6102},
{range, 6104, 6106},
{range, 6144, 6154},
{range, 6468, 6469},
{range, 6686, 6687},
{range, 6816, 6822},
{range, 6824, 6831},
{range, 7002, 7008},
{range, 7164, 7167},
{range, 7227, 7231},
{range, 7294, 7295},
{range, 7360, 7375},
{value, 7379},
{range, 8208, 8231},
{range, 8240, 8259},
{range, 8261, 8273},
{range, 8275, 8286},
{range, 8317, 8318},
{range, 8333, 8335},
{range, 8968, 8971},
{range, 9001, 9002},
{range, 10088, 10101},
{range, 10181, 10182},
{range, 10214, 10223},
{range, 10627, 10648},
{range, 10712, 10715},
{range, 10748, 10749},
{range, 11513, 11516},
{range, 11518, 11519},
{range, 11632, 11646},
{range, 11776, 11822},
{range, 11824, 11903},
{range, 12289, 12291},
{range, 12296, 12305},
{range, 12308, 12319},
{value, 12336},
{value, 12349},
{value, 12448},
{value, 12539},
{range, 42238, 42239},
{range, 42509, 42511},
{value, 42611},
{value, 42622},
{range, 42738, 42751},
{range, 43124, 43135},
{range, 43214, 43215},
{range, 43256, 43258},
{value, 43260},
{range, 43310, 43311},
{value, 43359},
{range, 43457, 43470},
{range, 43486, 43487},
{range, 43612, 43615},
{range, 43742, 43743},
{range, 43760, 43761},
{value, 44011},
{range, 64830, 64847},
{range, 65040, 65055},
{range, 65072, 65121},
{value, 65123},
{value, 65128},
{range, 65130, 65135},
{range, 65281, 65283},
{range, 65285, 65290},
{range, 65292, 65295},
{range, 65306, 65307},
{range, 65311, 65312},
{range, 65339, 65341},
{value, 65343},
{value, 65371},
{value, 65373},
{range, 65375, 65381},
{range, 65792, 65798},
{value, 66463},
{value, 66512},
{range, 66927, 67071},
{value, 67671},
{value, 67871},
{range, 67903, 67967},
{range, 68176, 68191},
{value, 68223},
{range, 68336, 68351},
{range, 68409, 68415},
{range, 68505, 68520},
{range, 69461, 69631},
{range, 69703, 69713},
{range, 69819, 69820},
{range, 69822, 69836},
{range, 69952, 69955},
{range, 70004, 70005},
{range, 70085, 70088},
{range, 70093, 70095},
{value, 70107},
{range, 70109, 70112},
{range, 70200, 70205},
{range, 70313, 70319},
{range, 70731, 70735},
{range, 70747, 70749},
{value, 70854},
{range, 71105, 71127},
{range, 71233, 71235},
{range, 71264, 71295},
{range, 71484, 71486},
{range, 71739, 71839},
{range, 72255, 72262},
{range, 72346, 72348},
{range, 72350, 72383},
{range, 72769, 72783},
{range, 72816, 72817},
{range, 73463, 73727},
{range, 74864, 74879},
{range, 92782, 92879},
{range, 92917, 92927},
{range, 92983, 92987},
{value, 92996},
{range, 93847, 93951},
{value, 113823},
{range, 121479, 121498},
{range, 125278, 126064}
];
%% Code points in Unicode general category "Pc" (Connector Punctuation),
%% e.g. 95 = "_" (LOW LINE). Entries are ascending
%% {value, CodePoint} | {range, From, To} tuples with inclusive bounds;
%% presumably generated from the Unicode Character Database — do not edit
%% by hand.
range("Pc") ->
[
{value, 95},
{range, 8255, 8256},
{value, 8276},
{range, 65075, 65076},
{range, 65101, 65103},
{value, 65343}
];
%% Code points in Unicode general category "Pd" (Dash Punctuation), e.g.
%% 45 = "-" (HYPHEN-MINUS), 8208-8213 = hyphen through em dash. Entries
%% are ascending {value, CodePoint} | {range, From, To} tuples with
%% inclusive bounds; presumably generated from the Unicode Character
%% Database — do not edit by hand.
range("Pd") ->
[
{value, 45},
{range, 1418, 1420},
{value, 1470},
{value, 5120},
{value, 6150},
{range, 8208, 8213},
{value, 11799},
{value, 11802},
{range, 11834, 11835},
{value, 11840},
{value, 12316},
{value, 12336},
{value, 12448},
{range, 65073, 65074},
{value, 65112},
{value, 65123},
{value, 65293}
];
%% Code points in Unicode general category "Pe" (Close Punctuation —
%% closing brackets), e.g. 41 = ")", 93 = "]", 125 = "}". Entries are
%% ascending {value, CodePoint} | {range, From, To} tuples with inclusive
%% bounds; presumably generated from the Unicode Character Database — do
%% not edit by hand.
range("Pe") ->
[
{value, 41},
{value, 93},
{value, 125},
{value, 3899},
{value, 3901},
{range, 5788, 5791},
{value, 8262},
{value, 8318},
{range, 8334, 8335},
{value, 8969},
{value, 8971},
{value, 9002},
{value, 10089},
{value, 10091},
{value, 10093},
{value, 10095},
{value, 10097},
{value, 10099},
{value, 10101},
{value, 10182},
{value, 10215},
{value, 10217},
{value, 10219},
{value, 10221},
{value, 10223},
{value, 10628},
{value, 10630},
{value, 10632},
{value, 10634},
{value, 10636},
{value, 10638},
{value, 10640},
{value, 10642},
{value, 10644},
{value, 10646},
{value, 10648},
{value, 10713},
{value, 10715},
{value, 10749},
{value, 11811},
{value, 11813},
{value, 11815},
{value, 11817},
{value, 12297},
{value, 12299},
{value, 12301},
{value, 12303},
{value, 12305},
{value, 12309},
{value, 12311},
{value, 12313},
{value, 12315},
{range, 12318, 12319},
{value, 64830},
{value, 65048},
{value, 65078},
{value, 65080},
{value, 65082},
{value, 65084},
{value, 65086},
{value, 65088},
{value, 65090},
{value, 65092},
{value, 65096},
{value, 65114},
{value, 65116},
{value, 65118},
{value, 65289},
{value, 65341},
{value, 65373},
{value, 65376},
{value, 65379}
];
%% Code points in Unicode general category "Pf" (Final Punctuation —
%% closing quotation marks), e.g. 187 = ">>" (RIGHT-POINTING DOUBLE ANGLE
%% QUOTATION MARK), 8217 = right single quote, 8221 = right double quote.
%% Entries are ascending {value, CodePoint} tuples; presumably generated
%% from the Unicode Character Database — do not edit by hand.
range("Pf") ->
[
{value, 187},
{value, 8217},
{value, 8221},
{value, 8250},
{value, 11779},
{value, 11781},
{value, 11786},
{value, 11789},
{value, 11805},
{value, 11809}
];
%% Code points in Unicode general category "Pi" (Initial Punctuation —
%% opening quotation marks), e.g. 171 = "<<" (LEFT-POINTING DOUBLE ANGLE
%% QUOTATION MARK), 8216 = left single quote. Entries are ascending
%% {value, CodePoint} | {range, From, To} tuples with inclusive bounds;
%% presumably generated from the Unicode Character Database — do not edit
%% by hand.
range("Pi") ->
[
{value, 171},
{value, 8216},
{range, 8219, 8220},
{value, 8223},
{value, 8249},
{value, 11778},
{value, 11780},
{value, 11785},
{value, 11788},
{value, 11804},
{value, 11808}
];
%% Code points in Unicode general category "Po" (Other Punctuation), e.g.
%% 33-35 = "!", '"', "#", 63 = "?". Entries are ascending
%% {value, CodePoint} | {range, From, To} tuples with inclusive bounds;
%% presumably generated from the Unicode Character Database — do not edit
%% by hand.
range("Po") ->
[
{range, 33, 35},
{range, 37, 39},
{value, 42},
{value, 44},
{range, 46, 47},
{range, 58, 59},
{range, 63, 64},
{value, 92},
{value, 161},
{value, 167},
{range, 182, 183},
{value, 191},
{value, 894},
{value, 903},
{range, 1370, 1375},
{value, 1417},
{value, 1472},
{value, 1475},
{value, 1478},
{range, 1523, 1535},
{range, 1545, 1546},
{range, 1548, 1549},
{value, 1563},
{range, 1566, 1567},
{range, 1642, 1645},
{value, 1748},
{range, 1792, 1806},
{range, 2039, 2041},
{range, 2096, 2111},
{range, 2142, 2143},
{range, 2404, 2405},
{value, 2416},
{value, 2557},
{range, 2678, 2688},
{value, 2800},
{value, 3204},
{range, 3572, 3584},
{value, 3663},
{range, 3674, 3712},
{range, 3844, 3858},
{value, 3860},
{value, 3973},
{range, 4048, 4052},
{range, 4057, 4095},
{range, 4170, 4175},
{value, 4347},
{range, 4960, 4968},
{range, 5741, 5742},
{range, 5867, 5869},
{range, 5941, 5951},
{range, 6100, 6102},
{range, 6104, 6106},
{range, 6144, 6149},
{range, 6151, 6154},
{range, 6468, 6469},
{range, 6686, 6687},
{range, 6816, 6822},
{range, 6824, 6831},
{range, 7002, 7008},
{range, 7164, 7167},
{range, 7227, 7231},
{range, 7294, 7295},
{range, 7360, 7375},
{value, 7379},
{range, 8214, 8215},
{range, 8224, 8231},
{range, 8240, 8248},
{range, 8251, 8254},
{range, 8257, 8259},
{range, 8263, 8273},
{value, 8275},
{range, 8277, 8286},
{range, 11513, 11516},
{range, 11518, 11519},
{range, 11632, 11646},
{range, 11776, 11777},
{range, 11782, 11784},
{value, 11787},
{range, 11790, 11798},
{range, 11800, 11801},
{value, 11803},
{range, 11806, 11807},
{range, 11818, 11822},
{range, 11824, 11833},
{range, 11836, 11839},
{value, 11841},
{range, 11843, 11903},
{range, 12289, 12291},
{value, 12349},
{value, 12539},
{range, 42238, 42239},
{range, 42509, 42511},
{value, 42611},
{value, 42622},
{range, 42738, 42751},
{range, 43124, 43135},
{range, 43214, 43215},
{range, 43256, 43258},
{value, 43260},
{range, 43310, 43311},
{value, 43359},
{range, 43457, 43470},
{range, 43486, 43487},
{range, 43612, 43615},
{range, 43742, 43743},
{range, 43760, 43761},
{value, 44011},
{range, 65040, 65046},
{range, 65049, 65055},
{value, 65072},
{range, 65093, 65094},
{range, 65097, 65100},
{range, 65104, 65111},
{range, 65119, 65121},
{value, 65128},
{range, 65130, 65135},
{range, 65281, 65283},
{range, 65285, 65287},
{value, 65290},
{value, 65292},
{range, 65294, 65295},
{range, 65306, 65307},
{range, 65311, 65312},
{value, 65340},
{value, 65377},
{range, 65380, 65381},
{range, 65792, 65798},
{value, 66463},
{value, 66512},
{range, 66927, 67071},
{value, 67671},
{value, 67871},
{range, 67903, 67967},
{range, 68176, 68191},
{value, 68223},
{range, 68336, 68351},
{range, 68409, 68415},
{range, 68505, 68520},
{range, 69461, 69631},
{range, 69703, 69713},
{range, 69819, 69820},
{range, 69822, 69836},
{range, 69952, 69955},
{range, 70004, 70005},
{range, 70085, 70088},
{range, 70093, 70095},
{value, 70107},
{range, 70109, 70112},
{range, 70200, 70205},
{range, 70313, 70319},
{range, 70731, 70735},
{range, 70747, 70749},
{value, 70854},
{range, 71105, 71127},
{range, 71233, 71235},
{range, 71264, 71295},
{range, 71484, 71486},
{range, 71739, 71839},
{range, 72255, 72262},
{range, 72346, 72348},
{range, 72350, 72383},
{range, 72769, 72783},
{range, 72816, 72817},
{range, 73463, 73727},
{range, 74864, 74879},
{range, 92782, 92879},
{range, 92917, 92927},
{range, 92983, 92987},
{value, 92996},
{range, 93847, 93951},
{value, 113823},
{range, 121479, 121498},
{range, 125278, 126064}
];
range("Ps") ->
[
{value, 40},
{value, 91},
{value, 123},
{value, 3898},
{value, 3900},
{value, 5787},
{value, 8218},
{value, 8222},
{value, 8261},
{value, 8317},
{value, 8333},
{value, 8968},
{value, 8970},
{value, 9001},
{value, 10088},
{value, 10090},
{value, 10092},
{value, 10094},
{value, 10096},
{value, 10098},
{value, 10100},
{value, 10181},
{value, 10214},
{value, 10216},
{value, 10218},
{value, 10220},
{value, 10222},
{value, 10627},
{value, 10629},
{value, 10631},
{value, 10633},
{value, 10635},
{value, 10637},
{value, 10639},
{value, 10641},
{value, 10643},
{value, 10645},
{value, 10647},
{value, 10712},
{value, 10714},
{value, 10748},
{value, 11810},
{value, 11812},
{value, 11814},
{value, 11816},
{value, 11842},
{value, 12296},
{value, 12298},
{value, 12300},
{value, 12302},
{value, 12304},
{value, 12308},
{value, 12310},
{value, 12312},
{value, 12314},
{value, 12317},
{range, 64831, 64847},
{value, 65047},
{value, 65077},
{value, 65079},
{value, 65081},
{value, 65083},
{value, 65085},
{value, 65087},
{value, 65089},
{value, 65091},
{value, 65095},
{value, 65113},
{value, 65115},
{value, 65117},
{value, 65288},
{value, 65339},
{value, 65371},
{value, 65375},
{value, 65378}
];
range("S") ->
[
{value, 36},
{value, 43},
{range, 60, 62},
{value, 94},
{value, 96},
{value, 124},
{value, 126},
{range, 162, 166},
{range, 168, 169},
{value, 172},
{range, 174, 177},
{value, 180},
{value, 184},
{value, 215},
{value, 247},
{range, 706, 709},
{range, 722, 735},
{range, 741, 747},
{value, 749},
{range, 751, 767},
{value, 885},
{range, 900, 901},
{value, 1014},
{value, 1154},
{range, 1421, 1424},
{range, 1542, 1544},
{value, 1547},
{range, 1550, 1551},
{value, 1758},
{value, 1769},
{range, 1789, 1790},
{value, 2038},
{range, 2046, 2047},
{range, 2546, 2547},
{range, 2554, 2555},
{range, 2801, 2808},
{value, 2928},
{range, 3059, 3071},
{value, 3199},
{range, 3407, 3411},
{value, 3449},
{value, 3647},
{range, 3841, 3843},
{value, 3859},
{range, 3861, 3863},
{range, 3866, 3871},
{value, 3892},
{value, 3894},
{value, 3896},
{range, 4030, 4037},
{range, 4039, 4047},
{range, 4053, 4056},
{range, 4254, 4255},
{range, 5008, 5023},
{value, 6107},
{range, 6464, 6467},
{range, 6622, 6655},
{range, 7009, 7018},
{range, 7028, 7039},
{value, 8125},
{range, 8127, 8129},
{range, 8141, 8143},
{range, 8157, 8159},
{range, 8173, 8177},
{range, 8189, 8191},
{value, 8260},
{value, 8274},
{range, 8314, 8316},
{range, 8330, 8332},
{range, 8352, 8399},
{range, 8448, 8449},
{range, 8451, 8454},
{range, 8456, 8457},
{value, 8468},
{range, 8470, 8472},
{range, 8478, 8483},
{value, 8485},
{value, 8487},
{value, 8489},
{value, 8494},
{range, 8506, 8507},
{range, 8512, 8516},
{range, 8522, 8525},
{value, 8527},
{range, 8586, 8967},
{range, 8972, 9000},
{range, 9003, 9311},
{range, 9372, 9449},
{range, 9472, 10087},
{range, 10132, 10180},
{range, 10183, 10213},
{range, 10224, 10626},
{range, 10649, 10711},
{range, 10716, 10747},
{range, 10750, 11263},
{range, 11493, 11498},
{range, 11904, 12287},
{value, 12292},
{range, 12306, 12307},
{value, 12320},
{range, 12342, 12343},
{range, 12350, 12352},
{range, 12443, 12444},
{range, 12688, 12689},
{range, 12694, 12703},
{range, 12736, 12783},
{range, 12800, 12831},
{range, 12842, 12871},
{value, 12880},
{range, 12896, 12927},
{range, 12938, 12976},
{range, 12992, 13311},
{range, 19904, 19967},
{range, 42128, 42191},
{range, 42752, 42774},
{range, 42784, 42785},
{range, 42889, 42890},
{range, 43048, 43055},
{range, 43062, 43071},
{range, 43639, 43641},
{value, 43867},
{value, 64297},
{range, 64434, 64466},
{range, 65020, 65023},
{value, 65122},
{range, 65124, 65127},
{value, 65129},
{value, 65284},
{value, 65291},
{range, 65308, 65310},
{value, 65342},
{value, 65344},
{value, 65372},
{value, 65374},
{range, 65504, 65528},
{range, 65532, 65535},
{range, 65847, 65855},
{range, 65913, 65929},
{range, 65932, 66044},
{range, 67703, 67704},
{value, 68296},
{range, 71487, 71679},
{range, 92988, 92991},
{range, 92997, 93007},
{value, 113820},
{range, 118784, 119140},
{range, 119146, 119148},
{range, 119171, 119172},
{range, 119180, 119209},
{range, 119214, 119361},
{range, 119365, 119519},
{range, 119552, 119647},
{value, 120513},
{value, 120539},
{value, 120571},
{value, 120597},
{value, 120629},
{value, 120655},
{value, 120687},
{value, 120713},
{value, 120745},
{value, 120771},
{range, 120832, 121343},
{range, 121399, 121402},
{range, 121453, 121460},
{range, 121462, 121475},
{range, 121477, 121478},
{value, 126124},
{value, 126128},
{range, 126704, 127231},
{range, 127248, 131071}
];
range("Sc") ->
[
{value, 36},
{range, 162, 165},
{range, 1423, 1424},
{value, 1547},
{range, 2046, 2047},
{range, 2546, 2547},
{value, 2555},
{range, 2801, 2808},
{value, 3065},
{value, 3647},
{value, 6107},
{range, 8352, 8399},
{value, 43064},
{value, 65020},
{value, 65129},
{value, 65284},
{range, 65504, 65505},
{range, 65509, 65511},
{value, 126128}
];
range("Sk") ->
[
{value, 94},
{value, 96},
{value, 168},
{value, 175},
{value, 180},
{value, 184},
{range, 706, 709},
{range, 722, 735},
{range, 741, 747},
{value, 749},
{range, 751, 767},
{value, 885},
{range, 900, 901},
{value, 8125},
{range, 8127, 8129},
{range, 8141, 8143},
{range, 8157, 8159},
{range, 8173, 8177},
{range, 8189, 8191},
{range, 12443, 12444},
{range, 42752, 42774},
{range, 42784, 42785},
{range, 42889, 42890},
{value, 43867},
{range, 64434, 64466},
{value, 65342},
{value, 65344},
{value, 65507},
{range, 127995, 127999}
];
range("Sm") ->
[
{value, 43},
{range, 60, 62},
{value, 124},
{value, 126},
{value, 172},
{value, 177},
{value, 215},
{value, 247},
{value, 1014},
{range, 1542, 1544},
{value, 8260},
{value, 8274},
{range, 8314, 8316},
{range, 8330, 8332},
{value, 8472},
{range, 8512, 8516},
{value, 8523},
{range, 8592, 8596},
{range, 8602, 8603},
{value, 8608},
{value, 8611},
{value, 8614},
{value, 8622},
{range, 8654, 8655},
{value, 8658},
{value, 8660},
{range, 8692, 8959},
{range, 8992, 8993},
{value, 9084},
{range, 9115, 9139},
{range, 9180, 9185},
{value, 9655},
{value, 9665},
{range, 9720, 9727},
{value, 9839},
{range, 10176, 10180},
{range, 10183, 10213},
{range, 10224, 10239},
{range, 10496, 10626},
{range, 10649, 10711},
{range, 10716, 10747},
{range, 10750, 11007},
{range, 11056, 11076},
{range, 11079, 11084},
{value, 64297},
{value, 65122},
{range, 65124, 65127},
{value, 65291},
{range, 65308, 65310},
{value, 65372},
{value, 65374},
{value, 65506},
{range, 65513, 65516},
{value, 120513},
{value, 120539},
{value, 120571},
{value, 120597},
{value, 120629},
{value, 120655},
{value, 120687},
{value, 120713},
{value, 120745},
{value, 120771},
{range, 126704, 126975}
];
range("So") ->
[
{value, 166},
{value, 169},
{value, 174},
{value, 176},
{value, 1154},
{range, 1421, 1422},
{range, 1550, 1551},
{value, 1758},
{value, 1769},
{range, 1789, 1790},
{value, 2038},
{value, 2554},
{value, 2928},
{range, 3059, 3064},
{range, 3066, 3071},
{value, 3199},
{range, 3407, 3411},
{value, 3449},
{range, 3841, 3843},
{value, 3859},
{range, 3861, 3863},
{range, 3866, 3871},
{value, 3892},
{value, 3894},
{value, 3896},
{range, 4030, 4037},
{range, 4039, 4047},
{range, 4053, 4056},
{range, 4254, 4255},
{range, 5008, 5023},
{range, 6464, 6467},
{range, 6622, 6655},
{range, 7009, 7018},
{range, 7028, 7039},
{range, 8448, 8449},
{range, 8451, 8454},
{range, 8456, 8457},
{value, 8468},
{range, 8470, 8471},
{range, 8478, 8483},
{value, 8485},
{value, 8487},
{value, 8489},
{value, 8494},
{range, 8506, 8507},
{value, 8522},
{range, 8524, 8525},
{value, 8527},
{range, 8586, 8591},
{range, 8597, 8601},
{range, 8604, 8607},
{range, 8609, 8610},
{range, 8612, 8613},
{range, 8615, 8621},
{range, 8623, 8653},
{range, 8656, 8657},
{value, 8659},
{range, 8661, 8691},
{range, 8960, 8967},
{range, 8972, 8991},
{range, 8994, 9000},
{range, 9003, 9083},
{range, 9085, 9114},
{range, 9140, 9179},
{range, 9186, 9311},
{range, 9372, 9449},
{range, 9472, 9654},
{range, 9656, 9664},
{range, 9666, 9719},
{range, 9728, 9838},
{range, 9840, 10087},
{range, 10132, 10175},
{range, 10240, 10495},
{range, 11008, 11055},
{range, 11077, 11078},
{range, 11085, 11263},
{range, 11493, 11498},
{range, 11904, 12287},
{value, 12292},
{range, 12306, 12307},
{value, 12320},
{range, 12342, 12343},
{range, 12350, 12352},
{range, 12688, 12689},
{range, 12694, 12703},
{range, 12736, 12783},
{range, 12800, 12831},
{range, 12842, 12871},
{value, 12880},
{range, 12896, 12927},
{range, 12938, 12976},
{range, 12992, 13311},
{range, 19904, 19967},
{range, 42128, 42191},
{range, 43048, 43055},
{range, 43062, 43063},
{range, 43065, 43071},
{range, 43639, 43641},
{range, 65021, 65023},
{value, 65508},
{value, 65512},
{range, 65517, 65528},
{range, 65532, 65535},
{range, 65847, 65855},
{range, 65913, 65929},
{range, 65932, 66044},
{range, 67703, 67704},
{value, 68296},
{range, 71487, 71679},
{range, 92988, 92991},
{range, 92997, 93007},
{value, 113820},
{range, 118784, 119140},
{range, 119146, 119148},
{range, 119171, 119172},
{range, 119180, 119209},
{range, 119214, 119361},
{range, 119365, 119519},
{range, 119552, 119647},
{range, 120832, 121343},
{range, 121399, 121402},
{range, 121453, 121460},
{range, 121462, 121475},
{range, 121477, 121478},
{value, 126124},
{range, 126976, 127231},
{range, 127248, 127994},
{range, 128000, 131071}
];
range("Z") ->
[
{value, 32},
{value, 160},
{value, 5760},
{range, 8192, 8202},
{range, 8232, 8233},
{value, 8239},
{value, 8287},
{value, 12288}
];
range("Zl") ->
[{value, 8232}];
range("Zp") ->
[{value, 8233}];
range("Zs") ->
[
{value, 32},
{value, 160},
{value, 5760},
{range, 8192, 8202},
{value, 8239},
{value, 8287},
{value, 12288}
];
%%%
%% End generated code
%%%
% [#x20\t\n\r]
range(R) when
R == "\\s";
R == "\\S"
->
[
{value, 16#09},
{value, 16#0A},
{value, 16#0D},
{value, 16#20}
];
% the set of initial name characters, those matched by Letter | '_' | ':'
% AKA NameStartChar
range(R) when
R == "\\i";
R == "\\I"
->
% initial name characters
[
{value, $:},
{range, $A, $Z},
{value, $_},
{range, $a, $z},
{range, 16#C0, 16#D6},
{range, 16#D8, 16#F6},
{range, 16#F8, 16#2FF},
{range, 16#370, 16#37D},
{range, 16#37F, 16#1FFF},
{range, 16#200C, 16#200D},
{range, 16#2070, 16#218F},
{range, 16#2C00, 16#2FEF},
{range, 16#3001, 16#D7FF},
{range, 16#F900, 16#FDCF},
{range, 16#FDF0, 16#FFFD},
{range, 16#10000, 16#EFFFF}
];
% the set of name characters, those matched by NameChar
%% [4] NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6]
%% | [#xF8-#x2FF] | [#x370-#x37D] | [#x37F-#x1FFF] |
%% [#x200C-#x200D] | [#x2070-#x218F] | [#x2C00-#x2FEF]
%% | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD]
%% | [#x10000-#xEFFFF]
%% [4a] NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
%% [#x0300-#x036F] | [#x203F-#x2040]
range(R) when
R == "\\c";
R == "\\C"
->
% name char
[
{range, $-, $.},
{range, $0, $:},
{range, $A, $Z},
{value, $_},
{range, $a, $z},
{value, 16#B7},
{range, 16#C0, 16#D6},
{range, 16#D8, 16#F6},
{range, 16#F8, 16#37D},
{range, 16#37F, 16#1FFF},
{range, 16#200C, 16#200D},
{range, 16#203F, 16#2040},
{range, 16#2070, 16#218F},
{range, 16#2C00, 16#2FEF},
{range, 16#3001, 16#D7FF},
{range, 16#F900, 16#FDCF},
{range, 16#FDF0, 16#FFFD},
{range, 16#10000, 16#EFFFF}
];
range(R) when
R == "\\d";
R == "\\D"
->
range("Nd");
% [#x0000-#x10FFFF]-[\p{P}\p{Z}\p{C}]
% (all characters except the set of "punctuation",
% "separator" and "other" characters)
% subtract([{range, 16#0, 16#10FFFF]}, union(range("P"), union(range("Z"), range("C")))).
range(R) when
R == "\\w";
R == "\\W"
->
[
{value, 36},
{value, 43},
{range, 48, 57},
{range, 60, 62},
{range, 65, 90},
{value, 94},
{range, 96, 122},
{value, 124},
{value, 126},
{range, 162, 166},
{range, 168, 170},
{value, 172},
{range, 174, 181},
{range, 184, 186},
{range, 188, 190},
{range, 192, 893},
{range, 895, 902},
{range, 904, 1369},
{range, 1376, 1416},
{range, 1421, 1469},
{value, 1471},
{range, 1473, 1474},
{range, 1476, 1477},
{range, 1479, 1522},
{range, 1542, 1544},
{value, 1547},
{range, 1550, 1562},
{range, 1568, 1641},
{range, 1646, 1747},
{range, 1749, 1756},
{range, 1758, 1791},
{range, 1808, 2038},
{range, 2042, 2095},
{range, 2112, 2141},
{range, 2144, 2273},
{range, 2275, 2403},
{range, 2406, 2415},
{range, 2417, 2556},
{range, 2558, 2677},
{range, 2689, 2799},
{range, 2801, 3203},
{range, 3205, 3571},
{range, 3585, 3662},
{range, 3664, 3673},
{range, 3713, 3843},
{value, 3859},
{range, 3861, 3897},
{range, 3902, 3972},
{range, 3974, 4047},
{range, 4053, 4056},
{range, 4096, 4169},
{range, 4176, 4346},
{range, 4348, 4959},
{range, 4969, 5119},
{range, 5121, 5740},
{range, 5743, 5759},
{range, 5761, 5786},
{range, 5792, 5866},
{range, 5870, 5940},
{range, 5952, 6099},
{value, 6103},
{range, 6107, 6143},
{range, 6155, 6157},
{range, 6160, 6467},
{range, 6470, 6685},
{range, 6688, 6815},
{value, 6823},
{range, 6832, 7001},
{range, 7009, 7163},
{range, 7168, 7226},
{range, 7232, 7293},
{range, 7296, 7359},
{range, 7376, 7378},
{range, 7380, 8191},
{value, 8260},
{value, 8274},
{range, 8304, 8316},
{range, 8319, 8332},
{range, 8336, 8967},
{range, 8972, 9000},
{range, 9003, 10087},
{range, 10102, 10180},
{range, 10183, 10213},
{range, 10224, 10626},
{range, 10649, 10711},
{range, 10716, 10747},
{range, 10750, 11512},
{value, 11517},
{range, 11520, 11631},
{range, 11647, 11775},
{value, 11823},
{range, 11904, 12287},
{range, 12292, 12295},
{range, 12306, 12307},
{range, 12320, 12335},
{range, 12337, 12348},
{range, 12350, 12447},
{range, 12449, 12538},
{range, 12540, 42237},
{range, 42240, 42508},
{range, 42512, 42610},
{range, 42612, 42621},
{range, 42623, 42737},
{range, 42752, 43123},
{range, 43136, 43213},
{range, 43216, 43255},
{value, 43259},
{range, 43261, 43309},
{range, 43312, 43358},
{range, 43360, 43456},
{range, 43471, 43485},
{range, 43488, 43611},
{range, 43616, 43741},
{range, 43744, 43759},
{range, 43762, 44010},
{range, 44012, 55295},
{range, 63744, 64829},
{range, 64848, 65039},
{range, 65056, 65071},
{value, 65122},
{range, 65124, 65127},
{value, 65129},
{range, 65136, 65278},
{value, 65284},
{value, 65291},
{range, 65296, 65305},
{range, 65308, 65310},
{range, 65313, 65338},
{value, 65342},
{range, 65344, 65370},
{value, 65372},
{value, 65374},
{range, 65382, 65528},
{range, 65532, 65791},
{range, 65799, 66462},
{range, 66464, 66511},
{range, 66513, 66926},
{range, 67072, 67670},
{range, 67672, 67870},
{range, 67872, 67902},
{range, 67968, 68175},
{range, 68192, 68222},
{range, 68224, 68335},
{range, 68352, 68408},
{range, 68416, 68504},
{range, 68521, 69460},
{range, 69632, 69702},
{range, 69714, 69818},
{range, 69840, 69951},
{range, 69956, 70003},
{range, 70006, 70084},
{range, 70089, 70092},
{range, 70096, 70106},
{value, 70108},
{range, 70113, 70199},
{range, 70206, 70312},
{range, 70320, 70730},
{range, 70736, 70746},
{range, 70750, 70853},
{range, 70855, 71104},
{range, 71128, 71232},
{range, 71236, 71263},
{range, 71296, 71483},
{range, 71487, 71738},
{range, 71840, 72254},
{range, 72263, 72345},
{value, 72349},
{range, 72384, 72768},
{range, 72784, 72815},
{range, 72818, 73462},
{range, 73728, 74863},
{range, 74880, 92781},
{range, 92880, 92916},
{range, 92928, 92982},
{range, 92988, 92995},
{range, 92997, 93846},
{range, 93952, 113822},
{range, 118784, 119154},
{range, 119163, 121478},
{range, 121499, 125277},
{range, 126065, 917504},
{range, 917760, 983039},
{range, 1114110, 1114111}
];
range(Unknown) ->
{Unknown}. | src/xs_regex_util.erl | 0.705785 | 0.401834 | xs_regex_util.erl | starcoder |
%%-------------------------------------------------------------------
%%
%% File: dvvset.erl
%%
%% @title Dotted Version Vector Set
%% @author <NAME> <<EMAIL>>
%% @author <NAME> <<EMAIL>>
%
%% @copyright The MIT License (MIT)
%% Copyright (C) 2013
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
%% associated documentation files (the "Software"), to deal in the Software without restriction,
%% including without limitation the rights to use, copy, modify, merge, publish, distribute,
%% sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in all copies or
%% substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
%% BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
%% NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
%% DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
%%
%% @doc
%% An Erlang implementation of *compact* Dotted Version Vectors, which
%% provides a container for a set of concurrent values (siblings) with causal
%% order information.
%%
%% For further reading, visit the
%% <a href="https://github.com/ricardobcl/Dotted-Version-Vectors/tree/compact">github page</a>.
%% @end
%%
%% @reference
%% <a href="http://arxiv.org/abs/1011.5808">
%% Dotted Version Vectors: Logical Clocks for Optimistic Replication
%% </a>
%% @end
%%
%%-------------------------------------------------------------------
-module(dvvset).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/1,
new/2,
sync/1,
join/1,
update/2,
update/3,
size/1,
ids/1,
values/1,
equal/2,
less/2,
map/2,
last/2,
lww/2,
reconcile/2
]).
-export_type([clock/0, vector/0, id/0, value/0]).
%% @doc
%% STRUCTURE details:
%% * entries() are sorted by id()
%% * each counter() also includes the number of values in that id()
%% * the values in each triple of entries() are causally ordered and each new value goes to the head of the list
-type clock() :: {entries(), values()}.
-type vector() :: [{id(), counter()}].
-type entries() :: [{id(), counter(), values()}].
-type id() :: any().
-type values() :: [value()].
-type value() :: any().
-type counter() :: non_neg_integer().
%% @doc Constructs a new clock set without causal history.
%% The given value (or list of values) goes to the anonymous value list.
-spec new(value() | [value()]) -> clock().
new(Values) when is_list(Values) ->
    {[], Values};
new(Value) ->
    {[], [Value]}.
%% @doc Constructs a new clock set with the causal history of the given
%% version vector / vector clock. The given value (or list of values)
%% goes to the anonymous value list.
%% The version vector SHOULD BE a direct result of join/1.
-spec new(vector(), value() | [value()]) -> clock().
new(VV, Vs) when is_list(Vs) ->
    %% Sort defensively: serialization might not preserve entry order.
    Sorted = lists:sort(VV),
    Entries = [{Id, Counter, []} || {Id, Counter} <- Sorted],
    {Entries, Vs};
new(VV, V) ->
    new(VV, [V]).
%% @doc Synchronizes a list of clocks by pairwise merging with sync/2.
%% Causally outdated values are discarded while all causal histories
%% are merged. The empty tuple is the identity element of sync/2.
-spec sync([clock()]) -> clock().
sync(Clocks) ->
    lists:foldl(fun(Clock, Acc) -> sync(Clock, Acc) end, {}, Clocks).
%% Private function
%% @doc Synchronizes two clocks: merges their entries with sync2/2 and
%% keeps only the anonymous values of the causally newer clock. When
%% neither dominates the other (concurrent), the union of the unique
%% anonymous values is kept. The empty tuple {} is the identity
%% element used as the initial accumulator by sync/1.
-spec sync(clock(), clock()) -> clock().
sync({}, C) -> C;
sync(C ,{}) -> C;
sync(C1={E1,V1},C2={E2,V2}) ->
    V = case less(C1,C2) of
        true -> V2; % C1 < C2 => return V2
        false -> case less(C2,C1) of
                    true -> V1; % C2 < C1 => return V1
                    false -> % keep all unique anonymous values and sync entries()
                        %% NOTE(review): sets:to_list/1 order is unspecified,
                        %% so the order of surviving anonymous values is not stable.
                        sets:to_list(sets:from_list(V1++V2))
                end
        end,
    {sync2(E1,E2),V}.
%% Private function
%% @doc Merges two entry lists (both sorted by id) like a sorted-list
%% merge; entries sharing an id are combined with merge/5.
-spec sync2(entries(), entries()) -> entries().
sync2([], Entries) -> Entries;
sync2(Entries, []) -> Entries;
sync2([{Id1, _, _} = E1 | Rest1], [{Id2, _, _} | _] = All2) when Id1 < Id2 ->
    [E1 | sync2(Rest1, All2)];
sync2([{Id1, _, _} | _] = All1, [{Id2, _, _} = E2 | Rest2]) when Id1 > Id2 ->
    [E2 | sync2(Rest2, All1)];
sync2([{Id, Cnt1, Vals1} | Rest1], [{Id, Cnt2, Vals2} | Rest2]) ->
    [merge(Id, Cnt1, Vals1, Cnt2, Vals2) | sync2(Rest1, Rest2)].
%% Private function
%% @doc Merges two versions of the same entry. The "base" of an entry
%% (counter minus number of retained values) tells how many of its
%% oldest dots were already discarded; the side with the larger base
%% is causally more advanced, so its value list wins. Otherwise the
%% winner's list is trimmed to drop values the other side has seen.
-spec merge(id(), counter(), values(), counter(), values()) -> {id(), counter(), values()}.
merge(Id, Cnt1, Vals1, Cnt2, Vals2) when Cnt1 >= Cnt2 ->
    Base1 = Cnt1 - length(Vals1),
    Base2 = Cnt2 - length(Vals2),
    case Base1 >= Base2 of
        true  -> {Id, Cnt1, Vals1};
        false -> {Id, Cnt1, lists:sublist(Vals1, Cnt1 - Cnt2 + length(Vals2))}
    end;
merge(Id, Cnt1, Vals1, Cnt2, Vals2) ->
    Base1 = Cnt1 - length(Vals1),
    Base2 = Cnt2 - length(Vals2),
    case Base2 >= Base1 of
        true  -> {Id, Cnt2, Vals2};
        false -> {Id, Cnt2, lists:sublist(Vals2, Cnt2 - Cnt1 + length(Vals1))}
    end.
%% @doc Return a version vector that represents the causal history,
%% i.e. the entries stripped of their concurrent values.
-spec join(clock()) -> vector().
join({Entries, _Anonymous}) ->
    lists:map(fun({Id, Counter, _Values}) -> {Id, Counter} end, Entries).
%% @doc Advances the causal history with the given id.
%% The new value is the *anonymous dot* of the clock.
%% The client clock SHOULD BE a direct result of new/2 (it must carry
%% exactly one anonymous value, which becomes the dotted value).
-spec update(clock(), id()) -> clock().
update({Entries, [Value]}, Id) ->
    {event(Entries, Id, Value), []}.
%% @doc Advances the causal history of the first clock with the given
%% id while synchronizing with the second clock, so the result is
%% causally newer than both arguments. The new value is the
%% *anonymous dot* of the clock. The first clock SHOULD BE a direct
%% result of new/2 (the client clock carrying the new value in its
%% anonymous dot); the second clock is the local server replica.
-spec update(clock(), clock(), id()) -> clock().
update({ClientEntries, [Value]}, ServerClock, Id) ->
    %% Sync both clocks without the new value, then create a new event
    %% (with id Id and the new value) on the merged causal history.
    %% Anonymous values that survived the sync are retained.
    {Entries, Anonymous} = sync({ClientEntries, []}, ServerClock),
    {event(Entries, Id, Value), Anonymous}.
%% Private function
%% @doc Inserts a new dot for Id into the (id-sorted) entry list:
%% bump the counter and prepend the value if Id already has an entry,
%% otherwise splice in a fresh {Id, 1, [Val]} at the sorted position.
-spec event(entries(), id(), value()) -> entries().
event([{Id, Cnt, Vals} | Rest], Id, Val) ->
    [{Id, Cnt + 1, [Val | Vals]} | Rest];
event([{OtherId, _, _} | _] = Entries, Id, Val) when OtherId > Id ->
    [{Id, 1, [Val]} | Entries];
event([Entry | Rest], Id, Val) ->
    [Entry | event(Rest, Id, Val)];
event([], Id, Val) ->
    [{Id, 1, [Val]}].
%% @doc Returns the total number of values in this clock set,
%% counting both the per-id values and the anonymous values.
%% Note: this shadows the auto-imported BIF size/1, so in-module
%% callers must qualify the call as ?MODULE:size/1 (see size_test/0).
-spec size(clock()) -> non_neg_integer().
size({C,Vs}) -> lists:sum([length(L) || {_,_,L} <- C]) + length(Vs).
%% @doc Returns all the ids used in this clock set,
%% in the (sorted) order they appear in the entries.
-spec ids(clock()) -> [id()].
ids({Entries, _Anonymous}) ->
    lists:map(fun({Id, _, _}) -> Id end, Entries).
%% @doc Returns all the values used in this clock set:
%% the anonymous values first, then each entry's values in order.
-spec values(clock()) -> [value()].
values({Entries, Anonymous}) ->
    Anonymous ++ lists:flatmap(fun({_, _, Vals}) -> Vals end, Entries).
%% @doc Compares the equality of both clocks, regarding only the
%% causal histories (the concurrent values themselves are ignored,
%% though their count per entry matters). Accepts either two clock
%% sets or two version vectors.
-spec equal(clock() | vector(), clock() | vector()) -> boolean().
equal({Entries1, _}, {Entries2, _}) ->
    %% DVVSet: compare the entry lists
    equal2(Entries1, Entries2);
equal(VV1, VV2) when is_list(VV1), is_list(VV2) ->
    %% version vectors
    equal2(VV1, VV2).
%% Private function
%% @doc Structural equality of two entry lists or two version vectors.
%% Entries (3-tuples) are equal when id, counter and the NUMBER of
%% retained values all match. Vector elements (2-tuples) are equal
%% when id and counter match.
%% Fix: equal/2's spec admits vector() arguments, but the original
%% only matched 3-tuple entries, so two identical non-empty version
%% vectors compared as false; the 2-tuple clause handles that case.
-spec equal2(vector() | entries(), vector() | entries()) -> boolean().
equal2([], []) -> true;
equal2([{I, C, L1} | T1], [{I, C, L2} | T2])
    when length(L1) =:= length(L2) ->
    equal2(T1, T2);
equal2([{I, C} | T1], [{I, C} | T2]) ->
    equal2(T1, T2);
equal2(_, _) -> false.
%% @doc Returns true if the first clock is causally older than the
%% second one (so the first clock's values are outdated); false
%% otherwise, including the concurrent case.
-spec less(clock(), clock()) -> boolean().
less({Entries1, _}, {Entries2, _}) ->
    %% "C1 < C2" iff "C2 strictly dominates C1".
    greater(Entries2, Entries1, false).
%% Private function
%% @doc Returns true iff the first entry list dominates the second,
%% strictly when required. Strict is an accumulator: it becomes true
%% as soon as the first list is ahead on some id (a higher counter,
%% or an id the second list lacks), and the result is false whenever
%% the second list is ahead anywhere.
%% Fix: the spec previously declared vector() (2-tuples), but every
%% clause matches entries() (3-tuples) — a Dialyzer contradiction.
-spec greater(entries(), entries(), boolean()) -> boolean().
greater([], [], Strict) -> Strict;
greater([_|_], [], _) -> true;
greater([], [_|_], _) -> false;
greater([{I, N1, _} | T1], [{I, N2, _} | T2], Strict) ->
    if
        N1 == N2 -> greater(T1, T2, Strict);
        N1 > N2 -> greater(T1, T2, true);
        N1 < N2 -> false
    end;
%% First list has an extra id (lists are sorted): strictly ahead there.
greater([{I1, _, _} | T1], [{I2, _, _} | _]=C2, _) when I1 < I2 -> greater(T1, C2, true);
%% Second list has an id the first lacks: cannot dominate.
greater(_, _, _) -> false.
%% @doc Maps (applies) a function over every value in this clock set
%% (entry values and anonymous values alike), returning the same
%% causal structure with the transformed values.
-spec map(fun((value()) -> value()), clock()) -> clock().
map(Fun, {Entries, Anonymous}) ->
    Mapped = [{Id, Cnt, [Fun(V) || V <- Vals]} || {Id, Cnt, Vals} <- Entries],
    {Mapped, [Fun(V) || V <- Anonymous]}.
%% @doc Return a clock with the same causal history, but with only one
%% value in the anonymous placeholder: the result of applying Winner
%% to the list of all current values.
-spec reconcile(Winner::fun(([value()]) -> value()), clock()) -> clock().
reconcile(Winner, Clock) ->
    NewValue = Winner(values(Clock)),
    new(join(Clock), [NewValue]).
%% @doc Returns the latest value in the clock set, according to the
%% predicate F(A,B), which returns true iff A compares less than or
%% equal to B.
-spec last(LessOrEqual::fun((value(),value()) -> boolean()), clock()) -> value().
last(LessOrEqual, Clock) ->
    {_Tag, _Id, Winner} = find_entry(LessOrEqual, Clock),
    Winner.
%% @doc Return a clock with the same causal history, but with only one
%% value kept in its original position: the newest value according to
%% the predicate F(A,B) (true iff A compares less than or equal to B).
%% If the winner is an anonymous value, the entries are emptied and
%% the winner stays anonymous.
-spec lww(LessOrEqual::fun((value(),value()) -> boolean()), clock()) -> clock().
lww(LessOrEqual, {Entries, _} = Clock) ->
    case find_entry(LessOrEqual, Clock) of
        {id, Id, Value} ->
            {join_and_replace(Id, Value, Entries), []};
        {anonym, _, Value} ->
            new(join(Clock), [Value])
    end.
%% find_entry/2 - Private function
%% @doc Picks the initial candidate for the "winning value" scan:
%% when there are no (non-empty) entries the first anonymous value is
%% used (tagged anonym, with a null id); otherwise the head value of
%% the first entry that has values is used (tagged id). Entries with
%% empty value lists are skipped.
find_entry(F, {[], [V|T]}) -> find_entry(F, null, V, {[],T}, anonym);
find_entry(F, {[{_, _, []} | T], Vs}) -> find_entry(F, {T,Vs});
find_entry(F, {[{I, _, [V|_]} | T], Vs}) -> find_entry(F, I, V, {T,Vs}, id).
%% find_entry/5 - Private function
%% Wraps the user's less-or-equal predicate into a comparator that
%% returns the newer of two values, tagged with the side it came from.
find_entry(F, I, V, C, Flag) ->
    Newer = fun(A, B) ->
        case F(A, B) of
            true -> {right, B};  % A is older than B
            false -> {left, A}   % A is newer than B
        end
    end,
    find_entry2(Newer, I, V, C, Flag).
%% find_entry2/5 - Private function
%% @doc Scans every remaining value — entry values first, then the
%% anonymous values — keeping the current winner as (I, V). Flag
%% records whether the winner came from an entry (id) or from the
%% anonymous list (anonym); it flips to anonym as soon as an anonymous
%% value wins, and anonym stays sticky for the anonymous phase.
%% F is the tagged comparator built by find_entry/5: it returns
%% {left, Winner} when the current winner survives and {right, Winner}
%% when the challenger takes over.
find_entry2(_, I, V, {[], []}, anonym) -> {anonym, I , V};
find_entry2(_, I, V, {[], []}, id) -> {id, I, V};
find_entry2(F, I, V, {[], [V1 | T]}, Flag) ->
    case F(V, V1) of
        {left,V2} -> find_entry2(F, I, V2, {[],T}, Flag);
        {right,V2} -> find_entry2(F, I, V2, {[],T}, anonym)
    end;
%% Skip entries that retain no values.
find_entry2(F, I, V, {[{_, _, []} | T], Vs}, Flag) -> find_entry2(F, I, V, {T, Vs}, Flag);
find_entry2(F, I, V, {[{I1, _, [V1|_]} | T], Vs}, Flag) ->
    case F(V, V1) of
        {left,V2} -> find_entry2(F, I, V2, {T, Vs}, Flag);
        {right,V2} -> find_entry2(F, I1, V2, {T, Vs}, Flag)
    end.
%% Private function
%% Keeps the causal history of every entry, but empties all value
%% lists except for the entry with id KeepId, which keeps exactly
%% [Value]. (Uses ==, matching the original's arithmetic equality.)
join_and_replace(KeepId, Value, Entries) ->
    lists:map(
        fun({Id, Cnt, _}) ->
                case Id == KeepId of
                    true -> {Id, Cnt, [Value]};
                    false -> {Id, Cnt, []}
                end
        end,
        Entries).
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
%% Verifies that join/1 yields the version vector of the causal
%% history: empty for a fresh clock, and one {Id, Counter} pair per
%% actor that has issued an update.
join_test() ->
    A = new([v1]),
    A1 = update(A,a),
    B = new(join(A1),[v2]),
    B1 = update(B, A1, b),
    ?assertEqual( join(A) , [] ),
    ?assertEqual( join(A1) , [{a,1}] ),
    ?assertEqual( join(B1) , [{a,1},{b,1}] ),
    ok.
%% Verifies update/2,3: counters advance per actor, obsolete values
%% are discarded when the client's context covers them, and values
%% written without that context are kept as siblings.
update_test() ->
    A0 = update(new([v1]),a),
    A1 = update(new(join(A0),[v2]), A0, a),
    A2 = update(new(join(A1),[v3]), A1, b),
    A3 = update(new(join(A0),[v4]), A1, b),
    A4 = update(new(join(A0),[v5]), A1, a),
    ?assertEqual( A0 , {[{a,1,[v1]}],[]} ),
    ?assertEqual( A1 , {[{a,2,[v2]}],[]} ),
    ?assertEqual( A2 , {[{a,2,[]}, {b,1,[v3]}],[]} ),
    ?assertEqual( A3 , {[{a,2,[v2]}, {b,1,[v4]}],[]} ),
    ?assertEqual( A4 , {[{a,3,[v5,v2]}],[]} ),
    ok.
%% Verifies sync/1: commutativity, discarding of dominated values,
%% merging of disjoint actor entries, and that lww's winner survives
%% a sync with the original clock.
sync_test() ->
    X = {[{x,1,[]}],[]},
    A = update(new([v1]),a),
    Y = update(new([v2]),b),
    A1 = update(new(join(A),[v2]), a),
    A3 = update(new(join(A1),[v3]), b),
    A4 = update(new(join(A1),[v3]), c),
    F = fun (L,R) -> L>R end,
    W = {[{a,1,[]}],[]},
    Z = {[{a,2,[v2,v1]}],[]},
    ?assertEqual( sync([W,Z]) , {[{a,2,[v2]}],[]} ),
    ?assertEqual( sync([W,Z]) , sync([Z,W]) ),
    ?assertEqual( sync([A,A1]) , sync([A1,A]) ),
    ?assertEqual( sync([A4,A3]) , sync([A3,A4]) ),
    ?assertEqual( sync([A4,A3]) , {[{a,2,[]}, {b,1,[v3]}, {c,1,[v3]}],[]} ),
    ?assertEqual( sync([X,A]) , {[{a,1,[v1]},{x,1,[]}],[]} ),
    ?assertEqual( sync([X,A]) , sync([A,X]) ),
    ?assertEqual( sync([X,A]) , sync([A,X]) ),
    ?assertEqual( sync([A,Y]) , {[{a,1,[v1]},{b,1,[v2]}],[]} ),
    ?assertEqual( sync([Y,A]) , sync([A,Y]) ),
    ?assertEqual( sync([Y,A]) , sync([A,Y]) ),
    ?assertEqual( sync([A,X]) , sync([X,A]) ),
    ?assertEqual( lww(F,A4) , sync([A4,lww(F,A4)]) ),
    ok.
%% Verifies the read-modify-write scenario: a write carrying the
%% version vector of an earlier read discards only the values that
%% read had seen (v1), keeping the concurrent write (v2) as a sibling.
syn_update_test() ->
    A0 = update(new([v1]), a), % Mary writes v1 w/o VV
    VV1 = join(A0), % Peter reads v1 with version vector (VV)
    A1 = update(new([v2]), A0, a), % Mary writes v2 w/o VV
    A2 = update(new(VV1,[v3]), A1, a), % Peter writes v3 with VV from v1
    ?assertEqual( VV1 , [{a,1}] ),
    ?assertEqual( A0 , {[{a,1,[v1]}],[]} ),
    ?assertEqual( A1 , {[{a,2,[v2,v1]}],[]} ),
    %% now A2 should only have v2 and v3, since v3 was causally newer than v1
    ?assertEqual( A2 , {[{a,3,[v3,v2]}],[]} ),
    ok.
%% Verifies event/3: bumping an existing actor's entry prepends the
%% new value, while a new actor gets a fresh sorted-in entry.
event_test() ->
    {A,_} = update(new([v1]),a),
    ?assertEqual( event(A,a,v2) , [{a,2,[v2,v1]}] ),
    ?assertEqual( event(A,b,v2) , [{a,1,[v1]}, {b,1,[v2]}] ),
    ok.
%% Verifies last/2 and lww/2: the winner is selected across entry
%% values and anonymous values, and lww keeps only the winner (in its
%% entry slot, or anonymously when an anonymous value wins), both with
%% a plain ordering and a timestamp-based ordering.
lww_last_test() ->
    F = fun (A,B) -> A =< B end,
    F2 = fun ({_,TS1}, {_,TS2}) -> TS1 =< TS2 end,
    X = {[{a,4,[5,2]},{b,1,[]},{c,1,[3]}],[]},
    Y = {[{a,4,[5,2]},{b,1,[]},{c,1,[3]}],[10,0]},
    Z = {[{a,4,[5,2]}, {b,1,[1]}], [3]},
    A = {[{a,4,[{5, 1002345}, {7, 1002340}]}, {b,1,[{4, 1001340}]}], [{2, 1001140}]},
    ?assertEqual( last(F,X) , 5 ),
    ?assertEqual( last(F,Y) , 10 ),
    ?assertEqual( lww(F,X) , {[{a,4,[5]},{b,1,[]},{c,1,[]}],[]} ),
    ?assertEqual( lww(F,Y) , {[{a,4,[]},{b,1,[]},{c,1,[]}],[10]} ),
    ?assertEqual( lww(F,Z) , {[{a,4,[5]},{b,1,[]}],[]} ),
    ?assertEqual( lww(F2,A) , {[{a,4,[{5, 1002345}]}, {b,1,[]}], []} ),
    ok.
%% Verifies reconcile/2: all values collapse into one anonymous value
%% produced by the winner function, with the causal history preserved.
reconcile_test() ->
    F1 = fun (L) -> lists:sum(L) end,
    F2 = fun (L) -> hd(lists:sort(L)) end,
    X = {[{a,4,[5,2]},{b,1,[]},{c,1,[3]}],[]},
    Y = {[{a,4,[5,2]},{b,1,[]},{c,1,[3]}],[10,0]},
    ?assertEqual( reconcile(F1,X) , {[{a,4,[]},{b,1,[]},{c,1,[]}],[10]} ),
    ?assertEqual( reconcile(F1,Y) , {[{a,4,[]},{b,1,[]},{c,1,[]}],[20]} ),
    ?assertEqual( reconcile(F2,X) , {[{a,4,[]},{b,1,[]},{c,1,[]}],[2]} ),
    ?assertEqual( reconcile(F2,Y) , {[{a,4,[]},{b,1,[]},{c,1,[]}],[0]} ),
    ok.
%% Verifies less/2: strict causal dominance in chains of updates,
%% irreflexivity, and false for concurrent clocks.
%% NOTE(review): A is built as update(new(v1),[a]) — the list [a] is
%% used as the actor id (ids may be any term, so this still works);
%% upstream uses update(new([v1]),a). Confirm this swap is intended.
less_test() ->
    A = update(new(v1),[a]),
    B = update(new(join(A),[v2]), a),
    B2 = update(new(join(A),[v2]), b),
    B3 = update(new(join(A),[v2]), z),
    C = update(new(join(B),[v3]), A, c),
    D = update(new(join(C),[v4]), B2, d),
    ?assert( less(A,B) ),
    ?assert( less(A,C) ),
    ?assert( less(B,C) ),
    ?assert( less(B,D) ),
    ?assert( less(B2,D) ),
    ?assert( less(A,D) ),
    ?assertNot( less(B2,C) ),
    ?assertNot( less(B,B2) ),
    ?assertNot( less(B2,B) ),
    ?assertNot( less(A,A) ),
    ?assertNot( less(C,C) ),
    ?assertNot( less(D,B2) ),
    ?assertNot( less(B3,D) ),
    ok.
%% Verifies equal/2: clocks are equal iff their causal histories
%% (ids, counters, and value counts per entry) match — the values
%% themselves are ignored.
equal_test() ->
    A = {[{a,4,[v5,v0]},{b,0,[]},{c,1,[v3]}], [v0]},
    B = {[{a,4,[v555,v0]}, {b,0,[]}, {c,1,[v3]}], []},
    C = {[{a,4,[v5,v0]},{b,0,[]}], [v6,v1]},
    % compare only the causal history
    ?assert( equal(A,B) ),
    ?assert( equal(B,A) ),
    ?assertNot( equal(A,C) ),
    ?assertNot( equal(B,C) ),
    ok.
%% Verifies size/1 counts entry values plus anonymous values.
%% The ?MODULE: qualifier avoids the clash with the auto-imported BIF.
size_test() ->
    ?assertEqual( 1 , ?MODULE:size(new([v1])) ),
    ?assertEqual( 5 , ?MODULE:size({[{a,4,[v5,v0]},{b,0,[]},{c,1,[v3]}],[v4,v1]}) ),
    ok.
%% Verifies ids/1 (actor ids of the entries) and values/1 (all entry
%% and anonymous values combined).
ids_values_test() ->
    A = {[{a,4,[v0,v5]},{b,0,[]},{c,1,[v3]}], [v1]},
    B = {[{a,4,[v0,v555]}, {b,0,[]}, {c,1,[v3]}], []},
    C = {[{a,4,[]},{b,0,[]}], [v1,v6]},
    ?assertEqual( ids(A) , [a,b,c] ),
    ?assertEqual( ids(B) , [a,b,c] ),
    ?assertEqual( ids(C) , [a,b] ),
    ?assertEqual( lists:sort(values(A)) , [v0,v1,v3,v5] ),
    ?assertEqual( lists:sort(values(B)) , [v0,v3,v555] ),
    ?assertEqual( lists:sort(values(C)) , [v1,v6] ),
    ok.
%% map/2 applies a fun to every stored value, anonymous values
%% included, leaving the causal history untouched.
map_test() ->
    Sparse = {[{a,4,[]},{b,0,[]},{c,1,[]}],[10]},
    Dense  = {[{a,4,[5,0]},{b,0,[]},{c,1,[2]}],[20,10]},
    Double = fun (V) -> V*2 end,
    ?assertEqual( {[{a,4,[]},{b,0,[]},{c,1,[]}],[20]} , map(Double,Sparse) ),
    ?assertEqual( {[{a,4,[10,0]},{b,0,[]},{c,1,[4]}],[40,20]} , map(Double,Dense) ),
    ok.
-endif.
%% ``The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved via the world wide web at http://www.erlang.org/.
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%%
%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
%% AB. All Rights Reserved.''
%%
%% Modified: 17 Jan 2007 by <NAME> <<EMAIL>>
%%
%% Erlang token scanning functions of io library.
%% @private
%% @hidden
-module(wrangler_scan).
-export([format_error/1, reserved_word/1, string/1, string/2,
string/4, tokens/3]).
-compile(export_all).
-import(lists, [member/2, reverse/1]).
-define(DEFAULT_TABWIDTH, 8).
-define(DEFAULT_FILEFORMAT, unix).
%% @doc Render a scanner error descriptor as printable iodata for
%% inclusion in diagnostics (the standard format_error/1 contract).
format_error({string, Quote, Head}) ->
    %% Unterminated string or quoted atom: name the kind of quoted
    %% thing and echo the first characters after the opening quote.
    Kind = string_thing(Quote),
    ["unterminated " ++ Kind ++ " starting with " ++
         io_lib:write_string(Head, Quote)];
format_error({illegal, Type}) ->
    io_lib:fwrite("illegal ~w", [Type]);
format_error(char) ->
    "unterminated character";
format_error(scan) ->
    "premature end";
format_error({base, Base}) ->
    io_lib:fwrite("illegal base '~w'", [Base]);
format_error(float) ->
    "bad float";
format_error(Other) ->
    %% Fallback: print the raw error term.
    io_lib:write(Other).
%% @doc Describe the kind of quoted token a quote character opens,
%% for use in "unterminated ..." error messages.
string_thing(QuoteChar) ->
    case QuoteChar of
        $' -> "atom";
        _  -> "string"
    end.
%% string(CharList, StartPos)
%% Takes a list of characters and tries to tokenise them.
%%
%% Returns:
%% {ok,[Tok]}
%% {error,{ErrorPos,?MODULE,What},EndPos}
%% @doc Tokenise Cs starting at position {1, 1}; delegates to
%% string/2, which supplies the default tab width and file format.
string(Cs) ->
    string(Cs, {1, 1}).
string(Cs, {Line, Col}) -> string(Cs, {Line, Col}, ?DEFAULT_TABWIDTH, ?DEFAULT_FILEFORMAT).
%% @doc Tokenise the character list Cs starting at {Line, Col}.
%% TabWidth is the number of columns a TAB advances the position;
%% FileFormat (unix | dos | mac) controls newline handling.
%% Returns {ok, Toks, EndPos} or {error, {ErrorPos, ?MODULE, What}, EndPos}
%% via the scan loop's done/7.
string(Cs, {Line, Col}, TabWidth, FileFormat)
when is_list(Cs), is_integer(Line), is_integer(Col), is_integer(TabWidth) ->
% %% Debug replacement line for chopping string into 1-char segments
% scan([], [], [], Pos, Cs, []).
%% State=[] marks "scanning from a string" for more/9 and done/7.
scan(Cs, [], [], {Line, Col}, [], [],TabWidth,FileFormat).
%% tokens(Continuation, CharList, StartPos) ->
%% {done, {ok, [Tok], EndPos}, Rest} |
%% {done, {error,{ErrorPos,?MODULE,What}, EndPos}, Rest} |
%% {more, Continuation'}
%% This is the main function into the re-entrant scanner.
%%
%% The continuation has the form:
%% {RestChars,ScanStateStack,ScannedTokens,
%% CurrentPos,ContState,ErrorStack,ContFunArity5}
%% definitely should separate {Line, Col} and TabWidth;; HL.
%% @doc Re-entrant entry point for scanning from an I/O server.
%% The first argument is [] on the first call, or the continuation
%% tuple {Cs, Stack, Toks, Pos, State, Errors, TabWidth, FileFormat, Fun}
%% returned in a previous {more, Cont}. Chars is the new input chunk
%% (or the atom eof). Returns {done, Result, Rest} or {more, Cont}.
tokens([], Chars, {{Line, Col}, TabWidth, FileFormat}) ->
tokens({[], [], [], {Line, Col}, io, [], TabWidth, FileFormat, fun scan/8}, Chars, {{Line, Col}, TabWidth, FileFormat});
%% NOTE(review): this clause matches an 8-element continuation tuple,
%% but every continuation built here is a 9-tuple (it omits the Errors
%% element) -- it looks unreachable; confirm against callers before use.
tokens({Cs, _Stack, _Toks, {Line, Col}, eof, TabWidth, FileFormat, _Fun}, eof, {_, TabWidth, FileFormat}) ->
{done, {eof, {Line, Col}}, Cs};
%% eof received: append the eof marker and let the saved fun finish.
tokens({Cs, Stack, Toks, {Line, Col}, _State, Errors,TabWidth,FileFormat,Fun}, eof, {_, TabWidth, FileFormat}) ->
Fun(Cs ++ eof, Stack, Toks, {Line, Col}, eof, Errors, TabWidth,FileFormat);
%% More characters: resume the saved fun where scanning stopped.
tokens({Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth,FileFormat,Fun},Chars, {_, TabWidth,FileFormat}) ->
Fun(Cs ++ Chars, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat).
%% Scan loop.
%%
%% The scan_*/8 and sub_scan_*/8 functions make tail-recursive calls
%% between themselves to change state. State data is kept on the Stack.
%% Results are passed on the Stack and on the stream (Cs). The variable
%% State in this loop is not the scan loop state, but the state for
%% instream handling by more/8 and done/6. The variable Stack is not
%% always a stack, it is just stacked state data for the scan loop, and
%% the variable Errors is a reversed list of scan error {Error,Pos} tuples.
%%
%% All the scan_*/8 functions have the same arguments (in the same order),
%% to keep the tail recursive calls (jumps) fast.
%%
%% When more data is needed from the stream, the tail recursion loop is
%% broken by calling more/8 that either returns to the I/O-server to
%% get more data or fetches it from a string, or by calling done/6 when
%% scanning is done.
%%
%% The last argument to more/8 is a fun to jump back to with more data
%% to continue scanning where it was interrupted.
%%
%% more/8 and done/6 handles scanning from I/O-server (Stream) or from String.
%%
%% String
%% @doc Request more input for the scan loop. The State argument
%% distinguishes string scanning ([] -> append eof and mark eos) from
%% I/O-server scanning (io -> return a {more, Continuation} tuple).
%% Asking for more after end-of-stream/eof is a programming error.
more(Cs, Stack, Toks, {Line, Col}, eos, Errors, _TabWidth, _FileFormat, Fun) ->
erlang:error(badstate, [Cs, Stack, Toks, {Line, Col}, eos, Errors, Fun]);
% %% Debug clause for chopping string into 1-char segments
% more(Cs, Stack, Toks, Pos, [H|T], Errors, Fun) ->
% Fun(Cs++[H], Stack, Toks, Pos, T, Errors);
%% String source exhausted: feed eof and continue in the eos state.
more(Cs, Stack, Toks, {Line, Col}, [], Errors, TabWidth, FileFormat, Fun) ->
Fun(Cs ++ eof, Stack, Toks, {Line, Col}, eos, Errors, TabWidth,FileFormat);
%% Stream
more(Cs, Stack, Toks, {Line, Col}, eof, Errors, TabWidth, FileFormat, Fun) ->
erlang:error(badstate, [Cs, Stack, Toks, {Line, Col}, eof, Errors, TabWidth, FileFormat, Fun]);
%% I/O-server source: hand back a continuation holding Fun so
%% tokens/3 can resume exactly where scanning stopped.
more(Cs, Stack, Toks, {Line, Col}, io, Errors, TabWidth, FileFormat, Fun) ->
{more, {Cs, Stack, Toks, {Line, Col}, io, Errors,TabWidth, FileFormat, Fun}}.
%% String
%% @doc Finish scanning and build the result term. The shape of the
%% result depends on the source (eos/[] = string, io/eof = stream)
%% and on whether any errors were accumulated. Toks is reversed here;
%% Errors is a reversed list, so lists:last/1 picks the FIRST error.
%% Clause order is significant.
%% String source, clean end.
done(eof, [], Toks, {Line, Col}, eos, _TabWidth,_FileFormat) ->
{ok, reverse(Toks), {Line, Col}};
%% String source, errors were recorded: report the earliest one.
done(eof, Errors, _Toks, {Line, Col}, eos, _TabWidth,_FileFormat) ->
{Error, ErrorPos} = lists:last(Errors),
{error, {ErrorPos, ?MODULE, Error}, {Line, Col}};
%% Characters remain after eos: keep scanning them out.
done(Cs, Errors, Toks, {Line, Col}, eos, TabWidth,FileFormat) ->
scan(Cs, [], Toks, {Line, Col}, eos, Errors, TabWidth,FileFormat);
%% Debug clause for chopping string into 1-char segments
%% done(Cs, Errors, Toks, Pos, [H|T]) ->
%% scan(Cs++[H], [], Toks, Pos, T, Errors);
%% String source not yet marked eos: append eof and finish.
done(Cs, Errors, Toks, {Line, Col}, [], TabWidth,FileFormat) ->
scan(Cs ++ eof, [], Toks, {Line, Col}, eos, Errors, TabWidth,FileFormat);
%% Stream
%% A complete form must end in a dot token to be {done, {ok, ...}}.
done(Cs, [], [{dot, _} | _] = Toks, {Line, Col}, io, _TabWidth,_FileFormat) ->
{done, {ok, reverse(Toks), {Line, Col}}, Cs};
%% Tokens but no trailing dot: premature end of the form.
done(Cs, [], [_ | _], {Line, Col}, io, _TabWidth,_FileFormat) ->
{done,
{error, {{Line, Col}, ?MODULE, scan}, {Line, Col}}, Cs};
done(Cs, [], [], {Line, Col}, eof, _TabWidth,_FileFormat) ->
{done, {eof, {Line, Col}}, Cs};
done(Cs, [], [{dot, _} | _] = Toks, {Line, Col}, eof, _TabWidth,_FileFormat) ->
{done, {ok, reverse(Toks), {Line, Col}}, Cs};
done(Cs, [], _Toks, {Line, Col}, eof, _TabWidth,_FileFormat) ->
{done,
{error, {{Line, Col}, ?MODULE, scan}, {Line, Col}}, Cs};
%% Errors recorded while streaming: report the earliest one.
done(Cs, Errors, _Toks, {Line, Col}, io, _TabWidth,_FileFormat) ->
{Error, ErrorPos} = lists:last(Errors),
{done, {error, {ErrorPos, ?MODULE, Error}, {Line, Col}},
Cs};
done(Cs, Errors, _Toks, {Line, Col}, eof, _TabWidth,_FileFormat) ->
{Error, ErrorPos} = lists:last(Errors),
{done, {error, {ErrorPos, ?MODULE, Error}, {Line, Col}},
Cs}.
%% The actual scan loop
%% Stack is assumed to be [].
%% @doc The main tokeniser loop.
%% scan(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat)
%%   Cs         - remaining input characters (may end in the atom eof)
%%   Stack      - per-token scratch data; assumed [] on entry here
%%   Toks       - tokens scanned so far, in reverse order
%%   {Line,Col} - current source position
%%   State      - instream-handling state for more/9 and done/7
%%   Errors     - reversed list of {Error, Pos} tuples
%%   TabWidth   - columns a TAB advances the position
%%   FileFormat - unix | dos | mac; controls CR handling
%% NOTE: clause order is significant -- a longer operator must be
%% matched before any of its prefixes (e.g. "=:=" before "=:" before "=").
scan([$\r|Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) -> %CR
case FileFormat of
%% On mac format a bare CR terminates the line.
mac ->scan(Cs, Stack, Toks, {Line+1, 1}, State, Errors, TabWidth,FileFormat);
_ ->
scan(Cs, Stack, Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat)
end;
scan([$\n | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) -> % Newline - skip
scan(Cs, Stack, Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
%%Begin of Adding by Huiqing
%% A TAB advances the column by the configured TabWidth.
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C == $\t ->
scan(Cs, Stack, Toks, {Line, Col + TabWidth}, State, Errors, TabWidth,FileFormat);
%% End of adding by Huiqing
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $\000, C =<$\s -> % Control chars - skip
scan(Cs, Stack, Toks, {Line, Col + 1}, State, Errors, TabWidth,FileFormat); %% This is problematic; not all chars occupy one space.
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $\200, C =< $\240 -> % Control chars -skip
scan(Cs, Stack, Toks, {Line, Col + 1}, State, Errors, TabWidth,FileFormat);
%% Lower-case (and Latin-1 lower-case) letter starts an unquoted atom.
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $a, C =< $z -> % Atoms
sub_scan_name(Cs, [C, fun scan_atom/8], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $\337, C =< $\377, C /= $\367 -> % Atoms
sub_scan_name(Cs, [C, fun scan_atom/8], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
%% Upper-case (and Latin-1 upper-case) letter or '_' starts a variable.
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $A,C =< $Z -> % Variables
sub_scan_name(Cs, [C, fun scan_variable/8], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $\300, C =< $\336, C /= $\327 -> % Variables
sub_scan_name(Cs, [C, fun scan_variable/8], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan([$_ | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) -> % _Variables
sub_scan_name(Cs, [$_, fun scan_variable/8], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan([C | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $0, C =< $9 -> % Numbers
scan_number(Cs, [C], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan([$$ | Cs], Stack, Toks, {Line, Col}, State,
Errors, TabWidth,FileFormat) -> % Character constant
scan_char(Cs, Stack, Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
%% The opening quote's position is pushed so the token can carry it.
scan([$' | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) -> % Quoted atom
scan_qatom(Cs, [$', {Line, Col}], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan([$" | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) -> % String
scan_string(Cs, [$", {Line, Col}], Toks, {Line, Col+1},State, Errors, TabWidth,FileFormat);
scan([$% | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) -> % Comment
scan_comment(Cs, Stack, Toks, {Line, Col+2}, State,Errors, TabWidth,FileFormat);
%% Punctuation characters and operators, first recognise multiples.
%% Clauses are grouped by first character (a short with the same head has
%% to come after a longer).
%%
%% << <- <=
scan("<<" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'<<', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("<-" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'<-', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("<=" ++ Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'<=', {Line, Col}} | Toks],{Line, Col + 2}, State, Errors, TabWidth,FileFormat);
%% A lone "<" at the chunk end may be an operator prefix: fetch more.
scan("<" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan/8 );
%% >> >=
scan(">>" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'>>', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan(">=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'>=', {Line, Col}} | Toks],{Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan(">" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan/8);
%% -> --
scan("->" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'->', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("--" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'--', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("-" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% ++
scan("++" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'++', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("+" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan/8);
%% =:= =/= =< ==
scan("=:=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'=:=', {Line, Col}} | Toks],{Line, Col + 3}, State, Errors, TabWidth,FileFormat);
scan("=:" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
scan("=/=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'=/=', {Line, Col}} | Toks], {Line, Col + 3}, State, Errors, TabWidth,FileFormat);
scan("=/" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
scan("=<" ++ Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'=<', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("=>" ++ Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'=>', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan(":=" ++ Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{':=', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("==" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'==', {Line, Col}} | Toks],{Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("=" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% /=
scan("/=" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'/=', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("/" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% ||
scan("||" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'||', {Line, Col}} | Toks], {Line, Col + 2}, State, Errors, TabWidth,FileFormat);
scan("|" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan/8);
%% :-
scan(":-" ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{':-', {Line, Col}} | Toks],{Line, Col + 2}, State, Errors, TabWidth,FileFormat);
%% :: for typed records
scan("::"++Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{'::',{Line, Col}}|Toks], {Line, Col+2}, State, Errors, TabWidth,FileFormat);
scan(":" = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan/8);
%% ... .. and a possible prefix at chunk end
scan("..."++Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
scan(Cs, Stack, [{'...', {Line, Col}}|Toks], {Line, Col+3}, State, Errors, TabWidth, FileFormat);
scan(".."=Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
scan(".."++Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
scan(Cs, Stack, [{'..', {Line, Col}}|Toks], {Line, Col+2}, State, Errors, TabWidth, FileFormat);
scan("."=Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan/8);
%% Full stop and plain '.'
scan("." ++ Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan_dot(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
%% All single-char punctuation characters and operators (except '.')
scan([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, [{list_to_atom([C]), {Line, Col}} | Toks], {Line, Col + 1}, State, Errors, TabWidth,FileFormat);
%%
scan([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,fun scan/8);
%% Cs is the atom eof here: finish up.
scan(Eof, _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
done(Eof, Errors, Toks, {Line, Col}, State, TabWidth,FileFormat).
%% @doc Finish scanning an unquoted atom whose characters were
%% collected by sub_scan_name/8 into Name. Reserved words become
%% their own token tag; other names become {atom, Pos, Atom} tokens.
%% NOTE(review): list_to_atom/1 on scanner input can grow the atom
%% table without bound for untrusted source text -- inherent to this
%% scanner design; confirm acceptable for the intended callers.
scan_atom(Cs, Name, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
case catch list_to_atom(Name) of
Atom when is_atom(Atom) ->
case reserved_word(Atom) of
true ->
scan(Cs, [], [{Atom, {Line, Col}} | Toks],
{Line, Col + length(Name)}, State, Errors, TabWidth,FileFormat);
false ->
scan(Cs, [], [{atom, {Line, Col}, Atom} | Toks],
{Line, Col + length(Name)}, State, Errors, TabWidth,FileFormat)
end;
_ ->
%% Conversion failed: record the error and keep scanning.
scan(Cs, [], Toks, {Line, Col}, State,
[{{illegal, atom}, {Line, Col}} | Errors],TabWidth,FileFormat)
end.
%% @doc Finish scanning a variable whose characters were collected
%% by sub_scan_name/8 into Name; emits a {var, Pos, Atom} token.
%% The name is converted to an atom only for the token payload.
scan_variable(Cs, Name, Toks, {Line, Col}, State,
Errors, TabWidth,FileFormat) ->
case catch list_to_atom(Name) of
A when is_atom(A) ->
scan(Cs, [], [{var, {Line, Col}, A} | Toks],
{Line, Col + length(Name)}, State, Errors, TabWidth,FileFormat);
_ ->
%% Conversion failed: record the error and keep scanning.
scan(Cs, [], Toks, {Line, Col}, State,
[{{illegal, var}, {Line, Col}} | Errors], TabWidth,FileFormat)
end.
%% Scan for a name - unqouted atom or variable, after the first character.
%%
%% Stack argument: return fun.
%% Returns the scanned name on the stack, unreversed.
%%
%% @doc Collect the remaining characters of an unquoted atom or
%% variable. Stack holds the return fun (scan_atom/8 or
%% scan_variable/8) followed by the characters seen so far; the
%% reversed stack therefore yields [Fun | Name] with Name in order.
sub_scan_name([C | Cs] = Css, Stack, Toks, {Line, Col},State, Errors,TabWidth,FileFormat) ->
case name_char(C) of
true ->
sub_scan_name(Cs, [C | Stack], Toks, {Line, Col}, State,Errors, TabWidth,FileFormat);
false ->
%% First non-name character: hand the collected name back.
[Fun | Name] = reverse(Stack),
Fun(Css, Name, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
end;
sub_scan_name([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% Chunk exhausted mid-name: ask the instream handler for more.
more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun sub_scan_name/8);
sub_scan_name(Eof, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% End of input terminates the name.
[Fun | Name] = reverse(Stack),
Fun(Eof, Name, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat).
%% @doc True if C may appear after the first character of an
%% unquoted atom or variable: ASCII letters, Latin-1 letters
%% (excluding the multiplication sign 215 and division sign 247),
%% digits, underscore and at-sign.
name_char(C) ->
    (C >= $a andalso C =< $z) orelse
    (C >= $A andalso C =< $Z) orelse
    (C >= $0 andalso C =< $9) orelse
    C =:= $_ orelse
    C =:= $@ orelse
    (C >= $\337 andalso C =< $\377 andalso C =/= $\367) orelse
    (C >= $\300 andalso C =< $\336 andalso C =/= $\327).
%% @doc Scan a character constant, i.e. the character(s) after '$'.
%% A backslash starts an escape sequence handled via sub_scan_escape/8
%% with scan_char_escape/8 as the return fun.
scan_char([$\\ | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
sub_scan_escape(Cs, [fun scan_char_escape/8, $\\|Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
%% A literal newline after '$' is the newline character itself.
scan_char([$\n | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, [], [{char, {Line, Col}, $\n} | Toks], {Line + 1, Col}, State, Errors, TabWidth,FileFormat);
%% '$ ' is the space character (code 32).
scan_char([$ | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, [], [{char, {Line, Col}, 32} | Toks], {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_char([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan_char/8);
%% Any other character (or eof) is handled by scan_char_escape/8.
scan_char(Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
scan_char_escape(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat).
%% @doc Emit the char token for a (possibly escaped) character
%% constant. The atom nl on the stream means "backslash newline"
%% was scanned (see sub_scan_escape/8).
scan_char_escape([nl | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan(Cs, [], [{char, {Line, Col}, $\n} | Toks], {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
scan_char_escape([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% If a backslash was stacked, the token value keeps the textual
%% "$\C" form (as an atom) rather than the decoded character.
C1 = case Stack of [$\\|_] ->
list_to_atom("$\\"++[C]);
_ ->
C
end,
scan(Cs, [], [{char, {Line, Col-1}, C1} | Toks],{Line, Col + 1}, State, Errors, TabWidth,FileFormat);
%% eof inside a character constant is an error.
scan_char_escape(Eof, _Stack, _Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
done(Eof, [{char, {Line, Col}} | Errors], [], {Line, Col + 1}, State, TabWidth,FileFormat).
%% @doc Scan the body of a double-quoted string. Stack starts as
%% [$", StartPos] (pushed by scan/8); characters accumulate on top,
%% so reversing yields [StartPos, $" | Contents].
scan_string([$" | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat) ->
[StartPos, $" | S] = reverse(Stack),
scan(Cs, [], [{string, StartPos, S} | Toks],{Line, Col+1}, State, Errors, TabWidth,FileFormat);
%% CR inside a string: line break on mac format, plain char otherwise.
scan_string([$\r | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
case FileFormat of
mac -> scan_string(Cs, [$\r | Stack], Toks, {Line + 1, 1},
State, Errors, TabWidth,FileFormat);
_ -> scan_string(Cs, [$\r | Stack], Toks, {Line, Col+1},
State, Errors, TabWidth,FileFormat)
end;
scan_string([$\n | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
scan_string(Cs, [$\n | Stack], Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
%% Backslash starts an escape; scan_string_escape/8 resumes here.
scan_string([$\\ | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
sub_scan_escape( Cs, [fun scan_string_escape/8, $\\ | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_string([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C==$\t ->
scan_string(Cs, [C | Stack], Toks, {Line, Col+TabWidth}, State,Errors, TabWidth,FileFormat);
scan_string([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan_string(Cs, [C | Stack], Toks, {Line, Col+1}, State,Errors, TabWidth,FileFormat);
scan_string([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan_string/8);
%% eof inside a string: report "unterminated string" with the first
%% 16 characters of its contents for context.
scan_string(Eof, Stack, _Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
[StartPos, $" | S] = reverse(Stack),
SS = string:substr(S, 1, 16),
done(Eof, [{{string, $", SS}, StartPos} | Errors], [],
{Line, Col}, State, TabWidth,FileFormat).
%% @doc Resume string scanning after an escape sequence. The atom nl
%% on the stream means "backslash newline": store an actual newline.
scan_string_escape([nl | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
scan_string(Cs, [$\n | Stack], Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
scan_string_escape([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan_string(Cs, [C| Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
%% eof inside an escape: report the unterminated string.
scan_string_escape(Eof, Stack, _Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
[StartPos, $" | S] = reverse(Stack),
SS = string:substr(S, 1, 16),
done(Eof, [{{string, $", SS}, StartPos} | Errors], [],
{Line, Col + length(S) + 2}, State, TabWidth,FileFormat).
%% @doc Scan the body of a single-quoted atom. Stack starts as
%% [$', StartPos]; characters accumulate on top, so reversing yields
%% [StartPos, $' | Contents].
scan_qatom([$' | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
[StartPos, $' | S] = reverse(Stack),
case catch list_to_atom(S) of
A when is_atom(A) ->
scan(Cs, [], [{atom, StartPos, A} | Toks],{Line, Col + 1}, State, Errors, TabWidth,FileFormat);
_ ->
scan(Cs, [], Toks, {Line, Col}, State,[{{illegal, atom}, StartPos} | Errors], TabWidth,FileFormat)
end;
%% CR inside a quoted atom: line break on mac format only.
scan_qatom([$\r|Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
case FileFormat of
mac -> scan_qatom(Cs,[$\r|Stack], Toks, {Line+1, 1}, State, Errors, TabWidth,FileFormat);
_ -> scan_qatom(Cs,[$\r|Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat)
end;
scan_qatom([$\n | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
scan_qatom(Cs, [$\n | Stack], Toks, {Line + 1, 1},State, Errors, TabWidth,FileFormat);
%% Escape handling deliberately disabled: backslashes are kept
%% verbatim in the atom text (see the commented-out clause below).
%% scan_qatom([$\\ | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% sub_scan_escape(Cs, [fun scan_qatom_escape/8, $\\ | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_qatom([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C==$\t ->
scan_qatom(Cs, [C | Stack], Toks, {Line, Col+TabWidth}, State, Errors, TabWidth,FileFormat);
scan_qatom([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan_qatom(Cs, [C | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
scan_qatom([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_qatom/8);
%% eof inside a quoted atom: report it unterminated with context.
scan_qatom(Eof, Stack, _Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
[StartPos, $' | S] = reverse(Stack),
SS = string:substr(S, 1, 16),
done(Eof, [{{string, $', SS}, StartPos} | Errors], [],{Line, Col}, State, TabWidth,FileFormat).
%% @doc Resume quoted-atom scanning after an escape sequence.
%% Currently unreachable from scan_qatom/8 (the escape clause there
%% is commented out) but kept for symmetry with scan_string_escape/8.
scan_qatom_escape([nl | Cs], Stack, Toks, {Line, _Col}, State, Errors, TabWidth,FileFormat) ->
scan_qatom(Cs, [$\n | Stack], Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
scan_qatom_escape([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan_qatom(Cs, [C | Stack], Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat);
%% eof inside an escape: report the unterminated quoted atom.
scan_qatom_escape(Eof, Stack, _Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
[StartPos, $' | S] = reverse(Stack),
SS = string:substr(S, 1, 16),
done(Eof, [{{string, $', SS}, StartPos} | Errors], [],
{Line, Col}, State, TabWidth,FileFormat).
%% Scan for a character escape sequence, in character literal or string.
%% A string is a syntactical sugar list (e.g "abc")
%% or a quoted atom (e.g 'EXIT').
%%
%% Stack argument: return fun.
%% Returns the resulting escape character on the stream.
%% The return atom 'nl' means that the escape sequence Backslash Newline
%% was found, i.e an actual Newline in the input.
%%
%% \<1-3> octal digits
%% @doc Scan a character escape sequence (after the backslash) in a
%% character constant or string. Stack holds [ReturnFun | Rest].
%% The resulting character (or the atom nl for backslash-newline) is
%% pushed back on the stream and handed to the return fun.
%% Clause order is significant: 3-digit octal before 2 before 1, and
%% the exact-remaining-chunk clauses before their longer matches.
%% \<1-3> octal digits: inside a string (a $" lies under the return
%% fun on the stack) defer to the return fun; otherwise emit a char
%% token keeping the textual "$\OOO" form as an atom.
sub_scan_escape([O1, O2, O3 | Cs], [Fun|Stack], Toks,
{Line, Col}, State, Errors, TabWidth,FileFormat)
when O1 >= $0, O1 =< $7, O2 >= $0, O2 =< $7, O3 >= $0,
O3 =< $7 ->
case reverse(Stack) of
[_,$"|_]->
Fun([O1, O2, O3 | Cs], Stack, Toks, {Line, Col+2}, State,
Errors, TabWidth,FileFormat);
_ ->
C1=list_to_atom("$\\"++[O1, O2, O3]),
scan(Cs, [], [{char, {Line, Col-2}, C1} | Toks],{Line, Col + 3}, State, Errors, TabWidth,FileFormat)
end;
%% Exactly two octal digits left in the chunk: might be a 3-digit
%% escape split across chunks, so fetch more first.
sub_scan_escape([O1, O2] = Cs, Stack, Toks, {Line, Col},
State, Errors, TabWidth,FileFormat)
when O1 >= $0, O1 =< $7, O2 >= $0, O2 =< $7 ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
fun sub_scan_escape/8);
sub_scan_escape([O1, O2 | Cs], [_Fun | _Stack], Toks,
{Line, Col}, State, Errors, TabWidth,FileFormat)
when O1 >= $0, O1 =< $7, O2 >= $0, O2 =< $7 ->
C1=list_to_atom("$\\"++[O1, O2]),
scan(Cs, [], [{char, {Line, Col-2}, C1} | Toks],{Line, Col + 2}, State, Errors, TabWidth,FileFormat);
%%Val = O1 * 8 + O2 - 9 * $0,
%% Fun([O1,O2 | Cs], Stack, Toks, {Line, Col+1}, State,
%% Errors, TabWidth,FileFormat);
%% A single trailing octal digit: may continue in the next chunk.
sub_scan_escape([O1] = Cs, Stack, Toks, {Line, Col},
State, Errors, TabWidth,FileFormat)
when O1 >= $0, O1 =< $7 ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
fun sub_scan_escape/8);
sub_scan_escape([O1 | Cs], [Fun | Stack], Toks,
{Line, Col}, State, Errors, TabWidth,FileFormat)
when O1 >= $0, O1 =< $7 ->
%% Val = O1 - $0,
Fun([O1 | Cs], Stack, Toks, {Line, Col}, State,
Errors, TabWidth,FileFormat);
%% \^X -> CTL-X
sub_scan_escape([$^, C | Cs], [Fun | Stack], Toks,
{Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% Val = C band 31,
Fun([C | Cs], Stack, Toks, {Line, Col}, State,
Errors, TabWidth,FileFormat);
sub_scan_escape([$^] = Cs, Stack, Toks, {Line, Col},
State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
fun sub_scan_escape/8);
sub_scan_escape([$^ | Eof], [Fun | Stack], Toks,
{Line, Col}, State, Errors, TabWidth,FileFormat) ->
Fun(Eof, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
%% \NL (backslash newline)
sub_scan_escape([$\n | Cs], [Fun | Stack], Toks,
{Line, Col}, State, Errors, TabWidth,FileFormat) ->
Fun([nl | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
%% \X - familiar escape sequences
sub_scan_escape([C | Cs], [Fun | Stack], Toks,
{Line, Col}, State, Errors, TabWidth,FileFormat) ->
%%Val = escape_char(C),
Fun([C | Cs], Stack, Toks, {Line, Col}, State,
Errors, TabWidth,FileFormat);
%%
sub_scan_escape([], Stack, Toks, {Line, Col}, State,
Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,
fun sub_scan_escape/8);
sub_scan_escape(Eof, [Fun | Stack], Toks, {Line, Col},
State, Errors, TabWidth,FileFormat) ->
Fun(Eof, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat).
%% @doc Map the character following a backslash to the control
%% character it denotes; any unrecognised escape maps to itself.
escape_char(EscChar) ->
    case EscChar of
        $n -> $\n;          % line feed
        $r -> $\r;          % carriage return
        $t -> $\t;          % horizontal tab
        $v -> $\v;          % vertical tab
        $b -> $\b;          % backspace
        $f -> $\f;          % form feed
        $e -> $\e;          % escape
        $s -> $\s;          % space
        $d -> $\d;          % delete
        Other -> Other      % not a special escape: unchanged
    end.
scan_number([$., C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $0, C =< $9 ->
scan_fraction(Cs, [C, $. | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_number([$.] = Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more(Cs, Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,fun scan_number/8);
scan_number([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $0, C =< $9 ->
scan_number(Cs, [C | Stack], Toks, {Line, Col}, State,Errors, TabWidth,FileFormat);
scan_number([$# | Cs], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
case catch list_to_integer(reverse(Stack)) of
B when is_integer(B), B >= 2, B =< 1 + $Z - $A + 10 ->
scan_based_int(Cs, [B], Toks, {Line, Col}, State,Errors, TabWidth,FileFormat);
B ->
scan(Cs, [], Toks, {Line, Col}, State,[{{base, B}, {Line, Col}} | Errors], TabWidth,FileFormat)
end;
scan_number([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_number/8);
scan_number(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
case catch list_to_integer(reverse(Stack)) of
N when is_integer(N) ->
scan(Cs, [], [{integer, {Line, Col}, reverse(Stack)} | Toks],
{Line, Col + length(Stack)}, State, Errors, TabWidth,FileFormat);
_ ->
scan(Cs, [], Toks, {Line, Col}, State,
[{{illegal, integer}, {Line, Col}} | Errors], TabWidth,FileFormat)
end.
scan_based_int([C | Cs], [B | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $0, C =< $9, C < $0 + B ->
scan_based_int(Cs, [B, C | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_based_int([C | Cs], [B | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $A, B > 10, C < $A + B - 10 ->
scan_based_int(Cs, [B, C | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_based_int([C | Cs], [B | Stack], Toks, {Line, Col},State, Errors,TabWidth,FileFormat)
when C >= $a, B > 10, C < $a + B - 10 ->
scan_based_int(Cs, [B, C | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_based_int([], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,fun scan_based_int/8);
scan_based_int(Cs, [B | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
case catch erlang:list_to_integer(reverse(Stack), B) of
N when is_integer(N) ->
scan(Cs, [], [{integer, {Line, Col}, integer_to_list(B)++[$#| reverse(Stack)]} | Toks], %% "replaced 'N' with 'reverse(Stack)'";
{Line, Col + length(integer_to_list(B))+1+length(Stack)}, State, Errors, TabWidth,FileFormat);
_ ->
scan(Cs, [], Toks, {Line, Col}, State,
[{{illegal, integer}, {Line, Col}} | Errors], TabWidth,FileFormat)
end.
%% Scan the fractional digits of a float literal; the stack holds the
%% characters seen so far in reverse order.
scan_fraction([C | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat)
when C >= $0, C =< $9 ->
scan_fraction(Cs, [C | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_fraction([$e | Cs], Stack, Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
%% Exponent marker; normalised to upper-case $E on the stack.
scan_exponent_sign(Cs, [$E | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_fraction([$E | Cs], Stack, Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
scan_exponent_sign(Cs, [$E | Stack], Toks, {Line, Col},State, Errors, TabWidth,FileFormat);
scan_fraction([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% Input exhausted mid-token: request more characters and re-enter here.
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_fraction/8);
scan_fraction(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% End of the literal (no exponent): validate and emit a float token,
%% or record an {illegal, float} error if it does not parse.
case catch list_to_float(reverse(Stack)) of
F when is_float(F) ->
scan(Cs, [], [{float, {Line, Col}, F} | Toks],
{Line, Col + length(Stack)}, State, Errors, TabWidth,FileFormat);
_ ->
scan(Cs, [], Toks, {Line, Col}, State,
[{{illegal, float}, {Line, Col}} | Errors], TabWidth,FileFormat)
end.
%% Scan the optional sign directly after the exponent marker.
scan_exponent_sign([$+ | Cs], Stack, Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
scan_exponent(Cs, [$+ | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_exponent_sign([$- | Cs], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
scan_exponent(Cs, [$- | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_exponent_sign([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors, TabWidth, FileFormat, fun scan_exponent_sign/8);
scan_exponent_sign(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% No explicit sign: fall straight through to the exponent digits.
scan_exponent(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat).
%% Scan the exponent digits and finally emit the float token.
scan_exponent([C | Cs], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat)
when C >= $0, C =< $9 ->
scan_exponent(Cs, [C | Stack], Toks, {Line, Col}, State, Errors, TabWidth,FileFormat);
scan_exponent([], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_exponent/8);
scan_exponent(Cs, Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
%% Token complete: validate and emit, or record an {illegal, float} error.
case catch list_to_float(reverse(Stack)) of
F when is_float(F) ->
scan(Cs, [], [{float, {Line, Col}, F} | Toks],
{Line, Col + length(Stack)}, State, Errors, TabWidth,FileFormat);
_ ->
scan(Cs, [], Toks, {Line, Col}, State,
[{{illegal, float}, {Line, Col}} | Errors], TabWidth,FileFormat)
end.
%% Skip the rest of a %-comment, tracking line/column positions.
%% FileFormat (dos | mac | unix) decides whether $\r terminates the line.
scan_comment([$\r| Cs], Stack, Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
case FileFormat of
mac ->
%% On classic Mac files $\r alone ends the line.
scan(Cs, Stack, Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
_ ->
%% On dos/unix the $\r is just another column; $\n ends the line.
scan(Cs, Stack, Toks, {Line, Col+1}, State, Errors, TabWidth,FileFormat)
end;
scan_comment([$\n| Cs], Stack, Toks, {Line, _Col},State, Errors, TabWidth,FileFormat) ->
scan(Cs, Stack, Toks, {Line + 1, 1}, State, Errors, TabWidth,FileFormat);
scan_comment([_ | Cs], Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat) ->
scan_comment(Cs, Stack, Toks, {Line, Col + 1}, State,Errors, TabWidth,FileFormat);
scan_comment([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat,fun scan_comment/8);
scan_comment(Eof, _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
done(Eof, Errors, Toks, {Line, Col}, State, TabWidth,FileFormat).
%% Decide what follows a "." character: a terminating dot token (before a
%% comment, newline or whitespace), or a plain '.' operator otherwise.
scan_dot([$% | _] = Cs, _Stack, Toks, {Line, Col},State, Errors, TabWidth,FileFormat) ->
done(Cs, Errors, [{dot, {Line, Col}} | Toks], {Line, Col + 1}, State, TabWidth,FileFormat);
scan_dot([$\r | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
case FileFormat of
mac ->
done(Cs, Errors, [{dot, {Line, Col}} | Toks],{Line + 1, 1}, State, TabWidth,FileFormat);
_ ->
done(Cs, Errors, [{dot, {Line, Col}} | Toks],{Line, Col+1}, State, TabWidth,FileFormat)
end;
scan_dot([$\n | Cs], _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
done(Cs, Errors, [{dot, {Line, Col}} | Toks],{Line + 1, 1}, State, TabWidth,FileFormat);
scan_dot([C | Cs], _Stack, Toks, {Line, Col}, State,Errors, TabWidth,FileFormat)
when C >= $\000, C =< $\s -> %% NOTE(review): Col + 2 assumes one column for the dot and one for C; characters that render wider (e.g. tabs) are not accounted for - confirm
done(Cs, Errors, [{dot, {Line, Col}} | Toks], {Line, Col + 2}, State, TabWidth,FileFormat);
scan_dot([C | Cs], _Stack, Toks, {Line, Col}, State,Errors,TabWidth,FileFormat)
when C >= $\200, C =< $\240 ->
%% Latin-1 control/NBSP range is treated as whitespace after the dot.
done(Cs, Errors, [{dot, {Line, Col}} | Toks],{Line, Col + 2}, State, TabWidth,FileFormat);
scan_dot([], Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
more([], Stack, Toks, {Line, Col}, State, Errors,TabWidth, FileFormat, fun scan_dot/8);
scan_dot(eof, _Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
done(eof, Errors, [{dot, {Line, Col}} | Toks],{Line, Col}, State, TabWidth,FileFormat);
scan_dot(Cs, Stack, Toks, {Line, Col}, State, Errors, TabWidth,FileFormat) ->
%% Anything else: the "." was an ordinary operator, not a terminator.
scan(Cs, Stack, [{'.', {Line, Col}} | Toks], {Line, Col + 1}, State, Errors, TabWidth,FileFormat).
%% reserved_word(Atom) -> Bool
%% Return 'true' if Atom is an Erlang reserved word, else 'false'.
%% 'try' and 'cond' can be disabled through the compiler options
%% (disable_try / disable_cond), so those two clauses consult them.
reserved_word('after') -> true;
reserved_word('begin') -> true;
reserved_word('case') -> true;
reserved_word('try') ->
Opts = get_compiler_options(),
not member(disable_try, Opts);
reserved_word('cond') ->
Opts = get_compiler_options(),
not member(disable_cond, Opts);
reserved_word('catch') -> true;
reserved_word('andalso') -> true;
reserved_word('orelse') -> true;
reserved_word('end') -> true;
reserved_word('fun') -> true;
reserved_word('if') -> true;
reserved_word('let') -> true;
reserved_word('of') -> true;
reserved_word('query') -> true;
reserved_word('receive') -> true;
reserved_word('when') -> true;
reserved_word('bnot') -> true;
reserved_word('not') -> true;
reserved_word('div') -> true;
reserved_word('rem') -> true;
reserved_word('band') -> true;
reserved_word('and') -> true;
reserved_word('bor') -> true;
reserved_word('bxor') -> true;
reserved_word('bsl') -> true;
reserved_word('bsr') -> true;
reserved_word('or') -> true;
reserved_word('xor') -> true;
reserved_word(_) -> false.
%% Fetch the compiler options, caching them in the process dictionary after
%% the first ETS lookup so repeated reserved_word/1 calls stay cheap.
get_compiler_options() ->
%% Who said that Erlang has no global variables?
case get(compiler_options) of
undefined ->
Opts = case catch ets:lookup(compiler__tab,
compiler_options)
of
[{compiler_options, O}] -> O;
%% Table missing or lookup failed: default to no options.
_ -> []
end,
put(compiler_options, Opts),
Opts;
Opts -> Opts
end. | src/wrangler_scan.erl | 0.511717 | 0.466542 | wrangler_scan.erl | starcoder
%% Copyright (c) 2015 <NAME>.
-module(symmetric_dataset).
-export([create/3]).
-export([create_onesided/2]).
-export([verify/2]).
%% Return a new dataset of key/value pairs for nodes A and B. For each
%% node the pairs will have the following characteristics:
%%
%% - N*(1-P) pairs will be shared between A & B.
%%
%% - N*P/2 pairs will be split evenly between A and B and not be shared.
%%
%% - N*P/2 will have modified values. The keys will exist on both A and
%% B but half will have new values on A and the other half have newer
%% value on B.
%%
%% Total pairs on each node: N*(1-P) + N*P/4 + N*P/2 = N -
%% N*P/4. Minimal number of pairs to be exchanged (assuming a single
%% iteration with ideal bloom filters and the transfer starting from A
%% to B): Unique pairs on B and modified/unmodified pairs on B + unique
%% pairs on A and modified pairs on A = (N*P/4 + N*P/2) + (N*P/4 +
%% N*P/4) = 5N*P/4. Transfer_size/dataset_size ratio: (5N*P/4) / (N -
%% N*P/4) = (5P)/(4-P). With P = 0.2 we get a ratio of 0.26.
%%
%% Build the two node datasets A and B plus the merged Expected set, as
%% described in the module comment above. Guards enforce that N*P is a
%% positive multiple of 4 (so the new/updated halves split evenly) and
%% that the bulk size is a multiple of the 16-byte pattern.
%% NOTE(review): seeding with a fixed tuple makes the data deterministic;
%% the 'random' module is deprecated/removed in modern OTP - confirm the
%% target OTP version before running this.
create(N, P, NumBulkBytes) when (trunc(N*P) > 0) and
(trunc(N*P) rem 4 =:= 0) and
(NumBulkBytes rem 16 =:= 0) ->
random:seed({1, 2, 3}),
BaseSet = base([], N, NumBulkBytes),
{NewOrUpdated, Shared} = lists:split(trunc(N * P), BaseSet),
{New, Updated} = split_in_two(NewOrUpdated),
{A, B} = new_or_updated(New, Updated),
%% Expected = what both nodes should hold after a full exchange: the
%% shared pairs, the unique pairs, and the bumped versions of the rest.
Expected = Shared ++ New ++ update(Updated),
{A ++ Shared, B ++ Shared, Expected}.
%% Build a single deterministic dataset of N pairs (no split between nodes).
create_onesided(N, NumBulkBytes) when (NumBulkBytes rem 16 =:= 0) ->
random:seed({1, 2, 3}),
base([], N, NumBulkBytes).
%% Accumulate N key/value pairs: the key is a SHA-256 of the index, the
%% value is {Timestamp = 1, Bulk} where Bulk repeats a random 16-byte
%% pattern up to NumBulkBytes.
base(L, 0, _NumBulkBytes) -> L;
base(L, N, NumBulkBytes) ->
K = crypto:hash(sha256, "ds1" ++ integer_to_list(N)),
Seq = list_to_binary(lists:map(fun(_) -> random:uniform(255) end,
lists:seq(1, 16))),
Bulk = generate_bulk(Seq, NumBulkBytes),
V = {1, Bulk},
base([{K, V} | L], N - 1, NumBulkBytes).
%% Build a bulk payload of NumBulkBytes bytes by repeating the 16-byte
%% pattern Seq (callers guarantee NumBulkBytes rem 16 =:= 0).
%% Uses binary:copy/2 instead of a hand-rolled append loop: same result,
%% one allocation instead of repeated binary concatenation.
generate_bulk(Seq, NumBulkBytes) ->
    binary:copy(Seq, NumBulkBytes div 16).
%% Distribute the non-shared pairs over the two nodes: each node gets half
%% of the brand-new pairs, and each node gets a time-bumped copy of a
%% different half of the updated pairs (the other half stays at its old
%% version on that node).
new_or_updated(New, Updated) ->
    {NewOnA, NewOnB} = split_in_two(New),
    {FirstHalf, SecondHalf} = split_in_two(Updated),
    A = NewOnA ++ update(FirstHalf) ++ SecondHalf,
    B = NewOnB ++ FirstHalf ++ update(SecondHalf),
    {A, B}.
%% Split a list into two halves; the first half holds floor(length/2) elements.
split_in_two(List) ->
    Half = length(List) div 2,
    lists:split(Half, List).
%% Bump the logical timestamp of every pair, leaving keys and bulks intact.
update(Pairs) ->
    lists:map(fun({Key, {Time, Bulk}}) -> {Key, {Time + 1, Bulk}} end, Pairs).
%% Compare the actual element set with the expected one (order-insensitive)
%% and log the outcome. Returns the atom 'match' only when everything
%% agrees; on any mismatch the return value is whatever lager:info/2
%% returns, so callers should treat anything other than 'match' as failure.
verify(Elements, ExpectedElements) ->
lager:info("num_elements_current=~p, num_elements_expected=~p.",
[length(Elements), length(ExpectedElements)]),
compare(lists:sort(Elements), lists:sort(ExpectedElements)).
%% Walk two sorted lists in lock-step and report the first discrepancy.
compare([], []) ->
lager:info("Correct!~n", []),
match;
compare([], [H|_]) ->
%% Expected element missing from the actual set.
lager:info("Incorrect. Expected elem ~p!~n", [H]);
compare([H|_], []) ->
%% Actual set contains an element that was not expected.
lager:info("Incorrect. Extra elem ~p!~n", [H]);
compare([H1|L1], [H2|L2]) ->
case H1 =:= H2 of
true -> compare(L1, L2);
false -> lager:info("Incorrect. Elements ~p and ~p don't match!~n",
[H1, H2])
end. | src/data_layer/symmetric_dataset.erl | 0.535584 | 0.692549 | symmetric_dataset.erl | starcoder
%% @author <NAME> <<EMAIL>>
%% @copyright 2012 <NAME>
%% @doc Geomap tile support functions
%% See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#lon.2Flat_to_tile_numbers
%% Copyright 2012 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(geomap_tiles).
-export([
map_tiles/4,
map_tiles/5,
url_tile/2,
url_tile/3,
xy_tile/2,
xy_tile/3,
bounding_box/3,
zoom/0
]).
-include("zotonic.hrl").
%% Default zoom factor for displaying the tiles
-define(ZOOM, 14).
%% @doc Return the default zoom factor used when none is given.
zoom() ->
?ZOOM.
%% @doc Return the {x,y,z} tile tuples for a Cols x Rows map centred on the
%% coordinate, plus the (fractional) offset of the point from the
%% upper-left tile, at the default zoom level.
-spec map_tiles(Latitude::float(), Longitude::float(), Cols::integer(), Rows::integer()) -> {ok,list(),{integer(),integer()}}.
map_tiles(Latitude, Longitude, Cols, Rows) ->
map_tiles(Latitude, Longitude, Cols, Rows, ?ZOOM).
%% @doc As map_tiles/4 but with an explicit zoom level. Returns
%% {ok, RowsOfTiles, {XOffset, YOffset}} where each tile is {X,Y,Zoom}.
-spec map_tiles(Latitude::float(), Longitude::float(), Cols::integer(), Rows:: integer(), Zoom::integer()) -> {ok,list(),{float(),float()}}.
map_tiles(Latitude, Longitude, Cols, Rows, Zoom) ->
{X, Y} = xy_tile(Latitude, Longitude, Zoom),
BoundingBox = bounding_box(X,Y,Zoom),
%% Nudge the centre tile for even column/row counts (see correct_xy/7).
{CX,CY} = correct_xy(Latitude, Longitude, X, Y, Cols, Rows, BoundingBox),
TileRows = to_rows(CX, CY, Cols, Rows, Zoom),
Tiles = [
[ {Xt,Yt,Zoom} || {Xt,Yt} <- Row ]
|| Row <- TileRows
],
{ok, Tiles, offset(X,Y,TileRows,Latitude,Longitude,BoundingBox)}.
% For even column/row counts there is no single centre tile, so shift the
% centre by one tile towards the side of the tile the coordinate is
% closest to, keeping the point visually centred.
% NOTE(review): the X case shifts on "offset < half" while the Y case
% shifts on "offset >= half"; the asymmetry presumably reflects tile Y
% growing southward while the map rows are built top-down - confirm.
correct_xy(Latitude, Longitude, X, Y, Cols, Rows, {North,South,East,West}) ->
HalfX = abs(East-West) / 2.0,
HalfY = abs(North-South) / 2.0,
OffsetX = abs(West-Longitude),
OffsetY = abs(North-Latitude),
CenteredX = case Cols rem 2 of
0 ->
case OffsetX < HalfX of
true -> X+1;
false -> X
end;
1 -> X
end,
CenteredY = case Rows rem 2 of
0 ->
case OffsetY < HalfY of
true -> Y;
false -> Y+1
end;
1 -> Y
end,
{CenteredX,CenteredY}.
% Fractional offset, in tile units, of the Lat/Long point from the
% upper-left corner of the assembled map.
offset(X,Y,TileRows,Latitude,Longitude,{North,South,East,West}) ->
% Position of the centre tile {X,Y} within the grid (0-based).
XTile = offset(fun({Xx,_}) -> Xx =:= X end, hd(TileRows), 0),
YTile = offset(fun([{_,Yy}|_]) -> Yy =:= Y end, TileRows, 0),
LongOffset = abs(West-Longitude),
LatOffset = abs(North-Latitude),
LongPerTile = abs(East-West),
LatPerTile = abs(North-South),
% NOTE(review): the X component uses (1 - fraction) while Y uses the
% fraction directly; looks tied to the bounding-box edge that West
% refers to here - verify against bounding_box/3 before changing.
{float(XTile) + (1-LongOffset/LongPerTile),
float(YTile) + LatOffset/LatPerTile}.
% Index (0-based) of the first element of the list satisfying F.
% Crashes (function_clause) if no element matches, which cannot happen for
% the callers above since {X,Y} is always part of the generated grid.
offset(F, [H|T], N) ->
case F(H) of
true -> N;
false -> offset(F,T,N+1)
end.
%% @doc Generate the OpenStreetMap tile URL for a coordinate at the
%% default zoom level.
-spec url_tile(Latitude::float(), Longitude::float()) -> URL::binary().
url_tile(Latitude, Longitude) ->
url_tile(Latitude, Longitude, ?ZOOM).
%% @doc Generate the OpenStreetMap tile URL for a coordinate at an
%% explicit zoom level.
-spec url_tile(Latitude::float(), Longitude::float(), Zoom::integer()) -> URL::binary().
url_tile(Latitude, Longitude, Zoom) ->
{Xtile, Ytile} = xy_tile(Latitude, Longitude, Zoom),
xyz_url(Xtile, Ytile, Zoom).
%% @doc Return the {North, South, East, West} coordinate bounding box of
%% tile {X, Y} at the given zoom level. North/West come from the tile's
%% own edges, South/East from the next tile over.
bounding_box(X, Y, Zoom) ->
    North = tile2lat(Y, Zoom),
    South = tile2lat(Y + 1, Zoom),
    East = tile2long(X, Zoom),
    West = tile2long(X + 1, Zoom),
    {North, South, East, West}.
%% Latitude in degrees of the top edge of tile row Y at the given zoom
%% (inverse Web-Mercator, per the slippy-map tile-name formulas).
tile2lat(Y, Zoom) ->
    Pi = math:pi(),
    N = Pi - (2.0 * Pi * Y) / math:pow(2.0, Zoom),
    Sinh = 0.5 * (math:exp(N) - math:exp(-N)),
    180.0 / Pi * math:atan(Sinh).
%% Longitude in degrees of the left edge of tile column X at the given zoom.
tile2long(X, Zoom) ->
    Tiles = math:pow(2.0, Zoom),
    X / Tiles * 360.0 - 180.0.
%% @doc Build the Cols x Rows grid of {X,Y} tile pairs centred on {X,Y},
%% one list per map row, clamped/wrapped to the tile space of the zoom.
to_rows(X, Y, Cols, Rows, Zoom) ->
    MaxTiles = 1 bsl Zoom,
    lists:map(fun(RowY) -> to_row(X, RowY, Cols, MaxTiles) end,
              row_range(Y, Rows, MaxTiles)).
%% Choose N consecutive row indices centred on Y, pinned to [0, Max-1]
%% when the window would run off the top or bottom of the tile space.
row_range(Y, 1, _Max) ->
    [Y];
row_range(Y, N, Max) ->
    First = Y - N div 2,
    Last = First + N - 1,
    case {First < 0, Last >= Max} of
        {true, _} -> lists:seq(0, N - 1);
        {_, true} -> lists:seq(Max - N, Max - 1);
        _ -> lists:seq(First, Last)
    end.
%% One row of the tile grid: Cols consecutive columns centred on X, each
%% wrapped around the date line by norm/3.
to_row(X, Y, Cols, Max) ->
    First = X - Cols div 2,
    Columns = lists:seq(First, First + Cols - 1),
    lists:map(fun(Col) -> norm(Col, Y, Max) end, Columns).
%% Wrap a column index into [0, Max): a negative column comes in from the
%% right edge (one wrap only, matching how far callers can overshoot) and
%% a column >= Max wraps back to the left via rem.
norm(X, Y, Max) ->
    Wrapped =
        if
            X < 0 -> X + Max;
            true -> X rem Max
        end,
    {Wrapped, Y}.
%% @doc Concatenate X, Y and the zoom factor into an OpenStreetMap tile URL
%% of the form http://tile.openstreetmap.org/Zoom/X/Y.png
xyz_url(X, Y, Zoom) ->
    Path = [[$/, integer_to_list(Part)] || Part <- [Zoom, X, Y]],
    iolist_to_binary(["http://tile.openstreetmap.org", Path, ".png"]).
%% @doc Calculate the {x,y} tile containing the lat/long coordinate at the
%% default zoom level (?ZOOM).
xy_tile(Latitude, Longitude) ->
xy_tile(Latitude, Longitude, ?ZOOM).
%% Tile {X, Y} containing the given coordinate at the given zoom level.
xy_tile(Latitude, Longitude, Zoom) ->
    X = long2tile(Longitude, Zoom),
    Y = lat2tile(Latitude, Zoom),
    {X, Y}.
%% Tile column index for a longitude at the given zoom level.
long2tile(Longitude, Zoom) ->
    Fraction = (Longitude + 180.0) / 360.0,
    floor(Fraction * math:pow(2, Zoom)).
%% Tile row index for a latitude at the given zoom level
%% (forward Web-Mercator projection, per the slippy-map formulas).
lat2tile(Latitude, Zoom) ->
    Rad = Latitude * math:pi() / 180.0,
    Mercator = math:log(math:tan(Rad) + 1 / math:cos(Rad)),
    floor((1 - Mercator / math:pi()) / 2.0 * math:pow(2, Zoom)).
%% From http://schemecookbook.org/Erlang/NumberRounding
%% Largest integer =< X. Hand-rolled because trunc/1 rounds towards zero,
%% so truncating a negative non-integer overshoots by one and must be
%% corrected downward. (erlang:floor/1 only became a BIF in OTP 20.)
floor(X) ->
T = erlang:trunc(X),
case (X - T) of
Neg when Neg < 0 -> T - 1;
Pos when Pos > 0 -> T;
_ -> T
end. | modules/mod_geomap/support/geomap_tiles.erl | 0.766643 | 0.485844 | geomap_tiles.erl | starcoder
%%
%% Copyright (c) 2015-2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc DotMap.
%%
%% @reference <NAME>, <NAME>, and <NAME>
%% Delta State Replicated Data Types (2016)
%% [http://arxiv.org/pdf/1603.01529v1.pdf]
-module(dot_map).
-author("<NAME> <<EMAIL>>").
-behaviour(dot_store).
-export([
new/0,
is_empty/1,
fetch_keys/1,
fetch/3,
store/3,
merge/4,
any/2,
fold/3
]).
-type dot_map() :: dot_store:dot_map().
%% @doc Create an empty DotMap (represented as an orddict).
-spec new() -> dot_map().
new() ->
orddict:new().
%% @doc Check whether a DotMap has no entries.
-spec is_empty(dot_map()) -> boolean().
is_empty(DotMap) ->
orddict:is_empty(DotMap).
%% @doc Given a key, a DotMap and a default, return:
%% - the corresponding value, if the key is present in the DotMap
%% - the default, otherwise
-spec fetch(term(), dot_map(), dot_store:dot_store() | undefined) ->
dot_store:dot_store().
fetch(Key, DotMap, Default) ->
orddict_ext:fetch(Key, DotMap, Default).
%% @doc Get the (sorted) list of keys in the DotMap.
-spec fetch_keys(dot_store:dot_map()) -> [term()].
fetch_keys(DotMap) ->
orddict:fetch_keys(DotMap).
%% @doc Store a {Key, DotStore} pair in the DotMap.
%% If Key is already present, its value is replaced.
-spec store(term(), dot_store:dot_store(), dot_map()) -> dot_map().
store(Key, DotStore, DotMap) ->
orddict:store(Key, DotStore, DotMap).
%% @doc Merge two DotMaps key-by-key. Both maps are sorted assoc lists,
%% so this is a standard sorted-list merge: keys present on only one side
%% are merged against Default, and entries whose merged value equals
%% Default are dropped from the result (see do_merge/7).
-spec merge(function(), dot_store:dot_store(),
dot_map(), dot_map()) -> dot_map().
merge(_Fun, _Default, [], []) ->
[];
merge(Fun, Default, [{Key, ValueA} | RestA], []) ->
do_merge(Fun, Default, Key, ValueA, Default, RestA, []);
merge(Fun, Default, [], [{Key, ValueB} | RestB]) ->
do_merge(Fun, Default, Key, Default, ValueB, [], RestB);
merge(Fun, Default, [{Key, ValueA} | RestA],
[{Key, ValueB} | RestB]) ->
%% Same key on both sides: merge the two values.
do_merge(Fun, Default, Key, ValueA, ValueB, RestA, RestB);
merge(Fun, Default, [{KeyA, ValueA} | RestA],
[{KeyB, _} | _]=RestB) when KeyA < KeyB ->
%% KeyA only on the left side; merge it against Default.
do_merge(Fun, Default, KeyA, ValueA, Default, RestA, RestB);
merge(Fun, Default, [{KeyA, _} | _]=RestA,
[{KeyB, ValueB} | RestB]) when KeyA > KeyB ->
%% KeyB only on the right side; merge it against Default.
do_merge(Fun, Default, KeyB, Default, ValueB, RestA, RestB).
%% Merge one key's pair of values; Default is already bound, so the first
%% case clause matches (by equality) when the merged value IS the default,
%% in which case the key is omitted from the result.
do_merge(Fun, Default, Key, ValueA, ValueB, RestA, RestB) ->
case Fun(ValueA, ValueB) of
Default ->
merge(Fun, Default, RestA, RestB);
Value ->
[{Key, Value} | merge(Fun, Default, RestA, RestB)]
end.
%% @doc True if Pred is true for at least one {Key, Value} entry.
-spec any(function(), dot_map()) -> boolean().
any(Pred, DotMap) ->
lists:any(Pred, DotMap).
%% @doc Fold over the DotMap's entries with Fun(Key, Value, Acc).
-spec fold(function(), term(), dot_map()) -> term().
fold(Fun, AccIn, DotMap) ->
orddict:fold(Fun, AccIn, DotMap). | src/dot_map.erl | 0.628749 | 0.520862 | dot_map.erl | starcoder
-module(foata_normal_form).
-export([create_from_graph/1, get_foata_normal_form/3]).
-export_type([foata_normal_form/0]).
-type foata_class() :: [trace_theory:production()].
-type foata_normal_form() :: [foata_class()].
-type stack_element() :: dependent | char().
-type stack() :: #{char() => stack_element()}.
%------------------------------------------------%
% API %
%------------------------------------------------%
%Compute the Foata normal form from a Diekert dependence graph (each vertex
%carries its outgoing edges and a pending-parent counter).
-spec create_from_graph(Graph :: graph_creation:graph_with_n_parents()) -> foata_normal_form().
create_from_graph(Graph) ->
create_from_graph(Graph, []).
%Compute the Foata normal form directly from a word, its dependency
%relation and the alphabet, via the per-letter stack construction.
-spec get_foata_normal_form(Word :: trace_theory:word(),
Dependent :: trace_theory:dependent(),
Alphabet :: trace_theory:alphabet()) ->
foata_normal_form().
get_foata_normal_form(Word, Dependent, Alphabet) ->
Stack = fill_stack(create_stack(Alphabet), Word, Dependent),
get_foata_normal_form_from_stack(Stack).
%-------------------------------------------------%
% create_from_graph helper functions %
%-------------------------------------------------%
%Recursive worker that runs while the graph is non-empty.
%Each round: collect the vertices with no remaining parents (they form the
%next Foata class), then decrement the parent counter of each of their
%neighbours, then delete them from the graph and append the class to FNF.
-spec create_from_graph(Graph :: graph_creation:graph_with_n_parents(),
FNF :: foata_normal_form()) ->
foata_normal_form().
create_from_graph(Graph, FNF) when map_size(Graph) == 0 ->
FNF;
create_from_graph(Graph, FNF) ->
GraphList = maps:to_list(Graph),
VertexToBeDeleted = lists:filter(fun(Element) ->
case Element of
{_,{_, 0}} -> true;
_ -> false
end
end, GraphList),
% VertexToBeDeleted -> vertices with a zero parent count; they are
% emitted as one Foata class and removed below.
NewGraph = lists:foldl(fun({Key, _}, GraphMap) ->
{Connections, _} = maps:get(Key, GraphMap),
NewGraph = lists:foldl(fun reduce_parents/2, GraphMap, Connections),
% Connections -> the neighbours of the vertex processed in this step.
maps:remove(Key, NewGraph)
% After its neighbours' counters were decremented, the vertex is deleted.
end, Graph, VertexToBeDeleted),
create_from_graph(NewGraph, FNF ++ [transform_deleted_vertex_table(VertexToBeDeleted)]).
%Decrement the pending-parent counter of one neighbour vertex in place.
-spec reduce_parents(Connections :: graph_creation:connections(),
Graph :: graph_creation:graph_with_n_parents()) ->
graph_creation:graph_with_n_parents().
reduce_parents(Connection, Graph) ->
    maps:update_with(Connection,
                     fun({Connections, Parents}) -> {Connections, Parents - 1} end,
                     Graph).
%Strip the graph bookkeeping from the deleted vertices, keeping just the
%productions; together they form one Foata class.
-spec transform_deleted_vertex_table(List :: graph_creation:connections()) -> foata_class().
transform_deleted_vertex_table(List) ->
    [Production || {{Production, _}, _} <- List].
%--------------------------------------------------%
% get_foata_normal_form helper functions %
%--------------------------------------------------%
%Build the initial scan state: a map with one empty stack per letter of
%the alphabet.
-spec create_stack(Alphabet :: trace_theory:alphabet()) -> #{char() => list()}.
create_stack(Alphabet) ->
    maps:from_list([{Letter, []} || Letter <- Alphabet]).
%Read the word from right to left, recording each letter on the stacks.
-spec fill_stack(Stack :: stack(),
Word :: trace_theory:word(),
Dependent :: trace_theory:dependent()) ->
stack().
fill_stack(Stack, Word, Dependent) ->
lists:foldr(fun(Letter, NewStack) ->
add_to_stack(NewStack, Letter, Dependent)
end, Stack, Word).
%Record one letter: push the letter itself onto its own stack, and push
%the 'dependent' marker onto the stack of every other letter that is
%dependent on it (per determine_letters/2).
-spec add_to_stack(Stack :: stack(), Letter :: char(), Dependent :: trace_theory:dependent()) -> stack().
add_to_stack(Stack, Letter, Dependent) ->
LetterList = maps:get(Letter, Stack),
StackWithLetter = maps:put(Letter, LetterList ++ [Letter], Stack),
lists:foldl(fun(DependentLetter, NewStack) ->
List = maps:get(DependentLetter, NewStack),
maps:put(DependentLetter, List ++ [dependent], NewStack)
end, StackWithLetter, determine_letters(Letter, Dependent)).
%All letters dependent on Letter (excluding Letter itself), read off the
%{Letter, Other} pairs of the dependency relation in order.
-spec determine_letters(Letter :: char(), Dependent :: trace_theory:dependent()) -> [char()].
determine_letters(Letter, Dependent) ->
    [Other || {First, Other} <- Dependent, First =:= Letter, Other =/= Letter].
%Drain the per-letter stacks level by level while any stack is non-empty.
%In each round, every letter whose stack head is the letter itself joins
%the current Foata class; a 'dependent' head is just popped; an empty
%stack removes the letter. Classes are accumulated newest-first and
%cleaned up at the end.
-spec get_foata_normal_form_from_stack({Stack :: stack(), FNF :: foata_normal_form()} | stack()) ->
foata_normal_form() | {stack() | foata_normal_form()}.
get_foata_normal_form_from_stack({Stack, FNF}) when map_size(Stack) == 0 ->
remove_empty_classes(FNF);
get_foata_normal_form_from_stack({Stack, FNF}) ->
get_foata_normal_form_from_stack(lists:foldl(fun(Letter, {NewStack, NewFNF}) ->
List = maps:get(Letter, NewStack),
case List of
[] -> {maps:remove(Letter, NewStack), NewFNF};
[dependent | Tail] -> {maps:put(Letter, Tail, NewStack), NewFNF};
[Letter | Tail] -> [CurrentClass | ClassesBefore] = NewFNF,
{maps:put(Letter, Tail, NewStack), [[Letter | CurrentClass] | ClassesBefore]}
end
end, {Stack, [[] | FNF]}, maps:keys(Stack)));
get_foata_normal_form_from_stack(Stack) ->
%Entry point: start with an empty class accumulator.
get_foata_normal_form_from_stack({Stack, []}).
%Drop the empty classes that appear when a whole level consisted only of
%'dependent' markers.
-spec remove_empty_classes(FNF :: foata_normal_form()) -> foata_normal_form().
remove_empty_classes(FNF) ->
lists:filter(fun(Element) ->
case Element of
[] -> false;
_ -> true
end
end, FNF). | src/foata_normal_form.erl | 0.549882 | 0.632474 | foata_normal_form.erl | starcoder
%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
%% use this file except in compliance with the License. You may obtain a copy of
%% the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
%% License for the specific language governing permissions and limitations under
%% the License.
-module(kai_version_SUITE).
-compile(export_all).
-include("kai.hrl").
-include("kai_test.hrl").
%% Start a single-node kai configuration plus the version server before
%% every test case; torn down again in end_per_testcase/2.
init_per_testcase(_TestCase, Conf) ->
kai_config:start_link([
{hostname, "localhost"},
{rpc_port, 11011},
{memcache_port, 11211},
{max_connections, 2},
{n, 1}, {r, 1}, {w, 1},
{number_of_buckets, 8},
{number_of_virtual_nodes, 2}
]),
kai_version:start_link(),
Conf.
%% Stop the servers started in init_per_testcase/2.
end_per_testcase(_TestCase, _Conf) ->
kai_config:stop(),
kai_version:stop(),
ok.
%% Common Test callback: the list of test cases in this suite.
all() -> [test_update, test_order,
test_cas_unique1,
test_cas_unique2,
test_cas_unique7,
test_cas_unique16
].
%% kai_version:update/1 must return data whose vector clock strictly
%% descends from (i.e. dominates) the input's vector clock.
test_update() -> [].
test_update(_Conf) ->
VClock1 = vclock:increment(kai_config:get(node), vclock:fresh()),
Data1 = #data{
vector_clocks = VClock1
},
{ok, Data2} = kai_version:update(Data1),
ct:log(test_update, "Data2.vector_clocks: ~p~n", [Data2#data.vector_clocks]),
?assert(is_list(Data2#data.vector_clocks)),
%% Strict descent: new clock dominates the old one, not vice versa.
?assert(vclock:descends(Data2#data.vector_clocks, Data1#data.vector_clocks)),
?assertNot(vclock:descends(Data1#data.vector_clocks, Data2#data.vector_clocks)),
ok.
%% kai_version:order/1 must keep concurrent versions and drop versions
%% that are dominated by another element of the list.
test_order() -> [].
test_order(_Conf) ->
VClock1 = vclock:increment(node1, vclock:fresh()),
Data1 = #data{
vector_clocks = VClock1
},
%% trivial case: a single version is returned unchanged
?assertEqual(1, length(kai_version:order([Data1]))),
%% two concurrent versions must both be kept
Data2 = Data1#data{
vector_clocks = vclock:increment(otherNode, vclock:fresh())
},
ListOfData12 = kai_version:order([Data1, Data2]),
?assertEqual(2, length(ListOfData12)),
%% Data3 dominates Data1, so Data1 is dropped
Data3 = Data1#data{
vector_clocks = vclock:increment(otherNode2, Data1#data.vector_clocks)
},
ListOfData23 = kai_version:order([Data1, Data2, Data3]),
?assertEqual(2, length(ListOfData23)),
ok.
%% cas_unique packs a 4-bit element count followed by 60 bits of checksum
%% material into a 64-bit value; the four cases below exercise 1, 2 and 7
%% elements plus the 16-element overflow error.
test_cas_unique1() -> [].
test_cas_unique1(_Conf) ->
Data1 = #data{
checksum = list_to_binary(all_bit_on(16))
},
{ok, CasUnique} = kai_version:cas_unique([Data1]),
%% Count nibble 1, then the first 60 bits of the all-ones checksum.
Expected = list_to_binary([<<1:4, 2#1111:4>>, all_bit_on(7)]),
?assertEqual(Expected, CasUnique),
ok.
test_cas_unique2() -> [].
test_cas_unique2(_Conf) ->
Data1 = #data{
checksum = <<16#FFFFFFFFFFFFFFFF:64, 0:64>>
},
Data2 = #data{
checksum = <<0:64, 16#FFFFFFFFFFFFFFFF:64>>
},
{ok, CasUnique} = kai_version:cas_unique([Data1, Data2]),
%% Count nibble 2, then 30 bits from each of the two checksums.
Expected = <<2:4, 2#1111:4, 16#FFFFFFF:26, 0:30>>,
?assertEqual(Expected, CasUnique),
ok.
test_cas_unique7() -> [].
test_cas_unique7(_Conf) ->
%% trunc(60/7) = 8 bits of checksum material per element
ListOfData = lists:map(fun (I) ->
#data{checksum = <<I:8, 0:120>>}
end,
lists:seq(1,7)),
{ok, CasUnique} = kai_version:cas_unique(ListOfData),
Expected = <<7:4, 1:8, 2:8, 3:8, 4:8, 5:8, 6:8, 7:8, 0:4>>,
?assertEqual(Expected, CasUnique),
ok.
test_cas_unique16() -> [].
test_cas_unique16(_Conf) ->
%% 16 exceeds the 4-bit element-count range (2#1111 = 15), so an error
%% mentioning the offending count is expected
ListOfData = lists:map(fun (I) ->
#data{checksum = <<I:4, 0:60>>}
end,
lists:seq(1,16)),
{error, Reason} = kai_version:cas_unique(ListOfData),
?assert(string:str(Reason, "16") > 0),
ok.
%% A list of Bytes bytes with every bit set (16#FF each).
all_bit_on(Bytes) ->
    [16#FF || _ <- lists:seq(1, Bytes)].
%% A list of Bytes zero bytes. (Counterpart of all_bit_on/1; not referenced
%% by the visible test cases above.)
all_bit_off(Bytes) ->
lists:duplicate(Bytes, 0). | test/kai_version_SUITE.erl | 0.575469 | 0.588002 | kai_version_SUITE.erl | starcoder
%%%=============================================================================
%%% @doc Advent of code puzzle solution
%%% @end
%%%=============================================================================
-module(aoc2020_day11).
-behavior(aoc_puzzle).
-export([ parse/1
, solve1/1
, solve2/1
, info/0
]).
-include("aoc_puzzle.hrl").
%%------------------------------------------------------------------------------
%% @doc info/0
%% Returns the aoc_puzzle metadata record for this puzzle (year/day/name
%% plus the expected answers for both parts, used by the test harness).
%% @end
%%------------------------------------------------------------------------------
-spec info() -> aoc_puzzle().
info() ->
#aoc_puzzle{ module = ?MODULE
, year = 2020
, day = 11
, name = "Seating System"
, expected = {2093, 1862}
, has_input_file = true
}.
%%==============================================================================
%% Types
%%==============================================================================
-type coord() :: { X :: integer()
, Y :: integer()
}.
-type grid() :: #{coord() => integer()}.
-type input_type() :: grid().
-type result1_type() :: any().
-type result2_type() :: result1_type().
%%------------------------------------------------------------------------------
%% @doc parse/1
%% Parses the input file: split the binary into lines and build the
%% {X, Y} -> character grid map via to_map/1.
%% @end
%%------------------------------------------------------------------------------
-spec parse(Input :: binary()) -> input_type().
parse(Input) ->
to_map(string:tokens(binary_to_list(Input), "\n\r")).
%%------------------------------------------------------------------------------
%% @doc solve1/1
%% Solves part 1 (adjacent-neighbour rule, threshold 4). Receives parsed
%% input as returned from parse/1; returns the occupied-seat count at the
%% fixed point.
%% @end
%%------------------------------------------------------------------------------
-spec solve1(Grid :: input_type()) -> result1_type().
solve1(Grid) ->
iterate_until_same(Grid, fun compute_next1/3).
%%------------------------------------------------------------------------------
%% @doc solve2/1
%% Solves part 2 (first-visible-seat rule, threshold 5). Receives parsed
%% input as returned from parse/1.
%% @end
%%------------------------------------------------------------------------------
-spec solve2(Grid :: input_type()) -> result2_type().
solve2(Grid) ->
iterate_until_same(Grid, fun compute_next2/3).
%%==============================================================================
%% Internals
%%==============================================================================
%% Apply the transition rule repeatedly until the grid reaches a fixed
%% point, then count the occupied seats ($#) in the stable grid.
iterate_until_same(Grid, Fun) ->
Next = iterate_one_step(Grid, Fun),
case Next =:= Grid of
true ->
maps:fold(fun(_, $#, Acc) -> Acc + 1;
(_, _, Acc) -> Acc
end, 0, Next);
false ->
iterate_until_same(Next, Fun)
end.
%% One generation: apply the transition function to every cell. The old
%% grid is passed along so all cells are updated simultaneously.
iterate_one_step(Grid, Fun) ->
    maps:map(fun(Coord, Cell) -> Fun(Coord, Cell, Grid) end, Grid).
%% Compute the next state of cell `V' at coordinate `Coord' using the
%% part-1 rule: empty seats with no occupied neighbours fill up; occupied
%% seats with four or more occupied neighbours empty out.
compute_next1(Coord, V, OldGrid) ->
OccupiedAdj = occupied_adjacents(Coord, OldGrid),
case V of
$L when OccupiedAdj == 0 -> $#; % become occupied
$# when OccupiedAdj >= 4 -> $L; % become free
_ -> V % unchanged (floor, or no rule applies)
end.
%% Number of the eight immediate neighbours of {X, Y} holding an occupied
%% seat ($#). Cells outside the grid count as not occupied.
occupied_adjacents({X, Y}, Grid) ->
    Neighbours = [{X + Dx, Y + Dy}
                  || Dx <- [-1, 0, 1],
                     Dy <- [-1, 0, 1],
                     {Dx, Dy} =/= {0, 0}],
    length([Pos || Pos <- Neighbours,
                   maps:get(Pos, Grid, undefined) =:= $#]).
%% Compute the next state of cell `V' at coordinate `Coord' using the
%% part-2 rule: visibility along the eight directions instead of direct
%% adjacency, and a tolerance threshold of five instead of four.
compute_next2(Coord, V, OldGrid) ->
VisibleAdj = visible_adjacents(Coord, OldGrid),
case V of
$L when VisibleAdj == 0 -> $#; % become occupied
$# when VisibleAdj >= 5 -> $L; % become free
_ -> V % unchanged (floor, or no rule applies)
end.
%% Number of the eight sight lines from Coord that hit an occupied seat
%% before being blocked (see find_first_in_direction/4).
visible_adjacents(Coord, Grid) ->
Deltas = [{-1, -1}, {0, -1}, {1, -1},
{-1, 0}, {1, 0},
{-1, 1}, {0, 1}, {1, 1}],
lists:foldl(
fun(Delta, Acc) ->
case find_first_in_direction(Coord, Delta, 1, Grid) of
{_, _} -> Acc + 1;
false -> Acc
end
end, 0, Deltas).
%% Walk outward from Coord along Delta; return the coordinate of the first
%% occupied seat ($#) seen, skipping over floor cells ($.). Any other cell
%% (an empty seat, or stepping off the grid) blocks the view: false.
find_first_in_direction({X, Y} = Coord, {Dx, Dy} = Delta, Dist, Grid) ->
    Probe = {X + Dx * Dist, Y + Dy * Dist},
    Cell = maps:get(Probe, Grid, undefined),
    if
        Cell =:= $# -> Probe;
        Cell =:= $. -> find_first_in_direction(Coord, Delta, Dist + 1, Grid);
        true -> false
    end.
%% Turn the input lines into a grid map keyed by {Column, Row} (both
%% 0-based, row 0 is the first line), each value being the character code.
-spec to_map([string()]) -> #{{integer(), integer()} => integer()}.
to_map(Lines) ->
    Indexed = lists:zip(lists:seq(0, length(Lines) - 1), Lines),
    maps:from_list(
      [{{X, Y}, Char}
       || {Y, Line} <- Indexed,
          {X, Char} <- lists:zip(lists:seq(0, length(Line) - 1), Line)]).
%%%_* Emacs ====================================================================
%%% Local Variables:
%%% allout-layout: t
%%% erlang-indent-level: 2
%%% End: | src/2020/aoc2020_day11.erl | 0.517083 | 0.61607 | aoc2020_day11.erl | starcoder |
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(erlfdb_subspace).
-record(erlfdb_subspace, {
prefix
}).
-export([
create/1,
create/2,
add/2,
key/1,
pack/1,
pack/2,
pack_vs/1,
pack_vs/2,
unpack/2,
range/1,
range/2,
contains/2,
subspace/2
]).
-define(PREFIX(S), S#erlfdb_subspace.prefix).
%% Create a subspace whose prefix is the packed Tuple (no parent prefix).
create(Tuple) ->
create(Tuple, <<>>).
%% Create a subspace from a tuple packed under either an existing parent
%% subspace or an explicit binary prefix.
create(#erlfdb_subspace{} = Subspace, Tuple) when is_tuple(Tuple) ->
create(Tuple, ?PREFIX(Subspace));
create(Tuple, Prefix) when is_tuple(Tuple), is_binary(Prefix) ->
#erlfdb_subspace{
prefix = erlfdb_tuple:pack(Tuple, Prefix)
}.
%% Child subspace obtained by appending a single item to the prefix.
add(#erlfdb_subspace{} = Subspace, Item) ->
create({Item}, ?PREFIX(Subspace)).
key(#erlfdb_subspace{} = Subspace) ->
Subspace#erlfdb_subspace.prefix.
%% @doc Pack the empty tuple, i.e. return the subspace's bare prefix key.
pack(Subspace) ->
    pack(Subspace, {}).

%% @doc Pack Tuple with the tuple layer, prefixed by Subspace's prefix.
pack(#erlfdb_subspace{} = Subspace, Tuple) when is_tuple(Tuple) ->
    erlfdb_tuple:pack(Tuple, ?PREFIX(Subspace)).

%% @doc As pack/1 but for tuples that may contain versionstamps.
pack_vs(Subspace) ->
    pack_vs(Subspace, {}).

%% @doc As pack/2 but for tuples that may contain versionstamps.
pack_vs(#erlfdb_subspace{} = Subspace, Tuple) when is_tuple(Tuple) ->
    erlfdb_tuple:pack_vs(Tuple, ?PREFIX(Subspace)).
%% @doc Strip Subspace's prefix from Key and tuple-unpack the remainder.
%% Errors with {key_not_in_subspace, Subspace, Key} when Key does not
%% start with the subspace's prefix.
unpack(#erlfdb_subspace{prefix = Prefix} = Subspace, Key) ->
    PrefixSize = size(Prefix),
    case Key of
        <<Prefix:PrefixSize/binary, Rest/binary>> ->
            erlfdb_tuple:unpack(Rest);
        _ ->
            erlang:error({key_not_in_subspace, Subspace, Key})
    end.
%% @doc Range for the whole subspace (empty tuple).
range(Subspace) ->
    range(Subspace, {}).

%% @doc Compute the {StartKey, EndKey} pair covering Tuple inside
%% Subspace, suitable for range reads.
range(#erlfdb_subspace{prefix = Prefix}, Tuple) when is_tuple(Tuple) ->
    {Start, End} = erlfdb_tuple:range(Tuple),
    {
        <<Prefix/binary, Start/binary>>,
        <<Prefix/binary, End/binary>>
    }.
%% @doc True when Key lies inside Subspace, i.e. the subspace's packed
%% prefix is a binary prefix of Key.
contains(#erlfdb_subspace{prefix = Prefix}, Key) ->
    PrefixSize = size(Prefix),
    case Key of
        <<Prefix:PrefixSize/binary, _/binary>> -> true;
        _ -> false
    end.

%% @doc Create a nested subspace by packing Tuple onto Subspace's prefix.
subspace(#erlfdb_subspace{} = Subspace, Tuple) ->
    create(Subspace, Tuple).
-module(object_SUITE).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl").
-export([all/0,
init_per_suite/1, end_per_suite/1,
init_per_testcase/2, end_per_testcase/2]).
-export([call_test/1, cast_test/1, info_test/1, message_test/1, stop_test/1]).
%% Common Test entry point: every test case in this suite.
all() ->
    [
        call_test,
        cast_test,
        info_test,
        message_test,
        stop_test
    ].
%% Make sure the ebus application (and its dependencies) is running
%% before any test case executes.
init_per_suite(Config) ->
    application:ensure_all_started(ebus),
    Config.

%% Nothing to clean up at suite level.
end_per_suite(Config) ->
    Config.
%% Start a fresh ebus connection for each test case and stash it in the
%% config under 'bus'.
init_per_testcase(_, Config) ->
    {ok, B} = ebus:starter(),
    [{bus, B} | Config].

%% Tear the bus down again after the case.
%% NOTE(review): exit(B, normal) is ignored by a process that does not
%% trap exits -- confirm that the ebus bus process traps exits, or use a
%% dedicated stop API.
end_per_testcase(_, Config) ->
    case proplists:get_value(bus, Config, undefined) of
        undefined ->
            ok;
        B -> exit(B, normal)
    end.
%% Exercises ebus_object's gen_server call path with a meck'ed callback
%% module: reply/noreply returns, signal result actions (verified via a
%% bus filter feeding handle_info), continue actions, and stop replies.
call_test(Config) ->
    B = ?config(bus, Config),
    meck:new(call_test, [non_strict]),
    %% init must be invoked with exactly the start arguments
    meck:expect(call_test, init, fun([init_arg]) -> {ok, init_state};
                                    (A) -> erlang:error({bad_init, A})
                                 end),
    meck:expect(call_test, handle_call,
                fun(_, _, State) when State /= init_state ->
                        erlang:error({bad_state, State});
                   ({reply, R}, _, State) ->
                        {reply, R, State};
                   ({reply, R, A}, _, State) ->
                        {reply, R, State, A};
                   ({noreply, R}, From, State) ->
                        %% reply later from a separate process to
                        %% exercise the deferred-reply path
                        spawn(fun() ->
                                      gen_server:reply(From, R)
                              end),
                        {noreply, State};
                   ({noreply, R, A}, From, State) ->
                        spawn(fun() ->
                                      gen_server:reply(From, R)
                              end),
                        {noreply, State, A};
                   ({stop, Reason, R}, _, State) ->
                        {stop, Reason, R, State}
                end),
    %% filter matches land here as {filter_match, _, Msg} info messages
    meck:expect(call_test, handle_info,
                fun({filter_match, _, Msg}, State) ->
                        ?assertEqual("com.helium.test.Call", ebus_message:interface(Msg)),
                        ?assertEqual("Notice", ebus_message:member(Msg)),
                        {noreply, State}
                end),
    meck:expect(call_test, handle_continue,
                fun(test_continue, State) ->
                        {noreply, State}
                end),
    meck:expect(call_test, terminate,
                fun(normal, _State) -> ok end),
    Path = "/com/helium/test/Call",
    {ok, O} = ebus_object:start(B, Path, call_test, [init_arg], []),
    %% Validate that init was called with the start arguments
    ?assert(meck:called(call_test, init, [[init_arg]])),
    %% Check reply
    ?assertEqual(test_reply, gen_server:call(O, {reply, test_reply})),
    %% Check noreply
    ?assertEqual(later_reply, gen_server:call(O, {noreply, later_reply})),
    ?assertEqual(ok, ebus:add_match(B, #{type => signal})),
    SignalAction = {signal, "com.helium.test.Call", "Notice"},
    {ok, F} = ebus:add_filter(B, O, #{ path => Path }),
    %% Test reply result action
    ?assertEqual(ok, gen_server:call(O, {reply, ok, SignalAction})),
    meck:wait(1, call_test, handle_info, '_', 1000),
    %% Test noreply result action
    ?assertEqual(ok, gen_server:call(O, {noreply, ok, SignalAction})),
    meck:wait(2, call_test, handle_info, '_', 1000),
    %% Test reply continue action
    ?assertEqual(ok, gen_server:call(O, {reply, ok, {continue, test_continue}})),
    meck:wait(1, call_test, handle_continue, '_', 1000),
    %% Test noreply continue action
    ?assertEqual(ok, gen_server:call(O, {noreply, ok, {continue, test_continue}})),
    meck:wait(2, call_test, handle_continue, '_', 1000),
    ebus:remove_filter(B, F),
    %% Check stop
    ?assertEqual(test_stop, gen_server:call(O, {stop, normal, test_stop})),
    meck:validate(call_test),
    meck:unload(call_test),
    ok.
%% Exercises the gen_server cast path of ebus_object: plain noreply,
%% noreply with a signal result action (observed via a bus filter),
%% noreply with a continue action, and a stop cast.
cast_test(Config) ->
    B = ?config(bus, Config),
    meck:new(cast_test, [non_strict]),
    meck:expect(cast_test, init,
                fun([init_arg]) ->
                        {ok, init_state};
                   (A) ->
                        erlang:error({bad_init, A})
                end),
    meck:expect(cast_test, handle_cast,
                fun(_, State) when State /= init_state ->
                        erlang:error({bad_state, State});
                   (noreply, State) ->
                        {noreply, State};
                   ({noreply, A}, State) ->
                        {noreply, State, A};
                   ({stop, Reason}, State) ->
                        {stop, Reason, State};
                   (M, _) ->
                        erlang:error({unhandled, M})
                end),
    meck:expect(cast_test, handle_info,
                fun({filter_match, _, Msg}, State) ->
                        ?assertEqual("com.helium.test.Cast", ebus_message:interface(Msg)),
                        ?assertEqual("Notice", ebus_message:member(Msg)),
                        {noreply, State}
                end),
    meck:expect(cast_test, handle_continue,
                fun(test_continue, State) ->
                        {noreply, State}
                end),
    meck:expect(cast_test, terminate,
                fun(normal, _State) -> ok end),
    Path = "/com/helium/test/Cast",
    {ok, O} = ebus_object:start(B, Path, cast_test, [init_arg], []),
    %% Validate that init was called with the start arguments
    ?assert(meck:called(cast_test, init, [[init_arg]])),
    %% Check noreply
    gen_server:cast(O, noreply),
    meck:wait(cast_test, handle_cast, [noreply, '_'], 1000),
    ?assertEqual(ok, ebus:add_match(B, #{ type => signal})),
    SignalAction = {signal, "com.helium.test.Cast", "Notice"},
    {ok, F} = ebus:add_filter(B, O, #{ path => Path }),
    %% Test result signal action, which noreplies to the cast and we
    %% should then see a handle_info with a filter_match.
    gen_server:cast(O, {noreply, SignalAction}),
    meck:wait(cast_test, handle_info, [{filter_match, '_', '_'}, '_'], 1000),
    %% Test result continue. This is the only case that will invoke
    %% handle_continue so we rely on validate to catch any failures to
    %% call it.
    gen_server:cast(O, {noreply, {continue, test_continue}}),
    ebus:remove_filter(B, F),
    %% Check stop. Wait for the handle_cast call to allow the coverage
    %% to see the actual stop response from the callback.
    gen_server:cast(O, {stop, normal}),
    meck:wait(cast_test, handle_cast, [{stop, normal}, '_'], 1000),
    meck:validate(cast_test),
    meck:unload(cast_test),
    ok.
%% Exercises the handle_info path of ebus_object with raw messages:
%% plain noreply, noreply with a signal result action, noreply with a
%% continue action, and a stop message.
info_test(Config) ->
    B = ?config(bus, Config),
    meck:new(info_test, [non_strict]),
    meck:expect(info_test, init, fun([init_arg]) -> {ok, init_state};
                                    (A) -> erlang:error({bad_init, A})
                                 end),
    meck:expect(info_test, handle_info,
                fun(_, State) when State /= init_state ->
                        erlang:error({bad_state, State});
                   (noreply, State) ->
                        {noreply, State};
                   ({noreply, A}, State) ->
                        {noreply, State, A};
                   ({stop, Reason}, State) ->
                        {stop, Reason, State};
                   ({filter_match, _, Msg}, State) ->
                        ?assertEqual("com.helium.test.Info", ebus_message:interface(Msg)),
                        ?assertEqual("Notice", ebus_message:member(Msg)),
                        {noreply, State};
                   (M, _) ->
                        erlang:error({unhandled, M})
                end),
    meck:expect(info_test, handle_continue,
                fun(test_continue, State) ->
                        {noreply, State}
                end),
    meck:expect(info_test, terminate,
                fun(normal, _State) -> ok end),
    Path = "/com/helium/test/Info",
    {ok, O} = ebus_object:start(B, Path, info_test, [init_arg], []),
    %% Validate that init was called with the start arguments
    ?assert(meck:called(info_test, init, [[init_arg]])),
    %% Check noreply
    erlang:send(O, noreply),
    meck:wait(info_test, handle_info, [noreply, '_'], 1000),
    ?assertEqual(ok, ebus:add_match(B, #{type => signal})),
    SignalAction = {signal, "com.helium.test.Info", "Notice"},
    {ok, F} = ebus:add_filter(B, O, #{ path => Path }),
    %% Test result action
    erlang:send(O, {noreply, SignalAction}),
    meck:wait(info_test, handle_info, [{filter_match, '_', '_'}, '_'], 1000),
    %% Test continue action
    erlang:send(O, {noreply, {continue, test_continue}}),
    ebus:remove_filter(B, F),
    %% Check stop
    erlang:send(O, {stop, normal}),
    meck:wait(info_test, handle_info, [{stop, '_'}, '_'], 1000),
    meck:wait(info_test, terminate, '_', 1000),
    meck:validate(info_test),
    meck:unload(info_test),
    ok.
%% Exercises handle_message: bus method calls routed to the object by
%% member name, covering noreply, normal reply, error reply and stop,
%% and checks the object process actually terminates on stop.
message_test(Config) ->
    B = ?config(bus, Config),
    Dest = "com.helium.test",
    ?assertEqual(ok, ebus:request_name(B, Dest, [{replace_existing, true}])),
    Path = "/com/helium/test/Message",
    ?assertEqual(ok, ebus:add_match(B, #{path => Path})),
    meck:new(message_test, [non_strict]),
    meck:expect(message_test, init,
                fun([init_arg]) -> {ok, init_state};
                   (A) -> erlang:error({bad_init, A})
                end),
    meck:expect(message_test, handle_message,
                fun("NoReply", Msg, State) ->
                        ?assertEqual(Path, ebus_message:path(Msg)),
                        {noreply, State};
                   ("Reply", Msg, State) ->
                        ?assertEqual(Path, ebus_message:path(Msg)),
                        {reply, [bool], [true], State};
                   ("ErrorReply", Msg, State) ->
                        ?assertEqual(Path, ebus_message:path(Msg)),
                        {reply_error, "org.freedesktop.DBus.Error.Failed", undefined, State};
                   ("Stop", _, State) ->
                        {stop, normal, State}
                end),
    meck:expect(message_test, terminate,
                fun(_, _State) -> ok end),
    {ok, O} = ebus_object:start(B, Path, message_test, [init_arg], []),
    %% Validate that init was called with the start arguments
    ?assert(meck:called(message_test, init, [[init_arg]])),
    {ok, NoReplyMsg} = ebus_message:new_call(Dest, Path, "NoReply"),
    ebus:send(B, NoReplyMsg),
    meck:wait(message_test, handle_message, ["NoReply", '_', '_'], 1000),
    {ok, ReplyMsg} = ebus_message:new_call(Dest, Path, "Reply"),
    ebus:send(B, ReplyMsg),
    meck:wait(message_test, handle_message, ["Reply", '_', '_'], 1000),
    {ok, ErrorReplyMsg} = ebus_message:new_call(Dest, Path, "ErrorReply"),
    ebus:send(B, ErrorReplyMsg),
    meck:wait(message_test, handle_message, ["ErrorReply", '_', '_'], 1000),
    {ok, StopMsg} = ebus_message:new_call(Dest, Path, "Stop"),
    ebus:send(B, StopMsg),
    meck:wait(message_test, handle_message, ["Stop", '_', '_'], 1000),
    meck:wait(message_test, terminate, '_', 1000),
    %% object must be gone after the Stop message
    ?assert(not erlang:is_process_alive(O)),
    meck:validate(message_test),
    meck:unload(message_test),
    ok.
%% Checks that ebus_object:stop/2 terminates the object with the given
%% reason (terminate/2 must see 'normal' and nothing else).
stop_test(Config) ->
    B = ?config(bus, Config),
    meck:new(stop_test, [non_strict]),
    meck:expect(stop_test, init,
                fun([init_arg]) ->
                        {ok, init_state};
                   (A) ->
                        erlang:error({bad_init, A})
                end),
    meck:expect(stop_test, terminate,
                fun(normal, _State) -> ok;
                   (Reason, _State) ->
                        erlang:error({invalid_exit, Reason})
                end),
    Path = "/com/helium/test/Stop",
    {ok, O} = ebus_object:start(B, Path, stop_test, [init_arg], []),
    ebus_object:stop(O, normal),
    meck:wait(stop_test, terminate, '_', 1000),
    meck:validate(stop_test),
    meck:unload(stop_test),
    ok.
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_views_server).
-behaviour(gen_server).
-export([
start_link/0
]).
-export([
accepted/1
]).
-export([
init/1,
terminate/2,
handle_call/3,
handle_cast/2,
handle_info/2,
code_change/3,
format_status/2
]).
-define(MAX_ACCEPTORS, 5).
-define(MAX_WORKERS, 100).
%% Start the (singleton) server registered under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% Called by an acceptor process once it has accepted a job, so the
%% server can reclassify it as a worker and spawn a replacement.
accepted(Worker) when is_pid(Worker) ->
    gen_server:call(?MODULE, {accepted, Worker}, infinity).
%% gen_server callback. Traps exits so that acceptor/worker 'EXIT'
%% signals arrive as handle_info messages, configures the job timeout,
%% and pre-spawns the acceptor pool.
init(_) ->
    process_flag(trap_exit, true),
    couch_views_jobs:set_timeout(),
    St = #{
        acceptors => #{},
        workers => #{},
        max_acceptors => max_acceptors(),
        max_workers => max_workers()
    },
    {ok, spawn_acceptors(St)}.

%% Nothing to clean up; linked children are taken down with us.
terminate(_, _St) ->
    ok.
%% Promote an acceptor to a worker once it reports job acceptance, then
%% backfill the acceptor pool. A pid we are not tracking as an acceptor
%% indicates a logic error, so the server stops.
handle_call({accepted, Pid}, _From, St) ->
    #{
        acceptors := Acceptors,
        workers := Workers
    } = St,
    case maps:is_key(Pid, Acceptors) of
        true ->
            St1 = St#{
                acceptors := maps:remove(Pid, Acceptors),
                workers := Workers#{Pid => true}
            },
            {reply, ok, spawn_acceptors(St1)};
        false ->
            %% fixed typo: "processs" -> "process"
            LogMsg = "~p : unknown acceptor process ~p",
            couch_log:error(LogMsg, [?MODULE, Pid]),
            {stop, {unknown_acceptor_pid, Pid}, St}
    end;
handle_call(Msg, _From, St) ->
    {stop, {bad_call, Msg}, {bad_call, Msg}, St}.
%% No casts are part of this server's protocol; any cast is a bug.
handle_cast(Msg, St) ->
    {stop, {bad_cast, Msg}, St}.

%% Dispatch 'EXIT' signals (we trap exits) to the appropriate handler
%% depending on whether the pid was an acceptor or a worker.
handle_info({'EXIT', Pid, Reason}, St) ->
    #{
        acceptors := Acceptors,
        workers := Workers
    } = St,
    % In Erlang 21+ could check map keys directly in the function head
    case {maps:is_key(Pid, Acceptors), maps:is_key(Pid, Workers)} of
        {true, false} -> handle_acceptor_exit(St, Pid, Reason);
        {false, true} -> handle_worker_exit(St, Pid, Reason);
        {false, false} -> handle_unknown_exit(St, Pid, Reason)
    end;
handle_info(Msg, St) ->
    {stop, {bad_info, Msg}, St}.
%% No state migration needed across code upgrades.
code_change(_OldVsn, St, _Extra) ->
    {ok, St}.
%% Scrub the server state for sys/crash reports: replace the (possibly
%% large) acceptor and worker maps with their sizes so logs stay small.
format_status(_Opt, [_PDict, #{workers := Workers, acceptors := Acceptors} = State]) ->
    Scrubbed = State#{
        workers => {map_size, map_size(Workers)},
        acceptors => {map_size, map_size(Acceptors)}
    },
    [{data, [{"State", Scrubbed}]}].
% Worker process exit handlers

%% An acceptor died before accepting a job: log it, forget it and
%% spawn a replacement.
handle_acceptor_exit(#{acceptors := Acceptors} = St, Pid, Reason) ->
    St1 = St#{acceptors := maps:remove(Pid, Acceptors)},
    LogMsg = "~p : acceptor process ~p exited with ~p",
    couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
    {noreply, spawn_acceptors(St1)}.

%% A worker finishing normally is expected and not logged; any other
%% exit reason is logged. Either way its slot is freed and the acceptor
%% pool is topped up.
handle_worker_exit(#{workers := Workers} = St, Pid, normal) ->
    St1 = St#{workers := maps:remove(Pid, Workers)},
    {noreply, spawn_acceptors(St1)};
handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
    St1 = St#{workers := maps:remove(Pid, Workers)},
    LogMsg = "~p : indexer process ~p exited with ~p",
    couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
    {noreply, spawn_acceptors(St1)}.

%% An 'EXIT' from a pid we never tracked is a logic error: stop.
handle_unknown_exit(St, Pid, Reason) ->
    LogMsg = "~p : unknown process ~p exited with ~p",
    couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
    {stop, {unknown_pid_exit, Pid}, St}.
%% Keep spawning linked acceptor processes until either the acceptor
%% cap is reached or acceptors + workers hit the overall worker cap.
spawn_acceptors(St) ->
    #{
        workers := Workers,
        acceptors := Acceptors,
        max_acceptors := MaxAcceptors,
        max_workers := MaxWorkers
    } = St,
    ACnt = maps:size(Acceptors),
    WCnt = maps:size(Workers),
    case ACnt < MaxAcceptors andalso (ACnt + WCnt) < MaxWorkers of
        true ->
            Pid = couch_views_indexer:spawn_link(),
            NewSt = St#{acceptors := Acceptors#{Pid => true}},
            %% recurse until one of the limits is hit
            spawn_acceptors(NewSt);
        false ->
            St
    end.
%% Configurable caps, read from the couch_views config section with
%% compiled-in defaults.
max_acceptors() ->
    config:get_integer("couch_views", "max_acceptors", ?MAX_ACCEPTORS).

max_workers() ->
    config:get_integer("couch_views", "max_workers", ?MAX_WORKERS).
%% ---------------------------------------------------------------------
%%
%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% ---------------------------------------------------------------------
%% @doc Data format and lookup utilities for access logs (I/O usage
%% stats).
-module(riak_cs_access).
-export([
archive_period/0,
log_flush_interval/0,
max_flush_size/0,
make_object/3,
get_usage/4
]).
-include("riak_cs.hrl").
-ifdef(TEST).
-ifdef(EQC).
-compile([export_all]).
-include_lib("eqc/include/eqc.hrl").
-endif.
-include_lib("eunit/include/eunit.hrl").
-endif.
-export_type([slice/0]).
-type slice() :: {Start :: calendar:datetime(),
End :: calendar:datetime()}.
-define(NODEKEY, <<"MossNode">>).
%% @doc Retrieve the number of seconds that should elapse between
%% archivings of access stats. This setting is controlled by the
%% `access_archive_period' environment variable of the `riak_cs'
%% application. The value must be a positive integer; anything else
%% (including an unset variable) yields an error.
-spec archive_period() -> {ok, integer()}|{error, term()}.
archive_period() ->
    case application:get_env(riak_cs, access_archive_period) of
        {ok, AP} when is_integer(AP), AP > 0 ->
            {ok, AP};
        _ ->
            %% error message fixed: the guard also rejects zero/negative
            %% integers, so "was not an integer" was misleading
            {error, "riak_cs:access_archive_period was not a positive integer"}
    end.
%% @doc Retrieve the number of seconds that should elapse between
%% flushes of access stats. This setting is controlled by the
%% `access_log_flush_interval' environment variable of the `riak_cs'
%% application.
%% The interval is derived as archive_period div access_log_flush_factor
%% and the factor must divide the period evenly.
-spec log_flush_interval() -> {ok, integer()}|{error, term()}.
log_flush_interval() ->
    case application:get_env(riak_cs, access_log_flush_factor) of
        {ok, AF} when is_integer(AF), AF > 0 ->
            case archive_period() of
                {ok, AP} ->
                    case AP rem AF of
                        0 ->
                            {ok, AP div AF};
                        _ ->
                            {error, "riak_cs:access_log_flush_interval"
                             " does not evenly divide"
                             " riak_cs:access_archive_period"}
                    end;
                APError ->
                    %% propagate the archive_period error untouched
                    APError
            end;
        _ ->
            {error, "riak_cs:access_log_flush_interval was not an integer"}
    end.
%% @doc Retrieve the maximum number of records that should be added to
%% the log before the log is automatically archived. This setting is
%% controlled by the `access_log_flush_size' environment variable of
%% the `riak_cs' application.
-spec max_flush_size() -> {ok, integer()}|{error, term()}.
max_flush_size() ->
    case application:get_env(riak_cs, access_log_flush_size) of
        {ok, AP} when is_integer(AP), AP > 0 ->
            {ok, AP};
        _ ->
            {error, "riak_cs:access_log_flush_size was not a positive integer"}
    end.
%% @doc Create a Riak object for storing a user/slice's access data.
%% The list of stats (`Accesses') must contain a list of proplists.
%% The keys of the proplist must be either atoms or binaries, to be
%% encoded as JSON keys. The values of the proplists must be numbers,
%% as the values for each key will be summed in the stored object.
-spec make_object(iodata(),
                  [[{atom()|binary(), number()}]],
                  slice())
         -> riakc_obj:riakc_obj().
make_object(User, Accesses, {Start, End}) ->
    {ok, Period} = archive_period(),
    Aggregate = aggregate_accesses(Accesses),
    %% tag the sample with the recording node so usage can later be
    %% grouped per node (see group_by_node/1)
    rts:new_sample(?ACCESS_BUCKET, User, Start, End, Period,
                   [{?NODEKEY, node()}|Aggregate]).
%% Fold the raw {OpName, Stats} entries into one summed proplist per
%% operation, then wrap each op's stats as a mochijson2 struct.
aggregate_accesses(Accesses) ->
    Combined = lists:foldl(fun merge_ops/2, [], Accesses),
    %% now mochijson-ify
    lists:map(fun({Op, Stats}) -> {Op, {struct, Stats}} end, Combined).

%% Merge one {OpName, Stats} entry into the accumulator, summing stats
%% when the op was already seen (a refreshed op moves to the head).
merge_ops({Op, NewStats}, Merged) ->
    case lists:keytake(Op, 1, Merged) of
        {value, {Op, SeenStats}, Remaining} ->
            [{Op, merge_stats(NewStats, SeenStats)} | Remaining];
        false ->
            [{Op, NewStats} | Merged]
    end.

%% Both arguments must be orddicts; values under equal keys are summed.
merge_stats(NewStats, SeenStats) ->
    orddict:merge(fun(_K, V1, V2) -> V1 + V2 end, SeenStats, NewStats).
%% @doc Produce a usage compilation for the given `User' between
%% `Start' and `End' times, inclusive. The result is an orddict in
%% which the keys are Riak CS node names. The value for each key is a
%% list of samples. Each sample is an orddict full of metrics.
-spec get_usage(pid(),
                term(), %% TODO: riak_cs:user_key() type doesn't exist
                calendar:datetime(),
                calendar:datetime()) ->
         {Usage::orddict:orddict(), Errors::[{slice(), term()}]}.
get_usage(Riak, User, Start, End) ->
    {ok, Period} = archive_period(),
    {Usage, Errors} = rts:find_samples(Riak, ?ACCESS_BUCKET, User,
                                       Start, End, Period),
    %% unreadable slices are reported alongside the grouped usage
    {group_by_node(Usage), Errors}.
%% Regroup a flat list of samples into an orddict keyed by the Riak CS
%% node that recorded each sample; the node-name entry itself is
%% stripped from every sample.
group_by_node(Samples) ->
    AddSample =
        fun(Sample, Acc) ->
                {value, {?NODEKEY, Node}, Rest} =
                    lists:keytake(?NODEKEY, 1, Sample),
                orddict:append(Node, Rest, Acc)
        end,
    lists:foldl(AddSample, orddict:new(), Samples).
-ifdef(TEST).
-ifdef(EQC).
%% EUnit entry point running the QuickCheck property below.
archive_period_test() ->
    true = eqc:quickcheck(archive_period_prop()).

%% make sure archive_period accepts valid periods, but bombs on
%% invalid ones
archive_period_prop() ->
    ?FORALL(I, oneof([rts:valid_period_g(),
                      choose(-86500, 86500)]), % purposely outside day boundary
            begin
                application:set_env(riak_cs, access_archive_period, I),
                case archive_period() of
                    {ok, I} ->
                        valid_period(I);
                    {error, _Reason} ->
                        not valid_period(I)
                end
            end).
%% NOTE(review): despite what the original comment claimed ("an integer
%% 1-86400 that evenly divides 86400"), this predicate only checks that
%% I is a positive integer -- it does NOT verify divisibility of 86400.
valid_period(I) ->
    is_integer(I) andalso I > 0.
%% EUnit entry point running the QuickCheck property below.
make_object_test() ->
    true = eqc:quickcheck(make_object_prop()).

%% check that an archive object is in the right bucket, with a key
%% containing the end time and the username, with application/json as
%% the content type, and a value that is a JSON representation of the
%% sum of each access metric plus start and end times
make_object_prop() ->
    ?FORALL(Accesses,
            list({op_g(), access_g()}),
            begin
                application:set_env(riak_cs, access_archive_period, 60000),
                %% trust rts:make_object_prop to check all of the
                %% bucket/key/time/etc. properties
                User = <<"AAABBBCCCDDDEEEFFF">>,
                T0 = {{2012,02,16},{10,44,00}},
                T1 = {{2012,02,16},{10,45,00}},
                Obj = make_object(User, Accesses, {T0, T1}),
                %% every distinct op name, normalized to a binary key
                Unique = lists:usort(
                           [ if is_atom(K) -> atom_to_binary(K, latin1);
                                is_binary(K) -> K
                             end || {K, _V} <- lists:flatten(Accesses)]),
                {struct, MJ} = mochijson2:decode(
                                 riakc_obj:get_update_value(Obj)),
                %% pair the independently computed sum with the stored one
                Paired = [{{struct, sum_access(K, Accesses)},
                           proplists:get_value(K, MJ)}
                          || K <- Unique],
                ?WHENFAIL(
                   io:format(user, "keys: ~p~nAccesses: ~p~nPaired: ~p~n",
                             [MJ, Accesses, Paired]),
                   [] == [ {X, Y} || {X, Y} <- Paired, X =/= Y ])
            end).
%% create something vaguely user-key-ish; not actually a good
%% representation since user keys are 20-byte base64 strings, not any
%% random character, but this will hopefully find some odd corner
%% cases in case that key format changes
user_key_g() ->
    ?LET(L, ?SUCHTHAT(X, list(char()), X /= []), list_to_binary(L)).

%% one of the operation names used as top-level JSON keys
op_g() ->
    elements([<<"BucketRead">>, <<"BucketCreate">>,
              <<"KeyRead">>, <<"KeyReadACL">>]).

%% create an access proplist (ukeysort keeps it a valid orddict)
access_g() ->
    ?LET(L, list(access_field_g()), lists:ukeysort(1, L)).

%% create one access metric
access_field_g() ->
    {elements([<<"Count">>, <<"SystemErrorCount">>,
               <<"BytesOut">>, <<"BytesOutIncomplete">>]),
     oneof([int(), largeint()])}.
%% sum a given access metric K, given a list of accesses; entries whose
%% op name differs from K are skipped, matching entries are merged with
%% per-key addition.
sum_access(K, Accesses) ->
    Add = fun({Op, Access}, Sum) when Op == K ->
                  orddict:merge(fun(_Key, A, B) -> A + B end, Access, Sum);
             (_, Sum) ->
                  Sum
          end,
    lists:foldl(Add, [], Accesses).
-endif. % EQC
-endif. % TEST
%% -----------------------------------------------------------------------------
%%
%% Hamcrest Erlang.
%%
%% Copyright (c) 2010 <NAME> (<EMAIL>)
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
%% THE SOFTWARE.
%% -----------------------------------------------------------------------------
%% @author <NAME> <<EMAIL>>
%% @copyright 2010 <NAME>.
%% -----------------------------------------------------------------------------
%% module annotations
-module(hamcrest_matchers_SUITE).
-include_lib("common_test/include/ct.hrl").
-include("qc.hrl").
-include("../include/hamcrest.hrl").
-compile(export_all).
-include("test.hrl").
%% CT entry point: install the heckle hook (routed to say/2 below) and
%% register every exported test as a case.
all() ->
    application:set_env(hamcrest, heckle, [?MODULE, say, []]),
    ?CT_REGISTER_TESTS(?MODULE).

%% heckle callback: trace each match attempt to the CT log.
say(MS, Actual) ->
    io:format("checking ~p against ~p~n", [Actual, MS]).
%% anything() must match any term at all.
anything_always_matches(_) ->
    P = ?FORALL(X, binary(),
                true == assert_that(X, is(anything()))),
    ?EQC(P).

%% is/1 wrapping a matcher must be transparent.
is_matches_the_same_way_as_the_underlying_matcher(_) ->
    P = ?FORALL(X, binary(),
                is(equal_to(X)) == equal_to(X)),
    ?EQC(P).

%% is/1 on a plain value implies equal_to.
is_provides_convenient_shortcut_for_equal_to(_) ->
    P = ?FORALL(X, binary(),
                is(X) == equal_to(X)),
    ?EQC(P).

%% is_not/1 must be the boolean negation of the wrapped matcher.
is_not_evaluates_to_logical_negation_of_underlying_matcher(_) ->
    P = ?FORALL(X, {binary(), binary()},
                begin
                    #'hamcrest.matchspec'{matcher=F1} = equal_to(X),
                    #'hamcrest.matchspec'{matcher=F2} = is_not(equal_to(X)),
                    F1(X) == not(F2(X))
                end),
    ?EQC(P).

%% is_not/1 on a plain value implies negated equal_to.
is_not_provides_convenient_shortcut_for_not_equal_to(_) ->
    P = ?FORALL({X, _Y}, {binary(), binary()},
                begin
                    #'hamcrest.matchspec'{matcher=F1} = equal_to(X),
                    #'hamcrest.matchspec'{matcher=F2} = is_not(X),
                    F1(X) == not(F2(X))
                end),
    ?EQC(P).
%% equal_to must be reflexive: X matches itself.
reflexivity_of_equal_to(_) ->
    P = ?FORALL(X, binary(),
                begin
                    Y = X,
                    assert_that(X, equal_to(Y))
                end),
    ?EQC(P).

%% equal_to must be symmetric: if X == Y then Y == X.
symmetry_of_equal_to(_) ->
    P = ?FORALL(X, int(),
                begin
                    Y = X,
                    assert_that(X, equal_to(Y)),
                    Z = Y,
                    assert_that(Z, equal_to(X))
                end),
    ?EQC(P).

%% exactly_equal_to (=:=) must distinguish atom vs string and
%% integer vs float, unlike ==.
exactly_equal_to_works_on_types_and_values(_) ->
    true = assert_that(atom, exactly_equal_to(atom)),
    ?assertException(
       error,
       {assertion_failed,
        [{expected,atom},
         {actual,"atom"},
         {matcher,exactly_equal_to}]},
       assert_that("atom", exactly_equal_to(atom))),
    true = assert_that(1, exactly_equal_to(1)),
    ?assertException(
       error,
       {assertion_failed,
        [{expected, 1},
         {actual, 1.0},
         {matcher, exactly_equal_to}]},
       assert_that(1.0, exactly_equal_to(1))).
%% any_of must succeed when at least one matcher matches (here: all do).
any_of_checks_the_logical_disjunction_of_a_list_of_matchers(_) ->
    P = ?FORALL(XS, list(char()),
                ?IMPLIES(length(XS) > 0,
                         begin
                             M = lists:map(fun(_) -> fun(_) -> true end end, XS),
                             assert_that(ignored, any_of(M))
                         end)),
    ?EQC(P).

%% will_fail/0 matches any fun that raises.
will_fail_asserts_failure(_) ->
    F = fun() -> erlang:error({assertion_failed, "Unexpected value"}) end,
    assert_that(F, will_fail()).

%% will_fail/2 matches a specific class and reason.
will_fail_asserts_failure_against_given_condition(_) ->
    F = fun() -> erlang:error({nomatch, "Unexpected value"}) end,
    assert_that(F, will_fail(error, {nomatch, "Unexpected value"})).

%% TODO: check will_fail during 'failure' conditions
will_fail_should_fail_if_the_operation_succeeds(_) ->
    P = ?FORALL(X, binary(),
                begin
                    F = fun() -> X end,
                    FI = erlang:fun_info(F),
                    ok ==
                        ?assertException(error,
                                         {assertion_failed,
                                          [{expected,
                                            {error,{nomatch,unexpected_value}}},
                                           {actual, FI},
                                           {matcher,will_fail}]},
                                         assert_that(F, will_fail(error, {nomatch, unexpected_value})))
                end),
    ?EQC(P).
%% greater_than must agree with the built-in > on mixed numbers.
greater_than_should_behave_like_built_in_operator(_) ->
    P = ?FORALL({X, Y}, {oneof([int(), real()]), oneof([int(), real()])},
                ?IMPLIES(Y > X,
                         assert_that(Y, greater_than(X)))),
    ?EQC(P).

%% Unlike >, a failed greater_than must raise assertion_failed with a
%% descriptive reason tuple.
greater_than_should_fail_with_error_unlike_built_in_operator(_) ->
    P = ?FORALL({X, Y}, {oneof([int(), real()]), oneof([int(), real()])},
                ?IMPLIES(Y < X,
                         begin
                             try (assert_that(Y, greater_than(X))) of
                                 Term ->
                                     ct:pal("Term = ~p", [Term]), false
                             catch error:Reason ->
                                     assert_that(Reason,
                                                 equal_to({assertion_failed,
                                                           [{expected, X},
                                                            {actual, Y},
                                                            {matcher, greater_than}]}))
                             end
                         end)),
    ?EQC(P).

%% The remaining comparison matchers are checked directly against
%% their raw matcher funs, comparing with the built-in operators.
greater_than_or_equal_to_should_behave_like_built_in_operator(_) ->
    P = ?FORALL({X, Y},
                {oneof([int(), real()]), oneof([int(), real()])},
                begin
                    #'hamcrest.matchspec'{matcher=M} = greater_than_or_equal_to(X),
                    (Y >= X) == M(Y)
                end),
    ?EQC(P).

less_than_should_behave_like_built_in_operator(_) ->
    P = ?FORALL({X, Y},
                {oneof([int(), real()]), oneof([int(), real()])},
                begin
                    #'hamcrest.matchspec'{matcher=M} = less_than(X),
                    (Y < X) == M(Y)
                end),
    ?EQC(P).

less_than_or_equal_to_should_behave_like_built_in_operator(_) ->
    P = ?FORALL({X, Y},
                {oneof([int(), real()]), oneof([int(), real()])},
                begin
                    #'hamcrest.matchspec'{matcher=M} = less_than_or_equal_to(X),
                    (Y =< X) == M(Y)
                end),
    ?EQC(P).
%% contains_string must match any prefix-derived substring of the input.
contains_string_should_get_proper_subset_in_all_cases(_) ->
    P = ?FORALL(X, noshrink(non_empty(list(char()))),
                ?IMPLIES(length(X) > 0,
                         begin
                             Y = round(length(X) / 2),
                             SubStr = string:left(X, Y),
                             true = assert_that(X, contains_string(SubStr))
                         end)),
    ?EQC(P).

%% The empty string is an invalid argument to contains_string/1.
contains_string_should_not_create_matcher_for_empty_strings(_) ->
    ?assertException(error, function_clause, contains_string([])).

%% A string that is not present must make the assertion fail.
contains_string_should_not_match_empty_string(_) ->
    P = ?FORALL(X, list(char()),
                ?IMPLIES(length(X) > 0,
                         assert_that(fun() ->
                                             assert_that(X, contains_string("foo bar baz"))
                                     end, will_fail())
                        )),
    ?EQC(P).
%% starts_with must match the left half of a string and (when the two
%% halves differ) reject the right half.
starts_with_should_only_match_first_portion_of_string(_) ->
    P = ?FORALL(Xs, noshrink(non_empty(list(char()))),
                ?IMPLIES(length(Xs) > 1,
                         begin
                             Y = erlang:max(round(length(Xs) / 2), 2),
                             LStr = string:left(Xs, Y),
                             RStr = string:right(Xs, Y),
                             true = assert_that(Xs, starts_with(LStr)),
                             if RStr /= LStr ->
                                     assert_that(match(Xs, starts_with(RStr)),
                                                 is_false());
                                true -> true
                             end
                         end)),
    ?EQC(P).

%% ends_with must match the right half of a string; failures are
%% logged with the inputs before being reported.
ends_with_should_only_match_last_portion_of_string(_) ->
    P = ?FORALL(Xs, noshrink(non_empty(list(char()))),
                ?IMPLIES(length(Xs) > 0,
                         begin
                             Y = round(length(Xs) / 2),
                             _LStr = string:left(Xs, Y),
                             RStr = string:right(Xs, Y),
                             case (assert_that(Xs, ends_with(RStr))) of
                                 true -> true;
                                 false ->
                                     ct:pal("X = ~p~n", [Xs]),
                                     ct:pal("Y = ~p~n", [Y]),
                                     ct:pal("RStr = ~p~n", [RStr]),
                                     false
                             end
                             %%Val = (ends_with(LStr))(X),
                             %%not Val
                         end)),
    ?EQC(P).
%% has_length must agree with length/1.
has_length_should_match_length(_) ->
    P = ?FORALL(XS, list(int()),
                begin
                    assert_that(XS, has_length(length(XS)))
                end),
    ?EQC(P).

%% reverse_match_mfa applies M:F([Actual | Args]) with the actual value
%% prepended; here lists:member(Head, X) must hold.
match_mfa(_) ->
    P = ?FORALL(X, list(char()),
                ?IMPLIES(length(X) > 0,
                         begin
                             assert_that(hd(X), reverse_match_mfa(lists, member, [X]))
                         end)),
    ?EQC(P).

%% contains_member must work uniformly over lists and the various set
%% representations.
reverse_match_mfa_should_flip_its_arguments(_) ->
    P = ?FORALL(X, list(char()),
                ?IMPLIES(length(X) > 0,
                         begin
                             Head = hd(X),
                             assert_that(X, contains_member(Head)),
                             assert_that(sets:from_list(X), contains_member(Head)),
                             assert_that(gb_sets:from_list(X), contains_member(Head)),
                             assert_that(ordsets:from_list(X), contains_member(Head))
                         end)),
    ?EQC(P).
%% has_same_contents_as is order-insensitive.
has_same_contents_as_should_ignore_order(_) ->
    P = ?FORALL(X, non_empty(list(int())),
                ?IMPLIES(length(X) > 0,
                         assert_that(lists:reverse(X), has_same_contents_as(X)))),
    ?EQC(P).

%% A deduplicated list with its head removed can never have the same
%% contents as an unrelated list, so the assertion must fail.
has_same_contents_as_should_recognise_singular_errors(_) ->
    P = ?FORALL(X, noshrink(non_empty(list(int()))),
                ?IMPLIES(length(sets:to_list(sets:from_list(X))) > 0,
                         begin
                             Xs = sets:to_list(sets:from_list(X)),
                             assert_that(
                               fun() -> assert_that(tl(Xs), has_same_contents_as([a, b, c])) end,
                               will_fail())
                         end)),
    ?EQC(P).

%% Edge case: two empty lists have the same contents.
has_same_contents_as_should_work_for_empty_lists(_) ->
    ?assertThat([], has_same_contents_as([])).
%% A matcher built from a non-existent module or function must not
%% match any input. The original code ended with `M(any_input) == false.'
%% whose boolean result was silently discarded (Common Test treats any
%% non-exception return as a pass), so these cases could never fail;
%% assert via pattern match instead.
match_mfa_should_fail_if_mf_is_invalid(_) ->
    NoSuchMod = non_existing, NoSuchFunc = nor_i,
    #'hamcrest.matchspec'{matcher=M} = match_mfa(NoSuchMod, NoSuchFunc, []),
    false = M(any_input).

match_mfa_should_fail_if_func_is_invalid(_) ->
    NoSuchFunc = this_function_doesnt_exist,
    #'hamcrest.matchspec'{matcher=M} = match_mfa(lists, NoSuchFunc, []),
    false = M(any_input).
%% isalive/isdead must reflect the real process status: a looping
%% process matches isalive, a process that has already exited (we wait
%% for its 'ready' message before it calls exit) matches isdead.
match_is_alive_should_identify_correct_process_status(_) ->
    Loop = fun(L) -> L(L) end,
    OkPid = spawn(fun() -> Loop(Loop) end),
    Sender = self(),
    BadPid = spawn(fun() -> ct:pal("~p dying...", [self()]), Sender ! ready, exit(normal) end),
    receive ready -> ok end,
    ?assertThat(OkPid, isalive()),
    ?assertThat(BadPid, isdead()).
%% isempty must accept the empty form and reject the non-empty form of
%% each supported container type.
is_empty_works_for_lists(_) ->
    ?assertThat([], isempty()),
    Empty = fun hamcrest:isempty/0,
    ?assertThat([1,2,3], is_not(Empty)).

is_empty_works_for_tuples(_) ->
    ?assertThat({}, isempty()),
    Empty = fun hamcrest:isempty/0,
    ?assertThat({ok, server_id}, is_not(Empty)).

is_empty_works_for_sets(_) ->
    ?assertThat(sets:new(), isempty()),
    Empty = fun hamcrest:isempty/0,
    ?assertThat(sets:from_list([1,2,3]), is_not(Empty)).

is_empty_works_for_gb_sets(_) ->
    ?assertThat(gb_sets:new(), isempty()),
    Empty = fun hamcrest:isempty/0,
    ?assertThat(gb_sets:from_list([1,2,3]), is_not(Empty)).

%% Non-container inputs (e.g. integers) must make isempty fail.
is_empty_pukes_for_other_inputs(_) ->
    ?assertThat(
       fun() -> ?assertThat(10, isempty()) end,
       will_fail()
      ).
-module(stream_client_util_spec).
-include_lib("espec.hrl").
%% Top-level espec specification for stream_client_util: one describe
%% block per exported function under test.
spec() ->
describe("stream client util", fun() ->
%% Default headers identify the Twitter API host and the Twerl agent.
describe("#generate_headers", fun() ->
it("should generate Host and User-Agent headers", fun() ->
Result = stream_client_util:generate_headers(),
Expected = [
{"Host", "api.twitter.com"},
{"User-Agent", "Twerl"}
],
?assertEqual(Expected, Result)
end)
end),
%% Basic-auth header is base64("user:pass") prepended to either the
%% default header list or an explicitly supplied one.
describe("#generate_auth_headers", fun() ->
it("should include default headers if none passed", fun() ->
Result = stream_client_util:generate_auth_headers("user", "pass"),
Headers = stream_client_util:generate_headers(),
Expected = [
{"Authorization", "Basic " ++ binary_to_list(base64:encode("user" ++ ":" ++ "pass"))} | Headers
],
?assertEqual(Expected, Result)
end),
it("should allow custom headers to be passed", fun() ->
Result = stream_client_util:generate_auth_headers("user", "pass", []),
Expected = [
{"Authorization", "Basic " ++ binary_to_list(base64:encode("user" ++ ":" ++ "pass"))}
],
?assertEqual(Expected, Result)
end)
end),
%% "follow=" query strings: empty input is an error, user ids are
%% joined with commas.
describe("#userids_to_follow", fun() ->
it("should return an error when no users are passed", fun() ->
Result = stream_client_util:userids_to_follow([]),
Expected = {error, no_args_passed},
?assertEqual(Expected, Result)
end),
it("should return the correct url for one user", fun() ->
Result = stream_client_util:userids_to_follow(["1"]),
Expected = {ok, "follow=1"},
?assertEqual(Expected, Result)
end),
it("should return the correct url for two users", fun() ->
Result = stream_client_util:userids_to_follow(["1", "2"]),
Expected = {ok, "follow=1,2"},
?assertEqual(Expected, Result)
end)
end)
end). | spec/stream_client_util_spec.erl | 0.675336 | 0.459743 | stream_client_util_spec.erl | starcoder
%%==============================================================================
%% Copyright 2016-2021 <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
%%%-------------------------------------------------------------------
%%% @doc
%% Implements Levenshtein distance between two strings
%%% @end
%%%
%% @author <NAME> <<EMAIL>>
%% @copyright (C) 2016-2021, <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(levenshtein).
-copyright('<NAME> <<EMAIL>>').
%% Compiler options
-compile({inline, [{min, 3}]}).
%% Library functions
-export([distance/2]).
%% ===================================================================
%% Library functions.
%% ===================================================================
%%--------------------------------------------------------------------
%% Function: distance(String1 String2) -> Distance.
%% @doc
%% Computes the Levenshtein distance between String1 and String2.
%% @end
%%--------------------------------------------------------------------
-spec distance(string(), string()) -> integer().
%%--------------------------------------------------------------------
%% Levenshtein distance between strings S and T.
%% Against an empty string the distance is the other string's length.
%% Otherwise equal/4 scans the two strings in lock-step: it returns true
%% when they are identical (distance 0) or length(T) on the first
%% mismatch, which seeds row 0 of the dynamic-programming matrix
%% (0..TLen) handed to the row loop i/4.
distance([], S) -> length(S);
distance(S, []) -> length(S);
distance(S, T) ->
case equal(S, T, 0, true) of
true -> 0;
TLen ->
V0 = lists:seq(0, TLen),
i(V0, 0, S, T)
end.
%% ===================================================================
%% Internal functions.
%% ===================================================================
%% Walks S and T together counting matched characters. Returns true when
%% the strings are identical; on the first mismatch (or length
%% difference) returns Len + length(remaining T), which always equals
%% the total length of T -- the DP matrix width used by distance/2.
equal([], [], _, Equal) -> Equal;
equal([H | S], [H | T], Len, true) -> equal(S, T, Len + 1, true);
equal(_, T, Len, _) -> Len + length(T).
%% Row loop of the DP matrix: V0 is the previous row, I its index. Each
%% new row starts with the border value I + 1 followed by the cells
%% computed by j/4. When S is exhausted the distance is the last cell
%% of the final row.
i(V0, _, [], _) -> lists:last(V0);
i(V0, I, [Si | S], T) ->
V1N = [I + 1 | j(Si, T, V0, I + 1)],
i(V1N, I + 1, S, T).
%% Column loop: computes the cells of one row. For each character of T,
%% V0j is the diagonal cell, V0j1 the cell above, V1j the cell to the
%% left. A matching character takes the zero-cost substitution
%% alternative (bare V0j); otherwise all three predecessors cost one
%% edit.
j(_, [], _, _) -> [];
j(Si, [Si | T], [V0j | V0 = [V0j1 | _]], V1j) ->
V1j1 = min(V1j + 1, V0j1 + 1, V0j),
[V1j1 | j(Si, T, V0, V1j1)];
j(Si, [_ | T], [V0j | V0 = [V0j1 | _]], V1j) ->
V1j1 = 1 + min(V1j, V0j1, V0j),
[V1j1 | j(Si, T, V0, V1j1)].
%% Minimum of three integers (inlined via the -compile attribute above).
%% Fixed: both guards previously used strict '<', so a two-way tie on
%% the smallest value fell through to the final clause -- e.g.
%% min(1, 1, 2) returned 2 instead of 1, inflating computed distances.
min(A, B, C) when A =< B, A =< C -> A;
min(_, B, C) when B =< C -> B;
min(_, _, C) -> C.
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2022. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
%% Purpose: Calculate tight bounds for integer operations.
%%
%% Reference:
%%
%% <NAME>, <NAME> (2 ed). <NAME> -
%% Pearson Education, Inc. Chapter 4. Arithmetic Bounds.
%%
%%
-module(beam_bounds).
-export(['band'/2,'bor'/2,'bxor'/2]).
-type range() :: {integer(), integer()} | 'any'.
-spec 'band'(range(), range()) -> range().
%% Range of X band Y for X in {A,B} and Y in {C,D}.
%% Tight bounds are only computed for non-negative operands whose lower
%% bounds fit in 256 bits (the `bsr 256' guard caps the cost of the
%% bit-by-bit scan in min_band/max_band). Otherwise: band with a
%% non-negative range can only clear bits, so the result lies in {0,D}
%% (resp. {0,B}); anything else is unknown.
'band'({A,B}, {C,D}) when A >= 0, A bsr 256 =:= 0, C >= 0, C bsr 256 =:= 0 ->
Min = min_band(A, B, C, D),
Max = max_band(A, B, C, D),
{Min,Max};
'band'(_, {C,D}) when C >= 0 ->
{0,D};
'band'({A,B}, _) when A >= 0 ->
{0,B};
'band'(_, _) ->
any.
-spec 'bor'(range(), range()) -> range().
%% Range of X bor Y for X in {A,B} and Y in {C,D}. Tight bounds are only
%% known for non-negative operands below the 256-bit guard; otherwise
%% the range is unknown.
'bor'({A,B}, {C,D}) when A >= 0, A bsr 256 =:= 0, C >= 0, C bsr 256 =:= 0 ->
Min = min_bor(A, B, C, D),
Max = max_bor(A, B, C, D),
{Min,Max};
'bor'(_, _) ->
any.
-spec 'bxor'(range(), range()) -> range().
%% Range of X bxor Y for X in {A,B} and Y in {C,D}. For non-negative
%% operands the result is non-negative, so the lower bound is simply 0
%% and only the upper bound needs the bitwise scan.
'bxor'({A,B}, {C,D}) when A >= 0, A bsr 256 =:= 0, C >= 0, C bsr 256 =:= 0 ->
Max = max_bxor(A, B, C, D),
{0,Max};
'bxor'(_, _) ->
any.
%% Hacker's Delight "minAND": smallest value of X band Y over
%% A =< X =< B, C =< Y =< D. M scans bit positions from just above the
%% highest possible bit of A bor C down to 0.
min_band(A, B, C, D) ->
M = 1 bsl (upper_bit(A bor C) + 1),
min_band(A, B, C, D, M).
%% Where both lower bounds have a 0 bit, one of them may be raised (set
%% the bit, clear all lower bits via `band -M') to expose more common 1
%% bits -- but only if the raised value still fits its range; otherwise
%% continue with the next lower bit. M = 0 terminates the scan.
min_band(A, _B, C, _D, 0) ->
A band C;
min_band(A, B, C, D, M) ->
if
(bnot A) band (bnot C) band M =/= 0 ->
case (A bor M) band -M of
NewA when NewA =< B ->
min_band(NewA, B, C, D, 0);
_ ->
case (C bor M) band -M of
NewC when NewC =< D ->
min_band(A, B, NewC, D, 0);
_ ->
min_band(A, B, C, D, M bsr 1)
end
end;
true ->
min_band(A, B, C, D, M bsr 1)
end.
%% Hacker's Delight "maxAND": largest value of X band Y over the ranges.
%% The scan starts at the highest bit where the upper bounds differ.
max_band(A, B, C, D) ->
M = 1 bsl upper_bit(B bxor D),
max_band(A, B, C, D, M).
%% Where exactly one upper bound has a 1 bit the other does not, that
%% bound may be lowered (clear the bit, set all lower bits) so more 1
%% bits coincide -- provided it does not drop below its range's lower
%% bound. M = 0 terminates the scan.
max_band(_A, B, _C, D, 0) ->
B band D;
max_band(A, B, C, D, M) ->
if
B band (bnot D) band M =/= 0 ->
case (B band (bnot M)) bor (M - 1) of
NewB when NewB >= A ->
max_band(A, NewB, C, D, 0);
_ ->
max_band(A, B, C, D, M bsr 1)
end;
(bnot B) band D band M =/= 0 ->
case (D band (bnot M)) bor (M - 1) of
NewD when NewD >= C ->
max_band(A, B, C, NewD, 0);
_ ->
max_band(A, B, C, D, M bsr 1)
end;
true ->
max_band(A, B, C, D, M bsr 1)
end.
%% Hacker's Delight "minOR": smallest value of X bor Y over the ranges.
%% The scan starts at the highest bit where the lower bounds differ.
min_bor(A, B, C, D) ->
M = 1 bsl upper_bit(A bxor C),
min_bor(A, B, C, D, M).
%% Where exactly one lower bound has a 1 bit, the other may be raised to
%% that bit (clearing its lower bits) so the 1 bits coincide -- provided
%% the raised value still fits its range. M = 0 terminates the scan.
min_bor(A, _B, C, _D, 0) ->
A bor C;
min_bor(A, B, C, D, M) ->
if
(bnot A) band C band M =/= 0 ->
case (A bor M) band -M of
NewA when NewA =< B ->
min_bor(NewA, B, C, D, 0);
_ ->
min_bor(A, B, C, D, M bsr 1)
end;
A band (bnot C) band M =/= 0 ->
case (C bor M) band -M of
NewC when NewC =< D ->
min_bor(A, B, NewC, D, 0);
_ ->
min_bor(A, B, C, D, M bsr 1)
end;
true ->
min_bor(A, B, C, D, M bsr 1)
end.
%% Hacker's Delight "maxOR": largest value of X bor Y over the ranges.
%% Only bits set in BOTH upper bounds (the intersection) are candidates:
%% one of the bounds can give up such a bit in exchange for setting all
%% lower bits.
max_bor(A, B, C, D) ->
Intersection = B band D,
M = 1 bsl upper_bit(Intersection),
max_bor(Intersection, A, B, C, D, M).
%% Try lowering B (then D) at the candidate bit; accept the first
%% adjustment that stays within the corresponding range. M = 0
%% terminates the scan.
max_bor(_Intersection, _A, B, _C, D, 0) ->
B bor D;
max_bor(Intersection, A, B, C, D, M) ->
if
Intersection band M =/= 0 ->
case (B - M) bor (M - 1) of
NewB when NewB >= A ->
max_bor(Intersection, A, NewB, C, D, 0);
_ ->
case (D - M) bor (M - 1) of
NewD when NewD >= C ->
max_bor(Intersection, A, B, C, NewD, 0);
_ ->
max_bor(Intersection, A, B, C, D, M bsr 1)
end
end;
true ->
max_bor(Intersection, A, B, C, D, M bsr 1)
end.
%% "maxXOR" in the style of Hacker's Delight: largest value of X bxor Y
%% over the ranges. Bits set in both upper bounds cancel in the xor, so
%% one bound may trade such a bit for all lower bits.
max_bxor(A, B, C, D) ->
M = 1 bsl upper_bit(B band D),
max_bxor(A, B, C, D, M).
%% Unlike the band/bor scans, adjusting a bound here continues the scan
%% at the next bit (M bsr 1) rather than terminating, since several
%% shared bits may each be traded. M = 0 terminates the scan.
max_bxor(_A, B, _C, D, 0) ->
B bxor D;
max_bxor(A, B, C, D, M) ->
if
B band D band M =/= 0 ->
case (B - M) bor (M - 1) of
NewB when NewB >= A ->
max_bxor(A, NewB, C, D, M bsr 1);
_ ->
case (D - M) bor (M - 1) of
NewD when NewD >= C ->
max_bxor(A, B, C, NewD, M bsr 1);
_ ->
max_bxor(A, B, C, D, M bsr 1)
end
end;
true ->
max_bxor(A, B, C, D, M bsr 1)
end.
%% Zero-based position of the most significant set bit of Val
%% (returns 0 for both 0 and 1).
upper_bit(Val) ->
upper_bit_1(Val, 0).
%% Shift right until only the top bit remains, counting the shifts.
upper_bit_1(Val0, N) ->
case Val0 bsr 1 of
0 -> N;
Val -> upper_bit_1(Val, N + 1)
end. | lib/compiler/src/beam_bounds.erl | 0.637031 | 0.604282 | beam_bounds.erl | starcoder
%%-------------------------------------------------------------------
%%
%% Copyright (c) 2015, <NAME> <<EMAIL>>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%%-------------------------------------------------------------------
%% @doc Behaviour for implementing meters for `sbroker' and `sregulator'.
%%
%% A custom meter must implement the `sbroker_meter' behaviour. The first
%% callback is `init/2', which starts the meter:
%% ```
%% -callback init(Time :: integer(), Args :: any()) ->
%% {State :: any(), UpdateTime :: integer() | infinity}.
%% '''
%% `Time' is the time, in `native' time units, of the meter at creation. Some
%% other callbacks will receive the current time of the meter as the second last
%% argument. It is monotically increasing, so subsequent calls will have the
%% same or a greater time.
%%
%% `Args' is the arguments for the meter. It can be any term.
%%
%% `State' is the state of the meter and used in the next call.
%%
%% `UpdateTime' represents the next time a meter wishes to call
%% `handle_update/5' to update itself. If a message is not received the update
%% should occur at or after `UpdateTime'. The time must be greater than or equal
%% to `Time'. If a meter does not require an update then `UpdateTime' should be
%% `infinity'.
%%
%% When updating the meter, `handle_update/5':
%% ```
%% -callback handle_update(QueueDelay :: non_neg_integer(),
%% ProcessDelay :: non_neg_integer(),
%% RelativeTime :: integer(), Time :: integer(),
%% State :: any()) ->
%% {NState :: any(), UpdateTime :: integer() | infinity}.
%% '''
%% `QueueDelay' is the approximate time a message spends in the message queue of
%% the process. `ProcessDelay' is the average time spent processing a message
%% since the last update. `RelativeTime' is an approximation of the
%% `RelativeTime' for an `ask' request if a match was to occur immediately. If
%% the process has not matched a request for a significant period of time this
%% value can grow large and become inaccurate.
%%
%% The other variables are equivalent to those in `init/2', with `NState' being
%% the new state.
%%
%% When handling a message, `handle_info/3':
%% ```
%% -callback handle_info(Msg :: any(), Time :: integer(), State :: any()) ->
%% {NState :: any(), TimeoutTime :: integer() | infinity}.
%% '''
%% `Msg' is the message, and may be intended for another callback.
%%
%% The other variables are equivalent to those in `init/2', with `NState' being
%% the new state.
%%
%% The other variables are equivalent to those in `init/2', with `NState' being
%% the new state.
%%
%% When changing the state due to a code change, `code_change/4':
%% ```
%% -callback code_change(OldVsn :: any(), Time :: integer(), State :: any(),
%% Extra :: any()) ->
%% {NState :: any(), TimeoutTime :: integer() | infinity}.
%% '''
%% On an upgrade `OldVsn' is version the state was created with and on an
%% downgrade is the same form except `{down, OldVsn}'. `OldVsn' is defined by
%% the vsn attribute(s) of the old version of the callback module. If no such
%% attribute is defined, the version is the checksum of the BEAM file. `Extra'
%% is from `{advanced, Extra}' in the update instructions.
%%
%% The other variables are equivalent to those in `init/2', with `NState' being
%% the new state.
%%
%% When changing the configuration of a meter, `config_change/3':
%% ```
%% -callback config_change(Args :: any(), Time :: integer(), State :: any()) ->
%% {NState :: any(), TimeoutTime :: integer() | infinity}.
%% '''
%% The variables are equivalent to those in `init/2', with `NState' being the
%% new state.
%%
%% When cleaning up the meter, `terminate/2':
%% ```
%% -callback terminate(Reason :: sbroker_handlers:reason(), State :: any()) ->
%% any().
%% '''
%% `Reason' is `stop' if the meter is being shutdown, `change' if the meter is
%% being replaced by another meter, `{bad_return_value, Return}' if a previous
%% callback returned an invalid term or `{Class, Reason, Stack}' if a previous
%% callback raised an exception.
%%
%% `State' is the current state of the meter.
%%
%% The return value is ignored.
-module(sbroker_meter).
%% private api
-export([code_change/6]).
-export([terminate/3]).
%% types
-callback init(Time :: integer(), Args :: any()) ->
{State :: any(), UpdateTime :: integer() | infinity}.
-callback handle_update(QueueDelay :: non_neg_integer(),
ProcessDelay :: non_neg_integer(),
RelativeTime :: integer(), Time :: integer(),
State :: any()) ->
{NState :: any(), UpdateTime :: integer() | infinity}.
-callback handle_info(Msg :: any(), Time :: integer(), State :: any()) ->
{NState :: any(), UpdateTime :: integer() | infinity}.
-callback code_change(OldVsn :: any(), Time :: integer(), State :: any(),
Extra :: any()) ->
{NState :: any(), TimeoutTime :: integer() | infinity}.
-callback config_change(Args :: any(), Time :: integer(), State :: any()) ->
{NState :: any(), UpdateTime :: integer() | infinity}.
-callback terminate(Reason :: sbroker_handlers:reason(), State :: any()) ->
any().
%% private api
%% @private
-spec code_change(Module, OldVsn, Send, Time, State, Extra) ->
{NState, TimeoutTime} when
Module :: module(),
OldVsn :: any(),
Send :: integer(),
Time :: integer(),
State :: any(),
Extra :: any(),
NState :: any(),
TimeoutTime :: integer() | infinity.
%% Delegates a code-change request to the meter callback module; the
%% Send timestamp is not used by this handler type.
code_change(Module, OldVsn, _Send, Now, MeterState, Extra) ->
    Module:code_change(OldVsn, Now, MeterState, Extra).
%% @private
-spec terminate(Module, Reason, State) -> any() when
Module :: module(),
Reason :: sbroker_handlers:reason(),
State :: any().
%% Delegates meter cleanup to the callback module; the callback's return
%% value is ignored (see the behaviour documentation above).
terminate(Mod, Reason, State) ->
Mod:terminate(Reason, State). | deps/sbroker/src/sbroker_meter.erl | 0.798305 | 0.479077 | sbroker_meter.erl | starcoder
%%==============================================================================
%% Copyright 2021 <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%==============================================================================
%%%-------------------------------------------------------------------
%%% @doc
%%% A JSON stream library based on:
%%% The JavaScript Object Notation (JSON) Data Interchange Format (rfc7159)
%%%
%%% JSON is represented as follows:
%%%
%%% value : true | false | null | object | array | number | string
%%%
%%% object : map
%%% array : [value*]
%%% string : UTF-8 binary
%%% number : integer() | float()
%%% true : atom(true)
%%% false : atom(false)
%%% null : atom(null)
%%%
%%% Strings can be represented by atoms when generating JSON, but will not
%%% not be generated when converting JSON to erlang. Strings are restricted
%%% to UTF-8
%%%
%%% When converting Erlang terms to JSON iolists are generated but
%%% it can generate a binary if so instructed.
%%%
%%% Objects are represented as maps; multiple occurrences of a member are not supported.
%%%
%%% @end
%%%
%% @author <NAME> <<EMAIL>>
%% @copyright (C) 2021, <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
-module(jstream).
-copyright('<NAME> <<EMAIL>>').
%% Library functions
-export([encode/1, encode/2,
decode/1, decode/2]).
%% Exported types
-export_type([json/0]).
%% Types
-type json() :: true | false | null |
number() | jstring() |
object() | array().
-type jstring() :: binary().
-type object() :: map().
-type array() :: [json()].
-type cont() :: {decode, stack()} |
{object, {complete(), expect()}, acc(), stack()} |
{array, {first(), complete()}, array(), stack()} |
{string, binary(), stack()} |
{unescape, binary(), stack()} |
{number, stage(), phase(), list(), stack()}.
-type complete() :: boolean().
-type expect() :: name | comma | colon.
-type first() :: boolean().
-type stage() :: sign | zero | pre | post.
-type phase() :: int | float | exp.
-type acc() :: [{jstring(), json()}].
-type stack() :: [{array, array()} |
{name, acc()} |
{value, {name(), acc()}}
].
-type name() :: jstring().
%% Defines
%% Defines for encode_float/1.
-define(BIG_POW, (1 bsl 52)).
-define(MIN_EXP, (-1074)).
%% Char macros
-define(NULL, 0).
-define(BEL, 7).
-define(BS, 8).
-define(HT, 9).
-define(LF, 10).
-define(VT, 11).
-define(FF, 12).
-define(CR, 13).
-define(SPC, 32).
%% Decode macros
-define(IS_INT(C), C>=$0, C=<$9).
-define(IS_POS_INT(C), C>=$1, C=<$9).
-define(IS_SIGN(C), C == $-; C == $+).
-define(IS_EXP(C), C==$E; C==$e).
-define(ZERO_OR_POST(Stage), Stage == zero; Stage == post).
-define(EXP_ZERO_OR_POST(C, Stage),
((Stage == zero) orelse (Stage == post))
andalso ((C == $E) orelse (C == $e))).
%% ===================================================================
%% Library functions.
%% ===================================================================
%%--------------------------------------------------------------------
%% Function: encode(Term) -> JSON.
%% @doc
%% Encodes the structured Erlang term as an iolist.
%% Equivalent of encode(Term, iolist) -> JSON.
%% @end
%%--------------------------------------------------------------------
-spec encode(json()) -> iodata().
%%--------------------------------------------------------------------
%% Encodes a structured Erlang term as JSON iodata. Maps become objects,
%% non-empty lists become arrays, binaries become strings. Atoms other
%% than the three JSON literals are encoded as strings (encode-only;
%% decoding always yields binaries, per the module doc).
encode(true) -> <<"true">>;
encode(false) -> <<"false">>;
encode(null) -> <<"null">>;
encode(Object = #{}) -> encode_object(Object);
encode([]) -> <<"[]">>;
encode(List = [_ | _]) -> encode_array(List);
encode(I) when is_integer(I) -> integer_to_binary(I);
encode(F) when is_float(F) -> encode_float(F);
encode(String) when is_binary(String) -> encode_string(String);
encode(String) when is_atom(String) ->
encode_string(atom_to_binary(String, utf8)).
%%--------------------------------------------------------------------
%% Function: encode(Term, Options) -> JSON.
%% @doc
%% Encodes the structured Erlang term as an iolist or binary.
%% Encode will give an exception if the erlang term is not well formed.
%% Options are:
%% binary -> a binary is returned
%% iolist -> an iolist is returned (default)
%% @end
%%--------------------------------------------------------------------
%% Fixed spec: it previously admitted only the 'binary' option and a
%% binary() result, contradicting the documented (and implemented)
%% 'iolist' option whose clause returns iodata.
-spec encode(json(), iolist | binary) -> iodata().
%%--------------------------------------------------------------------
encode(V, iolist) -> encode(V);
encode(V, binary) -> iolist_to_binary(encode(V)).
%%--------------------------------------------------------------------
%% Function: decode(JSON) -> {Term, Binary} | {more, Continuation}.
%% @doc
%% Decodes the binary into a tuple of structured Erlang term and the
%% remaining binary or a continuation if the binary did not contain
%% a complete JSON value. The continuation can be used by decode/2 with
%% a binary containing the rest of the JSON value to decode.
%% @end
%%--------------------------------------------------------------------
-spec decode(binary()) -> {json(), binary()} | {more, cont()}.
%%--------------------------------------------------------------------
decode(B) -> do_decode(B, []).
%%--------------------------------------------------------------------
%% Function: decode(JSON, Options) -> Term.
%% @doc
%% Decodes a binary and a continuation into a tuple of structured
%% Erlang term and the remaining binary or a continuation if the binary
%% did not contain a complete JSON value. The continuation can be used
%% with a binary containing the rest of the JSON value to decode.
%% @end
%%--------------------------------------------------------------------
-spec decode(binary(), cont()) -> {json(), binary()} | {more, cont()}.
%%--------------------------------------------------------------------
%% Resumes a suspended decode: dispatches the new input chunk to the
%% parsing function captured in the continuation, restoring its saved
%% state and stack.
decode(B, {decode, S}) -> do_decode(B, S);
decode(B, {object, State, Acc, S}) -> object(B, State, Acc, S);
decode(B, {array, State, Acc, S}) -> array(B, State, Acc, S);
decode(B, {string, Acc, S}) -> string(B, Acc, S);
decode(B, {unescape, Acc, S}) -> unescape(B, Acc, S);
decode(B, {number, State, Phase, Acc, S}) -> number(B, State, Phase, Acc, S).
%% ===================================================================
%% Encoding
%% ===================================================================
%% Encodes a map as a JSON object. element/3 emits each member as
%% ",Name:Value"; the leading comma of the first member is dropped by
%% the [_ | Members] match. Member order follows maps:fold/3 and is
%% therefore unspecified.
encode_object(Object) ->
case maps:fold(fun element/3, [], Object) of
[] -> <<"{}">>;
[_ | Members] -> [<<"{">>, Members, <<"}">>]
end.
element(N, V, Acc) -> [<<",">>, encode(N), <<":">>, encode(V) | Acc].
%% Encodes a non-empty list as a JSON array: the fold builds
%% ",Element" pairs and the [_ | Es] match drops the leading comma.
encode_array(A) ->
[_ | Es] = lists:foldr(fun(E, Acc) -> [<<",">>, encode(E) |Acc] end, [], A),
[<<"[">>, Es, <<"]">>].
%% Wraps String in double quotes, running the escape pass only when the
%% string actually contains a character that must be escaped (avoids
%% copying the common clean case).
encode_string(String) ->
case escapeable(String) of
true -> [<<"\"">>, escape(String, <<>>), <<"\"">>];
false -> [<<"\"">>, String, <<"\"">>]
end.
%% True if String contains at least one character that needs escaping in
%% a JSON string: a control character (0-31), a double quote, a forward
%% slash or a backslash. Assumes the input is valid UTF-8.
escapeable(<<>>) -> false;
escapeable(<<C, _/binary>>) when C =< 31 -> true;
escapeable(<<$", _/binary>>) -> true;
escapeable(<<$/, _/binary>>) -> true;
escapeable(<<$\\, _/binary>>) -> true;
escapeable(<<_/utf8, T/binary>>) -> escapeable(T).
%% Escapes a UTF-8 string for embedding in a JSON string literal
%% (RFC 8259, section 7): the short escapes \b \t \n \f \r \" \/ \\,
%% \uXXXX for the remaining control characters (0-31), and every other
%% character copied through unchanged.
%% Fixed two defects of the previous clause-per-byte version:
%% - control characters 30 and 31 fell through unescaped, although
%% escapeable/1 flags them and RFC 8259 requires escaping all
%% characters below 0x20;
%% - the catch-all appended the decoded code point as a single byte
%% (<<Acc/binary, H>>), truncating/corrupting every character >= 128;
%% it must be re-encoded with the utf8 segment type.
escape(<<>>, Acc) -> Acc;
escape(<<8, T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\b">>); % BS
escape(<<9, T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\t">>); % HT
escape(<<10, T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\n">>); % LF
escape(<<12, T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\f">>); % FF
escape(<<13, T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\r">>); % CR
escape(<<$", T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\\"">>);
escape(<<$/, T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\/">>);
escape(<<$\\, T/binary>>, Acc) -> escape(T, <<Acc/binary, "\\\\">>);
escape(<<C, T/binary>>, Acc) when C =< 31 ->
    %% Control characters without a short escape, uppercase hex as before.
    escape(T, <<Acc/binary, "\\u00", (hex(C bsr 4)), (hex(C band 15))>>);
escape(<<H/utf8, T/binary>>, Acc) ->
    escape(T, <<Acc/binary, H/utf8>>).

%% One uppercase hexadecimal digit for a nibble (0..15).
hex(D) when D < 10 -> $0 + D;
hex(D) -> $A + D - 10.
%% ===================================================================
%% encode_float/1 the implementation based on
%% "Printing Floating-Point Numbers Quickly and Accurately"
%% by <NAME> and <NAME> in Proceedings of the SIGPLAN '96
%% Conference on Programming Language Design and Implementation.
%% ===================================================================
%% Formats a float as the shortest decimal string that reads back to the
%% same double (Steele & White style shortest-round-trip printing; see
%% the reference cited in the section header above). Zero is
%% special-cased; otherwise the IEEE fields are split out, exact digits
%% generated, and the decimal point placed.
encode_float(0.0) -> "0.0";
encode_float(Float) when is_float(Float) ->
{Sign, Frac, Exp} = mantissa_exponent(Float),
{Place, Digits} = float_to_digits(Float, Exp, Frac, (Frac band 1) =:= 0),
insert_decimal(Place, << <<($0 + D)>> || <<D>> <= Digits>>, Sign).
%% Decomposes an IEEE 754 double into {SignPrefix, Mantissa, Exponent2}.
%% Denormals (biased exponent 0) are normalized by shifting the mantissa
%% up to 53 bits; normal numbers get the implicit leading bit added
%% (?BIG_POW = 2^52). Infinity/NaN (BE =:= 2047) deliberately has no
%% clause and would crash.
mantissa_exponent(F) ->
case <<F:64/float>> of
<<Sign:1, 0:11, M:52>> -> % denormalized
E = log2floor(M),
{sign(Sign), M bsl (53 - E), E - 52 - 1075};
<<Sign:1, BE:11, M:52>> when BE < 2047 ->
{sign(Sign), M + ?BIG_POW, BE - 1075}
end.
%% Sign prefix for the formatted number: empty binary for a sign bit of
%% 0, "-" for 1.
sign(0) -> <<>>;
sign(1) -> <<$->>.
%% Sets up the initial exact-integer fractions R/S and the rounding
%% margins M+/M- for digit generation, split by exponent sign, the
%% minimum-exponent boundary, and whether the mantissa sits on a power
%% of two (?BIG_POW), where the margins are asymmetric. Ok is true for
%% an even mantissa, allowing boundary digits to round inward.
float_to_digits(Float, Exp, Frac, Ok) when Exp >= 0, Frac =:= ?BIG_POW ->
BExp = 1 bsl Exp,
scale(Frac * BExp * 4, 4, BExp * 2, BExp, Ok, Float);
float_to_digits(Float, Exp, Frac, Ok) when Exp >=0 ->
BExp = 1 bsl Exp,
scale(Frac * BExp * 2, 2, BExp, BExp, Ok, Float);
float_to_digits(Float, Exp, Frac, Ok) when Exp < ?MIN_EXP ->
BExp = 1 bsl (?MIN_EXP - Exp),
scale(Frac * 2, 1 bsl (1 - Exp), BExp, BExp, Ok, Float);
float_to_digits(Float, Exp, Frac,Ok) when Exp > ?MIN_EXP,Frac =:= ?BIG_POW ->
scale(Frac * 4, 1 bsl (2 - Exp), 2, 1, Ok, Float);
float_to_digits(Float, Exp, Frac, Ok) ->
scale(Frac * 2, 1 bsl (1 - Exp), 1, 1, Ok, Float).
%% Estimates the decimal exponent from log10 of the float (the 1.0e-10
%% fudge term guards against over-estimating on exact powers of ten) and
%% scales R/S accordingly so digit generation can start; fixup/6 then
%% corrects a possible off-by-one in the estimate.
scale(R, S, MPlus, MMinus, Ok, Float) ->
case int_ceil(math:log10(abs(Float)) - 1.0e-10) of
Est when Est >= 0 ->
fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est, Ok);
Est ->
Scale = int_pow(10, -Est),
fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est, Ok)
end.
%% Corrects the decimal-exponent estimate K: when the remainder plus the
%% upper margin reaches S (>= for an even mantissa, > otherwise) the
%% first digit would overflow, so bump K; otherwise scale the remainder
%% up by ten and start generating digits.
fixup(R, S, MPlus, MMinus, K, Ok = true) when R + MPlus >= S ->
{K + 1, generate(R, S, MPlus, MMinus, Ok, <<>>)};
fixup(R, S, MPlus, MMinus, K, Ok = false) when R + MPlus > S ->
{K + 1, generate(R, S, MPlus, MMinus, Ok, <<>>)};
fixup(R, S, MPlus, MMinus, K, Ok) ->
{K, generate(R * 10, S, MPlus * 10, MMinus * 10, Ok, <<>>)}.
%% Digit-generation loop: each step emits the quotient digit of R/S and
%% computes Low/High flags -- whether the remainder has entered the
%% lower (MMinus) or upper (MPlus) rounding margin. The comparisons are
%% inclusive for an even mantissa (Ok = true), strict otherwise.
generate(R0, S, MPlus, MMinus, true, Acc) ->
D = R0 div S,
R = R0 rem S,
generate(R =< MMinus, R + MPlus >= S, D, R, S, MPlus, MMinus, true, Acc);
generate(R0, S, MPlus, MMinus, false, Acc) ->
D = R0 div S,
R = R0 rem S,
generate(R < MMinus, R + MPlus > S, D, R, S, MPlus, MMinus, false, Acc).
%% Termination on {Low, High}: only Low -> keep the digit; both -> round
%% by comparing twice the remainder with S; only High -> round up;
%% neither -> scale by ten and continue with the next digit.
generate(true, false, D, _, _, _, _, _, Acc) -> <<Acc/binary, D>>;
generate(true, true, D, R, S, _, _, _, Acc) when R * 2 < S -> <<Acc/binary, D>>;
generate(true, true, D, _, _, _, _, _, Acc) -> <<Acc/binary, (D + 1)>>;
generate(false, true, D, _, _, _, _, _, Acc) -> <<Acc/binary, (D + 1)>>;
generate(false, false, D, R, S, MPlus, MMinus, Ok, Acc) ->
generate(R * 10, S, MPlus * 10, MMinus * 10, Ok, <<Acc/binary,D>>).
%% Places the decimal point in the digit string S given the decimal
%% exponent Place; Sign is <<>> or <<"-">>. Plain "0.00ddd" / "ddd0.0"
%% notation is used only while it is no longer than the equivalent
%% exponent notation (insert_exp/3); the byte_size comparisons against
%% the rendered exponent implement that trade-off.
insert_decimal(0, S, Sign) -> <<Sign/binary, "0.", S/binary>>;
insert_decimal(Place, <<S>>, Sign) when Place < 0, Place > -4 ->
<<Sign/binary, "0.", (binary:copy(<<$0>>, -Place))/binary, S>>;
insert_decimal(Place, S = <<_>>, Sign) when Place < 0 ->
insert_exp(S, integer_to_binary(Place - 1), Sign);
insert_decimal(Place, S, Sign) when Place < 0 ->
ExpL = integer_to_binary(Place - 1),
case -Place =< byte_size(ExpL) of
true ->
Naughts = binary:copy(<<$0>>, -Place),
<<Sign/binary, "0.", Naughts/binary, S/binary>>;
false ->
insert_exp(S, ExpL, Sign)
end;
insert_decimal(Place, S = <<_>>, Sign) ->
ExpL = integer_to_binary(Place - 1),
case Place =< byte_size(ExpL) + 2 of
true ->
Naughts = binary:copy(<<$0>>, Place - 1),
<<Sign/binary, S/binary, Naughts/binary, ".0">>;
false ->
insert_exp(S, ExpL, Sign)
end;
insert_decimal(Place, S, Sign) when Place >= byte_size(S) ->
L = byte_size(S),
ExpL = integer_to_binary(Place - 1),
case Place - L =< byte_size(ExpL) of
true ->
Naughts = binary:copy(<<$0>>, Place - L),
<<Sign/binary, S/binary, Naughts/binary, ".0">>;
false ->
insert_exp(S, ExpL, Sign)
end;
%% Point falls inside the digit string: split and join with ".".
insert_decimal(Place, S, Sign) ->
Int = binary_part(S, {0, Place}),
Frac = binary_part(S, {Place, byte_size(S) - Place}),
<<Sign/binary, Int/binary, ".", Frac/binary>>.
%% Renders the digit string in exponent notation: "D.0eEXP" for a single
%% digit (the singleton clause must come first so it wins the match),
%% otherwise "D.DDDeEXP".
insert_exp(<<First>>, Exponent, SignPrefix) ->
    <<SignPrefix/binary, First, ".0e", Exponent/binary>>;
insert_exp(<<First, Rest/binary>>, Exponent, SignPrefix) ->
    <<SignPrefix/binary, First, ".", Rest/binary, "e", Exponent/binary>>.
%% Smallest integer greater than or equal to the float X.
int_ceil(X) when is_float(X) ->
    Truncated = trunc(X),
    if
        X > Truncated -> Truncated + 1;
        true -> Truncated
    end.
%% Integer exponentiation X^N for N >= 0, by binary exponentiation
%% (square-and-multiply on the bits of N).
int_pow(X, 0) when is_integer(X) -> 1;
int_pow(X, N) when is_integer(X), is_integer(N), N > 0 -> int_pow(X, N, 1).

int_pow(Base, N, Acc) when N < 2 -> Acc * Base;
int_pow(Base, N, Acc) ->
    NextAcc = case N band 1 of
                  1 -> Acc * Base;
                  0 -> Acc
              end,
    int_pow(Base * Base, N bsr 1, NextAcc).
%% Bit length of a positive integer, i.e. floor(log2(Int)) + 1
%% (so log2floor(1) =:= 1, matching the normalization arithmetic in
%% mantissa_exponent/1).
log2floor(Int) when is_integer(Int), Int > 0 -> log2floor(Int, 0).

log2floor(0, Bits) -> Bits;
log2floor(Remaining, Bits) -> log2floor(Remaining bsr 1, Bits + 1).
%% ===================================================================
%% Decoding
%% ===================================================================
%% Decodes the next JSON value: skips insignificant whitespace, then
%% dispatches on the first character to the literal, object, array,
%% string or number parser. S is the continuation stack of enclosing
%% containers; exhausted input yields a {more, ...} continuation.
%% Fixed: the empty-input clause returned {more, {decode, args = S}}.
%% `args = S' is a pattern match against the atom 'args' and raised
%% badmatch for every real stack value, so suspending at the top level
%% (e.g. decode(<<>>)) crashed instead of returning a continuation; the
%% cont() type declares {decode, stack()}.
%% Also collapsed the ten per-digit clauses into one guarded clause.
do_decode(<<>>, S) -> {more, {decode, S}};
do_decode(<<?HT, T/binary>>, S) -> do_decode(T, S);
do_decode(<<?LF, T/binary>>, S) -> do_decode(T, S);
do_decode(<<?CR, T/binary>>, S) -> do_decode(T, S);
do_decode(<<?SPC, T/binary>>, S) -> do_decode(T, S);
do_decode(<<"true", T/binary>>, S) -> pop(true, T, S);
do_decode(<<"false", T/binary>>, S) -> pop(false, T, S);
do_decode(<<"null", T/binary>>, S) -> pop(null, T, S);
do_decode(<<${, T/binary>>, S) -> object(T, {true, name}, [], S);
do_decode(<<$[, T/binary>>, S) -> array(T, {false, false}, [], S);
do_decode(<<$", T/binary>>, S) -> string(T, <<>>, S);
do_decode(<<$-, T/binary>>, S) -> number(T, pre, int, [$-], S);
do_decode(B = <<C, _/binary>>, S) when C >= $0, C =< $9 ->
    number(B, pre, int, [], S).
%% object(Bin, State, AccPairs, Stack) -> result.
%% Parse the inside of a JSON object. State is a pair of flags that drives
%% which tokens are legal next: `}` may close when the first flag is true,
%% `,` only in {true, comma} (i.e. after a member), a member name (`"`)
%% when the second flag is name, and `:` only in {false, colon}.
%% Completed objects become maps via maps:from_list/1 and are handed to
%% the enclosing continuation through pop/3.
object(<<>>, State, Acc, S) -> {more, {object, State, Acc, S}};
object(<<?HT, T/binary>>, State, Acc, S) -> object(T, State, Acc, S);
object(<<?LF, T/binary>>, State, Acc, S) -> object(T, State, Acc, S);
object(<<?CR, T/binary>>, State, Acc, S) -> object(T, State, Acc, S);
object(<<?SPC, T/binary>>, State, Acc,S) -> object(T, State, Acc, S);
object(<<$}, T/binary>>, {true,_}, Acc, S) -> pop(maps:from_list(Acc), T, S);
object(<<$,, T/binary>>, {true, comma}, Acc, S) -> object(T,{false,name},Acc,S);
object(<<$", T/binary>>, {_, name}, Acc, S) -> string(T, <<>>, [{name, Acc}|S]);
object(<<$:, T/binary>>, {false, colon},Acc,S) -> do_decode(T,[{value,Acc}|S]).
%% array(Bin, State, RevElems, Stack) -> result.
%% Parse the inside of a JSON array; elements are accumulated in reverse.
%% State flags control legality of the next token: a new element may start
%% when the second flag is false, `]` may close when the first flag is
%% false, and `,` is only accepted in {false, true} (right after an
%% element). pop/3 re-enters with {false, true} after each element.
array(<<>>, State, Acc, S) -> {more, {array, State, Acc, S}};
array(<<?HT, T/binary>>, State, Acc, S) -> array(T, State, Acc, S);
array(<<?LF, T/binary>>, State, Acc, S) -> array(T, State, Acc, S);
array(<<?CR, T/binary>>, State, Acc, S) -> array(T, State, Acc, S);
array(<<?SPC, T/binary>>, State, Acc, S) -> array(T, State, Acc, S);
array(<<$,, T/binary>>, {false, true}, Acc, S) -> array(T, {true, false},Acc,S);
array(<<$], T/binary>>, {false, _}, Acc, S) -> pop(lists:reverse(Acc), T, S);
array(T, {_, false}, Acc, S) -> do_decode(T, [{array, Acc} | S]).
%% string(Bin, Acc, Stack) -> result.
%% Accumulate a JSON string as a UTF-8 binary. A backslash switches to
%% unescape/3, a closing quote delivers the finished string via pop/3,
%% and any other codepoint is appended verbatim.
string(<<>>, Acc, S) -> {more, {string, Acc, S}};
string(<<$\\, T/binary>>, Acc, S) -> unescape(T, Acc, S);
string(<<$", T/binary>>, Acc, S) -> pop(Acc, T, S);
string(<<H/utf8, T/binary>>, Acc, S) -> string(T, <<Acc/binary, H/utf8>>, S).
%% unescape(Bin, Acc, Stack) -> result.
%% Handle the character following a backslash inside a string. Beyond the
%% standard JSON escapes this decoder is lenient and also accepts
%% \0 \a \v \s; an unrecognized escape is kept verbatim, backslash included.
%%
%% Fix: \uXXXX escapes are four *hexadecimal* digits per RFC 8259; the
%% previous code used list_to_integer/1, which parses base 10 (so e.g.
%% "\u0041" decoded to codepoint 41 instead of 65, and any escape
%% containing a-f crashed).
unescape(<<>>, Acc, S) -> {more, {unescape, Acc, S}};
unescape(<<$", T/binary>>, Acc, S) -> string(T, <<Acc/binary, $">>, S);
unescape(<<$\\, T/binary>>, Acc, S) -> string(T, <<Acc/binary,$\\ >>, S);
unescape(<<$/, T/binary>>, Acc, S) -> string(T, <<Acc/binary, $/>>, S);
unescape(<<$0, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?NULL>>,S);
unescape(<<$a, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?BEL>>, S);
unescape(<<$b, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?BS>>, S);
unescape(<<$t, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?HT>>, S);
unescape(<<$n, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?LF>>, S);
unescape(<<$f, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?FF>>, S);
unescape(<<$v, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?VT>>, S);
unescape(<<$r, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?CR>>, S);
unescape(<<$s, T/binary>>, Acc, S) -> string(T, <<Acc/binary, ?SPC>>, S);
unescape(<<$u, A, B, C, D, T/binary>>, Acc, S) ->
    string(T, <<Acc/binary, (list_to_integer([A, B, C, D], 16))/utf8>>, S);
unescape(<<H, T/binary>>, Acc, S) ->
    string(T, <<Acc/binary, $\\, H>>, S).
%% number(Bin, Stage, Phase, RevAcc, Stack) -> result.
%% Accumulate the characters of a JSON number in reverse. Phase is
%% int | float | exp; Stage tracks progress within the phase
%% (pre | zero | sign | post). Once no further number character fits, the
%% reversed accumulator is converted with list_to_integer/1 (int phase)
%% or list_to_float/1 (float/exp); note the "e" transition out of a bare
%% int injects ".0" so list_to_float/1 accepts it.
%% NOTE(review): the final <<>> clause is shadowed by the two conversion
%% clauses whenever Stage is zero/post, so a number ending exactly at a
%% chunk boundary is emitted immediately instead of returning
%% {more, ...} — confirm this is the intended streaming behaviour.
number(<<$0, T/binary>>, pre, int, Acc, S) -> number(T, zero, int, [$0|Acc],S);
number(<<H, T/binary>>, pre, exp, Acc, S) when ?IS_SIGN(H) ->
number(T, sign, exp, [H | Acc], S);
number(<<H, T/binary>>, pre, exp, Acc, S) when ?IS_INT(H) ->
number(T, post, exp, [H | Acc], S);
number(<<H, T/binary>>, pre, float, Acc, S) when ?IS_INT(H) ->
number(T, post, float, [H | Acc], S);
number(<<H, T/binary>>, pre, Phase, Acc, S) when ?IS_POS_INT(H) ->
number(T, post, Phase, [H | Acc], S);
number(<<H, T/binary>>, sign, Phase, Acc, S) when ?IS_INT(H) ->
number(T, post, Phase, [H | Acc], S);
number(<<H, T/binary>>, post, Phase, Acc, S) when ?IS_INT(H) ->
number(T, post, Phase, [H | Acc], S);
number(<<$., T/binary>>,Stage,int,Acc,S) when ?ZERO_OR_POST(Stage) ->
number(T, pre, float, [$. | Acc], S);
number(<<E,T/binary>>,Stage,int,Acc,S) when ?EXP_ZERO_OR_POST(E, Stage) ->
number(T, pre, exp, [E, $0, $. | Acc], S);
number(<<E, T/binary>>, post, float, Acc, S) when ?IS_EXP(E) ->
number(T, pre, exp, [E | Acc], S);
number(B, Stage, int, Acc, S) when ?ZERO_OR_POST(Stage) ->
pop(list_to_integer(lists:reverse(Acc)), B, S);
number(B, post, _, Acc, S) ->
pop(list_to_float(lists:reverse(Acc)), B, S);
number(<<>>, State, Phase, Acc, S) ->
{more, {number, State, Phase, Acc, S}}.
%% pop(Value, RestBin, Stack) -> result.
%% A complete value has been parsed: hand it to the continuation of the
%% enclosing container on the stack, or return {Value, Rest} at top level.
%% An array re-enters expecting `,` or `]`; a finished member name waits
%% for `:`; a finished member value is recorded and the object expects
%% `,` or `}`.
%% (Also removes trailing dataset-artifact text that had been fused onto
%% the last line and would not compile.)
pop(V, B, []) -> {V, B};
pop(V, B, [{array, Acc} | S]) -> array(B, {false, true}, [V | Acc], S);
pop(N, B, [{name, Acc} | S]) -> object(B, {false, colon}, {N, Acc}, S);
pop(V, B, [{value, {N, Acc}} | S]) -> object(B, {true, comma}, [{N, V} | Acc], S).
%% Copyright (c) 2014 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : erlog_db_dict.erl
%% Author : <NAME>
%% Purpose : Interface to an erlog database built with dict.
%% The database is a dict where the key is the functor pair {Name,Arity}.
%% The value is: built_in |
%% {clauses,NextTag,[{Tag,Head,Body}]} |
%% {code,{Module,Function}}.
%% Built-ins are defined by the system and cannot manipulated by user
%% code.
-module(erlog_db_dict).
-export([new/1]).
-export([add_built_in/2,add_compiled_proc/4,asserta_clause/4,assertz_clause/4]).
-export([retract_clause/3,abolish_clauses/2]).
-export([get_procedure/2,get_procedure_type/2]).
-export([get_interpreted_functors/1]).
%% Return {ok,E} or catch thrown error and just return it.
%% Used by the dict:update/4 based mutators below: their update funs
%% throw 'error' when the stored entry is of an incompatible kind
%% (e.g. a built-in), which this macro converts into a plain 'error'
%% return value.
-define(RET_CATCH(E), try
{ok,E}
catch
throw:Error -> Error
end).
%% new(InitArgs) -> Db.
%% Create an empty clause database; the init arguments are ignored.
new(_InitArgs) ->
    dict:new().
%% add_built_in(Db, Functor) -> Db.
%% Register Functor as a built-in predicate, replacing any existing entry.
add_built_in(Database, Functor) ->
    dict:store(Functor, built_in, Database).
%% add_compiled_proc(Db, Functor, Module, Function) -> {ok,Db} | error.
%% Record Functor as a compiled procedure implemented by Module:Function.
%% Returns 'error' if the functor is already a built-in (the update fun
%% throws, which the try below converts into the error return).
add_compiled_proc(Db, Functor, M, F) ->
    Code = {code, {M, F}},
    Update = fun (built_in) -> throw(error);
                 (_Existing) -> Code
             end,
    try
        {ok, dict:update(Functor, Update, Code, Db)}
    catch
        throw:Error -> Error
    end.
%% asserta_clause(Db, Functor, Head, Body) -> {ok,NewDb} | error.
%% Prepend a clause (Prolog asserta/1). Each clause gets a monotonically
%% increasing tag used later by retract_clause/3. The format is NOT
%% checked; the term goes straight into the database. Returns 'error'
%% when the functor holds a built-in or compiled procedure.
asserta_clause(Db, Functor, Head, Body) ->
    Prepend = fun ({clauses, NextTag, Clauses}) ->
                      {clauses, NextTag + 1, [{NextTag, Head, Body} | Clauses]};
                  (_Other) -> throw(error)
              end,
    try
        {ok, dict:update(Functor, Prepend, {clauses, 1, [{0, Head, Body}]}, Db)}
    catch
        throw:Error -> Error
    end.
%% assertz_clause(Db, Functor, Head, Body) -> {ok,NewDb} | error.
%% Append a clause (Prolog assertz/1); otherwise identical to
%% asserta_clause/4. Appending keeps clause order but is O(n) in the
%% number of existing clauses.
assertz_clause(Db, Functor, Head, Body) ->
    Append = fun ({clauses, NextTag, Clauses}) ->
                     {clauses, NextTag + 1, Clauses ++ [{NextTag, Head, Body}]};
                 (_Other) -> throw(error)
             end,
    try
        {ok, dict:update(Functor, Append, {clauses, 1, [{0, Head, Body}]}, Db)}
    catch
        throw:Error -> Error
    end.
%% retract_clause(Db, Functor, ClauseTag) -> {ok,NewDb} | error.
%% Remove the clause tagged ClauseTag from Functor's clause list.
%% Built-ins and compiled procedures cannot be retracted ('error');
%% retracting from an unknown functor is a no-op.
retract_clause(Db, Functor, Tag) ->
    case dict:find(Functor, Db) of
        {ok, {clauses, NextTag, Clauses}} ->
            Remaining = lists:keydelete(Tag, 1, Clauses),
            {ok, dict:store(Functor, {clauses, NextTag, Remaining}, Db)};
        {ok, _NotInterpreted} ->
            error;
        error ->
            {ok, Db}
    end.
%% abolish_clauses(Db, Functor) -> {ok,NewDb} | error.
%% Remove a whole procedure. Built-ins are immutable ('error'); compiled
%% and interpreted procedures are erased; unknown functors are a no-op.
abolish_clauses(Db, Functor) ->
    case dict:find(Functor, Db) of
        {ok, built_in} -> error;
        {ok, {code, _}} -> {ok, dict:erase(Functor, Db)};
        {ok, {clauses, _, _}} -> {ok, dict:erase(Functor, Db)};
        error -> {ok, Db}
    end.
%% get_procedure(Db, Functor) ->
%%     built_in | {code,{Mod,Func}} | {clauses,[Clause]} | undefined.
%% Fetch the procedure data for a functor; interpreted procedures are
%% returned without their internal next-tag counter.
get_procedure(Db, Functor) ->
    case dict:find(Functor, Db) of
        {ok, built_in} -> built_in;
        {ok, {code, _} = Code} -> Code;
        {ok, {clauses, _NextTag, Clauses}} -> {clauses, Clauses};
        error -> undefined
    end.
%% get_procedure_type(Db, Functor) ->
%%     built_in | compiled | interpreted | undefined.
%% Classify a functor's procedure without returning its data.
get_procedure_type(Db, Functor) ->
    case dict:find(Functor, Db) of
        {ok, built_in} -> built_in;
        {ok, {code, _}} -> compiled;
        {ok, {clauses, _, _}} -> interpreted;
        error -> undefined
    end.
%% get_interpreted_functors(Db) -> [Functor].
%% List the functors stored as interpreted clause lists ({clauses,_,_});
%% built-ins and compiled code are skipped. Order is unspecified
%% (dict:fold/3 iteration order).
%% (Also fixes the "intepreted" typo in the old comment and removes
%% trailing dataset-artifact text fused onto the last line.)
get_interpreted_functors(Db) ->
    dict:fold(fun (Func, {clauses, _, _}, Fs) -> [Func | Fs];
                  (_, _, Fs) -> Fs
              end, [], Db).
%% Copyright 2018 Erlio GmbH Basel Switzerland (http://erl.io)
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(vmq_diversity_script).
-behaviour(gen_server).
%% API functions
-export([start_link/1,
stats/1,
call_function/3]).
%% gen_server callbacks
-export([init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3]).
-record(state, {luastates=[],
working_luastates=[],
queue=[],
samples= #{}}).
-define(MAX_SAMPLES, 100).
%%%===================================================================
%%% API functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Starts the server
%%
%% @spec start_link() -> {ok, Pid} | ignore | {error, Error}
%% @end
%%--------------------------------------------------------------------
%% StatePids: the vmq_diversity_script_state worker pids forming the pool
%% of Lua states this (unregistered) proxy server schedules requests over.
start_link(StatePids) ->
gen_server:start_link(?MODULE, [StatePids], []).
%% Fetch and reset the per-function timing samples collected by the
%% server (see avg_t/1 for the reply shape). Synchronous, no timeout.
stats(Pid) ->
gen_server:call(Pid, stats, infinity).
%% call_function(Pid, Function, Args) -> term() | error.
%% Invoke a function inside the Lua sandbox. The call is shielded so a
%% crash in the sandbox cannot take the calling session/queue down with
%% it: vmq_plugin invocations are not wrapped in try/catch by the caller.
%%
%% Rewritten from the old-style `catch Expr` (which conflates throws,
%% errors and exits and loses the stacktrace) to an equivalent explicit
%% try/catch: thrown values are still returned as-is, while errors and
%% exits are logged and collapsed to 'error', exactly as before.
call_function(Pid, Function, Args) ->
    try
        gen_server:call(Pid, {call_function, Function, Args}, infinity)
    catch
        throw:Value ->
            %% old `catch` semantics: a thrown value is the return value
            Value;
        exit:Reason ->
            lager:error("can't call into Lua sandbox for function ~p due to ~p",
                        [Function, Reason]),
            error;
        error:Reason:Stacktrace ->
            %% old `catch` wrapped errors as {'EXIT',{Reason,Stack}}; keep
            %% the same information in the log message
            lager:error("can't call into Lua sandbox for function ~p due to ~p",
                        [Function, {Reason, Stacktrace}]),
            error
    end.
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Initializes the server
%%
%% @spec init(Args) -> {ok, State} |
%% {ok, State, Timeout} |
%% ignore |
%% {stop, Reason}
%% @end
%%--------------------------------------------------------------------
%% @private All supplied vmq_diversity_script_state pids start out idle.
init([StatePids]) ->
{ok, #state{luastates=StatePids}}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling call messages
%%
%% @spec handle_call(Request, From, State) ->
%% {reply, Reply, State} |
%% {reply, Reply, State, Timeout} |
%% {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, Reply, State} |
%% {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% @private 'call_function' requests are queued and dispatched to an idle
%% Lua state asynchronously; the gen_server reply happens later in
%% handle_info/2 once the Lua state answers. 'stats' replies with the
%% per-function averages and resets the sample map.
handle_call({call_function, Function, Args}, From, State) ->
NewState = queue_function_call(Function, Args, From, State),
{noreply, schedule_function_call(NewState)};
handle_call(stats, _From, #state{samples=Samples} = State) ->
{reply, avg_t(Samples), State#state{samples=#{}}}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling cast messages
%%
%% @spec handle_cast(Msg, State) -> {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% @private This server receives no casts; drain anything that arrives.
handle_cast(_Request, State) ->
    {noreply, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Handling all non call/cast messages
%%
%% @spec handle_info(Info, State) -> {noreply, State} |
%% {noreply, State, Timeout} |
%% {stop, Reason, State}
%% @end
%%--------------------------------------------------------------------
%% @private A Lua state finished a request: reply to the original caller,
%% record the call duration, return the Lua state to the idle pool
%% (appended, i.e. round-robin) and try to dispatch the next queued
%% request. Unknown Refs are ignored.
handle_info({call_function_response, Ref, Reply},
            #state{luastates=LuaStates, working_luastates=WorkingLuaStates} = State) ->
    case lists:keyfind(Ref, 1, WorkingLuaStates) of
        false ->
            {noreply, State};
        {Ref, LuaStatePid, Item} ->
            {From, Function, _, Ts1} = Item,
            Ts2 = os:timestamp(),
            gen_server:reply(From, Reply),
            {noreply, schedule_function_call(
                        ch_state(Function, Ts1, Ts2,
                                 State#state{
                                   %% round-robin: append instead of prepend
                                   luastates=LuaStates ++ [LuaStatePid],
                                   working_luastates=lists:keydelete(
                                                       Ref, 1, WorkingLuaStates
                                                      )}))}
    end;
handle_info(_Info, State) ->
    %% Fix: previously any other message (e.g. a stray 'DOWN' or late
    %% timer) crashed the server with function_clause; ignore it instead,
    %% per standard OTP practice for unexpected info messages.
    {noreply, State}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function is called by a gen_server when it is about to
%% terminate. It should be the opposite of Module:init/1 and do any
%% necessary cleaning up. When it returns, the gen_server terminates
%% with Reason. The return value is ignored.
%%
%% @spec terminate(Reason, State) -> void()
%% @end
%%--------------------------------------------------------------------
%% @private No resources to release on shutdown.
terminate(_Reason, _State) ->
    ok.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Convert process state when code is changed
%%
%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
%% @end
%%--------------------------------------------------------------------
%% @private No state migration needed between code versions.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%% Prepend a pending request {From, Function, Args, EnqueueTs} to the
%% queue. The queue is kept newest-first; schedule_function_call/1
%% reverses it to dispatch FIFO.
queue_function_call(Function, Args, From, #state{queue=Queue} = State) ->
Item = {From, Function, Args, os:timestamp()},
State#state{queue=[Item|Queue]}.
%% Pop the oldest queued request (FIFO; the queue is stored newest-first)
%% and dispatch it to the first idle Lua state, moving that state onto
%% the working list keyed by the request Ref. No-op when the queue is
%% empty or every Lua state is busy.
%%
%% Improvement: the first clause previously guarded with
%% `when length(Queue) > 0`, an O(n) walk of the queue on every call;
%% matching the non-empty-list pattern [_|_] is O(1) and equivalent.
schedule_function_call(#state{queue=[_|_] = Queue,
                              luastates=[LuaStatePid|LuaStatesRest],
                              working_luastates=WorkingLuaStates} = State) ->
    %% lists:reverse/1 gives oldest-first order; O(n) per dispatch.
    [{_From, Function, Args, _Ts} = Item|NewQueueRev] = lists:reverse(Queue),
    Ref = vmq_diversity_script_state:call_function(LuaStatePid, Function, Args),
    State#state{
      queue=lists:reverse(NewQueueRev),
      luastates=LuaStatesRest,
      working_luastates=[{Ref, LuaStatePid, Item}|WorkingLuaStates]};
schedule_function_call(State) ->
    %% All Lua states currently occupied or no item in the queue.
    State.
%% Fold one observed call duration (Ts1..Ts2) for Function into the
%% rolling sample map kept in the server state.
ch_state(Function, Ts1, Ts2, #state{samples=Samples} = State) ->
State#state{samples=add_ts(Function, Ts1, Ts2, Samples)}.
%% Record one call duration (in microseconds, newest first) for Function,
%% capping the per-function window at ?MAX_SAMPLES entries by dropping
%% the oldest sample.
add_ts(Function, Ts1, Ts2, Samples) ->
    Elapsed = timer:now_diff(Ts2, Ts1),
    Previous = maps:get(Function, Samples, []),
    Window = case length(Previous) < ?MAX_SAMPLES of
                 true -> [Elapsed | Previous];
                 false -> lists:droplast([Elapsed | Previous])
             end,
    maps:put(Function, Window, Samples).
%% avg_t(Samples) -> [{Function, AvgMicroseconds}].
%% Arithmetic mean of the collected durations per function. Sample lists
%% are always non-empty (see add_ts/4), so the division is safe. Order of
%% the result list follows maps:fold/3 iteration order (unspecified).
%% (Also removes trailing dataset-artifact text fused onto the last line.)
avg_t(Samples) ->
    maps:fold(fun(K, V, Acc) ->
                      [{K, lists:sum(V) / length(V)}|Acc]
              end, [], Samples).
%% Copyright (c) 2008-2013 <NAME>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%% File : erlog_parse.erl
%% Author : <NAME>
%% Purpose : Erlog parser
%%
%% Parses Erlog tokens into Erlog terms. Based on the Standard prolog
%% parser and directly coded from the parser description. To handle
%% back-tracking in the parser we use a continuation style using funs
%% where each fun handles one step of what follows. This allows
%% back-tracking. This may not be a specially efficient way of
%% parsing but it is simple and easy to derive from the
%%% description. No logical variables are necessary here.
-module(erlog_parse).
-export([term/1,term/2,format_error/1]).
-export([prefix_op/1,infix_op/1,postfix_op/1]).
-compile({nowarn_unused_function,[type/1,line/1,val/1]}).
%% -compile(export_all).
%% term(Tokens) -> {ok,Term} | {error,{Line,Module,Error}}.
%% term(Tokens, Line) -> same.
%% Parse one full Prolog term from a token list; the continuation
%% all_read/2 demands that a single '.' token is all that remains.
%% NOTE(review): term/2's second argument (a line number) is ignored.
term(Toks) -> term(Toks, 1).
term(Toks, _) ->
case term(Toks, 1200, fun(Ts, T) -> all_read(Ts, T) end) of
{succeed,Term} -> {ok,Term};
{fail,{Line,Error}} -> {error,{Line,?MODULE,Error}}
end.
%% all_read(RestTokens, Term) -> {succeed,Term} | {fail,{Line,Error}}.
%% Final continuation: only a single end-of-clause '.' token may remain.
%% Line 9999 is a sentinel for "ran out of tokens".
all_read([{'.',_}], Term) -> {succeed,Term};
all_read([{T,L}|_], _) -> syntax_error(L, {operator_expected,T});
all_read([{_,L,V}|_], _) -> syntax_error(L, {operator_expected,V});
all_read([], _) -> syntax_error(9999, premature_end).
%% syntax_error(Line, Error) -> {fail,{Line,Error}}.
%% Build the uniform parser failure value.
syntax_error(Line, Error) -> {fail, {Line, Error}}.
%% format_error(Error) -> chardata().
%% Render a parser error term as human-readable text (io_lib:fwrite/2
%% returns an iolist; callers flatten at the I/O boundary if needed).
format_error(premature_end) -> "premature end";
format_error({operator_expected, Token}) ->
    io_lib:fwrite("operator expected before: ~w", [Token]);
format_error({illegal, Token}) ->
    io_lib:fwrite("illegal token: ~w", [Token]);
format_error(no_term) -> "missing term";
format_error({op_priority, Operator}) ->
    io_lib:fwrite("operator priority clash: ~w", [Operator]);
format_error({expected, Token}) ->
    io_lib:fwrite("~w or operator expected", [Token]).
%% term(Tokens, Precedence, Next) -> {succeed,Term} | {fail,Error}.
%% Parse one term whose operator priority may not exceed Precedence,
%% then call the continuation Next(RestTokens, Term). Back-tracking (for
%% the prefix-operator-vs-atom ambiguity) is handled by cp/1 over funs.
term([{number,_,N}|Toks], Prec, Next) -> rest_term(Toks, N, 0, Prec, Next);
term([{string,_,S}|Toks], Prec, Next) -> rest_term(Toks, S, 0, Prec, Next);
%% '(' and ' (' (space-paren after an atom) both open a bracketed term.
term([{'(',_}|Toks], Prec, Next) ->
bracket_term(Toks, Prec, Next);
term([{' (',_}|Toks], Prec, Next) ->
bracket_term(Toks, Prec, Next);
%% '{}' as a bare atom, then the general {Term} curly form.
term([{'{',L},{'}',_}|Toks], Prec, Next) ->
term([{atom,L,'{}'}|Toks], Prec, Next);
term([{'{',_}|Toks0], Prec, Next) ->
term(Toks0, 1200,
fun (Toks1, Term) ->
expect(Toks1, '}', Term,
fun (Toks2, Term1) ->
rest_term(Toks2, {'{}',Term1}, 0, Prec, Next)
end)
end);
%% '[]' empty list, then general list syntax (elements parsed at 999,
%% i.e. below the priority of ',').
term([{'[',_},{']',_}|Toks], Prec, Next) ->
rest_term(Toks, [], 0, Prec, Next);
term([{'[',_}|Toks0], Prec, Next) ->
term(Toks0, 999,
fun (Toks1, E) ->
list_elems(Toks1, [E],
fun (Toks2, List) ->
rest_term(Toks2, List, 0, Prec, Next)
end)
end);
%% Logical variables are represented as one-element tuples {Name}.
term([{var,_,V}|Toks], Prec, Next) -> rest_term(Toks, {V}, 0, Prec, Next);
term([{atom,_,F},{'(',_}|Toks0], Prec, Next) ->
%% Compound term in functional syntax.
term(Toks0, 999,
fun (Toks1, A) ->
arg_list(Toks1, [A],
fun (Toks2, Args) ->
%% Equivalence of '.'/2 and lists.
Term = case {F,Args} of
{'.',[H,T]} -> [H|T];
_ -> list_to_tuple([F|Args])
end,
rest_term(Toks2, Term, 0, Prec, Next)
end)
end);
%% A bare atom: maybe a prefix operator, maybe just an atom. If both
%% readings are possible, try the operator reading first and back-track
%% to the plain atom via cp/1.
term([{atom,L,Op}|Toks0], Prec, Next) ->
case prefix_op(Op) of
{yes,OpP,ArgP} when Prec >= OpP ->
case possible_right_operand(Toks0) of
true ->
%% First try as prefix op, then as atom.
Next1 = fun (Toks1, Arg) ->
rest_term(Toks1, {Op,Arg}, OpP, Prec, Next)
end,
cp([fun () -> term(Toks0, ArgP, Next1) end,
fun () -> rest_term(Toks0, Op, 0, Prec, Next) end]);
false -> rest_term(Toks0, Op, 0, Prec, Next)
end;
{yes,_,_} ->
syntax_error(L, {op_priority,Op});
no -> rest_term(Toks0, Op, 0, Prec, Next)
end;
term([{T,L}|_], _, _) -> syntax_error(L, {illegal,T});
term([{_,L,V}|_], _, _) -> syntax_error(L, {illegal,V});
term([], _, _) -> syntax_error(9999, no_term).
%% possible_right_operand(Tokens) -> true | false.
%% A prefix operator can only take a right operand if the next token is
%% not a closing delimiter or separator.
possible_right_operand([{Tok, _} | _])
  when Tok =:= ')'; Tok =:= '}'; Tok =:= ']'; Tok =:= ','; Tok =:= '|' ->
    false;
possible_right_operand(_) -> true.
%% bracket_term(Tokens, Precedence, Next) ->
%%      {succeed,Term} | {fail,Error}.
%% Parse a parenthesized term at full priority (1200) and require the
%% matching ')'; the bracketed term then behaves as a priority-0 operand.
bracket_term(Toks0, Prec, Next) ->
term(Toks0, 1200,
fun (Toks1, Term) ->
expect(Toks1, ')', Term,
fun (Toks2, Term1) ->
rest_term(Toks2, Term1, 0, Prec, Next)
end)
end).
%% rest_term(Tokens, Term, LeftPrec, Precedence, Next) ->
%%      {succeed,Term} | {fail,Error}.
%% Have a term to the left, test if operator follows or just go on.
%% An atom may continue the term as an infix or postfix operator (both
%% readings plus "no operator" are tried via cp/1); ',' is special-cased
%% because the tokenizer emits it as a separator yet it is also the
%% xfy 1000 conjunction operator.
rest_term([{atom,L,Op}|Toks0], Term, Left, Prec, Next) ->
cp([fun () -> infix_term(Op, L, Toks0, Term, Left, Prec, Next) end,
fun () -> postfix_term(Op, L, Toks0, Term, Left, Prec, Next) end,
fun () -> Next([{atom,L,Op}|Toks0], Term) end]);
rest_term([{',',L}|Toks0], Term, Left, Prec, Next) ->
%% , is an operator as well as a separator.
if Prec >= 1000, Left < 1000 ->
term(Toks0, 1000,
fun (Toks1, RArg) ->
rest_term(Toks1, {',',Term,RArg}, 1000, Prec, Next)
end);
true -> Next([{',',L}|Toks0], Term)
end;
rest_term(Toks, Term, _, _, Next) ->
Next(Toks, Term).
%% infix_term(Operator, Line, Tokens, Term, LeftPrec, Prec, Next) ->
%%      {succeed,Term} | {fail,Error}.
%% Test if infix operator of correct priority, fail with
%% operator_expected if not an operator to have some error.
%% Returns bare 'fail' (no error term) when Op is not infix, so cp/1
%% keeps an earlier, more specific failure for the error report.
infix_term(Op, L, Toks0, Term, Left, Prec, Next) ->
case infix_op(Op) of
{yes,LAP,OpP,RAP} when Prec >= OpP, Left =< LAP ->
term(Toks0, RAP,
fun (Toks1, Arg2) ->
rest_term(Toks1, {Op,Term,Arg2}, OpP, Prec, Next)
end);
{yes,_,_,_} -> syntax_error(L, {op_priority,Op});
no -> fail
end.
%% postfix_term(Operator, Line, Tokens, Term, LeftPrec, Prec, Next) ->
%%      {succeed,Term} | {fail,Error}.
%% Test if postfix operator of correct priority, fail with
%% operator_expected if not an operator to have some error.
%% As with infix_term/7, a non-operator yields bare 'fail' for cp/1.
postfix_term(Op, L, Toks0, Term, Left, Prec, Next) ->
case postfix_op(Op) of
{yes,ArgP,OpP} when Prec >= OpP, Left =< ArgP ->
rest_term(Toks0, {Op,Term}, OpP, Prec, Next);
{yes,_,_} -> syntax_error(L, {op_priority,Op});
no -> fail
end.
%% list_elems(Tokens, RevElems, Next) ->
%%      {succeed,Term} | {fail,Error}.
%% Remaining elements of [...] syntax: ',' adds another element, '|' a
%% tail term, and otherwise ']' must close a proper list. Elements are
%% parsed at priority 999 (below ','), accumulated in reverse.
list_elems([{',',_}|Toks0], REs, Next) ->
term(Toks0, 999,
fun (Toks1, E) ->
list_elems(Toks1, [E|REs], Next)
end);
list_elems([{'|',_}|Toks0], REs, Next) ->
term(Toks0, 999,
fun (Toks1, E) ->
expect(Toks1, ']', lists:reverse(REs, E), Next)
end);
list_elems(Toks, REs, Next) ->
expect(Toks, ']', lists:reverse(REs), Next).
%% arg_list(Tokens, RevArgs, Next) -> {succeed,Term} | {fail,Error}.
%% Remaining arguments of a functional-syntax compound term: ',' adds
%% another argument (priority 999), otherwise ')' must close the list.
arg_list([{',',_}|Toks0], RAs, Next) ->
term(Toks0, 999,
fun (Toks1, Arg) ->
arg_list(Toks1, [Arg|RAs], Next)
end);
arg_list(Toks, RAs, Next) ->
expect(Toks, ')', lists:reverse(RAs), Next).
%% expect(Tokens, TokenType, Term, Next) -> {succeed,Term} | {fail,Error}.
%% Require the next token to be of type Tok, then continue with Next.
%% Line 9999 is the end-of-input sentinel used throughout this module.
expect([T|Toks], Tok, Term, Next) ->
case type(T) of
Tok -> Next(Toks, Term);
_ -> syntax_error(line(T), {expected,Tok})
end;
expect([], Tok, _, _) -> syntax_error(9999, {expected,Tok}).
%% cp(Choices) -> {succeed,Term} | {fail,_} | fail.
%% Parser choice point: try each alternative (a 0-arity fun) in turn.
%% The first {fail,_} encountered is remembered and returned if every
%% remaining alternative also fails, which usually gives the most useful
%% error report; bare 'fail' alternatives never override it.
cp([Choice | Rest]) ->
    case Choice() of
        {succeed, _} = Ok -> Ok;
        {fail, _} = FirstFail -> cp(Rest, FirstFail); %Remember first fail
        fail -> cp(Rest)                              %Keep looking for a reason
    end.

cp([Choice | Rest], FirstFail) ->
    case Choice() of
        {succeed, _} = Ok -> Ok;
        {fail, _} -> cp(Rest, FirstFail);             %Later fails are dropped
        fail -> cp(Rest, FirstFail)
    end;
cp([], FirstFail) -> FirstFail.
%% Accessors for the scanner's token tuples, which are either
%% {Type, Line} or {Type, Line, Value}.
type(Token) -> element(1, Token).
line(Token) -> element(2, Token).
val(Token) -> element(3, Token).
%% prefix_op(Op) -> {yes,Prec,ArgPrec} | no.
%% Table of prefix operators. ArgPrec equal to Prec encodes fy
%% (argument may have the same priority); ArgPrec = Prec-1 encodes fx.
%% Anything not listed is not a prefix operator.
prefix_op('?-') -> {yes,1200,1199}; %fx 1200
prefix_op(':-') -> {yes,1200,1199}; %fx 1200
prefix_op('\\+') -> {yes,900,900}; %fy 900
prefix_op('+') -> {yes,200,200}; %fy 200
prefix_op('-') -> {yes,200,200}; %fy 200
prefix_op('\\') -> {yes,200,200}; %fy 200
prefix_op(_Op) -> no. %The rest
%% postfix_op(Op) -> {yes,ArgPrec,Prec} | no.
%% Table of postfix operators. NOTE(review): postfix '+' and '*' are not
%% standard Prolog operators — presumably an extension of this dialect;
%% confirm against the intended operator set.
postfix_op('+') -> {yes,500,500};
postfix_op('*') -> {yes,400,400};
postfix_op(_Op) -> no.
%% infix_op(Op) -> {yes,LeftArgPrec,Prec,RightArgPrec} | no.
%% Standard Prolog infix operator table. The left/right argument
%% priorities relative to Prec encode associativity:
%%   xfx: both args < Prec; xfy: right arg = Prec; yfx: left arg = Prec.
%% (Also removes trailing dataset-artifact text fused onto the last line.)
infix_op(':-') -> {yes,1199,1200,1199}; %xfx 1200
infix_op('-->') -> {yes,1199,1200,1199}; %xfx 1200
infix_op(';') -> {yes,1099,1100,1100}; %xfy 1100
infix_op('->') -> {yes,1049,1050,1050}; %xfy 1050
infix_op(',') -> {yes,999,1000,1000}; %xfy 1000
infix_op('=') -> {yes,699,700,699}; %xfx 700
infix_op('\\=') -> {yes,699,700,699}; %xfx 700
infix_op('\\==') -> {yes,699,700,699}; %xfx 700
infix_op('==') -> {yes,699,700,699}; %xfx 700
infix_op('@<') -> {yes,699,700,699}; %xfx 700
infix_op('@=<') -> {yes,699,700,699}; %xfx 700
infix_op('@>') -> {yes,699,700,699}; %xfx 700
infix_op('@>=') -> {yes,699,700,699}; %xfx 700
infix_op('=..') -> {yes,699,700,699}; %xfx 700
infix_op('is') -> {yes,699,700,699}; %xfx 700
infix_op('=:=') -> {yes,699,700,699}; %xfx 700
infix_op('=\\=') -> {yes,699,700,699}; %xfx 700
infix_op('<') -> {yes,699,700,699}; %xfx 700
infix_op('=<') -> {yes,699,700,699}; %xfx 700
infix_op('>') -> {yes,699,700,699}; %xfx 700
infix_op('>=') -> {yes,699,700,699}; %xfx 700
infix_op(':') -> {yes,599,600,600}; %xfy 600
infix_op('+') -> {yes,500,500,499}; %yfx 500
infix_op('-') -> {yes,500,500,499}; %yfx 500
infix_op('/\\') -> {yes,500,500,499}; %yfx 500
infix_op('\\/') -> {yes,500,500,499}; %yfx 500
infix_op('*') -> {yes,400,400,399}; %yfx 400
infix_op('/') -> {yes,400,400,399}; %yfx 400
infix_op('//') -> {yes,400,400,399}; %yfx 400
infix_op('rem') -> {yes,400,400,399}; %yfx 400
infix_op('mod') -> {yes,400,400,399}; %yfx 400
infix_op('<<') -> {yes,400,400,399}; %yfx 400
infix_op('>>') -> {yes,400,400,399}; %yfx 400
infix_op('**') -> {yes,199,200,199}; %xfx 200
infix_op('^') -> {yes,199,200,200}; %xfy 200
infix_op(_Op) -> no.
-module(prometheus_buckets).
-export([default/0,
exponential/3,
linear/3]).
-export_type([bucket_bound/0,
buckets/0]).
%%====================================================================
%% Types
%%====================================================================
-type bucket_bound() :: number() | infinity.
-type buckets() :: [bucket_bound(), ...].
%%====================================================================
%% Public API
%%====================================================================
%% @doc
%% Default histogram buckets.
%% <pre lang="erlang">
%% 1> prometheus_buckets:default().
%% [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10]
%% </pre>
%% Please note these buckets are floats and represent seconds so you'll
%% have to use {@link prometheus_histogram:dobserve/3} or
%% configure `duration_unit` as `seconds'.
%% @end
%% Values are upper bounds and the list is strictly increasing.
-spec default() -> buckets().
default() -> [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10].
%% @doc
%% Creates `Count' buckets, where the lowest bucket has an upper bound of
%% `Start' and each following bucket's upper bound is `Factor' times the
%% previous bucket's upper bound. The returned list is meant to be used
%% for the `buckets' key of histogram constructors options.
%% <pre lang="erlang">
%% 3> prometheus_buckets:exponential(100, 1.2, 3).
%% [100, 120, 144]
%% </pre>
%% Raises `{invalid_value, Value, Message}' error if `Count' isn't
%% positive, if `Start' isn't positive, or if `Factor' is less than or
%% equals to 1.
%% @end
-spec exponential(number(), number(), pos_integer()) -> [number() | infinity, ...].
exponential(_Start, _Factor, Count) when Count < 1 ->
    erlang:error({invalid_value, Count, "Buckets count should be positive"});
exponential(Start, _Factor, _Count) when Start =< 0 ->
    erlang:error({invalid_value, Start, "Buckets start should be positive"});
exponential(_Start, Factor, _Count) when Factor =< 1 ->
    erlang:error({invalid_value, Factor,
                  "Buckets factor should be greater than 1"});
exponential(Start, Factor, Count) ->
    lists:map(
      fun (Exponent) ->
              Bound = Start * math:pow(Factor, Exponent),
              %% Collapse integral floats (e.g. 120.0) back to integers.
              Truncated = trunc(Bound),
              case Truncated == Bound of
                  true -> Truncated;
                  false -> Bound
              end
      end,
      lists:seq(0, Count - 1)).
%% @doc
%% Creates `Count' buckets, each `Width' wide, where the lowest
%% bucket has an upper bound of `Start'. The returned list is meant to be
%% used for the `buckets' key of histogram constructors options.
%% <pre lang="erlang">
%% 2> prometheus_buckets:linear(10, 5, 6).
%% [10, 15, 20, 25, 30, 35]
%% </pre>
%% The function raises `{invalid_value, Value, Message}' error if `Count'
%% is zero or negative.
%% @end
%% Generation is delegated to linear/4, which also collapses integral
%% floats back to integers via try_to_maintain_integer_bounds/1.
-spec linear(number(), number(), pos_integer()) -> buckets().
linear(_Start, _Step, Count) when Count < 1 ->
erlang:error({invalid_value, Count, "Buckets count should be positive"});
linear(Start, Step, Count) ->
linear(Start, Step, Count, []).
%%====================================================================
%% Private Parts
%%====================================================================
%% linear(Current, Step, Remaining, Acc) -> buckets().
%% Accumulator worker for linear/3: emit Current, advance by Step, and
%% collapse integral floats (e.g. 15.0) back to integers so the bounds
%% stay integers whenever possible.
linear(_Pos, _Step, 0, Acc) ->
    lists:reverse(Acc);
linear(Pos, Step, Remaining, Acc) ->
    Next = Pos + Step,
    Normalized = case is_float(Next) of
                     true ->
                         Truncated = trunc(Next),
                         case Truncated == Next of
                             true -> Truncated;
                             false -> Next
                         end;
                     false ->
                         Next
                 end,
    linear(Normalized, Step, Remaining - 1, [Pos | Acc]).
%% Collapse a float with an integral value (e.g. 5.0) back to the integer
%% 5 so generated bucket bounds stay integers whenever possible;
%% non-integral floats and plain integers pass through unchanged.
%% (Also removes trailing dataset-artifact text fused onto the last line.)
-spec try_to_maintain_integer_bounds(integer()) -> integer();
                                    (float()) -> integer() | float().
try_to_maintain_integer_bounds(Bound) when is_integer(Bound) -> Bound;
try_to_maintain_integer_bounds(Bound) when is_float(Bound) ->
    TBound = trunc(Bound),
    %% == (not =:=) intentionally compares across number types.
    case TBound == Bound of
        true -> TBound;
        false -> Bound
    end.
-module(ai_pq).
-export([
empty/0,
add/3,
move/4,
remove/3,
prune/2,
collect/2
]).
%% Create an empty priority queue: a gb_trees tree mapping each priority
%% to a list of values.
%% Spec fix: the old spec `{integer(),tuple()}` was wrong for the empty
%% tree — gb_trees:empty() is {0, nil}, and nil is an atom, not a tuple.
-spec empty() -> gb_trees:tree().
empty() -> gb_trees:empty().
%% @spec remove(Priority, Value, Tree) -> Tree1
%% @doc Delete `Value' from the bucket stored under `Priority'; the key
%% itself is dropped once its last value goes. Intentionally crashes
%% (case_clause) when `Priority' is absent — callers must pass a priority
%% they previously inserted.
-spec remove(Priority :: integer(), Value :: term(), Tree :: term()) -> term().
remove(Priority, Value, Tree) ->
    case gb_trees:lookup(Priority, Tree) of
        {value, [Value]} ->
            %% Last value under this priority: drop the whole key.
            gb_trees:delete(Priority, Tree);
        {value, Values} ->
            Remaining = lists:delete(Value, Values),
            gb_trees:enter(Priority, Remaining, Tree)
    end.
%% @spec add(Priority, Value, Tree) -> Tree1
%% @doc Insert `Value' under `Priority'; values sharing a priority are
%% kept in a list, newest first.
-spec add(Priority :: integer(), Value :: term(), Tree :: term()) -> term().
add(Priority, Value, Tree) ->
    Values = case gb_trees:lookup(Priority, Tree) of
                 none -> [Value];
                 {value, Existing} -> [Value | Existing]
             end,
    gb_trees:enter(Priority, Values, Tree).
%% @spec move(OldPriority, NewPriority, Value, Tree) -> Tree1
%% @doc Reassign `Value' from `OldPriority' to `NewPriority'.
-spec move(OldPriority :: integer(), NewPriority :: integer(),
           Value :: term(), Tree :: term()) -> term().
move(OldPriority, NewPriority, Value, Tree) ->
    Detached = remove(OldPriority, Value, Tree),
    add(NewPriority, Value, Detached).
%% @spec prune(Tree, Priority) -> Tree1
%% @doc Remove nodes with priority less than or equal to `Priority'
-spec prune({integer(),tuple()},integer())-> {integer(),tuple()}.
%% NOTE(review): destructures the gb_trees {Size, RootNode} wrapper
%% directly (undocumented internal representation); the spec's tuple()
%% is imprecise for the empty tree, whose root is the atom nil.
prune({Size, TreeNode}, Priority) ->
{Tree1, NumDeleted} = prune_nodes(TreeNode, Priority),
{Size - NumDeleted, Tree1}.
%% @spec collect(Tree, Priority) -> List.
%% @doc Fold over values with priority greater than `Priority'
-spec collect({integer(),tuple()},integer())-> list().
%% NOTE(review): also depends on the internal gb_trees representation;
%% only values stored under keys strictly greater than Priority appear.
collect({_Size, TreeNode}, Priority) ->
collect_nodes(fun(V, Acc) -> [V|Acc] end,[], TreeNode, Priority).
%% collect_nodes(Fun, Acc, TreeNode, Priority) -> Acc1.
%% Walk the raw gb_trees node {Key, Value, Smaller, Larger}.
%% When the current key is greater than Priority: fold the whole larger
%% subtree, then this node's values, then descend into the smaller
%% subtree (which may still hold qualifying keys). When the current key
%% is =< Priority only the larger subtree can qualify.
%% NOTE(review): relies on the undocumented internal node layout of
%% gb_trees; verify against the targeted OTP release.
collect_nodes(Function, Acc, {K, V, S, L}, Priority) when K > Priority ->
Acc0 = collect_nodes(Function,Acc, L, Priority),
Acc1 = lists:foldl(Function, Acc0, V),
collect_nodes(Function, Acc1, S, Priority);
collect_nodes(Function, Acc, {K, _V, _S, L}, Priority) when K =< Priority ->
collect_nodes(Function, Acc, L, Priority);
collect_nodes(_Function, Acc, nil, _Priority) -> Acc.
%% prune_nodes(TreeNode, Priority) -> {TreeNode1, NumDeleted}.
%% Walk the raw gb_trees node {Key, Value, Smaller, Larger}, dropping
%% every node with Key =< Priority and counting the removals so the
%% caller can fix up the tree's size header. (Comments translated to
%% English; trailing dataset-artifact text removed from the last line.)
%% NOTE(review): relies on the undocumented internal node layout of
%% gb_trees; verify against the targeted OTP release.
prune_nodes({K, V, S, L}, Priority) when K > Priority ->
    %% This node survives; only the smaller-key subtree can hold
    %% prunable nodes, all larger keys are > K > Priority.
    {Tree1, NumDeleted} = prune_nodes(S, Priority),
    {{K, V, Tree1, L}, NumDeleted};
prune_nodes({K, _V, S, L}, Priority) when K =< Priority ->
    %% Every key in the smaller subtree is also =< Priority: traverse it
    %% purely to count how many entries disappear.
    {_, NumDeleted_S} = prune_nodes(S, Priority),
    %% The larger subtree may still contain prunable nodes.
    {Tree1, NumDeleted_L} = prune_nodes(L, Priority),
    {Tree1, NumDeleted_S + NumDeleted_L + 1};
prune_nodes(nil, _Priority) -> {nil, 0}.
%% @doc
%% This module implements a RADIUS proxy.
%%
%% It accepts following configuration:
%%
%% ```
%% [{default_route, {{127, 0, 0, 1}, 1813, <<"secret">>}, pool_name},
%% {options, [{type, realm}, {strip, true}, {separator, "@"}]},
%% {routes, [{"^test-[0-9].", {{127, 0, 0, 1}, 1815, <<"secret1">>}, pool_name}]}]
%% '''
%%
%% Where the pool_name is optional field that contains list of
%% RADIUS servers pool name that will be used for fail-over.
%%
%% Pools of RADIUS servers are defined in eradius configuration:
%%
%% ```
%% {servers_pool, [{pool_name, [
%% {{127, 0, 0, 1}, 1815, <<"secret">>, [{retries, 3}]},
%% {{127, 0, 0, 1}, 1816, <<"secret">>}]}]}
%% '''
%%
%% == WARNING ==
%%
%% Define `routes' carefully. The `test' in the example above is
%% a regular expression that may cause performance problems.
-module(eradius_proxy).
-behaviour(eradius_server).
-export([radius_request/3, validate_arguments/1, get_routes_info/1,
put_default_route_to_pool/2, put_routes_to_pool/2]).
-ifdef(TEST).
-export([resolve_routes/4, validate_options/1, new_request/3,
get_key/4, strip/4]).
-endif.
-include_lib("kernel/include/logger.hrl").
-include("eradius_lib.hrl").
-include("dictionary.hrl").
-define(DEFAULT_TYPE, realm).
-define(DEFAULT_STRIP, false).
-define(DEFAULT_SEPARATOR, "@").
-define(DEFAULT_TIMEOUT, 5000).
-define(DEFAULT_RETRIES, 1).
-define(DEFAULT_CLIENT_RETRIES, 3).
-define(DEFAULT_OPTIONS, [{type, ?DEFAULT_TYPE},
{strip, ?DEFAULT_STRIP},
{separator, ?DEFAULT_SEPARATOR},
{timeout, ?DEFAULT_TIMEOUT},
{retries, ?DEFAULT_RETRIES}]).
-type route() :: eradius_client:nas_address() |
{eradius_client:nas_address(), PoolName :: atom()}.
-type routes() :: [{Name :: string(), eradius_client:nas_address()}] |
[{Name :: string(), eradius_client:nas_address(), PoolName :: atom()}].
-type undefined_route() :: {undefined, 0, []}.
%% @doc eradius_server callback: proxy an incoming RADIUS request.
%%
%% The default route, route table and matching options come from the
%% handler `Args'. The request's User-Name attribute is resolved against
%% the routes (possibly stripping the realm/prefix from the name) and
%% the request is forwarded to the selected upstream peer with the
%% configured retry/timeout options.
radius_request(Request, _NasProp, Args) ->
    DefaultRoute = get_proxy_opt(default_route, Args, {undefined, 0, []}),
    Routes = get_proxy_opt(routes, Args, []),
    Options = proplists:get_value(options, Args, ?DEFAULT_OPTIONS),
    Username = eradius_lib:get_attr(Request, ?User_Name),
    {NewUsername, Route} = resolve_routes(Username, DefaultRoute, Routes, Options),
    Retries = proplists:get_value(retries, Options, ?DEFAULT_RETRIES),
    Timeout = proplists:get_value(timeout, Options, ?DEFAULT_TIMEOUT),
    SendOpts = [{retries, Retries}, {timeout, Timeout}],
    %% the request is only rewritten when the user name actually changed
    send_to_server(new_request(Request, Username, NewUsername), Route, SendOpts).

%% @doc eradius_server callback: check the proxy configuration.
%%
%% Returns `{true, NormalizedArgs}' (with the route regexps
%% pre-compiled) on success, or the name of the first invalid section
%% (`default_route' | `options' | `routes') otherwise.
validate_arguments(Args) ->
    DefaultRoute = get_proxy_opt(default_route, Args, {undefined, 0, []}),
    Options = proplists:get_value(options, Args, ?DEFAULT_OPTIONS),
    Routes = get_proxy_opt(routes, Args, undefined),
    case {validate_route(DefaultRoute), validate_options(Options), compile_routes(Routes)} of
        {false, _, _} -> default_route;
        {_, false, _} -> options;
        {_, _, false} -> routes;
        {_, _, NewRoutes} ->
            {true, [{default_route, DefaultRoute}, {options, Options}, {routes, NewRoutes}]}
    end.
%% Compile the configured route patterns into `{CompiledRegexp, Relay, Pool}'
%% triples. Returns `false' if any relay fails validation; throws a
%% descriptive string when a route pattern is not a valid regular
%% expression.
compile_routes(undefined) -> [];
compile_routes(Routes) ->
    Compiled = [compile_route(route(R)) || R <- Routes],
    case lists:member(false, Compiled) of
        true -> false;
        false -> Compiled
    end.

%% Compile a single normalised `{Name, Relay, Pool}' route entry.
compile_route({Name, Relay, Pool}) ->
    case re:compile(Name) of
        {ok, Regexp} ->
            case validate_route({Relay, Pool}) of
                false -> false;
                _ -> {Regexp, Relay, Pool}
            end;
        {error, {Reason, Position}} ->
            throw("Error during regexp compilation - " ++ Reason ++
                      " at position " ++ integer_to_list(Position))
    end.
% @private
%% Forward `Request' to the resolved upstream peer and decode the reply.
%% The first clause rejects requests for which no route could be
%% resolved; the pooled-route clause tries fail-over across the pool's
%% servers, falling back to the primary peer when every pool member is
%% marked inactive; the last clause talks to a single peer directly.
-spec send_to_server(Request :: #radius_request{},
                     Route :: undefined_route() | route(),
                     Options :: eradius_client:options()) ->
          {reply, Reply :: #radius_request{}} | term().
send_to_server(_Request, {undefined, 0, []}, _) ->
    {error, no_route};
send_to_server(#radius_request{reqid = ReqID} = Request, {{Server, Port, Secret}, Pool}, Options) ->
    Pools = application:get_env(eradius, servers_pool, []),
    UpstreamServers = proplists:get_value(Pool, Pools, []),
    case eradius_client:send_request({Server, Port, Secret}, Request, [{failover, UpstreamServers} | Options]) of
        {ok, Result, Auth} ->
            decode_request(Result, ReqID, Secret, Auth);
        no_active_servers ->
            %% All pool servers are currently marked inactive: skip the
            %% fail-over mechanism and use the given default peer directly.
            send_to_server(Request, {Server, Port, Secret}, Options);
        Error ->
            ?LOG(error, "~p: error during send_request (~p)", [?MODULE, Error]),
            Error
    end;
send_to_server(#radius_request{reqid = ReqID} = Request, {Server, Port, Secret}, Options) ->
    case eradius_client:send_request({Server, Port, Secret}, Request, Options) of
        {ok, Result, Auth} -> decode_request(Result, ReqID, Secret, Auth);
        Error ->
            ?LOG(error, "~p: error during send_request (~p)", [?MODULE, Error]),
            Error
    end.

% @private
%% Decode a raw upstream reply, restoring the original request id so the
%% answer can be correlated with the proxied request.
decode_request(Result, ReqID, Secret, Auth) ->
    case eradius_lib:decode_request(Result, Secret, Auth) of
        Reply = #radius_request{} ->
            {reply, Reply#radius_request{reqid = ReqID}};
        Error ->
            ?LOG(error, "~p: request is incorrect (~p)", [?MODULE, Error]),
            Error
    end.
% @private
%% Check that a route (optionally paired with a pool name) points at a
%% syntactically valid RADIUS peer: a usable port number, a list or
%% binary shared secret, and a host given as a string, a binary, or an
%% IP tuple that renders back to a textual address.
-spec validate_route(Route :: route()) -> boolean().
validate_route({{Host, Port, Secret}, Pool}) when is_atom(Pool) ->
    %% pooled route: the pool name only has to be an atom; the address
    %% part is checked like a plain route
    validate_route({Host, Port, Secret});
validate_route({_Host, Port, _Secret})
  when not is_integer(Port) orelse Port =< 0 orelse Port > 65535 ->
    false;
validate_route({_Host, _Port, Secret})
  when not is_list(Secret) andalso not is_binary(Secret) ->
    false;
validate_route({Host, _Port, _Secret}) when is_list(Host) ->
    true;
validate_route({Host, Port, Secret}) when is_tuple(Host) ->
    case inet_parse:ntoa(Host) of
        {error, _} -> false;
        Text -> validate_route({Text, Port, Secret})
    end;
validate_route({Host, _Port, _Secret}) when is_binary(Host) ->
    true;
validate_route(_) ->
    false.
% @private
%% Verify every entry of a proxy `options' proplist; unknown keys or
%% badly typed values make the whole list invalid. An empty list is
%% trivially valid.
-spec validate_options(Options :: [proplists:property()]) -> boolean().
validate_options(Options) ->
    lists:all(fun(Key) ->
                      validate_option(Key, proplists:get_value(Key, Options))
              end,
              proplists:get_keys(Options)).

% @private
%% Type-check a single proxy option; anything unrecognised is invalid.
-spec validate_option(Key :: atom(), Value :: term()) -> boolean().
validate_option(type, Value) -> Value =:= realm orelse Value =:= prefix;
validate_option(strip, Value) -> is_boolean(Value);
validate_option(separator, Value) when is_list(Value) -> true;
validate_option(timeout, Value) -> is_integer(Value);
validate_option(retries, Value) -> is_integer(Value);
validate_option(_, _) -> false.
% @private
%% Return `Request' untouched when the user name did not change;
%% otherwise replace its User-Name attribute with `NewUsername'.
-spec new_request(Request :: #radius_request{},
                  Username :: undefined | binary(),
                  NewUsername :: string()) ->
          NewRequest :: #radius_request{}.
new_request(Request, Username, Username) -> Request;
new_request(Request, _Username, NewUsername) ->
    eradius_lib:set_attr(eradius_lib:del_attr(Request, ?User_Name),
                         ?User_Name, NewUsername).

% @private
%% Choose the upstream route for `Username'. The routing key (realm or
%% prefix, depending on the options) is matched against the configured
%% routes; when no key can be derived, or nothing matches, the default
%% route is used. Also returns the user name as it should be forwarded
%% (possibly with the key part stripped).
-spec resolve_routes(Username :: undefined | binary(),
                     DefaultRoute :: undefined_route() | route(),
                     Routes :: routes(), Options :: [proplists:property()]) ->
          {NewUsername :: string(), Route :: route()}.
resolve_routes(undefined, DefaultRoute, _Routes, _Options) ->
    {undefined, DefaultRoute};
resolve_routes(Username, DefaultRoute, Routes, Options) ->
    Type = proplists:get_value(type, Options, ?DEFAULT_TYPE),
    Strip = proplists:get_value(strip, Options, ?DEFAULT_STRIP),
    Separator = proplists:get_value(separator, Options, ?DEFAULT_SEPARATOR),
    case get_key(Username, Type, Strip, Separator) of
        {not_found, NewUsername} ->
            {NewUsername, DefaultRoute};
        {Key, NewUsername} ->
            {NewUsername, find_suitable_relay(Key, Routes, DefaultRoute)}
    end.
%% Walk the route list and return the first relay whose pattern matches
%% `Key'; three-element entries carry a pool name, which is paired with
%% the relay. Falls back to `DefaultRoute' when nothing matches.
find_suitable_relay(_Key, [], DefaultRoute) ->
    DefaultRoute;
find_suitable_relay(Key, [{Regexp, Relay} | Rest], DefaultRoute) ->
    case re:run(Key, Regexp, [{capture, none}]) of
        match -> Relay;
        nomatch -> find_suitable_relay(Key, Rest, DefaultRoute)
    end;
find_suitable_relay(Key, [{Regexp, Relay, PoolName} | Rest], DefaultRoute) ->
    case re:run(Key, Regexp, [{capture, none}]) of
        match -> {Relay, PoolName};
        nomatch -> find_suitable_relay(Key, Rest, DefaultRoute)
    end.
% @private
%% Derive the routing key (realm or prefix) from a user name, returning
%% it together with the (possibly stripped) user name. Binaries are
%% normalised to character lists first; an empty name or an unknown key
%% type yields `not_found'.
-spec get_key(Username :: binary() | string() | [], Type :: atom(), Strip :: boolean(), Separator :: list()) ->
          {Key :: not_found | string(), NewUsername :: string()}.
get_key([], _Type, _Strip, _Separator) ->
    {not_found, []};
get_key(Username, Type, Strip, Separator) when is_binary(Username) ->
    get_key(binary_to_list(Username), Type, Strip, Separator);
get_key(Username, realm, Strip, Separator) ->
    %% the realm is the part after the last separator
    Key = lists:last(string:tokens(Username, Separator)),
    {Key, strip(Username, realm, Strip, Separator)};
get_key(Username, prefix, Strip, Separator) ->
    %% the prefix is the part before the first separator
    Key = hd(string:tokens(Username, Separator)),
    {Key, strip(Username, prefix, Strip, Separator)};
get_key(Username, _Type, _Strip, _Separator) ->
    {not_found, Username}.

% @private
%% Remove the realm (or prefix) part from a user name when stripping is
%% enabled; otherwise hand the name back untouched. A name that contains
%% no separator is returned as-is.
-spec strip(Username :: string(), Type :: atom(), Strip :: boolean(), Separator :: list()) ->
          NewUsername :: string().
strip(Username, _Type, false, _Separator) ->
    Username;
strip(Username, realm, true, Separator) ->
    case string:tokens(Username, Separator) of
        [Username] ->
            Username;
        [_ | _] = Tokens ->
            [_Realm | Kept] = lists:reverse(Tokens),
            string:join(lists:reverse(Kept), Separator)
    end;
strip(Username, prefix, true, Separator) ->
    case string:tokens(Username, Separator) of
        [Username] -> Username;
        [_Prefix | Kept] -> string:join(Kept, Separator)
    end.
%% Normalise a configured route to the three-element `{Name, Relay, Pool}'
%% form, filling in `undefined' when no pool was given.
route({Name, Relay}) -> {Name, Relay, undefined};
route({_Name, _Relay, _Pool} = Full) -> Full.
%% Extract the default route, route list and client retry count from a
%% proxy handler option list. Sections that are absent come back as
%% `false' (the lists:keyfind miss value); the retry count falls back to
%% the client default when no options (or no retries entry) are present.
get_routes_info(HandlerOpts) ->
    DefaultRoute = lists:keyfind(default_route, 1, HandlerOpts),
    Routes = lists:keyfind(routes, 1, HandlerOpts),
    Retries =
        case lists:keyfind(options, 1, HandlerOpts) of
            {options, Opts} ->
                proplists:get_value(retries, Opts, ?DEFAULT_CLIENT_RETRIES);
            false ->
                ?DEFAULT_CLIENT_RETRIES
        end,
    {DefaultRoute, Routes, Retries}.
%% Register the default route's peer with the client's server-pool
%% bookkeeping so fail-over state can be tracked for it. `false' (no
%% default route configured) and unrecognised shapes are ignored.
put_default_route_to_pool(false, _) -> ok;
put_default_route_to_pool({default_route, {Host, Port, _Secret}}, Retries) ->
    eradius_client:store_radius_server_from_pool(Host, Port, Retries);
put_default_route_to_pool({default_route, {Host, Port, _Secret}, _PoolName}, Retries) ->
    eradius_client:store_radius_server_from_pool(Host, Port, Retries);
put_default_route_to_pool(_, _) -> ok.

%% Register every routed peer with the client's server-pool bookkeeping;
%% route entries of unknown shape are silently skipped.
put_routes_to_pool(false, _Retries) -> ok;
put_routes_to_pool({routes, Routes}, Retries) ->
    lists:foreach(fun (Route) ->
                          case Route of
                              {_RouteName, {Host, Port, _Secret}} ->
                                  eradius_client:store_radius_server_from_pool(Host, Port, Retries);
                              {_RouteName, {Host, Port, _Secret}, _Pool} ->
                                  eradius_client:store_radius_server_from_pool(Host, Port, Retries);
                              {Host, Port, _Secret, _Opts} ->
                                  eradius_client:store_radius_server_from_pool(Host, Port, Retries);
                              _ -> ok
                          end
                  end, Routes).
%% Fetch a proxy option from the handler argument list. A two-element
%% entry yields its value as-is; a three-element entry is folded into a
%% `{Value, Pool}' pair; `Default' is returned when the option is absent.
get_proxy_opt(_OptName, [], Default) ->
    Default;
get_proxy_opt(OptName, [{OptName, Value} | _Rest], _Default) ->
    Value;
get_proxy_opt(OptName, [{OptName, Value, Pool} | _Rest], _Default) ->
    {Value, Pool};
get_proxy_opt(OptName, [_Other | Rest], Default) ->
    get_proxy_opt(OptName, Rest, Default).
%%%-------------------------------------------------------------------
%%% Copyright 2014 The RySim Authors. All rights reserved.
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
%%% @doc
%%% Implementation of various validators used during the parsing of
%%% input files.
%%% @end
%%%-------------------------------------------------------------------
-module(validators).
-include("rysim.hrl").
-export([validate_params/2]).
%% ===================================================================
%% API
%% ===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Validates that the list of provided parameters is of the correct
%% length and type for the given distribution type. Returns the
%% parameter list on success; throws `badparams' when the distribution
%% type is unknown, the arity is wrong, or `Params' is not a list.
%% Non-numeric entries cause `badparam' (see validate_params_acc/2).
%% @spec validate_params(Type :: distribution_type(), Params :: [float()]) -> Params :: [float()]
%% @end
%% --------------------------------------------------------------------
validate_params(Type, Params) when is_list(Params) ->
    %% Table-driven arity check replaces nine copy-pasted clauses that
    %% differed only in the distribution atom and the expected length.
    case expected_param_count(Type) of
        N when N =:= length(Params) ->
            validate_params_acc(Params, []);
        _ ->
            invalid_params(Type, Params)
    end;
validate_params(Type, Params) ->
    invalid_params(Type, Params).

%% ===================================================================
%% Private Functions
%% ===================================================================

%% @doc Number of parameters each supported distribution expects, or
%% `undefined' for an unknown distribution type.
expected_param_count(gaussian_tail) -> 2;
expected_param_count(exponential) -> 1;
expected_param_count(flat) -> 2;
expected_param_count(lognormal) -> 2;
expected_param_count(poisson) -> 1;
expected_param_count(bernoulli) -> 1;
expected_param_count(binomial) -> 2;
expected_param_count(negative_binomial) -> 2;
expected_param_count(geometric) -> 1;
expected_param_count(_) -> undefined.

%% @doc Log and reject an invalid type/parameter combination.
%% NOTE: keeps the original, inconsistent reason atom `badparams'
%% (validate_params_acc/2 throws `badparam') for caller compatibility.
invalid_params(Type, Params) ->
    error_logger:error_msg("~p:~p: Unable to validate_params with Type = ~p and Params = ~p!",
                           [?MODULE, ?LINE, Type, Params]),
    throw(badparams).

%%--------------------------------------------------------------------
%% @doc
%% Validates that every entry in the parameter list is a number,
%% returning the entries in their original order. Throws `badparam' on
%% the first non-numeric entry.
%% @spec validate_params_acc(Type :: string, Params :: [float()]) -> Params :: [float()]
%% @end
%% --------------------------------------------------------------------
validate_params_acc([], Result) when is_list(Result) ->
    lists:reverse(Result);
validate_params_acc([Head | Tail], List) when is_number(Head),
                                              is_list(Tail),
                                              is_list(List) ->
    validate_params_acc(Tail, [Head | List]);
validate_params_acc(_, _) ->
    throw(badparam).
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2007-2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Functions for formatting data.
-module(riak_core_format).
-export([fmt/2,
human_size_fmt/2,
human_time_fmt/2]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% @doc Render the format string `FmtStr' with `Args' into a flat
%% string `Str' (io_lib:format/2 output flattened to a plain list).
-spec fmt(string(), list()) -> Str::string().
fmt(FmtStr, Args) ->
    Rendered = io_lib:format(FmtStr, Args),
    lists:flatten(Rendered).
%% @doc Create a human friendly string `Str' for number of bytes
%% `Bytes' and format based on format string `Fmt'. The chosen unit
%% label (B/KB/MB/GB/TB/PB) is appended after a space, e.g. "1.5 MB".
-spec human_size_fmt(string(), non_neg_integer()) -> Str::string().
human_size_fmt(Fmt, Bytes) ->
    Fmt2 = Fmt ++ " ~s",
    {Value, Units} = human_size(Bytes, ["B","KB","MB","GB","TB","PB"]),
    fmt(Fmt2, [Value, Units]).

%% @doc Create a human friendly string `Str' for the given time in
%% microseconds `Micros'. Format according to format string `Fmt';
%% the unit label (us/ms/s/min/hr/d) is appended after a space.
-spec human_time_fmt(string(), non_neg_integer()) -> Str::string().
human_time_fmt(Fmt, Micros) ->
    Fmt2 = Fmt ++ " ~s",
    {Value, Units} = human_time(Micros),
    fmt(Fmt2, [Value, Units]).
%%%===================================================================
%%% Private
%%%===================================================================
%% @private
%%
%% @doc Scales a byte count down by factors of 1024 until it fits the
%% current unit (or the unit list is exhausted), returning the scaled
%% value together with the matching unit label.
%% Thanks StackOverflow:
%% http://stackoverflow.com/questions/2163691/simpler-way-to-format-bytesize-in-a-human-readable-way
%%
%% NOTE: the spec previously declared an iolist() return, but the
%% function has always returned a `{Value, Units}' pair; the first
%% argument becomes a float during recursion, hence number().
-spec human_size(number(), [string(),...]) -> {float(), string()}.
human_size(S, [_|[_|_] = L]) when S >= 1024 -> human_size(S/1024, L);
human_size(S, [M|_]) ->
    {float(S), M}.
%% @private
%%
%% @doc Converts `Micros' microseconds into a human friendly
%% `{Value, Units}' pair, repeatedly dividing by the current unit's
%% factor until the value no longer fills the next unit (or the unit
%% ladder runs out at days).
-spec human_time(non_neg_integer()) -> {Value::number(), Units::string()}.
human_time(Micros) ->
    Ladder = [{1000, "ms"}, {60, "s"}, {60, "min"}, {24, "hr"}, {365, "d"}],
    human_time(Micros, {1000, "us"}, Ladder).

-spec human_time(non_neg_integer(), {pos_integer(), string()},
                 [{pos_integer(), string()}]) ->
          {number(), string()}.
human_time(T, {Divisor, Unit}, Ladder) when T < Divisor orelse Ladder =:= [] ->
    {float(T), Unit};
human_time(T, {Divisor, _Unit}, [Next | Ladder]) ->
    human_time(T / Divisor, Next, Ladder).
-ifdef(TEST).

%% Smoke test: five of each magnitude formats as "5.0 <unit>" through
%% the whole unit ladder (us/ms/s/min/hr/d).
human_time_fmt_test() ->
    FiveUS = 5,
    FiveMS = 5000,
    FiveS = 5000000,
    FiveMin = FiveS * 60,
    FiveHr = FiveMin * 60,
    FiveDay = FiveHr * 24,
    [?assertEqual(Expect, human_time_fmt("~.1f", T))
     || {T, Expect} <- [{FiveUS, "5.0 us"},
                        {FiveMS, "5.0 ms"},
                        {FiveS, "5.0 s"},
                        {FiveMin, "5.0 min"},
                        {FiveHr, "5.0 hr"},
                        {FiveDay, "5.0 d"}]].

-endif.
%% ``Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% The Initial Developer of the Original Code is Ericsson Utvecklings
%% AB. Portions created by Ericsson are Copyright 1999, Ericsson
%% Utvecklings AB. All Rights Reserved.''
%%
%% @private
%% @copyright <NAME> 2001-2003. Portions created by Ericsson
%% are Copyright 1999, Ericsson Utvecklings AB. All Rights Reserved.
%% @author <NAME> <<EMAIL>>
%% @see edoc
%% @end
%% @doc Tokeniser for EDoc. Based on the Erlang standard library module
%% {@link //stdlib/erl_scan}.
-module(edoc_scanner).
%% NOTE: the interface to this module is ancient and should be updated.
%% Please do not regard these exported functions as stable. Their
%% behaviour is described in the documentation of the module `erl_scan'.
%%
%% Since there are no `full stop' tokens in EDoc specifications, the
%% `tokens' function *always* returns `{more, Continuation}' unless an
%% error occurs.
-export([string/1,string/2,format_error/1]).
-import(lists, [reverse/1]).
%% Tokenise `Cs' assuming the scan starts at line/position 1.
string(Cs) -> string(Cs, 1).

%% Tokenise the character list `Cs'. Returns {ok, Tokens, StartPos} or
%% {error, ErrorInfo, StartPos}; note the returned position is always
%% the starting position that was passed in.
string(Cs, StartPos) ->
    case scan(Cs, StartPos) of
        {ok,Toks} -> {ok,Toks,StartPos};
        {error,E} -> {error,E,StartPos}
    end.
%% format_error(Error)
%% Turn an error descriptor produced by this scanner into a readable
%% message (a string or deep character list).
format_error(char) ->
    "unterminated character";
format_error(scan) ->
    "premature end";
format_error(float) ->
    "bad float";
format_error({string, Quote, Head}) ->
    ["unterminated string starting with " ++ io_lib:write_string(Head, Quote)];
format_error({illegal, Type}) ->
    io_lib:fwrite("illegal ~w", [Type]);
format_error({base, Base}) ->
    io_lib:fwrite("illegal base '~w'", [Base]);
format_error(Other) ->
    io_lib:write(Other).
%% Reserved words, not atoms: currently only `where' is reserved in
%% EDoc specifications.
reserved(Word) -> Word =:= 'where'.
%% scan(CharList, StartPos)
%% This takes a list of characters and tries to tokenise them.
%%
%% The token list is built in reverse order (in a stack) to save appending
%% and then reversed when all the tokens have been collected. Most tokens
%% are built in the same way.
%%
%% Returns:
%% {ok,[Tok]}
%% {error,{ErrorPos,edoc_scanner,What}}

%% Entry point: start scanning with an empty token stack.
scan(Cs, Pos) ->
    scan1(Cs, [], Pos).
%% scan1(Characters, TokenStack, Position)
%% Scan a list of characters into tokens. Dispatches on the first
%% character: whitespace is skipped (only newlines bump the position),
%% then atoms, numbers, variables, character constants, quoted atoms,
%% strings and finally punctuation (longest operators first).

scan1([$\n|Cs], Toks, Pos) -> % Newline
    scan1(Cs, Toks, Pos+1);
scan1([C|Cs], Toks, Pos) when C >= 0, C =< $ -> % Skip blanks
    scan1(Cs, Toks, Pos);
scan1([C|Cs], Toks, Pos) when C >= $a, C =< $z -> % Unquoted atom
    scan_atom(C, Cs, Toks, Pos);
scan1([C|Cs], Toks, Pos) when C >= $0, C =< $9 -> % Numbers
    scan_number(C, Cs, Toks, Pos);
scan1([$-,C| Cs], Toks, Pos) when C >= $0, C =< $9 -> % Signed numbers
    scan_signed_number($-, C, Cs, Toks, Pos);
scan1([$+,C| Cs], Toks, Pos) when C >= $0, C =< $9 -> % Signed numbers
    scan_signed_number($+, C, Cs, Toks, Pos);
scan1([C|Cs], Toks, Pos) when C >= $A, C =< $Z -> % Variables
    scan_variable(C, Cs, Toks, Pos);
scan1([$_|Cs], Toks, Pos) -> % Variables
    scan_variable($_, Cs, Toks, Pos);
scan1([$$|Cs], Toks, Pos) -> % Character constant
    %% NOTE(review): this case only covers {ok, _} plus the two scan_char
    %% error atoms; an {error, {Pos, ...}} tuple bubbling up from the
    %% nested scan would hit a case_clause here -- confirm intended.
    case scan_char_const(Cs, Toks, Pos) of
        {ok, Result} ->
            {ok, Result};
        {error, truncated_char} ->
            scan_error(char, Pos);
        {error, illegal_character} ->
            scan_error({illegal, char}, Pos)
    end;
scan1([$'|Cs0], Toks, Pos) -> % Quoted atom
    case scan_string(Cs0, $', Pos) of
        {S,Cs1,Pos1} ->
            case catch list_to_atom(S) of
                A when is_atom(A) ->
                    scan1(Cs1, [{atom,Pos,A}|Toks], Pos1);
                _Error -> scan_error({illegal,atom}, Pos)
            end;
        {error, premature_end} ->
            scan_error({string,$',Cs0}, Pos);
        {error, truncated_char} ->
            scan_error(char, Pos);
        {error, illegal_character} ->
            scan_error({illegal, atom}, Pos)
    end;
scan1([$"|Cs0], Toks, Pos) -> % String
    case scan_string(Cs0, $", Pos) of
        {S,Cs1,Pos1} ->
            %% adjacent string literals are concatenated into a single
            %% string token, keeping the first literal's position
            case Toks of
                [{string, Pos0, S0} | Toks1] ->
                    scan1(Cs1, [{string, Pos0, S0 ++ S} | Toks1],
                          Pos1);
                _ ->
                    scan1(Cs1, [{string,Pos,S}|Toks], Pos1)
            end;
        {error, premature_end} ->
            scan_error({string,$",Cs0}, Pos);
        {error, truncated_char} ->
            scan_error(char, Pos);
        {error, illegal_character} ->
            scan_error({illegal, string}, Pos)
    end;
%% Punctuation characters and operators, first recognise multiples.
scan1([$=,$>|Cs], Toks, Pos) ->
    scan1(Cs, [{'=>',Pos}|Toks], Pos);
scan1([$<,$<|Cs], Toks, Pos) ->
    scan1(Cs, [{'<<',Pos}|Toks], Pos);
scan1([$>,$>|Cs], Toks, Pos) ->
    scan1(Cs, [{'>>',Pos}|Toks], Pos);
scan1([$-,$>|Cs], Toks, Pos) ->
    scan1(Cs, [{'->',Pos}|Toks], Pos);
scan1([$:,$:|Cs], Toks, Pos) ->
    scan1(Cs, [{'::',Pos}|Toks], Pos);
scan1([$/,$/|Cs], Toks, Pos) ->
    scan1(Cs, [{'//',Pos}|Toks], Pos);
scan1([$.,$.,$.|Cs], Toks, Pos) ->
    scan1(Cs, [{'...',Pos}|Toks], Pos);
scan1([$.,$.|Cs], Toks, Pos) ->
    scan1(Cs, [{'..',Pos}|Toks], Pos);
scan1([C|Cs], Toks, Pos) -> % Punctuation character
    P = list_to_atom([C]),
    scan1(Cs, [{P,Pos}|Toks], Pos);
scan1([], Toks0, _Pos) ->
    %% end of input: the token stack was built in reverse
    Toks = reverse(Toks0),
    {ok,Toks}.
%% Note that `_' is not accepted as a variable token.
%% Scan a variable name starting with `C'; a lone underscore becomes an
%% {an_var, Pos, '_'} token, anything else a {var, Pos, Atom} token.
scan_variable(C, Cs, Toks, Pos) ->
    {Wcs,Cs1} = scan_name(Cs, []),
    W = [C|reverse(Wcs)],
    case W of
        "_" ->
            scan1(Cs1, [{an_var,Pos,'_'}|Toks], Pos);
        _ ->
            case catch list_to_atom(W) of
                A when is_atom(A) ->
                    scan1(Cs1, [{var,Pos,A}|Toks], Pos);
                _ ->
                    scan_error({illegal,variable}, Pos)
            end
    end.

%% Scan an unquoted atom starting with `C'. Reserved words become bare
%% keyword tokens {Word, Pos}; everything else an {atom, Pos, A} token.
scan_atom(C, Cs, Toks, Pos) ->
    {Wcs,Cs1} = scan_name(Cs, []),
    W = [C|reverse(Wcs)],
    case catch list_to_atom(W) of
        A when is_atom(A) ->
            case reserved(A) of
                true ->
                    scan1(Cs1, [{A,Pos}|Toks], Pos);
                false ->
                    scan1(Cs1, [{atom,Pos,A}|Toks], Pos)
            end;
        _ ->
            scan_error({illegal,token}, Pos)
    end.
%% Split leading name characters off `Cs', prepending them (in reverse)
%% to the accumulator `Ncs'. Returns {ReversedNameChars, RemainingChars};
%% callers reverse the first element to recover the name in order.
scan_name(Cs, Ncs) ->
    {Taken, Rest} = lists:splitwith(fun name_char/1, Cs),
    {lists:reverse(Taken, Ncs), Rest}.

%% Characters permitted inside an unquoted atom or a variable name:
%% Latin-1 letters (excluding the multiplication/division signs),
%% digits, underscore and at-sign.
name_char(C) when C >= $a, C =< $z -> true;
name_char(C) when C >= $\337, C =< $\377, C /= $\367 -> true;
name_char(C) when C >= $A, C =< $Z -> true;
name_char(C) when C >= $\300, C =< $\336, C /= $\327 -> true;
name_char(C) when C >= $0, C =< $9 -> true;
name_char($_) -> true;
name_char($@) -> true;
name_char(_) -> false.
%% scan_string(CharList, QuoteChar, Pos) ->
%% {StringChars,RestChars, NewPos}
%% Scan the body of a quoted string/atom up to the closing `Quote'.
scan_string(Cs, Quote, Pos) ->
    scan_string(Cs, [], Quote, Pos).

scan_string([Quote|Cs], Scs, Quote, Pos) ->
    %% closing quote found: the accumulator was built in reverse
    {reverse(Scs),Cs,Pos};
scan_string([], _Scs, _Quote, _Pos) ->
    {error, premature_end};
scan_string(Cs0, Scs, Quote, Pos) ->
    case scan_char(Cs0, Pos) of
        {C,Cs,Pos1} ->
            %% Only build the string here
            scan_string(Cs, [C|Scs], Quote, Pos1);
        Error ->
            Error
    end.

%% Note that space characters are not allowed
%% Scan the character following a `$' and push a {char, Pos, C} token.
scan_char_const([$\040 | _Cs0], _Toks, _Pos) ->
    {error, illegal_character};
scan_char_const(Cs0, Toks, Pos) ->
    case scan_char(Cs0, Pos) of
        {C,Cs,Pos1} ->
            scan1(Cs, [{char,Pos,C}|Toks], Pos1);
        Error ->
            Error
    end.

%% {Character,RestChars,NewPos} = scan_char(Chars, Pos)
%% Read a single character from a string or character constant. The
%% pre-scan phase has checked for errors here.
%% Note that control characters are not allowed.
scan_char([$\\|Cs], Pos) ->
    scan_escape(Cs, Pos);
scan_char([C | _Cs], _Pos) when C =< 16#1f ->
    {error, illegal_character};
scan_char([C|Cs], Pos) ->
    {C,Cs,Pos};
scan_char([], _Pos) ->
    {error, truncated_char}.

%% The following conforms to Standard Erlang escape sequences.
%% Interprets what follows a backslash: 1-3 octal digits, a ^X control
%% sequence, or a named escape (see escape_char/1).
scan_escape([O1, O2, O3 | Cs], Pos) when % \<1-3> octal digits
      O1 >= $0, O1 =< $3, O2 >= $0, O2 =< $7, O3 >= $0, O3 =< $7 ->
    %% 64*(O1-$0) + 8*(O2-$0) + (O3-$0), with 73 = 64 + 8 + 1
    Val = (O1*8 + O2)*8 + O3 - 73*$0,
    {Val,Cs,Pos};
scan_escape([O1, O2 | Cs], Pos) when
      O1 >= $0, O1 =< $7, O2 >= $0, O2 =< $7 ->
    %% 8*(O1-$0) + (O2-$0), with 9 = 8 + 1
    Val = (O1*8 + O2) - 9*$0,
    {Val,Cs,Pos};
scan_escape([O1 | Cs], Pos) when
      O1 >= $0, O1 =< $7 ->
    {O1 - $0,Cs,Pos};
scan_escape([$^, C | Cs], Pos) -> % \^X -> CTL-X
    if C >= $\100, C =< $\137 ->
            {C - $\100,Cs,Pos};
       true -> {error, illegal_control_character}
    end;
scan_escape([C | Cs], Pos) ->
    case escape_char(C) of
        C1 when C1 > $\000 -> {C1,Cs,Pos};
        _ -> {error, undefined_escape_sequence}
    end;
scan_escape([], _Pos) ->
    {error, truncated_char}.
%% Map an escape-sequence character to the character it denotes.
%% Undefined escapes map to 0 ($\000), which the caller treats as
%% "no such escape".
escape_char($b) -> $\b;   % backspace
escape_char($d) -> $\d;   % delete
escape_char($e) -> $\e;   % escape
escape_char($f) -> $\f;   % form feed
escape_char($n) -> $\n;   % line feed
escape_char($r) -> $\r;   % carriage return
escape_char($s) -> $\s;   % space
escape_char($t) -> $\t;   % horizontal tab
escape_char($v) -> $\v;   % vertical tab
escape_char($\\) -> $\\;  % backslash
escape_char($') -> $';    % single quote
escape_char($") -> $\";   % double quote
escape_char(_C) -> $\000. % anything else: undefined escape
%% scan_number(Char, CharList, TokenStack, Pos)
%% We handle sign and radix notation:
%% [+-]<digits> - the digits in base [+-]10
%% [+-]<digits>.<digits>
%% [+-]<digits>.<digits>E+-<digits>
%% [+-]<digits>#<digits> - the digits read in base [+-]B
%%
%% Except for explicitly based integers we build a list of all the
%% characters and then use list_to_integer/1 or list_to_float/1 to
%% generate the value.
%% SPos == Start position
%% CPos == Current position

%% Scan an unsigned number whose first digit is `C'.
scan_number(C, Cs0, Toks, Pos) ->
    {Ncs,Cs,Pos1} = scan_integer(Cs0, [C], Pos),
    scan_after_int(Cs, Ncs, Toks, Pos, Pos1).

%% Scan a number prefixed by the sign character `S' ($+ or $-).
scan_signed_number(S, C, Cs0, Toks, Pos) ->
    {Ncs,Cs,Pos1} = scan_integer(Cs0, [C, S], Pos),
    scan_after_int(Cs, Ncs, Toks, Pos, Pos1).

%% Consume leading decimal digits, pushing them (reversed) onto `Stack'.
scan_integer([C|Cs], Stack, Pos) when C >= $0, C =< $9 ->
    scan_integer(Cs, [C|Stack], Pos);
scan_integer(Cs, Stack, Pos) ->
    {Stack,Cs,Pos}.
%% After the integer part: a ".<digit>" continues as a float's fraction,
%% otherwise the accumulated (reversed) digits become an integer token.
scan_after_int([$.,C|Cs0], Ncs0, Toks, SPos, CPos) when C >= $0, C =< $9 ->
    {Ncs,Cs,CPos1} = scan_integer(Cs0, [C,$.|Ncs0], CPos),
    scan_after_fraction(Cs, Ncs, Toks, SPos, CPos1);
scan_after_int(Cs, Ncs, Toks, SPos, CPos) ->
    N = list_to_integer(reverse(Ncs)),
    scan1(Cs, [{integer,SPos,N}|Toks], CPos).

%% After the fraction: an optional E/e exponent, otherwise convert the
%% collected characters into a float token.
scan_after_fraction([$E|Cs], Ncs, Toks, SPos, CPos) ->
    scan_exponent(Cs, [$E|Ncs], Toks, SPos, CPos);
scan_after_fraction([$e|Cs], Ncs, Toks, SPos, CPos) ->
    scan_exponent(Cs, [$e|Ncs], Toks, SPos, CPos);
scan_after_fraction(Cs, Ncs, Toks, SPos, CPos) ->
    case catch list_to_float(reverse(Ncs)) of
        N when is_float(N) ->
            scan1(Cs, [{float,SPos,N}|Toks], CPos);
        _Error -> scan_error({illegal,float}, SPos)
    end.

%% scan_exponent(CharList, NumberCharStack, TokenStack, StartPos, CurPos)
%% Generate an error here if E{+|-} not followed by any digits.

scan_exponent([$+|Cs], Ncs, Toks, SPos, CPos) ->
    scan_exponent1(Cs, [$+|Ncs], Toks, SPos, CPos);
scan_exponent([$-|Cs], Ncs, Toks, SPos, CPos) ->
    scan_exponent1(Cs, [$-|Ncs], Toks, SPos, CPos);
scan_exponent(Cs, Ncs, Toks, SPos, CPos) ->
    scan_exponent1(Cs, Ncs, Toks, SPos, CPos).

%% The exponent must contain at least one digit; otherwise report a
%% `float' scan error at the current position.
scan_exponent1([C|Cs0], Ncs0, Toks, SPos, CPos) when C >= $0, C =< $9 ->
    {Ncs,Cs,CPos1} = scan_integer(Cs0, [C|Ncs0], CPos),
    case catch list_to_float(reverse(Ncs)) of
        N when is_float(N) ->
            scan1(Cs, [{float,SPos,N}|Toks], CPos1);
        _Error -> scan_error({illegal,float}, SPos)
    end;
scan_exponent1(_, _, _, _, CPos) ->
    scan_error(float, CPos).
%% Wrap a scanner error reason into the standard
%% {error, {Pos, Module, Reason}} shape consumed by format_error/1.
scan_error(Reason, Pos) ->
    {error, {Pos, edoc_scanner, Reason}}.
%% @private
-module(aws_signature_utils).
-export([hmac_sha256/2,
hmac_sha256_hexdigest/2,
sha256_hexdigest/1,
binary_join/2,
base16/1,
hex/2,
parse_path_and_query/1,
uri_encode_path/1
]).
%% @doc Creates an HMAC-SHA256 hexdigest for `Key' and `Message'
%% (lowercase hex encoding of the raw MAC bytes).
-spec hmac_sha256_hexdigest(binary(), binary()) -> binary().
hmac_sha256_hexdigest(Key, Message) ->
    base16(hmac_sha256(Key, Message)).

%% @doc Creates an HMAC-SHA256 binary for `Key' and `Message'.
%% Dispatches through crypto_hmac/3, which papers over the old/new
%% crypto API split (crypto:hmac/3 vs crypto:mac/4).
-spec hmac_sha256(binary(), binary()) -> binary().
hmac_sha256(Key, Message) ->
    crypto_hmac(sha256, Key, Message).

%% @doc Creates a SHA256 hexdigest for `Value'.
-spec sha256_hexdigest(binary()) -> binary().
sha256_hexdigest(Value) ->
    base16(crypto:hash(sha256, Value)).
%% @doc Joins binary values using the specified separator. An empty
%% part list yields the empty binary and a single part is returned
%% unchanged.
-spec binary_join(Parts :: [binary()], Separator :: binary()) -> binary().
binary_join(Parts, Separator) ->
    iolist_to_binary(lists:join(Separator, Parts)).
%% @doc Encodes binary data as a lowercase base-16 (hex) string, two
%% characters per input byte.
-spec base16(binary()) -> binary().
base16(Data) ->
    << <<(hex(Hi, lower)), (hex(Lo, lower))>> || <<Hi:4, Lo:4>> <= Data >>.

%% @doc Converts an integer between 0 and 15 into a hexadecimal
%% character, using the requested letter case for digits above 9.
-spec hex(0..15, lower | upper) -> byte().
hex(N, _Case) when N >= 0, N < 10 -> $0 + N;
hex(N, lower) when N < 16 -> $a + N - 10;
hex(N, upper) when N < 16 -> $A + N - 10.
%% @doc Parses the given URL, returning the path and query components
%% (the query without its leading "?"); either comes back as <<"">>
%% when absent or when the URL does not match at all.
%%
%% An alternative to `uri_string:parse/1' to support OTP below 21.
-spec parse_path_and_query(binary()) -> {binary(), binary()}.
parse_path_and_query(URL) when is_binary(URL) ->
    %% From https://datatracker.ietf.org/doc/html/rfc3986#appendix-B
    {ok, Pattern} = re:compile("^(([a-z][a-z0-9\\+\\-\\.]*):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?", [caseless]),
    case re:run(URL, Pattern, [{capture, all, binary}]) of
        {match, Groups} when length(Groups) >= 8 ->
            %% element 6 is capture group 5 (the path),
            %% element 8 is capture group 7 (the query)
            {lists:nth(6, Groups), lists:nth(8, Groups)};
        {match, Groups} when length(Groups) >= 6 ->
            %% a path matched but no query part was present
            {lists:nth(6, Groups), <<"">>};
        _ ->
            {<<"">>, <<"">>}
    end.
%% @doc URI-encodes the given path.
%%
%% Escapes all characters except for "/" and the unreserved
%% characters listed in https://tools.ietf.org/html/rfc3986#section-2.3
%%
%% See the UriEncode function in the docs: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
-spec uri_encode_path(binary()) -> binary().
uri_encode_path(Path) when is_binary(Path) ->
    %% Each encoded byte expands to a (possibly multi-byte) binary, so
    %% the comprehension template needs an explicit /binary segment
    %% type; with the default integer type the construction raises
    %% badarg as soon as uri_encode_path_byte/1 returns a binary.
    << <<(uri_encode_path_byte(Byte))/binary>> || <<Byte>> <= Path >>.

%% Encode one path byte: "/" and the RFC 3986 unreserved characters
%% pass through unchanged, everything else becomes a percent-escape
%% with uppercase hex digits.
-spec uri_encode_path_byte(byte()) -> binary().
uri_encode_path_byte($/) -> <<"/">>;
uri_encode_path_byte(Byte)
  when $0 =< Byte, Byte =< $9;
       $a =< Byte, Byte =< $z;
       $A =< Byte, Byte =< $Z;
       Byte =:= $~;
       Byte =:= $_;
       Byte =:= $-;
       Byte =:= $. ->
    <<Byte>>;
uri_encode_path_byte(Byte) ->
    H = Byte band 16#F0 bsr 4,
    L = Byte band 16#0F,
    <<"%", (aws_signature_utils:hex(H, upper)), (aws_signature_utils:hex(L, upper))>>.
%% This can be simplified if we drop support for OTP < 21
%% This can be removed if we drop support for OTP < 23
%% OTP_RELEASE was introduced in OTP 21, so its mere presence
%% distinguishes OTP >= 21; from OTP 23 onwards crypto:mac/4 replaces
%% the older crypto:hmac/3.
-ifdef(OTP_RELEASE). % OTP >= 21
-if(?OTP_RELEASE >= 23).
-define(USE_CRYPTO_MAC_4, true).
-else.
-undef(USE_CRYPTO_MAC_4).
-endif.
-else. % OTP < 21
-undef(USE_CRYPTO_MAC_4).
-endif.
%% Computes an HMAC of Data under Key with the given hash algorithm,
%% dispatching to whichever crypto API the current OTP release offers.
-spec crypto_hmac(atom(), binary(), binary()) -> binary().
-ifdef(USE_CRYPTO_MAC_4).
crypto_hmac(Sha, Key, Data) -> crypto:mac(hmac, Sha, Key, Data).
-else.
crypto_hmac(Sha, Key, Data) -> crypto:hmac(Sha, Key, Data).
-endif.
%%====================================================================
%% EUnit tests (compiled only when the TEST macro is defined).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
%% sha256_hexdigest/1 returns a SHA256 hexdigest for an empty value.
%% (Expected value is the well-known SHA-256 digest of the empty string.)
sha256_hexdigest_with_empty_value_test() ->
    ?assertEqual(
        <<"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855">>,
        sha256_hexdigest(<<"">>)).
%% sha256_hexdigest/1 returns a SHA256 hexdigest for a non-empty body.
sha256_hexdigest_test() ->
    ?assertEqual(
        <<"315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3">>,
        sha256_hexdigest(<<"Hello, world!">>)).
%% hmac_sha256/2 returns a SHA256 HMAC for a message.
%% (Raw bytes of HMAC-SHA256("key", "message"); the hexdigest test
%% below asserts the same value in hex form.)
hmac_sha256_test() ->
    ?assertEqual(
        <<110, 158, 242, 155, 117, 255, 252, 91,
            122, 186, 229, 39, 213, 143, 218, 219,
            47, 228, 46, 114, 25, 1, 25, 118,
            145, 115, 67, 6, 95, 88, 237, 74>>,
        hmac_sha256(<<"key">>, <<"message">>)).
%% hmac_sha256_hexdigest/2 returns an HMAC SHA256 hexdigest for a message.
hmac_sha256_hexdigest_test() ->
    ?assertEqual(
        <<"6e9ef29b75fffc5b7abae527d58fdadb2fe42e7219011976917343065f58ed4a">>,
        hmac_sha256_hexdigest(<<"key">>, <<"message">>)).
%% The following tests previously passed arguments to ?assertEqual as
%% (Actual, Expected); eunit's convention — followed by the sha/hmac
%% tests above — is (Expected, Actual), and swapped arguments produce
%% misleading "expected/got" failure reports. Fixed throughout.
%% binary_join/2 joins a list of binary values, separated by a separator
%% character, into a single binary value
binary_join_test() ->
    ?assertEqual(
        <<"a,b,c">>,
        binary_join([<<"a">>, <<"b">>, <<"c">>], <<",">>)).
%% binary_join/2 correctly joins binary values with a multi-character
%% separator
binary_join_with_multi_character_separator_test() ->
    ?assertEqual(
        <<"a, b, c">>,
        binary_join([<<"a">>, <<"b">>, <<"c">>], <<", ">>)).
%% binary_join/2 converts a list containing a single binary into the
%% binary itself.
binary_join_with_single_element_list_test() ->
    ?assertEqual(<<"a">>, binary_join([<<"a">>], <<",">>)).
%% binary_join/2 returns an empty binary value when an empty list is given
binary_join_with_empty_list_test() ->
    ?assertEqual(<<"">>, binary_join([], <<",">>)).
%% parse_path_and_query/1 returns empty path and query if none is present
parse_path_and_query_with_root_url_test() ->
    ?assertEqual(
        {<<"">>, <<"">>},
        parse_path_and_query(<<"https://example.com">>)).
%% parse_path_and_query/1 parses just path
parse_path_and_query_with_just_path_test() ->
    ?assertEqual(
        {<<"/te%20st/path">>, <<"">>},
        parse_path_and_query(<<"https://example.com/te%20st/path">>)).
%% parse_path_and_query/1 parses just query
parse_path_and_query_with_just_query_test() ->
    ?assertEqual(
        {<<"">>, <<"a=1&b&c=2">>},
        parse_path_and_query(<<"https://example.com?a=1&b&c=2">>)).
%% parse_path_and_query/1 parses both path and query in a full URL
parse_path_and_query_with_full_url_test() ->
    ?assertEqual(
        {<<"/path/to/file/">>, <<"a=1&b&c=2">>},
        parse_path_and_query(<<"https://example.com/path/to/file/?a=1&b&c=2#fragment">>)).
%% uri_encode_path/1 keeps forward slash and unreserved characters unchanged
uri_encode_path_with_forward_slash_test() ->
    ?assertEqual(<<"/a1/~_-./">>, uri_encode_path(<<"/a1/~_-./">>)).
%% uri_encode_path/1 escapes reserved characters
uri_encode_path_with_reserved_characters_test() ->
    ?assertEqual(<<"/a%2Bb%25c%5Bd%5D%20e%3Af/">>, uri_encode_path(<<"/a+b%c[d] e:f/">>)).
-endif.
%% -------------------------------------------------------------------
%%
%% xs_regex - XML Schema regex translation
%%
%% Copyright (c) 2017-2018 <NAME> All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(xs_regex).
%% FLAGS - 'https://www.w3.org/TR/xpath-functions-31/#flags'
%% s: If present, the match operates in "dot-all" mode. (Perl calls this the
%% single-line mode.) If the s flag is not specified, the meta-character .
%% matches any character except a newline (#x0A) or carriage return (#x0D)
%% character. In dot-all mode, the meta-character . matches any character
%% whatsoever. Suppose the input contains "hello" and "world" on two lines.
%% This will not be matched by the regular expression "hello.*world"
%% unless dot-all mode is enabled.
%% m: If present, the match operates in multi-line mode. By default, the
%% meta-character ^ matches the start of the entire string, while $ matches
%% the end of the entire string. In multi-line mode, ^ matches the start of
%% any line (that is, the start of the entire string, and the position
%% immediately after a newline character other than a newline that appears
%% as the last character in the string), while $ matches the end of any line
%% (that is, the position immediately before a newline character, and the end
%% of the entire string if there is no newline character at the end of the
%% string). Newline here means the character #x0A only.
%% i: If present, the match operates in case-insensitive mode. The detailed
%% rules are as follows. In these rules, a character C2 is considered to
%% be a case-variant of another character C1 if the following XPath
%% expression returns true when the two characters are considered as strings
%% of length one, and the Unicode codepoint collation is used:
%%   fn:lower-case($c1) eq fn:lower-case($c2) or fn:upper-case($c1) eq fn:upper-case($c2)
%% Note that the case-variants of a character under this definition are
%% always single characters.
%% When a normal character (Char) is used as an atom, it represents the set
%% containing that character and all its case-variants. For example,
%% the regular expression "z" will match both "z" and "Z".
%% A character range (production charRange in the XSD 1.0 grammar, replaced
%% by productions charRange and singleChar in XSD 1.1) represents the set
%% containing all the characters that it would match in the absence of the
%% "i" flag, together with their case-variants. For example, the regular
%% expression "[A-Z]" will match all the letters A-Z and all the letters
%% a-z. It will also match certain other characters such as #x212A
%% (KELVIN SIGN), since fn:lower-case("#x212A") is "k".
%% This rule applies also to a character range used in a character class
%% subtraction (charClassSub): thus [A-Z-[IO]] will match characters such
%% as "A", "B", "a", and "b", but will not match "I", "O", "i", or "o".
%% The rule also applies to a character range used as part of a negative
%% character group: thus [^Q] will match every character except "Q" and "q"
%% (these being the only case-variants of "Q" in Unicode).
%% A back-reference is compared using case-blind comparison: that is, each
%% character must either be the same as the corresponding character of the
%% previously matched string, or must be a case-variant of that character.
%% For example, the strings "Mum", "mom", "Dad", and "DUD" all match the
%% regular expression "([md])[aeiou]\1" when the "i" flag is used.
%% All other constructs are unaffected by the "i" flag. For example, "\p{Lu}"
%% continues to match upper-case letters only.
%% x: If present, whitespace characters (#x9, #xA, #xD and #x20) in the regular
%% expression are removed prior to matching with one exception: whitespace
%% characters within character class expressions (charClassExpr) are not
%% removed. This flag can be used, for example, to break up long regular
%% expressions into readable lines.
%% q: if present, all characters in the regular expression are treated as
%% representing themselves, not as metacharacters. In effect, every
%% character that would normally have a special meaning in a regular
%% expression is implicitly escaped by preceding it with a backslash.
%% Furthermore, when this flag is present, the characters $ and \ have no
%% special significance when used in the replacement string supplied to
%% the fn:replace function.
%% This flag can be used in conjunction with the i flag. If it is used
%% together with the m, s, or x flag, that flag has no effect.
%% ====================================================================
%% API functions
%% ====================================================================
-export([
normalize/1,
translate/1
]).
-export([compile/2, analyze/2]).
-export([transform_replace/2]).
-export([simple_escape/1]).
-export([get_depth/1]).
%% A parsed XSD regex: a list of "|"-separated alternatives.
-type regex() :: list(branch()).
-type branch() :: list({branch, piece()}).
%% One atom plus its quantifier (`one' means no explicit quantifier).
-type piece() :: {piece, re_atom(), one | quantifier()}.
%% The atomic constructs of the grammar: anchors, single characters,
%% character classes, (non-)capturing groups and back-references.
-type re_atom() ::
    '^'
    | '$'
    | {char, char()}
    | char_class()
    | {paren, regex()}
    | {nc_paren, regex()}
    | {back_ref, integer()}.
-type char_class() :: string() | char_class_esc().
-type char_class_esc() ::
    {char_class, string()}
    | {neg_char_class, string()}
    | char_group().
%% "[...]" / "[^...]" groups, plus XSD's class subtraction "[a-[b]]".
-type char_group() ::
    {group, list(group_part())}
    | {neg_group, list(group_part())}
    | {subtract, char_group(), char_group()}.
-type group_part() :: {range, integer(), integer()} | {value, integer()}.
%% Quantifier kept as its literal source text, e.g. "{2,3}" or "*".
-type quantifier() :: {q, string()}.
% possibly nested capture grouping
-type groups() :: [{group, integer()} | {group, integer(), groups()}].
%% Normalizes hex ("&#xNN;") and decimal ("&#NN;") XML character
%% references in a string by delegating to
%% xs_regex_util:decode_string/1. Binaries are converted to a
%% character list, decoded, and converted back.
-spec normalize(string() | binary()) -> {ok, string() | binary()}.
normalize(Subject) when is_binary(Subject) ->
    {ok, Decoded} = normalize(unicode:characters_to_list(Subject)),
    {ok, unicode:characters_to_binary(Decoded)};
normalize(Subject) ->
    {ok, xs_regex_util:decode_string(Subject)}.
%% Translates an XML Schema regex string into Erlang (PCRE) flavor.
-spec translate(string() | binary()) -> {ok, string()} | {error, _}.
translate([]) ->
    {ok, []};
translate(Binary) when is_binary(Binary) ->
    translate(unicode:characters_to_list(Binary));
translate(String) when is_list(String) ->
    %% Scanning should not fail, so assert success.
    {ok, Tokens, _} = xs_regex_scanner:string(String),
    case xs_regex_parser:parse(Tokens) of
        {ok, Tree} ->
            %% translate_1/2 reports failures either by returning an
            %% {error, _} tuple or by throwing one (bad back-references
            %% in check_back_refs/2); normalize both here. Using
            %% try/throw instead of the old `catch` keeps genuine
            %% crashes visible instead of silently wrapping them into
            %% a bogus {ok, {'EXIT', _}} result.
            try translate_1(Tree, {[], 0}) of
                {error, _} = Err ->
                    Err;
                Trans ->
                    {ok, Trans}
            catch
                throw:{error, _} = Err ->
                    Err
            end;
        {error, {_, _, O}} ->
            {error, {invalid_regex, lists:flatten(O)}}
    end.
%% Like translate/1, but additionally returns the parse tree so that
%% analyze/2 can derive the capture-group layout via translate_2/1.
%% NOTE(review): the empty-input clause returns a 2-tuple while the
%% success path returns a 3-tuple; analyze/2 matches the 3-tuple, so
%% an empty expression surfaces there as a badmatch. Preserved as-is.
analyze([]) ->
    {ok, []};
analyze(Binary) when is_binary(Binary) ->
    analyze(unicode:characters_to_list(Binary));
analyze(String) when is_list(String) ->
    %% Scanning should not fail, so assert success.
    {ok, Tokens, _} = xs_regex_scanner:string(String),
    case xs_regex_parser:parse(Tokens) of
        {ok, Tree} ->
            %% Same error normalization as translate/1: handle both
            %% returned and thrown {error, _} tuples, and let genuine
            %% crashes propagate (the old `catch` wrapped them into
            %% {ok, {'EXIT', _}, Tree}).
            try translate_1(Tree, {[], 0}) of
                {error, _} = Err ->
                    Err;
                Trans ->
                    {ok, Trans, Tree}
            catch
                throw:{error, _} = Err ->
                    Err
            end;
        {error, {_, _, O}} ->
            {error, {invalid_regex, lists:flatten(O)}}
    end.
%% Doubles every backslash in a string, escaping the escape character
%% itself; all other characters are passed through unchanged.
-spec simple_escape(string()) -> string().
simple_escape(Str) ->
    lists:flatmap(
        fun
            ($\\) -> [$\\, $\\];
            (Ch) -> [Ch]
        end,
        Str
    ).
%% Transforms an XML replacement string ("$" followed by decimal
%% digits) to Erlang flavor ("\g{N}"). Depth — the number of capture
%% groups in the pattern — bounds which digits form the group number.
-spec transform_replace(string() | binary(), non_neg_integer()) ->
    {ok, binary()} | {error, invalid_replacement}.
transform_replace(Replacement, Depth) when is_list(Replacement) ->
    transform_replace(unicode:characters_to_binary(Replacement), Depth);
transform_replace(Replacement, Depth) ->
    case transform_repl1(Replacement, Depth, <<>>) of
        {error, _} = Error ->
            Error;
        Transformed ->
            {ok, Transformed}
    end.
%% Returns the capturing pattern depth of a pattern.
%% Needed when transforming a replacement pattern.
%% (Implemented by counting ")" bytes in the pattern binary — see
%% get_depth/2 below.)
-spec get_depth(binary()) -> {ok, non_neg_integer()}.
get_depth(String) ->
    {ok, get_depth(String, 0)}.
%% Translates and compiles an XSD regex, returning whether it matches
%% the zero-length string, the compiled pattern, and the capture-group
%% layout. NOTE(review): near-duplicate of compile/2 below (which omits
%% the group layout); candidates for sharing a common helper.
-spec analyze(binary(), binary()) -> {boolean(), any(), groups()} | {error, _}.
analyze(Expr0, Flags) ->
    try
        FlagList1 = regex_flags(Flags),
        X = lists:member(extended, FlagList1),
        FlagList = FlagList1 ++ [{newline, any}, unicode, ucp, no_start_optimize],
        %% do_qe is an internal marker (the "q" flag), not an re option.
        Opts = FlagList -- [do_qe],
        Q = lists:member(do_qe, FlagList),
        %% The "x" flag: strip unescaped whitespace outside classes.
        Expr =
            if
                X ->
                    strip_esc_ws(Expr0);
                true ->
                    Expr0
            end,
        %% The "q" flag quotes the whole expression verbatim instead of
        %% translating it; there is no parse tree in that case.
        {Expr1, Tree1} =
            if
                Q == false ->
                    {ok, Tr, Tree} = analyze(Expr),
                    {Tr, Tree};
                true ->
                    {<<"\\Q", Expr/binary, "\\E">>, []}
            end,
        {ok, MP} = re:compile(Expr1, Opts),
        %% Probe the empty string to learn whether the pattern matches
        %% zero-length input (sometimes an error in XQuery).
        case catch re:run("", MP) of
            nomatch ->
                {false, MP, translate_2(Tree1)};
            {match, _} ->
                {true, MP, translate_2(Tree1)};
            _ ->
                {false, MP, translate_2(Tree1)}
        end
    catch
        _:{error, {invalid_flag, _}} = E ->
            E;
        _:E ->
            {error, {invalid_regex, E}}
    end.
%% Matching the zero-length-string is (sometimes) an error in XQuery,
%% so can be tested here.
%% Takes a transformed regular expression as a string and a
%% string of flag characters.
%% Flag characters can only be "s, m, i, x and q". See comment above.
%% Returns {MatchesZeroLengthString, MP}
%% NOTE(review): near-duplicate of analyze/2 above, minus the group
%% layout in the result.
-spec compile(binary(), binary()) -> {boolean(), any()} | {error, _}.
compile(Expr0, Flags) ->
    try
        FlagList1 = regex_flags(Flags),
        X = lists:member(extended, FlagList1),
        FlagList = FlagList1 ++ [{newline, any}, unicode, ucp, no_start_optimize],
        %% do_qe is an internal marker (the "q" flag), not an re option.
        Opts = FlagList -- [do_qe],
        Q = lists:member(do_qe, FlagList),
        %% The "x" flag: strip unescaped whitespace outside classes.
        Expr =
            if
                X ->
                    strip_esc_ws(Expr0);
                true ->
                    Expr0
            end,
        %% The "q" flag quotes the whole expression verbatim instead of
        %% translating it.
        Expr1 =
            if
                Q == false ->
                    {ok, Tr} = translate(Expr),
                    Tr;
                true ->
                    <<"\\Q", Expr/binary, "\\E">>
            end,
        {ok, MP} = re:compile(Expr1, Opts),
        %% Probe the empty string to learn whether the pattern matches
        %% zero-length input.
        case catch re:run("", MP) of
            nomatch ->
                {false, MP};
            {match, _} ->
                {true, MP};
            _ ->
                {false, MP}
        end
    catch
        _:{error, {invalid_flag, _}} = E ->
            E;
        _:E ->
            {error, {invalid_regex, E}}
    end.
%% ====================================================================
%% Internal functions
%% ====================================================================
%% Recursively renders the parse tree into a PCRE-compatible pattern
%% string. The accumulator is {OpenGroups, Count}: the capture groups
%% currently open (to reject self-referencing back-references) and the
%% number of capture groups seen so far.
-spec translate_1(regex(), {list(), integer()}) -> string() | {group, group_part()} | {error, _}.
% regex()
translate_1([H | _] = All, CurrCnt) when not is_integer(H) ->
    %% A regex is a list of branches: render each while threading the
    %% running capture-group count, then join the results with "|".
    Fun = fun(G, {Open, Cnt}) ->
        NewCnt = count_capturing_patterns(G) + Cnt,
        {translate_1(G, {Open, Cnt}), {Open, NewCnt}}
    end,
    {[Hd | Tl], _} = lists:mapfoldl(Fun, CurrCnt, All),
    Hd ++ lists:flatten(["|" ++ X || X <- Tl]);
% string()
translate_1([H | _] = Str, _CurrCnt) when is_integer(H) ->
    Str;
translate_1({branch, Pieces}, CurrCnt) ->
    %% Back-references need validation against the groups defined so
    %% far; skip that pass when the branch contains none.
    HasBackRef = lists:any(fun(P) -> is_back_ref(P) end, Pieces),
    NewPieces =
        if
            HasBackRef ->
                check_back_refs(Pieces, CurrCnt);
            true ->
                Pieces
        end,
    %% "^"/"$" anchor only at the very ends of a branch; elsewhere they
    %% are literals (see the '^'/'$' clauses below).
    {Hd, Tl, Rest} = maybe_strip_anchors(NewPieces),
    Out = [translate_1(X, CurrCnt) || X <- Rest],
    Hd ++ lists:flatten(Out) ++ Tl;
translate_1({piece, Atom, Quant}, CurrCnt) ->
    Quant1 = check_quantifier(Quant),
    translate_1(Atom, CurrCnt) ++ Quant1;
translate_1('^', _) ->
    "\\^";
translate_1('$', _) ->
    "\\$";
translate_1({back_ref, Int}, _) ->
    "\\g{" ++ integer_to_list(Int) ++ "}";
translate_1({char, C}, _) when is_list(C) -> C;
translate_1({char, C}, _) ->
    [C];
%% Named character classes expand to explicit ranges; the "(?-i:...)"
%% wrapper keeps them case-sensitive even under the "i" flag, matching
%% the XSD rule that "\p{Lu}" etc. are unaffected by "i".
translate_1({char_class, Cc}, _) ->
    Range = xs_regex_util:range(Cc),
    "(?-i:[" ++ xs_regex_util:range_to_regex(Range) ++ "])";
translate_1({neg_char_class, Cc}, _) ->
    Range = xs_regex_util:range(Cc),
    "(?-i:[^" ++ xs_regex_util:range_to_regex(Range) ++ "])";
translate_1({paren, RegEx}, {Open, Cnt}) ->
    %% Entering a capturing group: bump the counter and mark the group
    %% open while translating its sub-expression.
    Cnt1 = Cnt + 1,
    "(" ++ translate_1(RegEx, {[Cnt1 | Open], Cnt1}) ++ ")";
translate_1({nc_paren, RegEx}, CurrCnt) ->
    "(?:" ++ translate_1(RegEx, CurrCnt) ++ ")";
translate_1({RegEx, {q, Quant}}, CurrCnt) ->
    translate_1(RegEx, CurrCnt) ++ Quant;
translate_1({group, _} = G, _) ->
    translate_group(G);
translate_1({neg_group, _} = G, _) ->
    translate_group(G);
translate_1({subtract, G1, G2}, _) ->
    translate_group({subtract, combine_group(G1), combine_group(G2)});
%% NOTE(review): a bare integer is returned as-is (a single code
%% point); callers "++" it into the result as a character.
translate_1(Int, _) when is_integer(Int) ->
    Int;
translate_1(Tree, _) ->
    {error, {unknown, Tree}}.
%% Return the grouping layout of the regex: a flat list of
%% {group, N} / {group, N, Children} descriptors, one per capturing
%% parenthesis, in the order the groups are opened.
-spec translate_2(regex()) -> groups().
translate_2(All) ->
    {_, Deep} = translate_2(All, 0),
    lists:flatten(Deep).

%% Walks the parse tree with a running capture-group counter. The
%% second element of the result is either a (deep) list of descriptors
%% or, for a {paren, _} node, a single {group, N[, Children]} tuple.
%% (The previous spec was placed in front of the arity-1 head and
%% wrongly declared the second element as list(); the paren clause
%% returns tuples.)
-spec translate_2(term(), integer()) ->
    {integer(), list() | {group, integer()} | {group, integer(), list()}}.
translate_2({paren, Paren}, Cnt) ->
    Cnt1 = Cnt + 1,
    {Cnt2, Paren1} = translate_2(Paren, Cnt1),
    case lists:flatten(Paren1) of
        [] ->
            {Cnt2, {group, Cnt1}};
        F ->
            {Cnt2, {group, Cnt1, F}}
    end;
translate_2({nc_paren, Paren}, Cnt) ->
    %% Non-capturing groups contribute no descriptor of their own.
    translate_2(Paren, Cnt);
translate_2({branch, Pieces}, Cnt) ->
    translate_2(Pieces, Cnt);
translate_2({group, Pieces}, Cnt) ->
    translate_2(Pieces, Cnt);
translate_2({piece, Pieces, _}, Cnt) ->
    translate_2(Pieces, Cnt);
translate_2([H], Cnt) ->
    {Cnt1, H1} = translate_2(H, Cnt),
    {Cnt1, [H1]};
translate_2([H | T], Cnt) ->
    {Cnt1, H1} = translate_2(H, Cnt),
    {Cnt2, T2} = translate_2(T, Cnt1),
    {Cnt2, [H1 | T2]};
translate_2(_, Cnt) ->
    {Cnt, []}.
%% Detects "^" at the head and "$" at the tail of a branch's pieces.
%% Returns {HeadAnchor, TailAnchor, RemainingPieces}, with the anchors
%% rendered as "^"/"$" strings (or "" when absent).
maybe_strip_anchors([{piece, '^', _}]) ->
    {"^", "", []};
maybe_strip_anchors([{piece, '^', _} | Rest]) ->
    {Tail, Remaining} = strip_tail_anchor(Rest),
    {"^", Tail, Remaining};
maybe_strip_anchors(Pieces) ->
    {Tail, Remaining} = strip_tail_anchor(Pieces),
    {"", Tail, Remaining}.

%% Splits a trailing "$" anchor off the piece list, if present.
strip_tail_anchor(Pieces) ->
    case lists:last(Pieces) of
        {piece, '$', _} ->
            {"$", lists:droplast(Pieces)};
        _ ->
            {"", Pieces}
    end.
%% Renders a character group ("[...]"/"[^...]") or an XSD class
%% subtraction into a PCRE character class string.
translate_group({group, G}) ->
    %% Back-references are illegal inside a character group; callers
    %% crash with badmatch on the {error, _} return.
    ok = no_back_ref(G),
    case is_all_value(G) of
        {true, Str} ->
            "[" ++ Str ++ "]";
        false ->
            %% Mixed content (class escapes etc.): canonicalize first.
            NewGroup = combine_group({group, G}),
            translate_group(NewGroup)
    end;
% double negative
translate_group({neg_group, [{neg_char_class, Neg}]}) ->
    translate_group({group, [{char_class, Neg}]});
translate_group({neg_group, G}) ->
    ok = no_back_ref(G),
    case is_all_value(G) of
        {true, Str} ->
            "[^" ++ Str ++ "]";
        false ->
            NewGroup = combine_group({neg_group, G}),
            translate_group(NewGroup)
    end;
translate_group({subtract, {group, _} = G1, {neg_group, _} = G2}) ->
    S1 = translate_group_as_set(G1),
    S2 = translate_group_as_set(G2),
    % sub neg == intersect
    S3 = xs_regex_util:intersection(S1, S2),
    "[" ++ xs_regex_util:range_to_regex(S3) ++ "]";
translate_group({subtract, {neg_group, R0} = G1, {group, _} = G2}) ->
    S1 = translate_group_as_set(G1),
    S2 = translate_group_as_set(G2),
    % sub pos from neg == neg sub
    S3 = xs_regex_util:subtract(S1, S2),
    %% NOTE(review): the "^" is emitted OUTSIDE the brackets here,
    %% which in a regex is an anchor, not class negation ("[^...]").
    %% Looks suspicious — verify against XSD subtraction semantics.
    case xs_regex_util:range_to_regex(S3) of
        [] ->
            "^[" ++ xs_regex_util:range_to_regex(R0) ++ "]";
        M ->
            "^[" ++ M ++ "]"
    end;
translate_group({subtract, G1, G2}) ->
    S1 = translate_group_as_set(G1),
    S2 = translate_group_as_set(G2),
    S3 = xs_regex_util:subtract(S1, S2),
    case xs_regex_util:range_to_regex(S3) of
        [] ->
            "";
        M ->
            "[" ++ M ++ "]"
    end;
%% NOTE(review): this fallback returns an {error, _} tuple while every
%% other clause returns a string; callers that "++" the result would
%% crash rather than propagate the error.
translate_group(_) ->
    {error, translate_group}.
%% Folds the members of a character group into one canonical set.
%% Class escapes are expanded to ranges; positive members are unioned
%% with the plain values/ranges, negative members are unioned
%% separately and then resolved against the positives — flipping the
%% group's polarity when negatives are present.
combine_group({Type, List}) when
    Type == group;
    Type == neg_group
->
    %% Expand named class escapes to explicit range lists.
    List1 = lists:map(
        fun
            ({neg_char_class, C}) ->
                G = xs_regex_util:range(C),
                {neg_group, G};
            ({char_class, C}) ->
                G = xs_regex_util:range(C),
                {group, G};
            (O) ->
                O
        end,
        List
    ),
    Negatives = [N || N <- List1, element(1, N) == neg_group],
    Positives = [N || N <- List1, element(1, N) == group],
    %% Plain {value, _} / {range, _, _} members.
    Rest = [
        N
     || N <- List1,
        element(1, N) =/= group,
        element(1, N) =/= neg_group
    ],
    Pos = lists:foldl(
        fun({group, R}, Acc) ->
            S = xs_regex_util:range_to_set(R),
            xs_regex_util:union(Acc, S)
        end,
        xs_regex_util:range_to_set(Rest),
        Positives
    ),
    if
        Negatives == [] ->
            {Type, Pos};
        true ->
            Neg = lists:foldl(
                fun({neg_group, R}, Acc) ->
                    S = xs_regex_util:range_to_set(R),
                    xs_regex_util:union(Acc, S)
                end,
                xs_regex_util:range_to_set([]),
                Negatives
            ),
            %% Characters excluded by the negatives but not explicitly
            %% included by the positives.
            Rest1 = xs_regex_util:subtract(Neg, Pos),
            if
                Type == group ->
                    {neg_group, Rest1};
                true ->
                    {Type, Rest1}
            end
    end;
%% Bare list form: expand each member to ranges and union them all.
combine_group(List) ->
    Fun = fun
        ({char_class, Name}) ->
            xs_regex_util:range(Name);
        ({value, _} = V) ->
            [V];
        ({range, _, _} = R) ->
            [R]
    end,
    Ranges = lists:map(Fun, List),
    SetHd = xs_regex_util:range_to_set(hd(Ranges)),
    lists:foldl(
        fun(R, A) ->
            S = xs_regex_util:range_to_set(R),
            xs_regex_util:union(A, S)
        end,
        SetHd,
        tl(Ranges)
    ).
%% Returns the underlying (positive) range set of a combined group.
%% For a neg_group this is the set being negated, NOT its complement.
translate_group_as_set({neg_group, _} = G) ->
    {_, R} = combine_group(G),
    xs_regex_util:range_to_set(R);
translate_group_as_set({group, _} = G) ->
    {_, R} = combine_group(G),
    xs_regex_util:range_to_set(R).
%% When every member of the group is a plain {value, _} or
%% {range, _, _}, returns {true, RenderedRegexText}; returns false as
%% soon as any other construct (e.g. a class escape) is present.
is_all_value(Group) ->
    OnlyValuesOrRanges =
        lists:all(
            fun
                ({value, _}) -> true;
                ({range, _, _}) -> true;
                (_) -> false
            end,
            Group
        ),
    case OnlyValuesOrRanges of
        true ->
            {true, xs_regex_util:range_to_regex(Group)};
        false ->
            false
    end.
%% Scans a group's members for a back-reference ("\" followed by a
%% digit); returns ok when none is found, {error, group_backref}
%% otherwise (callers match `ok =' and crash on violation).
no_back_ref([{value, $\\}, {value, Digit} | _]) when Digit >= $0, Digit =< $9 ->
    {error, group_backref};
no_back_ref([_ | Rest]) ->
    no_back_ref(Rest);
no_back_ref([]) ->
    ok.
%% Maps a parsed quantifier to its regex text: `one' (no explicit
%% quantifier) becomes the empty string, and "{,n}" is rejected since
%% a lower bound is required.
check_quantifier(one) ->
    "";
check_quantifier({q, "{," ++ _}) ->
    {error, no_min};
check_quantifier({q, Quantifier}) ->
    Quantifier.
%% True when the piece wraps a back-reference atom.
is_back_ref({_, {back_ref, _}, _}) ->
    true;
is_back_ref(_) ->
    false.
%% Counts the capturing parentheses ({paren, _} nodes) anywhere inside
%% a parse (sub)tree.
count_capturing_patterns(Pieces) ->
    count_capturing_patterns(Pieces, 0).

count_capturing_patterns(Term, Acc) when is_list(Term) ->
    Acc + lists:sum([count_capturing_patterns(T, 0) || T <- Term]);
count_capturing_patterns({branch, Term}, Acc) ->
    count_capturing_patterns(Term, Acc);
count_capturing_patterns({paren, Term}, Acc) ->
    %% A capturing group: count it, then descend into its body.
    count_capturing_patterns(Term, Acc + 1);
count_capturing_patterns(Term, Acc) when is_tuple(Term) ->
    %% Any other node: recurse over its elements.
    Acc + lists:sum([count_capturing_patterns(T, 0) || T <- tuple_to_list(Term)]);
count_capturing_patterns(_, Acc) ->
    Acc.
%% Validates every back-reference in a branch against the capture
%% groups defined so far. Referencing a still-open group is illegal.
%% A multi-digit reference larger than the current group count is
%% split: the leading digits form the group number and the trailing
%% digit becomes a literal character. Violations are reported by
%% throwing {error, badbackref} (caught in translate/1 / analyze/1).
check_back_refs(Pieces, Cnt) ->
    check_back_refs(Pieces, [], Cnt).
check_back_refs([], Acc, _) ->
    lists:reverse(Acc);
check_back_refs([{piece, {back_ref, N}, one} = H | T], Acc, {Open, Cnt}) ->
    %% Groups defined so far: those seen before this branch plus those
    %% opened by already-checked pieces of this branch.
    C = count_capturing_patterns(Acc) + Cnt,
    case lists:member(N, Open) of
        true ->
            throw({error, badbackref});
        false ->
            if
                N > C, N < 10 ->
                    %% Single digit and no such group: hard error.
                    throw({error, badbackref});
                N > C ->
                    %% Too large: peel off the last digit and retry
                    %% with the shorter reference.
                    Rem = N rem 10,
                    Div = N div 10,
                    if
                        Div == 0 ->
                            throw({error, badbackref});
                        true ->
                            check_back_refs(
                                [
                                    {piece, {back_ref, Div}, one},
                                    {piece, {char, integer_to_list(Rem)}, one}
                                    | T
                                ],
                                Acc,
                                {Open, Cnt}
                            )
                    end;
                true ->
                    check_back_refs(T, [H | Acc], {Open, Cnt})
            end
    end;
check_back_refs([H | T], Acc, Cnt) ->
    check_back_refs(T, [H | Acc], Cnt).
%% Maps the XQuery flag characters "smixq" to `re' options (plus the
%% internal do_qe marker for "q"). Unknown flags are reported by
%% throwing {error, {invalid_flag, Char}}.
regex_flags(<<>>) ->
    [dollar_endonly];
regex_flags([]) ->
    [dollar_endonly];
regex_flags(Flags) when is_binary(Flags) ->
    regex_flags(binary_to_list(Flags));
regex_flags(Flags) when is_list(Flags) ->
    %% Fold into a map keyed by flag so that only the last occurrence
    %% of a repeated flag character survives.
    ToOption = fun
        ($s, Acc) -> Acc#{s => [dollar_endonly, dotall]};
        ($m, Acc) -> Acc#{m => multiline};
        ($i, Acc) -> Acc#{i => caseless};
        ($x, Acc) -> Acc#{x => extended};
        ($q, Acc) -> Acc#{q => do_qe};
        (Bad, _) -> throw({error, {invalid_flag, Bad}})
    end,
    Options = lists:foldl(ToOption, #{}, Flags),
    lists:flatten(maps:values(Options)).
%% Counts the ")" bytes in the pattern binary; each one closes a group
%% in the translated pattern. (Note: any literal ")" byte is counted
%% as well.)
get_depth(<<$), Rest/binary>>, Depth) ->
    get_depth(Rest, Depth + 1);
get_depth(<<_, Rest/binary>>, Depth) ->
    get_depth(Rest, Depth);
get_depth(<<>>, Depth) ->
    Depth.
%% Walks the replacement string, converting each "$N" capture
%% reference to Erlang's "\g{N}" form. "\$" and "\\" escapes are
%% copied through; any other use of "\" or a "$" not followed by a
%% digit is an error.
transform_repl1(<<>>, _Depth, Acc) ->
    Acc;
transform_repl1(<<$\\, $$, Rest/binary>>, Depth, Acc) ->
    transform_repl1(Rest, Depth, <<Acc/binary, $\\, $$>>);
transform_repl1(<<$\\, $\\, Rest/binary>>, Depth, Acc) ->
    transform_repl1(Rest, Depth, <<Acc/binary, $\\, $\\>>);
transform_repl1(<<$\\, _/binary>>, _Depth, _Acc) ->
    {error, invalid_replacement};
transform_repl1(<<$$, Digit/utf8, Rest0/binary>>, Depth, Acc) when Digit >= $0, Digit =< $9 ->
    {MoreDigits, Rest} = get_digits(Rest0, <<>>),
    Number = binary_to_integer(<<Digit/utf8, MoreDigits/binary>>),
    %% Only Depth capture groups exist, so digits beyond a valid group
    %% number are literal text following the reference.
    {Group, Literal} = chop_to(Number, Depth, <<>>),
    transform_repl1(Rest, Depth, <<Acc/binary, "\\g{", Group/binary, "}", Literal/binary>>);
transform_repl1(<<$$, _/binary>>, _Depth, _Acc) ->
    {error, invalid_replacement};
transform_repl1(<<Char/utf8, Rest/binary>>, Depth, Acc) ->
    transform_repl1(Rest, Depth, <<Acc/binary, Char/utf8>>).
%% Splits leading ASCII digits off a binary: {Digits, Rest}.
get_digits(<<>>, Acc) ->
    {Acc, <<>>};
get_digits(<<Digit/utf8, Rest/binary>>, Acc) when Digit >= $0, Digit =< $9 ->
    get_digits(Rest, <<Acc/binary, Digit/utf8>>);
get_digits(Other, Acc) ->
    {Acc, Other}.
%% Chops trailing digits off Int until it is =< Max; returns
%% {IntAsBinary, ChoppedDigitsAsBinary}.
chop_to(Int, Max, Acc) when Int > Max ->
    chop_to(Int div 10, Max, <<(integer_to_binary(Int rem 10))/binary, Acc/binary>>);
chop_to(Int, _Max, Acc) ->
    {integer_to_binary(Int), Acc}.
%% Strips whitespace (#x9, #xA, #xD, #x20) from a regex, as the "x"
%% flag requires, except inside character class expressions ("[...]"),
%% where whitespace is significant.
%% Fix: removed dataset-extraction residue ("| src/xs_regex.erl | ...")
%% that was fused onto the final line and broke the form's syntax.
strip_esc_ws(Bin) when is_binary(Bin) ->
    strip_esc_ws(unicode:characters_to_list(Bin));
strip_esc_ws([]) ->
    [];
strip_esc_ws([$[ | T]) ->
    %% Entering a character class: keep whitespace until the "]".
    [$[ | no_strip_esc_ws(T)];
strip_esc_ws([H | T]) when
    H == 16#9;
    H == 16#A;
    H == 16#D;
    H == 16#20
->
    strip_esc_ws(T);
strip_esc_ws([H | T]) ->
    [H | strip_esc_ws(T)].
%% Copies characters verbatim until the character class closes.
no_strip_esc_ws([]) -> [];
no_strip_esc_ws([$] | T]) -> [$] | strip_esc_ws(T)];
no_strip_esc_ws([H | T]) -> [H | no_strip_esc_ws(T)].
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
%% A bounded priority queue. Entries live in two structures kept in
%% sync: `dict' maps Key -> TreeKey for membership/update lookups, and
%% `tree' (a gb_tree keyed by {Priority, InsertionTime}) keeps entries
%% ordered by priority, with insertion time breaking ties.
%% NOTE(review): `dict' has long been superseded by maps; consider
%% migrating when compatibility allows.
-module(smoosh_priority_queue).
-export([new/0, last_updated/2, is_key/2, in/4, in/5, out/1, size/1, info/1]).
-record(priority_queue, {
    dict=dict:new(),
    tree=gb_trees:empty()
}).
%% Create an empty priority queue: a dict (Key -> TreeKey) for membership
%% checks plus a gb_tree ({Priority, Stamp} -> {Key, Value}) ordered by
%% priority.
new() ->
    #priority_queue{}.
%% Return the monotonic-time component of the stamp recorded when Key was
%% last inserted (see in/5), or false if Key is not present.
last_updated(Key, #priority_queue{dict=Dict}) ->
    case dict:find(Key, Dict) of
        {ok, {_Priority, {LastUpdatedMTime, _MInt}}} ->
            LastUpdatedMTime;
        error ->
            false
    end.
%% True if Key is currently queued.
is_key(Key, #priority_queue{dict=Dict}) ->
    dict:is_key(Key, Dict).
%% Insert with unbounded capacity.
in(Key, Value, Priority, Q) ->
    in(Key, Value, Priority, infinity, Q).
%% Insert Key/Value at Priority; if the queue would exceed Capacity, the
%% lowest-priority entries are dropped (see truncate/2). Re-inserting an
%% existing Key replaces its previous entry.
in(Key, Value, Priority, Capacity, #priority_queue{dict=Dict, tree=Tree}) ->
    %% remove any previous tree entry for Key so dict and tree stay in sync
    Tree1 = case dict:find(Key, Dict) of
        {ok, TreeKey} ->
            gb_trees:delete_any(TreeKey, Tree);
        error ->
            Tree
    end,
    %% {monotonic time, unique integer} makes the tree key unique and
    %% breaks priority ties in insertion order
    Now = {erlang:monotonic_time(), erlang:unique_integer([monotonic])},
    TreeKey1 = {Priority, Now},
    Tree2 = gb_trees:enter(TreeKey1, {Key, Value}, Tree1),
    Dict1 = dict:store(Key, TreeKey1, Dict),
    truncate(Capacity, #priority_queue{dict=Dict1, tree=Tree2}).
%% Remove and return the highest-priority entry as {Key, Value, Queue2},
%% or false if the queue is empty.
out(#priority_queue{dict = D, tree = T}) ->
    case gb_trees:is_empty(T) of
        true ->
            false;
        false ->
            {_TreeKey, {Key, Value}, T2} = gb_trees:take_largest(T),
            {Key, Value, #priority_queue{dict = dict:erase(Key, D), tree = T2}}
    end.
%% Number of queued entries.
size(#priority_queue{tree=Tree}) ->
    gb_trees:size(Tree).
%% Return a proplist describing the queue: always {size, N}, plus the
%% minimum and maximum tree keys ({Priority, Stamp}) when non-empty.
%% Uses gb_trees:smallest/1 and largest/1, which only read the extreme
%% entries, instead of take_smallest/take_largest which also built (and
%% discarded) a new tree on every call.
info(#priority_queue{tree=Tree}=Q) ->
    [{size, ?MODULE:size(Q)}|
     case gb_trees:is_empty(Tree) of
         true ->
             [];
         false ->
             {Min, _} = gb_trees:smallest(Tree),
             {Max, _} = gb_trees:largest(Tree),
             [{min, Min}, {max, Max}]
     end].
%% Trim the queue down to Capacity by repeatedly dropping the smallest
%% (lowest-priority) entry. Capacity =< 0 has no matching clause and
%% crashes with function_clause — callers must pass a positive capacity
%% or 'infinity'.
truncate(infinity, Q) ->
    Q;
truncate(Capacity, Q) when Capacity > 0 ->
    truncate(Capacity, ?MODULE:size(Q), Q).
truncate(Capacity, Size, Q) when Size =< Capacity ->
Q;
truncate(Capacity, Size, #priority_queue{dict=Dict, tree=Tree}) when Size > 0 ->
{_, {Key, _}, Tree1} = gb_trees:take_smallest(Tree),
Q1 = #priority_queue{dict=dict:erase(Key, Dict), tree=Tree1},
truncate(Capacity, ?MODULE:size(Q1), Q1). | src/smoosh/src/smoosh_priority_queue.erl | 0.721547 | 0.466785 | smoosh_priority_queue.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc
%% This module implements a central storage manager for riak_ensemble.
%% Previously, individual ensembles as well as the ensemble manager would
%% independently save their own state to disk. However, such an approach
%% scaled poorly as the number of independent ensembles increased. It was
%% not uncommon to see thousands of synchronous writes issued to disk per
%% second, overwhelming the I/O subsystem. To solve this issue, this storage
%% manager was created.
%%
%% Rather than storing data independently, the storage manager combines the
%% state from multiple ensembles as well as the ensemble manager into a
%% single entity that is stored together in a single file. Since this file
%% is now a critical single point of failure, the storage manager uses the
%% new {@link riak_ensemble_save} logic to save this data to disk such that
%% there are four redundant copies to recover from.
%%
%% This manager is also responsible for coalescing multiple writes together
%% to reduce disk traffic. Individual writes are staged in an ETS table and
%% then flushed to disk after a delay (eg. 50ms).
%%
%% There are two ways to save data to disk that are used by other components
%% in riak_ensemble: synchronous and asynchronous.
%%
%% For synchronous writes components use the sequence:
%% riak_ensemble_storage:put(Key, Data),
%% riak_ensemble_storage:sync().
%% The sync() call than blocks until the data has successfully been written,
%% to disk.
%%
%% For asynchronous writes, components simply use put() without sync(). The
%% data will then be written to disk either when another component calls sync,
%% or after next storage manager tick (eg. every 5 seconds).
%%
-module(riak_ensemble_storage).
-behaviour(gen_server).
%% API
-export([start_link/0]).
-export([get/1, put/2, sync/0]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-define(ETS, ets_riak_ensemble_storage).
-define(SYNC_DELAY, riak_ensemble_config:storage_delay()).
-define(TICK, riak_ensemble_config:storage_tick()).
-type gen_server_from() :: any().
-record(state, {savefile :: file:filename(),
waiting :: [gen_server_from()],
previous :: binary(),
timer :: reference()}).
-type state() :: #state{}.
%%%===================================================================
%%% API
%%%===================================================================
%% Start the storage manager registered locally under the module name.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-spec sync() -> ok.
%% Block until all data currently staged in the ETS table has been
%% flushed to disk; the server replies only after the write completes.
sync() ->
    gen_server:call(?MODULE, sync, infinity).
-spec put(term(), term()) -> true.
%% Stage Key/Value in the shared ETS table; it is persisted on the next
%% sync/0 call or periodic tick (i.e. this alone is an async write).
put(Key, Value) ->
    ets:insert(?ETS, {Key, Value}).
-spec get(term()) -> {ok, term()} | not_found.
%% Read Key directly from the ETS table; on any failure (e.g. the table
%% is still being populated at startup) fall back to a serialized call
%% through the server.
get(Key) ->
    try
        Value = ets:lookup_element(?ETS, Key, 2),
        {ok, Value}
    catch
        _:_ ->
            %% Retry through the server in case data is being loaded
            gen_server:call(?MODULE, {get, Key}, infinity)
    end.
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
%% Create the public ETS staging table, load any previously saved facts
%% from disk into it, and schedule the periodic flush tick.
init([]) ->
    {ok, Root} = application:get_env(riak_ensemble, data_root),
    File = filename:join([Root, "ensembles", "ensemble_facts"]),
    _ = ets:new(?ETS, [named_table, public, {read_concurrency, true},
                       {write_concurrency, true}]),
    case riak_ensemble_save:read(File) of
        {ok, Bin} ->
            %% restore the persisted list of {Key, Value} pairs
            Existing = binary_to_term(Bin),
            true = ets:insert(?ETS, Existing);
        _ ->
            %% no readable save file yet: start empty
            ok
    end,
    schedule_tick(),
    {ok, #state{savefile=File, waiting=[], timer=undefined}}.
%% Serialized ETS read, used as the fallback path by get/1.
handle_call({get, Key}, _From, State) ->
    Reply = case ets:lookup(?ETS, Key) of
                [{_, Value}] ->
                    {ok, Value};
                _ ->
                    not_found
            end,
    {reply, Reply, State};
%% Queue the caller and ensure a delayed flush is scheduled; the reply
%% is deferred (noreply) until do_sync/1 has written to disk.
handle_call(sync, From, State=#state{waiting=Waiting}) ->
    Waiting2 = [From|Waiting],
    State2 = maybe_schedule_sync(State),
    State3 = State2#state{waiting=Waiting2},
    {noreply, State3};
handle_call(_Request, _From, State) ->
    {reply, ok, State}.
handle_cast(_Msg, State) ->
    {noreply, State}.
%% Periodic tick: opportunistically schedule a flush and re-arm the timer.
handle_info(tick, State) ->
    State2 = tick(State),
    schedule_tick(),
    {noreply, State2};
%% Delayed-flush timer fired: write staged data and reply to waiters.
handle_info(do_sync, State) ->
    {noreply, do_sync(State)};
handle_info(_Info, State) ->
    {noreply, State}.
terminate(_Reason, _State) ->
    ok.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%%%===================================================================
%%% Internal functions
%%%===================================================================
-spec schedule_tick() -> ok.
%% Arm the periodic tick timer (interval from riak_ensemble_config).
schedule_tick() ->
    _ = erlang:send_after(?TICK, self(), tick),
    ok.
-spec tick(state()) -> state().
%% On each tick just make sure a flush is pending.
tick(State) ->
    State2 = maybe_schedule_sync(State),
    State2.
-spec maybe_schedule_sync(state()) -> state().
%% Schedule a delayed do_sync message unless one is already pending; the
%% delay lets multiple put/2 calls coalesce into a single disk write.
maybe_schedule_sync(State=#state{timer=undefined}) ->
    Timer = erlang:send_after(?SYNC_DELAY, self(), do_sync),
    State#state{timer=Timer};
maybe_schedule_sync(State) ->
    State.
-spec do_sync(state()) -> state().
do_sync(State=#state{savefile=File, waiting=Waiting, previous=PrevData}) ->
Data = term_to_binary(ets:tab2list(?ETS)),
case Data of
PrevData ->
ok;
_ ->
ok = riak_ensemble_save:write(File, Data)
end,
_ = [gen_server:reply(From, ok) || From <- Waiting],
State#state{waiting=[], timer=undefined, previous=Data}. | deps/riak_ensemble/src/riak_ensemble_storage.erl | 0.653901 | 0.500122 | riak_ensemble_storage.erl | starcoder |
%%%--------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2017 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%--------------------------------------------------------------------
%%% @doc
%%% Cumulative histogram implemented on a list, allows for updating latest
%%% counter and for shifting the histogram values to the right.
%%% @end
%%%--------------------------------------------------------------------
-module(cumulative_histogram).
-author("<NAME>").
-type size() :: pos_integer().
-type histogram() :: [integer()].
-export_type([size/0, histogram/0]).
%% API
-export([new/1, shift/2, increment/2, decrement/2, merge/2]).
%%%===================================================================
%%% API
%%%===================================================================
%%--------------------------------------------------------------------
%% @doc
%% Returns new histogram of given size
%% @end
%%--------------------------------------------------------------------
-spec new(size()) -> histogram().
new(Size) ->
new(Size, 0).
%%--------------------------------------------------------------------
%% @doc
%% Shifts the histogram right by ShiftSize slots: the newest (head)
%% cumulative value is replicated into the vacated slots and the oldest
%% slots fall off the end, so the length is preserved.
%% @end
%%--------------------------------------------------------------------
-spec shift(histogram(), ShiftSize :: non_neg_integer()) -> histogram().
shift(Histogram, 0) ->
    %% nothing to shift
    Histogram;
shift([Newest | _] = Histogram, ShiftSize) when ShiftSize >= length(Histogram) ->
    %% shifted past the whole window: every slot inherits the newest value
    lists:duplicate(length(Histogram), Newest);
shift([Newest | _] = Histogram, ShiftSize) ->
    Padding = lists:duplicate(ShiftSize, Newest),
    lists:sublist(Padding ++ Histogram, length(Histogram)).
%%--------------------------------------------------------------------
%% @doc
%% Adds N to the newest (head) counter; a cumulative histogram only
%% ever mutates its head slot.
%% @end
%%--------------------------------------------------------------------
-spec increment(histogram(), non_neg_integer()) -> histogram().
increment([Newest | Older], N) ->
    [Newest + N | Older].
%%--------------------------------------------------------------------
%% @doc
%% Subtracts N from the newest (head) counter.
%% @end
%%--------------------------------------------------------------------
-spec decrement(histogram(), non_neg_integer()) -> histogram().
decrement([Newest | Older], N) ->
    [Newest - N | Older].
%%--------------------------------------------------------------------
%% @doc
%% Element-wise sum of two histograms; both must have equal length
%% (lists:zip crashes otherwise, matching the original contract).
%% @end
%%--------------------------------------------------------------------
-spec merge(histogram(), histogram()) -> histogram().
merge(Histogram1, Histogram2) ->
    [A + B || {A, B} <- lists:zip(Histogram1, Histogram2)].
%%%===================================================================
%%% Internal functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns new histogram of given size with default values.
%% @end
%%--------------------------------------------------------------------
-spec new(size(), non_neg_integer()) -> histogram().
new(Size, DefaultValue) ->
lists:duplicate(Size, DefaultValue). | src/modules/datastore/utils/file_popularity/cumulative_histogram.erl | 0.516595 | 0.501953 | cumulative_histogram.erl | starcoder |
%%-------------------------------------------------------------------
%% The MIT License (MIT)
%% Copyright (c) 2018 AdRoll, Inc. <NAME> and <NAME>
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in all
%% copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
%% SOFTWARE.
%%
%%-------------------------------------------------------------------
-module(spillway_SUITE).
-include_lib("eunit/include/eunit.hrl").
-behaviour(ct_suite).
-export([all/0, init_per_suite/1, end_per_suite/1, suite/0, init_per_testcase/2,
end_per_testcase/2]).
-export([complex/1, simple/1]).
-define(TABLE, test_counter).
%%%=============================================================================
%%% common_test callbacks
%%%=============================================================================
%% Run both cases on every execution of the suite.
all() ->
    [simple, complex].
%% Abort any single test case after 15 seconds.
suite() ->
    [{timetrap, {seconds, 15}}].
init_per_suite(Conf) ->
    Conf.
end_per_suite(_Conf) ->
    ok.
%% Start/stop the spillway application around every case so each case
%% sees a fresh counter table.
init_per_testcase(_Module, Conf) ->
    ok = application:start(spillway),
    Conf.
end_per_testcase(_Module, _Conf) ->
    ok = application:stop(spillway),
    ok.
%%%=============================================================================
%%% Tests
%%%=============================================================================
%% Basic enter/leave semantics against a limit of 2: entering with limit 0
%% is refused, successful enters report the new counter value, entering
%% past the limit is refused, and leave/1 never goes below zero.
simple(_) ->
    ?assertEqual(false, spillway:enter(?TABLE, 0)),
    ?assertEqual({true, 1}, spillway:enter(?TABLE, 2)),
    ?assertEqual({true, 2}, spillway:enter(?TABLE, 2)),
    ?assertEqual(false, spillway:enter(?TABLE, 2)),
    ?assertEqual(1, spillway:leave(?TABLE)),
    ?assertEqual(0, spillway:leave(?TABLE)),
    ?assertEqual(0, spillway:leave(?TABLE)),
    ok.
%% Spawn a monitored worker that waits for the SignalGo process to die
%% (the "go" broadcast), then tries to enter the shared counter with
%% weight ProcN under Limit. On success it reports {entered, Pid, N} to
%% Parent and holds its slot until SignalStop dies; on refusal it reports
%% {not_entered, Pid} and exits.
spawn_proc(ProcN, Limit, SignalGo, SignalStop, Parent) ->
    spawn_monitor(fun() ->
        monitor(process, SignalGo),
        monitor(process, SignalStop),
        receive
            {'DOWN', _, process, SignalGo, _} ->
                case spillway:enter(?TABLE, ProcN, Limit) of
                    {true, N} ->
                        send_parent(Parent, {entered, self(), N}),
                        receive
                            {'DOWN', _, process, SignalStop, _} ->
                                spillway:leave(?TABLE, ProcN)
                        end;
                    false ->
                        send_parent(Parent, {not_entered, self()}),
                        ok
                end
        end
    end).
%% Concurrency test: 2000 workers with weights 1..2000 race to enter a
%% counter capped at 140000. The observed counter value must never exceed
%% the limit, and after all workers leave it must return to 0.
complex(_) ->
    NProcs = 2000,
    Limit = 140000,
    Parent = self(),
    SignalGo = signal(),
    SignalStop = signal(),
    Processes =
        [element(1, spawn_proc(ProcN, Limit, SignalGo, SignalStop, Parent))
         || ProcN <- lists:seq(1, NProcs)],
    %% release all workers at once by terminating the "go" signal process
    SignalGo ! go,
    %% Collect enter msg
    N = collect_values(Processes, 0),
    ?assert(N =< Limit),
    Value = spillway:cur(?TABLE),
    ?assert(Value =< Limit),
    %% wait for all the workers to leave and finish
    SignalStop ! go,
    wait_for_down(Processes),
    ?assertMatch(0, spillway:cur(?TABLE)),
    ok.
%% Report a result message to the test process (plain send).
send_parent(Parent, Msg) ->
    Parent ! Msg.
%% Wait for one report from every pending worker pid and return the
%% largest counter value seen in {entered, _, N} messages (the initial
%% accumulator, 0 from the caller, if nobody entered).
collect_values([], Highest) ->
    Highest;
collect_values(Pending, Highest) ->
    receive
        {not_entered, Pid} ->
            collect_values(Pending -- [Pid], Highest);
        {entered, Pid, N} ->
            %% max/2 replaces the original pair of guarded clauses
            collect_values(Pending -- [Pid], max(N, Highest))
    end.
%% Block until a 'DOWN' message has been received for every pid listed.
wait_for_down([]) ->
    ok;
wait_for_down(Pending) ->
    receive
        {'DOWN', _Ref, process, Pid, _Reason} ->
            wait_for_down(Pending -- [Pid])
    end.
signal() ->
spawn(fun() ->
receive
go ->
ok
end
end). | test/spillway_SUITE.erl | 0.543348 | 0.439807 | spillway_SUITE.erl | starcoder |
%% -*- coding: latin-1 -*-
%% ---------------------------------------------------------------------
%% Licensed under the Apache License, Version 2.0 (the "License"); you may
%% not use this file except in compliance with the License. You may obtain
%% a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @author <NAME> <<EMAIL>>
%% @copyright 2010-2013 <NAME>
%% @doc Metaprogramming in Erlang.
-module(merl).
-export([term/1, var/1, print/1, show/1]).
-export([quote/1, quote/2, qquote/2, qquote/3]).
-export([template/1, tree/1, subst/2, tsubst/2, alpha/2, match/2, switch/2]).
-export([template_vars/1, meta_template/1]).
-export([compile/1, compile/2, compile_and_load/1, compile_and_load/2]).
%% NOTE: this module may not include merl.hrl!
-type tree() :: erl_syntax:syntaxTree().
-type tree_or_trees() :: tree() | [tree()].
-type pattern() :: tree() | template().
-type pattern_or_patterns() :: pattern() | [pattern()].
-type env() :: [{Key::id(), pattern_or_patterns()}].
-type id() :: atom() | integer().
%% A list of strings or binaries is assumed to represent individual lines,
%% while a flat string or binary represents source code containing newlines.
-type text() :: string() | binary() | [string()] | [binary()].
-type location() :: erl_anno:location().
%% ------------------------------------------------------------------------
%% Compiling and loading code directly to memory
%% @equiv compile(Code, [])
%% Compile with no extra options (defaults added by compile/2).
compile(Code) ->
    compile(Code, []).
%% @doc Compile a syntax tree or list of syntax trees representing a module
%% into a binary BEAM object.
%% @see compile_and_load/2
%% @see compile/1
compile(Code, Options) when not is_list(Code)->
    case type(Code) of
        %% Unwrap a form_list into its member forms, passing Options
        %% through (they were previously dropped by delegating to
        %% compile/1, which always uses the empty option list).
        form_list -> compile(erl_syntax:form_list_elements(Code), Options);
        _ -> compile([Code], Options)
    end;
compile(Code, Options0) when is_list(Options0) ->
    %% Revert erl_syntax trees to erl_parse forms and compile to a BEAM
    %% binary without consulting the local environment's compile options.
    Forms = [erl_syntax:revert(F) || F <- Code],
    Options = [verbose, report_errors, report_warnings, binary | Options0],
    compile:noenv_forms(Forms, Options).
%% @equiv compile_and_load(Code, [])
compile_and_load(Code) ->
    compile_and_load(Code, []).
%% @doc Compile a syntax tree or list of syntax trees representing a module
%% and load the resulting module into memory.
%% @see compile/2
%% @see compile_and_load/1
compile_and_load(Code, Options) ->
    case compile(Code, Options) of
        {ok, ModuleName, Binary} ->
            %% NOTE(review): the result of code:load_binary/3 is ignored,
            %% so a load failure would still yield {ok, Binary} — confirm
            %% whether that is intended.
            code:load_binary(ModuleName, "", Binary),
            {ok, Binary};
        Other -> Other
    end.
%% ------------------------------------------------------------------------
%% Utility functions
-spec var(atom()) -> tree().
%% @doc Create a variable.
var(Name) ->
    erl_syntax:variable(Name).
-spec term(term()) -> tree().
%% @doc Create a syntax tree for a constant term.
term(Term) ->
    erl_syntax:abstract(Term).
%% @doc Pretty-print a syntax tree or template to the standard output. This
%% is a utility function for development and debugging.
print(Ts) when is_list(Ts) ->
    lists:foreach(fun print/1, Ts);
print(T) ->
    %% templates are reverted to plain trees via tree/1 before printing
    io:put_chars(erl_prettypr:format(tree(T))),
    io:nl().
%% @doc Print the structure of a syntax tree or template to the standard
%% output. This is a utility function for development and debugging.
show(Ts) when is_list(Ts) ->
    lists:foreach(fun show/1, Ts);
show(T) ->
    %% pp/2 renders one node type per line, indented by depth
    io:put_chars(pp(tree(T), 0)),
    io:nl().
%% Render one node at indent I as "type: <pretty form limited to depth 3>"
%% truncated to the remaining line width, followed by its non-empty
%% subtree groups indented two further columns.
pp(T, I) ->
    [lists:duplicate(I, $\s),
     limit(lists:flatten([atom_to_list(type(T)), ": ",
                          erl_prettypr:format(erl_syntax_lib:limit(T,3))]),
           79-I),
     $\n,
     pp_1(lists:filter(fun (X) -> X =/= [] end, subtrees(T)), I+2)
    ].
%% Render subtree groups, separating consecutive groups with a "+" line.
pp_1([G], I) ->
    pp_2(G, I);
pp_1([G | Gs], I) ->
    [pp_2(G, I), lists:duplicate(I, $\s), "+\n" | pp_1(Gs, I)];
pp_1([], _I) ->
    [].
%% Render every element of a single group.
pp_2(G, I) ->
    [pp(E, I) || E <- G].
%% limit string to N characters, stay on a single line and compact whitespace
%% (any whitespace becomes a single space; other control chars are dropped;
%% output ends with "..." / ".." / "." when truncation occurs, as width allows)
limit([$\n | Cs], N) -> limit([$\s | Cs], N);
limit([$\r | Cs], N) -> limit([$\s | Cs], N);
limit([$\v | Cs], N) -> limit([$\s | Cs], N);
limit([$\t | Cs], N) -> limit([$\s | Cs], N);
limit([$\s, $\s | Cs], N) -> limit([$\s | Cs], N);
limit([C | Cs], N) when C < 32 -> limit(Cs, N);
limit([C | Cs], N) when N > 3 -> [C | limit(Cs, N-1)];
%% near the width limit: emit an ellipsis only if more input remains than fits
limit([_C1, _C2, _C3, _C4 | _Cs], 3) -> "...";
limit(Cs, 3) -> Cs;
limit([_C1, _C2, _C3 | _], 2) -> "..";
limit(Cs, 2) -> Cs;
limit([_C1, _C2 | _], 1) -> ".";
limit(Cs, 1) -> Cs;
limit(_, _) -> [].
%% ------------------------------------------------------------------------
%% Parsing and instantiating code fragments
-spec qquote(Text::text(), Env::env()) -> tree_or_trees().
%% @doc Parse text and substitute meta-variables.
%%
%% @equiv qquote(1, Text, Env)
qquote(Text, Env) ->
    qquote(1, Text, Env).
-spec qquote(StartPos::location(), Text::text(), Env::env()) -> tree_or_trees().
%% @doc Parse text and substitute meta-variables. Takes an initial scanner
%% starting position as first argument.
%%
%% The macro `?Q(Text, Env)' expands to `merl:qquote(?LINE, Text, Env)'.
%%
%% @see quote/2
qquote(StartPos, Text, Env) ->
    subst(quote(StartPos, Text), Env).
-spec quote(Text::text()) -> tree_or_trees().
%% @doc Parse text.
%%
%% @equiv quote(1, Text)
quote(Text) ->
    quote(1, Text).
-spec quote(StartPos::location(), Text::text()) -> tree_or_trees().
%% @doc Parse text. Takes an initial scanner starting position as first
%% argument.
%%
%% The macro `?Q(Text)' expands to `merl:quote(?LINE, Text, Env)'.
%%
%% @see quote/1
%% The start position may be a {Line, Column} pair or a bare line number.
quote({Line, Col}, Text)
  when is_integer(Line), is_integer(Col) ->
    quote_1(Line, Col, Text);
quote(StartPos, Text) when is_integer(StartPos) ->
    quote_1(StartPos, undefined, Text).
%% Scan the flattened text, parse it, and re-attach scanned comments.
quote_1(StartLine, StartCol, Text) ->
    %% be backwards compatible as far as R12, ignoring any starting column
    %% (OTP emulator versions "5.6"-"5.8" only support integer positions)
    StartPos = case erlang:system_info(version) of
                   "5.6" ++ _ -> StartLine;
                   "5.7" ++ _ -> StartLine;
                   "5.8" ++ _ -> StartLine;
                   _ when StartCol =:= undefined -> StartLine;
                   _ -> {StartLine, StartCol}
               end,
    FlatText = flatten_text(Text),
    {ok, Ts, _} = erl_scan:string(FlatText, StartPos),
    merge_comments(StartLine, erl_comment_scan:string(FlatText), parse_1(Ts)).
parse_1(Ts) ->
    %% if dot tokens are present, it is assumed that the text represents
    %% complete forms, not dot-terminated expressions or similar
    case split_forms(Ts) of
        {ok, Fs} -> parse_forms(Fs);
        error ->
            parse_2(Ts)
    end.
%% Partition the token list into dot-terminated forms; returns 'error'
%% when no dot token is present at all (text is expression-like).
split_forms(Ts) ->
    split_forms(Ts, [], []).
split_forms([{dot,_}=T|Ts], Fs, As) ->
    split_forms(Ts, [lists:reverse(As, [T]) | Fs], []);
split_forms([T|Ts], Fs, As) ->
    split_forms(Ts, Fs, [T|As]);
split_forms([], Fs, []) ->
    {ok, lists:reverse(Fs)};
split_forms([], [], _) ->
    error; % no dot tokens found - not representing form(s)
split_forms([], _, [T|_]) ->
    fail("incomplete form after ~p", [T]).
%% Parse each dot-terminated token list as one form.
parse_forms([Ts | Tss]) ->
    case erl_parse:parse_form(Ts) of
        {ok, Form} -> [Form | parse_forms(Tss)];
        {error, R} -> parse_error(R)
    end;
parse_forms([]) ->
    [].
%% Fallback chain: each parse_N tries a different interpretation of the
%% token list by wrapping it in dummy surrounding tokens, collecting the
%% errors along the way; the "best" error is reported if all fail.
parse_2(Ts) ->
    %% one or more comma-separated expressions?
    %% (recall that Ts has no dot tokens if we get to this stage)
    case erl_parse:parse_exprs(Ts ++ [{dot,0}]) of
        {ok, Exprs} -> Exprs;
        {error, E} ->
            parse_3(Ts ++ [{'end',0}, {dot,0}], [E])
    end.
parse_3(Ts, Es) ->
    %% try-clause or clauses?
    case erl_parse:parse_exprs([{'try',0}, {atom,0,true}, {'catch',0} | Ts]) of
        {ok, [{'try',_,_,_,_,_}=X]} ->
            %% get the right kind of qualifiers in the clause patterns
            erl_syntax:try_expr_handlers(X);
        {error, E} ->
            parse_4(Ts, [E|Es])
    end.
parse_4(Ts, Es) ->
    %% fun-clause or clauses? (`(a)' is also a pattern, but `(a,b)' isn't,
    %% so fun-clauses must be tried before normal case-clauses
    case erl_parse:parse_exprs([{'fun',0} | Ts]) of
        {ok, [{'fun',_,{clauses,Cs}}]} -> Cs;
        {error, E} ->
            parse_5(Ts, [E|Es])
    end.
parse_5(Ts, Es) ->
    %% case-clause or clauses?
    case erl_parse:parse_exprs([{'case',0}, {atom,0,true}, {'of',0} | Ts]) of
        {ok, [{'case',_,_,Cs}]} -> Cs;
        {error, E} ->
            %% select the best error to report
            parse_error(lists:last(lists:sort([E|Es])))
    end.
%% Format a parse error (with line/column when available) and abort via fail.
parse_error({L, M, R}) when is_atom(M), is_integer(L) ->
    fail("~w: ~s", [L, M:format_error(R)]);
parse_error({{L,C}, M, R}) when is_atom(M), is_integer(L), is_integer(C) ->
    fail("~w:~w: ~s", [L,C,M:format_error(R)]);
parse_error({_, M, R}) when is_atom(M) ->
    fail(M:format_error(R));
parse_error(R) ->
    fail("unknown parse error: ~p", [R]).
%% ------------------------------------------------------------------------
%% Templates, substitution and matching
%% Leaves are normal syntax trees, and inner nodes are tuples
%% {template,Type,Attrs,Groups} where Groups are lists of lists of nodes.
%% Metavariables are 1-tuples {VarName}, where VarName is an atom or an
%% integer. {'_'} and {0} work as anonymous variables in matching. Glob
%% metavariables are tuples {'*',VarName}, and {'*','_'} and {'*',0} are
%% anonymous globs.
%% Note that although template() :: tree() | ..., it is implied that these
%% syntax trees are free from metavariables, so pattern() :: tree() |
%% template() is in fact a wider type than template().
-type template() :: tree()
| {id()}
| {'*',id()}
| {template, atom(), term(), [[template()]]}.
-type template_or_templates() :: template() | [template()].
-spec template(pattern_or_patterns()) -> template_or_templates().
%% @doc Turn a syntax tree or list of trees into a template or templates.
%% Templates can be instantiated or matched against, and reverted back to
%% normal syntax trees using {@link tree/1}. If the input is already a
%% template, it is not modified further.
%%
%% @see subst/2
%% @see match/2
%% @see tree/1
%% Dispatch elementwise over lists of trees.
template(Trees) when is_list(Trees) ->
    [template_0(T) || T <- Trees];
template(Tree) ->
    template_0(Tree).
%% Already-built templates and metavariables pass through unchanged.
template_0({template, _, _, _}=Template) -> Template;
template_0({'*',_}=Template) -> Template;
template_0({_}=Template) -> Template;
template_0(Tree) ->
    case template_1(Tree) of
        false -> Tree;
        {Name} when is_list(Name) ->
            %% a still-"lifted" metavariable reached the top level, i.e.
            %% the marker was malformed
            fail("bad metavariable: '~s'", [tl(Name)]); % drop v/n from name
        Template -> Template
    end.
%% returns either a template or a lifted metavariable {String}, or 'false'
%% if Tree contained no metavariables
%% (metavar/1 is expected to yield {PrefixedNameString} or false; the
%% "v"/"n" prefix distinguishes atom-named from integer-named variables)
template_1(Tree) ->
    case subtrees(Tree) of
        [] ->
            case metavar(Tree) of
                {"v_"++Cs}=V when Cs =/= [] -> V; % to be lifted
                {"n0"++Cs}=V when Cs =/= [] -> V; % to be lifted
                {"v@"++Cs} when Cs =/= [] -> {'*',list_to_atom(Cs)};
                {"n9"++Cs} when Cs =/= [] -> {'*',list_to_integer(Cs)};
                {"v"++Cs} -> {list_to_atom(Cs)};
                {"n"++Cs} -> {list_to_integer(Cs)};
                false -> false
            end;
        Gs ->
            case template_2(Gs, [], false) of
                Gs1 when is_list(Gs1) ->
                    {template, type(Tree), erl_syntax:get_attrs(Tree), Gs1};
                Other ->
                    Other
            end
    end.
%% Walk the subtree groups; the boolean tracks whether any group actually
%% contained a metavariable ('false' result avoids rebuilding the node).
template_2([G | Gs], As, Bool) ->
    case template_3(G, [], false) of
        {"v_"++Cs}=V when Cs =/= [] -> V; % lift further
        {"n0"++Cs}=V when Cs =/= [] -> V; % lift further
        {"v@"++Cs} when Cs =/= [] -> {'*',list_to_atom(Cs)}; % stop
        {"n9"++Cs} when Cs =/= [] -> {'*',list_to_integer(Cs)}; % stop
        {"v"++Cs} when is_list(Cs) -> {list_to_atom(Cs)}; % stop
        {"n"++Cs} when is_list(Cs) -> {list_to_integer(Cs)}; % stop
        false -> template_2(Gs, [G | As], Bool);
        G1 -> template_2(Gs, [G1 | As], true)
    end;
template_2([], _As, false) -> false;
template_2([], As, true) -> lists:reverse(As).
template_3([T | Ts], As, Bool) ->
    case template_1(T) of
        {"v_"++Cs} when Cs =/= [] -> {"v"++Cs}; % lift
        {"n0"++Cs} when Cs =/= [] -> {"n"++Cs}; % lift
        false -> template_3(Ts, [T | As], Bool);
        T1 -> template_3(Ts, [T1 | As], true)
    end;
template_3([], _As, false) -> false;
template_3([], As, true) -> lists:reverse(As).
%% @doc Turn a template into a syntax tree representing the template.
%% Meta-variables in the template are turned into normal Erlang variables if
%% their names (after the metavariable prefix characters) begin with an
%% uppercase character. E.g., `_@Foo' in the template becomes the variable
%% `Foo' in the meta-template. Furthermore, variables ending with `@' are
%% automatically wrapped in a call to merl:term/1, so e.g. `_@Foo@ in the
%% template becomes `merl:term(Foo)' in the meta-template.
-spec meta_template(template_or_templates()) -> tree_or_trees().
meta_template(Templates) when is_list(Templates) ->
    [meta_template_1(T) || T <- Templates];
meta_template(Template) ->
    meta_template_1(Template).
%% Inner template nodes become abstract tuples mirroring the template
%% representation; metavariables go through meta_template_2; leaves are
%% abstracted as constants.
meta_template_1({template, Type, Attrs, Groups}) ->
    erl_syntax:tuple(
      [erl_syntax:atom(template),
       erl_syntax:atom(Type),
       erl_syntax:abstract(Attrs),
       erl_syntax:list([erl_syntax:list([meta_template_1(T) || T <- G])
                        || G <- Groups])]);
meta_template_1({Var}=V) ->
    meta_template_2(Var, V);
meta_template_1({'*',Var}=V) ->
    meta_template_2(Var, V);
meta_template_1(Leaf) ->
    erl_syntax:abstract(Leaf).
%% Atom-named metavariables whose name starts with an uppercase letter
%% (ASCII or Latin-1 À-Þ, excluding ×) become real Erlang variables; a
%% trailing '@' additionally wraps the variable in merl:term/1. Anything
%% else stays an abstract metavariable tuple.
meta_template_2(Var, V) when is_atom(Var) ->
    case atom_to_list(Var) of
        [C|_]=Name when C >= $A, C =< $Z ; C >= $À, C =< $Þ, C /= $× ->
            case lists:reverse(Name) of
                "@"++([_|_]=RevRealName) -> % don't allow empty RealName
                    RealName = lists:reverse(RevRealName),
                    erl_syntax:application(erl_syntax:atom(merl),
                                           erl_syntax:atom(term),
                                           [erl_syntax:variable(RealName)]);
                _ ->
                    %% plain automatic metavariable
                    erl_syntax:variable(Name)
            end;
        _ ->
            erl_syntax:abstract(V)
    end;
%% Integer-named metavariables: a trailing 9 marks an automatic Q-variable
%% (trailing 99 additionally wraps it in merl:term/1); other integers stay
%% abstract.
meta_template_2(Var, V) when is_integer(Var) ->
    if Var > 9, (Var rem 10) =:= 9 ->
           %% at least 2 digits, ends in 9: make it a Q-variable
           if Var > 99, (Var rem 100) =:= 99 ->
                  %% at least 3 digits, ends in 99: wrap in merl:term/1
                  Name = "Q" ++ integer_to_list(Var div 100),
                  erl_syntax:application(erl_syntax:atom(merl),
                                         erl_syntax:atom(term),
                                         [erl_syntax:variable(Name)]);
              true ->
                  %% plain automatic Q-variable
                  Name = integer_to_list(Var div 10),
                  erl_syntax:variable("Q" ++ Name)
           end;
       true ->
           erl_syntax:abstract(V)
    end.
-spec template_vars(template_or_templates()) -> [id()].
%% @doc Return an ordered list of the metavariables in the template.
template_vars(Template) ->
    template_vars(Template, []).
%% Accumulate metavariable ids into an ordset (hence the ordered result).
template_vars(Templates, Vars) when is_list(Templates) ->
    lists:foldl(fun template_vars_1/2, Vars, Templates);
template_vars(Template, Vars) ->
    template_vars_1(Template, Vars).
template_vars_1({template, _, _, Groups}, Vars) ->
    lists:foldl(fun (G, V) -> lists:foldl(fun template_vars_1/2, V, G) end,
                Vars, Groups);
template_vars_1({Var}, Vars) ->
    ordsets:add_element(Var, Vars);
template_vars_1({'*',Var}, Vars) ->
    ordsets:add_element(Var, Vars);
template_vars_1(_, Vars) ->
    Vars.
-spec tree(template_or_templates()) -> tree_or_trees().

%% @doc Revert a template to a normal syntax tree. Any remaining
%% metavariables are turned into `@'-prefixed atoms or `909'-prefixed
%% integers.
%% @see template/1

tree(Templates) when is_list(Templates) ->
    [tree_1(T) || T <- Templates];
tree(Template) ->
    tree_1(Template).

%% Convert a single template node back to a syntax tree.
tree_1({template, Type, Attrs, Groups}) ->
    %% flattening here is needed for templates created via source transforms
    Gs = [lists:flatten([tree_1(T) || T <- G]) || G <- Groups],
    erl_syntax:set_attrs(make_tree(Type, Gs), Attrs);
%% Leftover normal metavariables become "@Name" atoms / "909N" integers;
%% glob metavariables become "@@Name" atoms / "9099N" integers (i.e. the
%% literal source spellings of the metavariables).
tree_1({Var}) when is_atom(Var) ->
    erl_syntax:atom(list_to_atom("@"++atom_to_list(Var)));
tree_1({Var}) when is_integer(Var) ->
    erl_syntax:integer(list_to_integer("909"++integer_to_list(Var)));
tree_1({'*',Var}) when is_atom(Var) ->
    erl_syntax:atom(list_to_atom("@@"++atom_to_list(Var)));
tree_1({'*',Var}) when is_integer(Var) ->
    erl_syntax:integer(list_to_integer("9099"++integer_to_list(Var)));
tree_1(Leaf) ->
    Leaf.  % any syntax tree, not necessarily atomic (due to substitutions)
-spec subst(pattern_or_patterns(), env()) -> tree_or_trees().

%% @doc Substitute metavariables in a pattern or list of patterns, yielding
%% a syntax tree or list of trees as result. Both for normal metavariables
%% and glob metavariables, the substituted value may be a single element or
%% a list of elements. For example, if a list representing `1, 2, 3' is
%% substituted for `var' in either of `[foo, _@var, bar]' or `[foo, _@@var,
%% bar]', the result represents `[foo, 1, 2, 3, bar]'.

subst(Trees, Env) when is_list(Trees) ->
    [subst_0(T, Env) || T <- Trees];
subst(Tree, Env) ->
    subst_0(Tree, Env).

%% Convert the tree to template form, substitute, and convert the result
%% back to a syntax tree.
subst_0(Tree, Env) ->
    tree_1(subst_1(template(Tree), Env)).
-spec tsubst(pattern_or_patterns(), env()) -> template_or_templates().

%% @doc Like subst/2, but does not convert the result from a template back
%% to a tree. Useful if you want to do multiple separate substitutions.
%% @see subst/2
%% @see tree/2

tsubst(Trees, Env) when is_list(Trees) ->
    [subst_1(template(T), Env) || T <- Trees];
tsubst(Tree, Env) ->
    subst_1(template(Tree), Env).
%% Apply the substitutions in Env to one template node. Metavariables
%% (normal and glob) whose ids occur as keys in Env are replaced by the
%% bound tree or list of trees; unbound metavariables and plain leaves
%% are returned unchanged.
subst_1({template, Kind, Attrs, Groups}, Env) ->
    Substituted = [lists:flatten([subst_1(Node, Env) || Node <- Group])
                   || Group <- Groups],
    {template, Kind, Attrs, Substituted};
subst_1({Name}=Unbound, Env) ->
    case lists:keyfind(Name, 1, Env) of
        false -> Unbound;
        {Name, Replacement} -> Replacement
    end;
subst_1({'*',Name}=Unbound, Env) ->
    case lists:keyfind(Name, 1, Env) of
        false -> Unbound;
        {Name, Replacement} -> Replacement
    end;
subst_1(Leaf, _Env) ->
    Leaf.
-spec alpha(pattern_or_patterns(), [{id(), id()}]) -> template_or_templates().

%% @doc Alpha converts a pattern (renames variables). Similar to tsubst/1,
%% but only renames variables (including globs).
%% @see tsubst/2

alpha(Trees, Env) when is_list(Trees) ->
    [alpha_1(template(T), Env) || T <- Trees];
alpha(Tree, Env) ->
    alpha_1(template(Tree), Env).
%% Rename metavariables in one template node according to Env (a list of
%% {OldId, NewId} pairs); glob markers are preserved and everything else
%% is left untouched.
alpha_1({template, Kind, Attrs, Groups}, Env) ->
    Renamed = [lists:flatten([alpha_1(Node, Env) || Node <- Group])
               || Group <- Groups],
    {template, Kind, Attrs, Renamed};
alpha_1({Name}=Old, Env) ->
    case lists:keyfind(Name, 1, Env) of
        false -> Old;
        {Name, NewName} -> {NewName}
    end;
alpha_1({'*',Name}=Old, Env) ->
    case lists:keyfind(Name, 1, Env) of
        false -> Old;
        {Name, NewName} -> {'*',NewName}
    end;
alpha_1(Leaf, _Env) ->
    Leaf.
-spec match(pattern_or_patterns(), tree_or_trees()) ->
          {ok, env()} | error.

%% @doc Match a pattern against a syntax tree (or patterns against syntax
%% trees) returning an environment mapping variable names to subtrees; the
%% environment is always sorted on keys. Note that multiple occurrences of
%% metavariables in the pattern is not allowed, but is not checked.
%%
%% @see template/1
%% @see switch/2

%% The middle clauses normalize single-vs-list argument combinations.
%% A mismatch deep inside the match is signalled with `throw(error)',
%% caught here and turned into the `error' return value.
match(Patterns, Trees) when is_list(Patterns), is_list(Trees) ->
    try {ok, match_1(Patterns, Trees, [])}
    catch
        error -> error
    end;
match(Patterns, Tree) when is_list(Patterns) -> match(Patterns, [Tree]);
match(Pattern, Trees) when is_list(Trees) -> match([Pattern], Trees);
match(Pattern, Tree) ->
    try {ok, match_template(template(Pattern), Tree, [])}
    catch
        error -> error
    end.

%% Match pattern and tree lists pairwise, threading the environment.
%% Lists of different lengths raise the error `merl_match_arity'.
match_1([P|Ps], [T | Ts], Dict) ->
    match_1(Ps, Ts, match_template(template(P), T, Dict));
match_1([], [], Dict) ->
    Dict;
match_1(_, _, _Dict) ->
    erlang:error(merl_match_arity).
%% match a template against a syntax tree
%% Any mismatch is signalled by `throw(error)'.
match_template({template, Type, _, Gs}, Tree, Dict) ->
    case type(Tree) of
        Type -> match_template_1(Gs, subtrees(Tree), Dict);
        _ -> throw(error)  % type mismatch
    end;
match_template({Var}, _Tree, Dict)
  when Var =:= '_' ; Var =:= 0 ->
    Dict;  % anonymous variable
match_template({Var}, Tree, Dict) ->
    orddict:store(Var, Tree, Dict);
match_template(Tree1, Tree2, Dict) ->
    %% if Tree1 is not a template, Tree1 and Tree2 are both syntax trees
    case compare_trees(Tree1, Tree2) of
        true -> Dict;
        false -> throw(error)  % different trees
    end.

%% Match the subtree groups of a template node pairwise.
match_template_1([G1 | Gs1], [G2 | Gs2], Dict) ->
    match_template_2(G1, G2, match_template_1(Gs1, Gs2, Dict));
match_template_1([], [], Dict) ->
    Dict;
match_template_1(_, _, _Dict) ->
    throw(error).  % shape mismatch

%% Match the elements within one group. A glob {'*',Var} may consume any
%% number of elements; the fixed elements after the glob are then matched
%% against the tail of the trees, in reverse order, by match_glob/4.
match_template_2([{Var} | Ts1], [_ | Ts2], Dict)
  when Var =:= '_' ; Var =:= 0 ->
    match_template_2(Ts1, Ts2, Dict);  % anonymous variable
match_template_2([{Var} | Ts1], [Tree | Ts2], Dict) ->
    match_template_2(Ts1, Ts2, orddict:store(Var, Tree, Dict));
match_template_2([{'*',Var} | Ts1], Ts2, Dict) ->
    match_glob(lists:reverse(Ts1), lists:reverse(Ts2), Var, Dict);
match_template_2([T1 | Ts1], [T2 | Ts2], Dict) ->
    match_template_2(Ts1, Ts2, match_template(T1, T2, Dict));
match_template_2([], [], Dict) ->
    Dict;
match_template_2(_, _, _Dict) ->
    throw(error).  % shape mismatch

%% match the tails in reverse order; no further globs allowed
match_glob([{'*',Var} | _], _, _, _) ->
    fail("multiple glob variables in same match group: ~w", [Var]);
match_glob([T1 | Ts1], [T2 | Ts2], Var, Dict) ->
    match_glob(Ts1, Ts2, Var, match_template(T1, T2, Dict));
match_glob([], _Group, Var, Dict) when Var =:= '_' ; Var =:= 0 ->
    Dict;  % anonymous glob variable
match_glob([], Group, Var, Dict) ->
    %% whatever is left (still reversed) is what the glob consumed
    orddict:store(Var, lists:reverse(Group), Dict);
match_glob(_, _, _, _Dict) ->
    throw(error).  % shape mismatch
%% compare two syntax trees for equivalence
%% Two trees are equivalent when they have the same type, the same
%% subtree shape, and (for leaves) equal literal values.
compare_trees(T1, T2) ->
    Type1 = type(T1),
    case type(T2) of
        Type1 ->
            case subtrees(T1) of
                [] ->
                    case subtrees(T2) of
                        [] -> compare_leaves(Type1, T1, T2);
                        _Gs2 -> false  % shape mismatch
                    end;
                Gs1 ->
                    case subtrees(T2) of
                        [] -> false;  % shape mismatch
                        Gs2 -> compare_trees_1(Gs1, Gs2)
                    end
            end;
        _Type2 ->
            false  % different tree types
    end.

%% Compare the lists of subtree groups pairwise.
compare_trees_1([G1 | Gs1], [G2 | Gs2]) ->
    compare_trees_2(G1, G2) andalso compare_trees_1(Gs1, Gs2);
compare_trees_1([], []) ->
    true;
compare_trees_1(_, _) ->
    false.  % shape mismatch

%% Compare the trees within one group pairwise.
compare_trees_2([T1 | Ts1], [T2 | Ts2]) ->
    compare_trees(T1, T2) andalso compare_trees_2(Ts1, Ts2);
compare_trees_2([], []) ->
    true;
compare_trees_2(_, _) ->
    false.  % shape mismatch
%% Compare two leaf nodes of the given type by their literal value.
%% Node types not listed here carry no comparable value and are
%% considered trivially equal.
compare_leaves(atom, T1, T2) ->
    erl_syntax:atom_value(T1) =:= erl_syntax:atom_value(T2);
compare_leaves(char, T1, T2) ->
    erl_syntax:char_value(T1) =:= erl_syntax:char_value(T2);
compare_leaves(float, T1, T2) ->
    erl_syntax:float_value(T1) =:= erl_syntax:float_value(T2);
compare_leaves(integer, T1, T2) ->
    erl_syntax:integer_value(T1) =:= erl_syntax:integer_value(T2);
compare_leaves(string, T1, T2) ->
    erl_syntax:string_value(T1) =:= erl_syntax:string_value(T2);
compare_leaves(operator, T1, T2) ->
    erl_syntax:operator_name(T1) =:= erl_syntax:operator_name(T2);
compare_leaves(text, T1, T2) ->
    erl_syntax:text_string(T1) =:= erl_syntax:text_string(T2);
compare_leaves(variable, T1, T2) ->
    erl_syntax:variable_name(T1) =:= erl_syntax:variable_name(T2);
compare_leaves(_Type, _T1, _T2) ->
    true.  % trivially equal nodes
%% @doc Match against one or more clauses with patterns and optional guards.
%%
%% Note that clauses following a default action will be ignored.
%%
%% @see match/2

-type switch_clause() ::
          {pattern_or_patterns(), guarded_actions()}
        | {pattern_or_patterns(), guard_test(), switch_action()}
        | default_action().

-type guarded_actions() :: guarded_action() | [guarded_action()].

-type guarded_action() :: switch_action() | {guard_test(), switch_action()}.

%% An action receives the environment produced by the successful match.
-type switch_action() :: fun( (env()) -> any() ).

-type guard_test() :: fun( (env()) -> boolean() ).

%% A default action takes no arguments and runs unconditionally.
-type default_action() :: fun( () -> any() ).

-spec switch(tree_or_trees(), [switch_clause()]) -> any().

%% Try each clause in order: pattern clauses are attempted via match/2
%% (in switch_1/4), and a nullary fun acts as a catch-all default.
%% Running out of clauses raises `merl_switch_clause'; a malformed
%% clause raises `merl_switch_badarg'.
switch(Trees, [{Patterns, GuardedActions} | Cs]) when is_list(GuardedActions) ->
    switch_1(Trees, Patterns, GuardedActions, Cs);
switch(Trees, [{Patterns, GuardedAction} | Cs]) ->
    switch_1(Trees, Patterns, [GuardedAction], Cs);
switch(Trees, [{Patterns, Guard, Action} | Cs]) ->
    switch_1(Trees, Patterns, [{Guard, Action}], Cs);
switch(_Trees, [Default | _Cs]) when is_function(Default, 0) ->
    Default();
switch(_Trees, []) ->
    erlang:error(merl_switch_clause);
switch(_Tree, _) ->
    erlang:error(merl_switch_badarg).
%% Attempt one switch clause; fall through to the remaining clauses Cs
%% when the pattern does not match.
switch_1(Trees, Patterns, GuardedActions, Cs) ->
    case match(Patterns, Trees) of
        {ok, Env} ->
            switch_2(Env, GuardedActions, Trees, Cs);
        error ->
            switch(Trees, Cs)
    end.

%% Run the first guarded action whose guard accepts Env; an unguarded
%% action always runs. If every guard in the clause fails, resume with
%% the remaining switch clauses.
switch_2(Env, [{Guard, Action} | Bs], Trees, Cs)
  when is_function(Guard, 1), is_function(Action, 1) ->
    case Guard(Env) of
        true -> Action(Env);
        false -> switch_2(Env, Bs, Trees, Cs)
    end;
switch_2(Env, [Action | _Bs], _Trees, _Cs) when is_function(Action, 1) ->
    Action(Env);
switch_2(_Env, [], Trees, Cs) ->
    switch(Trees, Cs);
switch_2(_Env, _, _Trees, _Cs) ->
    erlang:error(merl_switch_badarg).
%% ------------------------------------------------------------------------
%% Internal utility functions
%% Abort with a formatted error message, thrown as {error, String}.
fail(Text) ->
    fail(Text, []).

fail(Format, Args) ->
    Message = lists:flatten(io_lib:format(Format, Args)),
    throw({error, Message}).
%% Normalize program text to a flat character list. A list of lines
%% (strings or binaries) is joined with a newline appended after every
%% line; a single binary is converted to a list; anything else (already
%% a string) is returned unchanged.
flatten_text([L | _]=Lines) when is_list(L) ->
    AppendLine = fun(Line, Tail) -> Line ++ [$\n | Tail] end,
    lists:foldr(AppendLine, "", Lines);
flatten_text([B | _]=Lines) when is_binary(B) ->
    AppendLine = fun(Line, Tail) -> binary_to_list(Line) ++ [$\n | Tail] end,
    lists:foldr(AppendLine, "", Lines);
flatten_text(Text) when is_binary(Text) ->
    binary_to_list(Text);
flatten_text(Text) ->
    Text.
-spec metavar(tree()) -> {string()} | false.

%% Check if a syntax tree represents a metavariable. If not, 'false' is
%% returned; otherwise, this returns a 1-tuple with a string containing the
%% variable name including lift/glob prefixes but without any leading
%% metavariable prefix, and instead prefixed with "v" for a variable or "n"
%% for an integer.
%%
%% Metavariables are atoms starting with @, variables starting with _@,
%% strings starting with "'@, or integers starting with 909. Following the
%% prefix, one or more _ or 0 characters (unless it's the last character in
%% the name) may be used to indicate "lifting" of the variable one or more
%% levels, and after that, a @ or 9 character indicates a glob metavariable
%% rather than a normal metavariable. If the name after the prefix is _ or
%% 0, the variable is treated as an anonymous catch-all pattern in matches.

metavar(Tree) ->
    case type(Tree) of
        atom ->
            case erl_syntax:atom_name(Tree) of
                "@" ++ Cs when Cs =/= [] -> {"v"++Cs};
                _ -> false
            end;
        variable ->
            case erl_syntax:variable_literal(Tree) of
                "_@" ++ Cs when Cs =/= [] -> {"v"++Cs};
                _ -> false
            end;
        integer ->
            case erl_syntax:integer_value(Tree) of
                %% 9090 is the smallest integer whose decimal form is
                %% "909" followed by at least one more digit
                N when N >= 9090 ->
                    case integer_to_list(N) of
                        "909" ++ Cs -> {"n"++Cs};
                        _ -> false
                    end;
                _ -> false
            end;
        string ->
            case erl_syntax:string_value(Tree) of
                "'@" ++ Cs -> {"v"++Cs};
                _ -> false
            end;
        _ ->
            false
    end.

%% wrappers around erl_syntax functions to provide more uniform shape of
%% generic subtrees (maybe this can be fixed in syntax_tools one day)

%% Like erl_syntax:type/1, but reports the empty list ('nil') as 'list'.
type(T) ->
    case erl_syntax:type(T) of
        nil -> list;
        Type -> Type
    end.
%% Like erl_syntax:subtrees/1, but normalizes the group structure of a
%% few node types so that optional parts always occupy a fixed group
%% (possibly empty), giving templates a predictable shape.
subtrees(T) ->
    case erl_syntax:type(T) of
        tuple ->
            [erl_syntax:tuple_elements(T)];  %% don't treat {} as a leaf
        nil ->
            [[], []];  %% don't treat [] as a leaf, but as a list
        list ->
            %% suffix group is [] for a proper list, [Suffix] otherwise
            case erl_syntax:list_suffix(T) of
                none ->
                    [erl_syntax:list_prefix(T), []];
                S ->
                    [erl_syntax:list_prefix(T), [S]]
            end;
        binary_field ->
            [[erl_syntax:binary_field_body(T)],
             erl_syntax:binary_field_types(T)];
        clause ->
            %% guard group is [] when absent, [Guard] when present
            case erl_syntax:clause_guard(T) of
                none ->
                    [erl_syntax:clause_patterns(T), [],
                     erl_syntax:clause_body(T)];
                G ->
                    [erl_syntax:clause_patterns(T), [G],
                     erl_syntax:clause_body(T)]
            end;
        receive_expr ->
            %% timeout and action groups are empty without an 'after' part
            case erl_syntax:receive_expr_timeout(T) of
                none ->
                    [erl_syntax:receive_expr_clauses(T), [], []];
                E ->
                    [erl_syntax:receive_expr_clauses(T), [E],
                     erl_syntax:receive_expr_action(T)]
            end;
        record_access ->
            %% record type group is [] when absent
            case erl_syntax:record_access_type(T) of
                none ->
                    [[erl_syntax:record_access_argument(T)], [],
                     [erl_syntax:record_access_field(T)]];
                R ->
                    [[erl_syntax:record_access_argument(T)], [R],
                     [erl_syntax:record_access_field(T)]]
            end;
        record_expr ->
            %% argument group is [] for creation, [Arg] for update
            case erl_syntax:record_expr_argument(T) of
                none ->
                    [[], [erl_syntax:record_expr_type(T)],
                     erl_syntax:record_expr_fields(T)];
                V ->
                    [[V], [erl_syntax:record_expr_type(T)],
                     erl_syntax:record_expr_fields(T)]
            end;
        record_field ->
            %% value group is [] when the field has no default/value
            case erl_syntax:record_field_value(T) of
                none ->
                    [[erl_syntax:record_field_name(T)], []];
                V ->
                    [[erl_syntax:record_field_name(T)], [V]]
            end;
        _ ->
            erl_syntax:subtrees(T)
    end.
%% Inverse of subtrees/1: rebuild a node of the given type from its
%% normalized subtree groups, falling back to erl_syntax:make_tree/2 for
%% all other node types.
make_tree(list, [P, []]) -> erl_syntax:list(P);
make_tree(list, [P, [S]]) -> erl_syntax:list(P, S);
make_tree(tuple, [E]) -> erl_syntax:tuple(E);
make_tree(binary_field, [[B], Ts]) -> erl_syntax:binary_field(B, Ts);
make_tree(clause, [P, [], B]) -> erl_syntax:clause(P, none, B);
make_tree(clause, [P, [G], B]) -> erl_syntax:clause(P, G, B);
make_tree(receive_expr, [C, [], _A]) -> erl_syntax:receive_expr(C);
make_tree(receive_expr, [C, [E], A]) -> erl_syntax:receive_expr(C, E, A);
make_tree(record_access, [[E], [], [F]]) -> erl_syntax:record_access(E, F);
make_tree(record_access, [[E], [T], [F]]) -> erl_syntax:record_access(E, T, F);
make_tree(record_expr, [[], [T], F]) -> erl_syntax:record_expr(T, F);
make_tree(record_expr, [[E], [T], F]) -> erl_syntax:record_expr(E, T, F);
make_tree(record_field, [[N], []]) -> erl_syntax:record_field(N);
make_tree(record_field, [[N], [E]]) -> erl_syntax:record_field(N, E);
make_tree(Type, Groups) ->
    erl_syntax:make_tree(Type, Groups).
%% Attach standalone comments to the top-level trees in Ts, by line
%% number relative to StartLine. Each comment is a tuple
%% {Line, Col, Indent, Text} (presumably as produced by a comment
%% scanner — confirm against the caller). A single resulting tree is
%% returned unwrapped; otherwise a list of trees is returned.
merge_comments(_StartLine, [], [T]) -> T;
merge_comments(_StartLine, [], Ts) -> Ts;
merge_comments(StartLine, Comments, Ts) ->
    merge_comments(StartLine, Comments, Ts, []).

merge_comments(_StartLine, [], [], [T]) -> T;
merge_comments(_StartLine, [], [T], []) -> T;
merge_comments(_StartLine, [], Ts, Acc) ->
    lists:reverse(Acc, Ts);
%% no trees left: turn all remaining comments into standalone comment nodes
merge_comments(StartLine, Cs, [], Acc) ->
    merge_comments(StartLine, [], [],
                   [erl_syntax:set_pos(
                      erl_syntax:comment(Indent, Text),
                      StartLine + Line - 1)
                    || {Line, _, Indent, Text} <- Cs] ++ Acc);
merge_comments(StartLine, [C|Cs], [T|Ts], Acc) ->
    {Line, _Col, Indent, Text} = C,
    CommentLine = StartLine + Line - 1,
    case erl_syntax:get_pos(T) of
        Pos when Pos < CommentLine ->
            %% tree comes before the comment: keep scanning forward
            %% TODO: traverse sub-tree rather than only the top level nodes
            merge_comments(StartLine, [C|Cs], Ts, [T|Acc]);
        CommentLine ->
            %% comment on the same line as the tree: attach after it
            Tc = erl_syntax:add_postcomments(
                   [erl_syntax:comment(Indent, Text)], T),
            merge_comments(StartLine, Cs, [Tc|Ts], Acc);
        _ ->
            %% comment before the tree: attach in front of it
            Tc = erl_syntax:add_precomments(
                   [erl_syntax:comment(Indent, Text)], T),
            merge_comments(StartLine, Cs, [Tc|Ts], Acc)
    end.
%%% @doc Minimal vixie-cron expression parser. Based on the information in
%%% https://en.wikipedia.org/wiki/Cron
%%%
%%% @todo Add support for "L", "W", and "?"
%%% @todo Check for invalid expressions
%%%
%%% Copyright 2017 <NAME> <<EMAIL>>
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%% @end
%%% @copyright <NAME> <<EMAIL>>
%%% @author <NAME> <<EMAIL>>
%%%
-module(erl_vcron).
-author("<EMAIL>").
-github("https://github.com/marcelog").
-homepage("http://marcelog.github.com/").
-license("Apache License 2.0").
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Exports.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-export([applies/2]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Public API.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Given a datetime() and a vixie-like cron expression, will return true
%% if the expression covers the datetime().
%%
%% The expression must contain exactly five whitespace-separated fields:
%% minute, hour, day of month, month, day of week (0 = Sunday).
%% NOTE(review): vixie cron applies OR semantics when both day-of-month
%% and day-of-week are restricted; this implementation requires both to
%% match — confirm this is intended.
-spec applies(calendar:datetime(), string()) -> boolean().
applies(
  _DateTime = {{Year, Month, Day}, {Hour, Minute, _Second}},
  Expression
) ->
  [
    MinuteExpression,
    HourExpression,
    DoMExpression,
    MonthExpression,
    DoWExpression
  ] = string:tokens(Expression, " "),
  %% calendar:day_of_the_week/3 returns 1..7 (Monday..Sunday); cron uses
  %% 0..6 with 0 = Sunday, so remap 7 to 0.
  DoW = case calendar:day_of_the_week(Year, Month, Day) of
    7 -> 0;
    DoW_ -> DoW_
  end,
  applies_standard(Minute, 60, MinuteExpression) andalso
  applies_standard(Hour, 60, HourExpression) andalso
  applies_standard(Day, 31, DoMExpression) andalso
  applies_standard(Month, 12, MonthExpression) andalso
  applies_standard(DoW, 6, DoWExpression).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Private API.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% @doc Tries to match a number for expressions like "*/1", "X-Y", "X", "X,Y,Z"
-spec applies_standard(
  non_neg_integer(), pos_integer(), string()
) -> boolean().
applies_standard(_Number, _Max, "*") ->
  true;
%% "*/N": step values, e.g. */5 in the minutes field means every 5 minutes
%% (vixie-cron extension; shorthand for the explicit POSIX list form).
applies_standard(Number, Max, [$*, $/|Interval]) ->
  List = generate_from_interval(list_to_integer(Interval), Max),
  is_in_list(Number, List);
applies_standard(Number, _Max, String) ->
  %% "A,B,C": commas separate items of a list of values.
  IsList = string:tokens(String, ","),
  %% "A-B": hyphens define an inclusive range of values.
  IsRange = string:tokens(String, "-"),
  List = if
    length(IsList) > 1 ->
      [ list_to_integer(M) || M <- IsList];
    length(IsRange) > 1 ->
      [Min, Max] = IsRange,
      lists:seq(list_to_integer(Min), list_to_integer(Max));
    true ->
      %% a single literal value
      [list_to_integer(String)]
  end,
  is_in_list(Number, List).

%% @doc Returns a list from an expression like */5.
-spec generate_from_interval(
  pos_integer(), pos_integer()
) -> [non_neg_integer()].
generate_from_interval(Interval, Max) ->
  generate_from_interval(0, Max, Interval, []).

%% @doc Tail recursion for generate_from_interval/2.
-spec generate_from_interval(
  non_neg_integer(), pos_integer(), pos_integer(), [non_neg_integer()]
) -> [non_neg_integer()].
generate_from_interval(Current, Max, _Interval, Acc) when Current > Max ->
  Acc;
generate_from_interval(Current, Max, Interval, Acc)
  when (Current + Interval) =:= Max ->
  %% Kludge kept from the original: include both 0 and Max. Including Max
  %% does not make sense for minutes/hours (there is no minute 60), but it
  %% does make sense for months (1..12, no month 0 in Erlang); including 0
  %% covers the "*/N" start value for minutes/hours.
  [0, Max|Acc];
generate_from_interval(Current, Max, Interval, Acc) ->
  NewCurrent = Current + Interval,
  generate_from_interval(NewCurrent, Max, Interval, [NewCurrent|Acc]).

%% @doc true if the given list contains the element.
-spec is_in_list(non_neg_integer(), [non_neg_integer()]) -> boolean().
is_in_list(Element, List) ->
  %% lists:member/2 uses exact (=:=) comparison, matching the previous
  %% filter-based implementation without building an intermediate list.
  lists:member(Element, List).
-module(solution).
-export([main/0]).
% 12s CPU
% 1024M RAM
% 50kB Code
% 1 <= N <= M <= 10^9
% M - N <= 10^6 (max number of gap positions to test)
% Pi(10^4.5) = Pi(31623) = 3401 (number of primes), from WolframAlpha query
% Every gap, except 4:(3,5) and 6:(5,7)is of the form 30*n, 30*n+12 or 30*n+18
% source: https://de.wikipedia.org/wiki/Primzahlzwilling#Eigenschaften
%% Entry point: read N and M from standard input and print the number of
%% twin-prime pairs lying completely within {N,..,M}.
main() ->
    N = read_int(),
    M = read_int(),
    Pairs = pairs(N, M),
    write_int(Pairs).
% number of pairs in {N,..,M}
pairs(N, _M) when N < 1 ->
n_too_small;
pairs(N, M) when N > M ->
n_larger_m;
pairs(_N, M) when M > 1000000000 ->
m_too_large;
pairs(N, M) when M - N < 2 ->
0;
pairs(N, M) ->
Primes = get_primes(),
%io:format("Primes=~p~n", [Primes]),
pairs(N+1, M-1, 0, Primes).
% I: check pair (I-1,I+1)
% I_max: maximal feasible I value
% Found: pairs found so far
pairs(I, I_max, Found, _Primes) when I > I_max ->
    Found;
pairs(I, I_max, Found, Primes) when I =:= 4; I =:= 6 ->
    % pairs at 4:(3,5) and 6:(5, 7)
    pairs(I + 2, I_max, Found + 1, Primes);
pairs(I, I_max, Found, Primes) when I < 6 ->
    % 2:(1,3) 3:(2,4), 5:(4,6) - nothing here
    pairs(I+1, I_max, Found, Primes);
pairs(I, I_max, Found, Primes) ->
    % for I >= 6 we can use the gap property, so we
    % need to check only 3 out of 30 positions (10^5 checks)
    pairs(I, I_max, I rem 30, Found, Primes).

% test the positions with remainder from {0, 29}
% Only remainders 0, 12 and 18 (mod 30) can host a twin-prime gap (see
% the gap property noted in the header), so those are tested and all
% other positions are skipped forward to the next candidate remainder.
pairs(I, I_max, _R, Found, _Primes) when I > I_max ->
    % search finished
    Found;
pairs(I, I_max, 0, Found, Primes) ->
    % need to test candidate
    Found2 = test(I, Found, Primes),
    pairs(I+12, I_max, 12, Found2, Primes);
pairs(I, I_max, 12, Found, Primes) ->
    Found2 = test(I, Found, Primes),
    pairs(I+6, I_max, 18, Found2, Primes);
pairs(I, I_max, 18, Found, Primes) ->
    Found2 = test(I, Found, Primes),
    pairs(I+12, I_max, 0, Found2, Primes);
pairs(I, I_max, R, Found, Primes) when R < 12 ->
    % skip to next candidate
    pairs(I+(12-R), I_max, 12, Found, Primes);
pairs(I, I_max, R, Found, Primes) when R < 18 ->
    pairs(I+(18-R), I_max, 18, Found, Primes);
pairs(I, I_max, R, Found, Primes) ->
    pairs(I+(30-R), I_max, 0, Found, Primes).
% Count the twin pair around gap position I: add one to Found when both
% neighbours I+1 and I-1 are prime, otherwise return Found unchanged.
test(I, Found, Primes) ->
    case test_prime(I+1, Primes) andalso test_prime(I-1, Primes) of
        true -> Found + 1;
        false -> Found
    end.

% Test whether I is prime by trial division against the given ascending
% list of prime divisors. Assumes the list covers all primes up to
% sqrt(I) for the numbers being tested.
% Fix: the clause for 1 now comes first, so test_prime(1, []) is false
% (the original clause order returned true for 1 on an empty list).
test_prime(1, _Primes) -> false;
test_prime(_I, []) -> true;
test_prime(I, [P|_T]) when P*P > I -> true;
test_prime(I, [P|_T]) when I rem P =:= 0 -> false;
test_prime(I, [_P|T]) ->
    test_prime(I, T).
% get the primes within {2, .., Max} as ascending list
% (Max = 31623 ~ sqrt(10^9), the largest divisor ever needed)
get_primes() ->
    Max = 31623,
    [D || D <- lists:seq(2, Max), is_prime(D)].

% test if I is prime
is_prime(1) ->
    false;
is_prime(I) when I =:= 2; I =:= 3; I =:= 5; I =:= 7 ->
    % small primes handled directly
    true;
is_prime(I) when I rem 2 =:= 0; I rem 3 =:= 0; I rem 5 =:= 0; I rem 7 =:= 0 ->
    % quick rejection of small-factor composites
    false;
is_prime(I) ->
    is_prime(I, 11).

% test if N is prime by trying all odd divisors from Divisor up to
% sqrt(N); assumes N is odd and not divisible by 2, 3, 5 or 7.
% Too slow for all test cases directly, so it is only used to build the
% divisor list in get_primes/0.
is_prime(N, Divisor) when Divisor * Divisor > N -> true;
is_prime(N, Divisor) when N rem Divisor =:= 0 -> false;
is_prime(N, Divisor) ->
    is_prime(N, Divisor + 2).
%% Read a single integer from standard input; crashes on malformed input
%% (the {ok, [I]} match fails).
read_int() ->
    {ok, [I]} = io:fread("", "~d"),
    I.
%% Print a term (the pair count, or an error atom from pairs/2)
%% followed by a newline.
write_int(Value) ->
    io:format("~p~n", [Value]).
-module(spigg_analyze).
-export([ beam/1
, forms/1
]).
-include("spigg.hrl").
%% Q: How should we deal with the implicit import of the erlang module?
%% A: All unqualified calls that do not refer to a local function or an
%% imported function, can be regarded as calls to the erlang module.
%%
%% Q: Why not analyze erl files directly?
%% A: When doing so, you have to deal with:
%% * macros
%% * parse transforms
%% * include files
%%
%% Q: How to deal with higher order functions? e.g. lists:map?
%% e.g. a function may be pure in itself, if the fun(s) passed
%% to it is pure. Also, a function may construct a fun with a
%% side effect and just return it.
%% A: Spigg tracks the introduction of side effects, which may or
%% may not be the same place as where side effects are executed.
%% Thus, referring to fun erlang:spawn/1 is the same as introducing
%% the "spawn" side effect.
%% Per-module data gathered during the first pass over the forms.
-record(mod_data, { name = undefined :: module()
                    %% imported {Fun, Arity} -> source module
                  , imports = #{} :: #{{atom(), arity()} => module()}
                    %% ordset of exported MFAs
                  , exports = ordsets:new() :: ordsets:ordset(mfa())
                    %% raw function forms collected for the second pass
                  , raw_functions = [] :: [erl_parse:abstract_form()]
                  }).
-spec beam(Path::string()) -> {ok, spigg:db()} |
                              {error, not_found}.
%% Analyze a compiled .beam file by extracting its abstract code chunk
%% (requires the module to have been compiled with debug_info).
%% A missing file yields {error, not_found}.
%% NOTE(review): other beam_lib failures (e.g. a beam without abstract
%% code) are not handled and will crash with case_clause — confirm
%% whether that is intended.
beam(Path) when is_list(Path) ->
    case beam_lib:chunks(Path, [abstract_code]) of
        {ok, {_Mod, [{abstract_code, {raw_abstract_v1, Code}}]}} ->
            forms(Code);
        {error, beam_lib, {file_error, _Path, enoent}} ->
            {error, not_found}
    end.
-spec forms([erl_parse:abstract_form()]) -> {ok, spigg:db()}.
%% Analyze a module given as a list of abstract forms: first collect
%% module-level data (name, imports, raw function forms), then analyze
%% each function and build the resulting database.
forms(Forms) when is_list(Forms) ->
    ModData = analyze_module(Forms),
    RawFunctions = ModData#mod_data.raw_functions,
    Functions = analyze_local(RawFunctions, ModData, []),
    {ok, #db{functions = Functions}}.
%% First pass: collect the module name, imports and raw function forms.
analyze_module(Forms) ->
    analyze_module(Forms, #mod_data{}).

analyze_module([], ModData) -> ModData;
%% -module(Name): only accepted while the name is still undefined.
analyze_module([{attribute, _Line, module, ModName}|Rest],
               #mod_data{name=undefined}=ModData) ->
    analyze_module(Rest, ModData#mod_data{name=ModName});
%% -import(Mod, [{F,A},...]): map each {F,A} to its source module;
%% later imports of the same {F,A} override earlier ones (maps:merge).
analyze_module([{attribute, _Line, import, {Mod, FAs}}|Rest],
               #mod_data{imports=OldImports}=ModData) ->
    NewImports = maps:from_list([{FA, Mod} || FA <- FAs]),
    Imports = maps:merge(OldImports, NewImports),
    analyze_module(Rest, ModData#mod_data{imports=Imports});
%% All other attributes (and the eof marker) are ignored.
analyze_module([{attribute, _Line, _Key, _Value}|Rest], ModData) ->
    analyze_module(Rest, ModData);
analyze_module([{eof, _Line}|Rest], ModData) ->
    analyze_module(Rest, ModData);
%% Function forms are collected (in reverse order) for the second pass.
analyze_module([{function, _, _, _, _}=Function|Rest],
               #mod_data{raw_functions=Functions}=ModData) ->
    analyze_module(Rest, ModData#mod_data{raw_functions=[Function|Functions]}).
%% Second pass: analyze each collected function form, building a map of
%% MFA -> #function{} with its outgoing calls and native side effects.
analyze_local([], _ModData, Funs) ->
    maps:from_list(Funs);
analyze_local([{function, _Line, Name, Arity, Code}|Rest], ModData, Funs) ->
    {SideEffects, Calls} = analyze_code(Code, ModData, [], []),
    F = #function { calls = Calls
                  , native_side_effects = SideEffects
                  },
    MFA = {ModData#mod_data.name, Name, Arity},
    analyze_local(Rest, ModData, [{MFA, F}|Funs]).
%% Walk a work list of abstract expressions, accumulating:
%% - SideEffects: ordset of {Line, Effect} for natively side-effecting
%%   constructs (currently only message receives), and
%% - Calls: [{Line, MFA}] for every call or fun reference found.
%% Sub-expressions are analyzed by pushing them onto the work list.
%%
%% Fix: four clauses previously repeated `_Line` within one pattern
%% (call/'fun', call/named_fun, record_field/5, record_index). Even
%% underscore-prefixed variables bind in Erlang, so the repetition
%% silently required both line numbers to be equal for the clause to
%% match; a multi-line expression could then fall through and crash
%% with function_clause. The second occurrence is now a distinct name.
analyze_code([], _ModData, SideEffects, Calls) ->
    {SideEffects, Calls};
analyze_code([{atom, _Line, _Val}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{bc, _Line, Expr, Generators}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([Expr|Generators] ++ Code, ModData, SideEffects, Calls);
analyze_code([{b_generate, _Line, Lhs, Rhs}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([Lhs, Rhs|Code], ModData, SideEffects, Calls);
analyze_code([{bin, _Line, _Val}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{block, _Line, SubCode}|Code], ModData, SideEffects, Calls) ->
    analyze_code(SubCode ++ Code, ModData, SideEffects, Calls);
analyze_code([{call, _Line, {'fun', _FunLine, {clauses, Clauses}}, Args}|Code],
             ModData, SideEffects, Calls) ->
    %% Immediately-invoked anonymous fun: analyze its clauses and args.
    analyze_code(Clauses ++ Args ++ Code, ModData, SideEffects, Calls);
analyze_code([{call, _Line, {named_fun, _FunLine, _Name, Clauses}, Args}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code(Clauses ++ Args ++ Code, ModData, SideEffects, Calls);
analyze_code([ {call, Line, {remote, _, {atom, _, Mod}, {atom, _, Fun}}, Args}
             | Code], ModData, SideEffects, Calls) ->
    %% Fully qualified function call
    Call = {Line, {Mod, Fun, length(Args)}},
    analyze_code(Args ++ Code, ModData, SideEffects, [Call|Calls]);
analyze_code([{call, Line, {atom, _, Fun}, Args}|Code],
             ModData, SideEffects, Calls) ->
    %% Unqualified call: local function, -import'ed one, or erlang BIF.
    Arity = length(Args),
    Mod = identify_source_module(ModData, Fun, Arity),
    Call = {Line, {Mod, Fun, Arity}},
    analyze_code(Args ++ Code, ModData, SideEffects, [Call|Calls]);
analyze_code([{call, Line, {remote, _, ModExpr, FunExpr}, Args}|Code],
             ModData, SideEffects, Calls) ->
    %% Dynamic call: module/function are expressions, so record it as
    %% erlang:apply/3 and analyze the expressions themselves.
    Call = {Line, {erlang, apply, 3}},
    analyze_code([ModExpr, FunExpr|Args] ++ Code, ModData, SideEffects,
                 [Call|Calls]);
analyze_code([{call, _Line, Expr, Args}|Code], ModData, SideEffects, Calls) ->
    %% Fun call
    analyze_code([Expr|Args] ++ Code, ModData, SideEffects, Calls);
analyze_code([{'case', _Line, Expr, Clauses}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([Expr|Clauses] ++ Code, ModData, SideEffects, Calls);
analyze_code([{'catch', _Line, Expr}|Code], ModData, SideEffects, Calls) ->
    analyze_code([Expr|Code], ModData, SideEffects, Calls);
analyze_code([{char, _Line, _Val}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{clause, _Line, _Args, _Guards, Code}|Clauses],
             ModData, SideEffects, Calls) ->
    %% Guards are side effect free by design, so we disregard them completely.
    analyze_code(Code ++ Clauses, ModData, SideEffects, Calls);
analyze_code([{cons, _Line, HeadExpr, TailExpr}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([HeadExpr, TailExpr|Code], ModData, SideEffects, Calls);
analyze_code([{float, _Line, _Val}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{'fun', _Line, {clauses, Clauses}}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code(Clauses ++ Code, ModData, SideEffects, Calls);
analyze_code([{'fun', Line, {function, Fun, Arity}}|Code],
             ModData, SideEffects, Calls) ->
    %% `fun f/N` reference: treated like a call to the referenced function,
    %% since it introduces whatever effects that function has.
    Mod = identify_source_module(ModData, Fun, Arity),
    Call = {Line, {Mod, Fun, Arity}},
    analyze_code(Code, ModData, SideEffects, [Call|Calls]);
analyze_code([{'fun', Line,
               {function, {atom, _, M}, {atom, _, F}, {integer, _, A}}}|Code],
             ModData, SideEffects, Calls) ->
    %% `fun m:f/N` with literal components.
    Call = {Line, {M, F, A}},
    analyze_code(Code, ModData, SideEffects, [Call|Calls]);
analyze_code([{'fun', _Line, {function, _M, _F, _A}}|Code],
             ModData, SideEffects, Calls) ->
    %% Dynamic fun reference. This is a blind spot in our current
    %% implementation, since creating a dynamic fun is not the same
    %% as calling erlang:apply/3.
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{'if', _Line, Clauses}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Clauses ++ Code, ModData, SideEffects, Calls);
analyze_code([{integer, _Line, _Val}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{generate, _Line, Lhs, Rhs}|Code], ModData, SideEffects, Calls) ->
    analyze_code([Lhs, Rhs|Code], ModData, SideEffects, Calls);
analyze_code([{lc, _Line, Lhs, Generators}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([Lhs|Generators] ++ Code, ModData, SideEffects, Calls);
analyze_code([{map, _Line, Elements}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Elements ++ Code, ModData, SideEffects, Calls);
analyze_code([{map, _Line, MapExpr, KeyExprs}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([MapExpr|KeyExprs] ++ Code, ModData, SideEffects, Calls);
analyze_code([{map_field_assoc, _Line, Lhs, Rhs}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([Lhs, Rhs|Code], ModData, SideEffects, Calls);
analyze_code([{map_field_exact, _Line, Lhs, Rhs}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([Lhs, Rhs|Code], ModData, SideEffects, Calls);
analyze_code([{match, _Line, _Lhs, Rhs}|Code], ModData, SideEffects, Calls) ->
    %% Only the right-hand side of a match can contain calls/effects.
    analyze_code([Rhs|Code], ModData, SideEffects, Calls);
analyze_code([{named_fun, _Line, _Name, Clauses}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code(Clauses ++ Code, ModData, SideEffects, Calls);
analyze_code([{nil, _Line}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{op, _, _Op, Expr}|Code], ModData, SideEffects0, Calls0) ->
    {SideEffects, Calls} = analyze_code(listify(Expr), ModData,
                                        SideEffects0, Calls0),
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{op, _, _Op, Lhs, Rhs}|Code],
             ModData, SideEffects0, Calls0) ->
    {SideEffects1, Calls1} = analyze_code(listify(Lhs), ModData,
                                          SideEffects0, Calls0),
    {SideEffects2, Calls2} = analyze_code(listify(Rhs), ModData,
                                          SideEffects1, Calls1),
    analyze_code(Code, ModData, SideEffects2, Calls2);
analyze_code([{'receive', Line, Clauses}|Code], ModData, SideEffects0, Calls) ->
    %% A receive is a native side effect in its own right.
    SideEffects = ordsets:add_element({Line, 'msg_receive'}, SideEffects0),
    analyze_code(Clauses++Code, ModData, SideEffects, Calls);
analyze_code([{'receive', Line, Clauses, _Tmo, After}|Code],
             ModData, SideEffects0, Calls) ->
    SideEffects = ordsets:add_element({Line, 'msg_receive'}, SideEffects0),
    analyze_code(Clauses++After++Code, ModData, SideEffects, Calls);
analyze_code([{record, _Line, Expr, _Name, Fields}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([Expr|Fields]++Code, ModData, SideEffects, Calls);
analyze_code([{record, _Line, _Name, Fields}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code(Fields++Code, ModData, SideEffects, Calls);
analyze_code([{record_field, _Line, FieldExpr, ValExpr}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code([FieldExpr, ValExpr|Code], ModData, SideEffects, Calls);
analyze_code([ {record_field, _Line, RcrdExpr, _Record, {atom, _FieldLine, _Field}}
             |Code], ModData, SideEffects, Calls) ->
    analyze_code([RcrdExpr|Code], ModData, SideEffects, Calls);
analyze_code([{record_index, _Line, _Record, {atom, _FieldLine, _Field}}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{string, _Line, _String}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls);
analyze_code([{'try', _Line, Exprs, MatchClauses, ErrorClauses, After}|Code],
             ModData, SideEffects, Calls) ->
    analyze_code(lists:append([Exprs, MatchClauses, ErrorClauses, After, Code]),
                 ModData, SideEffects, Calls);
analyze_code([{tuple, _Line, Elements}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Elements++Code, ModData, SideEffects, Calls);
analyze_code([{var, _Line, _}|Code], ModData, SideEffects, Calls) ->
    analyze_code(Code, ModData, SideEffects, Calls).
%% Ensure a value is a list: wrap non-lists, pass lists through unchanged.
listify(Expr) when not is_list(Expr) -> [Expr];
listify(Exprs)                       -> Exprs.
%% Resolve which module an unqualified call to Fun/Arity refers to:
%% a locally defined function wins, then an -import'ed one, otherwise
%% the call is assumed to target an auto-imported erlang BIF.
identify_source_module(#mod_data{name=Name, raw_functions=Raw,
                                 imports=Imports}, Fun, Arity) ->
    IsLocalF = fun({function, _Line, F, A, _Code}) ->
                   F =:= Fun andalso A =:= Arity
               end,
    case lists:any(IsLocalF, Raw) of
        true -> Name;
        false ->
            case maps:find({Fun, Arity}, Imports) of
                {ok, Mod} -> Mod;
                error -> erlang
            end
    end.
-module(aec_target).
%% API
-export([recalculate/2,
determine_delta_header_height/1,
verify/2]).
-include("blocks.hrl").
%% Return height of the header to be used as a start point for target calculations,
%% based on the following formula:
%% delta_height(Header) = Header.height - aec_governance:key_blocks_to_check_difficulty_count().
%% Returns {error, chain_too_short_to_recalculate_target} if the initial height is
%% negative or does not point above the genesis block.
-spec determine_delta_header_height(
        aec_headers:header()) -> {ok, non_neg_integer()}
                               | {error, chain_too_short_to_recalculate_target}.
determine_delta_header_height(Header) ->
    Height = aec_headers:height(Header),
    %% Number of key blocks the difficulty calculation looks back over.
    BlocksCount = aec_governance:key_blocks_to_check_difficulty_count(),
    InitialHeight = Height - BlocksCount,
    GenesisHeight = aec_block_genesis:height(),
    %% The start point must lie strictly after genesis, otherwise there is
    %% not enough history to recalculate the target.
    case InitialHeight > GenesisHeight of
        true ->
            {ok, InitialHeight};
        false ->
            {error, chain_too_short_to_recalculate_target}
    end.
%% Target recalculation.
%%
%% Some concepts:
%%
%% Difficulty = HIGHEST_TARGET / Target
%% Rate = Capacity / Difficulty (blocks/ms)
%% Capacity = number of potential solutions per ms generated by miners
%%
%% DesiredTimeBetweenBlocks = aec_governance:expected_block_mine_rate()
%% DesiredRate = 1 / DesiredTimeBetweenBlocks
%%
%% The basic idea of the algorithm is to estimate the current network capacity
%% based on the `N` (= 10) previous blocks and use that to set the new
%% target:
%%
%% NewDifficulty = EstimatedCapacity / DesiredRate
%% NewTarget = HIGHEST_TARGET / NewDifficulty
%% = HIGHEST_TARGET * DesiredRate / EstimatedCapacity
%%
%% We can estimate the network capacity used to mine a given block `i` as
%%
%% EstimatedCapacity[i] = Difficulty[i] / MiningTime[i]
%% MiningTime[i] = Time[i + 1] - Time[i]
%%
%% The estimated capacity across all `N` blocks is then the weighted (by time)
%% average of the estimated capacities for each block.
%%
%% EstimatedCapacity = Sum(EstimatedCapacity[i] * MiningTime[i]) / TotalTime
%% = Sum(Difficulty[i]) / TotalTime
%% = Sum(HIGHEST_TARGET / Target[i]) / TotalTime
%%
%% Now, the problem is that we can't do any floating point arithmetic (to
%% ensure the calculation can be verified by other nodes), so we pick a
%% reasonably big integer K (= HIGHEST_TARGET * 2^32) and compute
%%
%% EstimatedCapacity ≈ Sum(K * HIGHEST_TARGET div Target[i]) / TotalTime / K
%%
%% Then
%%
%% NewTarget = HIGHEST_TARGET * DesiredRate / EstimatedCapacity
%% ≈ HIGHEST_TARGET * DesiredRate * TotalTime * K / Sum(K * HIGHEST_TARGET div Target[i])
%% ≈ DesiredRate * TotalTime * K / Sum(K div Target[i])
%% ≈ TotalTime * K div (DesiredTimeBetweenBlocks * Sum(K div Target[i]))
%%
%% Recalculate the expected target for Top from the previous key headers
%% using integer-only arithmetic (see the derivation in the comment above).
-spec recalculate(aec_headers:header(), nonempty_list(aec_headers:header())) -> non_neg_integer().
recalculate(Top, PrevHeaders0) ->
    %% Ensure the list of previous headers are in order - oldest first.
    PrevHeaders = lists:keysort(#header.height, PrevHeaders0),
    %% K is the fixed-point scaling factor used to avoid float arithmetic.
    K = aec_pow:scientific_to_integer(?HIGHEST_TARGET_SCI) * (1 bsl 32),
    SumKDivTargets = lists:sum([ K div aec_pow:scientific_to_integer(aec_headers:target(Hd))
                                 || Hd <- PrevHeaders ]),
    DesiredTimeBetweenBlocks = aec_governance:expected_block_mine_rate(),
    Last = hd(PrevHeaders), %% Oldest first!
    TotalTime = mining_time_between(Last, Top),
    NewTargetInt = TotalTime * K div (DesiredTimeBetweenBlocks * SumKDivTargets),
    %% Cap at the maximum (i.e. easiest) allowed target.
    min(?HIGHEST_TARGET_SCI, aec_pow:integer_to_scientific(NewTargetInt)).
%% Check that the target recorded in Top matches the target derived from
%% the previous headers; returns ok, or a tagged mismatch error carrying
%% both the actual and the expected value.
-spec verify(aec_headers:header(), nonempty_list(aec_headers:header())) ->
                    ok | {error, {wrong_target, non_neg_integer(), non_neg_integer()}}.
verify(Top, PrevHeaders) ->
    Actual = aec_headers:target(Top),
    case recalculate(Top, PrevHeaders) of
        Expected when Expected == Actual ->
            ok;
        Expected ->
            {error, {wrong_target, Actual, Expected}}
    end.
%% Internals

%% Milliseconds elapsed between two headers' timestamps, clamped to a
%% minimum of 1 to avoid zero or negative durations from clock skew.
-spec mining_time_between(aec_headers:header(), aec_headers:header()) -> integer().
mining_time_between(Header1, Header2) ->
    Time1 = aec_headers:time_in_msecs(Header1),
    Time2 = aec_headers:time_in_msecs(Header2),
    max(1, Time2 - Time1).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
-module(couch_log_lager).
-behaviour(couch_log).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([
debug/2,
info/2,
notice/2,
warning/2,
error/2,
critical/2,
alert/2,
emergency/2,
set_level/1
]).
%% The functions below are thin 1:1 adapters from the couch_log
%% behaviour callbacks onto the corresponding lager log-level calls.

-spec debug(string(), list()) -> ok.
debug(Fmt, Args) ->
    lager:debug(Fmt, Args).

-spec info(string(), list()) -> ok.
info(Fmt, Args) ->
    lager:info(Fmt, Args).

-spec notice(string(), list()) -> ok.
notice(Fmt, Args) ->
    lager:notice(Fmt, Args).

-spec warning(string(), list()) -> ok.
warning(Fmt, Args) ->
    lager:warning(Fmt, Args).

-spec error(string(), list()) -> ok.
error(Fmt, Args) ->
    lager:error(Fmt, Args).

-spec critical(string(), list()) -> ok.
critical(Fmt, Args) ->
    lager:critical(Fmt, Args).

-spec alert(string(), list()) -> ok.
alert(Fmt, Args) ->
    lager:alert(Fmt, Args).

-spec emergency(string(), list()) -> ok.
emergency(Fmt, Args) ->
    lager:emergency(Fmt, Args).
%% Set the given log level on every handler attached to the lager
%% event manager; crashes if any handler rejects the change.
-spec set_level(atom()) -> ok.
set_level(Level) ->
    _ = [ok = lager:set_loglevel(Handler, Level)
         || Handler <- gen_event:which_handlers(lager_event)],
    ok.
-ifdef(TEST).

%% Verify that set_level/1 actually changes the console backend's level.
callbacks_test_() ->
    {setup,
        fun setup/0,
        fun cleanup/1,
        [
            ?_assertEqual(info, lager:get_loglevel(lager_console_backend)),
            ?_assertEqual(ok, set_level(debug)),
            ?_assertEqual(debug, lager:get_loglevel(lager_console_backend)),
            ?_assertEqual(ok, set_level(alert)),
            ?_assertEqual(alert, lager:get_loglevel(lager_console_backend))
        ]
    }.

setup() ->
    setup_lager().

%% Start lager with a single console backend at info level. Returns
%% whether the error_logger tty handler was installed beforehand so
%% cleanup/1 can restore it.
setup_lager() ->
    Handlers = gen_event:which_handlers(error_logger),
    HasTTY = lists:member(error_logger_tty_h, Handlers),
    error_logger:tty(false),
    application:load(lager),
    application:set_env(lager, handlers, [{lager_console_backend, info}]),
    application:set_env(lager, error_logger_redirect, false),
    application:set_env(lager, async_threshold, undefined),
    lager:start(),
    HasTTY.

cleanup(HasTTY) ->
    application:stop(lager),
    application:stop(goldrush),
    error_logger:tty(HasTTY).

-endif.
%%%------------------------------------------------------------------------
%% Copyright 2018, OpenCensus Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-----------------------------------------------------------------------
-module(oc_stat_transform).
-export([parse_transform/2]).
%% @doc
%% `oc_stat_transform' is a parse transform that can detect `oc_stat:record' calls
%% with constant measure names and generate remote measure module call from that.
%% At the run-time this means we don't have to do a lookup for the module name and
%% if measure doesn't exist, `{unknown_measure, Name}' error will be thrown.
%% @end
parse_transform(Forms, _Options) ->
HiForms = lists:map(fun walk_ast/1, Forms),
HiForms.
walk_ast({function, Line, Name, Args, Clauses}) ->
{function, Line, Name, Args, walk_clauses([], Clauses)};
walk_ast(Form) ->
Form.
walk_clauses(Acc, []) ->
lists:reverse(Acc);
walk_clauses(Acc, [{clause, Line, Arguments, Guards, Body}|Rest]) ->
reset_gensym(),
walk_clauses([{clause, Line, Arguments, Guards, walk_body([], Body)}|Acc], Rest).
walk_body(Acc, []) ->
lists:reverse(Acc);
walk_body(Acc, [H|R]) ->
walk_body([transform_statement(H)|Acc], R).
transform_statement({call, Line,
{remote, _, {atom, _, oc_stat}, {atom, _, record}},
[Tags, {cons, _, _, _} = Measurements]}=_Stmt) ->
gen_record_calls(Line, Tags, erl_syntax:list_elements(Measurements));
transform_statement({call, Line,
{remote, _, {atom, _, oc_stat}, {atom, _, record}},
[Tags, {MType, _, _}=Measurement, Value]}=_Stmt)
when is_atom(MType) orelse is_binary(MType) orelse is_list(MType) ->
gen_record_calls(Line, Tags, [{tuple, Line, [Measurement, Value]}]);
transform_statement(Stmt) when is_tuple(Stmt) ->
list_to_tuple(transform_statement(tuple_to_list(Stmt)));
transform_statement(Stmt) when is_list(Stmt) ->
[transform_statement(S) || S <- Stmt];
transform_statement(Stmt) ->
Stmt.
%% =============================================================================
%% private
%% =============================================================================
gen_record_calls(Line, Tags, Measurements) ->
CTags = {var, Line, gensym("CTags")},
GTags = {var, Line, gensym("GTags")},
{block, Line,
[{match, Line, CTags, Tags},
{match, Line, GTags, gen_prepare_tags(Line, CTags)}]
++
[measure_module_record_call(Line, MeasureName, GTags, Value)
|| {tuple, _, [{_, _, MeasureName}, Value]} <- Measurements]}.
measure_module_record_call(Line, MeasureName, GTags, Value) ->
{'try', Line,
[{call, Line,
{remote, Line, {atom, Line, oc_stat_measure:module_name(MeasureName)}, {atom, Line, record}},
[GTags, Value]}],
[{clause, Line, [{var, Line, '_'}], [], [{atom, 279, 'ok'}]}],
[{clause, Line,
[{tuple, Line,
[{atom, Line, error}, {atom, Line, undef}, {var, Line, '_'}]}],
[],
[{call, Line,
{remote, Line, {atom, Line, erlang}, {atom, Line, error}},
[{tuple, Line,
[{atom, Line, unknown_measure}, erl_parse:abstract(MeasureName)]}]}]}],
[]}.
gen_prepare_tags(Line, CTags) ->
{'case', Line, CTags,
[{clause, Line,
[{var, Line, '_'}],
[[{call, Line, {atom, Line, is_map}, [CTags]}]],
[CTags]},
{clause, Line,
[{var, Line, '_'}],
[],
[{call, Line,
{remote, Line, {atom, Line, oc_tags}, {atom, Line, from_ctx}},
[CTags]}]}]}.
gensym(Name) ->
put(oc_gensym_counter, get(oc_gensym_counter) + 1),
list_to_atom(
lists:flatten(
io_lib:format("$oc_gen_~s_~B$", [Name, get(oc_gensym_counter)]))).
reset_gensym() ->
put(oc_gensym_counter, 0). | src/oc_stat_transform.erl | 0.623606 | 0.570002 | oc_stat_transform.erl | starcoder |
-module(unicodedata_normalization).
-include_lib("ucd/include/ucd.hrl").
-export([ quick_check/2
, normalize/2
, canonical_decomposition/1
, compatibility_decomposition/1
, canonical_composition/1
, canonical_ordering/1
]).
-type normalization_form() :: nfc
| nfkc
| nfd
| nfkd.
-export_type([normalization_form/0]).
%% @doc Quick check (UAX #15) whether String is already in the given
%% normalization form, without performing the normalization itself.
-spec quick_check(normalization_form(), string()) -> yes | no | maybe.
quick_check(Form, String) ->
    quick_check_1(Form, String, 0, yes).

%% Walk the string tracking the previous combining class; a non-zero
%% class that decreases means the string is not canonically ordered.
quick_check_1(_, [], _, Result) ->
    Result;
quick_check_1(Form, [CP | CPs], LastClass, Result) ->
    case ucd:combining_class(CP) of
        Class when LastClass > Class, Class /= 0 -> no;
        Class -> case quick_check_2(Form, CP) of
                     no -> no;
                     maybe -> quick_check_1(Form, CPs, Class, maybe);
                     _ -> quick_check_1(Form, CPs, Class, Result)
                 end
    end.

%% Per-code-point quick-check property lookup for each form.
quick_check_2(nfc, CP) -> ucd:nfc_quick_check(CP);
quick_check_2(nfkc, CP) -> ucd:nfkc_quick_check(CP);
quick_check_2(nfd, CP) -> ucd:nfd_quick_check(CP);
quick_check_2(nfkd, CP) -> ucd:nfkd_quick_check(CP).
%% @doc Normalize String to the requested Unicode normalization form.
%% quick_check/2 is consulted first so already-normalized strings are
%% returned without any rewriting work.
-spec normalize(normalization_form(), string()) -> string().
normalize(Form, String) ->
    case quick_check(Form, String) of
        yes -> String;
        _ -> normalize_1(Form, String)
    end.

%% Full normalization pipelines: decompose (canonical or compatibility),
%% reorder combining marks, and for the composed forms recompose.
normalize_1(nfc, String) ->
    String1 = canonical_decomposition(String),
    String2 = canonical_ordering(String1),
    canonical_composition(String2);
normalize_1(nfd, String) ->
    String1 = canonical_decomposition(String),
    canonical_ordering(String1);
normalize_1(nfkc, String) ->
    String1 = compatibility_decomposition(String),
    String2 = canonical_ordering(String1),
    canonical_composition(String2);
normalize_1(nfkd, String) ->
    String1 = compatibility_decomposition(String),
    canonical_ordering(String1).
%% @doc Recursively apply canonical decomposition mappings to String.
%% Compatibility mappings (tagged tuples) are deliberately not expanded.
-spec canonical_decomposition(string()) -> string().
canonical_decomposition(String) ->
    canonical_decomposition_1(String, []).

canonical_decomposition_1([], Acc) ->
    lists:reverse(Acc);
canonical_decomposition_1([CP | CPs], AccIn) ->
    AccOut =
        case decomposition(CP) of
            %% A bare list is a canonical mapping; decompose it recursively.
            DCPs when is_list(DCPs) -> push(canonical_decomposition(DCPs), AccIn);
            %% undefined or a {Tag, CPs} compatibility mapping: keep CP as is.
            _ -> [CP | AccIn]
        end,
    canonical_decomposition_1(CPs, AccOut).
%% @doc Recursively apply both canonical and compatibility decomposition
%% mappings to String (used by the NFKC/NFKD forms).
-spec compatibility_decomposition(string()) -> string().
compatibility_decomposition(String) ->
    compatibility_decomposition_1(String, []).

compatibility_decomposition_1([], Acc) ->
    lists:reverse(Acc);
compatibility_decomposition_1([CP | CPs], AccIn) ->
    AccOut =
        case decomposition(CP) of
            undefined -> [CP | AccIn];
            %% {Tag, CPs} is a compatibility mapping; expand it here too.
            {_, DCPs} -> push(compatibility_decomposition(DCPs), AccIn);
            DCPs -> push(compatibility_decomposition(DCPs), AccIn)
        end,
    compatibility_decomposition_1(CPs, AccOut).
%% @doc Canonically compose a decomposed, canonically ordered string:
%% each starter (combining class 0) is combined with following marks
%% whenever a primary composite exists (UAX #15 composition algorithm).
%% State: Starter is the pending starter (or undefined), DecAcc holds
%% combining marks (in reverse) that could not be composed yet, and Acc
%% is the reversed output.
-spec canonical_composition(string()) -> string().
canonical_composition(String) ->
    canonical_composition_1(String, undefined, [], []).

canonical_composition_1([], undefined, [], Acc) ->
    lists:reverse(Acc);
canonical_composition_1([], Starter, DecAcc, Acc) ->
    %% End of input: flush the pending starter and its uncombined marks.
    lists:reverse(push([Starter | lists:reverse(DecAcc)], Acc));
canonical_composition_1([CP | CPs], undefined, [], Acc) ->
    case ucd:combining_class(CP) of
        0 -> canonical_composition_1(CPs, CP, [], Acc);
        _ -> canonical_composition_1(CPs, undefined, [], [CP |Acc])
    end;
canonical_composition_1([CP | CPs], Starter, [], Acc) ->
    case composition(Starter, CP) of
        undefined ->
            case ucd:combining_class(CP) of
                0 ->
                    %% A new starter: emit the previous one unchanged.
                    canonical_composition_1(CPs, CP, [], [Starter | Acc]);
                _ ->
                    canonical_composition_1(CPs, Starter, [CP], Acc)
            end;
        CCP ->
            %% Composed: the composite becomes the new pending starter.
            canonical_composition_1(CPs, CCP, [], Acc)
    end;
canonical_composition_1([CP | CPs], Starter, [DCP | _] = DecAcc, Acc) ->
    CP_CCC = ucd:combining_class(CP),
    DCP_CCC = ucd:combining_class(DCP),
    case DCP_CCC >= CP_CCC of
        true when CP_CCC == 0 ->
            %% New starter: flush pending starter plus marks, start over.
            Acc1 = push([Starter | lists:reverse(DecAcc)], Acc),
            canonical_composition_1(CPs, CP, [], Acc1);
        true ->
            %% CP is blocked from the starter by an equal/higher-class mark.
            canonical_composition_1(CPs, Starter, [CP | DecAcc], Acc);
        false ->
            case composition(Starter, CP) of
                undefined when CP_CCC == 0 ->
                    Acc1 = push([Starter | lists:reverse(DecAcc)], Acc),
                    canonical_composition_1(CPs, CP, [], Acc1);
                undefined ->
                    canonical_composition_1(CPs, Starter, [CP|DecAcc], Acc);
                CCP ->
                    canonical_composition_1(CPs, CCP, DecAcc, Acc)
            end
    end.
%% @doc Sort runs of combining marks (non-zero combining class) into
%% canonical order; starters (class 0) delimit the runs.
-spec canonical_ordering(string()) -> string().
canonical_ordering(String) ->
    canonical_ordering_1(String, [], []).

%% Acc1 is the reversed output; Acc2 collects the current mark run as
%% {-Class, CP} pairs (class negated because the run is itself built in
%% reverse, which keeps the keysort stable overall).
canonical_ordering_1([], Acc1, Acc2) ->
    lists:reverse(canonical_sort(Acc2) ++ Acc1);
canonical_ordering_1([CP | CPs], Acc1, Acc2) ->
    case ucd:combining_class(CP) of
        0 when Acc2 == [] ->
            canonical_ordering_1(CPs, [CP | Acc1], []);
        0 ->
            %% Starter ends the current run: sort and emit the marks first.
            NewAcc1 = canonical_sort(Acc2) ++ Acc1,
            canonical_ordering_1(CPs, [CP | NewAcc1], []);
        CC ->
            canonical_ordering_1(CPs, Acc1, [{-CC, CP} | Acc2])
    end.

canonical_sort(Acc) ->
    [CP || {_, CP} <- lists:keysort(1, Acc)].
%% Decomposition mapping for a code point: the UCD mapping if present,
%% otherwise the algorithmic Hangul syllable decomposition, otherwise
%% undefined.
decomposition(CP) ->
    case ucd:decomposition(CP) of
        undefined ->
            case ucd:hangul_syllable_type(CP) of
                lv -> hangul_syllable_decomposition_lv(CP);
                lvt -> hangul_syllable_decomposition_lvt(CP);
                _ -> undefined
            end;
        Value ->
            Value
    end.
%% Primary composite of two code points, or undefined. Falls back to the
%% algorithmic Hangul composition when no UCD pair mapping exists.
composition(CP1, CP2) ->
    case ucd:composition(CP1, CP2) of
        undefined -> hangul_syllable_composition(CP1, CP2);
        Value -> Value
    end.
%% Hangul syllable composition/decomposition constants (UAX #15).
-define(HANGUL_SYLLABLE_BASE,16#ac00).
-define(HANGUL_SYLLABLE_L_BASE,16#1100).
-define(HANGUL_SYLLABLE_V_BASE,16#1161).
-define(HANGUL_SYLLABLE_T_BASE,16#11a7).
-define(HANGUL_SYLLABLES_L,19).
-define(HANGUL_SYLLABLES_V,21).
-define(HANGUL_SYLLABLES_T,28).
-define(HANGUL_SYLLABLES_N,(?HANGUL_SYLLABLES_V * ?HANGUL_SYLLABLES_T)).
-define(HANGUL_SYLLABLES_COUNT,(?HANGUL_SYLLABLES_L * ?HANGUL_SYLLABLES_N)).

%% Decompose an LV-type syllable into its leading consonant and vowel jamo.
hangul_syllable_decomposition_lv(CP) ->
    Idx = CP - ?HANGUL_SYLLABLE_BASE,
    LIdx = Idx div ?HANGUL_SYLLABLES_N,
    VIdx = (Idx rem ?HANGUL_SYLLABLES_N) div ?HANGUL_SYLLABLES_T,
    [?HANGUL_SYLLABLE_L_BASE + LIdx, ?HANGUL_SYLLABLE_V_BASE + VIdx].

%% Decompose an LVT-type syllable into its LV syllable and trailing jamo.
hangul_syllable_decomposition_lvt(CP) ->
    Idx = CP - ?HANGUL_SYLLABLE_BASE,
    LVIdx = (Idx div ?HANGUL_SYLLABLES_T) * ?HANGUL_SYLLABLES_T,
    TIdx = Idx rem ?HANGUL_SYLLABLES_T,
    [?HANGUL_SYLLABLE_BASE + LVIdx, ?HANGUL_SYLLABLE_T_BASE + TIdx].

%% Compose L+V jamo into an LV syllable, or an LV syllable plus trailing
%% jamo into an LVT syllable; undefined for any other pair.
hangul_syllable_composition(CP1, CP2) when CP1 >= 16#1100, CP1 =< 16#1112
                                         , CP2 >= 16#1161, CP2 =< 16#1175 ->
    LIdx = CP1 - ?HANGUL_SYLLABLE_L_BASE,
    VIdx = CP2 - ?HANGUL_SYLLABLE_V_BASE,
    LVIdx = LIdx * ?HANGUL_SYLLABLES_N + VIdx * ?HANGUL_SYLLABLES_T,
    ?HANGUL_SYLLABLE_BASE + LVIdx;
hangul_syllable_composition(CP1, CP2) ->
    case ucd:hangul_syllable_type(CP1) of
        lv when CP2 >= 16#11a8, CP2 =< 16#11c2 ->
            TIdx = CP2 - ?HANGUL_SYLLABLE_T_BASE,
            CP1 + TIdx;
        _ ->
            undefined
    end.
%% Prepend the elements of the first list, in reverse order, onto Chars.
push(Items, Chars) -> lists:reverse(Items, Chars).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% NOTE(review): several literal pairs below mix precomposed and
%% decomposed forms that render identically; do not retype them by hand.
canonical_decomposition_test_() -> [
    ?_assertEqual("Ḋ", canonical_decomposition("Ḋ"))
   ,?_assertEqual("Ḋ", canonical_decomposition("Ḋ"))
   ,?_assertEqual("A\x{0308}ffin", canonical_decomposition("Äffin"))
   ,?_assertEqual("A\x{0308}\x{FB03}n",canonical_decomposition("Ä\x{FB03}n"))
   ,?_assertEqual("Henry IV", canonical_decomposition("Henry IV"))
   ,?_assertEqual("Henry \x{2163}", canonical_decomposition("Henry \x{2163}"))
   ,?_assertEqual("ガ", canonical_decomposition("ガ"))
   ,?_assertEqual("가", canonical_decomposition("가"))
   ,?_assertEqual("뗣", canonical_decomposition("뗣"))
].

%% Unlike the canonical form, compatibility decomposition also expands
%% ligatures and roman-numeral style compatibility characters.
compatibility_decomposition_test_() -> [
    ?_assertEqual("Ḋ", compatibility_decomposition("Ḋ"))
   ,?_assertEqual("Ḋ", compatibility_decomposition("Ḋ"))
   ,?_assertEqual("A\x{0308}ffin",compatibility_decomposition("Äffin"))
   ,?_assertEqual("A\x{0308}ffin",compatibility_decomposition("Ä\x{FB03}n"))
   ,?_assertEqual("Henry IV",compatibility_decomposition("Henry IV"))
   ,?_assertEqual("Henry IV",compatibility_decomposition("Henry \x{2163}"))
   ,?_assertEqual("ガ", compatibility_decomposition("ガ"))
   ,?_assertEqual("가", compatibility_decomposition("가"))
   ,?_assertEqual("뗣", compatibility_decomposition("뗣"))
].

%% Combining marks must be reordered by combining class, stably.
canonical_ordering_test_() -> [
    ?_assertEqual( [68,775],
                   canonical_ordering([68,775]))
   ,?_assertEqual( [68,803,775],
                   canonical_ordering([68,803,775]))
   ,?_assertEqual( [68,803,775],
                   canonical_ordering([68,775,803]))
   ,?_assertEqual( [68,795,803,775],
                   canonical_ordering([68,775,795,803]))
   ,?_assertEqual( [65,66,775,67,68],
                   canonical_ordering([65,66,775,67,68]))
   ,?_assertEqual( [65,66,803,775,67,68],
                   canonical_ordering([65,66,803,775,67,68]))
   ,?_assertEqual( [65,66,803,775,67,68],
                   canonical_ordering([65,66,775,803,67,68]))
   ,?_assertEqual( [65,66,795,803,775,67,68],
                   canonical_ordering([65,66,775,795,803,67,68]))
].

-endif.
-module(attempt).
%% API
-export([
to/1,
map/2,
recover/2,
ok/1,
error/1,
flatten/1,
flat_map/2,
recover_with/2,
traverse/2,
sequence/1]).
%%====================================================================
%% API types
%%====================================================================
-type f0(T) :: fun(() -> T).
-type f1(T, U) :: fun((T) -> U).
-export_type([f0/1, f1/2]).
-type attempt(T) :: {ok, Result :: T} | {error, Reason :: any()}.
-export_type([attempt/1]).
%%====================================================================
%% API functions
%%====================================================================
%% @doc `to` runs F() and captures its outcome as an attempt:
%% - {ok, Result} and {error, Reason} returns pass through unchanged
%% - any other return value is wrapped as {ok, Result}
%% - a raised error (erlang:error/1) becomes {error, Reason}
-spec to(f0(T)) -> attempt(T).
to(F) ->
    try F() of
        {ok, _} = Success -> Success;
        {error, _} = Failure -> Failure;
        Plain -> {ok, Plain}
    catch
        error:Reason -> {error, Reason}
    end.
%% @doc `ok` wraps value into successful attempt.
-spec ok(T) -> attempt(T).
ok(Result) ->
    {ok, Result}.

%% @doc `error` wraps reason into failed attempt.
%% Note: this shadows the erlang:error/1 BIF within this module; raise
%% with an explicit erlang:error/1 where needed.
-spec error(Reason :: any()) -> attempt(none()).
error(Reason) ->
    {error, Reason}.
%% @doc `map` applies the given function to the value held by a
%% successful attempt; a failed attempt is passed through untouched.
-spec map(attempt(T), Map :: f1(T, U)) -> attempt(U).
map({ok, Value}, Transform) ->
    {ok, Transform(Value)};
map(Other, _Transform) ->
    Other.
%% @doc `flatten` collapses one level of nesting: {ok, Inner} where
%% Inner is itself an attempt becomes Inner; everything else (including
%% a failed outer attempt) is returned unchanged.
-spec flatten(attempt(attempt(T))) -> attempt(T).
flatten({ok, {Tag, _} = Inner}) when Tag =:= ok; Tag =:= error ->
    Inner;
flatten(Attempt) ->
    Attempt.
%% @doc `flat_map` converts attempt into another one based on value held using FlatMap function.
%% Successful attempt is mapped using provided function (which itself
%% returns an attempt). Failed attempt is returned as is.
-spec flat_map(attempt(T), FlatMap :: f1(T, attempt(U))) -> attempt(U).
flat_map(Attempt, FlatMap) ->
    %% map/2 yields attempt(attempt(U)); flatten/1 collapses the nesting.
    flatten(map(Attempt, FlatMap)).
%% @doc `recover` turns a failed attempt into a success by computing a
%% fallback value from the failure reason; successes pass through as is.
-spec recover(attempt(T), Recover :: f1(Reason :: any(), T)) -> attempt(T).
recover({error, Reason}, Fallback) ->
    {ok, Fallback(Reason)};
recover(Other, _Fallback) ->
    Other.
%% @doc `recover_with` allows to handle error held by attempt providing new attempt instead.
%% Successful attempt is returned as is.
%% Failed attempt is converted to attempt returned by provided function.
-spec recover_with(attempt(T), Recover :: f1(Reason :: any(), attempt(T))) -> attempt(T).
recover_with(Attempt, Recover) ->
    %% recover/2 yields {ok, attempt(T)} on failure; flatten/1 unwraps it.
    flatten(recover(Attempt, Recover)).
%% @doc `traverse` applies Traverse to each list element, collecting the
%% {ok, _} results in their original order. The first {error, _} aborts
%% the traversal and is returned as is.
-spec traverse(list(T), f1(T, attempt(U))) -> attempt(list(U)).
traverse(List, Traverse) ->
    traverse_loop(List, Traverse, []).

%% Tail-recursive worker: conses each result and reverses once at the
%% end, replacing the original O(n^2) `Acc ++ [Value]` accumulation.
traverse_loop([], _Traverse, Acc) ->
    {ok, lists:reverse(Acc)};
traverse_loop([H | T], Traverse, Acc) ->
    case Traverse(H) of
        {ok, Value} -> traverse_loop(T, Traverse, [Value | Acc]);
        {error, _} = Error -> Error
    end.
%% @doc `sequence` allows to convert list of attempts into single attempt containing list of values. If any
%% attempt on a list is failed first failure will be returned.
-spec sequence(list(attempt(T))) -> attempt(list(T)).
sequence(List) ->
    %% Traversing with the identity function flips list(attempt(T)) into
    %% attempt(list(T)).
    traverse(List, fun(A) -> A end).
-module(otter_filter).
-export([span/1, pre_span/1]).
-include_lib("otter_lib/include/otter.hrl").
%%----------------------------------------------------------------------
%% @doc Invoke the span filter on active span
%% @end
%%----------------------------------------------------------------------
-spec span(Span :: span()) -> span().
span(#span{timestamp = 0} = Span) ->
    %% A zero timestamp marks an inactive (pre-filtered out) span: skip.
    Span;
span(Span) ->
    run(span, Span).
%%----------------------------------------------------------------------
%% @doc Invoke the span pre filter. If the filter result is to discard
%% the span in this phase (when the start_with_tags/1 API function is
%% called), then the timestamp or the span is set to 0, indicating that
%% the span is not active.
%% @end
%%----------------------------------------------------------------------
-spec pre_span(Span :: span()) -> span().
pre_span(InitialSpan) ->
    run(prefilter, InitialSpan).
%%----------------------------------------------------------------------
%% Internal functions
%%----------------------------------------------------------------------

%% Look up the rules configured for the given filter phase and apply
%% them. The configuration is either an inline rule list or an
%% {Module, Function, ExtraArgs} callback that returns the (possibly
%% modified) span plus a list of actions. With no configuration the
%% span passes through untouched.
run(Type, Span) ->
    case otter_config:read(config_key(Type), []) of
        [{_, _} | _] = Rules ->
            run_rules(Type, Span, Rules);
        {Module, Function, ExtraArgs} ->
            {NewSpan, Actions} = Module:Function(Span, ExtraArgs),
            do_actions(Type, NewSpan, make_tags(NewSpan), Actions, Span);
        [] ->
            Span
    end.

%% Configuration key holding the rules for each filter phase.
config_key(prefilter) -> prefilter_rules;
config_key(_) -> filter_rules.
%% Evaluate the configured rule list against the span tags and apply the
%% resulting actions. The last argument of do_actions/5 is the default
%% value to return when no action overrides it (the span unchanged).
run_rules(Type, Span, Rules) ->
    %% A prefilter stops at the first matching rule ('break'); the final
    %% span filter evaluates every rule ('continue').
    BreakOrContinue = case Type of prefilter -> break; _ -> continue end,
    Tags = make_tags(Span),
    Actions = otter_lib_filter:run(Tags, Rules, BreakOrContinue),
    do_actions(Type, Span, Tags, Actions, Span).
%% Apply the filter actions one by one, threading the span to return
%% (`Return`) through the recursion. Clause order matters: the catch-all
%% "unknown action" clause must stay after every recognized action.
do_actions(Type, Span, Tags, [{snapshot_count, Prefix, TagNames} | Rest], Return) ->
    %% Build the snapshot-counter key from the prefix plus the values of
    %% the requested tags ('undefined' for tags the span does not have).
    SnapCountKey = Prefix ++ [
        case lists:keyfind(Key, 1, Tags) of
            {Key, Value} -> Value;
            _ -> undefined
        end ||
        Key <- TagNames
    ],
    otter_lib_snapshot_count:snapshot(SnapCountKey, Span),
    do_actions(Type, Span, Tags, Rest, Return);
%% Prefilter decisions: 'allow' keeps the span active, 'discard'
%% deactivates it (timestamp set to 0) so later filtering skips it.
do_actions(prefilter, Span, Tags, [allow | Rest], _Return) ->
    do_actions(prefilter, Span, Tags, Rest, Span);
do_actions(prefilter, Span, Tags, [discard | Rest], _Return) ->
    do_actions(prefilter, Span, Tags, Rest, deactivate_span(Span));
%% Final-filter action: hand the span over to the zipkin sender.
do_actions(span, Span, Tags, [send_to_zipkin | Rest], Return) ->
    otter_conn_zipkin:store_span(Span),
    do_actions(span, Span, Tags, Rest, Return);
%% Unknown actions are counted for diagnostics and otherwise ignored.
do_actions(Type, Span, Tags, [UnknownAction| Rest], Return) ->
    otter_lib_snapshot_count:snapshot(unknown_filter_action, {UnknownAction, Span}),
    do_actions(Type, Span, Tags, Rest, Return);
do_actions(_Type, _Span, _Tags, [], Return) ->
    Return.
%% Expose the span name and duration as synthetic tags so filter rules
%% can match on them alongside the user-supplied tags.
make_tags(#span{tags = Tags, name = Name, duration = Duration}) ->
    [{otter_span_name, Name}, {otter_span_duration, Duration} | Tags].
%% In case of prefiltering, mark the span as discarded by zeroing its
%% timestamp; span/1 treats such spans as inactive.
deactivate_span(Span) ->
    Span#span{timestamp = 0}.
%% Copyright (c) 2013-2014 <NAME> <<EMAIL>>
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(rationals_SUITE).
-include_lib("common_test/include/ct.hrl").
-compile(export_all).
%% Common Test callbacks, delegated to the shared 'common' helper module.
all() ->
    common:all().

groups() ->
    common:groups(?MODULE).
%% 1/4 + 1/3 -> 7/12.
adding_quarters_to_thirds_test(_Config) ->
    Quarter = rationals:new(1, 4),
    Third = rationals:new(1, 3),
    Sum = rationals:add(Quarter, Third),
    7 = rationals:numerator(Sum),
    12 = rationals:denominator(Sum).

%% add/2 does not reduce the result: 3/4 + 5/12 -> 56/48 (not 7/6).
adding_three_quarters_to_five_twelves_test(_Config) ->
    A = rationals:new(3, 4),
    B = rationals:new(5, 12),
    Sum = rationals:add(A, B),
    56 = rationals:numerator(Sum),
    48 = rationals:denominator(Sum).

%% multiply/2 multiplies numerators and denominators without reducing:
%% 2/3 * 3/4 -> 6/12.
multiply_two_thirds_by_three_quarters_test(_Config) ->
    A = rationals:new(2, 3),
    B = rationals:new(3, 4),
    Product = rationals:multiply(A, B),
    6 = rationals:numerator(Product),
    12 = rationals:denominator(Product).

%% simplify/1 reduces a fraction to lowest terms: 63/462 -> 3/22.
simplify_test(_Config) ->
    A = rationals:new(63, 462),
    Simplified = rationals:simplify(A),
    3 = rationals:numerator(Simplified),
    22 = rationals:denominator(Simplified).
%% Strict and non-strict greater-than ordering of rationals.
is_greater_than_test(_Config) ->
    A = rationals:new(3, 4),
    B = rationals:new(2, 4),
    true = rationals:is_greater_than(A, B),
    true = rationals:is_greater_or_equal(A, B),
    false = rationals:is_greater_than(B, A),
    false = rationals:is_greater_than(A, A),
    true = rationals:is_greater_or_equal(A, A).

%% Equality compares values, not representations: 2/4 equals 1/2.
is_equal_to_test(_Config) ->
    A = rationals:new(3, 4),
    B = rationals:new(2, 4),
    C = rationals:new(1, 2),
    false = rationals:is_equal_to(A, B),
    false = rationals:is_equal_to(B, A),
    true = rationals:is_equal_to(A, A),
    true = rationals:is_equal_to(B, B),
    true = rationals:is_equal_to(B, C),
    true = rationals:is_equal_to(C, B).

%% Strict and non-strict less-than ordering of rationals.
is_less_than_test(_Config) ->
    A = rationals:new(3, 4),
    B = rationals:new(2, 4),
    false = rationals:is_less_than(A, B),
    true = rationals:is_less_or_equal(B, A),
    true = rationals:is_less_than(B, A).
%% 2/3 - 1/2 -> 1/6.
subtraction_test(_Config) ->
    A = rationals:new(2, 3),
    B = rationals:new(1, 2),
    Difference = rationals:subtract(A, B),
    1 = rationals:numerator(Difference),
    6 = rationals:denominator(Difference).

%% new/1 builds a whole number (denominator 1): 6 * 3/4 -> 18/4.
mixed_numbers_test(_Config) ->
    A = rationals:new(6),
    B = rationals:new(3, 4),
    Product = rationals:multiply(A, B),
    18 = rationals:numerator(Product),
    4 = rationals:denominator(Product).

%% reciprocal/1 swaps numerator and denominator: 3/4 -> 4/3.
reciprocal_test(_Config) ->
    A = rationals:new(3, 4),
    Reciprocal = rationals:reciprocal(A),
    4 = rationals:numerator(Reciprocal),
    3 = rationals:denominator(Reciprocal).

%% Division multiplies by the reciprocal, unreduced: 1/2 / 3/4 -> 4/6.
divide_test(_Config) ->
    A = rationals:new(1, 2),
    B = rationals:new(3, 4),
    R = rationals:divide(A, B),
    4 = rationals:numerator(R),
    6 = rationals:denominator(R).

%% from_float/1 converts floats to reduced rationals.
six_from_float_test(_Config) ->
    A = rationals:from_float(6.0),
    6 = rationals:numerator(A),
    1 = rationals:denominator(A).

point_seven_five_from_float_test(_Config) ->
    A = rationals:from_float(0.75),
    3 = rationals:numerator(A),
    4 = rationals:denominator(A).

point_five_from_float_test(_Config) ->
    A = rationals:from_float(0.5),
    1 = rationals:numerator(A),
    2 = rationals:denominator(A).
%% gcd/2 returns the greatest common divisor of two integers.
greatest_common_divisor_test(_Config) ->
    6 = rationals:gcd(48, 18).
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
% License for the specific language governing permissions and limitations under
% the License.
% This module implements the insertion into the vtree. It follows the normal
% R-tree rules and is implementation independent. It just calls out to
% modules for the choosing the correct subtree and splitting the nodes.
% NOTE vmx 2012-09-04: It might make sense to rename this module to
% geocouch_io, as it is more of storage specific to the backend (Apache
% CouchDB vs. Couchbase). I'm not really sure about it, as geocouch_file
% should abstract away from the differences between the backends.
-module(vtree_io).
-include("vtree.hrl").
-include("couch_db.hrl").
-export([write_node/3, read_node/2]).
-export([encode_mbb/1, decode_mbb/1]).
-export([treesize/1]).
-export([decode_dups/1]).
-ifdef(makecheck).
-compile(nowarn_export_all).
-compile(export_all).
-endif.
% keys and docIds have the same size
-define(KEY_BITS, 12).
-define(VALUE_BITS, 28).
-define(POINTER_BITS, 48).
-define(TREE_SIZE_BITS, 48).
-define(RED_BITS, 16).
% 12 bits would be enough for MbbO, but we like to have it padded to full bytes
-define(MBBO_BITS, 16).
-define(MAX_KEY_SIZE, ((1 bsl ?KEY_BITS) - 1)).
-define(MAX_VALUE_SIZE, ((1 bsl ?VALUE_BITS) - 1)).
-define(BLOB_SIZE, 24).
% Writes a node of the tree (which is a list of KV- or KP-nodes) to disk and
% returns a KP-node with the corresponding information. No checks on the number
% of nodes is performed, they are just written to disk as given. Potential
% node splitting needs to happen before.
-spec write_node(Fd :: file:io_device(), Nodes :: [#kv_node{} | #kp_node{}],
                 Less :: lessfun()) -> {ok, #kp_node{}}.
write_node(Fd, Nodes, Less) ->
    {Bin, TreeSize} = encode_node(Nodes),
    {ok, Pointer, Size} = geocouch_file:append_chunk(Fd, Bin),
    geocouch_file:flush(Fd),
    % The enclosing bounding box for all children
    Mbb = vtree_util:nodes_mbb(Nodes, Less),
    KpNode = #kp_node{
        key = Mbb,
        childpointer = Pointer,
        % Total disk usage: bytes of this chunk plus the subtree sizes
        % reported by encode_node/1.
        treesize = Size+TreeSize,
        % The original MBB is set to the MBB when the node is created
        mbb_orig = Mbb
    },
    {ok, KpNode}.
%% Read a serialized node chunk from disk and decode it back into a
%% list of KP- or KV-node records.
-spec read_node(Fd :: file:io_device(), Pointer :: non_neg_integer()) ->
    [#kp_node{} | #kv_node{}].
read_node(Fd, Pointer) ->
    {ok, EncodedNode} = geocouch_file:pread_chunk(Fd, Pointer),
    decode_node(EncodedNode).
% Returns the binary that will be stored, and the size of the subtree
% (resp. in case of a KV node, the number of bytes that were written during
% encoding), to make sure the calculation of the subtree size is correct.
% Dispatches on the first element: all nodes in the list are of the same
% kind, and the first byte of the output tags the node kind.
-spec encode_node(Nodes :: [#kp_node{} | #kv_node{}]) ->
    {binary(), non_neg_integer()}.
encode_node([#kv_node{}|_]=Nodes) ->
    encode_node(Nodes, {<<?KV_NODE:8>>, 0});
encode_node([#kp_node{}|_]=Nodes) ->
    encode_node(Nodes, {<<?KP_NODE:8>>, 0}).
% Encode each node as a pair of length-prefixed key and value blobs
% (?KEY_BITS bits for the key size, ?VALUE_BITS bits for the value size),
% accumulating the output binary and the total subtree size.
-spec encode_node(Nodes :: [#kp_node{} | #kv_node{}],
                  Acc :: {binary(), non_neg_integer()}) ->
    {binary(), non_neg_integer()}.
encode_node([], Acc) ->
    Acc;
encode_node([Node|T], {BinAcc, TreeSizeAcc}) ->
    BinK = encode_key(Node),
    SizeK = erlang:size(BinK),
    % NOTE(review): the guards use '<', so a size equal to
    % ?MAX_KEY_SIZE / ?MAX_VALUE_SIZE is rejected even though it fits in
    % the size field — confirm whether this off-by-one is intentional.
    case SizeK < ?MAX_KEY_SIZE of
        true -> ok;
        false -> throw({error, key_too_long})
    end,
    {BinV, TreeSize} = encode_value(Node),
    SizeV = erlang:iolist_size(BinV),
    case SizeV < ?MAX_VALUE_SIZE of
        true -> ok;
        false -> throw({error, value_too_big})
    end,
    Bin = <<SizeK:?KEY_BITS, SizeV:?VALUE_BITS, BinK/binary, BinV/binary>>,
    encode_node(T, {<<BinAcc/binary, Bin/binary>>, TreeSize + TreeSizeAcc}).
% Encode the key part of a pair: for KV nodes the MBB plus the document
% id, for KP nodes the MBB prefixed with the count of encoded numbers
% (two per dimension).
encode_key(#kv_node{key = Key, docid = DocId}) ->
    encode_key_docid(Key, DocId);
encode_key(#kp_node{key = Mbb}) ->
    BinMbb = encode_mbb(Mbb),
    <<(length(Mbb) * 2):16, BinMbb/binary>>.
% Encode the value of a Key-Value pair. It returns the encoded value and the
% size of the subtree (in case of a KV node, the number of bytes that were
% written during encoding). The treesize is used to calculate the disk usage
% of the data in the tree.
-spec encode_value(Node :: #kv_node{} | #kp_node{}) ->
    {Bin :: binary(), Size :: non_neg_integer()}.
encode_value(#kv_node{}=Node) ->
    #kv_node{
        body = Body,
        partition = PartId,
        geometry = Geom
    } = Node,
    % A KV value is the partition id followed by one or more
    % length-prefixed {Body, Geometry} blobs; multiple blobs encode
    % duplicate keys ({dups, ...}).
    Value = case Body of
        {dups, BodyDups} ->
            {dups, GeomDups} = Geom,
            lists:foldl(fun(BodyGeom, Acc) ->
                Bin = encode_body_geom(BodyGeom),
                <<Acc/binary, Bin/binary>>
            end, <<>>, lists:zip(BodyDups, GeomDups));
        _ ->
            encode_body_geom({Body, Geom})
    end,
    Value2 = <<PartId:16, Value/binary>>,
    {Value2, byte_size(Value2)};
encode_value(#kp_node{}=Node) ->
    #kp_node{
        childpointer = PointerNode,
        treesize = TreeSize,
        mbb_orig = MbbO
    } = Node,
    % A KP value is the child pointer, the subtree size and a
    % length-prefixed "reduce" blob which currently carries only the
    % original MBB.
    BinMbbO = encode_mbb(MbbO),
    NumMbbO = length(MbbO) * 2,
    BinReduce = <<NumMbbO:16, BinMbbO/binary>>,
    SizeReduce = byte_size(BinReduce),
    BinValue = <<PointerNode:?POINTER_BITS, TreeSize:?TREE_SIZE_BITS,
        SizeReduce:?RED_BITS, BinReduce:SizeReduce/binary>>,
    % Return `0` as no additional bytes are written. The bytes that will
    % be written are accounted when the whole chunk gets written.
    {BinValue, 0}.
%% Serialize one {Body, Geometry} blob pair as two ?BLOB_SIZE-bit length
%% prefixes followed by the raw payloads (inverse of decode_dups/1).
-spec encode_body_geom({binary(), binary()}) -> binary().
encode_body_geom({Body, Geom}) ->
    BodySize = byte_size(Body),
    GeomSize = byte_size(Geom),
    <<BodySize:?BLOB_SIZE, GeomSize:?BLOB_SIZE, Body/binary, Geom/binary>>.
% Decode a flat binary of length-prefixed {Body, Geometry} pairs (the
% format produced by encode_body_geom/1) back into a list of tuples.
-spec decode_dups(binary()) -> [{binary(), binary()}].
decode_dups(Dups) ->
    [{Body, Geom} ||
        <<BodySize:?BLOB_SIZE, GeomSize:?BLOB_SIZE,
          Body:BodySize/binary, Geom:GeomSize/binary>> <= Dups].
% Decode the value part of a KV-node pair: the partition id followed by
% one or more length-prefixed {Body, Geometry} blobs. A non-empty tail
% after the first blob means duplicate keys, returned as {dups, ...}.
-spec decode_kvnode_value(binary()) -> #kv_node{}.
decode_kvnode_value(<<PartId:16, BodySize:?BLOB_SIZE, GeomSize:?BLOB_SIZE,
        Body:BodySize/binary, Geom:GeomSize/binary,
        Dups/binary>>) ->
    case Dups of
        <<>> ->
            #kv_node{
                body = Body,
                partition = PartId,
                geometry = Geom,
                % XXX vmx 2014-07-20: What is the size used for?
                size = 0
            };
        _ ->
            {Bodies, Geoms} = lists:unzip(decode_dups(Dups)),
            #kv_node{
                body = {dups, [Body | Bodies]},
                partition = PartId,
                geometry = {dups, [Geom | Geoms]},
                % XXX vmx 2014-07-20: What is the size used for?
                size = 0
            }
    end.
% Decode the value of a KP-node pair: child pointer, subtree size and
% the length-prefixed reduce blob, which currently only carries the
% original MBB (the leading count of numbers is skipped).
-spec decode_kpnode_value(BinValue :: binary()) -> #kp_node{}.
decode_kpnode_value(BinValue) ->
    <<PointerNode:?POINTER_BITS, TreeSize:?TREE_SIZE_BITS,
        SizeReduce:?RED_BITS, Reduce:SizeReduce/binary>> = BinValue,
    <<_NumMbb:16, BinMbbO/binary>> = Reduce,
    MbbO = decode_mbb(BinMbbO),
    #kp_node{
        childpointer = PointerNode,
        treesize = TreeSize,
        mbb_orig = MbbO
    }.
% Decode a serialized node chunk; the first byte tags the node kind
% (?KV_NODE or ?KP_NODE, as written by encode_node/1) and selects the
% matching pair decoder.
-spec decode_node(BinValue :: binary()) -> [#kp_node{} | #kv_node{}].
decode_node(<<?KV_NODE:8, Rest/binary>>) ->
    decode_kvnode_pairs(Rest, []);
decode_node(<<?KP_NODE:8, Rest/binary>>) ->
    decode_kpnode_pairs(Rest, []).
% Decode KV-nodes key value pairs to an Erlang record. Each pair starts
% with a ?KEY_BITS/?VALUE_BITS size header followed by the key and
% value payloads (the format written by encode_node/2).
-spec decode_kvnode_pairs(BinValue :: binary(), Acc :: [#kv_node{}]) ->
    [#kv_node{}].
decode_kvnode_pairs(<<>>, Acc) ->
    lists:reverse(Acc);
% Matching the binary in the function (and not the body) is an optimization
decode_kvnode_pairs(<<SizeK:?KEY_BITS, SizeV:?VALUE_BITS,
        BinK:SizeK/binary, BinV:SizeV/binary,
        Rest/binary>>, Acc) ->
    {Mbb, DocId} = decode_key_docid(BinK),
    Node0 = decode_kvnode_value(BinV),
    Node = Node0#kv_node{key = Mbb, docid = DocId},
    decode_kvnode_pairs(Rest, [Node|Acc]).
% Decode KP-nodes key value pairs to an Erlang record. Same framing as
% decode_kvnode_pairs/2, but the key is an MBB prefixed with the count
% of encoded numbers.
-spec decode_kpnode_pairs(BinValue :: binary(), Acc :: [#kp_node{}]) ->
    [#kp_node{}].
decode_kpnode_pairs(<<>>, Acc) ->
    lists:reverse(Acc);
% Matching the binary in the function (and not the body) is an optimization
decode_kpnode_pairs(<<SizeK:?KEY_BITS, SizeV:?VALUE_BITS,
        BinK:SizeK/binary, BinV:SizeV/binary,
        Rest/binary>>, Acc) ->
    <<_NumMbb:16, BinMbb/binary>> = BinK,
    Mbb = decode_mbb(BinMbb),
    Node0 = decode_kpnode_value(BinV),
    Node = Node0#kp_node{key = Mbb},
    decode_kpnode_pairs(Rest, [Node|Acc]).
%% Serialize a multidimensional bounding box as consecutive
%% {Min, Max} pairs of 64-bit native floats.
-spec encode_mbb(Mbb :: mbb()) -> binary().
encode_mbb(Mbb) ->
    iolist_to_binary(
        [<<Min:64/native-float, Max:64/native-float>> || {Min, Max} <- Mbb]).
%% Serialize a KV-node key: the number of encoded floats (two per
%% dimension), the MBB itself, then the raw document id.
-spec encode_key_docid(Mbb :: mbb(), DocId :: binary()) -> binary().
encode_key_docid(Mbb, DocId) ->
    % Number of numbers is two times the dimension
    NumValues = length(Mbb) * 2,
    <<NumValues:16, (encode_mbb(Mbb))/binary, DocId/binary>>.
%% Deserialize a bounding box: every 16 bytes hold one {Min, Max} pair
%% of 64-bit native floats (inverse of encode_mbb/1).
-spec decode_mbb(BinMbb :: binary()) -> mbb().
decode_mbb(<<Encoded/binary>>) ->
    [{MinVal, MaxVal} ||
        <<MinVal:64/native-float, MaxVal:64/native-float>> <= Encoded].
%% Split a serialized KV-node key into its MBB and document id. The
%% leading 16-bit count is the number of encoded floats (8 bytes each).
-spec decode_key_docid(Key :: binary()) -> {mbb(), binary()}.
decode_key_docid(<<NumValues:16, KeyAndDocId/binary>>) ->
    MbbByteSize = NumValues * 8,
    <<BinMbb:MbbByteSize/binary, DocId/binary>> = KeyAndDocId,
    {decode_mbb(BinMbb), DocId}.
%% Disk usage of the subtree below a KP node; an empty tree (nil) uses
%% no space.
-spec treesize(KpNode :: #kp_node{} | nil) -> non_neg_integer().
treesize(nil) ->
    0;
treesize(#kp_node{treesize = TreeSize}) ->
    TreeSize.
%%%-------------------------------------------------------------------
%%% @author <NAME>
%%% @copyright (C) 2018 ACK CYFRONET AGH
%%% This software is released under the MIT license
%%% cited in 'LICENSE.txt'.
%%% @end
%%%-------------------------------------------------------------------
%%% @doc
%%% This module can be used to implement custom IdP entitlement parsing per IdP.
%%% Please refer to Onedata documentation to learn the entitlement format used
%%% in onedata. Custom parsers must return results in this format.
%%% https://onedata.org/#/home/documentation/doc/administering_onedata/openid_saml_configuration[custom-entitlement-parsers-advanced].html
%%%
%%% validation_examples/0 callback can be used to provide examples to be
%%% evaluated upon the start of Onezone to make sure that parser logic works
%%% as expected.
%%%
%%% Whenever a parser call crashes, stacktrace is written to the debug log and
%%% {error, malformed} is returned, which results in the entitlement being
%%% discarded.
%%% @end
%%%-------------------------------------------------------------------
-module(custom_entitlement_parser).
-behavior(onezone_plugin_behaviour).
-behavior(entitlement_parser_behaviour).
-include("auth/entitlement_mapping.hrl").
%% API
-export([type/0]).
-export([parse/3]).
-export([validation_examples/0]).
%%%===================================================================
%%% API functions
%%%===================================================================

%%--------------------------------------------------------------------
%% @doc
%% Returns the type of this plugin. Depending on the type, the plugin must
%% implement certain behaviour:
%%      entitlement_parser -> entitlement_parser_behaviour
%%      openid_plugin -> openid_plugin_behaviour
%%      attribute_mapper -> attribute_mapper_behaviour
%% @end
%%--------------------------------------------------------------------
%% NOTE(review): this '-callback' attribute looks misplaced — callback
%% specs belong in the behaviour-defining module, while this module
%% *implements* onezone_plugin_behaviour. Confirm before removing.
-callback type() -> entitlement_parser.
type() ->
    entitlement_parser.
%%--------------------------------------------------------------------
%% @doc
%% Parses an entitlement coming from given IdP into internal Onedata format.
%% Dispatches to a per-IdP parser; per the module doc, any crash here
%% (including an unmatched IdP or malformed entitlement) is translated
%% by the caller into {error, malformed}.
%% @end
%%--------------------------------------------------------------------
-spec parse(auth_config:idp(), entitlement_mapping:raw_entitlement(), auth_config:parser_config()) ->
    entitlement_mapping:idp_entitlement().
parse(egi, Entitlement, ParserConfig) ->
    parse_egi_entitlement(Entitlement, ParserConfig);
parse(plgrid, Entitlement, ParserConfig) ->
    parse_plgrid_entitlement(Entitlement, ParserConfig).
%%--------------------------------------------------------------------
%% @doc
%% Returns entitlement mapping validation examples to be evaluated during startup.
%% @end
%%--------------------------------------------------------------------
-spec validation_examples() ->
    [{auth_config:idp(), entitlement_mapping:raw_entitlement(), auth_config:parser_config(),
        entitlement_mapping:idp_entitlement() | {error, malformed}}].
validation_examples() ->
    %% Tag each IdP-specific example with its IdP and concatenate the two
    %% flat lists. (The previous code used lists:flatten/2, which only
    %% worked because flatten(DeepList, Tail) appends Tail to the
    %% flattened list — plain '++' states the intent directly.)
    EgiExamples = [{egi, Input, ParserConfig, Output}
        || {Input, ParserConfig, Output} <- egi_validation_examples()],
    PlGridExamples = [{plgrid, Input, ParserConfig, Output}
        || {Input, ParserConfig, Output} <- plgrid_validation_examples()],
    EgiExamples ++ PlGridExamples.
%%%===================================================================
%%% Internal functions
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns an #idp_entitlement{} that represents user's group membership for EGI.
%%
%% Group format:
%% urn:mace:egi.eu:group:<VO>[[:<GROUP>][:<SUBGROUP>*]][:role=<ROLE>]#<GROUP-AUTHORITY>
%% where:
%% <VO> is the name of the Virtual Organisation
%% <GROUP> is the name of a group in the identified <VO>;
%% specifying a group is optional
%% zero or more <SUBGROUP> components represent the hierarchy of subgroups
%% in the <GROUP>; specifying sub-groups is optional
%% the optional <ROLE> component is scoped to the rightmost (sub)group;
%% if no group information is specified, the role applies to the VO
%% <GROUP-AUTHORITY> is a non-empty string that indicates the authoritative
%% source for the entitlement value. For example, it can be the FQDN of
%% the group management system that is responsible for the identified
%% group membership information
%% @end
%%--------------------------------------------------------------------
-spec parse_egi_entitlement(entitlement_mapping:raw_entitlement(), auth_config:parser_config()) ->
entitlement_mapping:idp_entitlement().
parse_egi_entitlement(<<"urn:mace:egi.eu:group:", Group/binary>>, ParserConfig) ->
% Strip out the prefix standard for EGI
OriginGroupType = maps:get(originGroupType, ParserConfig, organization),
TopGroupType = maps:get(topGroupType, ParserConfig, team),
SubGroupsType = maps:get(subGroupsType, ParserConfig, team),
[GroupStructureEncoded, Origin] = binary:split(Group, <<"#">>),
% Replace plus sings with spaces
GroupStructure = binary:replace(GroupStructureEncoded, <<"+">>, <<" ">>, [global]),
GroupTokens = binary:split(GroupStructure, <<":">>, [global, trim_all]),
{Groups, RoleStr} = case lists:last(GroupTokens) of
<<"role=", Role/binary>> ->
{lists:sublist(GroupTokens, length(GroupTokens) - 1), Role};
_ ->
{GroupTokens, undefined}
end,
UserPrivileges = case RoleStr of
<<"member">> -> member;
<<"manager">> -> manager;
<<"admin">> -> admin;
<<"chair">> -> admin;
<<"owner">> -> admin;
_ -> member
end,
Path = lists:flatten([
#idp_group{type = OriginGroupType, name = Origin},
#idp_group{type = TopGroupType, name = hd(Groups)},
[#idp_group{type = SubGroupsType, name = G, privileges = member} || G <- tl(Groups)]
]),
#idp_entitlement{
idp = egi,
path = Path,
privileges = UserPrivileges
}.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns an #idp_entitlement{} that represents user's group membership for PlGrid.
%%
%% Group format:
%%      group-short-name(Group long name)
%% Examples:
%%      plgg-group-1(plgg-group-1)
%%      plgg-admins(PlGrid admin group)
%%
%% Long names are not always specified, in this case the short name is repeated.
%% This parser adds a space before the parenthesis for better readability, or
%% removes the part in parenthesis completely if the long name is a duplicated or
%% a substring of the short name.
%% @end
%%--------------------------------------------------------------------
-spec parse_plgrid_entitlement(entitlement_mapping:raw_entitlement(), auth_config:parser_config()) ->
    entitlement_mapping:idp_entitlement().
parse_plgrid_entitlement(RawEntitlement, ParserConfig) ->
    GroupType = maps:get(groupType, ParserConfig, team),
    % Split "short(long)" into its two parts; no parentheses means the
    % whole input is the group name.
    GroupName = case binary:split(RawEntitlement, [<<"(">>, <<")">>], [global, trim_all]) of
        [Name] ->
            Name;
        [ShortName, LongName] ->
            % Drop the long name if it is contained in the short name,
            % otherwise append it in parentheses for readability.
            case binary:match(ShortName, LongName) of
                nomatch -> <<ShortName/binary, " (", LongName/binary, ")">>;
                _ -> ShortName
            end
    end,
    % PlGrid entitlements are flat: a single group, member privileges.
    #idp_entitlement{
        idp = plgrid,
        path = [#idp_group{type = GroupType, name = GroupName}],
        privileges = member
    }.
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns entitlement mapping validation examples for EGI.
%% Covers: role mapping (member/manager/admin/chair/owner and
%% unrecognized roles defaulting to member), subgroups, custom group
%% type configuration, and malformed inputs that must be rejected.
%% @end
%%--------------------------------------------------------------------
-spec egi_validation_examples() ->
    [{entitlement_mapping:raw_entitlement(), auth_config:parser_config(), entitlement_mapping:idp_entitlement() | {error, malformed}}].
egi_validation_examples() -> [
    % "vm_operator" is not a recognized role -> member privileges.
    {
        <<"urn:mace:egi.eu:group:fedcloud.egi.eu:role=vm_operator#aai.egi.eu">>,
        #{},
        #idp_entitlement{idp = egi, path = [
            #idp_group{type = organization, name = <<"aai.egi.eu">>, privileges = member},
            #idp_group{type = team, name = <<"fedcloud.egi.eu">>, privileges = member}
        ], privileges = member}
    },
    % Subgroup in the path; explicit "member" role.
    {
        <<"urn:mace:egi.eu:group:fedcloud.egi.eu:child:role=member#sso.egi.eu">>,
        #{},
        #idp_entitlement{idp = egi, path = [
            #idp_group{type = organization, name = <<"sso.egi.eu">>, privileges = member},
            #idp_group{type = team, name = <<"fedcloud.egi.eu">>, privileges = member},
            #idp_group{type = team, name = <<"child">>, privileges = member}
        ], privileges = member}
    },
    % Custom group types taken from the parser config; "owner" -> admin.
    {
        <<"urn:mace:egi.eu:group:fedcloud.egi.eu:child:role=owner#aai.egi.eu">>,
        #{
            originGroupType => unit,
            topGroupType => team,
            subGroupsType => role_holders
        },
        #idp_entitlement{idp = egi, path = [
            #idp_group{type = unit, name = <<"aai.egi.eu">>, privileges = member},
            #idp_group{type = team, name = <<"fedcloud.egi.eu">>, privileges = member},
            #idp_group{type = role_holders, name = <<"child">>, privileges = member}
        ], privileges = admin}
    },
    {
        <<"urn:mace:egi.eu:group:egi-engage-members:role=manager#sso.egi.eu">>,
        #{},
        #idp_entitlement{idp = egi, path = [
            #idp_group{type = organization, name = <<"sso.egi.eu">>, privileges = member},
            #idp_group{type = team, name = <<"egi-engage-members">>, privileges = member}
        ], privileges = manager}
    },
    {
        <<"urn:mace:egi.eu:group:egi-engage-members:role=admin#aai.egi.eu">>,
        #{},
        #idp_entitlement{idp = egi, path = [
            #idp_group{type = organization, name = <<"aai.egi.eu">>, privileges = member},
            #idp_group{type = team, name = <<"egi-engage-members">>, privileges = member}
        ], privileges = admin}
    },
    % "chair" also maps to admin privileges.
    {
        <<"urn:mace:egi.eu:group:egi-engage-members:role=chair#other.origin.com">>,
        #{},
        #idp_entitlement{idp = egi, path = [
            #idp_group{type = organization, name = <<"other.origin.com">>, privileges = member},
            #idp_group{type = team, name = <<"egi-engage-members">>, privileges = member}
        ], privileges = admin}
    },
    % Malformed inputs: wrong urn segment, missing '#origin', no prefix.
    {
        <<"urn:mace:egi.eu:bad-prefix:egi-engage-members:role=chair#other.origin.com">>,
        #{},
        {error, malformed}
    },
    {
        <<"urn:mace:egi.eu:group:group-without-origin">>,
        #{},
        {error, malformed}
    },
    {
        <<"unconfromant-group-name">>,
        #{},
        {error, malformed}
    }
].
%%--------------------------------------------------------------------
%% @private
%% @doc
%% Returns entitlement mapping validation examples for PlGrid.
%% Each PlGrid entitlement maps to a single-group path with member
%% privileges, so the expected results are built by a shared helper.
%% @end
%%--------------------------------------------------------------------
-spec plgrid_validation_examples() ->
    [{entitlement_mapping:raw_entitlement(), auth_config:parser_config(), entitlement_mapping:idp_entitlement() | {error, malformed}}].
plgrid_validation_examples() ->
    Expected = fun(GroupType, GroupName) ->
        #idp_entitlement{idp = plgrid, path = [
            #idp_group{type = GroupType, name = GroupName, privileges = member}
        ], privileges = member}
    end,
    [
        % Long name identical to the short name -> dropped.
        {<<"plgg-group-1(plgg-group-1)">>, #{},
            Expected(team, <<"plgg-group-1">>)},
        {<<"plgg-group-1(plgg-group-1)">>, #{groupType => unit},
            Expected(unit, <<"plgg-group-1">>)},
        % Long name is a substring of the short name -> dropped.
        {<<"plgg-team-alpha-research(plgg-team-alpha)">>, #{},
            Expected(team, <<"plgg-team-alpha-research">>)},
        {<<"plgg-team-alpha-research(plgg-team-alpha)">>, #{groupType => unit},
            Expected(unit, <<"plgg-team-alpha-research">>)},
        % Distinct long name -> appended in parentheses.
        {<<"plgg-admin-group(Longer description)">>, #{},
            Expected(team, <<"plgg-admin-group (Longer description)">>)},
        {<<"plgg-admin-group(Longer description)">>, #{groupType => role_holders},
            Expected(role_holders, <<"plgg-admin-group (Longer description)">>)}
    ].
%% Copyright (C) 2011-2013 IMVU Inc.
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy of
%% this software and associated documentation files (the "Software"), to deal in
%% the Software without restriction, including without limitation the rights to
%% use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
%% of the Software, and to permit persons to whom the Software is furnished to do
%% so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in all
%% copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
%% SOFTWARE.
-module(hash).
-export([is_power_of_two/1, bits/1, worker_for_key/2]).
-compile(export_all).
-include("dtm_redis.hrl").
%% is_power_of_two/1
%% @doc True when N is a positive power of two.
%% A power of two has exactly one bit set, so N band (N - 1) clears it to 0.
is_power_of_two(N) when N =< 0 ->
    false;
is_power_of_two(N) ->
    N band (N - 1) =:= 0.
%% bit_count/1
%% @doc Number of binary digits required to represent Pow;
%% zero for any value below one.
bit_count(N) ->
    bit_count(N, 0).

%% Tail-recursive helper: shift right until the value is exhausted,
%% counting one bit per shift.
bit_count(N, Acc) when N < 1 ->
    Acc;
bit_count(N, Acc) ->
    bit_count(N bsr 1, Acc + 1).
%% bits/1
%% @doc Number of bits needed to address Pow distinct values,
%% i.e. ceil(log2(Pow)); bits(1) =:= 0.
bits(Pow) ->
    bit_count(Pow - 1).
%% bytes_from_bits/1
%% @doc Number of whole bytes needed to hold NumBits bits.
bytes_from_bits(NumBits) ->
    case {NumBits div 8, NumBits rem 8} of
        {0, 0} -> 0;
        {FullBytes, 0} -> FullBytes;
        {FullBytes, _Partial} -> FullBytes + 1
    end.

%% hash_to_int/2, hash_to_int/3
%% @doc Interpret up to Limit leading bytes of a byte list as a
%% little-endian unsigned integer (first byte is least significant).
hash_to_int(Bytes, Limit) ->
    hash_to_int(Bytes, 0, Limit).

hash_to_int([], _Index, _Limit) ->
    0;
hash_to_int(_Bytes, Limit, Limit) ->
    0;
hash_to_int([B | Rest], Index, Limit) ->
    B bsl (Index * 8) + hash_to_int(Rest, Index + 1, Limit).

%% bucket/2
%% @doc Map Key onto a bucket number in 0..(2^NumBits - 1) by taking the
%% low NumBits bits of the little-endian MD5 digest of the key.
bucket(Key, NumBits) when is_atom(Key) ->
    bucket(atom_to_list(Key), NumBits);
bucket(Key, NumBits) ->
    Digest = binary_to_list(erlang:md5(Key)),
    Unmasked = hash_to_int(Digest, bytes_from_bits(NumBits)),
    Unmasked band ((1 bsl NumBits) - 1).
%% worker_for_key/2
%% @doc Resolve the worker pid responsible for Key: hash the key into a
%% bucket using the configured number of bucket bits, then look the bucket
%% up in the #buckets{} map. Crashes with badmatch if no worker is mapped
%% to the bucket (a configuration error).
worker_for_key(Key, Buckets) ->
    Bucket = bucket(Key, Buckets#buckets.bits),
    {ok, Pid} = dict:find(Bucket, Buckets#buckets.map),
    Pid.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

%% Powers of two are recognised; zero and non-powers are rejected.
is_power_of_two_test() ->
    lists:foreach(fun(I) -> false = is_power_of_two(I) end, [0, 3, 5, 6, 7, 9]),
    lists:foreach(fun(I) -> true = is_power_of_two(I) end, [1, 2, 4, 8]).

bit_count_test() ->
    [0, 0, 1, 2, 2, 3, 3] = [bit_count(X) || X <- lists:seq(-1, 5)].

bits_test() ->
    [0, 0, 1, 2, 2, 3, 3] = [bits(X) || X <- lists:seq(0, 6)].

bytes_from_bits_test() ->
    [0, 1, 1, 1, 2, 2, 2, 3] = [bytes_from_bits(X) || X <- [0, 1, 7, 8, 9, 15, 16, 17]].

%% Bytes are combined little-endian; a Total beyond the list length
%% yields the same value as consuming the whole list.
hash_to_int_test() ->
    Data = [170, 85, 204, 51],
    [0, 170, 21930, 13391274, 869029290, 869029290] = [hash_to_int(Data, X) || X <- [0, 1, 2, 3, 4, 5]].

bucket_test() ->
    [0, 0, 0, 4, 12, 12, 44, 44, 172] = [bucket("foo", X) || X <- lists:seq(0, 8)],
    [428, 428, 1452, 3500, 7596, 15788, 15788, 48556] = [bucket("foo", X) || X <- lists:seq(9, 16)].
-endif. | apps/dtm_redis/src/hash.erl | 0.548915 | 0.422445 | hash.erl | starcoder |
%%
%% Copyright (c) 2015-2016 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Bounded Counter CRDT.
%% Modeled as a pair where the first component is a
%% PNCounter and the second component is a GMap.
%%
%% @reference <NAME> et al.
%% Extending Eventually Consistent Cloud Databases for
%% Enforcing Numeric Invariants (2015)
%% [http://arxiv.org/abs/1503.09052]
%%
%% @reference <NAME>
%% delta-enabled-crdts C++ library
%% [https://github.com/CBaquero/delta-enabled-crdts]
-module(state_bcounter).
-author("<NAME> <<EMAIL>>").
-include("state_type.hrl").
-behaviour(type).
-behaviour(state_type).
-define(TYPE, ?MODULE).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/0, new/1]).
-export([mutate/3, delta_mutate/3, merge/2]).
-export([query/1, equal/2, is_bottom/1, is_inflation/2, is_strict_inflation/2, irreducible_is_strict_inflation/2]).
-export([join_decomposition/1, delta/3]).
-export([encode/2, decode/2]).
-export_type([state_bcounter/0, state_bcounter_op/0]).
-opaque state_bcounter() :: {?TYPE, payload()}.
-type payload() :: {?PNCOUNTER_TYPE:state_pncounter(), ?GMAP_TYPE:state_gmap()}.
-type state_bcounter_op() :: {move, pos_integer(), term()} |
increment |
decrement.
%% @doc Create a new, empty `state_bcounter()'
%% The pair is a PNCounter (the counter value) and a GMap over a max-int
%% lattice (cumulative permission transfers between replicas).
-spec new() -> state_bcounter().
new() ->
    {?TYPE, {?PNCOUNTER_TYPE:new(), ?GMAP_TYPE:new([?MAX_INT_TYPE])}}.

%% @doc Create a new, empty `state_bcounter()'
%% Argument list must be empty; present for interface uniformity.
-spec new([term()]) -> state_bcounter().
new([]) ->
    new().
%% @doc Mutate a `state_bcounter()'.
%% Delegates to the generic state_type mutation, which applies
%% delta_mutate/3 and merges the delta into the current state.
-spec mutate(state_bcounter_op(), type:id(), state_bcounter()) ->
    {ok, state_bcounter()} | {error, {precondition, non_enough_permissions}}.
mutate(Op, Actor, {?TYPE, _BCounter}=CRDT) ->
    state_type:mutate(Op, Actor, CRDT).
%% @doc Delta-mutate a `state_bcounter()'.
%% The first argument can be:
%%     - `{move, pos_integer(), term()}', that moves permissions to
%%     decrement to another replica (if it has enough permissions)
%%     - `increment' which can always happen
%%     - `decrement' which can happen when the replica has enough
%%     local increments, or has permissions received from
%%     other replicas
-spec delta_mutate(state_bcounter_op(), type:id(), state_bcounter()) ->
    {ok, state_bcounter()} | {error, {precondition, non_enough_permissions}}.
delta_mutate({move, Count, To}, Actor, {?TYPE, {PNCounter, GMap}}=BCounter) ->
    {?GMAP_TYPE, {?MAX_INT_TYPE, Map0}} = GMap,
    %% A move is only allowed when the sender currently holds at least
    %% Count decrement permissions.
    case Count =< permissions(BCounter, Actor) of
        true ->
            Current = case orddict:find({Actor, To}, Map0) of
                {ok, Value} ->
                    Value;
                error ->
                    0
            end,
            %% Transfer entries are cumulative (max-int lattice), so the
            %% delta records Current + Count under the {Actor, To} key.
            Map1 = orddict:store({Actor, To}, Current + Count, orddict:new()),
            Delta = {state_type:new(PNCounter), {?GMAP_TYPE, {?MAX_INT_TYPE, Map1}}},
            {ok, {?TYPE, Delta}};
        false ->
            {error, {precondition, non_enough_permissions}}
    end;
delta_mutate(increment, Actor, {?TYPE, {PNCounter, GMap}}) ->
    %% Increments always succeed and only touch the PNCounter component.
    {ok, IncDelta} = ?PNCOUNTER_TYPE:delta_mutate(increment, Actor, PNCounter),
    Delta = {IncDelta, state_type:new(GMap)},
    {ok, {?TYPE, Delta}};
delta_mutate(decrement, Actor, {?TYPE, {PNCounter, GMap}}=BCounter) ->
    %% A decrement requires at least one available permission.
    case 0 < permissions(BCounter, Actor) of
        true ->
            {ok, DecDelta} = ?PNCOUNTER_TYPE:delta_mutate(decrement, Actor, PNCounter),
            Delta = {DecDelta, state_type:new(GMap)},
            {ok, {?TYPE, Delta}};
        false ->
            {error, {precondition, non_enough_permissions}}
    end.
%% @doc Returns the number of permissions a given replica has.
%% This is calculated as:
%%     - the number of increments minus the number of decrements
%%     - plus permissions received from other replicas
%%     - minus permissions given away
permissions({?TYPE, {{?PNCOUNTER_TYPE, PNCounter},
                     {?GMAP_TYPE, {?MAX_INT_TYPE, Transfers}}}}, Actor) ->
    Local = case orddict:find(Actor, PNCounter) of
        {ok, {Incs, Decs}} ->
            Incs - Decs;
        error ->
            0
    end,
    %% Fold over all recorded {From, To} -> Amount transfers, adding those
    %% addressed to Actor and subtracting those originating from Actor.
    orddict:fold(
        fun({From, To}, Amount, Acc0) ->
            Acc1 = case To == Actor of
                true -> Acc0 + Amount;
                false -> Acc0
            end,
            case From == Actor of
                true -> Acc1 - Amount;
                false -> Acc1
            end
        end,
        Local,
        Transfers
    ).
%% @doc Returns the value of the `state_bcounter()'.
%% The value of the `state_bcounter()' is the
%% value of the first component, the `state_pncounter()'.
-spec query(state_bcounter()) -> non_neg_integer().
query({?TYPE, {PNCounter, _GMap}}) ->
    ?PNCOUNTER_TYPE:query(PNCounter).

%% @doc Merge two `state_bcounter()'.
%% The result is the merge of both `state_pncounter()'
%% in the first component, and the merge of both
%% `state_gmap()' in the second component.
-spec merge(state_bcounter(), state_bcounter()) -> state_bcounter().
merge({?TYPE, _}=CRDT1, {?TYPE, _}=CRDT2) ->
    MergeFun = fun({?TYPE, {PNCounter1, GMap1}}, {?TYPE, {PNCounter2, GMap2}}) ->
        PNCounter = ?PNCOUNTER_TYPE:merge(PNCounter1, PNCounter2),
        GMap = ?GMAP_TYPE:merge(GMap1, GMap2),
        {?TYPE, {PNCounter, GMap}}
    end,
    state_type:merge(CRDT1, CRDT2, MergeFun).
%% @doc Equality for `state_bcounter()'.
%% Two `state_bcounter()' are equal if each
%% component is `equal/2'.
-spec equal(state_bcounter(), state_bcounter()) -> boolean().
equal({?TYPE, {PNCounter1, GMap1}}, {?TYPE, {PNCounter2, GMap2}}) ->
    ?PNCOUNTER_TYPE:equal(PNCounter1, PNCounter2) andalso
    ?GMAP_TYPE:equal(GMap1, GMap2).

%% @doc Some BCounter state is bottom if both components
%% of the pair (the PNCounter and the GMap)
%% are bottom.
-spec is_bottom(state_bcounter()) -> boolean().
is_bottom({?TYPE, {PNCounter, GMap}}) ->
    ?PNCOUNTER_TYPE:is_bottom(PNCounter) andalso
    ?GMAP_TYPE:is_bottom(GMap).
%% @doc Given two `state_bcounter()', check if the second is an
%% inflation of the first.
%% We have an inflation if we have an inflation component wise.
-spec is_inflation(state_bcounter(), state_bcounter()) -> boolean().
is_inflation({?TYPE, {PNCounter1, GMap1}}, {?TYPE, {PNCounter2, GMap2}}) ->
    ?PNCOUNTER_TYPE:is_inflation(PNCounter1, PNCounter2) andalso
    ?GMAP_TYPE:is_inflation(GMap1, GMap2).

%% @doc Check for strict inflation.
%% In pairs we have strict inflations if we have component wise
%% inflations and at least one strict inflation in the composition.
%%
%% @reference <NAME>, <NAME>, <NAME> and <NAME>
%%     Composition of State-based CRDTs (2015)
%%     [http://haslab.uminho.pt/cbm/files/crdtcompositionreport.pdf]
%%
-spec is_strict_inflation(state_bcounter(), state_bcounter()) -> boolean().
is_strict_inflation({?TYPE, {PNCounter1, GMap1}}, {?TYPE, {PNCounter2, GMap2}}) ->
    (?PNCOUNTER_TYPE:is_strict_inflation(PNCounter1, PNCounter2)
        andalso
        ?GMAP_TYPE:is_inflation(GMap1, GMap2))
    orelse
    (?PNCOUNTER_TYPE:is_inflation(PNCounter1, PNCounter2)
        andalso
        ?GMAP_TYPE:is_strict_inflation(GMap1, GMap2)).

%% @doc Check for irreducible strict inflation.
%% Delegates to the generic implementation in state_type.
-spec irreducible_is_strict_inflation(state_bcounter(), state_bcounter()) ->
    boolean().
irreducible_is_strict_inflation({?TYPE, _}=Irreducible, {?TYPE, _}=CRDT) ->
    state_type:irreducible_is_strict_inflation(Irreducible, CRDT).
%% @doc Join decomposition for `state_bcounter()'.
%% @todo Currently returns the whole state as a single (trivial) decomposition.
-spec join_decomposition(state_bcounter()) -> [state_bcounter()].
join_decomposition({?TYPE, _}=CRDT) ->
    [CRDT].

%% @doc Delta calculation for `state_bcounter()'.
%% Delegates to the generic delta computation in state_type.
-spec delta(state_type:delta_method(), state_bcounter(), state_bcounter()) ->
    state_bcounter().
delta(Method, {?TYPE, _}=A, {?TYPE, _}=B) ->
    state_type:delta(Method, A, B).
%% @doc Serialize a `state_bcounter()' using Erlang external term format.
-spec encode(state_type:format(), state_bcounter()) -> binary().
encode(erlang, {?TYPE, _}=CRDT) ->
    erlang:term_to_binary(CRDT).

%% @doc Deserialize a binary produced by encode/2; asserts the decoded
%% term is tagged with this CRDT type.
%% NOTE(review): binary_to_term/1 on untrusted input can allocate atoms and
%% funs; presumably only internally-produced binaries reach here — confirm.
-spec decode(state_type:format(), binary()) -> state_bcounter().
decode(erlang, Binary) ->
    {?TYPE, _} = CRDT = erlang:binary_to_term(Binary),
    CRDT.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).

new_test() ->
    ?assertEqual({?TYPE, {?PNCOUNTER_TYPE:new(), ?GMAP_TYPE:new([?MAX_INT_TYPE])}}, new()).

query_test() ->
    BCounter0 = new(),
    BCounter1 = {?TYPE, {{?PNCOUNTER_TYPE, [{1, {2, 0}}, {2, {5, 0}}, {3, {10, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}},
    ?assertEqual(0, query(BCounter0)),
    ?assertEqual(17, query(BCounter1)).

%% Increments require no permissions and only touch the PNCounter component.
delta_increment_test() ->
    BCounter0 = new(),
    {ok, {?TYPE, Delta1}} = delta_mutate(increment, 1, BCounter0),
    BCounter1 = merge({?TYPE, Delta1}, BCounter0),
    {ok, {?TYPE, Delta2}} = delta_mutate(increment, 1, BCounter1),
    BCounter2 = merge({?TYPE, Delta2}, BCounter1),
    {ok, {?TYPE, Delta3}} = delta_mutate(increment, 2, BCounter2),
    BCounter3 = merge({?TYPE, Delta3}, BCounter2),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {1, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, {?TYPE, Delta1}),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {1, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, BCounter1),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {2, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, {?TYPE, Delta2}),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {2, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, BCounter2),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{2, {1, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, {?TYPE, Delta3}),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {2, 0}}, {2, {1, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, BCounter3).

add_test() ->
    BCounter0 = new(),
    {ok, BCounter1} = mutate(increment, 1, BCounter0),
    {ok, BCounter2} = mutate(increment, 1, BCounter1),
    {ok, BCounter3} = mutate(increment, 2, BCounter2),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {1, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, BCounter1),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {2, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, BCounter2),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {2, 0}}, {2, {1, 0}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, BCounter3).

%% Decrements are only permitted while the actor holds permissions.
delta_decrement_test() ->
    Actor = 1,
    BCounter0 = new(),
    {error, _} = delta_mutate(decrement, Actor, BCounter0),
    {ok, BCounter1} = mutate(increment, Actor, BCounter0),
    {ok, {?TYPE, Delta1}} = delta_mutate(decrement, Actor, BCounter1),
    BCounter2 = merge({?TYPE, Delta1}, BCounter1),
    {error, _} = delta_mutate(decrement, Actor, BCounter2),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{Actor, {1, 1}}]}, {?GMAP_TYPE, {?MAX_INT_TYPE, []}}}}, BCounter2).

%% Moving permissions updates the transfer map and enables decrements on
%% the receiving replica while blocking them on the sender.
delta_move_test() ->
    From = 1,
    To = 2,
    BCounter0 = new(),
    {error, _} = delta_mutate({move, 1, To}, From, BCounter0),
    {ok, BCounter1} = mutate(increment, From, BCounter0),
    {ok, BCounter2} = mutate(increment, From, BCounter1),
    {error, _} = delta_mutate({move, 3, To}, From, BCounter0),
    {error, _} = delta_mutate(decrement, To, BCounter2),
    {ok, {?TYPE, Delta1}} = delta_mutate({move, 2, To}, From, BCounter2),
    BCounter3 = merge({?TYPE, Delta1}, BCounter2),
    {error, _} = delta_mutate({move, 1, To}, From, BCounter3),
    {ok, {?TYPE, Delta2}} = delta_mutate(decrement, To, BCounter3),
    BCounter4 = merge({?TYPE, Delta2}, BCounter3),
    {error, _} = delta_mutate(decrement, From, BCounter4),
    {error, _} = delta_mutate({move, 2, From}, To, BCounter4),
    {ok, {?TYPE, Delta3}} = delta_mutate({move, 1, From}, To, BCounter4),
    BCounter5 = merge({?TYPE, Delta3}, BCounter4),
    {ok, {?TYPE, Delta4}} = delta_mutate(decrement, From, BCounter5),
    BCounter6 = merge({?TYPE, Delta4}, BCounter5),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{From, {2, 0}}]},
                          {?GMAP_TYPE, {?MAX_INT_TYPE, [{{From, To}, 2}]}}}}, BCounter3),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{From, {2, 1}}, {To, {0, 1}}]},
                          {?GMAP_TYPE, {?MAX_INT_TYPE, [{{From, To}, 2}, {{To, From}, 1}]}}}}, BCounter6).

merge_deltas_test() ->
    GMap = {?GMAP_TYPE, {?MAX_INT_TYPE, []}},
    BCounter1 = {?TYPE, {{?PNCOUNTER_TYPE, [{1, {2, 0}}, {2, {1, 0}}]}, GMap}},
    Delta1 = {?TYPE, {{?PNCOUNTER_TYPE, [{1, {4, 0}}]}, GMap}},
    Delta2 = {?TYPE, {{?PNCOUNTER_TYPE, [{2, {1, 17}}]}, GMap}},
    BCounter2 = merge(Delta1, BCounter1),
    BCounter3 = merge(BCounter1, Delta1),
    DeltaGroup = merge(Delta1, Delta2),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {4, 0}}, {2, {1, 0}}]}, GMap}}, BCounter2),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {4, 0}}, {2, {1, 0}}]}, GMap}}, BCounter3),
    ?assertEqual({?TYPE, {{?PNCOUNTER_TYPE, [{1, {4, 0}}, {2, {1, 17}}]}, GMap}}, DeltaGroup).

join_decomposition_test() ->
    %% @todo
    ok.

encode_decode_test() ->
    GMap = {?GMAP_TYPE, {?MAX_INT_TYPE, []}},
    Counter = {?TYPE, {{?PNCOUNTER_TYPE, [{1, {4, 0}}, {2, {1, 0}}]}, GMap}},
    Binary = encode(erlang, Counter),
    ECounter = decode(erlang, Binary),
    ?assertEqual(Counter, ECounter).
-endif. | _build/default/lib/types/src/state_bcounter.erl | 0.669421 | 0.42173 | state_bcounter.erl | starcoder |
%% -------------------------------------------------------------------
%%
%% riak_core_coverage_plan: Create a plan to cover a minimal set of VNodes.
%%
%% Copyright (c) 2007-2011 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc A module to calculate a plan to cover a minimal set of VNodes.
%% There is also an option to specify a number of primary VNodes
%% from each preference list to use in the plan.
-module(riak_core_coverage_plan).
-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
-export([prop_cover_partitions/0,
prop_distribution/0,
prop_find_coverage_partitions/0,
prop_pvc/0]).
-endif.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% API
-export([create_plan/5]).
-type index() :: chash:index_as_int().
-type req_id() :: non_neg_integer().
-type coverage_vnodes() :: [{index(), node()}].
-type vnode_filters() :: [{node(), [{index(), [index()]}]}].
-type coverage_plan() :: {coverage_vnodes(), vnode_filters()}.
%% ===================================================================
%% Public API
%% ===================================================================
%% @doc Create a coverage plan to distribute work to a set
%%      covering VNodes around the ring.
%% VNodeSelector: `all' requires full coverage; `allup' accepts partial
%% coverage built from whatever vnodes are available.
-spec create_plan(all | allup, pos_integer(), pos_integer(),
                  req_id(), atom()) ->
         {error, term()} | coverage_plan().
create_plan(VNodeSelector, NVal, PVC, ReqId, Service) ->
    {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
    PartitionCount = chashbin:num_partitions(CHBin),
    {ok, Ring} = riak_core_ring_manager:get_my_ring(),
    %% Create a coverage plan with the requested primary
    %% preference list VNode coverage.
    %% Get a list of the VNodes owned by any unavailable nodes
    Members = riak_core_ring:all_members(Ring),
    NonCoverageNodes = [Node || Node <- Members,
                                riak_core_ring:get_member_meta(Ring, Node, participate_in_coverage) == false],
    DownVNodes = [Index ||
                     {Index, _Node}
                         <- riak_core_apl:offline_owners(Service, CHBin, NonCoverageNodes)],
    RingIndexInc = chash:ring_increment(PartitionCount),
    %% Convert vnode ring indexes into keyspace numbers (0..PartitionCount-1).
    UnavailableKeySpaces = [(DownVNode div RingIndexInc) || DownVNode <- DownVNodes],
    %% Create function to map coverage keyspaces to
    %% actual VNode indexes and determine which VNode
    %% indexes should be filtered.
    CoverageVNodeFun =
        fun({Position, KeySpaces}, Acc) ->
            %% Calculate the VNode index using the
            %% ring position and the increment of
            %% ring index values.
            VNodeIndex = (Position rem PartitionCount) * RingIndexInc,
            Node = chashbin:index_owner(VNodeIndex, CHBin),
            CoverageVNode = {VNodeIndex, Node},
            %% A vnode covering fewer than NVal keyspaces must filter its
            %% results to only the keyspaces assigned to it in the plan.
            case length(KeySpaces) < NVal of
                true ->
                    %% Get the VNode index of each keyspace to
                    %% use to filter results from this VNode.
                    KeySpaceIndexes = [(((KeySpaceIndex+1) rem
                                         PartitionCount) * RingIndexInc) ||
                                          KeySpaceIndex <- KeySpaces],
                    {CoverageVNode, [{VNodeIndex, KeySpaceIndexes} | Acc]};
                false ->
                    {CoverageVNode, Acc}
            end
        end,
    CoveragePlanFun =
        case application:get_env(riak_core, legacy_coverage_planner, false) of
            true ->
                % Safety net for refactoring, we can still go back to old
                % function if necessary. Note 35 x performance degradation
                % with this function with ring_size of 1024
                fun find_coverage/5;
            false ->
                fun initiate_plan/5
        end,
    %% The ReqId value serves as a tiebreaker in the
    %% compare_next_vnode function and is used to distribute
    %% work to different sets of VNodes.
    CoverageResult =
        CoveragePlanFun(ReqId,
                        NVal,
                        PartitionCount,
                        UnavailableKeySpaces,
                        lists:min([PVC, NVal])),
    case CoverageResult of
        {ok, CoveragePlan} ->
            %% Assemble the data structures required for
            %% executing the coverage operation.
            lists:mapfoldl(CoverageVNodeFun, [], CoveragePlan);
        {insufficient_vnodes_available, _KeySpace, PartialCoverage} ->
            case VNodeSelector of
                allup ->
                    %% The allup indicator means generate a coverage plan
                    %% for any available VNodes.
                    lists:mapfoldl(CoverageVNodeFun, [], PartialCoverage);
                all ->
                    {error, insufficient_vnodes_available}
            end
    end.
%% ====================================================================
%% Internal functions
%% ====================================================================

%% One entry of a coverage plan: {VnodeId, Partitions} where Partitions
%% are the keyspace numbers this vnode answers for in the plan.
-type vnode_covers() :: {non_neg_integer(), list(non_neg_integer())}.

%% @doc Produce a coverage plan
%% The coverage plan should include all partitions at least PVC times
%% Inputs:
%% ReqId - a random integer identifier for the request which will be used to
%% provide a randomised input to vary the plans.
%% NVal - the n_val for the bucket used in the query
%% PartitionCount - ring_size, should be the length of AllVnodes
%% UnavailableVnodes - any primary vnodes not available, as either the node is
%% down, or set not to participate_in_coverage
%% PVC - Primary Vnode Count, in effect the r value for the query
-spec initiate_plan(non_neg_integer(),
                    pos_integer(),
                    pos_integer(),
                    list(non_neg_integer()),
                    pos_integer()) ->
        {ok, list(vnode_covers())} |
        {insufficient_vnodes_available,
            list(non_neg_integer()),
            list(vnode_covers())}.
initiate_plan(ReqId, NVal, PartitionCount, UnavailableVnodes, PVC) ->
    %% Order the vnodes for the fold. Will number each vnode in turn between
    %% 0 and NVal - 1. Then sort by this Offset, so that by default we visit
    %% every NVal'th vnode first, then offset and repeat.
    %%
    %% The use of the offset will tend to give an optimal coverage plan in the
    %% happy-day scenario (when all vnodes are available). There is a balance
    %% between time of calculation, and how optimal the plan needs to be. The
    %% plan is not necessarily optimal (in terms of involving the fewest number
    %% of vnodes). Nor does it consider location (trying to query as a local
    %% to the planning node as possible).
    %%
    %% There does need to be an even spread of load across plans. To achieve
    %% this we don't treat the ring as a list always starting at 0, instead the
    %% ring is first split at a random place (derived from ReqId). Otherwise,
    %% if the planning were to always start at the front of the ring then those
    %% vnodes that cover the tail of the ring would be involved in a
    %% disproportionate number of queries.
    {L1, L2} =
        lists:split(ReqId rem PartitionCount,
                    lists:seq(0, PartitionCount - 1)),
    %% Use an array to hold a list for each offset, before flattening the array
    %% back to a list to rejoin together
    A0 = array:new(NVal, {default, []}),
    {A1, _} =
        lists:foldl(
            fun(I, {A, Offset}) ->
                {array:set(Offset, [I|array:get(Offset, A)], A),
                    (Offset + 1) rem NVal}
            end,
            {A0, 0},
            L2 ++ L1
        ),
    OrderedVnodes = lists:flatten(array:to_list(A1)),
    %% Setup an array for tracking which partition has "Wants" left, starting
    %% with a value of PVC
    PartitionWants = array:new(PartitionCount, {default, PVC}),
    Countdown = PartitionCount * PVC,
    %% Subtract any Unavailable vnodes. Must only assign available primary
    %% vnodes a role in the coverage plan
    AvailableVnodes = lists:subtract(OrderedVnodes, UnavailableVnodes),
    develop_plan(AvailableVnodes, NVal, PartitionWants, Countdown, []).

%% @doc Greedily walk the ordered available vnodes, assigning each vnode
%% the still-wanted partitions it covers, until all wants are satisfied
%% (countdown reaches zero) or the vnodes run out.
develop_plan(_UnusedVnodes, _NVal, _PartitionWants, 0, VnodeCovers) ->
    %% Use the countdown to know when to stop, rather than having the cost of
    %% checking each entry in the PartitionWants array each loop
    {ok, VnodeCovers};
develop_plan([], _NVal, _PartitionWants, _N, VnodeCovers) ->
    %% Ran out of available vnodes with wants outstanding. The second
    %% element (KeySpaces) is returned empty as the caller ignores it.
    {insufficient_vnodes_available, [], VnodeCovers};
develop_plan([HeadVnode|RestVnodes], NVal,
                PartitionWants, PartitionCountdown,
                VnodeCovers) ->
    PartitionCount = array:size(PartitionWants),
    %% A vnode covers the NVal partitions immediately preceding it on the
    %% ring (wrapping around zero).
    LookBackFun =
        fun(I) -> (PartitionCount + HeadVnode - I) rem PartitionCount end,
    PartsCoveredByHeadNode =
        lists:sort(lists:map(LookBackFun, lists:seq(1, NVal))),
    %% Of those, keep only partitions with non-zero outstanding wants
    PartsCoveredAndWanted =
        lists:filter(fun(P) -> array:get(P, PartitionWants) > 0 end,
                        PartsCoveredByHeadNode),
    %% If the vnode satisfies at least one outstanding want, include it in
    %% the plan for those partitions; otherwise skip it. Match on the list
    %% shape rather than computing length/1 up front.
    case PartsCoveredAndWanted of
        [] ->
            develop_plan(RestVnodes, NVal,
                            PartitionWants, PartitionCountdown,
                            VnodeCovers);
        [_|_] ->
            %% Add the vnode to the coverage plan
            VnodeCovers0 =
                lists:sort([{HeadVnode, PartsCoveredAndWanted}|VnodeCovers]),
            %% Update the wants, for each partition that has been added to the
            %% coverage plan
            UpdateWantsFun =
                fun(P, PWA) -> array:set(P, array:get(P, PWA) - 1, PWA) end,
            PartitionWants0 =
                lists:foldl(UpdateWantsFun,
                            PartitionWants,
                            PartsCoveredAndWanted),
            Satisfied = length(PartsCoveredAndWanted),
            %% Now loop, to find use of the remaining vnodes
            develop_plan(RestVnodes, NVal,
                            PartitionWants0, PartitionCountdown - Satisfied,
                            VnodeCovers0)
    end.
%% @doc Legacy coverage planner (greedy set cover); kept behind the
%% legacy_coverage_planner application flag. Entry point that seeds the
%% recursive find_coverage/7 with the full keyspace and an empty plan.
-spec find_coverage(non_neg_integer(),
                    pos_integer(),
                    pos_integer(),
                    list(non_neg_integer()),
                    pos_integer()) ->
        {ok, list(vnode_covers())} |
        {insufficient_vnodes_available,
            list(non_neg_integer()),
            list(vnode_covers())}.
find_coverage(ReqId, NVal, PartitionCount, UnavailableKeySpaces, PVC) ->
    AllKeySpaces = lists:seq(0, PartitionCount - 1),
    %% Calculate an offset based on the request id to offer
    %% the possibility of different sets of VNodes being
    %% used even when all nodes are available.
    Offset = ReqId rem NVal,
    find_coverage(AllKeySpaces, Offset, NVal, PartitionCount,
                  UnavailableKeySpaces, PVC, []).
%% @private
%% Recursive worker for the legacy planner: computes one covering pass per
%% PVC round, accumulating per-vnode keyspace assignments in ResultsAcc.
-spec find_coverage(list(non_neg_integer()),
                    non_neg_integer(),
                    pos_integer(),
                    pos_integer(),
                    list(non_neg_integer()),
                    pos_integer(),
                    list(vnode_covers())) ->
        {ok, list(vnode_covers())} |
        {insufficient_vnodes_available,
            list(non_neg_integer()),
            list(vnode_covers())}.
find_coverage(AllKeySpaces, Offset, NVal, PartitionCount, UnavailableKeySpaces, PVC, []) ->
    %% Calculate the available keyspaces.
    %% Each candidate is {Tiebreaker, VNode, KeyspacesItCovers}.
    AvailableKeySpaces = [{((VNode+Offset) rem PartitionCount),
                           VNode,
                           n_keyspaces(VNode, NVal, PartitionCount)}
                          || VNode <- (AllKeySpaces -- UnavailableKeySpaces)],
    case find_coverage_vnodes(
           ordsets:from_list(AllKeySpaces),
           AvailableKeySpaces,
           []) of
        {ok, CoverageResults} ->
            case PVC of
                1 ->
                    {ok, CoverageResults};
                _ ->
                    %% More primary-vnode rounds requested; recurse with the
                    %% first round's results as the accumulator.
                    find_coverage(AllKeySpaces,
                                  Offset,
                                  NVal,
                                  PartitionCount,
                                  UnavailableKeySpaces,
                                  PVC-1,
                                  CoverageResults)
            end;
        Error ->
            Error
    end;
find_coverage(AllKeySpaces,
              Offset,
              NVal,
              PartitionCount,
              UnavailableKeySpaces,
              PVC,
              ResultsAcc) ->
    %% Calculate the available keyspaces. The list of
    %% keyspaces for each vnode that have already been
    %% covered by the plan are subtracted from the complete
    %% list of keyspaces so that coverage plans that
    %% want to cover more than one preflist vnode work out
    %% correctly.
    AvailableKeySpaces = [{((VNode+Offset) rem PartitionCount),
                           VNode,
                           n_keyspaces(VNode, NVal, PartitionCount) --
                               proplists:get_value(VNode, ResultsAcc, [])}
                          || VNode <- (AllKeySpaces -- UnavailableKeySpaces)],
    case find_coverage_vnodes(ordsets:from_list(AllKeySpaces),
                              AvailableKeySpaces,
                              ResultsAcc) of
        {ok, CoverageResults} ->
            %% Merge this round's assignments into the accumulator,
            %% de-duplicating the keyspace lists per vnode.
            UpdateResultsFun =
                fun({Key, NewValues}, Results) ->
                    case proplists:get_value(Key, Results) of
                        undefined ->
                            [{Key, NewValues} | Results];
                        Values ->
                            UniqueValues = lists:usort(Values ++ NewValues),
                            [{Key, UniqueValues} |
                             proplists:delete(Key, Results)]
                    end
                end,
            UpdatedResults =
                lists:foldl(UpdateResultsFun, ResultsAcc, CoverageResults),
            case PVC of
                1 ->
                    {ok, UpdatedResults};
                _ ->
                    find_coverage(AllKeySpaces,
                                  Offset,
                                  NVal,
                                  PartitionCount,
                                  UnavailableKeySpaces,
                                  PVC-1,
                                  UpdatedResults)
            end;
        Error ->
            Error
    end.
%% @private
%% @doc The set of keyspaces a VNode is responsible for: the N ring
%% positions immediately preceding VNode, wrapping around zero.
n_keyspaces(VNode, N, PartitionCount) ->
    Positions = lists:seq(PartitionCount + VNode - N, PartitionCount + VNode - 1),
    ordsets:from_list(lists:map(fun(Pos) -> Pos rem PartitionCount end, Positions)).
%% @private
%% @doc Find a minimal set of covering VNodes
%% Greedy set cover: repeatedly take the candidate vnode covering the most
%% of the remaining keyspace until the keyspace is exhausted, or no
%% candidate covers anything further.
find_coverage_vnodes([], _, Coverage) ->
    {ok, lists:sort(Coverage)};
find_coverage_vnodes(KeySpace, [], Coverage) ->
    {insufficient_vnodes_available, KeySpace, lists:sort(Coverage)};
find_coverage_vnodes(KeySpace, Available, Coverage) ->
    Res = next_vnode(KeySpace, Available),
    case Res of
        {0, _, _} -> % out of vnodes
            find_coverage_vnodes(KeySpace, [], Coverage);
        {_NumCovered, VNode, _} ->
            %% Remove the chosen vnode from the candidates, record the
            %% keyspaces it takes from the remainder, and recurse.
            {value, {_, VNode, Covers}, UpdAvailable} = lists:keytake(VNode, 2, Available),
            UpdCoverage = [{VNode, ordsets:intersection(KeySpace, Covers)} | Coverage],
            UpdKeySpace = ordsets:subtract(KeySpace, Covers),
            find_coverage_vnodes(UpdKeySpace, UpdAvailable, UpdCoverage)
    end.
%% @private
%% @doc Find the next vnode that covers the most of the
%% remaining keyspace. Use VNode id as tie breaker.
%% Returns {CoverCount, VNode, Tiebreaker} for the best candidate.
next_vnode(KeySpace, Available) ->
    CoverCount = [{covers(KeySpace, CoversKeys), VNode, TieBreaker} ||
                     {TieBreaker, VNode, CoversKeys} <- Available],
    hd(lists:sort(fun compare_next_vnode/2, CoverCount)).
%% @private
%% Ordering for candidate vnodes: descending by coverage of the remaining
%% keyspace, ties broken by the lower tiebreaker value.
%%
%% There is a potential optimization here once the partition claim logic
%% has been changed so that physical nodes claim partitions at regular
%% intervals around the ring. The optimization is for the case when the
%% partition count is not evenly divisible by the n_val and when the
%% coverage counts of the two arguments are equal and a tiebreaker is
%% required to determine the sort order. In this case, choosing the lower
%% node for the final vnode to complete coverage will result in an extra
%% physical node being involved in the coverage plan, so the optimization
%% is to choose the upper node to minimize the number of physical nodes.
compare_next_vnode({CoverageA, _VNodeA, _TieA}, {CoverageB, _VNodeB, _TieB})
        when CoverageA > CoverageB ->
    %% Descending sort on coverage
    true;
compare_next_vnode({CoverageA, _VNodeA, _TieA}, {CoverageB, _VNodeB, _TieB})
        when CoverageA < CoverageB ->
    false;
compare_next_vnode({_CoverageA, _VNodeA, TieA}, {_CoverageB, _VNodeB, TieB}) ->
    %% If equal coverage choose the lower node.
    TieA < TieB.
%% @private
%% @doc Count how many of CoversKeys appear in KeySpace.
covers(KeySpace, CoversKeys) ->
    length(ordsets:intersection(CoversKeys, KeySpace)).
%%%============================================================================
%%% Test
%%%============================================================================
-ifdef(EQC).
%% EQC generator: N raised to Exp, where Exp may itself be a generator
%% (resolved via ?LET) or a plain non-negative integer.
pow(N, Gen) when not is_integer(Gen) ->
    ?LET(Exp, Gen, pow(N, Exp));
pow(_, 0) ->
    1;
pow(N, Exp) when is_integer(Exp), Exp > 0 ->
    N * pow(N, Exp - 1).

%% EQC generator: a random subset of fewer than N elements drawn from
%% 0..M-1, in random order.
uniq_n_out_of_m(N, M) when N =< M ->
    ?LET({Candidates, Split}, {shuffle(lists:seq(0, M - 1)), choose(0, N - 1)},
         begin
             {L, _} = lists:split(Split, Candidates), L
         end).
%% Property entry points: run the coverage property against the new and
%% the legacy planner respectively.
prop_cover_partitions() ->
    prop_cover_partitions(fun initiate_plan/5).

prop_find_coverage_partitions() ->
    prop_cover_partitions(fun find_coverage/5).

%% A coverage plan should contain all partitions
%% and should not return an unavailable node
prop_cover_partitions(F) ->
    ?FORALL({NVal, PartitionCount}, {choose(3,5), pow(2, choose(3, 10))},
    ?FORALL({ReqId, Unavailable}, {choose(0, PartitionCount - 1), uniq_n_out_of_m(NVal, PartitionCount)},
    begin
        KeySpaces = lists:seq(0, PartitionCount - 1),
        PVC = 1,
        {ok, Plan} = F(ReqId, NVal, PartitionCount, Unavailable, PVC),
        conjunction([{partitions,
                      equals(lists:sort([ P || {_, Ps} <- Plan, P <- Ps ]),
                             KeySpaces)},
                     {no_unavailable,
                      equals(Unavailable -- [ N || {N, _} <- Plan ],
                             Unavailable)}])
    end)).
prop_distribution() ->
    % Test only on new function
    % legacy function - find_coverage/5 - will consistently fail on this
    % property
    prop_distribution(fun initiate_plan/5).

%% Compute all possible plans and check that there is no "hot" node that is
%% used more than all other nodes in the plan.
%% Also check that the resulting plans have approximately the same
%% number of nodes to select partitions from (not one plan that takes it from 3
%% nodes and another taking it from 8).
%% If one of the plans fails, then they should all fail. Failing should not
%% depend on the chosen ReqId.
prop_distribution(F) ->
    ?FORALL({NVal, PartitionCount}, {choose(3,5), pow(2, choose(3, 9))},
    ?FORALL(Unavailable, uniq_n_out_of_m(NVal, PartitionCount),
    begin
        PVC = 1,
        Plans = [ F(ReqId, NVal, PartitionCount, Unavailable, PVC)
                  || ReqId <- lists:seq(0, PartitionCount -1) ],
        Tags = lists:usort([ element(1, Plan) || Plan <- Plans ]),
        ?WHENFAIL(eqc:format("Plans: ~p with distribution ~p\n", [Plans, distribution(Plans)]),
                  conjunction([{nok_all_nok, length(Tags) == 1}] ++
                              [{similar_set, max_diff(length(Unavailable), [ length(Plan) || {_, Plan} <- Plans ])}
                               || Tags == [ok]] ++
                              [{hot_node, same_count(distribution(Plans))} || Unavailable == []]))
    end)).
%% Property: primary-vnode-count (PVC) support. With PVC = K, every
%% partition must appear on K distinct vnodes. Computing with PVC > 1 is
%% expensive, so a slightly smaller partition range is used here.
%% When too many nodes are down to honor PVC, the planner must return a
%% best-effort {insufficient_vnodes_available, _, Plan} instead.
prop_pvc() ->
    ?FORALL({NVal, PartitionCount}, {choose(3,5), pow(2, choose(3, 8))},
    ?FORALL({ReqId, Unavailable}, {choose(0, PartitionCount),
                                   uniq_n_out_of_m(NVal, PartitionCount)},
    ?FORALL(PVC, choose(1, NVal),
    collect({pvc, PVC},
        begin
            PVCPlan = initiate_plan(ReqId, NVal, PartitionCount, Unavailable, PVC),
            OnePlan = initiate_plan(ReqId, NVal, PartitionCount, Unavailable, 1),
            case {OnePlan, PVCPlan} of
                {{ok, _}, {ok, Plan}} ->
                    %% Full success: every partition occurs exactly PVC
                    %% times ({x, PVC} seeds the expected count).
                    Distribution = distribution([ P || {_, Ps} <- Plan, P <- Ps ], []),
                    ?WHENFAIL(eqc:format("Plan: ~p with distribution ~p\n", [Plan, Distribution]),
                              conjunction([{partition_count, same_count([{x, PVC} | Distribution])}]));
                {{ok, _}, {insufficient_vnodes_available, _, Plan}} ->
                    %% Partial success is acceptable only when PVC
                    %% exceeds the surviving replica count; the plan must
                    %% still cover all partitions, avoid down nodes, and
                    %% reach the achievable replica count everywhere.
                    Distribution = distribution([ P || {_, Ps} <- Plan, P <- Ps ], []),
                    conjunction([{unavailable, PVC > NVal - length(Unavailable)},
                                 {partitions,
                                  equals(lists:usort([ P || {_, Ps} <- Plan, P <- Ps ]),
                                         lists:seq(0, PartitionCount - 1))},
                                 {no_unavailable,
                                  equals(Unavailable -- [ N || {N, _} <- Plan ],
                                         Unavailable)},
                                 {good_enough, equals([ Count || {_, Count} <- Distribution,
                                                                Count < NVal - length(Unavailable)], [])}]);
                _ ->
                    equals(element(1, PVCPlan), insufficient_vnodes_available)
            end
        end)))).
%% True when the spread (max - min) of List is at most N.
max_diff(N, List) ->
    Spread = lists:max(List) - lists:min(List),
    Spread =< N.
%% Count how many times each node is used across a list of tagged plans.
distribution(Plans) ->
    Nodes = [Node || {_Tag, Cover} <- Plans, {Node, _Partitions} <- Cover],
    distribution(Nodes, []).

%% Tally occurrences of each element into a {Key, Count} property list.
distribution([], Counts) ->
    Counts;
distribution([Node | Rest], Counts) ->
    Updated =
        case lists:keyfind(Node, 1, Counts) of
            false ->
                [{Node, 1} | Counts];
            {Node, Seen} ->
                lists:keyreplace(Node, 1, Counts, {Node, Seen + 1})
        end,
    distribution(Rest, Updated).
%% EQC check: every {_, Count} pair carries the same count. Mismatching
%% counts are surfaced via equals/2 so failures print the offenders.
same_count([]) ->
    true;
same_count([{_, Reference} | Rest]) ->
    Mismatches = [C || {_, C} <- Rest, C /= Reference],
    equals(Mismatches, []).
-endif.
-ifdef(TEST).
%% Unit testing at moment, but there appear to be some obvious properties:
%% - The output of find_coverage is [{A, [B]}] - where accumulation of all [B]
%% should be equal to the KeySpaces
%% - The accumulation of [A] should not include any unavailable KeySpaces
%% - The length of the list should be optimal?
%% Worked examples for the legacy planner on an 8-partition ring with
%% n_val 3, pinning exact plans for several offsets, an unavailability
%% set, and PVC = 2. Plans are lists of {Vnode, PartitionsCovered}.
eight_vnode_prefactor_test() ->
    Offset = 0,
    NVal = 3,
    PartitionCount = 8,
    UnavailableKeySpaces = [],
    PVC = 1,
    {ok, VnodeCovers0} =
        find_coverage(Offset, NVal, PartitionCount,
                      UnavailableKeySpaces,
                      PVC),
    R0 = [{0, [5, 6, 7]}, {3, [0, 1, 2]}, {5, [3, 4]}],
    ?assertMatch(R0, VnodeCovers0),
    UnavailableKeySpaces1 = [3, 7],
    {ok, VnodeCovers1} =
        find_coverage(Offset, NVal, PartitionCount,
                      UnavailableKeySpaces1,
                      PVC),
    % Is the result below the most efficient - still only 3 vnodes to be asked?
    % R1 = [{0, [5, 6, 7]}, {2, [0, 1]}, {5, [2, 3, 4]}],
    % Actual result returned needs to cover 4 vnodes
    R1 = [{0, [5, 6, 7]}, {1, [0]}, {4, [1, 2, 3]}, {5, [4]}],
    ?assertMatch(R1, VnodeCovers1),
    Offset2 = 1,
    {ok, VnodeCovers2} =
        find_coverage(Offset2, NVal, PartitionCount,
                      UnavailableKeySpaces,
                      PVC),
    % The result here is effective - we want the use of offset to lead to
    % distinct choices of cover vnodes, and [2, 4, 7] is distinct to [0, 3, 5]
    R2 = [{2, [0, 1, 7]}, {4, [2, 3]}, {7, [4, 5, 6]}],
    ?assertMatch(R2, VnodeCovers2),
    Offset3 = 2,
    {ok, VnodeCovers3} =
        find_coverage(Offset3, NVal, PartitionCount,
                      UnavailableKeySpaces,
                      PVC),
    R3 = [{1, [0, 6, 7]}, {3, [1, 2]}, {6, [3, 4, 5]}],
    ?assertMatch(R3, VnodeCovers3),
    %% The Primary Vnode Count - now set to 2 (effectively an r value of 2).
    %% Each partition must be included twice in the result.
    PVC4 = 2,
    {ok, VnodeCovers4} =
        find_coverage(Offset, NVal, PartitionCount,
                      UnavailableKeySpaces,
                      PVC4),
    R4 = [{0, [5, 6, 7]}, {1, [0, 6, 7]}, {3, [0, 1, 2]}, {4, [1, 2, 3]},
          {5, [3, 4]}, {6, [4, 5]}],
    %% For r of 2 - need to check at n_val * 2 vnodes - so count is optimal
    ?assertMatch(R4, lists:keysort(1, VnodeCovers4)).
%% Characterization test for the legacy planner: across 200 random runs
%% the same overlap vnode keeps being reused, so the hottest vnode is
%% seen in more than half of all queries. The assertion documents (not
%% endorses) this poor distribution — hence CODEFAIL in the name.
changing_repeated_vnode_CODEFAIL_prefactor_test() ->
    MaxCount = changing_repeated_vnode_tester(fun find_coverage/5, 200),
    ?assertEqual(true, MaxCount > 100).
%% The refactored planner must spread the overlap vnode across runs: no
%% single vnode may dominate more than half of the 200 random queries.
changing_repeated_vnode_refactor_test() ->
    MaxCount = changing_repeated_vnode_tester(fun initiate_plan/5, 200),
    ?assertEqual(true, MaxCount < 100).
%% Run CoverageFun `Runs' times on a 32-partition ring with random
%% request ids, and return how often the single most-used vnode appeared
%% in total. A well-distributed planner keeps this maximum low, i.e. the
%% overlapping vnode changes between runs.
changing_repeated_vnode_tester(CoverageFun, Runs) ->
    Partitions = lists:seq(0, 31),
    %% One run: random request id, plan, assert full coverage, collect
    %% the vnodes used.
    GetVnodesUsedFun =
        fun(_I) ->
            R = rand:uniform(99999),
            {Vnodes0, Coverage0} =
                ring_tester(32, CoverageFun, R, 3, [], 1),
            ?assertEqual(Partitions, Coverage0),
            Vnodes0
        end,
    AllVnodes = lists:flatten(lists:map(GetVnodesUsedFun, lists:seq(1, Runs))),
    %% Tally vnode usage. ukeysort keeps the freshly prepended (updated)
    %% tuple and discards the stale one further down the list.
    CountVnodesFun =
        fun(V, Acc) ->
            case lists:keyfind(V, 1, Acc) of
                {V, C} ->
                    lists:ukeysort(1, [{V, C + 1}|Acc]);
                false ->
                    lists:ukeysort(1, [{V, 1}|Acc])
            end
        end,
    %% Sort by count and take the hottest vnode.
    [{Vnode, MaxCount}|_RestCounts] =
        lists:reverse(
            lists:keysort(2, lists:foldl(CountVnodesFun, [], AllVnodes))),
    io:format(user,
              "~nVnode=~w MaxCount=~w out of ~w queries~n",
              [Vnode, MaxCount, Runs]),
    MaxCount.
%% EUnit generator: the full 1024-ring sweep below is slow, so allow up
%% to 1200 seconds instead of the default per-test timeout.
all_refactor_1024ring_test_() ->
    {timeout, 1200, fun all_refactor_1024_ring_tester/0}.
%% For every request id on a 1024-partition ring (n_val 3, nothing
%% down), every partition must be covered exactly once (PVC = 1).
%% Per-partition hit counts are accumulated in an array.
all_refactor_1024_ring_tester() ->
    PVC = 1,
    TestFun =
        fun(ReqId) ->
            {ok, VnodeCovers} = initiate_plan(ReqId, 3, 1024, [], PVC),
            PC = array:new(1024, {default, 0}),
            %% Outer fold: one entry per planned vnode; inner fold bumps
            %% the counter of each partition that vnode covers.
            PC1 =
                lists:foldl(
                    fun({_I, L}, Acc) ->
                        lists:foldl(fun(P, IA) ->
                                        array:set(P,
                                                  array:get(P, IA) + 1,
                                                  IA)
                                    end,
                                    Acc,
                                    L)
                    end,
                    PC,
                    VnodeCovers),
            lists:foreach(fun(C) -> ?assertEqual(PVC, C) end, array:to_list(PC1))
        end,
    lists:foreach(fun(I) -> TestFun(I) end, lists:seq(0, 1023)).
%% Smoke tests: run each planner over a range of ring sizes, asserting
%% full coverage and a bounded vnode count (see ring_tester/2). The
%% "ns" variants use the non-standard setup in nonstandardring_tester/2
%% (offset 2, one node down, PVC 2).
refactor_2048ring_test() ->
    ring_tester(2048, fun initiate_plan/5).

prefactor_1024ring_test() ->
    ring_tester(1024, fun find_coverage/5).

prefactor_ns1024ring_test() ->
    nonstandardring_tester(1024, fun find_coverage/5).

refactor_1024ring_test() ->
    ring_tester(1024, fun initiate_plan/5).

refactor_ns1024ring_test() ->
    nonstandardring_tester(1024, fun initiate_plan/5).

prefactor_512ring_test() ->
    ring_tester(512, fun find_coverage/5).

refactor_512ring_test() ->
    ring_tester(512, fun initiate_plan/5).

prefactor_256ring_test() ->
    ring_tester(256, fun find_coverage/5).

refactor_256ring_test() ->
    ring_tester(256, fun initiate_plan/5).
%% Exercise both planners on a 128-partition ring.
%% BUG FIX: these previously called ring_tester(256, ...), silently
%% duplicating the 256-ring tests above and leaving the 128 ring
%% untested.
prefactor_128ring_test() ->
    ring_tester(128, fun find_coverage/5).

refactor_128ring_test() ->
    ring_tester(128, fun initiate_plan/5).
%% 64-partition smoke tests for both planners.
prefactor_64ring_test() ->
    ring_tester(64, fun find_coverage/5).

refactor_64ring_test() ->
    ring_tester(64, fun initiate_plan/5).
%% The refactored planner must not use noticeably more vnodes than the
%% legacy planner on the non-standard configuration.
compare_vnodesused_test() ->
    compare_tester(64),
    compare_tester(128),
    compare_tester(256).

%% Compare vnode counts of legacy (PFC) vs refactored (RFC) plans,
%% allowing one vnode of wiggle room.
compare_tester(RingSize) ->
    PFC = nonstandardring_tester(RingSize, fun find_coverage/5),
    RFC = nonstandardring_tester(RingSize, fun initiate_plan/5),
    ?assertMatch(true, RFC =< (PFC + 1)).
%% Run a planner over a healthy ring (n_val 3, nothing down, PVC 1) and
%% assert full coverage plus a bounded number of vnodes used.
ring_tester(PartitionCount, CoverageFun) ->
    ring_tester(PartitionCount, CoverageFun, 0).

ring_tester(PartitionCount, CoverageFun, ReqId) ->
    NVal = 3,
    UnavailableKeySpaces = [],
    PVC = 1,
    {Vnodes, CoveredKeySpaces} =
        ring_tester(PartitionCount, CoverageFun,
                    ReqId, NVal, UnavailableKeySpaces, PVC),
    %% Roughly PartitionCount / NVal vnodes is optimal; allow a slack
    %% of 2 extra vnodes.
    ExpVnodeCount = (PartitionCount div NVal) + 2,
    KeySpaces = lists:seq(0, PartitionCount - 1),
    ?assertMatch(KeySpaces, CoveredKeySpaces),
    ?assertMatch(true, length(Vnodes) =< ExpVnodeCount).
%% Run CoverageFun and flatten its plan into {VnodesUsed, SortedCoverage}.
%% VnodesUsed preserves the (reversed) plan order; the covered partitions
%% are sorted so callers can compare them against lists:seq/2 directly.
ring_tester(PartitionCount, CoverageFun,
            Offset, NVal, UnavailableKeySpaces, PVC) ->
    {ok, VnodeCovers} =
        CoverageFun(Offset, NVal, PartitionCount,
                    UnavailableKeySpaces,
                    PVC),
    Vnodes = lists:reverse([Vnode || {Vnode, _} <- VnodeCovers]),
    Coverage = lists:append([Covered || {_, Covered} <- VnodeCovers]),
    {Vnodes, lists:sort(Coverage)}.
%% Non-standard configuration: offset 2, one random vnode down, PVC 2.
%% Every partition must still be covered exactly twice, and the down
%% vnode must never be selected. Returns the number of vnodes used so
%% callers can compare planner efficiency.
nonstandardring_tester(PartitionCount, CoverageFun) ->
    Offset = 2,
    NVal = 3,
    OneDownVnode = rand:uniform(PartitionCount) - 1,
    UnavailableKeySpaces = [OneDownVnode],
    PVC = 2,
    {ok, VnodeCovers} =
        CoverageFun(Offset, NVal, PartitionCount,
                    UnavailableKeySpaces,
                    PVC),
    %% Per-partition hit counter.
    PC = array:new(PartitionCount, {default, 0}),
    PC1 =
        lists:foldl(
            fun({I, L}, Acc) ->
                ?assertNotEqual(OneDownVnode, I),
                lists:foldl(fun(P, IA) ->
                                array:set(P, array:get(P, IA) + 1, IA)
                            end,
                            Acc,
                            L)
            end,
            PC,
            VnodeCovers),
    lists:foreach(fun(C) -> ?assertEqual(2, C) end, array:to_list(PC1)),
    length(VnodeCovers).
%% Multi-failure sweeps across ring sizes for the refactored planner
%% (post) and the legacy planner (pre).
%% NOTE(review): the legacy planner is not exercised at 1024 partitions
%% here — presumably for runtime or a known failure; confirm.
multifailure_r2_post_test() ->
    multi_failure_tester(32, fun initiate_plan/5),
    multi_failure_tester(64, fun initiate_plan/5),
    multi_failure_tester(128, fun initiate_plan/5),
    multi_failure_tester(256, fun initiate_plan/5),
    multi_failure_tester(512, fun initiate_plan/5),
    multi_failure_tester(1024, fun initiate_plan/5).

multifailure_r2_pre_test() ->
    multi_failure_tester(32, fun find_coverage/5),
    multi_failure_tester(64, fun find_coverage/5),
    multi_failure_tester(128, fun find_coverage/5),
    multi_failure_tester(256, fun find_coverage/5),
    multi_failure_tester(512, fun find_coverage/5).
%% With failures spaced at least target_n_val apart, an r=2 (PVC = 2)
%% coverage plan can still be produced; with two adjacent failures only
%% partial coverage is possible and the planner must report it.
multi_failure_tester(PartitionCount, CoverageFun) when PartitionCount >= 32 ->
    ReqId = rand:uniform(99999),
    NVal = 3,
    PVC = 2,
    %% Eight down vnodes, 4 apart, shifted by a random offset C.
    C = rand:uniform(PartitionCount div 8),
    UnavailableKeySpaces = lists:map(fun(I) -> I * 4 - C end, lists:seq(1, 8)),
    {ok, VnodeCovers} =
        CoverageFun(ReqId, NVal, PartitionCount,
                    UnavailableKeySpaces, PVC),
    PC = array:new(PartitionCount, {default, 0}),
    PC0 =
        lists:foldl(
            fun({I, L}, Acc) ->
                ?assertNotEqual(true, lists:member(I, UnavailableKeySpaces)),
                lists:foldl(fun(P, IA) ->
                                array:set(P, array:get(P, IA) + 1, IA)
                            end,
                            Acc,
                            L)
            end,
            PC,
            VnodeCovers),
    %% Spaced failures: every partition is still covered exactly twice.
    lists:foreach(fun(Cnt) -> ?assertEqual(2, Cnt) end, array:to_list(PC0)),
    %% Now fail two adjacent vnodes - only partial coverage is possible
    %% from an r=2 plan.
    RVN = rand:uniform(PartitionCount),
    UnavailableKeySpaces1 =
        case RVN of
            1 ->
                %% Wrap around the ring for the lowest draw.
                [0, PartitionCount - 1];
            _ ->
                [RVN - 1, RVN - 2]
        end,
    {insufficient_vnodes_available, _, VnodeCovers1} =
        CoverageFun(ReqId, NVal, PartitionCount,
                    UnavailableKeySpaces1, PVC),
    PC1 =
        lists:foldl(
            fun({I, L}, Acc) ->
                ?assertNotEqual(true, lists:member(I, UnavailableKeySpaces1)),
                lists:foldl(fun(P, IA) ->
                                array:set(P, array:get(P, IA) + 1, IA)
                            end,
                            Acc,
                            L)
            end,
            PC,
            VnodeCovers1),
    %% Exactly two partitions lose one replica; all others keep two.
    Covered =
        length(lists:filter(fun(Cnt) -> Cnt == 2 end, array:to_list(PC1))),
    PartiallyCovered =
        length(lists:filter(fun(Cnt) -> Cnt == 1 end, array:to_list(PC1))),
    ?assertEqual(2, PartiallyCovered),
    ?assertEqual(2, PartitionCount - Covered).
-endif. | src/riak_core_coverage_plan.erl | 0.640973 | 0.458349 | riak_core_coverage_plan.erl | starcoder |
%% @doc `rebar3 hex search' - search for packages on hexpm.
%%
%% Displays packages matching the given search query.
%%
%% If you are authenticated it will additionally search all organizations you are member of.
%%
%% ```
%% $ rebar3 hex search PACKAGE
%% '''
%%
%% <h2> Command line options </h2>
%%
%% <ul>
%% <li>`--repo' - Specify the repository to work with. This option is required when you
%% have multiple repositories configured, including organizations. The argument must be a fully qualified repository
%% name (e.g, `hexpm', `hexpm:my_org', `my_own_hexpm').
%% Defaults to `hexpm'.
%% </li>
%% </ul>
-module(rebar3_hex_search).
-export([init/1,
do/1,
format_error/1]).
-include("rebar3_hex.hrl").
-define(PROVIDER, search).
-define(DEPS, []).
%% @private
-spec init(rebar_state:t()) -> {ok, rebar_state:t()}.
%% Build the provider descriptor (name, namespace, CLI options) for the
%% `hex search' task and register it with the rebar state.
init(State) ->
    Provider = providers:create([
                                 {name, ?PROVIDER},
                                 {module, ?MODULE},
                                 {namespace, hex},
                                 {bare, true},
                                 {deps, ?DEPS},
                                 {example, "rebar3 hex search <term>"},
                                 {short_desc, "Display packages matching the given search query"},
                                 {desc, ""},
                                 {opts, [{term, undefined, undefined, string, "Search term."},
                                         rebar3_hex:repo_opt()]}
                                ]),
    State1 = rebar_state:add_provider(State, Provider),
    {ok, State1}.
%% @private
-spec do(rebar_state:t()) -> {ok, rebar_state:t()}.
%% Run the search term against every parent repository (hexpm plus any
%% organizations the user is a member of).
do(State) ->
    {Args, _} = rebar_state:command_parsed_args(State),
    Term = proplists:get_value(term, Args, ""),
    Parents = rebar3_hex_config:parent_repos(State),
    lists:foreach(fun(Repo) -> search(State, Repo, Term) end, Parents),
    {ok, State}.
%% Query the repository's package-search endpoint and print a table of
%% results (name, latest stable version, truncated description, URL),
%% ordered by total downloads. Non-200 responses and transport errors
%% are raised as provider errors (rendered by format_error/1).
search(State, Repo, Term) ->
    HexConfig = rebar3_hex_config:get_hex_config(?MODULE, Repo, read),
    case hex_api_package:search(HexConfig, rebar_utils:to_binary(Term), []) of
        {ok, {200, _Headers, []}} ->
            io:format("No Results~n"),
            {ok, State};
        {ok, {200, _Headers, Packages}} ->
            Header = ["Name", "Version", "Description", "URL"],
            %% One table row per package, most-downloaded first.
            Rows = lists:map(fun(Package) ->
                                 #{<<"name">> := Name,
                                   <<"meta">> := #{<<"description">> := Description},
                                   <<"releases">> := Releases,
                                   <<"html_url">> := Url
                                  } = Package,
                                 Descrip = truncate_description(Description),
                                 [binary_to_list(Name),
                                  latest_stable(Releases), Descrip, unicode:characters_to_list(Url)]
                             end, sort_by_downloads(Packages)),
            ok = rebar3_hex_results:print_table([Header] ++ Rows),
            {ok, State};
        {ok, {Status, _Headers, _Body}} ->
            throw(?PRV_ERROR({status, Status}));
        {error, Reason} ->
            throw(?PRV_ERROR({error, Reason}))
    end.
%% Produce a short single-line description for table output: trims
%% surrounding newlines/blanks, drops embedded newlines, keeps at most
%% 50 characters and appends "..." when the original text was long.
%%
%% NOTE(review): the truncation counts characters of the cleaned string
%% while the ellipsis check measures bytes of the original binary, so
%% multi-byte UTF-8 descriptions near the limit may gain a spurious
%% "..." — confirm whether exact behavior matters here.
truncate_description(Description) ->
    Descrip = string:sub_string(
                string:strip(
                  string:strip(
                    unicode:characters_to_list(Description), both, $\n)
                 ), 1, 50),
    Blist = binary:split(unicode:characters_to_binary(Descrip), <<"\n">>, [global]),
    Slist = lists:map(fun(B) -> unicode:characters_to_list(B) end, Blist),
    Dstr = string:join(Slist, ""),
    %% byte_size/1 instead of the generic size/1: Description is always
    %% a binary here and the specific BIF states that explicitly.
    case byte_size(Description) of
        N when N >= 50 ->
            Dstr ++ "...";
        _ ->
            Dstr
    end.
%% Order packages by total downloads, highest first; packages with an
%% empty downloads map go last in their original relative order.
sort_by_downloads(Packages) ->
    HasStats = fun(Package) -> maps:get(<<"downloads">>, Package) =/= #{} end,
    {WithStats, WithoutStats} = lists:partition(HasStats, Packages),
    ByDownloads =
        fun(#{<<"downloads">> := #{<<"all">> := DownloadsA}},
            #{<<"downloads">> := #{<<"all">> := DownloadsB}}) ->
                DownloadsA > DownloadsB
        end,
    lists:sort(ByDownloads, WithStats) ++ WithoutStats.
%% Name of the newest stable release as a string; "" when no stable
%% release exists.
latest_stable(Releases) ->
    case gather_stable_releases(Releases) of
        [] ->
            "";
        [#{<<"version">> := Version} | _Older] ->
            binary_to_list(Version)
    end.
%% Keep only releases whose version has no pre-release component (as
%% parsed by verl), ordered newest first.
gather_stable_releases(Releases) ->
    IsStable =
        fun(#{<<"version">> := Ver}) ->
                {ok, Parsed} = verl:parse(Ver),
                case Parsed of
                    #{pre := []} -> true;
                    _ -> false
                end
        end,
    version_sort(lists:filter(IsStable, Releases)).
%% Sort releases newest-first by their version binaries.
%%
%% BUG FIX: the previous implementation compared dot-separated
%% components as binaries packed into tuples, so "0.10.0" sorted as
%% *older* than "0.9.0" (lexicographic <<"10">> < <<"9">>) and versions
%% with differing component counts were ordered by component count
%% before value. Numeric components are now compared as integers;
%% non-numeric components fall back to the raw binary (integers sort
%% before binaries in Erlang term order).
version_sort(Releases) ->
    lists:sort(fun(#{<<"version">> := A}, #{<<"version">> := B}) ->
                       version_key(A) >= version_key(B)
               end,
               Releases).

%% @private List of per-component sort keys for a version binary.
version_key(VersionBin) ->
    [component_key(Part) || Part <- binary:split(VersionBin, <<".">>, [global])].

%% @private Integer when the component is purely numeric, else the
%% original binary.
component_key(Part) ->
    try
        binary_to_integer(Part)
    catch
        error:badarg -> Part
    end.
%% @private
-spec format_error(any()) -> iolist().
format_error({status, Status}) ->
io_lib:format("Error searching for packages: ~ts",
[rebar3_hex_client:pretty_print_status(Status)]);
format_error({error, Reason}) ->
io_lib:format("Error searching for packages: ~p", [Reason]);
format_error(Reason) ->
rebar3_hex_error:format_error(Reason). | src/rebar3_hex_search.erl | 0.525369 | 0.496582 | rebar3_hex_search.erl | starcoder |
% This file is part of ecsv released under the MIT license.
% See the LICENSE file for more information.
-module(ecsv_parser).
-author("<NAME> <<EMAIL>>").
-include("ecsv.hrl").
%
% This module is a raw CSV parser.
%
% This parser is based on the blog post written by <NAME> located
% here http://andrewtill.blogspot.com/2009/12/erlang-csv-parser.html.
%
% This parser supports well formed csv files which are
% - a set of lines ending with a \n
% - each line contains a set of fields separated with a comma (,)
% - each field value can be enclosed with double quote (") ONLY
% - each field value can be empty
%
% Please note:
% - This parser has no failsafe mechanism if the file is badly formed!
% But the line a,,,,,\n is perfectly fine.
% - This parser doesn't allow a return (\n) in a field value!
%
-export([init/1, init/2, parse_with_character/2, end_parsing/1]).
%% Parser state threaded through every input character.
-record(pstate, {
    state,              % machine state: ready | in_quotes | skip_to_delimiter | eof
    skipped_backslash,  % false while a pending backslash may start an escape
    current_line,       % fields completed so far on this line (reversed)
    current_value,      % characters of the field being built (reversed)
    opts,               % #ecsv_opts{} (delimiter, ...)
    process_fun,        % callback: ({newline, Fields} | {eof}, State) -> State
    process_fun_state   % caller state folded through process_fun
}).
% 4 states:
% ready
% in_quotes
% skip_to_delimiter
% eof
%% @doc Create a parser state with default options and an empty initial
%% state for the processing fun.
init(ProcessingFun) ->
    init(ProcessingFun, []).

%% @doc Create a parser state with default options.
init(ProcessingFun, ProcessingFunInitState) ->
    init(#ecsv_opts{}, ProcessingFun, ProcessingFunInitState).

%% Create a parser state. ProcessingFun is called with {newline, Fields}
%% for every completed line and with {eof} once at end of input.
init(Opts, ProcessingFun, ProcessingFunInitState) ->
    #pstate{
        state = ready,
        skipped_backslash = true,
        current_line = [],
        current_value = [],
        opts = Opts,
        process_fun = ProcessingFun,
        process_fun_state = ProcessingFunInitState
    }.
%% @doc Feed a single character (as an integer code point) to the parser
%% and return the updated parser state.
parse_with_character(Character, PState) when is_integer(Character) ->
    parse_with({char, Character}, PState).

%% @doc Signal end of input: flushes any pending line and returns the
%% final processing-fun state as {ok, State}.
end_parsing(PState) ->
    FinalState = parse_with({eof}, PState),
    {ok, FinalState#pstate.process_fun_state}.
% -----------------------------------------------------------------------------
%% Dispatch one parser input ({char, C} or {eof}) according to the
%% current machine state. Dispatching on the record field in the
%% function head replaces the original case expression; the terminal
%% `eof' state swallows all further input unchanged.
parse_with(Input, #pstate{state = ready} = PState) ->
    do_ready(Input, PState);
parse_with(Input, #pstate{state = in_quotes} = PState) ->
    do_in_quotes(Input, PState);
parse_with(Input, #pstate{state = skip_to_delimiter} = PState) ->
    do_skip_to_delimiter(Input, PState);
parse_with(_Input, #pstate{state = eof} = PState) ->
    PState;
parse_with(_Input, #pstate{}) ->
    throw({error, wrong_state}).
%% State `ready': outside any quoted field.
%% - delimiter: finish the current field
%% - \n: finish the current line and hand it to the processing fun
%% - ": enter in_quotes, discarding anything seen before the quote
%% - \r: ignored
%% - eof: flush the pending field/line (skipping a trailing empty
%%   field) and signal {eof} to the processing fun
%% - any other character: accumulate into the current field
do_ready(
    Input,
    #pstate{
        opts=Opts,
        current_line=CurrentLine,
        current_value=CurrentValue,
        process_fun=ProcessingFun,
        process_fun_state=ProcessingFunState
    }=PState
) ->
    Delimiter = Opts#ecsv_opts.delimiter,
    case Input of
        {eof} ->
            %% Avoid emitting a spurious empty trailing field when the
            %% input ends with no characters pending.
            NewLine = case CurrentValue of
                [] -> lists:reverse(CurrentLine);
                _ -> lists:reverse([lists:reverse(CurrentValue) | CurrentLine])
            end,
            UpdatedProcessingFunState =
                process_new_line(ProcessingFun, NewLine, ProcessingFunState),
            UpdatedProcessingFunState1 =
                ProcessingFun({eof}, UpdatedProcessingFunState),
            PState#pstate{
                state=eof,
                current_line=[],
                current_value=[],
                process_fun_state=UpdatedProcessingFunState1
            };
        {char, Char} when (Char == $") ->
            %% Pass an empty value to in_quotes: only the characters
            %% inside the quotes belong to the field.
            PState#pstate{state=in_quotes, current_value=[], skipped_backslash = true};
        {char, Char} when Char == Delimiter ->
            %% Field complete: push it (reversed back to order) onto
            %% the current line.
            PState#pstate{
                current_line=[lists:reverse(CurrentValue) | CurrentLine],
                current_value=[],
                skipped_backslash = true
            };
        {char, Char} when Char == $\n ->
            %% A complete line has been parsed: hand it to the
            %% processing fun.
            NewLine = lists:reverse([lists:reverse(CurrentValue) | CurrentLine]),
            UpdatedProcessingFunState =
                process_new_line(ProcessingFun, NewLine, ProcessingFunState),
            PState#pstate{
                current_line=[],
                current_value=[],
                process_fun_state=UpdatedProcessingFunState
            };
        {char, Char} when Char == $\r ->
            %% Ignore carriage returns (Windows line endings).
            PState;
        {char, Char} ->
            PState#pstate{current_value=[Char | CurrentValue]}
    end.
%% State `in_quotes': inside a double-quoted field. Handles backslash
%% escapes: `\"' yields a literal quote and `\\' collapses to a single
%% backslash. `skipped_backslash' is false only while the most recently
%% stored backslash may still start an escape sequence.
do_in_quotes(Input, #pstate{current_value = CurrentValue} = PState) ->
    case Input of
        {eof} -> on_eof(on_new_line(PState));
        {char, Char} when Char == $" ->
            case CurrentValue of
                [ $\\ | TCurrentValue] when (PState#pstate.skipped_backslash == false) ->
                    %% Escaped quote: replace the stored backslash with
                    %% the literal quote and stay in_quotes.
                    PState#pstate{
                        current_value=[Char | TCurrentValue],
                        skipped_backslash = true
                    };
                _ ->
                    %% Closing quote: the field value is complete; skip
                    %% ahead to the next delimiter or end of line.
                    PState#pstate{
                        state = skip_to_delimiter,
                        current_value = CurrentValue,
                        skipped_backslash = true
                    }
            end;
        {char, Char} when Char == $\\ ->
            case CurrentValue of
                [ $\\ | _] when (PState#pstate.skipped_backslash == false) ->
                    %% Second backslash of an escaped pair: keep only
                    %% the one already stored.
                    PState#pstate{skipped_backslash = true};
                _ ->
                    %% Store the backslash; it may begin an escape.
                    PState#pstate{
                        current_value=[Char | CurrentValue],
                        skipped_backslash = false
                    }
            end;
        {char, Char} ->
            PState#pstate{
                current_value=[Char | CurrentValue],
                skipped_backslash = true
            }
    end.
%% State `skip_to_delimiter': a quoted field has just been closed; wait
%% for the field delimiter or the end of the line. Any other character
%% re-enters in_quotes, treating the earlier quote as internal to the
%% field (see the final clause).
do_skip_to_delimiter(Input, #pstate{
                               opts = Opts,
                               current_line = CurrentLine,
                               current_value = CurrentValue
                              } = PState) ->
    case Input of
        {eof} -> on_eof(on_new_line(PState));
        {char, Char} when (Char == $\n) -> on_new_line(PState);
        {char, Char} when (Char == Opts#ecsv_opts.delimiter) ->
            PState#pstate{
                state = ready,
                current_value = [],
                current_line = [lists:reverse(CurrentValue) | CurrentLine]
            };
        {char, Char} ->
            %% Unexpected character after a closing quote: resume
            %% accumulating as if still inside the quotes.
            PState#pstate{
                state = in_quotes,
                current_value = [Char | CurrentValue]
            }
    end.
%% Finish the current line: reverse the accumulated field/line stacks
%% into reading order, hand the line to the processing fun and reset the
%% machine to `ready'.
on_new_line(#pstate{
               current_line = CurrentLine,
               current_value = CurrentValue,
               process_fun = ProcessingFun,
               process_fun_state = ProcessingFunState
              } = PState) ->
    NewLine = lists:reverse([lists:reverse(CurrentValue) | CurrentLine]),
    UpdatedProcessingFunState =
        process_new_line(ProcessingFun, NewLine, ProcessingFunState),
    PState#pstate{
        state = ready,
        current_line = [],
        current_value = [],
        process_fun_state = UpdatedProcessingFunState
    }.
%% Signal {eof} to the processing fun and move the machine to its
%% terminal `eof' state (parse_with/2 ignores all further input there).
on_eof(#pstate{
          process_fun = ProcessingFun,
          process_fun_state = ProcessingFunState
         } = PState) ->
    UpdatedProcessingFunState = ProcessingFun({eof}, ProcessingFunState),
    PState#pstate{
        state = eof,
        current_line = [],
        current_value = [],
        process_fun_state = UpdatedProcessingFunState
    }.
process_new_line(_ProcessingFun, [], State) ->
% ignore empty lines
State;
process_new_line(ProcessingFun, NewLine, State) ->
ProcessingFun({newline, NewLine}, State). | src/ecsv_parser.erl | 0.525369 | 0.423458 | ecsv_parser.erl | starcoder |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% giferly - a GIF 89a decoder
% - by <NAME> <<EMAIL>>
%
% A standalone program to decode and display a GIF file. This is my attempt to
% learn both the GIF 89a format and Erlang. This project started by my
% encountering an article describing the GIF format, in essence as a summary of
% some of the more fundamental parts of the official specification of the
% format. While this task would have been fairly straightforward in Ruby, C or
% some other language I am familiar with, I decided to implement the decoder in
% Erlang in order to learn the language.
%
% This program only depends on Esdl, an Erlang binding to the Simple
% DirectMedia Layer, to display the decoded image. Other than that, the entire
% parsing is implemented from scratch in Erlang. While Erlang may not
% necessarily be the ideal choice for this task, it is an interesting exercise
% to attempt this in a functional style, and taking advantage of Erlang's
% binary data manipulation features.
%
% References:
% * The article that piqued my interest:
% - http://matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp
% * The official specification of the GIF 89a format:
% - http://www.w3.org/Graphics/GIF/spec-gif89a.txt
% * The book I'm using to learn Erlang:
% - http://learnyousomeerlang.com/contents
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-module(giferly).
-author('<NAME> <<EMAIL>>').
-export([go/1]).
-include_lib("esdl/include/sdl.hrl").
-include_lib("esdl/include/sdl_events.hrl").
-include_lib("esdl/include/sdl_video.hrl").
-include_lib("esdl/include/sdl_keyboard.hrl").
% == GIF FILE FORMAT ==========================================================
% ~~ HEADER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
% The first six bytes form the header, with all the bytes representing ASCII
% characters. First, the header must contain "GIF" as the first three bytes,
% followed by the version of the specification used to encode this image. The
% only version supported by this library is "89a", with the older "87a" not
% readily found anymore.
%% True iff the six header bytes are exactly "GIF89a" — the only
%% version this decoder supports ("87a" files are rejected).
header_valid(Header) ->
    Header =:= <<"GIF89a">>.
% ~~ LOGICAL SCREEN DESCRIPTOR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
% The first four bytes of the logical screen descriptor specify the canvas
% width and height. Each of the values takes up two bytes and is specified with
% the least significant byte first.
%% Canvas dimensions from the 7-byte logical screen descriptor: width
%% and height are 16-bit little-endian; the packed byte, background
%% color index and aspect ratio bytes are ignored here.
screen_dim(<<Width:16/little, Height:16/little, _Packed:8, _BgIndex:8, _Aspect:8>>) ->
    {{w, Width}, {h, Height}}.
% The next byte is a packed byte containing multiple pieces of information.
% The first bit is the global color table flag, with "1" signifying that there
% is a global color table, and the table will follow the logical screen
% descriptor.
%% Whether a global color table follows the logical screen descriptor:
%% the most significant bit of the packed byte (fifth byte).
global_color_table_flag(<<_Dims:32, Flag:1, _Rest:23>>) ->
    Flag =:= 1.
% The next three bits state the color depth, minus one. Thus, 2#001 signifies a
% color depth of 2BPP, while 2#111 signifies a color depth of 8BPP.
%% Color depth in bits per pixel: the three bits after the global color
%% table flag store the depth minus one (2#111 means 8 BPP).
color_depth(<<_:33, DepthMinusOne:3, _:20>>) ->
    DepthMinusOne + 1.
% The next bit is the sort flag, with "1" signifying that the colors in the
% global color table are sorted by "decreasing importance." While this can be
% used by the decoder, it can also be safely ignored, as we will do.
% The final three bits specify the size of the global color table as calculated
% by the the formula: size = 2^(N+1), where N is the value of the three bits in
% question. The size reported is the number of colors in the table.
%% Number of colors in the global color table: 2^(N + 1), where N is
%% the value of the packed byte's low three bits. An integer shift
%% replaces the original float round(math:pow/2) computation.
global_color_table_size(<<_:37, SizeExponent:3, _:16>>) ->
    1 bsl (SizeExponent + 1).
% The next byte of the logical screen descriptor is the background color index,
% and is only meaningful when the global color table flag is "1". In this case,
% the background color index specifies which color in the global color table is
% used when a pixel does not specify a value in the image data. If there is no
% global color table, this byte should be "0".
%% Background color index (sixth byte of the logical screen descriptor):
%% the global-color-table entry used for pixels with no explicit value.
background_color_index(<<_DimsAndPacked:40, Index:8, _Aspect:8>>) ->
    Index.
% The final byte of the logical screen descriptor is the pixel aspect ratio.
% This will be ignored as well, since it is typically set to "0".
% ~~ GLOBAL COLOR TABLE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-record(color, {r, g, b}).
% The global color table immediately follows the logical screen descriptor if
% the global color table flag is set. If so, then the size of the color table,
% also in the logical screen descriptor, specifies the number of colors in the
% table. Each color takes up three bytes, representing the red, green and blue
% components respectively.
%% Read NumColorsLeft consecutive 3-byte RGB entries from the front of
%% the binary, returning them in file order. Bytes after the requested
%% count are ignored.
color_table(_Remaining, 0, Acc) ->
    lists:reverse(Acc);
color_table(<<Red:8, Green:8, Blue:8, Rest/binary>>, ColorsLeft, Acc) ->
    color_table(Rest, ColorsLeft - 1, [#color{r = Red, g = Green, b = Blue} | Acc]).
% The local color table, described later, is identical in structure to the
% global color table, and so can use the same parser.
%% Parse the global color table: NumColors 3-byte RGB entries read from
%% the front of BinData (shares color_table/3 with the local table).
global_color_table(BinData, NumColors) ->
    color_table(BinData, NumColors, []).
% ~~ EXTENSION BLOCKS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
% An number of extension blocks may appear in the data stream. All of these
% extensions are signalled by the first byte of the block, which is always
% "0x21". The approach here will be to ignore the ones that are not pertinent
% to this application, which will still require recognzing the blocks and
% skipping over the contained data.
% --- Sub-blocks --------------------------------------------------------------
% Both extension blocks and the image data are grouped into data sub-blocks.
%
% Each sub-block starts with a single byte stating the number of bytes of data
% in the following sub-block, followed by that many bytes of data. Once a
% sub-block with size zero is reached, no more sub-blocks follow.
%% @doc Reads all the consecutive sub-blocks starting at the beginning of the
%% given bit string.
%%
%% @returns All the binary data in the sub-blocks, as a single bit string.
%% @doc Read all consecutive data sub-blocks (length byte followed by
%% that many data bytes) until the zero-length terminator.
%%
%% @returns {AccumulatedData, RemainingBytes} where AccumulatedData is
%% all sub-block payloads concatenated into one binary.
next_sub_block(<<0:8, Remaining/binary>>, Acc) ->
    {Acc, Remaining};
next_sub_block(<<Size:8, Data:Size/binary, Remaining/binary>>, Acc) ->
    next_sub_block(Remaining, <<Acc/binary, Data/binary>>).
% --- Graphics Control Extension ----------------------------------------------
% Most of the extensions are handled in the main parse routines. However, some
% of the functionality in one of the extensions--the graphics control
% extension--is specified below.
%% @doc A temporary holding place for the information in the graphics control
%% extension. This information is later attached directly to the image
%% immediately following this extension block.
-record(graphic_control, {delay, disposal, transparency}).
% The first byte in the graphics control extension is a packed byte, the first
% three bits of which are reserved. Following this are three bits denoting the
% disposal method, which specifies how the decoder should handle the canvas
% after a image has been displayed, before displaying the next. This is used
% for animating multiple frames one after another. The possible values are:
% 0 - No disposal specified. This is used when there is no animation. The
% decoder should do nothing.
-define(DISPOSAL_UNSPECIFIED , 0).
% 1 - Do not dispose. Leave the canvas as it is, painting over it for the
% next frame. This allows for incremental changes between frames.
-define(DISPOSAL_NONE , 1).
% 2 - Restore to background color. The area belonging to the current image
% should be completely painted with the background color, which is
% specified in the logical screen descriptor.
-define(DISPOSAL_RESTORE_BG , 2).
% 3 - Restore to previous. The area belonging to the current image should be
% restored to whatever was present in that area before.
-define(DISPOSAL_RESTORE_PREV, 3).
% 4-7 - To be defined.
%% Disposal method (0..7) from the graphics control packed byte: the
%% three bits after the three reserved bits. See ?DISPOSAL_* above.
disposal_method(<<_Reserved:3, Method:3, _:2>>) ->
    Method.
% The next bit in the packed byte is the user input flag. Determines whether
% the application is expected to wait for some user input (as defined by the
% application) before displaying the next image. It is unlikely that this will
% be set, and this application will ignore it.
% The final bit is the transparency flag. If set, then the transparency index,
% given later in the graphics control extension, should be used to provide
% transparency in the image.
%% Transparency flag: the least significant bit of the graphics control
%% packed byte. When set, the transparency index should be honored.
transparency_flag(<<_:7, Flag:1>>) ->
    Flag =:= 1.
% ~~ IMAGE DESCRIPTOR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
% A single GIF file may contain multiple images (this is used in animated GIFs)
% and ech such image begins with an image descriptor block. The first byte of
% the image descriptor is the image separotor with a value of "2C". This will
% be matched by all of the functions below.
% A single image need not take up the entire canvas, so the first two bytes and
% the second two bytes specify the left offset and the top offset respectively.
% The third two bytes and the fourth two bytes are the width and the height
% respectively. Each is specified with the least significant byte first.
%% Geometry of one image from its 10-byte descriptor (leading 16#2c
%% image separator): left/top offsets then width/height, all 16-bit
%% little-endian. The trailing packed byte is ignored here.
image_descriptor_dim(<<16#2c,
                       Left:16/little, Top:16/little,
                       Width:16/little, Height:16/little,
                       _Packed:8>>) ->
    {{l, Left}, {t, Top}, {w, Width}, {h, Height}}.
% The last byte of the image descriptor is a packed field. The first bit is the
% local color table flag, which is "1" when the following image data has a
% local color table immediately following the image descriptor.
%% Whether a local color table immediately follows the image descriptor:
%% the most significant bit of the descriptor's packed byte.
local_color_table_flag(<<16#2c, _Geometry:64, Flag:1, _:7>>) ->
    Flag =:= 1.
% The second bit is the interlace flag.
% TODO?
% The third flag is the sort flag, which functions just like in the case of the
% logical screen descriptor, with a value of "1" stating that the local color
% table is sorted by "decreasing importance". Again, this can be ignored, as we
% will do.
% The fourth and fifth bits are reserved.
% The last three bits specify the size of the local color table. Again, the
% size is calculated by the the formula: size = 2^(N+1), where N is the value
% of the three bits in question. The size reported is the number of colors in
% the table.
%% @doc The last three bits of the image descriptor's packed byte give N,
%% where the local color table holds 2^(N+1) colors. Computed with an integer
%% shift rather than float math:pow/2 + round/1.
local_color_table_size(<<16#2c, _:69/bits, N:3>>) ->
    1 bsl (N + 1).
% The local color table immediately follows the image descriptor if the local
% color table flag is set and is identical in structure to the global color
% table (3 bytes per color), so we can reuse the same parser, color_table/3.
local_color_table(BinData, NumColors) ->
    color_table(BinData, NumColors, []).
% ~~ IMAGE DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
% The image data block contains LZW-encoded data that, when decoded, determines
% the colors to display on a canvas. The first few functions below are relevant
% to the structure of the block itself, and the meanings of the pertinent terms
% are explained in the LZW compression section following those functions.
% The first byte of the image data is the LZW minimum code size.
% The first byte of the image data is the LZW minimum code size; everything
% after it is the sequence of data sub-blocks.
lzw_minimum_code_size(<<MinCodeSize, SubBlocks/binary>>) ->
    {MinCodeSize, SubBlocks}.
%% @doc Read all consecutive sub-blocks, starting at the beginning of the given
%% bit string.
%%
%% @returns The binary data in the consecutive sub-blocks, divided into
%% individual bit strings, one per byte, plus the unconsumed remainder.
image_data_all_sub_blocks(BinData) ->
    {AllSubBlockData, Rest} = next_sub_block(BinData, <<>>),
    % Split the accumulated payload into 1-byte binaries, the shape the code
    % reader consumes.
    AllSubBlockBytes =
        lists:map(fun(Byte) -> <<Byte>> end, binary_to_list(AllSubBlockData)),
    {AllSubBlockBytes, Rest}.
% --- LZW Decompression -------------------------------------------------------
% The GIF 89a format encodes its image data using the Variable-Length-Code LZW
% Compression, which is a variation o the Lempel-Ziv Compression algorithm. In
% this compression algorithm, patterns of colors in the raw data are identified
% by codes in a translation table. The codes are then further represented by
% variable length codes concatenated and partitioned into 8-bit bytes.
% ---- Translation Table ------------------------------------------------------
% The translation, or code, table starts with all the colors in the color table
% (local if there is one, global otherwise) currently being used. This is
% followed by two special codes, the CLEAR code and the EOI (end of
% information) code, used to signal special events in the decompression
% process. In particular, the code table is re-initialized using only these
% entries whenever the CLEAR code is encountered.
% In order to provide a level of abstraction, a family of functions are
% implemented that abstract over the actual data structure used to implement
% the code table. Underneath, we will use an orddict, which is an arbitrary
% choice due to the lack of benchmarking (since performance is not a concern
% for this decoder).
-define(MAXIMUM_CODE_SIZE, 12).
%% Build a fresh code table: one entry per color index, followed by the two
%% special codes, CLEAR and EOI (always the last two entries added).
lzw_code_table_new(NumColors) ->
    Table = lzw_code_table_initialize(0, NumColors, orddict:new()),
    Table2 = lzw_code_table_add(Table, clear),
    Table3 = lzw_code_table_add(Table2, eoi),
    Table3.
%% Seed the table with the single-index pattern [I] for each color index
%% 0..NumColors-1, counting down the remaining count in the second argument.
lzw_code_table_initialize(_, 0, Table) ->
    Table;
lzw_code_table_initialize(I, NumColors, Table) ->
    NewTable = lzw_code_table_add(Table, [I]),
    lzw_code_table_initialize(I + 1, NumColors - 1, NewTable).
% Append Pattern under the next free code. Codes are assigned densely from 0,
% so the current table size is exactly the next unused code.
lzw_code_table_add(Table, Pattern) ->
    NextCode = orddict:size(Table),
    orddict:store(NextCode, Pattern, Table).
% Returns {ok, Value}, or `error` if the key is not present in the dictionary.
% This is the actual behavior for orddict:find, but even if the underlying
% choice of data structure were to change, this function would return values in
% the same format, so callers never depend on the representation.
lzw_code_table_lookup(Table, Code) ->
    orddict:find(Code, Table).
% Technically, a data structure used to implement the code table should grow
% dynamically, but a certain number of bits are needed to index into the code
% table if there are sufficiently many elements present. Thus, we need to check
% if the table contains the maximum number of elements that can be indexed by
% the given code size (in bits).
%% @doc True when the table holds the maximum number of entries addressable by
%% a CSize-bit code, i.e. 2^CSize. Uses an integer shift rather than the
%% float round-trip of math:pow/2 + round/1.
lzw_code_table_full(Table, CSize) ->
    orddict:size(Table) =:= (1 bsl CSize).
%% @doc Determines what the next code size should be, given the current table
%% and code size. The code size generally increases when the table is filled up
%% with the maximum number of entries addressable by the current code size.
lzw_code_table_next_code_size(Table, CSize) ->
    % One caveat is if the code table is filled up for the current code size,
    % and the code size can't increase (12-bit cap), then the next code in the
    % code stream will be a CLEAR code. The code size stays put in that case.
    ShouldIncreaseCCSize =
        (CSize < ?MAXIMUM_CODE_SIZE) and lzw_code_table_full(Table, CSize),
    case ShouldIncreaseCCSize of
        true -> CSize + 1;
        _ -> CSize
    end.
% ---- Data Stream Representation ---------------------------------------------
% The LZW compression algorithm converts a stream of indices into a stream of
% codes, with each code corresponding to some pattern of consecutive indices.
%
% The codes themselves are not stored as a single byte per code, as that would
% limit the number of codes to 256, as well as taking up unnecessary space for
% codes when there are fewer unique codes in the table at that point during the
% execution of the compression algorithm. Thus, the number of bits used to
% store a code varies over the execution of the algorithm.
%
% See the `code_reader` module for information about the exact representation.
% ---- Decompression Algorithm ------------------------------------------------
% The exact decompression algorithm has the following characteristics:
%
% * Originally, the code size is the LZW Minimum Code Size plus one, since
% the Minimum Code Size is only sufficient for a table consisting only of
% the colors, and not the two special codes, CLEAR and EOI.
%
% * Whenever the CLEAR code is encountered, the code table is rebuilt, and
% the current code size goes back to the original code size. The next code
% is immediately retrieved, and its value in the table is sent to the output
% stream. This is because the maximum code size is 12 bits. An encoder may
% choose to say with this code size when it is reached, not adding any more
% patterns to the code table.
%
% * After the first code following the CLEAR code is decoded, the following
% loop is executed:
%
% 1. Let CODE be the next code in the code stream, and {CODE} its
% corresponding pattern in the code table, if the code is in the table.
% Similarly, CODE-1 is the code that has just been decoded, and {CODE-1}
% its corresponding pattern.
%
% 2. If CODE is in the table:
%
% a. Output {CODE} to the output stream.
%
% b. Let K be the first index in {CODE}.
%
% c. Add {CODE-1} ++ [K] to the code table.
%
% 3. IF CODE is not in the table:
%
% a. Let K be the first index in {CODE-1}.
%
% b. Output {CODE-1} ++ [K] to the output stream.
%
% c. Add {CODE-1} ++ [K] to the code table.
%
% 4. Restart loop.
%
% * When the code table contains the maximum number of codes addressable by
% the current code size (i.e. after the code with index 2^{CodeSize}-1 is
% added), the code size is increased by 1.
%
% The exception is when the current code size is the maximum allowable code
% size, in which case the code size remains the same, and the next code is
% the clear code.
%
% * When the EOI code is encountered, decompression is complete, and the loop
% is terminated.
%% @doc The dynamic state of the decompression algorithm. Represents data
%% internal to the algorithm that changes from iteration to iteration. Does not
%% capture the actual data being decompressed.
-record(decompression_state, {
    % last decoded pattern; 'undefined' before the first code,
    % 'clear' immediately after a CLEAR code
    previous_index_sequence,
    code_size, % current variable code width, in bits
    table      % the LZW code table (orddict: code -> pattern)
}).
%% @doc The constant parameters driving the decompression algorithm. Does not
%% change over the execution of the algorithm.
-record(decompression_context, {num_colors, minimum_code_size}).
%% @doc Runs the decompression algorithm, one step at a time. Recursively calls
%% itself to continue the algorithm.
%%
%% @returns A list of lists, with each inner list containing an index
%% sequence.
%%
%% The outer list is in reverse order, but the individual sequences are each in
%% the correct order. It's more efficient to build up such a list. To post-
%% process the output, reverse the outer list, then flatten it:
%%
%%     lists:flatten(lists:reverse(Decoded))
lzw_decode_single(Compressed, Decoded, State, Context) ->
    #decompression_state{
        previous_index_sequence=PrevIndexSequence,
        code_size=CodeSize,
        table=Table
    } = State,
    {Code, RestCompressed} = code_reader:read_code(Compressed, CodeSize),
    case lzw_code_table_lookup(Table, Code) of
        {ok, clear} ->
            % CLEAR code: rebuild the table and reset the code size, then
            % continue decoding with a clean slate.
            io:format("Re-building code table~n"),
            #decompression_context{
                num_colors=NumColors,
                minimum_code_size=MinCodeSize
            } = Context,
            NewState = #decompression_state{
                previous_index_sequence=clear,
                code_size=MinCodeSize,
                table=lzw_code_table_new(NumColors)
            },
            lzw_decode_single(RestCompressed, Decoded, NewState, Context);
        % EOI code: decompression is complete.
        {ok, eoi} -> Decoded;
        {ok, IndexSequence} ->
            % CODE is in the table: output {CODE}, and add
            % {CODE-1} ++ [first index of {CODE}] -- unless this is the very
            % first code ('undefined') or the first code after a CLEAR.
            NewDecoded = [IndexSequence|Decoded],
            NewTable =
                if
                    PrevIndexSequence == undefined -> Table;
                    PrevIndexSequence == clear -> Table;
                    true ->
                        FirstIndex = hd(IndexSequence),
                        NextIndexSequence = PrevIndexSequence ++ [FirstIndex],
                        lzw_code_table_add(Table, NextIndexSequence)
                end,
            NewCodeSize = lzw_code_table_next_code_size(NewTable, CodeSize),
            NewState = #decompression_state{
                previous_index_sequence=IndexSequence,
                code_size=NewCodeSize,
                table=NewTable
            },
            lzw_decode_single(RestCompressed, NewDecoded, NewState, Context);
        error ->
            % CODE is not yet in the table: emit and add
            % {CODE-1} ++ [first index of {CODE-1}].
            % NOTE(review): hd/1 assumes the previous sequence is a list; a
            % malformed stream whose first code is unknown would crash here.
            FirstIndexInPreviousSequence = hd(PrevIndexSequence),
            IndexSequence =
                PrevIndexSequence ++ [FirstIndexInPreviousSequence],
            NewDecoded = [IndexSequence|Decoded],
            NewTable = lzw_code_table_add(Table, IndexSequence),
            NewCodeSize = lzw_code_table_next_code_size(NewTable, CodeSize),
            NewState = #decompression_state{
                previous_index_sequence=IndexSequence,
                code_size=NewCodeSize,
                table=NewTable
            },
            lzw_decode_single(RestCompressed, NewDecoded, NewState, Context)
    end.
%% Decode a full LZW code stream into a flat list of color indices.
%% Note: the context's `minimum_code_size` field is set to LZWMinCodeSize + 1,
%% i.e. the code size to reset to after a CLEAR code (see lzw_decode_single/4);
%% the field name is slightly misleading.
lzw_decode(ImageData, LZWMinCodeSize, NumColors) ->
    CodeSize = LZWMinCodeSize + 1,
    % The minimum allowable code size is 3 (corresponding to a LZW minimum code
    % size value of 2). This means that the table must contain at least 4
    % colors, so as to require 3 bits to address the clear and EOI codes in the
    % table.
    %
    % In practice, if the local color table contains two colors, then the extra
    % two "colors" in the padded table won't actually be referenced by any code
    % in the code stream. However, they need to be present in the table so that
    % the clear code is at the correct offset in the table.
    PaddedNumColors =
        if
            NumColors < 4 -> 4;
            true -> NumColors
        end,
    Table = lzw_code_table_new(PaddedNumColors),
    DecodedSequences = lzw_decode_single(
        ImageData,
        [],
        #decompression_state{
            previous_index_sequence=undefined,
            code_size=CodeSize,
            table=Table
        },
        #decompression_context{
            num_colors=PaddedNumColors,
            minimum_code_size=CodeSize
        }
    ),
    lists:flatten(lists:reverse(DecodedSequences)).
% == PARSED RECORD STRUCTURE ==================================================
%% @doc A structure describing the entire GIF file.
-record(parsed_gif, {
    w, h,        % the dimensions of the full output canvas, in pixels
    color_depth, % bits per pixel
    % The global color table. Used when there is no local color table for a
    % given image. May be missing (empty) if all contained images have their
    % own local color tables.
    colors=[],
    % The individual contained #parsed_img{} records, in file order once
    % parsing completes. Usually one per frame of animation.
    images=[]
}).
%% @doc A structure describing a single image within the GIF file. These images
%% are the ones that actually have pixels to be displayed.
-record(parsed_img, {
    % The dimensions of the bounding rectangle for this image. When rendering
    % animations, it's possible to paint over only part of the full canvas.
    l, t, w, h, % Left, Top, Width, Height
    disposal, delay, % animation-related metadata
    transparency, % index that represents a transparent pixel (-1 for none)
    % The local color table. If present, used to determine the colors for this
    % image. Otherwise, the global color table is used.
    colors=[],
    % The decoded image data, represented as a sequence of indexes into
    % whichever color table is being used.
    data=[]
}).
%% @doc Determine which color table--local or global--is the correct one to use
%% for this image.
%% @doc Determine which color table--local or global--is the correct one to use
%% for this image. An image with an empty local table falls back to the GIF's
%% global table.
color_table_for_image(#parsed_gif{colors = GlobalTable}, #parsed_img{colors = []}) ->
    GlobalTable;
color_table_for_image(_Gif, #parsed_img{colors = LocalTable}) ->
    LocalTable.
% == MAIN ROUTINES ============================================================
%% @doc Command-line entry point: read the named GIF file, parse it, and hand
%% the result to the SDL display loop. Prints a message and returns `error`
%% if the file cannot be read.
go([Filename | _]) ->
    case file:read_file(Filename) of
        {error, Reason} ->
            io:format("Unable to open file: ~s~n", [Reason]),
            error;
        {ok, Data} ->
            init_sdl(parse_data(Data))
    end.
% == MAIN PARSE ROUTINES ======================================================
%% @doc Validate the 6-byte GIF header, then parse the rest of the file.
%% Prints a message and returns `error` on an invalid header.
parse_data(<<Header:6/bytes, Rest/binary>>) ->
    case header_valid(Header) of
        true ->
            parse_logical_screen_descriptor(Rest);
        _ ->
            io:format("Invalid header: ~p~n", [Header]),
            error
    end.
%% @doc Parse the 7-byte logical screen descriptor: canvas dimensions, color
%% depth, and (optionally) the global color table that follows it.
%% The original used a single-branch `case` to destructure the dimensions; a
%% direct match is the idiomatic form and makes the intent clear.
parse_logical_screen_descriptor(<<Lsd:7/bytes, Rest/binary>>) ->
    {{w, W}, {h, H}} = screen_dim(Lsd),
    ColorDepth = color_depth(Lsd),
    ParsedDataWH = #parsed_gif{w=W, h=H, color_depth=ColorDepth},
    case global_color_table_flag(Lsd) of
        true ->
            GCTableSize = global_color_table_size(Lsd),
            BGIndex = background_color_index(Lsd),
            parse_global_color_table(Rest, ParsedDataWH,
                {{gc_table_size, GCTableSize},
                 {bg_index, BGIndex}});
        false ->
            parse_main_blocks(undefined, Rest, ParsedDataWH)
    end.
%% @doc Parse the global color table (3 bytes per color) that directly follows
%% the logical screen descriptor, then continue with the main blocks.
%% The background color index is currently dropped; the variable is
%% underscore-prefixed to silence the unused-variable warning.
parse_global_color_table(BinData, ParsedData,
                         {{gc_table_size, GCTableSize},
                          {bg_index, _BGIndex}}) ->
    GCTableByteLen = GCTableSize * 3,
    <<BinGCTable:GCTableByteLen/bytes, Rest/binary>> = BinData,
    GCTable = global_color_table(BinGCTable, GCTableSize),
    ParsedDataGC = ParsedData#parsed_gif{colors=GCTable},
    % TODO: preserve the background color index
    parse_main_blocks(undefined, Rest, ParsedDataGC).
%% @doc When the trailer (`0x3B`) is reached, there is no more data to parse.
%%
%% By the time the trailer is reached, there should no longer be an unattached
%% graphics control block.
parse_main_blocks(undefined, <<16#3b>>, ParsedData) ->
    io:format("Finished parsing image...~n"),
    % One caveat is that the images list is built up in reverse order, due to
    % efficiency. Now that all the images have been parsed, reverse their
    % order.
    #parsed_gif{images=Images} = ParsedData,
    ParsedData#parsed_gif{images=lists:reverse(Images)};
%% @doc `0x21 0xF9` marks the beginning of a graphic control extension block.
%%
%% This block, like all other extensions, contains a sub-block structure.
%% However, the block is guaranteed to contain one, fixed-length block, so the
%% entire block can be parsed in a single pattern match.
%%
%% The graphics control extension block modifies the rendering properties of
%% the next encountered image block. However, there may be other blocks in
%% between this graphics control extension block and the target image.
%%
%% By the time a new graphic control extension block is reached, there should
%% not be an unattached graphic control extension block from before.
parse_main_blocks(
    undefined,
    <<
        16#21:8, % extension introducer
        16#f9:8, % graphic control
        16#04:8, % sub-block size
        % The first byte is a packed byte. See the "Graphics Control Extension"
        % section above for more information about this byte. In particular,
        % the routines to unpack this byte are defined in that section.
        Packed:8/bits,
        % When not "0", specifies the amount of time to wait before displaying
        % the next image. The value is given in 1/100ths of a second and is
        % counted starting immediately after the image is rendered.
        DelayTime:16/little,
        % This is relevant only if the transparency flag is set in the packed
        % byte mentioned before. If so, any pixels with this index is
        % unmodified when rendering.
        TransparentIndex:8,
        0:8, % block terminator
        Rest/binary
    >>,
    ParsedData) ->
    DisposalMethod = disposal_method(Packed),
    % Use -1 as the "no transparency" sentinel; valid indices are >= 0.
    ParsedTransparencyIndex = case transparency_flag(Packed) of
        true -> TransparentIndex;
        false -> -1
    end,
    GraphicControl = #graphic_control{
        delay=DelayTime,
        disposal=DisposalMethod,
        transparency=ParsedTransparencyIndex
    },
    parse_main_blocks(GraphicControl, Rest, ParsedData);
%% @doc `0x2c` marks the beginning of an image, starting with the image
%% descriptor.
%%
%% As described in the variant of `parse_main_blocks/3` that matches a graphic
%% control extension, an image descriptor follows a graphic control extension.
%% However, an image may not have a graphic control extension at all, so the
%% image descriptor can be considered the start of its own main block.
parse_main_blocks(
    GraphicControl,
    BinData = <<16#2c:8, _/binary>>,
    ParsedData) ->
    parse_image_descriptor(GraphicControl, BinData, ParsedData);
%% @doc `0x21 0xFE` marks the beginning of a comment extension block. All the
%% bytes in the contained sub-blocks are ASCII characters.
parse_main_blocks(
    GraphicControl,
    <<16#21fe:16, BinData/binary>>,
    ParsedData) ->
    {AllSubBlockData, Rest} = next_sub_block(BinData, <<>>),
    Comment = binary_to_list(AllSubBlockData),
    io:format("Comment: ~s~n", [Comment]),
    parse_main_blocks(GraphicControl, Rest, ParsedData);
%% @doc `0x21 0x01` marks the beginning of a plain text extension block. This
%% block specifies text that should be rendered on top of the image. No major
%% renderer seems to support this extension, so this block will be skipped.
%%
%% The reason for recognizing this block explicitly is because an active
%% graphics control extension block can target a plain text extension block.
%% Thus, if there's an active graphics control extension block and a plain text
%% extension is encountered, the graphics control extension block should be
%% dropped.
parse_main_blocks(
    _,
    <<16#2101:16, BinData/binary>>,
    ParsedData) ->
    io:format("Found plain text extension. Ignoring~n"),
    {_, Rest} = next_sub_block(BinData, <<>>),
    parse_main_blocks(undefined, Rest, ParsedData);
%% @doc In general, `0x21` marks the beginning of some extension block. Any
%% extension block not handled in the above variants of `parse_main_blocks/3`
%% fall through to here, where all the sub-blocks are read and skipped.
parse_main_blocks(
    GraphicControl,
    <<16#21:8, ExtType:8, BinData/binary>>,
    ParsedData) ->
    io:format("Found extension with unknown type: ~b. Ignoring~n", [ExtType]),
    {_, Rest} = next_sub_block(BinData, <<>>),
    parse_main_blocks(GraphicControl, Rest, ParsedData).
%% With no pending graphic control extension, fall back to neutral animation
%% metadata (no delay, unspecified disposal, no transparency) before parsing
%% the descriptor proper.
parse_image_descriptor(undefined, BinData, ParsedData) ->
    parse_image_descriptor(
        #graphic_control{
            delay=0,
            disposal=?DISPOSAL_UNSPECIFIED,
            transparency=-1
        },
        BinData,
        ParsedData
    );
parse_image_descriptor(
    #graphic_control{
        delay=DelayTime,
        disposal=DisposalMethod,
        transparency=TransparencyIndex
    },
    <<ImDesc:10/bytes, Rest/binary>>,
    ParsedData) ->
    io:format("Found new image~n"),
    {{l, L}, {t, T}, {w, W}, {h, H}} = image_descriptor_dim(ImDesc),
    ParsedImage = #parsed_img{
        l=L,
        t=T,
        w=W,
        h=H,
        disposal=DisposalMethod,
        delay=DelayTime,
        transparency=TransparencyIndex
    },
    % A local color table, when present, sits between the descriptor and the
    % image data and must be consumed first.
    case local_color_table_flag(ImDesc) of
        true ->
            parse_local_color_table(
                Rest,
                ParsedData,
                ParsedImage,
                local_color_table_size(ImDesc)
            );
        false ->
            parse_image_data(Rest, ParsedData, ParsedImage)
    end.
%% Read a local color table of LCTableSize colors (3 bytes each) and attach it
%% to the current image before parsing the image data that follows.
parse_local_color_table(BinData, ParsedData, ParsedImage, LCTableSize) ->
    LCTableByteLen = LCTableSize * 3,
    <<BinLCTable:LCTableByteLen/bytes, Rest/binary>> = BinData,
    LCTable = local_color_table(BinLCTable, LCTableSize),
    ParsedImageLC = ParsedImage#parsed_img{colors=LCTable},
    parse_image_data(Rest, ParsedData, ParsedImageLC).
%% Consume the LZW minimum code size and all image-data sub-blocks, decode
%% them, attach the decoded indices to the image, and resume main-block
%% parsing. Images are prepended (reversed again at the trailer).
parse_image_data(BinData, ParsedData, ParsedImage) ->
    {LZWMinCodeSize, SubBlocks} = lzw_minimum_code_size(BinData),
    NumColors = length(color_table_for_image(ParsedData, ParsedImage)),
    {ParsedImageData, Rest} = image_data_all_sub_blocks(SubBlocks),
    ImageDataDecoded = lzw_decode(ParsedImageData, LZWMinCodeSize, NumColors),
    OldImages = ParsedData#parsed_gif.images,
    ParsedImageFull = ParsedImage#parsed_img{data=ImageDataDecoded},
    ParsedDataIm = ParsedData#parsed_gif{images=[ParsedImageFull|OldImages]},
    % Now that the last encountered graphics control extension block (if any)
    % has been successfully attached to the current image, reset to no active
    % graphics control extension block.
    parse_main_blocks(undefined, Rest, ParsedDataIm).
% == MAIN SDL ROUTINES ========================================================
% Note from Avik: I use a Hi-DPI display, and a larger zoom looks better. For
% anyone not using a Hi-DPI display, a zoom level of 2 still won't look bad.
-define(ZOOM_DEFAULT, 2).  % initial zoom factor
-define(ZOOM_MIN, 2).      % lower bound enforced by change_zoom/2
-define(ZOOM_MAX, 32).     % upper bound enforced by change_zoom/2
-define(ZOOM_INCREASE, 1). % zoom action tag: grow by one step
-define(ZOOM_DECREASE, 2). % zoom action tag: shrink by one step
-define(DISP_BPP, 16).     % display bits per pixel for the SDL video mode
%% Entry point for the display side: initialize SDL video and start the render
%% loop. A GIF with no images is rejected up front; SDL is always shut down
%% before returning.
init_sdl(#parsed_gif{images=[]}) ->
    io:format("No images to display; shutting down...~n"),
    ok;
init_sdl(ParsedData) ->
    case init_video(ParsedData) of
        error ->
            sdl:quit(),
            error;
        ok ->
            % draw_image/3 blocks until the user quits.
            draw_image(ParsedData, [], ?ZOOM_DEFAULT),
            io:format("Shutting down...~n"),
            sdl:quit(),
            ok
    end.
%% @doc Initialize the SDL video subsystem and open a window sized for the GIF
%% canvas at the default zoom. Returns `ok` on success, `error` otherwise.
%% Fixed: the window caption is now set only after the video mode has been
%% established successfully, instead of unconditionally before the check.
init_video(#parsed_gif{w=W, h=H}) ->
    sdl:init(?SDL_INIT_VIDEO bor ?SDL_INIT_ERLDRIVER),
    case resize_canvas(W, H, ?ZOOM_DEFAULT) of
        error ->
            io:format("Can't set video mode~n"),
            error;
        _Surface ->
            % TODO: set the title to the image filename?
            sdl_video:wm_setCaption("giferly", []),
            ok
    end.
%% @doc (Re)establish the video mode at W x H scaled by Zoom, repainting the
%% checkered background on success. Returns the new surface, or `error` if the
%% mode could not be set.
resize_canvas(W, H, Zoom) ->
    case sdl_video:setVideoMode(W * Zoom, H * Zoom, ?DISP_BPP, ?SDL_SWSURFACE) of
        error ->
            error;
        Surface ->
            draw_background(),
            Surface
    end.
%% Fill the whole surface white, overlay a light-gray 4-pixel checker pattern
%% (the conventional "transparent" backdrop), then flush the full rectangle.
draw_background() ->
    SurfaceRef = sdl_video:getVideoSurface(),
    Surface = sdl_video:getSurface(SurfaceRef),
    #sdl_surface{w=W, h=H} = Surface,
    DestRect = #sdl_rect{x=0, y=0, w=W, h=H},
    BGColor = sdl_video:mapRGB(Surface, 255, 255, 255),
    sdl_video:fillRect(Surface, DestRect, BGColor),
    BGColor2 = sdl_video:mapRGB(Surface, 192, 192, 192),
    draw_bg_checkers(Surface, 0, 0, W, H, 4, BGColor2),
    sdl_video:updateRect(SurfaceRef, 0, 0, W, H).
%% Recursively paint every other Size x Size cell with Color, scanning left to
%% right, top to bottom. The `if` clause order matters: the Y bound is checked
%% before the X bound, and the two parity clauses are exhaustive, so the `if`
%% can never fail.
draw_bg_checkers(Surface, X, Y, W, H, Size, Color) ->
    Parity = (X div Size + Y div Size) rem 2,
    if
        Y > H -> ok;
        X > W ->
            % End of row: wrap to the start of the next row of cells.
            draw_bg_checkers(Surface, 0, Y + Size, W, H, Size, Color);
        Parity =:= 0 ->
            draw_bg_checkers(Surface, X + Size, Y, W, H, Size, Color);
        Parity =/= 0 ->
            DestRect = #sdl_rect{x=X, y=Y, w=Size, h=Size},
            sdl_video:fillRect(Surface, DestRect, Color),
            draw_bg_checkers(Surface, X + Size, Y, W, H, Size, Color)
    end.
%% Render the head of the image list and wait for input. An empty list means
%% "start over from the first frame", so stepping past the last image loops
%% back to the beginning of the animation.
draw_image(ParsedData, [], Zoom) ->
    #parsed_gif{images=Images} = ParsedData,
    draw_image(ParsedData, Images, Zoom);
draw_image(ParsedData, Images, Zoom) ->
    [Image|ImagesRest] = Images,
    ColorTable = color_table_for_image(ParsedData, Image),
    paint_pixels(Image, ColorTable, Zoom),
    % The current image stays at the head: it is only popped on 'next'.
    handle_next_event(ParsedData, [Image|ImagesRest], Zoom).
%% Event loop: spin on check_event/0 until the user zooms (rebuild the canvas
%% and redraw), advances to the next image (pop the head), or quits.
handle_next_event(ParsedData, Images, Zoom) ->
    case check_event() of
        ok ->
            handle_next_event(ParsedData, Images, Zoom);
        {zoom, ZoomType} ->
            NewZoom = change_zoom(Zoom, ZoomType),
            #parsed_gif{w=W, h=H} = ParsedData,
            resize_canvas(W, H, NewZoom),
            draw_image(ParsedData, Images, NewZoom);
        next ->
            io:format("Moving to next image...~n"),
            [_|ImagesRest] = Images,
            draw_image(ParsedData, ImagesRest, Zoom);
        quit ->
            ok
    end.
%% @doc Step the zoom factor up or down by one, saturating at the configured
%% limits. Bounds checks are expressed as guards instead of `if` bodies.
change_zoom(OldZoom, ?ZOOM_INCREASE) when OldZoom >= ?ZOOM_MAX ->
    OldZoom;
change_zoom(OldZoom, ?ZOOM_INCREASE) ->
    OldZoom + 1;
change_zoom(OldZoom, ?ZOOM_DECREASE) when OldZoom =< ?ZOOM_MIN ->
    OldZoom;
change_zoom(OldZoom, ?ZOOM_DECREASE) ->
    OldZoom - 1.
%% Blit one image's decoded pixels onto the canvas, skipping pixels whose
%% index equals the transparency index, then flush the affected rectangle.
%% NOTE(review): lists:zip/2 requires length(Data) =:= W * H; a truncated data
%% stream would crash here -- confirm the decoder guarantees this.
paint_pixels(
    #parsed_img{
        l=L,
        t=T,
        w=W,
        h=H,
        transparency=TransparentIndex,
        data=Data
    },
    ColorTable,
    Zoom) ->
    Xs = lists:seq(L, L + W - 1),
    Ys = lists:seq(T, T + H - 1),
    % The decoded pixels are in row-major order. That means all the pixels for
    % a given row (i.e. a constant Y-coordinate) are consecutive. Thus, when
    % constructing the flattened list of coordinates, it's important to iterate
    % over the Y-values first, then the X-values.
    Coords = lists:map
        (fun(Y) -> lists:map(fun(X) -> {X, Y} end, Xs) end, Ys),
    RawCoordData = lists:zip(lists:flatten(Coords), Data),
    CoordData = lists:filter
        (fun({_, Index}) -> Index =/= TransparentIndex end, RawCoordData),
    SurfaceRef = sdl_video:getVideoSurface(),
    Surface = sdl_video:getSurface(SurfaceRef),
    lists:foreach(fun(Pixel) ->
        sdl_put_pixel(Surface, Pixel, ColorTable, Zoom) end, CoordData),
    sdl_video:updateRect(SurfaceRef, L * Zoom, T * Zoom, W * Zoom, H * Zoom),
    ok.
%% Draw one source pixel as a Zoom x Zoom rectangle. Index is 0-based (GIF
%% convention) while lists:nth/2 is 1-based, hence the + 1.
%% NOTE(review): lists:nth/2 is O(n) per pixel; fine for this viewer.
sdl_put_pixel(Surface, {{X, Y}, Index}, ColorTable, Zoom) ->
    DestRect = #sdl_rect{x=X * Zoom, y=Y * Zoom, w=Zoom, h=Zoom},
    #color{r=R, g=G, b=B} = lists:nth(Index + 1, ColorTable),
    SdlColor = sdl_video:mapRGB(Surface, R, G, B),
    sdl_video:fillRect(Surface, DestRect, SdlColor).
%% @doc Poll the SDL event queue once and translate the event into one of the
%% loop commands: `quit`, `{zoom, ...}`, `next`, or `ok` (nothing to do).
%% Unrecognized events are logged and ignored.
%% Fixed: stray dataset-metadata text after the final `end.` (a syntax error)
%% has been removed.
check_event() ->
    case sdl_events:pollEvent() of
        #quit{} -> quit;
        #keyboard{sym=?SDLK_q} -> quit;
        #keyboard{sym=?SDLK_ESCAPE} -> quit;
        #keyboard{state=?SDL_RELEASED, sym=?SDLK_EQUALS} ->
            {zoom, ?ZOOM_INCREASE};
        #keyboard{state=?SDL_RELEASED, sym=?SDLK_MINUS} ->
            {zoom, ?ZOOM_DECREASE};
        no_event -> ok;
        #keyboard{state=?SDL_RELEASED, sym=$n} -> next;
        #keyboard{state=?SDL_RELEASED, sym=?SDLK_SPACE} -> next;
        Event ->
            io:format("Got event ~p~n", [Event]),
            ok
    end.
%% -------------------------------------------------------------------
%%
%% Copyright (c) 2016 Basho Technologies, Inc.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%%
%% @doc This module contains kludges that should go away.
%%
%% @deprecated Use other BRT modules.
%%
-module(brt_fudge).
%-deprecated(module).
-export([
test_deps/1
]).
-include("brt.hrl").
-spec test_deps(Where :: brt:app_spec() | brt:fs_path() | brt:rebar_state())
        -> [brt:app_name()] | brt:err_result().
%
% It would be better to use xref on test modules, which would pick up meck and
% the like (including quickcheck, which would need to be filtered out), but
% for now just look for the pattern we use for cuttlefish schema files and
% assume we actually test them.
%
% Proper results will be obtained by compiling with the `prod' and `test'
% profiles, running xref on both, and diffing the results to come up with the
% apps that are used only in test mode.
% That can be handled with some plugin magic to swap profiles on the fly,
% but I'm not biting it off right now.
%
test_deps({Name, Path, _}) ->
    test_deps([Name], Path);
test_deps(Path) when erlang:is_list(Path) ->
    % Read each ebin/*.app file and extract its application name, erroring
    % out on malformed or unreadable files.
    Func = fun(File) ->
        case file:consult(File) of
            {ok, [{application, App, [_|_]}]} ->
                App;
            {ok, _} ->
                erlang:error(app_malformed, [File]);
            {error, What} ->
                {error, Error} = brt:file_error(File, What),
                erlang:error(Error)
        end
    end,
    Apps = lists:map(Func,
        filelib:wildcard(filename:join([Path, "ebin", "*.app"]))),
    Names = case Apps of
        [_] ->
            Apps;
        _ ->
            % Zero or multiple .app files: also derive an app name from the
            % directory name ("name-vsn" form) and include it if new.
            % NOTE(review): list_to_atom/1 on a path-derived string grows the
            % atom table; acceptable for a build tool on trusted input.
            Name = erlang:list_to_atom(erlang:hd(
                string:tokens(filename:basename(Path), "-"))),
            case lists:member(Name, Apps) of
                true ->
                    Apps;
                _ ->
                    [Name | Apps]
            end
    end,
    test_deps(Names, Path);
test_deps(State) when ?is_rebar_state(State) ->
    test_deps(
        [rebar_app_info:name(AI) || AI <- rebar_state:project_apps(State)],
        rebar_state:dir(State)).
-spec test_deps(Names :: [atom() | binary()], Path :: brt:fs_path())
        -> [brt:app_name()].
%% Scan the candidate app names: if any has a priv/<name>.schema file, assume
%% the test suite exercises it with cuttlefish and report that dependency.
%% cuttlefish itself can never be its own test dependency.
%% Fixed: stray dataset-metadata text after the final `[].` (a syntax error)
%% has been removed.
test_deps([cuttlefish | _], _) ->
    [];
test_deps([Name | Names], Path) ->
    Schema = filename:join([Path, "priv", brt:to_string(Name) ++ ".schema"]),
    case filelib:is_regular(Schema) of
        true ->
            [cuttlefish];
        _ ->
            test_deps(Names, Path)
    end;
test_deps([], _) ->
    [].
%%--------------------------------------------------------------------
%% Copyright (c) 2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(esockd_connection_sup_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
%% Return every t_* testcase in this module (discovered by esockd_ct:all/1).
all() -> esockd_ct:all(?MODULE).
%% Mock the transport layer with meck so start_connection/3 can run without a
%% real socket, then verify the spawned connection process is alive.
%% Fixed: unused fun parameters (`Sock` in recv, `Sock`/`Data` in send) are now
%% underscore-prefixed to silence compiler warnings; behavior is unchanged.
t_start_connection(_) ->
    ok = meck:new(esockd_transport, [non_strict, passthrough, no_history]),
    ok = meck:expect(esockd_transport, peername, fun(_Sock) -> {ok, {{127,0,0,1}, 3456}} end),
    ok = meck:expect(esockd_transport, wait, fun(Sock) -> {ok, Sock} end),
    ok = meck:expect(esockd_transport, recv, fun(_Sock, 0) -> {ok, <<"Hi">>} end),
    ok = meck:expect(esockd_transport, send, fun(_Sock, _Data) -> ok end),
    ok = meck:expect(esockd_transport, controlling_process, fun(_Sock, _ConnPid) -> ok end),
    ok = meck:expect(esockd_transport, ready, fun(_ConnPid, _Sock, []) -> ok end),
    with_conn_sup([{max_connections, 1024}],
                  fun(ConnSup) ->
                      {ok, ConnPid} = esockd_connection_sup:start_connection(ConnSup, sock, []),
                      ?assert(is_process_alive(ConnPid))
                  end),
    ok = meck:unload(esockd_transport).
%% Access rules are prepended, so the most recently added rule appears first.
%% Note that the asserted output shows 172.16.1.1/16 normalized to its network
%% address, 172.16.0.0/16.
t_allow_deny(_) ->
    AccessRules = [{allow, "192.168.1.0/24"}],
    with_conn_sup([{access_rules, AccessRules}],
                  fun(ConnSup) ->
                      ?assertEqual([{allow, "192.168.1.0/24"}],
                                   esockd_connection_sup:access_rules(ConnSup)),
                      ok = esockd_connection_sup:allow(ConnSup, "10.10.0.0/16"),
                      ok = esockd_connection_sup:deny(ConnSup, "172.16.1.1/16"),
                      ?assertEqual([{deny, "172.16.0.0/16"},
                                    {allow, "10.10.0.0/16"},
                                    {allow, "192.168.1.0/24"}
                                   ], esockd_connection_sup:access_rules(ConnSup))
                  end).
%% A freshly started supervisor has no shutdown counts recorded.
t_get_shutdown_count(_) ->
    with_conn_sup([{max_connections, 1024}],
                  fun(ConnSup) ->
                      ?assertEqual([], esockd_connection_sup:get_shutdown_count(ConnSup))
                  end).
%% A freshly started supervisor holds zero connections.
t_count_connections(_) ->
    with_conn_sup([{max_connections, 1024}],
                  fun(ConnSup) ->
                      ?assertEqual(0, esockd_connection_sup:count_connections(ConnSup))
                  end).
%% The max_connections limit can be read back and changed at runtime.
t_get_set_max_connections(_) ->
    with_conn_sup([{max_connections, 100}],
                  fun(ConnSup) ->
                      ?assertEqual(100, esockd_connection_sup:get_max_connections(ConnSup)),
                      ok = esockd_connection_sup:set_max_connections(ConnSup, 200),
                      ?assertEqual(200, esockd_connection_sup:get_max_connections(ConnSup))
                  end).
%% Unknown calls are answered with `ignore`; unknown casts and infos are
%% silently dropped, leaving the state untouched.
t_handle_unexpected(_) ->
    {reply, ignore, state} = esockd_connection_sup:handle_call(req, from, state),
    {noreply, state} = esockd_connection_sup:handle_cast(msg, state),
    {noreply, state} = esockd_connection_sup:handle_info(info, state).
%% Helper: start a connection supervisor with Opts, run Fun against it, and
%% always stop the supervisor afterwards. Returns `ok`.
%% Fixed: stray dataset-metadata text after the final `.` (a syntax error) has
%% been removed, and the stop is now in an `after` clause so a failing test
%% body no longer leaks the supervisor into subsequent testcases.
with_conn_sup(Opts, Fun) ->
    {ok, ConnSup} = esockd_connection_sup:start_link(Opts, {echo_server, start_link, []}),
    try
        Fun(ConnSup)
    after
        ok = esockd_connection_sup:stop(ConnSup)
    end,
    ok.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.