code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
%%%-------------------------------------------------------------------
%%% @author <NAME>, <NAME> <>
%%% @copyright (C) 2013, <NAME>, <NAME>, <NAME>
%%% @doc
%% Arithmetic Simplifications Core - Basic straightforward arithmetic simplifications. The following simplifications are covered:
%% <ul>
%%<li><b>Sum by zero</b> - Expressions of type <i>Exp + 0</i> and <i>0 + Exp</i> are transformed into <i>Exp</i>. For example, <i>X + 0</i> is modified to <i>X</i>.</li>
%%<li><b>Subtraction by zero</b> - Subtractions with zero as the second operand are modified. For instance, <i>Y - 0</i> becomes <i>Y</i>. </li>
%%<li><b>Subtraction from zero</b> - Expressions that match <i>0 - Exp</i> are altered to <i>-Exp</i>. To exemplify, <i>0 - Y</i> is changed to <i>-Y</i>. </li>
%%<li><b>Multiplication by one</b> - Expressions of the formats <i>Exp * 1</i> and <i>1 * Exp</i> are simplified to <i>Exp</i>. To illustrate, <i>Z * 1</i> is modified to <i>Z</i>.</li>
%%<li><b>Multiplication by zero</b> - Multiplications containing zero as an operand are simplified to zero.</li>
%%<li><b>Division by one</b> - Divisions by one, using the operator <i>(div)</i>, are replaced by the numerator. For example, <i>3 div 1</i> is transformed to <i>3</i>.</li>
%% </ul>
%%
%% These simplifications can be applied to any valid Erlang expression.
%%% @end
%%%-------------------------------------------------------------------
-module(core_arit_simpl).
%% Include files
-include("wrangler.hrl").
-export([rules/2]).
%%--------------------------------------------------------------------
%% @doc
%% Returns the list of arithmetic simplification rules. This list includes, in order, rules for the following possibilities:
%%<ul>
%%<li>Sub by zero</li>
%%<li>Sub from zero</li>
%%<li>Sum by zero</li>
%%<li>Multiplication by one rule</li>
%%<li>Multiplication by zero </li>
%%<li>Division by one </li>
%%</ul>
%% @spec rules(term(), term()) -> [rule()]
%% @end
%%--------------------------------------------------------------------
-spec(rules(_,_) -> [{'rule',fun(),list() | tuple()},...]).
%% Both arguments (supplied by the symbolic-evaluation framework) are
%% ignored: the arithmetic rule set is static.
%% NOTE: the rules are applied in list order, so the subtraction rules are
%% tried before the addition, multiplication and division rules.
rules(_,_) ->
    [
     subZero_rule_1(),
     subZero_rule_2(),
     sumZero_rule_1(),
     sumZero_rule_2(),
     multByOne_rule_1(),
     multByOne_rule_2(),
     multByZero_rule_1(),
     multByZero_rule_2(),
     divByOne_rule()
    ].
%%%===================================================================
%%% Functions with rules
%%%===================================================================
%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function represents a rule that substitutes an expression of type "sub-expression + 0" by "sub-expression". e.g: <i>"Exp + 0"</i> is <b>transformed</b> to <i>"Exp</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "Expr@ + 0" is rewritten to just Expr@.
%% The final 'true' is the rule's guard, so it fires unconditionally
%% wherever the template matches.
sumZero_rule_1() ->
    ?RULE(?T("Expr@ + 0"),
          Expr@,
          true
         ).
%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function represents a rule that substitutes an expression of type "0 + sub-expression" by "sub-expression". e.g: <i>"0 + Exp"</i> is <b>transformed</b> to <i>"Exp"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "0 + Expr@" is rewritten to just Expr@ (mirror
%% of sumZero_rule_1/0); fires unconditionally.
sumZero_rule_2() ->
    ?RULE(?T("0 + Expr@"),
          Expr@,
          true).
%%--------------------------------------------------------------------
%%@private
%% @doc
%% This function represents a rule that substitutes an expression of type "sub-expression - 0" by "sub-expression". e.g: <i>"Exp - 0"</i> is <b>transformed</b> to <i>"Exp"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "Expr@ - 0" is rewritten to just Expr@;
%% fires unconditionally.
subZero_rule_1() ->
    ?RULE(?T("Expr@ - 0"),
          Expr@,
          true).
%%--------------------------------------------------------------------
%%@private
%% @doc
%% This function represents a rule that substitutes an expression of type "0 - sub-expression" by "-sub-expression". e.g: <i>"0 - Exp"</i> is <b>transformed</b> to <i>"-Exp"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "0 - Expr@" is rewritten to the unary-minus form
%% "-Expr@".  ?TO_AST is needed here because the replacement is a new
%% expression rather than an existing metavariable.
subZero_rule_2() ->
    ?RULE(?T("0 - Expr@"),
          ?TO_AST("-Expr@"),
          true).
%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function represents a rule that substitutes an expression of type "sub-expression * 0" by "0". e.g: <i>"Exp * 0"</i> is <b>transformed</b> to <i>"0"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "Expr@ * 0" collapses to the literal 0.
%% NOTE(review): this discards Expr@, which is only sound if Expr@ has no
%% side effects -- presumably guaranteed by the surrounding refactoring
%% framework; verify against callers.
multByZero_rule_1() ->
    ?RULE(?T("Expr@ * 0"),
          ?TO_AST("0"),
          true).
%%--------------------------------------------------------------------
%% @private
%% @doc
%% This function represents a rule that substitutes an expression of type "0 * sub-expression" by "0". e.g: <i>"0 * Exp"</i> is <b>transformed</b> to <i>"0"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "0 * Expr@" collapses to the literal 0 (mirror
%% of multByZero_rule_1/0).
multByZero_rule_2() ->
    ?RULE(?T("0 * Expr@"),
          ?TO_AST("0"),
          true).
%%--------------------------------------------------------------------
%%@private
%% @doc
%% This function represents a rule that substitutes an expression of type "sub-expression * 1" by "sub-expression". e.g: <i>"Exp * 1"</i> is <b>transformed</b> to <i>"Exp"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "Expr@ * 1" is rewritten to just Expr@;
%% fires unconditionally.
multByOne_rule_1() ->
    ?RULE(?T("Expr@ * 1"),
          Expr@,
          true).
%%--------------------------------------------------------------------
%%@private
%% @doc
%% This function represents a rule that substitutes an expression of type "1 * sub-expression" by "sub-expression". e.g: <i>"1 * Exp"</i> is <b>transformed</b> to <i>"Exp"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "1 * Expr@" is rewritten to just Expr@ (mirror
%% of multByOne_rule_1/0).
multByOne_rule_2() ->
    ?RULE(?T("1 * Expr@"),
          Expr@,
          true).
%%--------------------------------------------------------------------
%%@private
%% @doc
%% This function represents a rule that substitutes an expression of type "sub-expression div 1" by "sub-expression". e.g: <i>"Exp div 1"</i> is <b>transformed</b> to <i>"Exp"</i>.
%% @end
%%--------------------------------------------------------------------
%% Wrangler template rule: "Expr@ div 1" is rewritten to just Expr@.
%% Only integer division (div) is covered; "/" is not simplified here.
divByOne_rule() ->
    ?RULE(?T("Expr@ div 1"),
          Expr@,
true).
%% @doc The `couchdb_mango' module contains functionality listed under CouchDB API
%% Reference section 1.3.6.
%%
%% Now let’s do something a little more useful: create databases. For the strict, CouchDB
%% is a database management system (DMS). That means it can hold multiple databases. A
%% database is a bucket that holds "related data".
%%
-module(couchdb_databases).
-include("couchdb.hrl").
-include("../../dev.hrl").
-export([
exists/1
,exists/2
,info/1
,create/1
,create/2
,delete/1
,delete/2
,all_docs/1
,all_docs/2
,bulk_docs_save/2
,bulk_docs_save/3
,compact/1
,compact/2
,ensure_full_commit/1
,ensure_full_commit/2
,get_missing_revs/2
]).
%% @doc Returns true if database exists, false if not
%%
%% Uses HTTP Headers containing a minimal amount of information about the specified db
%% @equiv exists(Server, DatabaseName)
%% Convenience clause: unpack the server and name from the #db{} record and
%% delegate to exists/2, which performs the actual HEAD probe.
-spec(exists(Database::db()) -> boolean()).
exists(#db{server = #server{} = Srv, name = Name}) ->
    exists(Srv, Name).
%% %reference CouchDB Docs 1.3.1/HEAD
%% @doc Returns true if database exists, false if not
%% Returns the HTTP Headers containing a minimal amount of information about the specified
%% database. Since the response body is empty, using the HEAD method is a lightweight way to
%% check if the database exists already or not.
-spec(exists(Server::server(), Name::binary()) -> boolean()).
exists(#server{url=ServerUrl, options=Opts}, <<DbName/binary>>) ->
    %% Reject syntactically invalid names locally, before any network I/O.
    case couchdb:database_name_is_valid(DbName) of
        true ->
            Url = hackney_url:make_url(ServerUrl, DbName, []),
            %% HEAD has an empty response body, so it is the cheapest probe.
            %% Any non-200 outcome (404, transport error, ...) is reported as
            %% "does not exist" -- a connection failure is indistinguishable
            %% from a missing database here.
            case couchdb_httpc:db_request(head, Url, [], <<>>, Opts, [200]) of
                {ok, 200, _}->
                    true;
                _Error ->
                    false
            end;
        false -> false
    end.
%% %reference CouchDB Docs 1.3.1/GET
%% @doc get database info
%% Fetch the database information object (GET /{db}).
%% NOTE(review): the success value is the decoded JSON body, not a binary,
%% so the original {ok, binary()} spec was wrong and has been widened.
-spec(info(Database::db()) -> {ok, term()} | {error, term()}).
info(#db{server=Server, name=DbName, options=Opts}) ->
    Url = hackney_url:make_url(couchdb_httpc:server_url(Server), DbName, []),
    case couchdb_httpc:db_request(get, Url, [], <<>>, Opts, [200]) of
        {ok, _Status, _Headers, Ref} ->
            Infos = couchdb_httpc:json_body(Ref),
            {ok, Infos};
        {error, not_found} ->
            %% Translate the generic 404 into a db-specific error reason.
            {error, db_not_found};
        Error ->
            Error
    end.
%% %reference CouchDB Docs 1.3.1/PUT
%% @doc Create a database
%%
%% Connections are made to:
%% ```http://Host:PortPrefix/DbName'''
%%
%% If ssl is set https is used. See server_connections for options.
%% Params is a list of optional query arguments you want to pass to the
%% db. Useful for bigcouch for example.
%% DB names must conform to ^[a-z][a-z0-9_$()+/-]*$
-spec(create(Database::db(), Params::list()) -> {ok, db()} | {error, term()}).
create(#db{server=#server{url=ServerUrl}, name=DbName, options=Options}=Db, Params) ->
    Url = hackney_url:make_url(ServerUrl, DbName, Params),
    %% CouchDB answers 201 on success; a 412 (precondition failed) means the
    %% database already exists and is mapped to {error, db_exists} below.
    Resp = couchdb_httpc:db_request(put, Url, [], <<>>, Options, [201]),
    case Resp of
        {ok, _Status, _Headers, Ref} ->
            %% Drain the (uninteresting) body so hackney can reuse the socket.
            hackney:skip_body(Ref),
            {ok, Db};
        {error, precondition_failed} ->
            {error, db_exists};
        Error ->
            Error
    end;
%% Second clause: given a server and a database name, build the #db{} record
%% first, then create it with default parameters.
%% @equiv create(Db, [])
create(#server{}=Server, <<DbName/binary>>) ->
    {ok, Db} = couchdb:database_record(Server, DbName),
    create(Db, []).
%% %reference CouchDB Docs 1.3.1/PUT
%% @doc Create a database
%% @equiv create(Db, [])
%% Create the database with default (empty) query parameters.
-spec(create(Database::db()) -> {ok, db()} | {error, term()}).
create(#db{} = Db) ->
    create(Db, []).
%% %reference CouchDB Docs 1.3.1/DELETE
%% @doc delete database
%% Delete the database (DELETE /{db}).
%% NOTE(review): on success this returns the decoded JSON response body
%% (the server's acknowledgement object), not a #db{} record, so the
%% original {ok, db()} spec was wrong and has been widened.
-spec(delete(Database::db()) -> {ok, term()} | {error, term()}).
delete(#db{server=#server{url=ServerUrl}, name=DbName, options=Opts}) ->
    Url = hackney_url:make_url(ServerUrl, DbName, []),
    Resp = couchdb_httpc:request(delete, Url, [], <<>>, Opts),
    case couchdb_httpc:db_resp(Resp, [200]) of
        {ok, _, _, Ref} ->
            {ok, couchdb_httpc:json_body(Ref)};
        Error ->
            Error
    end.
%% %reference CouchDB Docs 1.3.1/DELETE
%% @equiv delete(Db)
%% Delete a database identified by server and name: look the #db{} record up
%% and delegate to delete/1.
%% Fixes: the head previously destructured url/options into unused variables
%% for no reason, and the success type (decoded JSON, not db()) was wrong.
-spec(delete(Server::server(), DbName::binary()) -> {ok, term()} | {error, term()}).
delete(#server{} = Server, <<DbName/binary>>) ->
    {ok, Db} = couchdb:database_record(Server, DbName),
    delete(Db).
%% %reference CouchDB Docs 1.3.1/DELETE
%% @equiv all_docs(Db, #{})
%% List all documents with default options.
%% Fixes: the head previously bound `Server' without using it (compiler
%% warning) and destructured fields it never read; the success type is the
%% decoded JSON body, not a binary, so the spec is widened accordingly.
-spec(all_docs(Database::db()) -> {ok, term()} | {error, term()}).
all_docs(#db{} = Db) ->
    all_docs(Db, #{}).
%% %reference CouchDB Docs 1.3.2/GET
%% @doc All Docs
%%
%% Options:
%% - include_docs (true|false)
%%
%% List all documents (GET /{db}/_all_docs).
%% NOTE(review): the success value is the decoded JSON body, not a binary,
%% so the original {ok, binary()} spec was wrong and has been widened.
-spec(all_docs(Database::db(), Options::map()) -> {ok, term()} | {error, term()}).
all_docs(#db{server=Server, name=_DbName, options=DbOpts}=Db, #{}=Options) ->
    %% Only the <<"include_docs">> option is currently honoured; any other
    %% entry in Options is silently ignored.
    QueryParams = case Options of
        #{<<"include_docs">> := true} -> <<"include_docs=true">>;
        _ -> []
    end,
    Url = hackney_url:make_url(
        couchdb_httpc:server_url(Server),
        [couchdb_httpc:db_url(Db), <<"_all_docs">>],
        QueryParams
    ),
    case couchdb_httpc:db_request(get, Url, [], <<>>, DbOpts, [200]) of
        {ok, _Status, _Headers, Ref} ->
            Docs = couchdb_httpc:json_body(Ref),
            {ok, Docs};
        {error, not_found} ->
            {error, db_not_found};
        Error ->
            Error
    end.
%
% BULK
%
%% %reference CouchDB Docs 1.3.4
%% @doc NIY: This method can be called to query several documents in bulk. It is well suited for
%% fetching a specific revision of documents, as replicators do for example, or for getting
%% revision history.
bulk_get() -> niy.
%% %reference CouchDB Docs 1.3.5.2
%% @doc save a list of documents
%% @equiv save_docs(Db, Docs, [])
%% Save Docs with default options (new_edits left at the server default).
bulk_docs_save(Db, Docs) ->
    bulk_docs_save(Db, Docs, []).
%% Save a non-empty list of document maps via POST /{db}/_bulk_docs.
%% Documents lacking an <<"_id">> get a generated one, so the server's reply
%% can be correlated back to the request documents.  Options may contain
%% [{new_edits, boolean()}] (used e.g. by replicators to push existing
%% revisions verbatim); anything else is ignored.
bulk_docs_save(#db{server=Server, options=Opts}=Db, [_Car | _Cdr]=Documents, Options) when is_list(Options) ->
    Headers = [{<<"Content-Type">>, <<"application/json">>}],
    Url = hackney_url:make_url(
        couchdb_httpc:server_url(Server),
        [couchdb_httpc:db_url(Db), <<"_bulk_docs">>],
        []
    ),
    %% Assign ids to documents that do not have one yet.
    Documents1 = lists:map(fun(Document) ->
        case maps:is_key(<<"_id">>, Document) of
            true -> Document;
            false -> maps:put(<<"_id">>, couchdb:generate_unique_id(), Document)
        end
    end, Documents),
    ReqObj = case Options of
        [{new_edits, Val}] when is_boolean(Val) -> #{<<"docs">> => Documents1, <<"new_edits">> => Val};
        _Other -> #{<<"docs">> => Documents1}
    end,
    Body = couchdb_ejson:encode(ReqObj),
    case couchdb_httpc:db_request(post, Url, Headers, Body, Opts, [201]) of
        {ok, 201, _, Ref} ->
            Response = couchdb_httpc:json_body(Ref),
            %% Pair each request document with its server answer by id and
            %% merge the new _rev in.  Only entries the server acknowledged
            %% (<<"ok">> := true) are returned, so rejected documents are
            %% silently dropped from the result.  NOTE(review): this pairing
            %% is O(N*M) over the two lists; fine for modest batch sizes.
            BulkList = [ maps:merge(Doc0, #{<<"_id">> => Id1, <<"_rev">> => Rev})
                || #{<<"_id">> := Id0}=Doc0 <- Documents1,
                   #{<<"id">> := Id1, <<"rev">> := Rev, <<"ok">> := true}=_Doc1 <- Response,
                   Id0 =:= Id1],
            {ok, BulkList};
        Error ->
            Error
    end.
% NOT TESTED
%% %reference CouchDB Docs 1.3.13
%% @doc Compaction compresses the database file by removing unused
%% sections created during updates.
%% See [http://wiki.apache.org/couchdb/Compaction] for more informations
%% @spec compact(Db::db()) -> ok|{error, term()}
%% Trigger database compaction (POST /{db}/_compact).  The server replies
%% 202 Accepted immediately; compaction runs asynchronously.
compact(#db{server=Server, options=Opts}=Db) ->
    Url = hackney_url:make_url(couchdb_httpc:server_url(Server), [couchdb_httpc:db_url(Db),
                                                                  <<"_compact">>],
                               []),
    Headers = [{<<"Content-Type">>, <<"application/json">>}],
    case couchdb_httpc:db_request(post, Url, Headers, <<>>, Opts, [202]) of
        {ok, _, _, Ref} ->
            %% Drain the body so hackney can reuse the connection.
            hackney:skip_body(Ref),
            ok;
        Error ->
            Error
    end.
% NOT TESTED
%% %reference CouchDB Docs 1.3.14
%% @doc Like compact/1 but this compacts the view index from the
%% current version of the design document.
%% See [http://wiki.apache.org/couchdb/Compaction#View_compaction] for more informations
%% @spec compact(Db::db(), ViewName::string()) -> ok|{error, term()}
%% Trigger view-index compaction for one design document
%% (POST /{db}/_compact/{design}).  Like compact/1, the 202 reply only means
%% the job was accepted.
compact(#db{server=Server, options=Opts}=Db, DesignName) ->
    Headers = [{<<"Content-Type">>, <<"application/json">>}],
    Url = hackney_url:make_url(
        couchdb_httpc:server_url(Server), [couchdb_httpc:db_url(Db), <<"_compact">>, DesignName], []
    ),
    case couchdb_httpc:db_request(post, Url, Headers, <<>>, Opts, [202]) of
        {ok, _, _, Ref} ->
            hackney:skip_body(Ref),
            ok;
        Error ->
            Error
    end.
% NOT TESTED
%% %reference CouchDB Docs 1.3.15
%% @doc Commits any recent changes to the specified database to disk. You should call this if you want to ensure that recent changes have been flushed. This function is likely not required, assuming you have the recommended configuration setting of delayed_commits=false, which requires CouchDB to ensure changes are written to disk before a 200 or similar result is returned.
%% @equiv ensure_full_commit(Db, [])
%% Commit recent changes to disk with default options.
ensure_full_commit(Db) ->
    ensure_full_commit(Db, []).
%% @doc Commits any recent changes to the specified database to disk. You should call this if you want to ensure that recent changes have been flushed. This function is likely not required, assuming you have the recommended configuration setting of delayed_commits=false, which requires CouchDB to ensure changes are written to disk before a 200 or similar result is returned.
-spec ensure_full_commit(Db::db(), Options::list()) -> {ok, InstancestartTime :: binary()} | {error, term()}.
%% POST /{db}/_ensure_full_commit.  On success the server's reply is matched
%% strictly (<<"ok">> must be true), so an unexpected body shape crashes the
%% caller with a badmatch rather than returning stale data.
ensure_full_commit(#db{server=Server, options=Opts}=Db, Options) ->
    Headers = [{<<"Content-Type">>, <<"application/json">>}],
    Url = hackney_url:make_url(couchdb_httpc:server_url(Server), [couchdb_httpc:db_url(Db), <<"_ensure_full_commit">>], Options),
    case couchdb_httpc:db_request(post, Url, Headers, <<>>, Opts, [201]) of
        {ok, 201, _, Ref} ->
            #{
              <<"ok">> := true,
              <<"instance_start_time">> := InstanceStartTime} = couchdb_httpc:json_body(Ref),
            {ok, InstanceStartTime};
        Error ->
            Error
    end.
% NOT TESTED
%% %reference CouchDB Docs 1.3.20
%% @doc Given a list of document revisions, returns the document revisions that do not exist in the database.
-spec get_missing_revs(#db{}, [{binary(), [binary()]}]) -> {ok, [{DocId :: binary(), [MissingRev :: binary()], [PossibleAncestor :: binary()]}]} | {error, term()}.
%% POST /{db}/_revs_diff with a DocId -> [Rev] mapping; the server answers
%% with the revisions it does not have (used by replicators to decide what
%% to push).
get_missing_revs(#db{server=Server, options=Opts}=Db, IdRevs) ->
    Headers = [{<<"Content-Type">>, <<"application/json">>}],
    Url = hackney_url:make_url(couchdb_httpc:server_url(Server), [couchdb_httpc:db_url(Db), <<"_revs_diff">>],[]),
    %% {IdRevs} is the EJSON one-tuple encoding of a JSON object.
    Json = couchdb_ejson:encode({IdRevs}),
    case couchdb_httpc:db_request(post, Url, Headers, Json, Opts, [200]) of
        {ok, _RespCode, _, Ref} ->
            Listing = couchdb_httpc:json_body(Ref),
            {ok, Listing};
        Error -> Error
end.
%%% @author <NAME> <<EMAIL>>
%%% @copyright (C) 2021, <NAME>
%%% @doc
%%% Basic associative network.
%%% @end
%%% Created : 20 Feb 2021 by <NAME> <<EMAIL>>
-module(network).
-export([empty/0, empty/1, from_edges/1, from_edges/2, ring/2]).
-export([neighbors/2, all_nodes/1, all_edges/1, foreach_node/2]).
-record(network, {type = undirected :: edge_type(),
adjacency_list = #{} :: #{term() => gb_sets:set()}}).
-export_type([network/0]).
-type network() :: #network{}.
-type edge() :: {term(), term()}.
-type edge_type() :: undirected | directed.
%% @doc Create an empty undirected network.
-spec empty() -> network().
%% Default to an undirected network.
empty() ->
    empty(undirected).
%% @doc Create an empty network of the given `Type'.
%% Build an empty network of the requested type.  Only 'undirected' is
%% supported; anything else fails with function_clause.
-spec empty(Type :: edge_type()) -> network().
empty(undirected) ->
    %% adjacency_list already defaults to #{} in the record definition.
    #network{type = undirected}.
%% @doc Create an undirected network from a list of edges.
-spec from_edges(EdgeList :: list(edge())) -> network().
%% Default to an undirected network.
from_edges(EdgeList) ->
    from_edges(EdgeList, undirected).
%% @doc
%% Create a network of type `Type' from a list of edges.
%% @end
%% Build an undirected network by inserting every edge {U, V} into the
%% adjacency map in both directions (V into U's neighbour set and U into
%% V's).  Neighbour sets are gb_sets, so duplicate edges are collapsed.
-spec from_edges(EdgeList :: list(edge()), Type :: edge_type()) -> network().
from_edges(EdgeList, undirected) ->
    AddNeighbor =
        fun(Node, Neighbor, Adj) ->
                maps:update_with(
                  Node,
                  fun(Set) -> gb_sets:add_element(Neighbor, Set) end,
                  gb_sets:from_list([Neighbor]),
                  Adj)
        end,
    Adjacency =
        lists:foldl(
          fun({U, V}, Adj0) ->
                  %% Record the edge in both directions.
                  AddNeighbor(U, V, AddNeighbor(V, U, Adj0))
          end, #{}, EdgeList),
    #network{type = undirected, adjacency_list = Adjacency}.
%% @doc Return the neighbors of `Node'.
-spec neighbors(Node :: term(), Network :: network()) -> list(term()).
%% Returns the neighbours of Node as a sorted list (gb_sets order).
%% NOTE: crashes with {badkey, Node} if Node is not in the network.
neighbors(Node, #network{adjacency_list = AdjacencyList}) ->
    gb_sets:to_list(maps:get(Node, AdjacencyList)).
%% @doc Return a list of all nodes in `Network'.
-spec all_nodes(Network :: network()) -> list(term()).
%% Every node that appears in at least one edge (nodes only enter the
%% adjacency map via from_edges/2).
all_nodes(#network{adjacency_list = AdjacencyList}) ->
    maps:keys(AdjacencyList).
%% @doc Return a list of all edges in `Network'.
-spec all_edges(Network :: network()) -> list(edge()).
%% Each undirected edge is stored in both directions, so the Node < N filter
%% emits it exactly once (with the smaller endpoint first).
%% NOTE(review): the strict < also drops self-loops {X, X} if any were
%% inserted -- confirm whether self-loops are meant to be supported.
all_edges(#network{adjacency_list = AdjacencyList}) ->
    maps:fold(
      fun(Node, Neighbors, Edges) ->
              [{Node, N} || N <- gb_sets:to_list(Neighbors), Node < N] ++ Edges
      end, [], AdjacencyList).
%% @doc Apply `Fun' to each node in `Network'.
%% @doc Apply `Fun' to each node in `Network' (for side effects); returns ok.
-spec foreach_node(Fun :: fun((term()) -> term()), Network :: network()) -> ok.
foreach_node(Fun, Network) ->
    %% Reuse all_nodes/1 instead of duplicating the record access, so the
    %% two functions cannot drift apart.
    lists:foreach(Fun, all_nodes(Network)).
%% @doc Construct a ring network with `NumNodes' nodes and `Radius'.
-spec ring(NumNodes :: pos_integer(), Radius :: pos_integer()) -> network().
%% Nodes are 0..NumNodes-1; each node X is connected to the next Radius
%% nodes, wrapping around via `rem NumNodes'.  from_edges/2 then adds both
%% directions of every edge.
%% Assumes Radius < NumNodes -- TODO confirm; a larger radius would produce
%% self-loop edges {X, X}.
ring(NumNodes, Radius) ->
    Edges = lists:flatten(
              [[{X, Y rem NumNodes} || Y <- lists:seq(X+1, X + Radius)]
               || X <- lists:seq(0, NumNodes - 1)]),
    network:from_edges(Edges).
%%%
%%% Copyright 2020 RBKmoney
%%%
%%% Licensed under the Apache License, Version 2.0 (the "License");
%%% you may not use this file except in compliance with the License.
%%% You may obtain a copy of the License at
%%%
%%% http://www.apache.org/licenses/LICENSE-2.0
%%%
%%% Unless required by applicable law or agreed to in writing, software
%%% distributed under the License is distributed on an "AS IS" BASIS,
%%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%%% See the License for the specific language governing permissions and
%%% limitations under the License.
%%%
-module(machinegun_hay).
-behaviour(hay_metrics_handler).
-export([child_spec/3]).
%% how_are_you callbacks
-export([init/1]).
-export([get_interval/1]).
-export([gather_metrics/1]).
%% Types
-type options() :: #{
interval => timeout()
}.
-export_type([options/0]).
%% Internal types
-record(state, {
interval :: timeout(),
namespace :: mg_core:ns(),
registry :: mg_core_procreg:options()
}).
-type state() :: #state{}.
-type worker() :: {mg_core:ns(), mg_core:id(), pid()}.
-type metric() :: how_are_you:metric().
-type metric_key() :: how_are_you:metric_key().
-type metric_value() :: how_are_you:metric_value().
-type metrics() :: [metric()].
%% API
-spec child_spec(options() | undefined, mg_core_workers_manager:options(), _ChildID) -> supervisor:child_spec().
%% Builds the supervisor child spec for the how_are_you handler; an
%% 'undefined' Options argument is treated as an empty map.
child_spec(Options, ManagerOptions, ChildID) ->
    HandlerOptions = {genlib:define(Options, #{}), ManagerOptions},
    hay_metrics_handler:child_spec({?MODULE, HandlerOptions}, ChildID).
-spec init({options(), mg_core_workers_manager:options()}) -> {ok, state()}.
%% Captures the namespace and process registry from the workers-manager
%% options; the polling interval defaults to 10 seconds (in milliseconds).
init({Options, #{name := NS, registry := Registry}}) ->
    {ok, #state{
            interval = maps:get(interval, Options, 10 * 1000),
            namespace = NS,
            registry = Registry
           }}.
-spec get_interval(state()) -> timeout().
%% hay callback: how often gather_metrics/1 is invoked, in milliseconds.
get_interval(#state{interval = Interval}) ->
    Interval.
-spec gather_metrics(state()) -> [hay_metrics:metric()].
%% hay callback: list the live workers of the namespace and aggregate their
%% per-process stats under the [mg, workers, NS] key prefix.
gather_metrics(#state{namespace = NS, registry = Procreg}) ->
    Workers = mg_core_worker:list(Procreg, NS),
    WorkerStats = workers_stats([mg, workers, NS], Workers),
    WorkerStats.
%% Internals
-spec workers_stats(metric_key(), [worker()]) -> metrics().
%% Emits one gauge with the worker count plus, for each sampled process_info
%% key (memory, message_queue_len), a set of distribution metrics computed
%% over all workers.
workers_stats(KeyPrefix, Workers) ->
    Metrics = [gauge([KeyPrefix, number], erlang:length(Workers))],
    %% #{InfoKey => [Value]} accumulated across all live workers.
    WorkersStats = lists:foldl(fun extract_worker_stats/2, #{}, Workers),
    maps:fold(
      fun(Info, Values, Acc) ->
              stat_metrics([KeyPrefix, Info], Values, Acc)
      end,
      Metrics,
      WorkersStats
     ).
-spec extract_worker_stats(worker(), Acc) -> Acc when Acc :: #{atom() => [number()]}.
%% Sample one worker's process_info and fold the values into the per-key
%% accumulator.  process_info returns 'undefined' for a process that died
%% between listing and sampling; such workers are simply skipped.
extract_worker_stats({_NS, _ID, Pid}, Acc) ->
    case erlang:process_info(Pid, interest_worker_info()) of
        undefined ->
            Acc;
        ProcessInfo ->
            append_list(ProcessInfo, Acc)
    end.
%% Fold a key/value pair list into a map of lists, prepending each value to
%% the list already stored under its key (so lists end up in reverse
%% insertion order).
-spec append_list([{K, V}], #{K => [V]}) -> #{K => [V]}.
append_list(Pairs, Acc0) ->
    lists:foldl(
      fun({Key, Value}, Acc) ->
              Existing = maps:get(Key, Acc, []),
              Acc#{Key => [Value | Existing]}
      end,
      Acc0,
      Pairs).
%% The process_info/2 item keys sampled for every worker process.
-spec interest_worker_info() -> [atom()].
interest_worker_info() ->
    [memory, message_queue_len].
-spec stat_metrics(metric_key(), [number()], metrics()) -> metrics().
%% Compute a fixed set of distribution statistics (via the bear library)
%% over Values and prepend one gauge per statistic to Acc.
stat_metrics(KeyPrefix, Values, Acc) ->
    BearKeys = [
        min,
        max,
        {percentile, [50, 75, 90, 95, 99]},
        skewness,
        kurtosis,
        variance
    ],
    Statistics = bear:get_statistics_subset(Values, BearKeys),
    lists:foldl(
        fun(S, Acc1) -> bear_metric(KeyPrefix, S, Acc1) end,
        Acc,
        Statistics
    ).
-spec bear_metric(metric_key(), BearStat, metrics()) -> metrics() when
      BearStat :: {StatKey, StatValue},
      StatKey :: atom(),
      StatValue :: number() | PercentileValues,
      PercentileValues :: [{integer(), number()}].
%% Percentiles arrive as one entry carrying a list, so they fan out into one
%% gauge each; every other bear statistic maps to a single gauge.
bear_metric(KeyPrefix, {percentile, Percentiles}, Acc) ->
    [bear_percentile_metric(KeyPrefix, P) || P <- Percentiles] ++ Acc;
bear_metric(KeyPrefix, {StatKey, StatValue}, Acc) ->
    [gauge([KeyPrefix, StatKey], StatValue) | Acc].
-spec bear_percentile_metric(metric_key(), {integer(), number()}) -> metric().
%% A percentile {95, Value} becomes a gauge under the key suffix <<"p95">>.
bear_percentile_metric(KeyPrefix, {Percentile, Value}) ->
    gauge([KeyPrefix, <<"p", (erlang:integer_to_binary(Percentile))/binary>>], Value).
-spec gauge(metric_key(), metric_value()) -> metric().
%% Thin wrapper around how_are_you's metric constructor for gauges.
gauge(Key, Value) ->
    how_are_you:metric_construct(gauge, Key, Value).
%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2011-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% %CopyrightEnd%
%%
-module(beam_split).
-export([module/2]).
-import(lists, [reverse/1]).
-spec module(beam_utils:module_code(), [compile:option()]) ->
                    {'ok',beam_utils:module_code()}.
%% Compiler pass entry point: run split_blocks/1 over every function of the
%% module; exports, attributes and label count pass through unchanged.
module({Mod,Exp,Attr,Fs0,Lc}, _Opts) ->
    Fs = [split_blocks(F) || F <- Fs0],
    {ok,{Mod,Exp,Attr,Fs,Lc}}.
%% We must split the basic block when we encounter instructions with labels,
%% such as catches and BIFs. All labels must be visible outside the blocks.
%% Split the basic blocks of one function so that every instruction carrying
%% a fail label ends up outside any {block,...} wrapper.
split_blocks({function,Name,Arity,CLabel,Is0}) ->
    Is = split_blocks(Is0, []),
    {function,Name,Arity,CLabel,Is}.
%% Walk the instruction stream (accumulating in reverse); only {block,...}
%% instructions need processing -- everything else is copied through.
split_blocks([{block,Bl}|Is], Acc0) ->
    Acc = split_block(Bl, [], Acc0),
    split_blocks(Is, Acc);
split_blocks([I|Is], Acc) ->
    split_blocks(Is, [I|Acc]);
split_blocks([], Acc) -> reverse(Acc).
%% split_block(BlockIs, CurrentBlock, OuterAcc)
%%
%% Scans the instructions inside one block.  Plain instructions collect in
%% CurrentBlock (reversed); whenever an instruction with a visible label is
%% met, the block built so far is closed with make_block/2 and the
%% instruction is emitted in its stand-alone (non-block) form.
split_block([{set,[R],[_,_,_]=As,{bif,is_record,{f,Lbl}}}|Is], Bl, Acc) ->
    %% is_record/3 must be translated by beam_clean; therefore,
    %% it must be outside of any block.
    split_block(Is, [], [{bif,is_record,{f,Lbl},As,R}|make_block(Bl, Acc)]);
%% A BIF with a non-zero fail label: the label must stay visible.
split_block([{set,[R],As,{bif,N,{f,Lbl}=Fail}}|Is], Bl, Acc) when Lbl =/= 0 ->
    split_block(Is, [], [{bif,N,Fail,As,R}|make_block(Bl, Acc)]);
%% raise always leaves the block; keep its preceding line annotation with it.
split_block([{set,[],[],{line,_}=Line},
             {set,[R],As,{bif,raise,{f,_}=Fail}}|Is], Bl, Acc) ->
    split_block(Is, [], [{bif,raise,Fail,As,R},Line|make_block(Bl, Acc)]);
%% GC BIF with a non-zero fail label.
split_block([{set,[R],As,{alloc,Live,{gc_bif,N,{f,Lbl}=Fail}}}|Is], Bl, Acc)
  when Lbl =/= 0 ->
    split_block(Is, [], [{gc_bif,N,Fail,Live,As,R}|make_block(Bl, Acc)]);
%% put_map with a non-zero fail label.
split_block([{set,[D],[S|Puts],{alloc,R,{put_map,Op,{f,Lbl}=Fail}}}|Is],
            Bl, Acc) when Lbl =/= 0 ->
    split_block(Is, [], [{put_map,Fail,Op,S,D,R,{list,Puts}}|
                         make_block(Bl, Acc)]);
%% try/catch markers always live outside blocks.
split_block([{set,[R],[],{try_catch,Op,L}}|Is], Bl, Acc) ->
    split_block(Is, [], [{Op,R,L}|make_block(Bl, Acc)]);
%% Anything else stays inside the current block.
split_block([I|Is], Bl, Acc) ->
    split_block(Is, [I|Bl], Acc);
split_block([], Bl, Acc) -> make_block(Bl, Acc).
%% Close the (reversed) block accumulator and push the resulting {block,...}
%% onto Acc.  An empty accumulator produces no block at all.
make_block([], Acc) -> Acc;
make_block([{set,[D],Ss,{bif,Op,Fail}}|Bl]=Bl0, Acc) ->
    %% If the last instruction in the block is a comparison or boolean operator
    %% (such as '=:='), move it out of the block to facilitate further
    %% optimizations.
    Arity = length(Ss),
    case erl_internal:comp_op(Op, Arity) orelse
        erl_internal:new_type_test(Op, Arity) orelse
        erl_internal:bool_op(Op, Arity) of
        false ->
            [{block,reverse(Bl0)}|Acc];
        true ->
            I = {bif,Op,Fail,Ss,D},
            %% Avoid emitting an empty {block,[]} wrapper.
            case Bl =:= [] of
                true -> [I|Acc];
                false -> [I,{block,reverse(Bl)}|Acc]
            end
    end;
make_block([{set,[Dst],[Src],move}|Bl], Acc) ->
    %% Make optimization of {move,Src,Dst}, {jump,...} possible.
    I = {move,Src,Dst},
    case Bl =:= [] of
        true -> [I|Acc];
        false -> [I,{block,reverse(Bl)}|Acc]
    end;
make_block(Bl, Acc) -> [{block,reverse(Bl)}|Acc].
-module(rstar_geometry).
-export([new/3, origin/1, point2d/3, point3d/4, bounding_box/1,
area/1, intersect/2, num_edges/1, margin/1, center/1,
distance/2, min_dist/2, value/1]).
-include("../include/rstar.hrl").
% Creates a new geometry record
-spec new(integer(), list({float(), float()}), any()) -> {error, badarg} | #geometry{}.
new(Dimensions, _, _) when Dimensions < 1 -> {error, badarg};
new(Dimensions, MBR, Value) ->
case valid_axes(MBR, 0) of
{ok, Dimensions} ->
#geometry{dimensions=Dimensions, mbr=MBR, value=Value};
_ -> {error, badarg}
end.
% Creates a point at the origin with the proper dimensionality
-spec origin(integer()) -> #geometry{} | {error, badarg}.
origin(Dimensions) when Dimensions < 1 -> {error, badarg};
origin(Dimensions) ->
MBR = [{0, 0} || _D <- lists:seq(1, Dimensions)],
#geometry{dimensions=Dimensions, mbr=MBR}.
% Helper to create a 2D point (a degenerate MBR whose min equals max on
% both axes) carrying Value.
-spec point2d(float(), float(), any()) -> #geometry{}.
point2d(X, Y, Value) -> new(2, [{X, X}, {Y, Y}], Value).
% Helper to create a 3D point (degenerate MBR) carrying Value.
-spec point3d(float(), float(), float(), any()) -> #geometry{}.
point3d(X, Y, Z, Value) -> new(3, [{X, X}, {Y, Y}, {Z, Z}], Value).
% Returns a new geometry which is a bounding box of
% the given geometries. The result reuses the first geometry's
% dimensionality and clears its value; [] yields {error, badarg}.
-spec bounding_box([#geometry{}]) -> #geometry{}.
bounding_box([First | More]) ->
bounding_box_r(First, More, First#geometry.mbr);
bounding_box([]) -> {error, badarg}.
% Recursively folds each remaining geometry into the running bound.
bounding_box_r(G, [First | MoreGeo], Bound) ->
NB = bounding_mbr(First#geometry.mbr, Bound, []),
bounding_box_r(G, MoreGeo, NB);
bounding_box_r(G, [], Bound) ->
G#geometry{mbr=Bound, value=undefined}.
% Merges two MBRs axis-by-axis into the smallest MBR enclosing both.
% Both MBR lists must have the same length; Acc accumulates the merged
% axes in reverse (pass [] initially).
bounding_mbr([{MinA, MaxA} | RestA], [{MinB, MaxB} | RestB], Acc) ->
Axis = {erlang:min(MinA, MinB), erlang:max(MaxA, MaxB)},
bounding_mbr(RestA, RestB, [Axis | Acc]);
bounding_mbr([], [], Acc) -> lists:reverse(Acc).
% Returns the area (hyper-volume) of the given geometry: the product of
% every axis extent (Max - Min).
-spec area(#geometry{}) -> float().
area(Geometry) ->
area_r(Geometry#geometry.mbr, 1).
% Folds the axis extents into a running product, starting from Product.
area_r(Axes, Product) ->
lists:foldl(fun({Min, Max}, Acc) -> Acc * (Max - Min) end, Product, Axes).
% Returns the number of edges in a given geometry's bounding box:
% an N-dimensional box has 2^(N-1) * N edges.
-spec num_edges(#geometry{}) -> integer().
num_edges(Geometry) ->
N = Geometry#geometry.dimensions,
% math:pow/2 returns a float, so truncate back to an integer.
trunc(math:pow(2, N - 1) * N).
% Returns the margin (total edge length) of the given geometry.
-spec margin(#geometry{}) -> float().
margin(Geometry) ->
% Sum length of each axis
AxisSum = margin_r(Geometry#geometry.mbr, 0),
% Scaling factor: how many parallel edges run along each axis
% (num_edges / dimensions).
Scale = num_edges(Geometry) / Geometry#geometry.dimensions,
% Multiply the sum by the scale
AxisSum * Scale.
% Adds the extent (Max - Min) of every axis onto the accumulator Acc.
margin_r(Axes, Acc) ->
Acc + lists:sum([Max - Min || {Min, Max} <- Axes]).
% Returns the overlapping geometry, or undefined when the two geometries
% do not overlap on every axis.
-spec intersect(#geometry{}, #geometry{}) -> #geometry{} | undefined.
intersect(Geo1, Geo2) ->
intersect_r(Geo1, Geo1#geometry.mbr, Geo2#geometry.mbr, []).
% Computes the per-axis overlap; bails out with undefined as soon as one
% axis has no overlap (i.e. Min > Max after clamping).
intersect_r(G, [{MinA, MaxA} | More1], [{MinB, MaxB} |More2], Intersect) ->
Min = if
MinA < MinB -> MinB;
true -> MinA
end,
Max = if
MaxA < MaxB -> MaxA;
true -> MaxB
end,
if
Min =< Max ->
intersect_r(G, More1, More2, [{Min, Max} | Intersect]);
true -> undefined
end;
intersect_r(G, [], [], Intersect) ->
G#geometry{mbr=lists:reverse(Intersect), value=undefined}.
% Returns the center of a given geometry as a point geometry (an MBR
% whose min and max coincide on each axis); the value is cleared.
-spec center(#geometry{}) -> #geometry{}.
center(Geo) ->
% Average to get the center points along each axis
CenterPoint = [{(Min+Max)/2.0, (Min+Max)/2.0} ||
{Min, Max} <- Geo#geometry.mbr],
Geo#geometry{mbr=CenterPoint, value=undefined}.
% Returns the Euclidean distance between two points. For non-point
% geometries only the minimum bound of each axis is compared.
-spec distance(#geometry{}, #geometry{}) -> float().
distance(Geo1, Geo2) ->
% Sum the square distances
SumDistance = distance_r(Geo1#geometry.mbr,
Geo2#geometry.mbr, 0),
% The square root is the Euclidean distance
math:sqrt(SumDistance).
% Accumulates the squared per-axis difference of the Min coordinates;
% the Max coordinates are intentionally ignored.
distance_r([{MinA, _} | More1], [{MinB, _} | More2], Sum) ->
distance_r(More1, More2, math:pow(MinB - MinA, 2) + Sum);
distance_r([], [], Sum) -> Sum.
% Returns the minimum distance between a point and rectangle.
% NOTE(review): unlike distance/2, no square root is taken here -- the
% result is the sum of squared per-axis gaps. Confirm callers expect the
% squared MINDIST (common for R*-tree pruning, where ordering suffices).
-spec min_dist(#geometry{}, #geometry{}) -> float().
min_dist(Point, Rect) ->
min_dist_r(Point#geometry.mbr, Rect#geometry.mbr, 0.0).
% For each axis, adds the squared distance from the point coordinate P
% to the nearest edge of the rectangle's [Lo, Hi] interval (0 inside).
min_dist_r([{P, _} | RestPoint], [{Lo, Hi} | RestRect], Acc) ->
%% max(0, max(Lo - P, P - Hi)) is the gap between P and [Lo, Hi].
Gap = erlang:max(0, erlang:max(Lo - P, P - Hi)),
min_dist_r(RestPoint, RestRect, Acc + math:pow(Gap, 2));
min_dist_r([], [], Acc) -> Acc.
% Verifies that every axis {Min, Max} satisfies Max >= Min.
% Returns {ok, AxisCount} on success, or {error, {badarg, BadAxis}} for
% the first invalid axis encountered.
valid_axes([], Count) -> {ok, Count};
valid_axes([{Lo, Hi} | Rest], Count) when Hi >= Lo ->
valid_axes(Rest, Count + 1);
valid_axes([{Lo, Hi} | _Rest], _Count) ->
{error, {badarg, {Lo, Hi}}}.
% Returns the opaque user value stored in the given geometry.
-spec value(#geometry{}) -> any().
value(Geometry) ->
Geometry#geometry.value. | src/rstar_geometry.erl | 0.824285 | 0.754757 | rstar_geometry.erl | starcoder
%% @copyright 2013-2017 Zuse Institute Berlin
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%% @author <NAME> <<EMAIL>>
%% @doc Unit tests for rbrcseq which simulate specific interleaving of messages.
%% The tests depend on the gen_component breakpoint mechanism to delay specific messages.
%% Each test case assumes a specific replication factor (usually 4).
%% @end
%% @version $Id$
-module(rbr_interleaving_SUITE).
-author('<EMAIL>').
-vsn('$Id$').
-compile(export_all).
-define(TRACE(X,Y), ok).
-include("scalaris.hrl").
-include("unittest.hrl").
-include("client_types.hrl").
%% Lists the test cases common_test runs for this suite.
all() -> [
test_link_slowing,
test_link_slowing2,
test_interleaving,
test_write_through_notifies_original_proposer,
test_read_write_commuting
].
%% Every test case must finish within 400 seconds.
suite() -> [ {timetrap, {seconds, 400}} ].
init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
ok.
%% Builds a fresh symmetric ring with replication factor 4 before each
%% test case; the stop_ring flag tells the framework to tear it down.
init_per_testcase(_TestCase, Config) ->
{priv_dir, PrivDir} = lists:keyfind(priv_dir, 1, Config),
unittest_helper:make_symmetric_ring([{config, [{log_path, PrivDir}, {replication_factor, 4}]}]),
unittest_helper:check_ring_size_fully_joined(config:read(replication_factor)),
[{stop_ring, true} | Config].
test_link_slowing(_Config) ->
%% Slow down a link. One replica should not receive any prbr messages during the
%% test. After the slow link is removed (messages will be flushed), all replicas should
%% be consistent. Assumes R=4.
get_notified_by_message(self(), 1, kv_db, 2, dht_node, write),
Link = slow_link(1, kv_db, 2, dht_node),
{ok, _} = write_via_node(1, "1", filter_list_append(), "TestWrite"),
%% replica 2 should be empty, and the other three have the value written
?equals(prbr_values(), [[["TestWrite"]],
[],
[["TestWrite"]],
[["TestWrite"]]]),
remove_slow_link(Link),
%% wait for the notification that replica 2 got the flushed write
receive {message_received} -> ok end,
%% all replicas should have received the written value
?equals(prbr_values(), [[["TestWrite"]],
[["TestWrite"]],
[["TestWrite"]],
[["TestWrite"]]]).
test_link_slowing2(_Config) ->
%% slow down one link, but use a different client to send the write request;
%% the slow link should have no impact. Assumes R=4.
_Link = slow_link(1, kv_db, 2, dht_node),
[get_notified_by_message(self(), 2, kv_db, I, dht_node, write) ||
I <- lists:seq(1, 4)],
{ok, _} = write_via_node(2, "1", filter_list_append(), "TestWrite"),
%% wait until every replica reported the write
[receive {message_received} -> ok end || _ <- lists:seq(1, 4)],
%% all replicas should have received the written value
?equals(prbr_values(), [[["TestWrite"]],
[["TestWrite"]],
[["TestWrite"]],
[["TestWrite"]]]).
test_interleaving(_Config) ->
%% This test simulates the following interleaving of operations:
%% (4 nodes with R=4, the nodes are called 1,2,3,4)
%%
%% Three requests are made from three different clients.
%% 1. Client A Starts a write operation, but has only written replica
%% on node 1 so far (has read all replicas in its read phase)
%% 2. Client B Executes a read which only has read replicas 2,3,4 yet
%% (read has returned since majority replied)
%% 3. Client C Executes a write. In its read phase and it gets replies
%% from 2,3,4 first; After that write on every replica
Key = "A",
%% write of client A
[get_notified_by_message(self(), 1, kv_db, I, dht_node, round_request) || I<-lists:seq(2, 4)],
get_notified_by_message(self(), 1, kv_db, 1, dht_node, write),
_LinkA = [slow_link(1, kv_db, I, dht_node, write) || I <- lists:seq(2, 4)],
spawn(fun() -> write_via_node(1, Key, filter_list_append(), "WriteA") end),
[receive {message_received} -> ok end || _ <- lists:seq(1, 4)],
%% read of client B
_LinkB = slow_link(2, kv_db, 1, dht_node),
{ok, _} = read_via_node(2, Key, element(1, filter_list_append())),
%% write of client C
get_notified_by_message(self(), 3, kv_db, 1, dht_node, write),
LinkC = slow_link(3, kv_db, 1, dht_node),
{ok, _} = write_via_node(3, Key, filter_list_append(), "WriteB"),
remove_slow_link(LinkC),
receive {message_received} -> ok end,
ct:pal("PRBR state after interleaved operations: ~n~p", [prbr_data()]),
%% Test that there aren't two different values
%% with the same write round.
PrbrData = prbr_w_rounds_with_values(),
ValList = lists:usort(lists:flatten(PrbrData)),
case ValList of
[A, B] ->
?compare_w_note(fun(E1, E2) -> element(1, E1) =/= element(1, E2) end,
A, B, "Same write round for different values!");
[_A] -> ok;
_ ->
ct:fail("More than two different values/rounds! ~nprbr data:~n~p", [PrbrData])
end,
%% Do a read over replica 1, 2, 3
%% It should be an inconsistent read and currently diverging replica 1
%% should be repaired,
get_notified_by_message(self(), 4, kv_db, 4, dht_node, write),
LinkD = slow_link(4, kv_db, 4, dht_node),
{ok, _} = read_via_node(4, Key, element(1, filter_list_append())),
remove_slow_link(LinkD),
receive {message_received} -> ok end,
ct:pal("PRBR state after inconsistent read: ~n~p", [prbr_data()]),
%% after the repairing read, exactly one {round, value} pair must remain
PrbrData2 = prbr_w_rounds_with_values(),
ValList2 = lists:usort(lists:flatten(PrbrData2)),
?equals_w_note(length(ValList2), 1, "All replicas should have the same value and write round").
test_write_through_notifies_original_proposer(_Config) ->
%% This test case checks whether a write-through correctly notifies the original
%% proposer of the write.
%% Write A is started, but does not finish because two write messages are delayed.
%% A subsequent write (or read) should detect the write in progress and trigger a
%% write through which will finish write A before B is started.
TestPid = self(),
Key = "1234",
[get_notified_by_message(self(), 1, kv_db, I, dht_node, write) || I <- lists:seq(1,2)],
_LinkA = slow_link(1, kv_db, 3, dht_node, write),
_LinkB = slow_link(1, kv_db, 4, dht_node, write),
% start write A which will not finish since it only gets two write ack.
spawn(fun() ->
{ok, _} = write_via_node(1, Key, filter_list_append(), "WriteA"),
TestPid ! {write_a_done}
end),
% wait until A has written the two remaining replicas
[receive {message_received} -> ok end || _ <- lists:seq(1,2)],
% write B should now trigger a write through which should finish write A
{ok, _} = write_via_node(2, Key, filter_list_append(), "WriteB"),
receive {write_a_done} -> ok
after 10000 ->
?ct_fail("Write through has not notified original proposer in a timely manner", [])
end,
{ok, Value} = read_via_node(3, Key, element(1, filter_list_append())),
?equals_w_note(Value, ["WriteB", "WriteA"], "Values must match exactly due to interleaving").
%% A read whose read filter commutes with an in-flight write (rf_first
%% reads only the first tuple element, wf_second writes only the second)
%% must complete without triggering a write-through.
test_read_write_commuting(_Config) ->
Key = "123",
% write baseline
[get_notified_by_message(self(), 1, kv_db, I, dht_node, write) || I <- lists:seq(1,4)],
_ = write_via_node(1, Key, {fun prbr:noop_read_filter/1,
fun ?MODULE:cc_noop/3,
fun prbr:noop_write_filter/3},
{"A", "B"}),
[receive {message_received} -> ok end || _ <- lists:seq(1,4)],
% start a write of the second element that cannot reach replica 4
_ = slow_link(1, kv_db, 4, dht_node, write),
_ = write_via_node(1, Key, {fun ?MODULE:rf_second/1,
fun ?MODULE:cc_noop/3,
fun ?MODULE:wf_second/3}, "C"),
PrbrDataBeforeRead = prbr_data(),
_ = slow_link(4, kv_db, 1, dht_node),
{ok, "A"} = read_via_node(4, Key, fun ?MODULE:rf_first/1),
PrbrDataAfterRead = prbr_data(),
?equals_w_note(PrbrDataBeforeRead, PrbrDataAfterRead,
"Read was independent from write and thus should not have caused a "
"write through"),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Helper functions
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Maps a read filter to the write filters known to commute with it,
%% i.e. writes that cannot change what the read filter observes.
-spec get_commuting_wf_for_rf(prbr:read_filter()) ->
[prbr:write_filter()].
get_commuting_wf_for_rf(ReadFilter) ->
{name, Name} = erlang:fun_info(ReadFilter, name),
{module, Module} = erlang:fun_info(ReadFilter, module),
case {Module, Name} of
{?MODULE, rf_first} ->
%% rf_first/1 reads only the first tuple element while
%% wf_second/3 writes only the second -> they commute.
[fun ?MODULE:wf_second/3];
_ ->
%% NOTE(review): debug output left in; consider removing.
ct:pal("~p", [[Name, Module]]),
[]
end.
%% Read filter returning only the first element of a stored pair.
-spec rf_first(prbr_bottom | {any(), any()}) -> any().
rf_first(prbr_bottom) -> prbr_bottom;
rf_first({First, _Second}) -> First.
%% Read filter returning only the second element of a stored pair.
-spec rf_second(prbr_bottom | {any(), any()}) -> any().
rf_second(prbr_bottom) -> prbr_bottom;
rf_second({_First, Second}) -> Second.
%% Content check that accepts every write unconditionally.
-spec cc_noop(any(), any(), any()) ->
{true, none}.
cc_noop(_ReadVal, _WriteFilter, _WriteVal) -> {true, none}.
%% Write filter replacing only the second element of a stored pair.
-spec wf_second({any(), any()}, any(), any()) ->
{{any(), any()}, none}.
wf_second({First, _Old}, _UpdateInfo, New) -> {{First, New}, none}.
%% @doc Returns a {ReadFilter, ContentCheck, WriteFilter} triple that
%% treats the stored value as a list and prepends each written value.
%% An unwritten slot (prbr_bottom) reads as the empty list.
filter_list_append() ->
Read = fun(prbr_bottom) -> [];
(Stored) -> Stored
end,
Check = fun(_ReadVal, _WF, _WriteVal) -> {true, none} end,
Write = fun(Stored, _UpdateInfo, NewVal) ->
Existing = case Stored of
prbr_bottom -> [];
List -> List
end,
{[NewVal | Existing], none}
end,
{Read, Check, Write}.
%% @doc Sends a read request via node number ViaKvNr (pids ordered
%% lexicographically). Blocks until the read is done and returns
%% {ok, Value}.
read_via_node(ViaKvNr, Key, ReadFilter) ->
Pid = nth(ViaKvNr, kv_db),
Msg = {qround_request, self(), '_', ?RT:hash_key(Key), ?MODULE, ReadFilter, read, 1},
comm:send_local(Pid, {request_init, _ClinetPosInMsg=2, _OpenReqPos=3, Msg}),
receive
?SCALARIS_RECV({qread_done, _, _, _, Value}, {ok, Value})
end.
%% @doc Sends a write request via node number ViaKvNr (pids ordered
%% lexicographically). Blocks until the write is done; returns
%% {ok, RetValue} on success or the deny reason on failure.
write_via_node(ViaKvNr, Key, Filter, Value) ->
Pid = nth(ViaKvNr, kv_db),
Msg = {qwrite, self(), '_', ?RT:hash_key(Key), ?MODULE, Filter, Value, 20},
comm:send_local(Pid, {request_init, _ClientPos=2, _OpenReqPos=3, Msg}),
receive
?SCALARIS_RECV({qwrite_done, _, _, _, RetValue}, {ok, RetValue});
?SCALARIS_RECV({qwrite_deny, _ReqId, _NextFastWriteRound, _Value, Reason}, Reason)
end.
%% @doc Notifies process PidToNotify if process nth(ToId, ToType) received a message
%% of type MessageType from process nth(FromId, FromType).
%% ATTENTION: If the corresponding link is slowed by slow_link/[4,5,6] this method must be called
%% BEFORE slow_link. Otherwise two notifications might be received for the same message.
%% Todo? (Works only for ToType=dht_node so far).
get_notified_by_message(PidToNotify, FromId, FromType, ToId, ToType, MessageType) ->
BpName = bp_name("notify_" ++ atom_to_list(MessageType), FromId, FromType, ToId, ToType),
ToPid = nth(ToId, ToType),
NotifyFun = notify_fun(PidToNotify, nth(FromId, FromType), ToPid,
ToType, MessageType, BpName),
%% install the notifier as a conditional breakpoint on the target node
gen_component:bp_set_cond(ToPid, NotifyFun, BpName).
%% Builds the breakpoint predicate used by get_notified_by_message/6.
%% Matching is done by inspecting the raw prbr message layout:
%% element 2 is the message type, element 5 holds the sender envelope.
%% Returning false lets the message be processed normally either way;
%% the breakpoint removes itself after the first match.
notify_fun(PidToNotify, FromPid, ToPid, _ToType=dht_node, MessageType, BpName) ->
fun(Msg, _State) ->
case Msg of
_ when element(1, Msg) =:= prbr andalso
element(2, Msg) =:= MessageType andalso
element(3, element(1, element(5, Msg))) =:= FromPid ->
?TRACE("Notify ~p message on ~p received: ~n~p", [PidToNotify, ToPid, Msg]),
gen_component:bp_del(ToPid, BpName),
comm:send_local(PidToNotify, {message_received}),
false;
_ -> false
end
end.
%% @doc Gets all information stored in prbr for all nodes, one raw
%% entry list per dht_node (pids sorted lexicographically).
%% NOTE(review): receive has no 'after' clause, so this blocks forever
%% if a node does not answer -- acceptable here given the suite timetrap.
prbr_data() ->
[begin
comm:send_local(N, {prbr, tab2list_raw, kv_db, self()}),
receive
{_, List} -> List
end
end || N <- lists:sort(pid_groups:find_all(dht_node))].
%% @doc Returns all values for each node.
prbr_values() ->
[
[prbr:entry_val(E) || E <- Replica]
|| Replica <- prbr_data()].
%% @doc Returns all {write_round, value} tuples for each node.
%% Write-through info is stripped from the rounds so that rounds
%% differing only in that info compare equal.
prbr_w_rounds_with_values() ->
[
[{pr:set_wti(element(3, E), none), prbr:entry_val(E)} || E <- Replica]
|| Replica <- prbr_data()].
%% @doc Flush all queued messages of a link; the link stays slow.
flush_slow_link({_BPName, LoopPid, _Node}) ->
comm:send_local(LoopPid, {flush}).
%% @doc Stops slowing messages down: removes the breakpoint and makes
%% the delay loop deliver its queue and terminate.
remove_slow_link({BPName, LoopPid, Node}) ->
gen_component:bp_del(Node, BPName),
comm:send_local(LoopPid, {flush_and_stop}).
%% @doc See slow_link/5. But link is slow from the beginning.
slow_link(From, FromType, To, ToType) ->
slow_link(From, FromType, To, ToType, always_slow).
%% @doc Delays messages from From to To. Returns a link-info tuple.
%% Link behaves normally until a message of type FastUntilMessageType is received.
%% Starting with this message, all received messages between these two PIDs are queued
%% until flush_slow_link/1 or remove_slow_link/1 is called.
%% From/To are integer ids representing the nth Pid in PidGroup FromType/ToType.
%% Affected messages in prbr are: round_request, read and write. Tab2list is not affected.
%% No messages are thrown away and the delivery order is unchanged.
slow_link(From, FromType, To, ToType, FastUntilMessageType) ->
FromPid = nth(From, FromType),
ToPid = nth(To, ToType),
BpName = bp_name("slow_", From, FromType, To, ToType),
slow_link(FromPid, FromType, ToPid, ToType, BpName, FastUntilMessageType).
%% Installs the delaying breakpoint on the target node and returns the
%% {BreakpointName, DelayLoopPid, TargetPid} link-info tuple.
slow_link(FromPid, FromType, ToPid, ToType, BPName, FastUntilMessageType) ->
{LoopPid, BpFun} = slow_link_fun(FromPid, FromType, ToPid, ToType, FastUntilMessageType),
gen_component:bp_set_cond(ToPid, BpFun, BPName),
{BPName, LoopPid, ToPid}.
%% @doc Delays all round_request, write and read messages received by prbr from PID
%% From, on DHT node with PID To. The link starts delivering all queued messages as
%% soon as a flush message was received.
%% tab2list_raw messages are not delayed.
slow_link_fun(From, _FromType, To, _ToType=dht_node, FastUntilMessageType) ->
LoopPid = spawn(?MODULE, slow_loop, [To, FastUntilMessageType]),
BpFun = fun (Msg, _State) ->
case Msg of
%% prbr round_request, write and read messages seventh
%% element is the datatype. This is abused to ensure that
%% a message is only delayed once
_ when element(7, Msg) =:= rbr_interleave_SUITE_dont_delay ->
?TRACE("Deliver delayed message: ~n~p", [Msg]),
false;
%% delay a prbr round_request, write or read message if it comes
%% from PID From.
_ when element(1, Msg) =:= prbr andalso
element(3, element(1, element(5, Msg))) =:= From ->
?TRACE("Delay message: ~n~p", [Msg]),
%% change Datatype in message since it is not used in this unit test suite.
%% marks messages which were already delayed.
NewMsg = setelement(7, Msg, rbr_interleave_SUITE_dont_delay),
MsgType = element(2, Msg),
comm:send_local(LoopPid, {delay, MsgType, NewMsg}),
%% drop_single: the breakpoint swallows the original message;
%% the delay loop re-injects the tagged copy later.
drop_single;
_ ->
false
end
end,
{LoopPid, BpFun}.
%% Entry points for the delay-loop process spawned by slow_link_fun/5:
%% with always_slow the loop queues from the start, otherwise it stays
%% fast until the first message of type FastUntil arrives.
slow_loop(To, always_slow) ->
slow_loop(To, always_slow, [], true);
slow_loop(To, FastUntil) ->
slow_loop(To, FastUntil, [], false).
%% Fast state: forward messages immediately; switch to slow on FastUntil.
slow_loop(To, FastUntil, MsgQueue, _IsSlow=false) ->
receive
{delay, FastUntil, Msg} ->
slow_loop(To, FastUntil, [Msg | MsgQueue], true);
{delay, _Type, Msg} ->
comm:send_local(To, Msg),
slow_loop(To, FastUntil, MsgQueue, false)
end;
%% Slow state: queue everything (newest first); {flush} delivers the
%% queue in original order and keeps queuing; {flush_and_stop} delivers
%% and lets the process terminate.
slow_loop(To, FastUntil, MsgQueue, _IsSlow=true) ->
receive
{delay, _Type, Msg} ->
slow_loop(To, FastUntil, [Msg | MsgQueue], true);
{flush} ->
[comm:send_local(To, Msg) || Msg <- lists:reverse(MsgQueue)],
slow_loop(To, FastUntil, [], true);
{flush_and_stop} ->
[comm:send_local(To, Msg) || Msg <- lists:reverse(MsgQueue)]
end.
%% Convenience accessors resolving the N-th process of a pid group,
%% with pids ordered lexicographically.
nth_dht_node(Index) -> nth(Index, dht_node).
nth_kv_db(Index) -> nth(Index, kv_db).
nth(Index, PidGroup) -> nth_pid(Index, pid_groups:find_all(PidGroup)).
nth_pid(Index, Pids) ->
Sorted = lists:sort(Pids),
lists:nth(Index, Sorted).
%% @doc Generates a unique breakpoint name encoding both link endpoints,
%% e.g. 'slow_ 1,kv_db|2,dht_node'.
bp_name(Prefix, FromId, FromType, ToId, ToType) ->
BPNameString = Prefix ++ " " ++ integer_to_list(FromId) ++ "," ++ atom_to_list(FromType)
++ "|" ++ integer_to_list(ToId) ++ "," ++ atom_to_list(ToType),
list_to_atom(BPNameString). %% NOTE: dynamically creates atoms, which are never
%% garbage collected; acceptable here since the name set is bounded.
%%%------------------------------------------------------------------------
%% Copyright 2019, OpenTelemetry Authors
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%
%% @doc
%% @end
%%%-------------------------------------------------------------------------
-module(ot_meter).
%% Behaviour callbacks a meter implementation module must provide.
%% The public functions below dispatch to the implementation module
%% embedded in the opentelemetry:meter() tuple ({Module, _}).
-callback new_instruments(opentelemetry:meter(), [instrument_opts()]) -> boolean().
%% Record a measurement against an implementation-specific bound instrument.
-callback record(opentelemetry:meter(), term(), number()) -> ok.
-callback record(opentelemetry:meter(), name(), label_set(), number()) -> ok.
%% Record several measurements under one label set. Argument order
%% matches the record_batch/3 wrapper below (label set first); the
%% previous declaration contradicted the wrapper's actual call order.
-callback record_batch(opentelemetry:meter(), label_set(), [measurement()]) -> ok.
-callback bind(opentelemetry:meter(), instrument(), label_set()) -> term().
-callback release(opentelemetry:meter(), term()) -> ok.
-callback register_observer(opentelemetry:meter(), ot_meter:name(), ot_observer:callback()) -> ok | unknown_instrument.
-callback set_observer_callback(opentelemetry:meter(), ot_meter:name(), ot_observer:callback()) -> ok | unknown_instrument.
-callback observe(ot_observer:observer_result(), number(), label_set()) -> ok.
-export([new_instruments/2,
bind/3,
release/1,
record/2,
record/4,
record_batch/3,
register_observer/3,
set_observer_callback/3,
observe/3]).
-type label_key() :: unicode:unicode_binary().
-type label_value() :: unicode:unicode_binary().
-type label_set() :: [{label_key(), label_value()}].
-type name() :: unicode:unicode_binary().
-type key() :: {name(), label_set()}.
-type description() :: unicode:unicode_binary().
-type instrument_kind() :: counter | observer | measure.
-type unit() :: atom().
-type input_type() :: integer | float.
-type instrument_opts() :: #{name := name(),
description => description(),
kind := instrument_kind(),
type => input_type(),
label_keys => [label_key()],
unit => unit()}.
-type instrument() :: term().
-type bound_instrument() :: {opentelemetry:meter(), term()}.
-type measurement() :: {bound_instrument() | name(), number()}.
-export_type([key/0,
name/0,
description/0,
instrument_kind/0,
input_type/0,
unit/0,
measurement/0,
label_set/0]).
%% Creates the given instruments on the meter implementation.
-spec new_instruments(opentelemetry:meter(), [instrument_opts()]) -> boolean().
new_instruments(Meter={Module, _}, List) ->
Module:new_instruments(Meter, List).
%% Binds a named instrument to a label set; the meter is kept in the
%% returned handle so record/2 and release/1 can dispatch later.
-spec bind(opentelemetry:meter(), name(), label_set()) -> bound_instrument().
bind(Meter={Module, _}, Name, LabelSet) ->
{Meter, Module:bind(Meter, Name, LabelSet)}.
%% Releases a bound instrument obtained from bind/3.
-spec release(bound_instrument()) -> ok.
release({Meter={Module, _}, BoundInstrument}) ->
Module:release(Meter, BoundInstrument).
%% Records a measurement for a named instrument. Note the argument swap:
%% the public API takes (Name, Number, LabelSet) but the callback is
%% invoked as (Name, LabelSet, Number).
-spec record(opentelemetry:meter(), name(), number(), label_set()) -> ok.
record(Meter={Module, _}, Name, Number, LabelSet) ->
Module:record(Meter, Name, LabelSet, Number).
%% Records a measurement against a previously bound instrument.
-spec record(bound_instrument(), number()) -> ok.
record({Meter={Module, _}, BoundInstrument}, Number) ->
Module:record(Meter, BoundInstrument, Number).
%% Records several measurements under one label set.
-spec record_batch(opentelemetry:meter(), label_set(), [measurement()]) -> ok.
record_batch(Meter={Module, _}, LabelSet, Measurements) ->
Module:record_batch(Meter, LabelSet, Measurements).
%% Registers Callback as the observer for the named instrument.
-spec register_observer(opentelemetry:meter(), ot_meter:name(), ot_observer:callback())
-> ok | unknown_instrument.
register_observer(Meter={Module, _}, Name, Callback) ->
Module:register_observer(Meter, Name, Callback).
%% Replaces the callback of an already registered observer.
-spec set_observer_callback(opentelemetry:meter(), ot_meter:name(), ot_observer:callback())
-> ok | unknown_instrument.
set_observer_callback(Meter={Module, _}, Name, Callback) ->
Module:set_observer_callback(Meter, Name, Callback).
%% Reports an observation from inside an observer callback.
-spec observe(ot_observer:observer_result(), number(), label_set()) -> ok.
observe({Module, Instrument}, Number, LabelSet) ->
Module:observe(Instrument, Number, LabelSet). | src/ot_meter.erl | 0.826151 | 0.445047 | ot_meter.erl | starcoder
%% @doc
%% `rebar3_hex_organization' - Manage organizations
%%
%% Manages the list of authorized hex organizations.
%%
%% Note that all commands that require a `NAME' argument expect a qualified repository name for the
%% argument (i.e., `hexpm:my_org').
%%
%% == About Organizations ==
%%
%% Organizations are a feature provided by hexpm that allows you to group packages, public and private alike. Organizations
%% are treated as repositories that have a parent. The parent is found as the first part of a repository's name,
%% separated from the organization by a `:'. So for the organization `your_org' on the main repository `hexpm'
%% the fully qualified name would be `hexpm:your_org'.
%%
%% Be sure to add your organization to either your global rebar.config `~/.config/rebar3/rebar.config' or
%% within an projects `rebar.config'. Below is an example:
%%
%% ```
%% {hex, [{repos, [ #{name => <<"hexpm:your_org">>}]}]}.
%% '''
%%
%% == Authorize an organization ==
%%
%% This command will generate an API key used to authenticate access to the organization. See the `rebar3_hex_user'
%% tasks to list and control all your active API keys.
%%
%% ```
%% $ rebar3 hex organization auth NAME [--key KEY] [--key-name KEY_NAME]
%% '''
%%
%% == Deauthorize and remove an organization ==
%%
%% ```
%% $ rebar3 hex organization deauth NAME
%% '''
%%
%% == List all authorized organizations ==
%%
%% This command will only list organizations you have authorized with this task, it will not list organizations you
%% have access to by having authorized with `rebar3 hex user auth'.
%%
%% == Generate organization key ==
%% This command is useful to pre-generate keys for use with `rebar3 hex organization auth NAME --key KEY' on CI
%% servers or similar systems. It returns the hash of the generated key that you can pass to auth NAME `--key' KEY.
%% Unlike the `hex user' key commands, a key generated with this command is owned by the organization directly,
%% and not the user that generated it. This makes it ideal for shared environments such as CI where you don't
%% want to give access to user-specific resources and the user's organization membership status won't affect the key. By
%% default this command sets the organization permission which allows read-only access to the organization, it can be
%% overridden with the `--permission' flag.
%%
%%
%% ```
%% $ rebar3 hex organization key NAME generate [--key-name KEY_NAME] [--permission PERMISSION]
%% '''
%%
%% == Revoke key ==
%% Removes a given key from a organization.
%%
%% ```
%% $ rebar3 hex organization key NAME revoke KEY_NAME
%% '''
%%
%% == List keys ==
%% Lists all keys associated with the organization.
%%
%% ```
%% $ rebar3 hex organization key NAME list
%% '''
%%
%% == Command line options ==
%%
%% <ul>
%% <li>`--all' - Used for revoking all keys for authorized organization. Only valid with the `revoke` task.</li>
%% <li>`--key KEY' - Hash of key used to authenticate HTTP requests to organization, if omitted will generate a new key
%% with your account credentials. This flag is useful if you have a key pre-generated with
%% `rebar3 hex organization key' and want to authenticate on a CI server or similar system.</li>
%% <br/>
%% <li>`--key-name KEY_NAME' - By default Hex will base the key name on your machine's hostname and the organization
%% name, use this option to give your own name.</li>
%% <br/>
%% <li>`--permission PERMISSION' - Sets the permissions on the key, this option can be given multiple times, possibly
%% values are:
%% <ul>
%% <br/>
%% <li>`api:read' - API read access.</li>
%% <li>`api:write' - API write access.</li>
%% <li>`repository' - Access to the repository (this is the default permission).</li>
%% </ul>
%% </li>
%% </ul>
-module(rebar3_hex_organization).
-export([
init/1,
do/1,
format_error/1
]).
-include("rebar3_hex.hrl").
-define(PROVIDER, organization).
-define(DEPS, []).
%% ===================================================================
%% Public API
%% ===================================================================
%% @private
%% Registers the `hex organization' provider and its command-line
%% options with rebar3.
-spec init(rebar_state:t()) -> {ok, rebar_state:t()}.
init(State) ->
Provider = providers:create([
{name, ?PROVIDER},
{module, ?MODULE},
{namespace, hex},
{bare, true},
{deps, ?DEPS},
{example, "rebar3 hex organization auth my_org --key 1234"},
{short_desc, "Add, remove or list configured organizations and their auth keys"},
{desc, ""},
{opts, [
{all, undefined, "all", boolean, "Specifies all keys. Only recognized when used with the revoke task."},
{key, $k, "key", string, "Authentication key for an organization that already exists at the repository."},
{key_name, undefined, "key-name", string, "Specifies a key name to use when generating or revoking a key."},
{permission, $p, "permission", list, "Colon delimited permission. This option may be given multiple times."}
]}
]),
State1 = rebar_state:add_provider(State, Provider),
{ok, State1}.
%% @private
%% Dispatches on the raw command-line arguments. Every NAME argument is
%% expected to be a fully qualified repository name (e.g. "hexpm:my_org").
%% Note the clause order: "revoke --all" must be matched before "revoke".
-spec do(rebar_state:t()) -> {ok, rebar_state:t()}.
do(State) ->
case rebar_state:command_args(State) of
["auth", OrgName | _] ->
auth(State, to_binary(OrgName));
["deauth", OrgName | _] ->
deauth(State, to_binary(OrgName));
["key", OrgName, "generate" | _] ->
generate(State, to_binary(OrgName));
["key", OrgName, "revoke", "--all" | _] ->
revoke_all(State, to_binary(OrgName));
["key", OrgName, "revoke" | _] ->
revoke(State, to_binary(OrgName));
["key", OrgName, "list" | _] ->
list_org_keys(State, to_binary(OrgName));
["list" | _] ->
list_orgs(State);
_ ->
%% unrecognized argument shape: raise; rendered by format_error(bad_command)
?RAISE(bad_command)
end.
%% @private
%% Renders machine-raised error terms into user-facing strings.
%% Map-shaped reasons come from the hex API client and are pretty-printed;
%% binary reasons are already human-readable.
-spec format_error(any()) -> iolist().
format_error(no_repo) ->
"Authenticate and generate commands require repository name as argument";
format_error(auth_no_key) ->
"Repo authenticate command requires key";
format_error({auth, Reason}) when is_binary(Reason) ->
io_lib:format("Error authenticating organization : ~ts", [Reason]);
format_error({auth, Errors}) when is_map(Errors) ->
Reason = rebar3_hex_client:pretty_print_errors(Errors),
io_lib:format("Error authenticating organization : ~ts", [Reason]);
format_error({generate_key, Reason}) when is_binary(Reason) ->
io_lib:format("Error generating organization key: ~ts", [Reason]);
format_error({generate_key, Errors}) when is_map(Errors) ->
Reason = rebar3_hex_client:pretty_print_errors(Errors),
io_lib:format("Error generating organization key: ~ts", [Reason]);
format_error({key_generate, Reason}) when is_binary(Reason) ->
io_lib:format("Error generating organization key: ~ts", [Reason]);
format_error({key_generate, Errors}) when is_map(Errors) ->
Reason = rebar3_hex_client:pretty_print_errors(Errors),
io_lib:format("Error generating organization key: ~ts", [Reason]);
format_error({key_revoke_all, Reason}) when is_binary(Reason) ->
io_lib:format("Error revoking all organization keys: ~ts", [Reason]);
format_error({key_revoke_all, Errors}) when is_map(Errors) ->
Reason = rebar3_hex_client:pretty_print_errors(Errors),
io_lib:format("Error revoking all organization keys: ~ts", [Reason]);
format_error({key_list, Reason}) when is_binary(Reason) ->
io_lib:format("Error listing organization keys: ~ts", [Reason]);
format_error({key_list, Errors}) when is_map(Errors) ->
Reason = rebar3_hex_client:pretty_print_errors(Errors),
io_lib:format("Error listing organization keys: ~ts", [Reason]);
format_error(bad_command) ->
%% Usage text for unrecognized argument shapes (see do/1).
%% Fixed: the auth/deauth lines used to repeat the verb after NAME.
"Invalid arguments, expected one of:\n\n"
"rebar3 hex organization auth ORG_NAME\n"
"rebar3 hex organization deauth ORG_NAME\n"
"rebar3 hex organization key ORG_NAME generate\n"
"rebar3 hex organization key ORG_NAME revoke --key-name NAME\n"
"rebar3 hex organization key ORG_NAME revoke --all\n"
"rebar3 hex organization key ORG_NAME list\n"
"rebar3 hex organization list\n";
format_error(not_a_valid_repo_name) ->
"Invalid organization repository: organization name arguments must be given as a fully qualified "
"repository name (i.e, hexpm:my_org)";
format_error({get_parent_repo_and_org_name, Error, Name}) ->
Str = "Error getting the parent repo for ~ts. Be sure to authenticate first with: rebar3 hex user",
rebar_log:log(diagnostic, "Error getting parent repo and org name: ~p", [Error]),
io_lib:format(Str, [Name]);
format_error({get_repo_by_name, {error,{not_valid_repo,ParentName}}}) ->
Str = io_lib:format("You do not appear to be authenticated as a user to the ~ts repository.", [ParentName]),
Str ++ " " ++ "Run rebar3 hex user auth and try this command again.";
format_error(Reason) ->
%% fall back to the generic plugin error formatter
rebar3_hex_error:format_error(Reason).
%% Dialyzer cannot follow the non-local control flow of the ?RAISE macro,
%% so warnings for this function are suppressed.
-dialyzer({nowarn_function, auth/2}).
-spec auth(rebar_state:t(), binary()) -> {ok, rebar_state:t()}.
%% @private Authenticate against an organization repository.
%% If no --key was given on the command line, a fresh repo-scoped key is
%% generated via the parent repo's write credentials; otherwise the provided
%% key is validated against the API before being stored. On success the key
%% is persisted to the local hex auth config.
auth(State, RepoName) ->
    {Opts, _} = rebar_state:command_parsed_args(State),
    {ParentRepo, OrgName} = get_parent_repo_and_org_name(State, RepoName),
    Key =
        case proplists:get_value(key, Opts, undefined) of
            undefined ->
                %% No key supplied: mint one scoped to this organization.
                Config = rebar3_hex_config:get_hex_config(?MODULE, ParentRepo, write),
                Config1 = Config#{api_organization => OrgName},
                KeyName = proplists:get_value(key_name, Opts, rebar3_hex_config:repos_key_name()),
                generate_key(Config1, KeyName, default_perms(OrgName));
            ProvidedKey ->
                %% Key supplied: verify it actually grants repository access
                %% for this organization before storing it.
                TestPerms = #{domain => <<"repository">>, resource => OrgName},
                Config = ParentRepo#{api_key => to_binary(ProvidedKey),
                                     api_repository => OrgName,
                                     api_organization => OrgName
                                    },
                case rebar3_hex_client:test_key(Config, TestPerms) of
                    {ok, _} ->
                        ProvidedKey;
                    Error ->
                        ?RAISE({auth, Error})
                end
        end,
    rebar3_hex_config:update_auth_config(#{RepoName => #{name => RepoName, repo_key => Key}}, State),
    rebar3_hex_io:say("Successfully authenticated to ~ts", [RepoName]),
    {ok, State}.
-spec deauth(rebar_state:t(), binary()) -> {ok, rebar_state:t()}.
%% @private Drop the stored credentials for RepoName from the local hex
%% auth config; crashes (badmatch) if removal does not return ok.
deauth(State, RepoName) ->
    ok = rebar_hex_repos:remove_from_auth_config(RepoName, State),
    rebar3_hex_io:say("Successfully deauthorized ~ts", [RepoName]),
    {ok, State}.
-spec generate(rebar_state:t(), binary()) -> {ok, rebar_state:t()}.
%% @private Generate a new organization key and print its secret.
%% Key name defaults to the configured repos key name; --permission options
%% (if any) override the default repository-scoped permissions.
generate(State, RepoName) ->
    {Repo, OrgName} = get_parent_repo_and_org_name(State, RepoName),
    {Opts, _} = rebar_state:command_parsed_args(State),
    KeyName = proplists:get_value(key_name, Opts, rebar3_hex_config:repos_key_name()),
    Config = rebar3_hex_config:get_hex_config(?MODULE, Repo, write),
    PermOpts = proplists:get_all_values(permission, Opts),
    Perms = rebar3_hex_key:convert_permissions(PermOpts, default_perms(OrgName)),
    Key = generate_key(Config#{api_organization => OrgName}, KeyName, Perms),
    rebar3_hex_io:say("~ts", [Key]),
    {ok, State}.
-spec list_org_keys(rebar_state:t(), binary()) -> {ok, rebar_state:t()}.
%% @private Print every key registered for the organization's repository.
%% Raises {key_list, _} on any API failure.
list_org_keys(State, RepoName) ->
    {ParentRepo, Org} = get_parent_repo_and_org_name(State, RepoName),
    HexConfig0 = rebar3_hex_config:get_hex_config(?MODULE, ParentRepo, read),
    HexConfig = HexConfig0#{api_organization => Org},
    case rebar3_hex_key:list(HexConfig) of
        ok ->
            {ok, State};
        {error, #{<<"errors">> := Errors}} ->
            ?RAISE({key_list, Errors});
        {error, #{<<"message">> := Message}} ->
            ?RAISE({key_list, Message});
        Other ->
            ?RAISE({key_list, Other})
    end.
-spec revoke(rebar_state:t(), binary()) -> {ok, rebar_state:t()}.
%% @private Revoke a single organization key, identified by the mandatory
%% --key-name argument; raises bad_command when the argument is missing.
revoke(State, RepoName) ->
    {ParentRepo, Org} = get_parent_repo_and_org_name(State, RepoName),
    {Args, _} = rebar_state:command_parsed_args(State),
    KeyName =
        case proplists:get_value(key_name, Args, undefined) of
            undefined -> ?RAISE(bad_command);
            Name -> Name
        end,
    HexConfig = rebar3_hex_config:get_hex_config(?MODULE, ParentRepo, write),
    case rebar3_hex_key:revoke(HexConfig#{api_organization => Org}, KeyName) of
        ok ->
            rebar3_hex_io:say("Key successfully revoked", []),
            {ok, State};
        {error, #{<<"errors">> := Errors}} ->
            ?RAISE({key_revoke, Errors});
        {error, #{<<"message">> := Message}} ->
            ?RAISE({key_revoke, Message});
        Other ->
            ?RAISE({key_revoke, Other})
    end.
-spec revoke_all(rebar_state:t(), binary()) -> {ok, rebar_state:t()}.
%% @private Revoke every key of the organization's repository.
%% Raises {key_revoke_all, _} on any API failure.
revoke_all(State, RepoName) ->
    {ParentRepo, Org} = get_parent_repo_and_org_name(State, RepoName),
    HexConfig = rebar3_hex_config:get_hex_config(?MODULE, ParentRepo, write),
    case rebar3_hex_key:revoke_all(HexConfig#{api_organization => Org}) of
        ok ->
            rebar3_hex_io:say("All keys successfully revoked", []),
            {ok, State};
        {error, #{<<"errors">> := Errors}} ->
            ?RAISE({key_revoke_all, Errors});
        {error, #{<<"message">> := Message}} ->
            ?RAISE({key_revoke_all, Message});
        Other ->
            ?RAISE({key_revoke_all, Other})
    end.
-spec list_orgs(rebar_state:t()) -> {ok, rebar_state:t()}.
%% @private Print a table of all configured organization repositories
%% (those whose name is qualified, i.e. contains a ":" separator).
list_orgs(State) ->
    Resources = rebar_state:resources(State),
    #{repos := Repos} = rebar_resource_v2:find_resource_state(pkg, Resources),
    %% The reverse preserves the ordering the previous fold-based
    %% implementation produced (last configured repo first).
    OrgRepos =
        lists:reverse(
          lists:filter(fun(#{name := Name}) -> is_org_repo_name(Name) end,
                       Repos)),
    Rows =
        [begin
             #{name := Name,
               api_organization := Org,
               repo_url := Url,
               repo_public_key := PubKey} = Repo,
             [binary_to_list(Name), org_url(Org, Url), printable_public_key(PubKey)]
         end || Repo <- OrgRepos],
    Headers = ["Name", "URL", "Public Key"],
    rebar3_hex_results:print_table([Headers | Rows]),
    {ok, State}.

%% @private A repository belongs to an organization when its name is of the
%% form <<"parent:org">>.
is_org_repo_name(Name) ->
    case binary:split(Name, <<":">>) of
        [_, _] -> true;
        _ -> false
    end.
-spec default_perms(binary()) -> [map()].
%% @private Default permissions for a freshly generated key: access scoped
%% to the organization's repository only.
default_perms(OrgName) ->
    Permission = #{<<"domain">> => <<"repository">>,
                   <<"resource">> => OrgName},
    [Permission].
-spec generate_key(map(), binary() | undefined, [map()]) -> binary().
%% @private Ask the hex API for a new key named KeyName with permissions
%% Perms; returns the key's secret, or raises {generate_key, _} on failure.
generate_key(HexConfig, KeyName, Perms) ->
    case rebar3_hex_key:generate(HexConfig, KeyName, Perms) of
        {ok, #{<<"secret">> := Secret}} ->
            Secret;
        {error, #{<<"errors">> := Errors}} ->
            ?RAISE({generate_key, Errors});
        {error, #{<<"message">> := Message}} ->
            ?RAISE({generate_key, Message});
        Error ->
            ?RAISE({generate_key, Error})
    end.
-spec printable_public_key(binary()) -> nonempty_string().
%% @private OpenSSH-style fingerprint of a PEM-encoded public key:
%% SHA-256 over the SSH wire encoding, base64 with the trailing "="
%% padding dropped (43 of the 44 characters).
printable_public_key(PemBin) ->
    [Entry] = public_key:pem_decode(PemBin),
    Key = public_key:pem_entry_decode(Entry),
    Digest = crypto:hash(sha256, ssh_encode(Key)),
    "SHA256:" ++ string:substr(base64:encode_to_string(Digest), 1, 43).
%% OTP moved the SSH public-key encoding helper between releases, so the
%% right call is selected at compile time via macros (presumably set by the
%% build config -- TODO confirm where OTP_23/POST_OTP_22 are defined):
%%   OTP 23                      -> public_key:ssh_encode/2
%%   after OTP 22, but not 23    -> ssh_file:encode/2
%%   otherwise                   -> public_key:ssh_encode/2
-ifdef(OTP_23).
-spec ssh_encode(binary()) -> binary().
ssh_encode(InData) ->
    public_key:ssh_encode(InData, ssh2_pubkey).
-elif(POST_OTP_22 and not OTP_23).
-spec ssh_encode(binary()) -> binary().
ssh_encode(InData) ->
    ssh_file:encode(InData, ssh2_pubkey).
-else.
-spec ssh_encode(binary()) -> binary().
ssh_encode(InData) ->
    public_key:ssh_encode(InData, ssh2_pubkey).
-endif.
%% @private Coerce a name to binary (delegates to rebar_utils:to_binary/1).
to_binary(Name) ->
    rebar_utils:to_binary(Name).
-spec org_url(binary(), binary()) -> [byte(), ...].
%% @private Organization repository URL as a flat string, e.g.
%% <<"https://repo.hex.pm">> + <<"acme">> -> "https://repo.hex.pm/repos/acme".
org_url(Org, BaseUrl) ->
    lists:append([binary_to_list(BaseUrl), "/repos/", binary_to_list(Org)]).
%% @private Split a qualified repository name (<<"parent:org">>) and look up
%% the parent repository's configuration. Raises not_a_valid_repo_name for
%% unqualified names and {get_parent_repo_and_org_name, _, _} when the parent
%% repo is unknown (typically: the user never ran `rebar3 hex user auth`).
%% Fixed: stray dataset/metadata residue after the final `end.` made the
%% function (and the module) unparseable.
get_parent_repo_and_org_name(State, RepoName) ->
    case binary:split(RepoName, <<":">>) of
        [Parent, Org] ->
            case rebar3_hex_config:repo(State, Parent) of
                {ok, Repo} ->
                    {Repo, Org};
                Error ->
                    ?RAISE({get_parent_repo_and_org_name, Error, RepoName})
            end;
        [_] ->
            ?RAISE(not_a_valid_repo_name)
    end.
%%%-------------------------------------------------------------------
%%% @doc
%%% Utility functions for converting and viewing digraph graphs.
%%% @end
%%%-------------------------------------------------------------------
-module(digraph_export).
-define(TEMP_LABEL, (atom_to_list(?MODULE))).
-define(TEMP_FILE, "graph").
%% API
-export([formats/0,
programs/0,
convert/2,
convert/3,
view/3]).
-type format() :: dot | graphml.
%% Available output formats.
-type formats() :: [format(), ...].
%% A list of output formats.
-type program() :: cytoscape | gephi.
%% Available graph viewer programs.
-type programs() :: [program(), ...].
%% A list of graph viewer programs.
-type convert_properties() :: [{name, string()} |
pretty |
{pretty, boolean()}].
%% Conversion proplist values.
-export_type([format/0,
formats/0,
program/0,
programs/0,
convert_properties/0]).
%%%===================================================================
%%% API
%%%===================================================================
%% @doc
%% List all file formats supported by the convert functions.
%% @end
%% @see convert/2
%% @see convert/3
-spec formats() -> Formats when
      Formats :: formats().
formats() ->
    [dot, graphml].
%% @doc
%% List all external viewing programs supported by the view function.
%% @end
%% @see view/3
-spec programs() -> Programs when
      Programs :: programs().
programs() ->
    [cytoscape, gephi].
%% @equiv convert/3
%% Serialize with an empty option list (default name, compact output).
-spec convert(Graph, Format) -> Serialized when
      Graph :: digraph:graph(),
      Format :: format(),
      Serialized :: unicode:charlist().
convert(Graph, Format) ->
    convert(Graph, Format, []).
%% @doc
%% Serialize a digraph graph to the given file format.
%%
%% Options are passed as a property list. The two supported options
%% are:
%%
%% <ul>
%%  <li>Name: An optional name to include in the graph file.</li>
%%  <li>Pretty: A boolean value for if the output file should be
%%      formatted for human readability or optimized for size.</li>
%% </ul>
%% @end
%% @param Graph An existing digraph graph.
%% @param Format One of the supported file formats.
%% @param Options Property list of conversion options.
%% @returns The serialized graph data.
%% @see formats/0
-spec convert(Graph, Format, Options) -> Serialized when
      Graph :: digraph:graph(),
      Format :: format(),
      Options :: convert_properties(),
      Serialized :: unicode:charlist().
%% Dispatch to the per-format backend module; both take the same
%% (Graph, Name, Pretty) arguments.
convert(Graph, dot, Options) ->
    {Name, Pretty} = parse_convert_properties(Options),
    digraph_export_dot:convert(Graph, Name, Pretty);
convert(Graph, graphml, Options) ->
    {Name, Pretty} = parse_convert_properties(Options),
    digraph_export_graphml:convert(Graph, Name, Pretty).
%% @doc
%% Launch an external program to view a serialized graph. A temporary
%% file is created to store the graph and passed into the external
%% program.
%%
%% The external program will need to be installed and on the current
%% PATH for this to function.
%%
%% This will block on the external program completing. Please spawn
%% this function in a separate process if that is not desired.
%% @end
%% @param Serialized The serialized graph data.
%% @param Format The format the graph was serialized in.
%% @param Program The external program to launch.
%% @returns The output of the program.
%% @see formats/0
%% @see programs/0
-spec view(Serialized, Format, Program) -> Output when
      Serialized :: unicode:charlist(),
      Format :: format(),
      Program :: program(),
      Output :: string().
view(Serialized, Format, Program) ->
    Data = unicode:characters_to_binary(Serialized),
    {ok, TempFile} = mktemp(?TEMP_LABEL, ?TEMP_FILE ++ extension(Format)),
    try
        ok = file:write_file(TempFile, Data),
        Command = io_lib:format(command(Program), [TempFile]),
        %% os:cmd/1 blocks until the viewer exits; its output is the value
        %% of the try body and therefore of view/3.
        _ = os:cmd(Command)
    after
        %% Best-effort cleanup of the temporary file.
        ok = file:delete(TempFile)
    end.
%%%===================================================================
%%% Internal Functions
%%%===================================================================
%% @private Extract the {Name, Pretty} pair from the conversion options.
%% Name defaults to ""; Pretty is enabled by either the bare atom `pretty'
%% or `{pretty, true}', matching the documented convert_properties() type.
%% Bug fix: this previously read the undocumented key `indent', so the
%% documented `pretty' option was silently ignored.
parse_convert_properties(Options) when is_list(Options) ->
    Name = proplists:get_value(name, Options, ""),
    Pretty = proplists:get_bool(pretty, Options),
    {Name, Pretty}.
%% File extension for each supported output format.
extension(dot) -> ".dot";
extension(graphml) -> ".graphml".
%% Shell command template per viewer; "~ts" is replaced with the temp-file
%% path via io_lib:format/2 in view/3.
command(gephi) -> "gephi --console suppress \"~ts\"";
command(cytoscape) -> "cytoscape -N \"~ts\"".
-spec mktemp(Label, File) -> {ok, TempFile} | {error, Reason} when
      Label :: string(),
      File :: string(),
      TempFile :: file:filename(),
      Reason :: file:posix().
%% @private Create (and truncate) a uniquely named temporary file under the
%% user cache directory and return its path. 64 random bits in the directory
%% name make collisions between concurrent calls unlikely.
%% Fixed: trailing dataset/metadata residue after the final `end.` broke the
%% source; the write is now also skipped when the directory cannot be
%% created, instead of being attempted unconditionally.
mktemp(Label, File) ->
    RandBytes = crypto:strong_rand_bytes(8),
    RandChars = integer_to_list(binary:decode_unsigned(RandBytes), 36),
    TempDir = filename:basedir(user_cache, Label ++ "-" ++ RandChars),
    TempFile = filename:join(TempDir, File),
    case filelib:ensure_dir(TempFile) of
        ok ->
            case file:write_file(TempFile, <<>>) of
                ok -> {ok, TempFile};
                {error, _} = WriteError -> WriteError
            end;
        {error, _} = DirError ->
            DirError
    end.
%% Copyright 2019-2020 Klarna Bank AB
%% Copyright 2021 snabbkaffe contributors
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(asciiart).
-export([ init/0
, dimensions/1
, render/1
, char/3
, char/2
, line/4
, line/3
, string/4
, string/3
, plot/1
, plot/2
, draw/2
, draw/1
, visible/3
]).
%%====================================================================
%% Types
%%====================================================================
-type vector() :: {integer(), integer()}.
-type cont() :: fun((canvas()) -> canvas()).
-opaque canvas() :: #{vector() => char()}.
-type plot_data() :: [{char(), [{float(), float()}]}].
-export_type([vector/0, canvas/0]).
-define(epsilon, 1.0e-6).
%%====================================================================
%% API functions
%%====================================================================
-spec init() -> canvas().
%% @doc Create an empty canvas.
init() ->
    maps:new().

-spec render(canvas()) -> iolist().
%% @doc Render the canvas as an iolist, top row first; cells that were
%% never drawn come out as spaces.
render(Canvas) ->
    {{XMin, YMin}, {XMax, YMax}} = dimensions(Canvas),
    Row = fun(Y) ->
                  [[maps:get({X, Y}, Canvas, $\s) || X <- lists:seq(XMin, XMax)], $\n]
          end,
    [Row(Y) || Y <- lists:reverse(lists:seq(YMin, YMax))].

-spec draw([cont()], canvas()) -> canvas().
%% @doc Apply drawing continuations to a canvas, left to right.
draw(Ops, Canvas0) ->
    lists:foldl(fun(Op, Canvas) -> Op(Canvas) end, Canvas0, Ops).

-spec draw([cont()]) -> canvas().
%% @doc Apply drawing continuations to a fresh, empty canvas.
draw(Ops) ->
    draw(Ops, init()).

-spec dimensions(canvas()) -> {vector(), vector()}.
%% @doc Bounding box {{XMin, YMin}, {XMax, YMax}} of all drawn cells.
%% Note: the fold starts from {{1, 1}, {1, 1}}, so {1, 1} is always inside
%% the reported box even on an empty canvas.
dimensions(Canvas) ->
    maps:fold(
      fun({X, Y}, _Char, {{XMin, YMin}, {XMax, YMax}}) ->
              {{min(X, XMin), min(Y, YMin)}, {max(X, XMax), max(Y, YMax)}}
      end,
      {{1, 1}, {1, 1}},
      Canvas).
-spec char(canvas(), vector(), char()) -> canvas().
%% @doc Set the cell at Pos to Char.
char(Cnv, Pos, Char) ->
    Cnv #{Pos => Char}.

-spec char(vector(), char()) -> cont().
%% @doc Continuation form of char/3, for use with draw/1,2.
char(Pos, Char) ->
    fun(Cnv) -> char(Cnv, Pos, Char) end.

-spec line(canvas(), vector(), vector(), char()) -> canvas().
%% @doc Draw a straight line of Char cells from {X1, Y1} to {X2, Y2}
%% (both endpoints inclusive), rasterized by linear interpolation.
line(Cnv, {X1, Y1}, {X2, Y2}, Char) ->
    DX = X2 - X1,
    DY = Y2 - Y1,
    N = max(1, max(abs(DX), abs(DY))),
    Points = [{X1 + round(DX * I / N), Y1 + round(DY * I / N)}
              || I <- lists:seq(0, N)],
    %% Fixed: the fold accumulator previously shadowed the outer Cnv
    %% binding (compiler warning); it is now named distinctly.
    lists:foldl(fun(Pos, Acc) -> char(Acc, Pos, Char) end, Cnv, Points).

-spec line(vector(), vector(), char()) -> cont().
%% @doc Continuation form of line/4.
line(F, T, C) ->
    fun(Cnv) -> line(Cnv, F, T, C) end.

-spec string(canvas(), vector(), string(), left | right) -> canvas().
%% @doc Write String horizontally, starting at {X, Y} (right) or ending at
%% {X, Y} (left). The empty string leaves the canvas untouched.
string(Cnv, _, [], _) ->
    Cnv;
string(Cnv, {X, Y}, String, Direction) ->
    Columns = case Direction of
                  right -> lists:seq(X, X + length(String) - 1);
                  left  -> lists:seq(X - length(String) + 1, X)
              end,
    %% Fixed: the fold's fun head previously shadowed both X and Cnv
    %% (compiler warnings); variables renamed.
    lists:foldl(fun({Col, Char}, Acc) -> char(Acc, {Col, Y}, Char) end,
                Cnv,
                lists:zip(Columns, String)).

-spec string(vector(), string(), left | right) -> cont().
%% @doc Continuation form of string/4.
string(Pos, Str, Dir) ->
    fun(Cnv) -> string(Cnv, Pos, Str, Dir) end.
-spec plot(plot_data()) -> canvas().
%% @doc Plot the datapoint series with default configuration.
plot(Datapoints) ->
    plot(Datapoints, #{}).
-spec plot(plot_data(), map()) -> canvas().
%% @doc Plot one or more series of {X, Y} datapoints, each rendered with its
%% own character, onto a canvas with labelled axes. Config keys (all
%% optional): aspect_ratio (height/width, default 0.2) and include_zero
%% (whether axes must include the origin, default true; see bound/3).
plot(Datapoints, Config) ->
    AllDatapoints = lists:append([L || {_, L} <- Datapoints]),
    {XX, YY} = lists:unzip(AllDatapoints),
    %% Data range per axis, possibly widened to include 0.
    Xm = bound(min, Config, XX),
    XM = bound(max, Config, XX),
    Ym = bound(min, Config, YY),
    YM = bound(max, Config, YY),
    %% Guard against zero-width ranges to avoid division by zero below.
    DX = max(?epsilon, XM - Xm),
    DY = max(?epsilon, YM - Ym),
    %% Dimensions of the plot:
    AspectRatio = maps:get(aspect_ratio, Config, 0.2),
    Width = max(length(Datapoints) * 2, 70),
    Height = round(Width * AspectRatio),
    %% Frame = origin plus per-axis scale factors, consumed by plot_coord/2.
    Frame = {{Xm, Ym}, {Width / DX, Height / DY}},
    %% Draw axis
    Cnv0 = draw( [ %% Vertical:
                   line({0, 0}, {0, Height - 1}, $|)
                 , char({0, Height}, $^)
                   %% Labels:
                 , string({-2, 0}, print_num(Ym), left)
                 , string({-2, Height}, print_num(YM), left)
                   %% Horizontal:
                 , line({0, 0}, {Width - 1, 0}, $-)
                 , char({Width, 0}, $>)
                 , char({0, 0}, $+)
                   %% Labels
                 , string({0, -1}, print_num(Xm), right)
                 , string({Width, -1}, print_num(XM), left)
                 ]
               , init()
               ),
    %% Overlay each series on top of the axes.
    lists:foldl( fun({Char, Data}, Acc) ->
                         draw_datapoints(Frame, Char, Data, Acc)
                 end
               , Cnv0
               , Datapoints
               ).
%% @private Draw every datapoint of one series onto the canvas using Char,
%% mapping data coordinates through Frame (see plot_coord/2).
%% Fixed: the fold's fun parameter previously shadowed the outer Acc
%% binding (compiler warning); variables renamed.
draw_datapoints(Frame, Char, Data, Canvas0) ->
    lists:foldl(fun(Coords, Canvas) ->
                        char(Canvas, plot_coord(Frame, Coords), Char)
                end,
                Canvas0,
                Data).
%% @private Axis-label text for a number: integers verbatim, floats with a
%% fixed six digits after the decimal point.
print_num(Value) ->
    case is_integer(Value) of
        true ->
            integer_to_list(Value);
        false ->
            lists:flatten(io_lib:format("~.6..f", [Value]))
    end.
%% @private Map a data-space point into an integer canvas cell: translate by
%% the frame origin, scale per axis, then round.
plot_coord({{OriginX, OriginY}, {ScaleX, ScaleY}}, {X, Y}) ->
    Col = round((X - OriginX) * ScaleX),
    Row = round((Y - OriginY) * ScaleY),
    {Col, Row}.
%% @private Lower (min) or upper (max) bound of a list of numbers, with 0 as
%% the bound of the empty list. Unless include_zero is set to false in Cfg,
%% the bound is widened to include 0 so the plot axes always show the origin.
%% Fixed: replaced dynamic `lists:Fun(L)' / `erlang:Fun(0, N)' dispatch on
%% the atom argument with explicit clauses, which xref and Dialyzer can
%% actually analyze.
bound(min, Cfg, L) ->
    apply_bound(fun lists:min/1, fun erlang:min/2, Cfg, L);
bound(max, Cfg, L) ->
    apply_bound(fun lists:max/1, fun erlang:max/2, Cfg, L).

%% @private Shared worker: pick the list extreme, then optionally fold 0 in.
apply_bound(ListFun, PairFun, Cfg, L) ->
    N = case L of
            [] -> 0;
            _ -> ListFun(L)
        end,
    case maps:get(include_zero, Cfg, true) of
        true -> PairFun(0, N);
        false -> N
    end.
-spec visible(char(), string(), [term()]) -> iolist().
%% @doc Format Fmt/Args and frame the resulting text in a highly visible
%% box drawn with Char (double borders left/right), at least 80 columns
%% wide; returns the rendered box surrounded by blank lines.
visible(Char, Fmt, Args) ->
    Str = lines(lists:flatten(io_lib:format(Fmt, Args))),
    %% Box width: at least 79 columns, or the longest line plus one.
    Width = max(79, lists:max([length(I) || I <- Str])) + 1,
    N = length(Str),
    %% Text lines are placed bottom-up because canvas Y grows upwards.
    Text = [string({4, Y}, S, right)
            || {Y, S} <- lists:zip( lists:seq(1, N)
                                  , lists:reverse(Str)
                                  )],
    Cnv = draw([ asciiart:line({1, -1}, {Width, -1}, Char)
               , asciiart:line({1, N + 2}, {Width, N + 2}, Char)
               , asciiart:line({1, 0}, {1, N + 1}, Char)
               , asciiart:line({2, 0}, {2, N + 1}, Char)
               , asciiart:line({Width - 1, 0}, {Width - 1, N + 1}, Char)
               , asciiart:line({Width, 0}, {Width, N + 1}, Char)
               ] ++ Text),
    [$\n, render(Cnv), $\n].
-spec lines(string()) -> [string()].
%% @private Split a flat string into its newline-separated lines.
%% Fixed: trailing dataset/metadata residue after the closing `.' made the
%% module unparseable.
lines(Str) ->
    re:split(Str, "\n", [{return, list}]).
%%
%% Copyright (c) 2018 <NAME>. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc Op-based LWWMap CRDT.
-module(op_lwwmap).
-author("<NAME> <<EMAIL>>").
-behaviour(type).
-define(TYPE, ?MODULE).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-export([new/0, new/1]).
-export([mutate/3]).
-export([query/1, query/2, equal/2]).
-export_type([op_lwwmap/0, op_lwwmap_op/0]).
-opaque op_lwwmap() :: {?TYPE, payload()}.
-type payload() :: maps:map(key(), {timestamp(), value()}).
-type key() :: term().
-type timestamp() :: non_neg_integer().
-type value() :: term().
-type op_lwwmap_op() :: {set, key(), timestamp(), value()} |
[{set, key(), timestamp(), value()}].
%% @doc Create a new, empty `op_lwwmap()'.
-spec new() -> op_lwwmap().
new() ->
    {?TYPE, maps:new()}.

%% @doc Create a new, empty `op_lwwmap()'. The (empty) argument list is
%% accepted to satisfy the `type' behaviour's new/1 shape.
-spec new([term()]) -> op_lwwmap().
new([]) ->
    new().
%% @doc Mutate a `op_lwwmap()'. A single `{set, Key, Timestamp, Value}'
%% operation writes the entry only when its timestamp is strictly newer than
%% the one stored (last-writer-wins); a list of operations is applied in
%% order, left to right.
-spec mutate(op_lwwmap_op(), type:id(), op_lwwmap()) ->
    {ok, op_lwwmap()}.
mutate({set, Key, Timestamp, Value}, _Actor, {?TYPE, Entries}) ->
    Entries1 =
        case maps:find(Key, Entries) of
            {ok, {Existing, _}} when Existing >= Timestamp ->
                %% An equally new or newer write already exists; keep it.
                Entries;
            _ ->
                maps:put(Key, {Timestamp, Value}, Entries)
        end,
    {ok, {?TYPE, Entries1}};
mutate(Ops, Actor, Map0) ->
    Map1 =
        lists:foldl(
          fun(Op, Acc) ->
                  {ok, Acc1} = mutate(Op, Actor, Acc),
                  Acc1
          end,
          Map0,
          Ops),
    {ok, Map1}.
%% @doc Return the key/value view of a `op_lwwmap()', with the per-entry
%% timestamps stripped. (Doc fixed: this queries, it does not mutate.)
-spec query(op_lwwmap()) -> maps:map(key(), value()).
query({?TYPE, Map}) ->
    %% simply hide timestamps
    maps:map(fun(_, {_, V}) -> V end, Map).

%% @doc Return the key/value view restricted to the `MoreRecent' greatest
%% keys (keys sorted in descending term order — note this selects by key,
%% not by timestamp).
%% Fixed: the spec previously declared a non_neg_integer() return, but the
%% function returns a map, exactly like query/1.
-spec query(list(term()), op_lwwmap()) -> maps:map(key(), value()).
query([MoreRecent], {?TYPE, Map}) ->
    Keys = lists:reverse(lists:sort(maps:keys(Map))),
    TopKeys = lists:sublist(Keys, MoreRecent),
    MapTop = maps:with(TopKeys, Map),
    query({?TYPE, MapTop}).
%% @doc Are two `op_lwwmap()'s structurally equal? True exactly when they
%% hold the same keys mapped to the same {timestamp, value} pairs.
%% Fixed: this previously returned the atom `undefined' for every input,
%% violating its own boolean() spec.
-spec equal(op_lwwmap(), op_lwwmap()) -> boolean().
equal({?TYPE, Map1}, {?TYPE, Map2}) ->
    Map1 =:= Map2.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
%% EUnit smoke tests for the CRDT operations.
%% Fixed: trailing dataset/metadata residue after `-endif.' made the
%% module unparseable.
new_test() ->
    ?assertEqual({?TYPE, #{}}, new()).
query_test() ->
    Map0 = new(),
    Map1 = {?TYPE, maps:from_list([{1, {10, 1}}, {2, {11, 13}}, {3, {12, 1}}])},
    ?assertEqual(#{}, query(Map0)),
    ?assertEqual(maps:from_list([{1, 1}, {2, 13}, {3, 1}]), query(Map1)).
set_test() ->
    %% Later timestamps win; stale writes are ignored.
    Map0 = new(),
    {ok, Map1} = mutate({set, a, 10, v1}, 1, Map0),
    {ok, Map2} = mutate({set, a, 11, v2}, 2, Map1),
    {ok, Map3} = mutate({set, b, 12, v3}, 1, Map2),
    ?assertEqual({?TYPE, maps:from_list([{a, {10, v1}}])}, Map1),
    ?assertEqual({?TYPE, maps:from_list([{a, {11, v2}}])}, Map2),
    ?assertEqual({?TYPE, maps:from_list([{a, {11, v2}}, {b, {12, v3}}])}, Map3).
multiple_set_test() ->
    OpList = [{set, a, 10, v1}, {set, a, 11, v2}, {set, b, 12, v3}],
    {ok, Map} = mutate(OpList, 1, new()),
    ?assertEqual({?TYPE, maps:from_list([{a, {11, v2}}, {b, {12, v3}}])}, Map).
-endif.
-module(interval).
-export([new/2, length/1, contains/2, contains/3, split/2, split/3,
preceeds/2, adjacent/2, merge/2]).
-export_type([interval/0]).
-record(interval, {left :: rational:rational(),
right :: rational:rational()}).
-type interval() :: #interval{}.
%% @doc Create a new interval with the given endpoints.
%%
%% Throws `{badarg, Msg}' (via `throw/1' — note: not an `error(badarg)'
%% exception, despite the tag) when `Left' compares greater than `Right'
%% under rational ordering. Equal endpoints are allowed.
%%
%% @throws {badarg, string()}
-spec new(rational:rational(), rational:rational()) -> interval().
new(Left, Right) ->
    case rational:compare(Left, Right) of
        gt -> throw({badarg, "Left must be less than Right"});
        _ -> #interval{left = Left, right = Right}
    end.
%% @doc Get the length of an interval: right endpoint minus left, as a
%% rational. (This local length/1 intentionally shadows the auto-imported
%% BIF within the module.)
-spec length(interval()) -> rational:rational().
length(#interval{left=Left, right=Right}) ->
    rational:subtract(Right, Left).
%% @doc Test whether `Value' is contained in `Interval', treating both
%% endpoints as closed.
-spec contains(Value::rational:rational(), Interval::interval()) -> boolean().
contains(Value, IntervalTable) ->
    contains(Value, IntervalTable, both).

%% @doc Membership test with configurable endpoint closure: `Closed' selects
%% which endpoints count as inside (`left', `right', `both', or `neither').
-spec contains(Value::rational:rational(), Interval::interval(),
               Closed::left|right|both|neither) ->
          boolean().
contains(Value, #interval{left=Left, right=Right}, Closed) ->
    %% Compare once against each endpoint, then decide by the pair.
    L = rational:compare(Value, Left),
    R = rational:compare(Value, Right),
    case {L, R} of
        {lt, lt} -> false;   % below the interval
        {gt, gt} -> false;   % above the interval
        {gt, lt} -> true;    % strictly inside
        {eq, eq} ->
            %% Degenerate interval: Value == Left == Right; any closed
            %% endpoint admits it.
            (Closed =:= left) or (Closed =:= right) or (Closed =:= both);
        {eq, lt} ->
            %% Exactly on the left endpoint.
            (Closed =:= left) or (Closed =:= both);
        {gt, eq} ->
            %% Exactly on the right endpoint.
            (Closed =:= right) or (Closed =:= both)
    end.
%% @doc Split an interval into two parts where the first part is at
%% least `Span' long, splitting from the left.
-spec split(rational:rational(), interval()) -> {interval() | empty, interval() | empty}.
split(Span, I) ->
    split(Span, I, left).

%% @doc Split the interval from the left or the right. When `Span' is equal
%% to or exceeds the interval's length, the whole interval ends up on the
%% corresponding side and the other side is `empty'; zero-length pieces are
%% collapsed to `empty' by empty_or/1.
-spec split(Span::rational:rational(), Interval::interval(), From::left | right) ->
          {interval() | empty, interval() | empty}.
split(Span, I=#interval{left=Left, right=Right}, left) ->
    Split = rational:add(Left, Span),
    case rational:compare(Span, interval:length(I)) of
        eq ->
            {I, empty};
        gt ->
            {I, empty};
        lt ->
            {empty_or(interval:new(Left, Split)),
             empty_or(interval:new(Split, Right))}
    end;
split(Span, I=#interval{left=Left, right=Right}, right) ->
    Split = rational:subtract(Right, Span),
    case rational:compare(Span, interval:length(I)) of
        eq ->
            {empty, I};
        gt ->
            {empty, I};
        lt ->
            {empty_or(interval:new(Left, Split)),
             empty_or(interval:new(Split, Right))}
    end.
-spec empty_or(interval()) -> interval() | empty.
%% Collapse a zero-length interval to the atom `empty'; any other term
%% passes through unchanged. The repeated variable in the first head
%% matches only when both endpoints are identical.
empty_or(#interval{left = Same, right = Same}) ->
    empty;
empty_or(Interval) ->
    Interval.
%% Does interval A lie entirely before B? (A starts strictly before B, and
%% A ends at or before B's start.) NOTE(review): the exported name keeps
%% the original "preceeds" spelling; renaming would break callers.
-spec preceeds(interval(), interval()) -> boolean().
preceeds(#interval{left=LeftA, right=RightA},
         #interval{left=LeftB}) ->
    (rational:compare(LeftA, LeftB) == lt)
        andalso ((rational:compare(RightA, LeftB) == lt)
                 orelse (rational:compare(RightA, LeftB) == eq)).

%% Do the two intervals share an endpoint (A ends where B starts, or B ends
%% where A starts)? Uses structural equality on the rational endpoints.
-spec adjacent(interval(), interval()) -> boolean().
adjacent(#interval{left=LeftA, right=RightA},
         #interval{left=LeftB, right=RightB}) ->
    (RightA == LeftB) orelse (RightB == LeftA).
%% @doc Merge two adjacent intervals into the single interval covering both.
%% Throws {badarg, _} when the intervals do not share an endpoint.
%% Fixed: trailing dataset/metadata residue after the final `end.' made the
%% module unparseable.
merge(IntervalA = #interval{left = LeftA, right = RightA},
      IntervalB = #interval{left = LeftB, right = RightB}) ->
    case adjacent(IntervalA, IntervalB) of
        true ->
            #interval{left = rational:minimum(LeftA, LeftB),
                      right = rational:maximum(RightA, RightB)};
        false ->
            throw({badarg, "intervals must be adjacent"})
    end.
% Copyright 2007-2008 Konrad-Zuse-Zentrum für Informationstechnik Berlin
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
%%%-------------------------------------------------------------------
%%% File : intervals.erl
%%% Author : <NAME> <<EMAIL>>
%%% <NAME> <<EMAIL>>
%%% Description : interval data structure + functions for bulkowner
%%%
%%% Created : 3 May 2007 by <NAME> <<EMAIL>>
%%%-------------------------------------------------------------------
%% @author <NAME> <<EMAIL>>
%% @copyright 2007-2008 Konrad-Zuse-Zentrum für Informationstechnik Berlin
%% 2008 onScale solutions GmbH
%% @version $Id$
-module(intervals).
-author('<EMAIL>').
-vsn('$Id$ ').
-export([first/0,last/0,
new/1, new/2, make/1,
is_empty/1, empty/0,
cut/2,
is_covered/2,
unpack/1,
in/2,
sanitize/1,
% for unit testing only
cut_iter/2,
normalize/1,
wraps_around/1,
is_between/3,
find_start/3
]).
% @type interval() = [] | term() | {interval,term(),term()} | all | list(interval()).
% [] -> empty interval
% {element, term()} -> one element interval
% {interval,term(),term()} -> closed interval
% all -> minus_infinity to plus_infinity
% list(interval()) -> [i1, i2, i3,...]
%
% access start and endpoint of a tuple-interval using
% element(first(),Interval) and element(last(),Interval).
%% Tuple positions of the start/end key inside a {interval, First, Last}
%% tuple, for use with element/2 (see the comment on the interval() type).
first() ->
    2.
last() ->
    3.
% @spec new(term(), term()) -> interval()
%% Note: equal endpoints denote the complete ring (`all'), not an empty
%% interval — this module models a circular key space.
new(X, X) ->
    all;
new(Begin, End) ->
    {interval, Begin, End}.

%% One-element interval.
new(X) ->
    {element,X}.

%% Build an interval from an endpoint pair.
make({Begin, End}) ->
    new(Begin, End).
% @spec unpack(interval()) -> {term(), term()}
%% Convert an interval to its endpoint pair where possible. NOTE(review):
%% contrary to the spec comment above, the empty interval unpacks to `[]'
%% and element/list intervals are returned unchanged.
unpack(all) -> {minus_infinity,plus_infinity};
unpack([]) -> empty();
unpack({interval,First,Last}) ->
    {First, Last};
unpack(X) ->
    X.
% @spec is_empty(interval()) -> bool()
%% Only the empty list denotes the empty interval.
is_empty(Interval) ->
    Interval =:= [].

% @spec empty() -> interval()
%% The canonical empty interval.
empty() ->
    [].
% @spec cut(A::interval(), B::interval()) -> interval()
%% Intersection of two intervals. Element intervals are resolved up front by
%% membership; anything else is normalized (wrap-around intervals split into
%% their two halves) and intersected pairwise by cut_iter/2.
cut([], _) ->
    empty();
cut(_, []) ->
    empty();
cut({element,X}=A, B) ->
    %% Note: in/2 takes the raw value, hence X rather than A.
    case in(X,B) of
        true -> A;
        false -> empty()
    end;
cut(A, {element,_}=B) ->
    cut(B, A);
cut(A, B) ->
    A_ = normalize(A),
    B_ = normalize(B),
    cut_iter(A_, B_).
%% @private Pairwise intersection worker for cut/2. Lists distribute over
%% both arguments, so the result may be a nested list (possibly containing
%% empties); callers can flatten it with sanitize/1.
cut_iter([First | Rest], B) ->
    [cut_iter(First, B) | cut_iter(Rest, B)];
cut_iter(A, [First | Rest]) ->
    [cut_iter(A, First) | cut_iter(A, Rest)];
cut_iter(all, B) ->
    B;
cut_iter([], _) ->
    [];
cut_iter(_, []) ->
    [];
cut_iter(A, all) ->
    A;
cut_iter({element, X} = A, B) ->
    %% Fixed: in/2 expects the raw value, not the {element, _} wrapper —
    %% compare cut/2, which unpacks before calling in/2. Previously e.g.
    %% cut_iter({element, K}, {element, K}) wrongly returned empty.
    case in(X, B) of
        true -> A;
        false -> empty()
    end;
cut_iter(A, {element, _} = B) ->
    cut_iter(B, A);
cut_iter({interval, A0, A1}, {interval, B0, B1}) ->
    %% Mutual endpoint containment decides overlap; touching endpoints
    %% collapse to a one-element interval.
    B0_in_A = is_between(A0, B0, A1),
    B1_in_A = is_between(A0, B1, A1),
    A0_in_B = is_between(B0, A0, B1),
    A1_in_B = is_between(B0, A1, B1),
    if
        (A1 == B0) and not A0_in_B ->
            {element, A1};
        (B1 == A0) and not B0_in_A ->
            {element, B1};
        B0_in_A or B1_in_A or A0_in_B or A1_in_B ->
            new(util:max(A0, B0), util:min(A1, B1));
        true ->
            empty()
    end.
% @doc returns true if the intervals cover the complete interval
% @spec is_covered(interval(), [interval()]) -> bool()
is_covered([], _) ->
    true;
is_covered(all, Intervals) ->
    is_covered(new(minus_infinity, plus_infinity), Intervals);
is_covered({element, X}, Intervals) ->
    %% Fixed: in/2 takes the raw value; passing the {element, _} wrapper
    %% through (as before) meant an element was never reported as covered,
    %% not even by an identical element interval.
    in(X, Intervals);
is_covered({interval, _, _}, {element, _}) ->
    false;
is_covered(Interval, Intervals) ->
    NormalIntervals = normalize(Intervals),
    %% A wrap-around interval is covered iff both of its non-wrapping
    %% halves are covered.
    case wraps_around(Interval) of
        true ->
            is_covered_helper(new(minus_infinity, element(last(), Interval)), NormalIntervals)
            and
            is_covered_helper(new(element(first(),Interval), plus_infinity), NormalIntervals);
        false ->
            is_covered_helper(Interval, NormalIntervals)
    end.
% @private
%% Greedy sweep: find some interval containing the target's start; if it
%% reaches past the target's end we are done, otherwise recurse on the
%% still-uncovered suffix against the remaining candidate intervals.
is_covered_helper(Interval, Intervals) ->
    case find_start(element(first(),Interval), Intervals, []) of
        none ->
            false;
        {CoversStart, RemainingIntervals} ->
            case greater_equals_than(element(last(),CoversStart), element(last(),Interval)) of
                true ->
                    true;
                false ->
                    is_covered_helper(intervals:new(element(last(),CoversStart), element(last(),Interval)), RemainingIntervals)
            end
    end.
%% Membership: is the raw value X inside the interval? Lists recurse with
%% `or', `all' contains everything, and `{element, E}' matches by equality
%% (repeated-variable pattern). Clause order is significant.
in(X, {interval, First, Last}) ->
    is_between(First, X, Last);
in(X, [I | Rest]) ->
    in(X, I) or in(X, Rest);
in(_, []) ->
    false;
in(_, all) ->
    true;
in(X, {element,X}) ->
    true;
in(_, {element,_}) ->
    false.
%% Flatten nested interval lists, dropping empties along the way; a bare
%% `all' inside a list collapses the entire result to `all'. Non-list input
%% passes through unchanged. Accumulation prepends, so element order is
%% reversed relative to the input.
sanitize(Intervals) when is_list(Intervals) ->
    sanitize_helper(Intervals, []);
sanitize(Interval) ->
    Interval.

%% Worker for sanitize/1; clause order is significant.
sanitize_helper([[] | Rest], Acc) ->
    sanitize_helper(Rest, Acc);
sanitize_helper([all | _], _Acc) ->
    all;
sanitize_helper([Nested | Rest], Acc) when is_list(Nested) ->
    sanitize_helper(Rest, sanitize_helper(Nested, Acc));
sanitize_helper([Interval | Rest], Acc) ->
    sanitize_helper(Rest, [Interval | Acc]);
sanitize_helper(all, Acc) ->
    Acc;
sanitize_helper({interval, _First, _Last} = Interval, Acc) ->
    [Interval | Acc];
sanitize_helper({element, _} = Element, Acc) ->
    [Element | Acc];
sanitize_helper([], Acc) ->
    Acc.
%%====================================================================
%% private functions
%%====================================================================
% @private
%% An interval wraps around the circular key space when its start is not
%% strictly before its end — unless it is anchored at minus_infinity or
%% plus_infinity, which by definition cannot wrap. (The >= on mixed
%% atom/number endpoints relies on Erlang's term order: number < atom.)
wraps_around({interval, First, Last}) ->
    First =/= minus_infinity
        andalso Last =/= plus_infinity
        andalso First >= Last;
wraps_around(_) ->
    false.
%% Normalize intervals for cut_iter/2: `all' expands to the full interval,
%% empties are dropped, and wrap-around intervals are split into their two
%% non-wrapping halves. NOTE(review): a one-element list normalizes to the
%% bare interval WITHOUT re-normalizing it (no wrap split) — cut_iter/2
%% copes with either shape; confirm this asymmetry is intended.
normalize([]) ->
    [];
normalize([all | Rest]) ->
    [new(minus_infinity, plus_infinity) | normalize(Rest)];
normalize([[] | Rest]) ->
    normalize(Rest);
normalize([Interval]) ->
    Interval;
normalize([Interval|Rest]) ->
    case wraps_around(Interval) of
        true ->
            [new(element(first(),Interval), plus_infinity), new(minus_infinity, element(last(),Interval)) | normalize(Rest)];
        false ->
            [Interval | normalize(Rest)]
    end;
normalize({interval,First,Last}=I) ->
    %% A bare tuple with start >= end also gets split into two halves.
    case greater_equals_than(First,Last) of
        true -> [new(minus_infinity, Last), new(First, plus_infinity)];
        false -> I
    end;
normalize(A) ->
    A.
% @private
% @spec find_start(term(), [interval()], [interval()]) -> {interval(), [interval()]} | none
%% Find an interval containing Start (element intervals are skipped, since
%% a single point cannot extend the coverage sweep); return it together
%% with all the other candidates, or `none'. The last clause wraps a bare
%% interval argument into a singleton list.
find_start(_Start, [], _) ->
    none;
find_start(Start, [{element,_} | Rest], Remainder) ->
    find_start(Start, Rest, Remainder);
find_start(Start, [Interval | Rest], Remainder) ->
    case is_between(element(first(),Interval), Start, element(last(),Interval)) of
        true ->
            {Interval, Remainder ++ Rest};
        false ->
            find_start(Start, Rest, [Interval | Remainder])
    end;
find_start(Start, Interval, Remainder) ->
    find_start(Start, [Interval], Remainder).
% @private
% @spec is_between(term(), term(), term()) -> bool()
%% Ring-aware containment: is Id inside [Begin, End] in a circular key
%% space whose extreme sentinels are the atoms minus_infinity and
%% plus_infinity? Clause order is significant; the final clause handles
%% plain keys, including the wrapped case (Begin > End).
is_between(_, X, X) ->
    true;
is_between(X, _, X) ->
    true;
is_between(_, plus_infinity, plus_infinity) ->
    true;
is_between(minus_infinity, _, plus_infinity) ->
    true;
is_between(_, minus_infinity, plus_infinity) ->
    false;
is_between(X, Y, plus_infinity) ->
    X =< Y;
is_between(minus_infinity, minus_infinity, _) ->
    true;
is_between(minus_infinity, plus_infinity, _) ->
    false;
is_between(minus_infinity, X, Y) ->
    X =< Y;
%% The sentinels sit "between" Begin and End exactly when the interval
%% wraps (Begin > End).
is_between(X, plus_infinity, Y) when X > Y->
    true;
is_between(_, plus_infinity, _) ->
    false;
is_between(X, minus_infinity, Y) when X > Y->
    true;
is_between(_, minus_infinity, _) ->
    false;
is_between(Begin, Id, End) ->
    if
        Begin < End ->
            (Begin =< Id) and (Id =< End);
        Begin == End ->
            true;
        true ->
            %% Wrapped interval: inside either tail segment.
            (Begin =< Id) or (Id =< End)
    end.
%% @private Total-order `>=' over keys extended with the sentinels
%% minus_infinity (smallest) and plus_infinity (largest).
%% Fixed: (1) trailing dataset/metadata residue after the final clause
%% broke the source; (2) X >= minus_infinity previously fell through to the
%% raw term comparison, where numbers sort BELOW atoms, so it wrongly
%% returned false — sentinel cases are now handled explicitly.
greater_equals_than(minus_infinity, minus_infinity) ->
    true;
greater_equals_than(minus_infinity, _) ->
    false;
greater_equals_than(_, minus_infinity) ->
    true;
greater_equals_than(plus_infinity, _) ->
    true;
greater_equals_than(_, plus_infinity) ->
    false;
greater_equals_than(X, Y) ->
    X >= Y.
%%
%% @doc Implementation of RSA encryption functions.
%%
%% @reference [FSK1] Chapter 12. RSA
%%
-module(rsa).
-author("<NAME>").
-export([generate_rsa_key/1, generate_rsa_prime/1]).
-export([decrypt_random_key_with_rsa/2, encrypt_random_key_with_rsa/1]).
-export([msg_to_rsa_number/2, sign_with_rsa/2, verify_rsa_signature/3]).
-include_lib("stdlib/include/assert.hrl").
%% =============================================================================
%% Chapter 12.4.5. Generating RSA Keys
%% =============================================================================
%%
%% @doc Returns a random prime in the interval `2^{k-1}...2^k-1'
%% subject to `P mod 3 =/= 1' and `P mod 5 =/= 1'.
%%
%% @param K size of the desired prime, in number of bits.
%%
-spec generate_rsa_prime(K :: 1024..4096) -> pos_integer().
generate_rsa_prime(K) when 1024 =< K, K =< 4096 ->
    %% Allow up to 100*K random candidates; when the budget is exhausted
    %% the guard below fails and the process crashes (let it crash).
    generate_rsa_prime(K, 100 * K).

%% Draw uniform K-bit candidates until one is prime and satisfies the
%% congruence constraints (P mod 3 =/= 1 and P mod 5 =/= 1) required by
%% the fixed public exponents 3 and 5.
generate_rsa_prime(K, Tries) when 0 < Tries ->
    Candidate = rnd:random(maths:pow(2, K - 1), maths:pow(2, K) - 1),
    Acceptable = Candidate rem 3 =/= 1 andalso
                 Candidate rem 5 =/= 1 andalso
                 primes:is_prime(Candidate),
    case Acceptable of
        true -> Candidate;
        false -> generate_rsa_prime(K, Tries - 1)
    end.
%%
%% @doc Returns a newly generated RSA private key.
%% The key is a tuple of the following integers:
%% `P', `Q' - prime factors of the modulus,
%% `N' - modulus of about `K' bits,
%% `D3' - signing exponent,
%% `D5' - decryption exponent.
%%
%% Public exponents are fixed:
%% 3 - signature verification exponent,
%% 5 - encryption exponent.
%%
%% @param K size of the modulus, in number of bits.
%%
-spec generate_rsa_key(K :: 2048..8192) -> {P, Q, N, D3, D5} when
    P :: pos_integer(), Q :: pos_integer(), N :: pos_integer(), D3 :: pos_integer(), D5 :: pos_integer().
generate_rsa_key(K) when 2048 =< K, K =< 8192 ->
    %% Two distinct primes of half the requested modulus size.
    P = generate_rsa_prime(K div 2),
    Q = generate_rsa_prime(K div 2),
    ?assertNotEqual(P, Q),
    %% Private exponents are the modular inverses of the fixed public
    %% exponents 3 (signatures) and 5 (encryption) modulo lcm(P-1, Q-1).
    Totient = maths:lcm(P - 1, Q - 1),
    SigningExp = maths:mod_inv(3, Totient),
    DecryptionExp = maths:mod_inv(5, Totient),
    {P, Q, P * Q, SigningExp, DecryptionExp}.
%% =============================================================================
%% Chapter 12.4.6. Encryption
%% =============================================================================
%%
%% @doc Returns a tuple `{K, C}' where `K' is a random 256-bit symmetric key,
%% and `C' is the RSA-encrypted key.
%%
%% @param PK RSA public key
%%
-spec encrypt_random_key_with_rsa(PK :: {N :: pos_integer(), E :: pos_integer()}) ->
    {K :: binary(), C :: non_neg_integer()}.
encrypt_random_key_with_rsa({N, E}) ->
    %% Pick a uniform random R below 2^ilog2(N) (hence below N), derive
    %% the symmetric key as SHA-256(R), and ship R encrypted as R^E mod N.
    Bits = maths:ilog2(N),
    R = rnd:random(0, maths:pow(2, Bits) - 1),
    SymmetricKey = crypto:hash(sha256, binary:encode_unsigned(R)),
    Ciphertext = maths:mod_exp(R, E, N),
    {SymmetricKey, Ciphertext}.
%%
%% @doc Returns the 256-bit symmetric key `K' that was generated by
%% {@link encrypt_random_key_with_rsa/1}.
%%
%% @param SK RSA private key
%% @param C RSA-encrypted symmetric key
%%
-spec decrypt_random_key_with_rsa(
    SK :: {N :: pos_integer(), D :: pos_integer()},
    C :: non_neg_integer()) -> binary().
decrypt_random_key_with_rsa({N, D}, C) when 0 =< C, C < N ->
    %% Recover R = C^D mod N, then re-derive the symmetric key as SHA-256(R).
    R = maths:mod_exp(C, D, N),
    crypto:hash(sha256, binary:encode_unsigned(R)).
%% =============================================================================
%% Chapter 12.4.7. Signatures
%% =============================================================================
%%
%% @doc Returns a pseudo-random number modulo `N' that is used
%% to create and verify a signature.
%%
%% @see sign_with_rsa/2
%% @see verify_rsa_signature/3
%%
%% @param N Modulus of RSA public key.
%% @param M Message to be converted to a value modulo N.
%%
-spec msg_to_rsa_number(N :: pos_integer(), M :: binary()) -> pos_integer().
msg_to_rsa_number(N, M) ->
    %% The helper below reseeds the process-local PRNG, so it is run in a
    %% throwaway process; wait at most 2 seconds for its answer.
    Parent = self(),
    Worker = spawn(fun() -> Parent ! {self(), msg_to_rsa_number({N, M})} end),
    receive
        {Worker, Number} -> Number
    after
        2000 -> error(timeout)
    end.
%% @hidden
%% This function reseeds the PRNG:
%% call it in a separate process for safety.
%% Seeds the PRNG with SHA-256(M) and reduces the first K random bytes
%% (K = byte size of N) modulo N, giving a deterministic value per message.
%% NOTE(review): rnd:rand_bytes/1 is assumed to return an integer here
%% (it is used with rem) -- confirm against the rnd module.
msg_to_rsa_number({N, M}) ->
    ByteSize = byte_size(binary:encode_unsigned(N)),
    rnd:rand_seed(crypto:hash(sha256, M)),
    rnd:rand_bytes(ByteSize) rem N.
%%
%% @doc Signs the message with RSA private key.
%%
%% Instead of decrypting hash of the message, we seed PRNG with the hash
%% and decrypt the first random integer of the same size as modulus.
%%
%% @param SK RSA private key with E = 3.
%% @param M Message to be signed.
%%
%% @see msg_to_rsa_number/2
%%
-spec sign_with_rsa(SK :: {N :: pos_integer(), D :: pos_integer()}, M :: binary() | integer()) -> pos_integer().
sign_with_rsa({N, D}, M) when is_integer(M) ->
    %% Integer messages are signed via their big-endian binary encoding.
    sign_with_rsa({N, D}, binary:encode_unsigned(M));
sign_with_rsa({N, D}, M) when is_binary(M) ->
    %% Sign the deterministic PRNG image of the message instead of a plain hash.
    maths:mod_exp(msg_to_rsa_number(N, M), D, N).
%%
%% @doc Verifies RSA signature of the message `M'.
%%
%% @param PK RSA public key with modulus N and exponent E = 3.
%% @param M Message that is supposed to be signed.
%% @param Sig Signature of the message.
%%
-spec verify_rsa_signature(PK :: {N :: pos_integer(), E :: pos_integer()},
    M :: binary(), Sig :: pos_integer()) -> ok.
verify_rsa_signature({N, E}, M, Sig) ->
    %% Recompute the expected PRNG image of M and assert (via badmatch)
    %% that Sig^E mod N recovers exactly that value.
    Expected = msg_to_rsa_number(N, M),
    Expected = maths:mod_exp(Sig, E, N),
    ok.
%% =============================================================================
%% Unit tests
%% =============================================================================
-include_lib("eunit/include/eunit.hrl").
%% Fixed RSA test key {P, Q, N, D3, D5} used by the unit tests below:
%% P and Q are the prime factors, N = P*Q is the modulus, D3 is the
%% signing exponent (inverse of 3) and D5 the decryption exponent
%% (inverse of 5) -- see generate_rsa_key/1.
-define(SK, {
    16#F01EC3CC06CEB98449CD10CEED03C3CC02D68DAB3D168C46C4102426BF012425246AD9F6E89561EF0C5ADDE586F9DE787CFF5669CEBC28199B51329B43319A227E2F0CBF5FFB608E13CEACC7B974A1CFBD55E08D4C4144D43C3B7B61FCED9CA013B9E1800CECBCC63A3215FFAACE395BB585C74F5DD94105E40ED8103D79431D,
    16#EBA58019CC067C29F20422540B1539472582F34BE52B365BA2DEC6243A3167F80874EEA8FECF3B9D8FA153068C393E45948A09776B63C7E217B83C8780B6655EAD5CEDC9DE3501C9EDCC2A18333C7BD6B70253042E63CF6BD0185F2A60D3BCB43A36F20124687FD02CF1FDE3EC109A80CA9EC64CFE6E9F354916B2ABD58DACA9,
    16#DD0779B81105E31373EF1BD75B5E5A1FFDB647317299300DBC5C01C64D5B3C1FBA39BC467648E7CC3DB3635964568DEE7CAF1C6E8676CB09D413278091A81200B2001F5409835C4799E190CE5E6FA51DBEFDF2E1DBC305E5457C3CF20EE4D13AF4456DD8B6D574B844B613E9F9158AEADC1B03C55375673A8DAE2DEA4D11A1307C6EE7E34904228F0A6D0C691F803275283819F380969C7AF7FC80C546A76279587FBBF4AC706D67465686CB868507D37F32DC3878251F754DBD2B4C8B1A53D24D29C12D2A111263E812FB6501329363DE2149150117716B489F61422C6F0F5F39BCB001B76235A2275F284A5AC846C845F09D73256D3A3775374B06261DCA25,
    16#24D69449582BA5D8935284A3E48FB9AFFF9E6132E86EDD579F64AAF66239DF5A9F099F6113B6D14CB4F33B3990B917A7BF7284BD166921D6F8ADDBEAC2F1585573000538AC408F61445042CD0FBD462F9FD4FDD04F4B2BA6363F5F7DAD2622DF28B63CF973CE3E1EB61E58A6FED8EC7C7A0480A0E33E3BDF179D07A70CD84587C571C5FF93B27CD0226F4EE106913390554FC42A0FB8CEF96D8243BF0CE8CE64B1EFFDE3762CA2A471B9B8FA933851D8E79C3F63DFAB87E999C89F5C4BDDB8B83044F670A7501D57511406162E15939F91A18395EBBDBA31DF61EBC8F7C79E57274CA4C016577ED74AB4836675FC3DD1F64CAD4E76DB8EFF61584AB7032E79BB,
    16#B0D2C7C67404B5A9298C1645E2B1E1B3315E9F5AC2142671637CCE383DE2967FC82E30385EA0B970315C4F7AB6ABA4BECA25B0586B923C07DCDC1F9A0E200E66F4CCE5DCD469169FAE4E0D71E5261DB16597F5817C9C04B76AC9CA5B3F1D742F29D124AD5F112A2D03C4DCBB2DAAD588B0159C9DDC5DEC2ED7BE8B21D7414DBEE6EEE99791BF23E70BAFE104EC52911B33187A637EAA47E0740ADEC83DF711E356198F77040973155515117F8F74BC11248796AC319DBF947BC2FCEE38F51040E7B16BB6564D59A31EC6839DAA012AFDEE3A779C6B8EB0EF636F9EF7D88AF808BCA316CD380A60D6336276B89CBAC25637D673123A8447FD067499D4DC12485
}).
%% Encrypting a fresh random key and decrypting the ciphertext must
%% round-trip to the same 256-bit symmetric key.
encrypt_decrypt_key_test() ->
    {_P, _Q, N, _D3, D5} = ?SK,
    {Key, Encrypted} = encrypt_random_key_with_rsa({N, 5}),
    ?assertEqual(Key, decrypt_random_key_with_rsa({N, D5}, Encrypted)).
%% The PRNG image of a fixed message and modulus must be deterministic;
%% the expected value is pinned against the current rnd implementation.
msg_to_rsa_number_test() ->
    {_P, _Q, N, _D3, _D5} = ?SK,
    M = <<1, 2, 3>>,
    S = 16#6B1756A121DF748A364E27186207BDCD28EAF66ACC9F41C6AE12B81BFEDC19D34BAA8C05FCE66EA7EBA0FE316F4DA4AB967C2881B34FC887993A89D9175D7CBAB798F8F70D1F7EA172A5D36E4C4F91F2D9175E7F0D3CF0FCFEC570C5A6DE6D5C2347C1363CDFBC98DCD8E0138BFBF60831F6B675B0103C12DF41BCDB2CD0391D6F9F76BFA8B2747BD02560A4ACFE607BBA9914D7812C171C2814318EFB1C136058F8C29639BEAF95373C06E7E18145CDDD6999491B70672E075A1169C9835B2A86B62DE71CD32154A1F0815B85FFA857706AEEFB379FB8429D70D67C11CEBD53B014A8C59D28AC640ECE569A2C031BE7F7737F0FD4FAEC56B1BAAEAED3A5D346,
    ?assertEqual(S, msg_to_rsa_number(N, M)).
%% Signing must produce the pinned signature value and verification of
%% that signature must succeed.
sign_verify_test_() ->
    {_P, _Q, N, D3, _D5} = ?SK,
    M = <<1, 2, 3>>,
    Sig = 16#961C5B057698A05BAFA2BAF0BD2305C7F402F23C3ADFFF82890A3DC50503CE233F26C8A9068F48217C028010218DB1876DCA0772B8DB57F7D370A97B616CAD361C0BC01666E7C208C478DFF4CD4DD3866595E01C4041A5815D04DA8D50D418FAC0E8F45B48F9FF7EFDDAE41F4FE396B952DEA088381E11300D61669D37141452F23E8E55A1D0477B4692F3B0DD664F45479E9BED1E542FEF011A59356D78D6668E6F84910F609058032118D72A30E81F54B27A9346EC0E24082DCEC442AC8134C88A258DCD802D47F4AF8502FF611BB62BF30AFBA11841EF32B34B478E3AC5BE8D64308EBA5463E3E92730B65FB25C5175AC8B1C46E4D93C3C130CF2667A2350,
    [
        ?_assertEqual(Sig, sign_with_rsa({N, D3}, M)),
        ?_assertEqual(ok, verify_rsa_signature({N, 3}, M, Sig))
    ].
%% The signing/verification helpers reseed a PRNG in a separate process;
%% the calling process's own generator must keep producing fresh values.
sign_verify_prng_test() ->
    {_P, _Q, N, D3, _D5} = ?SK,
    Message = <<1, 2, 3>>,
    Bound = maths:pow(2, 256),
    Signature = sign_with_rsa({N, D3}, Message),
    BeforeVerify = rand:uniform(Bound),
    verify_rsa_signature({N, 3}, Message, Signature),
    ?assertNotEqual(BeforeVerify, rand:uniform(Bound)).
%% With a textbook-sized key, check the known signature values and that
%% sig(M1 * M2) differs from sig(M1) * sig(M2) mod N for this scheme
%% (signing the PRNG image breaks plain RSA multiplicativity).
signature_product_test() ->
    P = 71,
    Q = 89,
    N = P * Q,
    ?assertEqual(6319, N),
    T = maths:lcm(P - 1, Q - 1),
    ?assertEqual(3080, T),
    D = maths:mod_inv(3, T),
    ?assertEqual(1027, D),
    M1 = 5416,
    M2 = 2397,
    S1 = sign_with_rsa({N, D}, M1),
    ?assertEqual(923, S1),
    S2 = sign_with_rsa({N, D}, M2),
    ?assertEqual(2592, S2),
    S3 = sign_with_rsa({N, D}, M1 * M2 rem N),
    ?assertEqual(5086, S3), % not equal to
    ?assertEqual(3834, S1 * S2 rem N).
package perlin
import (
"math"
"math/rand"
)
// General constants
const (
	B  = 0x100  // size of the permutation / gradient tables
	N  = 0x1000 // offset added to coordinates before lattice indexing
	BM = 0xff   // bit mask (B - 1) used to wrap table indices
)

// Perlin is the noise generator
type Perlin struct {
	alpha float64 // weight divisor between octaves (noisier as it approaches 1)
	beta  float64 // harmonic scaling/spacing between octaves
	n     int     // number of octaves summed per noise value
	// Permutation table plus gradient tables for 3, 2 and 1 dimensions.
	// Each has B+B+2 entries: the first B are generated, the rest duplicate
	// the head of the table (see NewPerlinRandSource).
	p  [B + B + 2]int
	g3 [B + B + 2][3]float64
	g2 [B + B + 2][2]float64
	g1 [B + B + 2]float64
}
// NewPerlin creates new Perlin noise generator
// In what follows "alpha" is the weight when the sum is formed.
// Typically it is 2, As this approaches 1 the function is noisier.
// "beta" is the harmonic scaling/spacing, typically 2, n is the
// number of iterations and seed is the math.rand seed value to use
func NewPerlin(alpha, beta float64, n int, seed int64) *Perlin {
	// Convenience wrapper: build a rand.Source from the seed.
	return NewPerlinRandSource(alpha, beta, n, rand.NewSource(seed))
}
// NewPerlinRandSource creates new Perlin noise generator
// In what follows "alpha" is the weight when the sum is formed.
// Typically it is 2, As this approaches 1 the function is noisier.
// "beta" is the harmonic scaling/spacing, typically 2, n is the
// number of iterations and source is source of pseudo-random int64 values
func NewPerlinRandSource(alpha, beta float64, n int, source rand.Source) *Perlin {
	var p Perlin
	var i int
	p.alpha = alpha
	p.beta = beta
	p.n = n
	r := rand.New(source)
	// Fill the permutation table with the identity and draw random
	// gradient components in [-1, 1); 2D/3D gradients are normalized
	// to unit length.
	for i = 0; i < B; i++ {
		p.p[i] = i
		p.g1[i] = float64((r.Int()%(B+B))-B) / B
		for j := 0; j < 2; j++ {
			p.g2[i][j] = float64((r.Int()%(B+B))-B) / B
		}
		normalize2(&p.g2[i])
		for j := 0; j < 3; j++ {
			p.g3[i][j] = float64((r.Int()%(B+B))-B) / B
		}
		normalize3(&p.g3[i])
	}
	// Shuffle the permutation table in place.
	// NOTE(review): the loop starts with i == B, so the never-initialized
	// entry p.p[B] (zero) takes part in the shuffle; the classic reference
	// starts at B-1. Changing this would alter all generated noise, so it
	// is left as-is -- confirm intent before touching it.
	for ; i > 0; i-- {
		k := p.p[i]
		j := r.Int() % B
		p.p[i] = p.p[j]
		p.p[j] = k
	}
	// Duplicate the first B+2 entries past index B so the lattice index
	// arithmetic in noise2/noise3 never needs an extra wrap.
	for i := 0; i < B+2; i++ {
		p.p[B+i] = p.p[i]
		p.g1[B+i] = p.g1[i]
		for j := 0; j < 2; j++ {
			p.g2[B+i][j] = p.g2[i][j]
		}
		for j := 0; j < 3; j++ {
			p.g3[B+i][j] = p.g3[i][j]
		}
	}
	return &p
}
// normalize2 scales v in place to unit Euclidean length.
func normalize2(v *[2]float64) {
	length := math.Sqrt(v[0]*v[0] + v[1]*v[1])
	v[0] /= length
	v[1] /= length
}
// normalize3 scales v in place to unit Euclidean length.
func normalize3(v *[3]float64) {
	length := math.Sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
	v[0] /= length
	v[1] /= length
	v[2] /= length
}
// at2 returns the dot product of the offset (rx, ry) with gradient q.
func at2(rx, ry float64, q [2]float64) float64 {
	return q[0]*rx + q[1]*ry
}
// at3 returns the dot product of the offset (rx, ry, rz) with gradient q.
func at3(rx, ry, rz float64, q [3]float64) float64 {
	return q[0]*rx + q[1]*ry + q[2]*rz
}
// sCurve applies the cubic smoothstep 3w² − 2w³ used as interpolation weight.
func sCurve(w float64) float64 {
	return w * w * (3. - 2.*w)
}
// lerp linearly interpolates between from and to by fraction frac.
func lerp(frac, from, to float64) float64 {
	return from + frac*(to-from)
}
// noise1 computes one octave of 1-dimensional Perlin noise at arg.
func (p *Perlin) noise1(arg float64) float64 {
	// Shift into positive range, then take the two surrounding lattice
	// points and the fractional offsets to each.
	t := arg + N
	ix0 := int(t) & BM
	ix1 := (ix0 + 1) & BM
	frac0 := t - float64(int(t))
	frac1 := frac0 - 1.
	weight := sCurve(frac0)
	left := frac0 * p.g1[p.p[ix0]]
	right := frac1 * p.g1[p.p[ix1]]
	return lerp(weight, left, right)
}
// noise2 computes one octave of 2-dimensional Perlin noise at vec.
func (p *Perlin) noise2(vec [2]float64) float64 {
	// Lattice cell coordinates and fractional offsets for x...
	t := vec[0] + N
	bx0 := int(t) & BM
	bx1 := (bx0 + 1) & BM
	rx0 := t - float64(int(t))
	rx1 := rx0 - 1.
	// ...and for y.
	t = vec[1] + N
	by0 := int(t) & BM
	by1 := (by0 + 1) & BM
	ry0 := t - float64(int(t))
	ry1 := ry0 - 1.
	// Hash the four cell corners into gradient-table indices.
	i := p.p[bx0]
	j := p.p[bx1]
	b00 := p.p[i+by0]
	b10 := p.p[j+by0]
	b01 := p.p[i+by1]
	b11 := p.p[j+by1]
	// Smoothstep interpolation weights for each axis.
	sx := sCurve(rx0)
	sy := sCurve(ry0)
	// Dot products with the corner gradients, bilinearly interpolated.
	q := p.g2[b00]
	u := at2(rx0, ry0, q)
	q = p.g2[b10]
	v := at2(rx1, ry0, q)
	a := lerp(sx, u, v)
	q = p.g2[b01]
	u = at2(rx0, ry1, q)
	q = p.g2[b11]
	v = at2(rx1, ry1, q)
	b := lerp(sx, u, v)
	return lerp(sy, a, b)
}
// noise3 computes one octave of 3-dimensional Perlin noise at vec.
func (p *Perlin) noise3(vec [3]float64) float64 {
	// Lattice cell coordinates and fractional offsets per axis.
	t := vec[0] + N
	bx0 := int(t) & BM
	bx1 := (bx0 + 1) & BM
	rx0 := t - float64(int(t))
	rx1 := rx0 - 1.
	t = vec[1] + N
	by0 := int(t) & BM
	by1 := (by0 + 1) & BM
	ry0 := t - float64(int(t))
	ry1 := ry0 - 1.
	t = vec[2] + N
	bz0 := int(t) & BM
	bz1 := (bz0 + 1) & BM
	rz0 := t - float64(int(t))
	rz1 := rz0 - 1.
	// Hash the cell corners into gradient-table indices.
	i := p.p[bx0]
	j := p.p[bx1]
	b00 := p.p[i+by0]
	b10 := p.p[j+by0]
	b01 := p.p[i+by1]
	b11 := p.p[j+by1]
	// t is reused from here on as the x-axis smoothstep weight (sx).
	t = sCurve(rx0)
	sy := sCurve(ry0)
	sz := sCurve(rz0)
	// Trilinear interpolation of the eight corner gradient dot products:
	// first the z = bz0 face...
	q := p.g3[b00+bz0]
	u := at3(rx0, ry0, rz0, q)
	q = p.g3[b10+bz0]
	v := at3(rx1, ry0, rz0, q)
	a := lerp(t, u, v)
	q = p.g3[b01+bz0]
	u = at3(rx0, ry1, rz0, q)
	q = p.g3[b11+bz0]
	v = at3(rx1, ry1, rz0, q)
	b := lerp(t, u, v)
	c := lerp(sy, a, b)
	// ...then the z = bz1 face...
	q = p.g3[b00+bz1]
	u = at3(rx0, ry0, rz1, q)
	q = p.g3[b10+bz1]
	v = at3(rx1, ry0, rz1, q)
	a = lerp(t, u, v)
	q = p.g3[b01+bz1]
	u = at3(rx0, ry1, rz1, q)
	q = p.g3[b11+bz1]
	v = at3(rx1, ry1, rz1, q)
	b = lerp(t, u, v)
	d := lerp(sy, a, b)
	// ...finally interpolate between the two faces along z.
	return lerp(sz, c, d)
}
// Noise1D generates 1-dimensional Perlin Noise value
// by summing p.n octaves: each octave's amplitude shrinks by alpha and
// its frequency grows by beta.
func (p *Perlin) Noise1D(x float64) float64 {
	sum := 0.0
	scale := 1.0
	coord := x
	for octave := 0; octave < p.n; octave++ {
		sum += p.noise1(coord) / scale
		scale *= p.alpha
		coord *= p.beta
	}
	return sum
}
// Noise2D Generates 2-dimensional Perlin Noise value
// by summing p.n octaves: each octave's amplitude shrinks by alpha and
// its frequency grows by beta.
func (p *Perlin) Noise2D(x, y float64) float64 {
	sum := 0.0
	scale := 1.0
	coords := [2]float64{x, y}
	for octave := 0; octave < p.n; octave++ {
		sum += p.noise2(coords) / scale
		scale *= p.alpha
		coords[0] *= p.beta
		coords[1] *= p.beta
	}
	return sum
}
// Noise3D Generates 3-dimensional Perlin Noise value
func (p *Perlin) Noise3D(x, y, z float64) float64 {
var scale float64 = 1
var sum float64
var px [3]float64
if z < 0.0000 {
return p.Noise2D(x, y)
}
px[0] = x
px[1] = y
px[2] = z
for i := 0; i < p.n; i++ {
val := p.noise3(px)
sum += val / scale
scale *= p.alpha
px[0] *= p.beta
px[1] *= p.beta
px[2] *= p.beta
}
return sum
} | vendor/github.com/aquilax/go-perlin/perlin.go | 0.664323 | 0.480966 | perlin.go | starcoder |
package ql
import (
"bufio"
"bytes"
"io"
"strings"
)
// Scanner represents a lexical scanner.
type Scanner struct {
	r *bufio.Reader // buffered input; buffering enables single-rune unread
}

// NewScanner returns a new instance of Scanner.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: bufio.NewReader(r)}
}
// Scan returns the next token and literal value.
func (s *Scanner) Scan() (tok Token, lit string) {
	ch := s.read()
	switch {
	case isWhitespace(ch):
		// Consume the whole contiguous whitespace run as one WS token.
		s.unread()
		return s.scanWhitespace()
	case isLetter(ch) || isDigit(ch):
		// Identifiers, numbers and keywords all start here.
		s.unread()
		return s.scanIdent()
	}
	// Single-character tokens.
	switch ch {
	case eof:
		return EOF, ""
	case '*':
		return ASTERISK, string(ch)
	case ',':
		return COMMA, string(ch)
	case '=', '^', '<', '>':
		return COMPARITOR, string(ch)
	}
	return ILLEGAL, string(ch)
}
// scanWhitespace consumes the current rune and all contiguous whitespace,
// returning them as a single WS token.
func (s *Scanner) scanWhitespace() (tok Token, lit string) {
	var out bytes.Buffer
	out.WriteRune(s.read())
	for {
		ch := s.read()
		if ch == eof {
			break
		}
		if !isWhitespace(ch) {
			// First non-whitespace rune belongs to the next token.
			s.unread()
			break
		}
		out.WriteRune(ch)
	}
	return WS, out.String()
}
// scanIdent consumes the current rune and all contiguous ident runes.
// The collected lexeme is classified as a keyword when it matches one
// case-insensitively; otherwise it is returned as a plain identifier.
func (s *Scanner) scanIdent() (tok Token, lit string) {
	var out bytes.Buffer
	out.WriteRune(s.read())
	// Accumulate letters, digits and underscores; stop at anything else.
	for {
		ch := s.read()
		if ch == eof {
			break
		}
		if !isLetter(ch) && !isDigit(ch) && ch != '_' {
			s.unread()
			break
		}
		out.WriteRune(ch)
	}
	lexeme := out.String()
	switch strings.ToUpper(lexeme) {
	case "SELECT":
		return SELECT, lexeme
	case "FROM":
		return FROM, lexeme
	case "WHERE":
		return WHERE, lexeme
	case "AND":
		return AND, lexeme
	}
	return IDENT, lexeme
}
// read reads the next rune from the bufferred reader.
// Returns the eof marker rune when no more input is available
// (or any other read error occurs).
func (s *Scanner) read() rune {
	if ch, _, err := s.r.ReadRune(); err == nil {
		return ch
	}
	return eof
}
// unread places the previously read rune back on the reader.
// The error is ignored: bufio guarantees success right after a ReadRune.
func (s *Scanner) unread() { _ = s.r.UnreadRune() }
// isWhitespace returns true if the rune is a space, tab, or newline.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n':
		return true
	}
	return false
}
// isLetter returns true if the rune is an ASCII letter.
func isLetter(ch rune) bool {
	return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z')
}
// isDigit returns true if the rune is an ASCII decimal digit.
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9'
}
// eof represents a marker rune for the end of the reader.
var eof = rune(0)
package completion
import (
"context"
"go/ast"
"go/types"
)
// builtinArgKind determines the expected object kind for a builtin
// argument. It attempts to use the AST hints from builtin.go where
// possible.
func (c *completer) builtinArgKind(ctx context.Context, obj types.Object, call *ast.CallExpr) objKind {
	builtin, err := c.snapshot.BuiltinFile(ctx)
	if err != nil {
		return 0
	}
	// Which argument of the call the cursor is currently inside.
	exprIdx := exprAtPos(c.pos, call.Args)
	builtinObj := builtin.File.Scope.Lookup(obj.Name())
	if builtinObj == nil {
		return 0
	}
	decl, ok := builtinObj.Decl.(*ast.FuncDecl)
	if !ok || exprIdx >= len(decl.Type.Params.List) {
		return 0
	}
	// Derive the expected kind from the declared parameter type in the
	// builtin's pseudo-declaration (builtin.go).
	switch ptyp := decl.Type.Params.List[exprIdx].Type.(type) {
	case *ast.ChanType:
		return kindChan
	case *ast.ArrayType:
		return kindSlice
	case *ast.MapType:
		return kindMap
	case *ast.Ident:
		switch ptyp.Name {
		case "Type":
			// The generic "Type" placeholder: which kinds are valid
			// depends on the specific builtin being completed.
			switch obj.Name() {
			case "make":
				return kindChan | kindSlice | kindMap
			case "len":
				return kindSlice | kindMap | kindArray | kindString | kindChan
			case "cap":
				return kindSlice | kindArray | kindChan
			}
		}
	}
	return 0
}
// builtinArgType infers the type of an argument to a builtin
// function. parentInf is the inferred type info for the builtin
// call's parent node.
func (c *completer) builtinArgType(obj types.Object, call *ast.CallExpr, parentInf candidateInference) candidateInference {
	var (
		exprIdx = exprAtPos(c.pos, call.Args)
		// Propagate certain properties from our parent's inference.
		inf = candidateInference{
			typeName:  parentInf.typeName,
			modifiers: parentInf.modifiers,
		}
	)
	switch obj.Name() {
	case "append":
		if exprIdx <= 0 {
			// Infer first append() arg type as apparent return type of
			// append().
			inf.objType = parentInf.objType
			if parentInf.variadic {
				inf.objType = types.NewSlice(inf.objType)
			}
			break
		}
		// For non-initial append() args, infer slice type from the first
		// append() arg, or from parent context.
		if len(call.Args) > 0 {
			inf.objType = c.pkg.GetTypesInfo().TypeOf(call.Args[0])
		}
		if inf.objType == nil {
			inf.objType = parentInf.objType
		}
		if inf.objType == nil {
			break
		}
		// Appended values have the slice's element type.
		inf.objType = deslice(inf.objType)
		// Check if we are completing the variadic append() param.
		inf.variadic = exprIdx == 1 && len(call.Args) <= 2
		// Penalize the first append() argument as a candidate. You
		// don't normally append a slice to itself.
		if sliceChain := objChain(c.pkg.GetTypesInfo(), call.Args[0]); len(sliceChain) > 0 {
			inf.penalized = append(inf.penalized, penalizedObj{objChain: sliceChain, penalty: 0.9})
		}
	case "delete":
		if exprIdx > 0 && len(call.Args) > 0 {
			// Try to fill in expected type of map key.
			firstArgType := c.pkg.GetTypesInfo().TypeOf(call.Args[0])
			if firstArgType != nil {
				if mt, ok := firstArgType.Underlying().(*types.Map); ok {
					inf.objType = mt.Key()
				}
			}
		}
	case "copy":
		// Both copy() args share a type; take it from whichever is present.
		var t1, t2 types.Type
		if len(call.Args) > 0 {
			t1 = c.pkg.GetTypesInfo().TypeOf(call.Args[0])
			if len(call.Args) > 1 {
				t2 = c.pkg.GetTypesInfo().TypeOf(call.Args[1])
			}
		}
		// Fill in expected type of either arg if the other is already present.
		if exprIdx == 1 && t1 != nil {
			inf.objType = t1
		} else if exprIdx == 0 && t2 != nil {
			inf.objType = t2
		}
	case "new":
		inf.typeName.wantTypeName = true
		if parentInf.objType != nil {
			// Expected type for "new" is the de-pointered parent type.
			if ptr, ok := parentInf.objType.Underlying().(*types.Pointer); ok {
				inf.objType = ptr.Elem()
			}
		}
	case "make":
		if exprIdx == 0 {
			// First make() arg is the type being made.
			inf.typeName.wantTypeName = true
			inf.objType = parentInf.objType
		} else {
			// Remaining args are length/capacity.
			inf.objType = types.Typ[types.UntypedInt]
		}
	}
	return inf
}
package lib
import (
"regexp"
"strconv"
)
// regWordSep matches runs of decimal digits; Split uses it to break
// paths into alternating non-digit / digit words.
var regWordSep = regexp.MustCompile(`\d+`)
// AutoPattern auto generate a regexp to match the index of sequential file names.
// Algorithm:
// 1. Create a table, the row is each path, the column is the words of each path.
// 2. Create histogram of the words in each column.
// 3. Analyze the histogram, use similar columns to create the regexp.
func AutoPattern(list []string) *regexp.Regexp {
	table := [][]string{}
	// A column's dominant word must appear in at least half of the
	// paths to be included literally in the pattern.
	halfHeight := len(list) / 2
	for _, l := range list {
		table = append(table, Split(l))
	}
	histograms := Histograms(table)
	// The column holding the most distinct numbers is taken as the index.
	indexCol := FindIndexCol(histograms)
	pattern := ""
	for col, histogram := range histograms {
		if col == indexCol {
			// Capture group for the sequential index.
			pattern += `(\d+)`
		} else {
			word, count := FindWordForCol(histogram)
			if count < halfHeight {
				continue
			}
			// Escape the literal word so regexp metacharacters in
			// paths are matched verbatim.
			pattern += regexp.QuoteMeta(word)
		}
	}
	return regexp.MustCompile(pattern)
}
// FindIndexCol find the column that looks like the index part of the paths.
// The column that has the most different numbers.
// On ties the earliest such column wins; returns 0 when no column
// contains any number.
func FindIndexCol(list []map[string]int) int {
	bestCol, bestCount := 0, 0
	for col, histogram := range list {
		// Count the distinct words in this column that parse as integers.
		numeric := 0
		for word := range histogram {
			if _, err := strconv.ParseInt(word, 10, 64); err == nil {
				numeric++
			}
		}
		if numeric > bestCount {
			bestCount = numeric
			bestCol = col
		}
	}
	return bestCol
}
// FindWordForCol the word that appear the most times
// together with its count; ("", 0) for an empty histogram.
func FindWordForCol(histogram map[string]int) (string, int) {
	best, bestCount := "", 0
	for word, count := range histogram {
		if count > bestCount {
			best, bestCount = word, count
		}
	}
	return best, bestCount
}
// Histograms histogram of the words of each path
// Column col of the result counts, over all rows long enough to have a
// word at col, how often each word appears there.
func Histograms(table [][]string) []map[string]int {
	// The number of columns equals the longest row.
	maxLen := 0
	for _, row := range table {
		if len(row) > maxLen {
			maxLen = len(row)
		}
	}
	list := []map[string]int{}
	for col := 0; col < maxLen; col++ {
		histogram := map[string]int{}
		for _, row := range table {
			if col < len(row) {
				histogram[row[col]]++
			}
		}
		list = append(list, histogram)
	}
	return list
}
// Split use `\d+` as separator to split a path
// The result alternates the text before each digit run with the run
// itself, then appends the tail after the last run. Returns nil when the
// path contains no digits.
func Split(path string) []string {
	locs := regWordSep.FindAllStringIndex(path, -1)
	if len(locs) == 0 {
		return nil
	}
	list := []string{}
	preLoc := []int{0, 0}
	var loc []int
	for _, loc = range locs {
		// Text between the previous digit run and this one, then the run.
		sepLeftSide := path[preLoc[1]:loc[0]]
		sep := path[loc[0]:loc[1]]
		list = append(list, sepLeftSide, sep)
		preLoc = loc
	}
	// NOTE(review): `\d+` matches are never empty, so this condition is
	// always true and the (possibly empty) tail is always appended --
	// confirm the trailing "" for digit-terminated paths is intended.
	if loc[0] != loc[1] {
		list = append(list, path[loc[1]:])
	}
	return list
}
package varis
// Neuron - interface for all Neuron
// Each Neuron must have coreNeuron and getCore() to get pointer for CoreNeuron
// Live method - for goroutine. All kind of Neurons implement functionality of his type
// changeWeight is the method for training
type Neuron interface {
	// live runs the neuron's processing loop; meant to be started as a goroutine.
	live()
	// getCore exposes the neuron's shared CoreNeuron state.
	getCore() *CoreNeuron
	// changeWeight adjusts the bias and related synapse weights during training.
	changeWeight(neuronDelta float64)
}
// CoreNeuron - entity with float64 weight (it is bias) and connection.
// Activation result store in cache for training.
type CoreNeuron struct {
	conn   connection // input/output synapses of this neuron
	weight float64    // bias term added to the summed input signal
	cache  float64    // last pre-activation value, kept for training
}

// changeWeight - change weight of CoreNeuron and change weight for all related synapses.
func (n *CoreNeuron) changeWeight(neuronDelta float64) {
	n.weight += neuronDelta
	n.conn.changeWeight(neuronDelta)
}

// getCore - return core of Neuron.
func (n *CoreNeuron) getCore() *CoreNeuron {
	return n
}
// inputNeuron feeds raw input values into the network.
type inputNeuron struct {
	CoreNeuron
	connectTo chan float64 // channel the network's input values arrive on
}

// INeuron - creates inputNeuron.
// This kind of Neuron get signal from connectTo channel and broadcast it to all output synapses without Activation.
func INeuron(weight float64, connectTo chan float64) Neuron {
	return &inputNeuron{
		CoreNeuron: CoreNeuron{weight: weight},
		connectTo:  connectTo,
	}
}

func (neuron *inputNeuron) live() {
	// Forward every incoming value untouched to all output synapses.
	for {
		neuron.conn.broadcastSignals(<-neuron.connectTo)
	}
}
// hiddenNeuron sums its weighted inputs, applies the activation function
// and forwards the result to the next layer.
type hiddenNeuron struct {
	CoreNeuron
}

// HNeuron - creates hiddenNeuron.
// This kind of Neuron get signal from input Synapses channel, activate and broadcast it to all output synapses.
func HNeuron(weight float64) Neuron {
	return &hiddenNeuron{
		CoreNeuron: CoreNeuron{weight: weight},
	}
}

func (neuron *hiddenNeuron) live() {
	for {
		vector := neuron.conn.collectSignals()
		// Cache the pre-activation sum (inputs + bias) for training.
		neuron.cache = vector.sum() + neuron.weight
		neuron.conn.broadcastSignals(ACTIVATION(neuron.cache))
	}
}
// outputNeuron delivers activated results out of the network.
type outputNeuron struct {
	CoreNeuron
	connectTo chan float64 // channel the network's output values are sent on
}

// ONeuron - creates outputNeuron.
// This kind of Neuron get signal from input Synapses channel, activate and send it to connectTo channel.
func ONeuron(weight float64, connectTo chan float64) Neuron {
	return &outputNeuron{
		CoreNeuron: CoreNeuron{weight: weight},
		connectTo:  connectTo,
	}
}

func (neuron *outputNeuron) live() {
	for {
		vector := neuron.conn.collectSignals()
		// Cache the pre-activation sum (inputs + bias) for training.
		neuron.cache = vector.sum() + neuron.weight
		neuron.connectTo <- ACTIVATION(neuron.cache)
	}
}
package mit
import (
"errors"
"time"
)
// ErrBadTermCode is reported when the given term code doesn't exist.
var ErrBadTermCode = errors.New("mit: unknown term code")

// A TermInfo struct contains information about an academic term.
type TermInfo struct {
	Code              string // term code such as "2022FA"
	FirstDayOfClasses time.Time
	LastDayOfClasses  time.Time
	// ExceptionDays maps "YYYY-MM-DD" dates to the weekday whose class
	// schedule is followed on that date.
	ExceptionDays map[string]time.Weekday
}
// termsByCode holds the hard-coded academic calendar, keyed by term code.
// ExceptionDays lists dates (YYYY-MM-DD) that follow another weekday's
// class schedule.
var termsByCode = map[string]TermInfo{
	"2020FA": {
		Code:              "2020FA",
		FirstDayOfClasses: time.Date(2019, 9, 4, 0, 0, 0, 0, time.UTC),
		LastDayOfClasses:  time.Date(2019, 12, 11, 0, 0, 0, 0, time.UTC),
		ExceptionDays:     map[string]time.Weekday{},
	},
	"2020JA": {
		Code:              "2020JA",
		FirstDayOfClasses: time.Date(2020, 1, 6, 0, 0, 0, 0, time.UTC),
		LastDayOfClasses:  time.Date(2020, 1, 31, 0, 0, 0, 0, time.UTC),
		ExceptionDays:     map[string]time.Weekday{},
	},
	"2020SP": {
		Code:              "2020SP",
		FirstDayOfClasses: time.Date(2020, 2, 3, 0, 0, 0, 0, time.UTC),
		LastDayOfClasses:  time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC),
		ExceptionDays: map[string]time.Weekday{
			// Feb 18: Monday schedule of classes to be held.
			"2020-02-18": time.Monday,
		},
	},
	"2021FA": {
		Code:              "2021FA",
		FirstDayOfClasses: time.Date(2020, 9, 1, 0, 0, 0, 0, time.UTC),
		LastDayOfClasses:  time.Date(2020, 12, 9, 0, 0, 0, 0, time.UTC),
		ExceptionDays: map[string]time.Weekday{
			// Oct 13: Monday schedule of classes to be held.
			"2020-10-13": time.Monday,
		},
	},
	"2021JA": {
		Code:              "2021JA",
		FirstDayOfClasses: time.Date(2021, 1, 4, 0, 0, 0, 0, time.UTC),
		LastDayOfClasses:  time.Date(2021, 1, 29, 0, 0, 0, 0, time.UTC),
		ExceptionDays:     map[string]time.Weekday{},
	},
	"2021SP": {
		Code:              "2021SP",
		FirstDayOfClasses: time.Date(2021, 2, 16, 0, 0, 0, 0, time.UTC),
		LastDayOfClasses:  time.Date(2021, 5, 20, 0, 0, 0, 0, time.UTC),
		ExceptionDays: map[string]time.Weekday{
			// Mar 9: Monday schedule of classes to be held.
			"2021-03-09": time.Monday,
		},
	},
	"2022FA": {
		Code:              "2022FA",
		FirstDayOfClasses: time.Date(2021, 9, 8, 0, 0, 0, 0, time.UTC),
		LastDayOfClasses:  time.Date(2021, 12, 9, 0, 0, 0, 0, time.UTC),
		ExceptionDays:     map[string]time.Weekday{},
	},
}

// GetTermByCode returns the TermInfo struct for the term with the given code, or ErrBadTermCode if the term couldn't be found.
func GetTermByCode(code string) (TermInfo, error) {
	// Data-driven lookup replaces the previous copy-pasted if/else chain,
	// so adding a term is a single map entry.
	if term, ok := termsByCode[code]; ok {
		return term, nil
	}
	return TermInfo{}, ErrBadTermCode
}
// GetCurrentTerm returns a TermInfo struct for the current academic term.
// NOTE(review): the "current" term is hard-coded to 2022FA rather than
// derived from the clock, so it must be bumped manually each term.
func GetCurrentTerm() TermInfo {
	// The code is a known-good key, so the lookup error is ignored.
	term, _ := GetTermByCode("2022FA")
	return term
}
package util
import (
"io"
)
// Size of the buffer used for calculating file chunks.
const chunksCalculationBufferSizeBytes = 4096

// FileChunk describes a chunk within a file with its starting position and end
// position in bytes. Both are given as bytes counted from the file's beginning.
// Also carries information about the chunk number (i.e. its order position)
// within the file.
type FileChunk struct {
	ChunkNumber int   // 1-based order of the chunk within the file
	StartBytes  int64 // inclusive start offset from the file's beginning
	EndBytes    int64 // exclusive end offset: the delimiter position, or the file length for a trailing chunk
}
// FileChunkCalculationResult represents a single result of a file chunk calculation.
// Carries information about a FileChunk and additional error information.
// A FileChunk can still hold valuable information even in the presence of an
// error such as the chunk number.
type FileChunkCalculationResult struct {
	FileChunk FileChunk
	Err       error // non-nil when reading the underlying source failed
}
// CalculateFileChunks calculates all chunks of r that are delimited by delimiter.
// r is read in a streamed fashion.
// Results will be published on a res channel as they appear when reading r.
// Closes the result channel as soon as r is exhaustively read.
func CalculateFileChunks(r io.Reader, delimiter byte, res chan<- FileChunkCalculationResult) {
	// Byte offset just past the most recently seen delimiter;
	// the next chunk starts here.
	var lastSeenDelimiterTokenOffsetBytes int64 = 0
	alreadyReadBytes := int64(0) // total bytes consumed from r so far
	chunkNumber := 0             // number of the last emitted chunk
	buf := make([]byte, 0, chunksCalculationBufferSizeBytes)
	for {
		n, err := r.Read(buf[:cap(buf)])
		buf = buf[:n]
		if n == 0 {
			if err == nil {
				// No data and no error: retry the read.
				continue
			}
			if err == io.EOF {
				// For when r does not end with the delimiter.
				if alreadyReadBytes > lastSeenDelimiterTokenOffsetBytes {
					res <- FileChunkCalculationResult{
						FileChunk: FileChunk{
							ChunkNumber: chunkNumber + 1,
							StartBytes:  lastSeenDelimiterTokenOffsetBytes,
							EndBytes:    alreadyReadBytes,
						},
					}
				}
				close(res)
				break
			}
			// Non-EOF read error: report it with the upcoming chunk number.
			// NOTE(review): the loop keeps reading after this, so a
			// persistent error is reported repeatedly and res is never
			// closed on this path -- confirm whether callers rely on that.
			res <- FileChunkCalculationResult{
				FileChunk: FileChunk{
					ChunkNumber: chunkNumber,
				},
				Err: err,
			}
		}
		// Emit one chunk per delimiter found in this buffer.
		for idx, b := range buf {
			if b == delimiter {
				chunkNumber++
				res <- FileChunkCalculationResult{
					FileChunk: FileChunk{
						ChunkNumber: chunkNumber,
						StartBytes:  lastSeenDelimiterTokenOffsetBytes,
						EndBytes:    alreadyReadBytes + int64(idx),
					},
				}
				lastSeenDelimiterTokenOffsetBytes = alreadyReadBytes + int64(idx) + 1
			}
		}
		alreadyReadBytes += int64(n)
	}
}
package metric
import (
"fmt"
"strings"
)
// SourceType defines the kind of data source. Based on this SourceType, metric
// package performs some calculations with it. Check below the description for
// each one.
type SourceType int

// Source types
// If any more SourceTypes are added update maps: SourcesTypeToName & SourcesNameToType.
// All constants are typed as SourceType (previously CUMULATIVE_COUNT, RATE
// and CUMULATIVE_RATE were untyped ints); the iota values are unchanged.
const (
	// GAUGE is a value that may increase and decrease.
	// It generally represents the value for something at a particular moment in time
	GAUGE SourceType = iota
	// COUNT counts the number of times an event occurred since the last time it was retrieved (time window).
	// It's values can go up or down
	COUNT
	// SUMMARY is a composite value with avg, min, max sample count and sum
	SUMMARY
	// CUMULATIVE_COUNT counts the number of times an event occurred. It is not a delta, but an absolute value.
	// It's value should either be the same or go up, never down
	CUMULATIVE_COUNT
	// RATE represents a rate of change of a value in a specific time window
	RATE
	// CUMULATIVE_RATE represents an ever-increasing rate of change.
	CUMULATIVE_RATE
	// PROMETHEUS_HISTOGRAM is a histogram as defined by Prometheus
	PROMETHEUS_HISTOGRAM
	// PROMETHEUS_SUMMARY is a summary as defined by Prometheus
	PROMETHEUS_SUMMARY
)
// SourcesTypeToName maps each SourceType to its canonical lower-case name.
// Must stay in sync with SourcesNameToType (its exact inverse).
var SourcesTypeToName = map[SourceType]string{
	GAUGE:                "gauge",
	COUNT:                "count",
	SUMMARY:              "summary",
	CUMULATIVE_COUNT:     "cumulative-count",
	RATE:                 "rate",
	CUMULATIVE_RATE:      "cumulative-rate",
	PROMETHEUS_HISTOGRAM: "prometheus-histogram",
	PROMETHEUS_SUMMARY:   "prometheus-summary",
}
// SourcesNameToType maps each canonical lower-case name to its SourceType.
// Must stay in sync with SourcesTypeToName (its exact inverse); used by
// SourceTypeForName.
var SourcesNameToType = map[string]SourceType{
	"gauge":                GAUGE,
	"count":                COUNT,
	"summary":              SUMMARY,
	"cumulative-count":     CUMULATIVE_COUNT,
	"rate":                 RATE,
	"cumulative-rate":      CUMULATIVE_RATE,
	"prometheus-histogram": PROMETHEUS_HISTOGRAM,
	"prometheus-summary":   PROMETHEUS_SUMMARY,
}
// String implements fmt.Stringer. Invalid source types yield the empty
// string rather than an error.
func (t SourceType) String() string {
	name, known := SourcesTypeToName[t]
	if !known {
		return ""
	}
	return name
}
// SourceTypeForName does a case insensitive conversion from a string to a SourceType.
// An error will be returned if no valid SourceType matched.
func SourceTypeForName(sourceTypeTag string) (SourceType, error) {
if st, ok := SourcesNameToType[strings.ToLower(sourceTypeTag)]; ok {
return st, nil
}
return 0, fmt.Errorf("metric: Unknown source_type %s", sourceTypeTag)
} | vendor/github.com/newrelic/infra-integrations-sdk/v4/data/metric/source_type.go | 0.628293 | 0.472866 | source_type.go | starcoder |
package delete
import (
"bytes"
"testing"
"github.com/hpcng/singularity/e2e/internal/e2e"
"github.com/hpcng/singularity/e2e/internal/testhelper"
)
// ctx carries the shared end-to-end test environment used by the delete
// command tests.
type ctx struct {
	env e2e.TestEnv // configured singularity e2e environment
}
// testDeleteCmd runs table-driven e2e cases for `singularity delete`,
// feeding the interactive confirmation prompt via stdin ("y"/"n", or empty
// with --force) and checking the process exit code (255 = failure, 0 = the
// command declined/no-op path).
func (c ctx) testDeleteCmd(t *testing.T) {
	tests := []struct {
		name       string
		args       []string // CLI arguments passed to `delete`
		agree      string   // stdin answer to the confirmation prompt
		expectExit int      // expected process exit code
		expect     e2e.SingularityCmdResultOp
	}{
		{
			name:       "delete unauthorized arch",
			args:       []string{"--arch=amd64", "library://test/default/test:v0.0.3"},
			agree:      "y",
			expectExit: 255,
		},
		{
			name:       "delete unauthorized no arch",
			args:       []string{"library://test/default/test:v0.0.3"},
			agree:      "y",
			expectExit: 255,
		},
		{
			name:       "delete disagree arch",
			args:       []string{"--arch=amd64", "library://test/default/test:v0.0.3"},
			agree:      "n",
			expectExit: 0,
		},
		{
			name:       "delete disagree noarch",
			args:       []string{"library://test/default/test:v0.0.3"},
			agree:      "n",
			expectExit: 0,
		},
		{
			name:       "delete unauthorized force arch",
			args:       []string{"--force", "--arch=amd64", "library://test/default/test:v0.0.3"},
			agree:      "",
			expectExit: 255,
		},
		{
			name:       "delete unauthorized force noarch",
			args:       []string{"--force", "library://test/default/test:v0.0.3"},
			agree:      "",
			expectExit: 255,
		},
		{
			name:       "delete unauthorized custom library",
			args:       []string{"--library=https://cloud.staging.sylabs.io", "library://test/default/test:v0.0.3"},
			agree:      "y",
			expectExit: 255,
		},
		{
			name:       "delete host in uri",
			args:       []string{"library://library.example.com/test/default/test:v0.0.3"},
			agree:      "y",
			expectExit: 255,
			expect:     e2e.ExpectError(e2e.ContainMatch, "dial tcp: lookup library.example.com: no such host"),
		},
	}
	for _, tt := range tests {
		c.env.RunSingularity(
			t,
			e2e.AsSubtest(tt.name),
			e2e.WithProfile(e2e.UserProfile),
			e2e.WithCommand("delete"),
			e2e.WithArgs(tt.args...),
			e2e.WithStdin(bytes.NewBufferString(tt.agree)),
			e2e.ExpectExit(tt.expectExit, tt.expect),
		)
	}
}
// E2ETests is the main func to trigger the test suite.
func E2ETests(env e2e.TestEnv) testhelper.Tests {
c := ctx{
env: env,
}
return testhelper.Tests{
"delete": c.testDeleteCmd,
}
} | e2e/delete/delete.go | 0.553747 | 0.482063 | delete.go | starcoder |
package main
import (
"encoding/xml"
"fmt"
"os"
"github.com/freddy33/graphml"
"github.com/sirupsen/logrus"
)
// IDs and names of the standardized GraphML attributes attached to nodes
// and edges of generated graphs.
const (
	// NodeLabelID / NodeLabelName identify the label attribute of nodes.
	NodeLabelID   = "node-label"
	NodeLabelName = "label"
	// NodeDescriptionID / NodeDescriptionName identify the description
	// attribute of nodes.
	NodeDescriptionID   = "node-description"
	NodeDescriptionName = "description"
	// NodeURLID / NodeURLName identify the URL attribute of nodes.
	NodeURLID   = "node-url"
	NodeURLName = "url"
	// EdgeLabelID / EdgeLabelName identify the label attribute of edges.
	EdgeLabelID   = "edge-label"
	EdgeLabelName = "label"
	// EdgeDescriptionID / EdgeDescriptionName identify the description
	// attribute of edges.
	EdgeDescriptionID   = "edge-description"
	EdgeDescriptionName = "description"
)
// GraphMLUtil provides util methods to create GraphML documents with
// standardized node and edge data attributes. It is stateless.
type GraphMLUtil struct{}

// NewGraphMLUtil creates a new instance of GraphMLUtil.
func NewGraphMLUtil() *GraphMLUtil {
	return &GraphMLUtil{}
}
// SaveGraphMLDocument writes graphMLDocument to filename, creating or
// truncating the file. It returns an error when the file cannot be created
// or the document cannot be encoded.
func (gu *GraphMLUtil) SaveGraphMLDocument(filename string, graphMLDocument *graphml.Document) error {
	logrus.Infof("Saving GraphML to file [%s]", filename)
	file, fileErr := os.Create(filename)
	if fileErr != nil {
		// BUG FIX: Close was previously deferred before this error check,
		// invoking Close on a nil *os.File when creation failed.
		return fmt.Errorf("Failed to create GraphML document file [%s]: %w", filename, fileErr)
	}
	defer file.Close()
	if encodeErr := graphml.Encode(file, graphMLDocument); encodeErr != nil {
		return fmt.Errorf("Failed to encode GraphML document to file [%s]: %w", filename, encodeErr)
	}
	return nil
}
// CreateGraphMLDocument creates the GraphML document wrapping the supplied
// graphs, with the XML prolog, namespace declarations and the standardized
// attribute (key) definitions for node/edge label, description and URL.
func (gu *GraphMLUtil) CreateGraphMLDocument(graphs []graphml.Graph) *graphml.Document {
	return &graphml.Document{
		Instr: xml.ProcInst{
			Target: "xml",
			Inst:   []byte("version=\"1.0\" encoding=\"UTF-8\"")},
		Attrs: []xml.Attr{
			{Name: xml.Name{Local: "xmlns"}, Value: "http://graphml.graphdrawing.org/xmlns"},
			{Name: xml.Name{Local: "xmlns:xsi"}, Value: "http://www.w3.org/2001/XMLSchema-instance"},
			{Name: xml.Name{Local: "xsi:schemaLocation"}, Value: "http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd"}},
		Graphs: graphs,
		Keys: []graphml.Key{
			graphml.NewKey(graphml.KindNode, NodeLabelID, NodeLabelName, "string"),
			graphml.NewKey(graphml.KindNode, NodeDescriptionID, NodeDescriptionName, "string"),
			graphml.NewKey(graphml.KindNode, NodeURLID, NodeURLName, "string"),
			graphml.NewKey(graphml.KindEdge, EdgeLabelID, EdgeLabelName, "string"),
			graphml.NewKey(graphml.KindEdge, EdgeDescriptionID, EdgeDescriptionName, "string")}}
}
// CreateGraph creates a GraphML graph with the specified id, default edge
// direction, nodes and edges.
func (gu *GraphMLUtil) CreateGraph(id string, edgeDefault graphml.EdgeDir, nodes []graphml.Node, edges []graphml.Edge) *graphml.Graph {
	return &graphml.Graph{
		ExtObject:   graphml.ExtObject{Object: graphml.Object{ID: id}},
		EdgeDefault: edgeDefault,
		Nodes:       nodes,
		Edges:       edges}
}
// CreateNode creates a GraphML node with the specified id, attaching the
// supplied label, description and URL as standardized GraphML data
// attributes (see the Node* constants).
func (gu *GraphMLUtil) CreateNode(nodeID, nodeLabel, nodeDescription, nodeURL string) *graphml.Node {
	return &graphml.Node{
		ExtObject: graphml.ExtObject{
			Object: graphml.Object{ID: nodeID},
			Data: []graphml.Data{
				graphml.NewData(NodeLabelID, nodeLabel),
				graphml.NewData(NodeDescriptionID, nodeDescription),
				graphml.NewData(NodeURLID, nodeURL)}}}
}
// CreateEdge creates a GraphML edge with the specified id, source and target nodes, and the supplied label and description as GraphML attributes
func (gu *GraphMLUtil) CreateEdge(edgeID, sourceNodeID, targetNodeID, edgeLabel, edgeDescription string) *graphml.Edge {
return &graphml.Edge{
ExtObject: graphml.ExtObject{
Object: graphml.Object{ID: edgeID},
Data: []graphml.Data{
graphml.NewData(EdgeLabelID, edgeLabel),
graphml.NewData(EdgeDescriptionID, edgeDescription)}},
Source: sourceNodeID,
Target: targetNodeID}
} | graphmlutil.go | 0.591605 | 0.484197 | graphmlutil.go | starcoder |
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "rand"
const Count = 1e5
// i64rand returns a random int64 with a randomly chosen effective bit
// width. Values whose negation equals themselves (0 and math.MinInt64) are
// rejected, so callers can rely on -a != a.
func i64rand() int64 {
	for {
		a := int64(rand.Uint32())
		a = (a << 32) | int64(rand.Uint32())
		a >>= uint(rand.Intn(64))
		if -a != a {
			return a
		}
	}
	// FIX: the old `return 0 // impossible` after the loop was dead code
	// required only by pre-Go1 compilers and has been removed.
}
// i64test recomputes a % c here (c is a variable, so the general modulus
// path is used) and checks it against b, which callers computed with a
// literal constant divisor; a mismatch means constant-modulus code is wrong.
func i64test(a, b, c int64) {
	d := a % c
	if d != b {
		println("i64", a, b, c, d)
		panic("fail")
	}
}
// i64run exercises signed 64-bit modulus by a range of divisors. The
// divisors must stay literal constants: the constant-modulus lowering is
// what this test exercises.
func i64run() {
	for i := 0; i < Count; i++ {
		a := i64rand()
		i64test(a, a%1, 1)
		i64test(a, a%2, 2)
		i64test(a, a%3, 3)
		i64test(a, a%4, 4)
		i64test(a, a%5, 5)
		i64test(a, a%6, 6)
		i64test(a, a%7, 7)
		i64test(a, a%8, 8)
		i64test(a, a%10, 10)
		i64test(a, a%16, 16)
		i64test(a, a%20, 20)
		i64test(a, a%32, 32)
		i64test(a, a%60, 60)
		i64test(a, a%64, 64)
		i64test(a, a%128, 128)
		i64test(a, a%256, 256)
		i64test(a, a%16384, 16384)
		i64test(a, a%(-1), -1)
		i64test(a, a%(-2), -2)
		i64test(a, a%(-3), -3)
		i64test(a, a%(-4), -4)
		i64test(a, a%(-5), -5)
		i64test(a, a%(-6), -6)
		i64test(a, a%(-7), -7)
		i64test(a, a%(-8), -8)
		i64test(a, a%(-10), -10)
		i64test(a, a%(-16), -16)
		i64test(a, a%(-20), -20)
		i64test(a, a%(-32), -32)
		i64test(a, a%(-60), -60)
		i64test(a, a%(-64), -64)
		i64test(a, a%(-128), -128)
		i64test(a, a%(-256), -256)
		i64test(a, a%(-16384), -16384)
	}
}
// u64rand returns a random uint64 with a randomly chosen effective bit width.
func u64rand() uint64 {
	hi := uint64(rand.Uint32()) << 32
	lo := uint64(rand.Uint32())
	return (hi | lo) >> uint(rand.Intn(64))
}
// u64test recomputes a % c with a variable divisor and checks it against
// b, which callers computed with a literal constant divisor.
func u64test(a, b, c uint64) {
	d := a % c
	if d != b {
		println("u64", a, b, c, d)
		panic("fail")
	}
}
// u64run exercises unsigned 64-bit modulus by constant divisors; keep the
// divisors literal — constant-modulus lowering is the point of the test.
func u64run() {
	for i := 0; i < Count; i++ {
		a := u64rand()
		u64test(a, a%1, 1)
		u64test(a, a%2, 2)
		u64test(a, a%3, 3)
		u64test(a, a%4, 4)
		u64test(a, a%5, 5)
		u64test(a, a%6, 6)
		u64test(a, a%7, 7)
		u64test(a, a%8, 8)
		u64test(a, a%10, 10)
		u64test(a, a%16, 16)
		u64test(a, a%20, 20)
		u64test(a, a%32, 32)
		u64test(a, a%60, 60)
		u64test(a, a%64, 64)
		u64test(a, a%128, 128)
		u64test(a, a%256, 256)
		u64test(a, a%16384, 16384)
	}
}
// i32rand returns a random int32 with a randomly chosen effective bit
// width; 0 and math.MinInt32 are rejected so -a != a always holds.
func i32rand() int32 {
	for {
		a := int32(rand.Uint32())
		a >>= uint(rand.Intn(32))
		if -a != a {
			return a
		}
	}
	// FIX: removed the dead `return 0 // impossible` pre-Go1 relic.
}
// i32test recomputes a % c with a variable divisor and checks it against
// b, which callers computed with a literal constant divisor.
func i32test(a, b, c int32) {
	d := a % c
	if d != b {
		println("i32", a, b, c, d)
		panic("fail")
	}
}
// i32run exercises signed 32-bit modulus by constant divisors; keep the
// divisors literal — constant-modulus lowering is the point of the test.
func i32run() {
	for i := 0; i < Count; i++ {
		a := i32rand()
		i32test(a, a%1, 1)
		i32test(a, a%2, 2)
		i32test(a, a%3, 3)
		i32test(a, a%4, 4)
		i32test(a, a%5, 5)
		i32test(a, a%6, 6)
		i32test(a, a%7, 7)
		i32test(a, a%8, 8)
		i32test(a, a%10, 10)
		i32test(a, a%16, 16)
		i32test(a, a%20, 20)
		i32test(a, a%32, 32)
		i32test(a, a%60, 60)
		i32test(a, a%64, 64)
		i32test(a, a%128, 128)
		i32test(a, a%256, 256)
		i32test(a, a%16384, 16384)
		i32test(a, a%(-1), -1)
		i32test(a, a%(-2), -2)
		i32test(a, a%(-3), -3)
		i32test(a, a%(-4), -4)
		i32test(a, a%(-5), -5)
		i32test(a, a%(-6), -6)
		i32test(a, a%(-7), -7)
		i32test(a, a%(-8), -8)
		i32test(a, a%(-10), -10)
		i32test(a, a%(-16), -16)
		i32test(a, a%(-20), -20)
		i32test(a, a%(-32), -32)
		i32test(a, a%(-60), -60)
		i32test(a, a%(-64), -64)
		i32test(a, a%(-128), -128)
		i32test(a, a%(-256), -256)
	}
}
// u32rand returns a random uint32 with a randomly chosen effective bit width.
func u32rand() uint32 {
	v := rand.Uint32()
	return v >> uint(rand.Intn(32))
}
// u32test recomputes a % c with a variable divisor and checks it against
// b, which callers computed with a literal constant divisor.
func u32test(a, b, c uint32) {
	d := a % c
	if d != b {
		println("u32", a, b, c, d)
		panic("fail")
	}
}
// u32run exercises unsigned 32-bit modulus by constant divisors; keep the
// divisors literal — constant-modulus lowering is the point of the test.
func u32run() {
	for i := 0; i < Count; i++ {
		a := u32rand()
		u32test(a, a%1, 1)
		u32test(a, a%2, 2)
		u32test(a, a%3, 3)
		u32test(a, a%4, 4)
		u32test(a, a%5, 5)
		u32test(a, a%6, 6)
		u32test(a, a%7, 7)
		u32test(a, a%8, 8)
		u32test(a, a%10, 10)
		u32test(a, a%16, 16)
		u32test(a, a%20, 20)
		u32test(a, a%32, 32)
		u32test(a, a%60, 60)
		u32test(a, a%64, 64)
		u32test(a, a%128, 128)
		u32test(a, a%256, 256)
		u32test(a, a%16384, 16384)
	}
}
// i16rand returns a random int16 with a randomly chosen effective bit
// width; 0 and math.MinInt16 are rejected so -a != a always holds.
func i16rand() int16 {
	for {
		a := int16(rand.Uint32())
		a >>= uint(rand.Intn(16))
		if -a != a {
			return a
		}
	}
	// FIX: removed the dead `return 0 // impossible` pre-Go1 relic.
}
// i16test recomputes a % c with a variable divisor and checks it against
// b, which callers computed with a literal constant divisor.
func i16test(a, b, c int16) {
	d := a % c
	if d != b {
		println("i16", a, b, c, d)
		panic("fail")
	}
}
// i16run exercises signed 16-bit modulus by constant divisors; keep the
// divisors literal — constant-modulus lowering is the point of the test.
func i16run() {
	for i := 0; i < Count; i++ {
		a := i16rand()
		i16test(a, a%1, 1)
		i16test(a, a%2, 2)
		i16test(a, a%3, 3)
		i16test(a, a%4, 4)
		i16test(a, a%5, 5)
		i16test(a, a%6, 6)
		i16test(a, a%7, 7)
		i16test(a, a%8, 8)
		i16test(a, a%10, 10)
		i16test(a, a%16, 16)
		i16test(a, a%20, 20)
		i16test(a, a%32, 32)
		i16test(a, a%60, 60)
		i16test(a, a%64, 64)
		i16test(a, a%128, 128)
		i16test(a, a%256, 256)
		i16test(a, a%16384, 16384)
		i16test(a, a%(-1), -1)
		i16test(a, a%(-2), -2)
		i16test(a, a%(-3), -3)
		i16test(a, a%(-4), -4)
		i16test(a, a%(-5), -5)
		i16test(a, a%(-6), -6)
		i16test(a, a%(-7), -7)
		i16test(a, a%(-8), -8)
		i16test(a, a%(-10), -10)
		i16test(a, a%(-16), -16)
		i16test(a, a%(-20), -20)
		i16test(a, a%(-32), -32)
		i16test(a, a%(-60), -60)
		i16test(a, a%(-64), -64)
		i16test(a, a%(-128), -128)
		i16test(a, a%(-256), -256)
		i16test(a, a%(-16384), -16384)
	}
}
// u16rand returns a random uint16 with a randomly chosen effective bit width.
func u16rand() uint16 {
	v := uint16(rand.Uint32())
	return v >> uint(rand.Intn(16))
}
// u16test recomputes a % c with a variable divisor and checks it against
// b, which callers computed with a literal constant divisor.
func u16test(a, b, c uint16) {
	d := a % c
	if d != b {
		println("u16", a, b, c, d)
		panic("fail")
	}
}
// u16run exercises unsigned 16-bit modulus by constant divisors; keep the
// divisors literal — constant-modulus lowering is the point of the test.
func u16run() {
	for i := 0; i < Count; i++ {
		a := u16rand()
		u16test(a, a%1, 1)
		u16test(a, a%2, 2)
		u16test(a, a%3, 3)
		u16test(a, a%4, 4)
		u16test(a, a%5, 5)
		u16test(a, a%6, 6)
		u16test(a, a%7, 7)
		u16test(a, a%8, 8)
		u16test(a, a%10, 10)
		u16test(a, a%16, 16)
		u16test(a, a%20, 20)
		u16test(a, a%32, 32)
		u16test(a, a%60, 60)
		u16test(a, a%64, 64)
		u16test(a, a%128, 128)
		u16test(a, a%256, 256)
		u16test(a, a%16384, 16384)
	}
}
// i8rand returns a random int8 with a randomly chosen effective bit width;
// 0 and math.MinInt8 are rejected so -a != a always holds.
func i8rand() int8 {
	for {
		a := int8(rand.Uint32())
		a >>= uint(rand.Intn(8))
		if -a != a {
			return a
		}
	}
	// FIX: removed the dead `return 0 // impossible` pre-Go1 relic.
}
// i8test recomputes a % c with a variable divisor and checks it against
// b, which callers computed with a literal constant divisor.
func i8test(a, b, c int8) {
	d := a % c
	if d != b {
		println("i8", a, b, c, d)
		panic("fail")
	}
}
// i8run exercises signed 8-bit modulus by constant divisors; keep the
// divisors literal — constant-modulus lowering is the point of the test.
// FIX: the original tested a%8 (and a%-8) twice; every other width's
// divisor progression has 16 at that slot and 16 fits int8, so the
// duplicates were replaced with 16/-16 to restore the intended coverage.
func i8run() {
	for i := 0; i < Count; i++ {
		a := i8rand()
		i8test(a, a%1, 1)
		i8test(a, a%2, 2)
		i8test(a, a%3, 3)
		i8test(a, a%4, 4)
		i8test(a, a%5, 5)
		i8test(a, a%6, 6)
		i8test(a, a%7, 7)
		i8test(a, a%8, 8)
		i8test(a, a%10, 10)
		i8test(a, a%16, 16)
		i8test(a, a%20, 20)
		i8test(a, a%32, 32)
		i8test(a, a%60, 60)
		i8test(a, a%64, 64)
		i8test(a, a%127, 127)
		i8test(a, a%(-1), -1)
		i8test(a, a%(-2), -2)
		i8test(a, a%(-3), -3)
		i8test(a, a%(-4), -4)
		i8test(a, a%(-5), -5)
		i8test(a, a%(-6), -6)
		i8test(a, a%(-7), -7)
		i8test(a, a%(-8), -8)
		i8test(a, a%(-10), -10)
		i8test(a, a%(-16), -16)
		i8test(a, a%(-20), -20)
		i8test(a, a%(-32), -32)
		i8test(a, a%(-60), -60)
		i8test(a, a%(-64), -64)
		i8test(a, a%(-128), -128)
		i8test(a, a%(-101), -101)
	}
}
// u8rand returns a random uint8 with a randomly chosen effective bit width.
func u8rand() uint8 {
	v := uint8(rand.Uint32())
	return v >> uint(rand.Intn(8))
}
// u8test recomputes a % c with a variable divisor and checks it against
// b, which callers computed with a literal constant divisor.
func u8test(a, b, c uint8) {
	d := a % c
	if d != b {
		println("u8", a, b, c, d)
		panic("fail")
	}
}
// u8run exercises unsigned 8-bit modulus by constant divisors; keep the
// divisors literal — constant-modulus lowering is the point of the test.
// FIX: the original tested a%8 twice; every other width's divisor
// progression has 16 at that slot, so the duplicate was replaced with 16.
func u8run() {
	for i := 0; i < Count; i++ {
		a := u8rand()
		u8test(a, a%1, 1)
		u8test(a, a%2, 2)
		u8test(a, a%3, 3)
		u8test(a, a%4, 4)
		u8test(a, a%5, 5)
		u8test(a, a%6, 6)
		u8test(a, a%7, 7)
		u8test(a, a%8, 8)
		u8test(a, a%10, 10)
		u8test(a, a%16, 16)
		u8test(a, a%20, 20)
		u8test(a, a%32, 32)
		u8test(a, a%60, 60)
		u8test(a, a%64, 64)
		u8test(a, a%127, 127)
	}
}
// main runs the modulus-by-constant tests for every integer width,
// signed and unsigned.
func main() {
	xtest()
	i64run()
	u64run()
	i32run()
	u32run()
	i16run()
	u16run()
	i8run()
	u8run()
}
// xtest is an intentionally empty placeholder called first from main.
func xtest() {}
package slice
import (
"fmt"
"sort"
)
// Ordered is the set of types usable with this package's sorting and set
// helpers: any integer, float or string type (mirrors constraints.Ordered).
type Ordered interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 | ~string
}
// Unique returns a new, sorted slice with all duplicate values removed.
// The input slice is not modified; a nil input yields nil.
func Unique[T Ordered](ss []T) []T {
	sorted := Sort(ss)
	return SortedUnique(sorted)
}
// SortedUnique removes adjacent duplicates from an already-sorted slice,
// returning a new slice. A nil input yields nil.
func SortedUnique[T Ordered](ss []T) []T {
	if ss == nil {
		return nil
	}
	out := []T{}
	for i, v := range ss {
		// Keep an element only when it differs from its predecessor.
		if i == 0 || ss[i-1] != v {
			out = append(out, v)
		}
	}
	return out
}
// Sort returns a sorted copy of ss. Unlike sort.Slice on the input itself,
// it does not mutate the original slice. A nil input yields nil.
func Sort[T Ordered](ss []T) []T {
	if ss == nil {
		return nil
	}
	ss2 := make([]T, len(ss))
	copy(ss2, ss)
	// BUG FIX: the less function previously used `<=`, which returns true
	// for equal elements and violates sort.Slice's strict-ordering contract.
	sort.Slice(ss2, func(i int, j int) bool {
		return ss2[i] < ss2[j]
	})
	return ss2
}
// SortBy returns a sorted copy of ss ordered by sortFunc, a less-style
// comparator over indices i and j of the slice being sorted. The original
// slice is not mutated; a nil input yields nil.
func SortBy[T any](ss []T, sortFunc func(slice []T, i, j int) bool) []T {
	if ss == nil {
		return nil
	}
	ss2 := make([]T, len(ss))
	copy(ss2, ss)
	// sortFunc receives the copy, so comparators indexing the slice see the
	// partially-sorted working copy rather than the caller's data.
	sort.Slice(ss2, func(i, j int) bool {
		return sortFunc(ss2, i, j)
	})
	return ss2
}
// Compare walks the sorted, de-duplicated views of s1 and s2 in lock step,
// calling left(v) for values only in s1, right(v) for values only in s2,
// and equal(v) for values present in both. Nil callbacks are permitted.
// It is the speedy basis for the package's set operations (e.g. Subtract).
// Duplicates are collapsed first, so each callback fires at most once per
// value, in ascending order.
func Compare[T Ordered](s1, s2 []T, left, equal, right func(s T)) {
	// Replace nil callbacks with a no-op so the merge loop below can call
	// them unconditionally.
	var compareNoop = func(s T) {}
	if left == nil {
		left = compareNoop
	}
	if right == nil {
		right = compareNoop
	}
	if equal == nil {
		equal = compareNoop
	}
	s1 = Unique[T](Sort[T](s1))
	s2 = Unique[T](Sort[T](s2))
	s1Counter := 0
	s2Counter := 0
	// Classic sorted-merge walk: advance whichever side holds the smaller
	// value, emitting it to the matching callback.
	for s1Counter < len(s1) && s2Counter < len(s2) {
		if s1[s1Counter] < s2[s2Counter] {
			left(s1[s1Counter])
			s1Counter++
			continue
		}
		if s1[s1Counter] > s2[s2Counter] {
			right(s2[s2Counter])
			s2Counter++
			continue
		}
		// must be equal
		equal(s1[s1Counter])
		s1Counter++
		s2Counter++
	}
	// catch any remaining items
	for i := s1Counter; i < len(s1); i++ {
		left(s1[i])
	}
	for i := s2Counter; i < len(s2); i++ {
		right(s2[i])
	}
}
// Subtract is a set operation returning the (sorted, de-duplicated)
// elements of s1 that do not appear in s2.
func Subtract[T Ordered](s1, s2 []T) []T {
	out := []T{}
	collect := func(v T) { out = append(out, v) }
	Compare[T](s1, s2, collect, nil, nil)
	return out
}
// MapFunc constrains the transform accepted by Map: either an element-only
// func(T) R or an index-aware func(int, T) R.
type MapFunc[T any, R any] interface {
	~func(int, T) R | ~func(T) R
}

// Map applies funcInterface to every element of ss, returning the results
// in order. A nil input yields nil.
// Normal func structure is func(i int, s string) string.
// Also accepts func structure func(s string) string.
// NOTE(review): the type switch below matches exact function types only;
// a named type whose underlying type satisfies ~func(...) would hit the
// panic — confirm that is acceptable.
func Map[T any, R any, F MapFunc[T, R]](ss []T, funcInterface F) []R {
	if ss == nil {
		return nil
	}
	// Normalize both accepted shapes to the index-aware form.
	f := func(i int, s T) R {
		switch tf := (interface{})(funcInterface).(type) {
		case func(int, T) R:
			return tf(i, s)
		case func(T) R:
			return tf(s)
		}
		panic(fmt.Sprintf("Map cannot understand function type %T", funcInterface))
	}
	result := make([]R, len(ss))
	for i, s := range ss {
		result[i] = f(i, s)
	}
	return result
}
// AccumulatorFunc folds one element into the running accumulator: it
// receives the accumulator so far, the element index and the element, and
// returns the new accumulator.
type AccumulatorFunc[T any, R any] func(acc R, i int, s T) R

// Reduce (aka inject/fold) iterates items left-to-right, threading the
// accumulator through f. A nil slice returns initialAccumulator unchanged.
func Reduce[T any, R any](items []T, initialAccumulator R, f AccumulatorFunc[T, R]) R {
	if items == nil {
		return initialAccumulator
	}
	acc := initialAccumulator
	for i, s := range items {
		acc = f(acc, i, s)
	}
	return acc
}
// Index returns the position of the first occurrence of s in ss, or -1
// when s is absent. Linear scan: O(n).
func Index[T comparable](ss []T, s T) int {
	for i := range ss {
		if ss[i] == s {
			return i
		}
	}
	return -1
}
// SortedIndex returns the index of s in ss, or -1 when absent. It performs
// a log2(n) binary search, which is much faster for large lists, so ss
// must already be sorted in ascending order.
func SortedIndex[T Ordered](ss []T, s T) int {
	// sort.Search returns the smallest index i with ss[i] >= s (or len(ss)).
	idx := sort.Search(len(ss), func(i int) bool {
		return ss[i] >= s
	})
	if idx >= 0 && idx < len(ss) && ss[idx] == s {
		return idx
	}
	return -1
}
// First returns the first element of ss and true, or the zero value and
// false when the slice is empty.
func First[T any](ss []T) (T, bool) {
	var zero T
	if len(ss) == 0 {
		return zero, false
	}
	return ss[0], true
}
// Last returns the last element of ss and true, or the zero value and
// false when the slice is empty.
func Last[T any](ss []T) (T, bool) {
	var zero T
	if len(ss) == 0 {
		return zero, false
	}
	return ss[len(ss)-1], true
}
// SelectFunc constrains the predicate accepted by Select: either an
// index-aware func(int, T) bool or an element-only func(T) bool.
type SelectFunc[T any] interface {
	~func(int, T) bool | ~func(T) bool
}

// Select (aka filter) returns the elements of ss for which the predicate
// returns true, preserving order. Note: a nil input yields an empty,
// non-nil slice (unlike Map).
func Select[T any, F SelectFunc[T]](ss []T, funcInterface F) []T {
	// Normalize both accepted predicate shapes to the index-aware form.
	f := func(i int, s T) bool {
		switch tf := (interface{})(funcInterface).(type) {
		case func(int, T) bool:
			return tf(i, s)
		case func(T) bool:
			return tf(s)
		default:
			panic(fmt.Sprintf("Filter cannot understand function type %T", funcInterface))
		}
	}
	result := []T{}
	for i, s := range ss {
		if f(i, s) {
			result = append(result, s)
		}
	}
	return result
}
// Contains reports whether s is present in ss (linear scan).
func Contains[T comparable](ss []T, s T) bool {
	return Index(ss, s) >= 0
}
// SortedContains reports whether s is present in the already-sorted slice
// ss; binary search makes it faster than Contains for large slices.
func SortedContains[T Ordered](ss []T, s T) bool {
	return SortedIndex(ss, s) != -1
}
// Pop returns the last element and a slice of the remaining elements.
// The original slice is not modified; an empty slice yields the zero
// value and nil.
func Pop[T any](ss []T) (T, []T) {
	if last, ok := Last(ss); ok {
		return last, ss[:len(ss)-1]
	}
	var zero T
	return zero, nil
}
// Shift returns the first element and the remaining slice
func Shift[T any](ss []T) (T, []T) {
if len(ss) == 0 {
return *new(T), nil
}
return ss[0], ss[1:]
}
// Unshift returns a new slice with elem prepended before the contents of ss.
func Unshift[T any](ss []T, elem T) []T {
	out := make([]T, 0, len(ss)+1)
	out = append(out, elem)
	return append(out, ss...)
}
// FindFunc constrains the predicate accepted by Find: either an
// element-only func(T) bool or an index-aware func(int, T) bool.
type FindFunc[T any] interface {
	~func(T) bool | ~func(int, T) bool
}

// Find returns the first element matching the predicate and true, or the
// zero value and false when nothing matches.
func Find[T any, F FindFunc[T]](ss []T, funcInterface F) (elem T, found bool) {
	// Normalize both accepted predicate shapes to the index-aware form.
	f := func(i int, s T) bool {
		switch tf := (interface{})(funcInterface).(type) {
		case func(int, T) bool:
			return tf(i, s)
		case func(T) bool:
			return tf(s)
		default:
			panic(fmt.Sprintf("Find cannot understand function type %T", funcInterface))
		}
	}
	for i, s := range ss {
		if f(i, s) {
			return s, true
		}
	}
	return *new(T), false
}
// EachFunc constrains the callback accepted by Each: either an index-aware
// func(int, T) or an element-only func(T).
type EachFunc[T any] interface {
	~func(int, T) | ~func(T)
}

// Each invokes fn once per element of ss, in order.
func Each[T any, F EachFunc[T]](ss []T, fn F) {
	var run func(i int, t T)
	switch x := interface{}(fn).(type) {
	case func(int, T):
		run = func(i int, t T) { x(i, t) }
	case func(T):
		run = func(i int, t T) { x(t) }
	default:
		// BUG FIX: a named function type satisfies the ~func constraint but
		// does not match the exact-type cases above; previously run stayed
		// nil and the loop dereferenced it. Panic with a clear message
		// instead, consistent with Map/Select/Find.
		panic(fmt.Sprintf("Each cannot understand function type %T", fn))
	}
	for i, s := range ss {
		run(i, s)
	}
}
package primitives
import (
"fmt"
"math"
"github.com/FactomProject/factomd/common/constants"
"github.com/FactomProject/factomd/common/interfaces"
)
var _ = fmt.Println
// NextPowerOfTwo returns n if it is already a power of two, otherwise the
// next higher power of two. This is a helper function used during the
// calculation of a merkle tree.
// NOTE(review): the n&(n-1) trick also returns 0 unchanged, and negative n
// feeds Log2 a negative value — callers appear to pass positive counts
// only; confirm the intended input range.
// NOTE(review): float-based Log2 may misround for very large n; a
// bits.Len-based version would avoid floats entirely.
func NextPowerOfTwo(n int) int {
	// Return the number if it's already a power of 2.
	if n&(n-1) == 0 {
		return n
	}
	// Figure out and return the next power of two.
	exponent := uint(math.Log2(float64(n))) + 1
	return 1 << exponent // 2^exponent
}
// HashMerkleBranches hashes the concatenation of the left and right child
// hashes, producing their parent node in the merkle tree.
func HashMerkleBranches(left interfaces.IHash, right interfaces.IHash) interfaces.IHash {
	// Concatenate the left and right nodes into a single buffer, left first.
	var barray []byte = make([]byte, constants.ADDRESS_LENGTH*2)
	copy(barray[:constants.ADDRESS_LENGTH], left.Bytes())
	copy(barray[constants.ADDRESS_LENGTH:], right.Bytes())
	newSha := Sha(barray)
	return newSha
}
// ComputeMerkleRoot returns the Merkle root of the given hashes: the last
// element of the flat tree produced by BuildMerkleTreeStore.
func ComputeMerkleRoot(hashes []interfaces.IHash) interfaces.IHash {
	tree := BuildMerkleTreeStore(hashes)
	return tree[len(tree)-1]
}
// BuildMerkleTreeStore returns the Merkle tree as a flat slice built
// bottom-up: the input leaves first, then each successive level's parent
// hashes, so the root is at merkles[len(merkles)-1]. An odd node on a
// level is hashed with itself. An empty input yields a single zero hash.
func BuildMerkleTreeStore(hashes []interfaces.IHash) (merkles []interfaces.IHash) {
	if len(hashes) == 0 {
		return append(make([]interfaces.IHash, 0, 1), new(Hash))
	}
	if len(hashes) < 2 {
		// A single hash is its own root.
		return hashes
	}
	nextLevel := []interfaces.IHash{}
	for i := 0; i < len(hashes); i += 2 {
		var node interfaces.IHash
		if i+1 == len(hashes) {
			// Odd element out: pair it with itself.
			node = HashMerkleBranches(hashes[i], hashes[i])
		} else {
			node = HashMerkleBranches(hashes[i], hashes[i+1])
		}
		nextLevel = append(nextLevel, node)
	}
	// Recurse on the parent level and append its flat tree after the leaves.
	nextIteration := BuildMerkleTreeStore(nextLevel)
	return append(hashes, nextIteration...)
}
// MerkleNode is one step of a Merkle proof. In compact form only the
// sibling hash is set (Right for a left child, Left for a right child);
// with full detail the node's own hash and the resulting parent hash (Top)
// are populated as well (see BuildMerkleBranch).
type MerkleNode struct {
	Left  *Hash `json:"left,omitempty"`
	Right *Hash `json:"right,omitempty"`
	Top   *Hash `json:"top,omitempty"`
}
// BuildMerkleBranchForEntryHash locates entryHash among the leaves and
// returns its Merkle branch, or nil when the hash is not present.
func BuildMerkleBranchForEntryHash(hashes []interfaces.IHash, entryHash interfaces.IHash, fullDetail bool) []*MerkleNode {
	for i := range hashes {
		if hashes[i].IsSameAs(entryHash) {
			return BuildMerkleBranch(hashes, i, fullDetail)
		}
	}
	return nil
}
func BuildMerkleBranch(hashes []interfaces.IHash, entryIndex int, fullDetail bool) []*MerkleNode {
if len(hashes) < entryIndex || len(hashes) == 0 {
return nil
}
merkleTree := BuildMerkleTreeStore(hashes)
//fmt.Printf("Merkle tree - %v\n", merkleTree)
levelWidth := len(hashes)
complimentIndex := 0
topIndex := 0
index := entryIndex
answer := []*MerkleNode{}
offset := 0
for {
/*fmt.Printf("Index %v out of %v\n", offset+index, len(merkleTree))
fmt.Printf("levelWidth - %v\n", levelWidth)
fmt.Printf("offset - %v\n", offset)*/
if levelWidth == 1 {
break
}
mn := new(MerkleNode)
if index%2 == 0 {
complimentIndex = index + 1
if complimentIndex == levelWidth {
complimentIndex = index
}
topIndex = index/2 + levelWidth
mn.Right = merkleTree[offset+complimentIndex].(*Hash)
if fullDetail == true {
mn.Left = merkleTree[offset+index].(*Hash)
mn.Top = merkleTree[offset+topIndex].(*Hash)
}
} else {
complimentIndex = index - 1
topIndex = complimentIndex/2 + levelWidth
mn.Left = merkleTree[offset+complimentIndex].(*Hash)
if fullDetail == true {
mn.Right = merkleTree[offset+index].(*Hash)
mn.Top = merkleTree[offset+topIndex].(*Hash)
}
}
answer = append(answer, mn)
offset += levelWidth
index = topIndex - levelWidth
levelWidth = (levelWidth + 1) / 2
}
return answer
} | common/primitives/merkle.go | 0.658308 | 0.517937 | merkle.go | starcoder |
package chart
type Axes struct {
// A point on the Y axis at which the chart displays a horizontal line, indicating the max Y value of interest. Points with a Y value that exceeds the high water mark still appear.<br> **Notes:** * SignalFx only uses this value for time series charts. * Value must be less than or equal to `options.axes.max` and greater than `options.axes.lowWaterMark` for the corresponding axis.
HighWatermark *float32 `json:"highWatermark,omitempty"`
// A label that's displayed beside the horizontal line indicating the high water mark.<br> **Notes:** * SignalFx only uses this value for time series charts. * If `options.axes.highWaterMark` isn't specified, this label is ignored.
HighWatermarkLabel string `json:"highWatermarkLabel,omitempty"`
// Label that's displayed for the Y axis of the chart. It appears to the left of axis values on the left axis and to the right of axis values on the right axis.<br> **Note:** SignalFx only uses this value for time series charts.
Label string `json:"label,omitempty"`
// A point on the Y axis at which the chart displays a horizontal line, indicating the minimum Y value of interest. Points with a Y value that is less than the low water mark still appear.<br> **Notes:** * SignalFx only uses this value for time series charts. * Value must be greater than or equal to `options.axes.min` and less than `options.axes.highWaterMark` for the corresponding axis.
LowWatermark *float32 `json:"lowWatermark,omitempty"`
// A label that's displayed beside the horizontal line indicating the low water mark.<br> **Notes:** * SignalFx only uses this value for time series charts. * If `options.axes.lowWaterMark` isn't specified, this label is ignored.
LowWatermarkLabel string `json:"lowWatermarkLabel,omitempty"`
// Specifies the largest data value to display on the chart. Overrides options.includeZero if the properties are set to incompatible values.<br> **Notes:** * This value is only used if `options.type` is set to TimeSeriesChart. * The value must be greater than the value of `options.axes.min` for the same `options.axes` element.
Max *float32 `json:"max,omitempty"`
// Specifies the smallest data value to display on the chart. Overrides options.includeZero if the properties are set to incompatible values.<br> **Notes:** * This value is only used if `options.type` is set to TimeSeriesChart. * The value must be less than the value of `options.axes.max` for the same `options.axes` element.\"
Min *float32 `json:"min,omitempty"`
} | chart/model_axes.go | 0.916367 | 0.800458 | model_axes.go | starcoder |
package main
import (
"sort"
"strings"
)
// copied from elsewhere in the codebase (migration shouldn't import from
// codebase)
func newUnionFind(pks []string) *unionFind {
ret := &unionFind{
parents: make(map[string]string, len(pks)),
ranks: make(map[string]int, len(pks)),
}
for _, pk := range pks {
ret.parents[pk] = pk
ret.ranks[pk] = 0
}
return ret
}
// unionFind is an implementation of a union-find data structure for efficient
// computation of connected components in a graph. The DS implements path
// compression on find and rank-rated union
type unionFind struct {
parents map[string]string
ranks map[string]int
}
// find finds the root of the given pk and compresses the path
func (uf *unionFind) find(pk string) string {
p := uf.parents[pk]
if p != pk {
uf.parents[pk] = uf.find(p)
}
return uf.parents[pk]
}
// union merges the sets containing x and y using union by rank.
// It is a no-op when both keys already belong to the same set.
func (uf *unionFind) union(x, y string) {
	xRoot, yRoot := uf.find(x), uf.find(y)
	if xRoot == yRoot {
		return
	}
	// To make things simpler, we invariantly make the rank of yRoot's tree
	// smaller than xRoot so we can always merge yRoot into xRoot
	xRank, yRank := uf.ranks[xRoot], uf.ranks[yRoot]
	if xRank < yRank {
		xRoot, yRoot = yRoot, xRoot
	}
	// The only time we need to update rank is if the trees were equal size,
	// since merging a smaller tree into a larger one doesn't affect the
	// larger one's rank.
	uf.parents[yRoot] = xRoot
	if xRank == yRank {
		uf.ranks[xRoot]++
	}
}
// getComponents returns the connected components in the data structure, sorted
// by length and tiebroken by string join
func (uf *unionFind) getComponents() [][]string {
	// do a find() on everything to fully compress all paths
	for pk := range uf.parents {
		uf.find(pk)
	}
	// now we can invert the parent map - each root points to a list of
	// child pk's and itself since root nodes are self-referential in parents
	inverseMap := map[string][]string{}
	for pk, parent := range uf.parents {
		inverseMap[parent] = append(inverseMap[parent], pk)
	}
	// construct the return value
	ret := make([][]string, 0, len(inverseMap))
	for _, children := range inverseMap {
		sort.Strings(children) // keep members of each component in lexical order
		ret = append(ret, children)
	}
	// order components by size, then lexically by their joined members
	sort.Slice(
		ret,
		func(i, j int) bool {
			if len(ret[i]) == len(ret[j]) {
				return strings.Join(ret[i], "") < strings.Join(ret[j], "")
			}
			return len(ret[i]) < len(ret[j])
		},
	)
	return ret
}
package xorfilter
import (
"errors"
"math"
"math/bits"
"sort"
)
// BinaryFuse8 is a probabilistic membership filter ("binary fuse" filter)
// with 8-bit fingerprints. The zero value is not usable; build one with
// PopulateBinaryFuse8.
type BinaryFuse8 struct {
	Seed               uint64  // hashing seed chosen during construction
	SegmentLength      uint32  // length of one segment (a power of two)
	SegmentLengthMask  uint32  // SegmentLength - 1
	SegmentCount       uint32  // number of segments
	SegmentCountLength uint32  // SegmentCount * SegmentLength
	Fingerprints       []uint8 // fingerprint array backing the filter
}
// calculateSegmentLength picks the segment length (a power of two) for a
// filter of the given arity holding size keys. The constants are tuned
// empirically; replacing floor by round measurably changes construction time.
func calculateSegmentLength(arity uint32, size uint32) uint32 {
	if size == 0 {
		return 4
	}
	switch arity {
	case 3:
		shift := int(math.Floor(math.Log(float64(size))/math.Log(3.33) + 2.25))
		return uint32(1) << shift
	case 4:
		shift := int(math.Floor(math.Log(float64(size))/math.Log(2.91) - 0.5))
		return uint32(1) << shift
	default:
		return 65536
	}
}
func calculateSizeFactor(arity uint32, size uint32) float64 {
if arity == 3 {
return math.Max(1.125, 0.875 + 0.25 * math.Log(1000000)/math.Log(float64(size)))
} else if arity == 4 {
return math.Max(1.075, 0.77 + 0.305 * math.Log(600000)/math.Log(float64(size)))
} else {
return 2.0
}
}
// initializeParameters sizes the filter for `size` keys: it derives the
// segment geometry (length, mask, count) for arity 3 and allocates the
// fingerprint array.
func (filter *BinaryFuse8) initializeParameters(size uint32) {
	arity := uint32(3)
	filter.SegmentLength = calculateSegmentLength(arity, size)
	// cap the segment length for very large inputs
	if filter.SegmentLength > 262144 {
		filter.SegmentLength = 262144
	}
	filter.SegmentLengthMask = filter.SegmentLength - 1
	sizeFactor := calculateSizeFactor(arity, size)
	capacity := uint32(0)
	if size > 1 {
		capacity = uint32(math.Round(float64(size) * sizeFactor))
	}
	// round the array length up to a whole number of segments, keeping
	// (arity - 1) trailing segments so every hash window stays in bounds
	initSegmentCount := (capacity+filter.SegmentLength-1)/filter.SegmentLength - (arity - 1)
	arrayLength := (initSegmentCount + arity - 1) * filter.SegmentLength
	filter.SegmentCount = (arrayLength + filter.SegmentLength - 1) / filter.SegmentLength
	if filter.SegmentCount <= arity-1 {
		filter.SegmentCount = 1
	} else {
		filter.SegmentCount = filter.SegmentCount - (arity - 1)
	}
	arrayLength = (filter.SegmentCount + arity - 1) * filter.SegmentLength
	filter.SegmentCountLength = filter.SegmentCount * filter.SegmentLength
	filter.Fingerprints = make([]uint8, arrayLength)
}
// getHashFromHash maps a 64-bit key hash to the key's three slots in the
// fingerprint array: one in each of three consecutive segments.
func (filter *BinaryFuse8) getHashFromHash(hash uint64) (uint32, uint32, uint32) {
	// multiply-shift maps hash uniformly onto [0, SegmentCountLength)
	hi, _ := bits.Mul64(hash, uint64(filter.SegmentCountLength))
	h0 := uint32(hi)
	h1 := h0 + filter.SegmentLength
	h2 := h1 + filter.SegmentLength
	// perturb the second and third slots within their segments using
	// independent bits of the hash
	h1 ^= uint32(hash>>18) & filter.SegmentLengthMask
	h2 ^= uint32(hash) & filter.SegmentLengthMask
	return h0, h1, h2
}
// mod3 reduces x modulo 3 for inputs below 6, which covers all values
// produced by the construction code (sums of two values in [0, 3)).
func mod3(x uint8) uint8 {
	if x >= 3 {
		return x - 3
	}
	return x
}
// PopulateBinaryFuse8 fills a BinaryFuse8 filter with provided keys.
// The function may return an error after too many iterations: it is unlikely.
// If your input has duplicates, it may get sorted.
//
// Construction is a peeling process: each key occupies three slots; slots
// hit by exactly one remaining key are repeatedly "peeled" onto a stack,
// then fingerprints are assigned in reverse peeling order. A failed round
// restarts with a fresh seed.
func PopulateBinaryFuse8(keys []uint64) (*BinaryFuse8, error) {
	size := uint32(len(keys))
	filter := &BinaryFuse8{}
	filter.initializeParameters(size)
	rngcounter := uint64(1)
	filter.Seed = splitmix64(&rngcounter)
	capacity := uint32(len(filter.Fingerprints))
	alone := make([]uint32, capacity)
	// the lowest 2 bits are the h index (0, 1, or 2)
	// so we only have 6 bits for counting;
	// but that's sufficient
	t2count := make([]uint8, capacity)
	reverseH := make([]uint8, size)
	t2hash := make([]uint64, capacity)
	reverseOrder := make([]uint64, size+1)
	reverseOrder[size] = 1
	// the array h0, h1, h2, h0, h1, h2
	var h012 [6]uint32
	// this could be used to compute the mod3
	// tabmod3 := [5]uint8{0,1,2,0,1}
	iterations := 0
	for true {
		iterations += 1
		if iterations > MaxIterations {
			return nil, errors.New("too many iterations, you probably have duplicate keys")
		}
		blockBits := 1
		for (1 << blockBits) < filter.SegmentCount {
			blockBits += 1
		}
		startPos := make([]uint, 1<<blockBits)
		for i, _ := range startPos {
			// important: we do not want i * size to overflow!!!
			startPos[i] = uint((uint64(i) * uint64(size)) >> blockBits)
		}
		// Bucket-sort the key hashes into reverseOrder by their top bits,
		// probing linearly past occupied slots.
		for _, key := range keys {
			hash := mixsplit(key, filter.Seed)
			segment_index := hash >> (64 - blockBits)
			for reverseOrder[startPos[segment_index]] != 0 {
				segment_index++
				segment_index &= (1 << blockBits) - 1
			}
			reverseOrder[startPos[segment_index]] = hash
			startPos[segment_index] += 1
		}
		error := 0
		duplicates := uint32(0)
		// Count, per slot, how many keys hit it (upper 6 bits of t2count)
		// and XOR-accumulate their hashes and h-indices.
		for i := uint32(0); i < size; i++ {
			hash := reverseOrder[i]
			index1, index2, index3 := filter.getHashFromHash(hash)
			t2count[index1] += 4
			// t2count[index1] ^= 0 // noop
			t2hash[index1] ^= hash
			t2count[index2] += 4
			t2count[index2] ^= 1
			t2hash[index2] ^= hash
			t2count[index3] += 4
			t2count[index3] ^= 2
			t2hash[index3] ^= hash
			// If we have duplicated hash values, then it is likely that
			// the next comparison is true
			if t2hash[index1] & t2hash[index2] & t2hash[index3] == 0 {
				// next we do the actual test
				if ((t2hash[index1] == 0) && (t2count[index1] == 8)) || ((t2hash[index2] == 0) && (t2count[index2] == 8)) || ((t2hash[index3] == 0) && (t2count[index3] == 8)) {
					duplicates += 1
					t2count[index1] -= 4
					t2hash[index1] ^= hash
					t2count[index2] -= 4
					t2count[index2] ^= 1
					t2hash[index2] ^= hash
					t2count[index3] -= 4
					t2count[index3] ^= 2
					t2hash[index3] ^= hash
				}
			}
			// a counter wrapped below 4: too many keys in one slot, retry
			if t2count[index1] < 4 {
				error = 1
			}
			if t2count[index2] < 4 {
				error = 1
			}
			if t2count[index3] < 4 {
				error = 1
			}
		}
		if error == 1 {
			for i := uint32(0); i < size; i++ {
				reverseOrder[i] = 0
			}
			for i := uint32(0); i < capacity; i++ {
				t2count[i] = 0
				t2hash[i] = 0
			}
			filter.Seed = splitmix64(&rngcounter)
			continue
		}
		// End of key addition
		Qsize := 0
		// Add sets with one key to the queue.
		for i := uint32(0); i < capacity; i++ {
			alone[Qsize] = i
			if (t2count[i] >> 2) == 1 {
				Qsize++
			}
		}
		stacksize := uint32(0)
		// Peel: pop singleton slots, push their key onto the stack, and
		// decrement the key's two other slots (possibly making them singletons).
		for Qsize > 0 {
			Qsize--
			index := alone[Qsize]
			if (t2count[index] >> 2) == 1 {
				hash := t2hash[index]
				found := t2count[index] & 3
				reverseH[stacksize] = found
				reverseOrder[stacksize] = hash
				stacksize++
				index1, index2, index3 := filter.getHashFromHash(hash)
				h012[1] = index2
				h012[2] = index3
				h012[3] = index1
				h012[4] = h012[1]
				other_index1 := h012[found+1]
				alone[Qsize] = other_index1
				if (t2count[other_index1] >> 2) == 2 {
					Qsize++
				}
				t2count[other_index1] -= 4
				t2count[other_index1] ^= mod3(found + 1) // could use this instead: tabmod3[found+1]
				t2hash[other_index1] ^= hash
				other_index2 := h012[found+2]
				alone[Qsize] = other_index2
				if (t2count[other_index2] >> 2) == 2 {
					Qsize++
				}
				t2count[other_index2] -= 4
				t2count[other_index2] ^= mod3(found + 2) // could use this instead: tabmod3[found+2]
				t2hash[other_index2] ^= hash
			}
		}
		if stacksize + duplicates == size {
			// Success
			size = stacksize
			break
		} else if duplicates > 0 {
			// Duplicates were found, but we did not
			// manage to remove them all. We may simply sort the key to
			// solve the issue. This will run in time O(n log n) and it
			// mutates the input.
			sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
		}
		// reset scratch state and retry with a new seed
		for i := uint32(0); i < size; i++ {
			reverseOrder[i] = 0
		}
		for i := uint32(0); i < capacity; i++ {
			t2count[i] = 0
			t2hash[i] = 0
		}
		filter.Seed = splitmix64(&rngcounter)
	}
	if size == 0 {
		return filter, nil
	}
	// Assign fingerprints in reverse peeling order so each key's three
	// slots XOR to its fingerprint.
	for i := int(size - 1); i >= 0; i-- {
		// the hash of the key we insert next
		hash := reverseOrder[i]
		xor2 := uint8(fingerprint(hash))
		index1, index2, index3 := filter.getHashFromHash(hash)
		found := reverseH[i]
		h012[0] = index1
		h012[1] = index2
		h012[2] = index3
		h012[3] = h012[0]
		h012[4] = h012[1]
		filter.Fingerprints[h012[found]] = xor2 ^ filter.Fingerprints[h012[found+1]] ^ filter.Fingerprints[h012[found+2]]
	}
	return filter, nil
}
// Contains returns `true` if key is part of the set with a false positive probability of <0.4%.
func (filter *BinaryFuse8) Contains(key uint64) bool {
	hash := mixsplit(key, filter.Seed)
	f := uint8(fingerprint(hash))
	h0, h1, h2 := filter.getHashFromHash(hash)
	// a member's three stored fingerprints XOR back to its own fingerprint
	f ^= filter.Fingerprints[h0] ^ filter.Fingerprints[h1] ^ filter.Fingerprints[h2]
	return f == 0
}
// This file must be kept in sync with index_bound_checks.go.
//+build !bounds
package mat64
import "github.com/gonum/blas"
// At returns the element at row r, column c.
// It panics with ErrRowAccess or ErrColAccess when the index is out of range.
func (m *Dense) At(r, c int) float64 {
	if r >= m.mat.Rows || r < 0 {
		panic(ErrRowAccess)
	}
	if c >= m.mat.Cols || c < 0 {
		panic(ErrColAccess)
	}
	return m.at(r, c)
}

// at is the unchecked accessor; callers must have validated r and c.
func (m *Dense) at(r, c int) float64 {
	return m.mat.Data[r*m.mat.Stride+c]
}

// Set sets the element at row r, column c to the value v.
// It panics with ErrRowAccess or ErrColAccess when the index is out of range.
func (m *Dense) Set(r, c int, v float64) {
	if r >= m.mat.Rows || r < 0 {
		panic(ErrRowAccess)
	}
	if c >= m.mat.Cols || c < 0 {
		panic(ErrColAccess)
	}
	m.set(r, c, v)
}

// set is the unchecked setter; callers must have validated r and c.
func (m *Dense) set(r, c int, v float64) {
	m.mat.Data[r*m.mat.Stride+c] = v
}
// At returns the element at row r, column c. It panics if c is not zero.
func (v *Vector) At(r, c int) float64 {
	if r < 0 || r >= v.n {
		panic(ErrRowAccess)
	}
	if c != 0 {
		panic(ErrColAccess)
	}
	return v.at(r)
}

// at is the unchecked accessor; callers must have validated r.
func (v *Vector) at(r int) float64 {
	return v.mat.Data[r*v.mat.Inc]
}

// SetVec sets the element at index i to the value val. It panics if i is
// negative or greater than or equal to the vector length.
func (v *Vector) SetVec(i int, val float64) {
	if i < 0 || i >= v.n {
		panic(ErrVectorAccess)
	}
	v.setVec(i, val)
}

// setVec is the unchecked setter; callers must have validated i.
func (v *Vector) setVec(i int, val float64) {
	v.mat.Data[i*v.mat.Inc] = val
}
// At returns the element at row r and column c.
// It panics with ErrRowAccess or ErrColAccess when the index is out of range.
func (s *SymDense) At(r, c int) float64 {
	if r >= s.mat.N || r < 0 {
		panic(ErrRowAccess)
	}
	if c >= s.mat.N || c < 0 {
		panic(ErrColAccess)
	}
	return s.at(r, c)
}

// at is the unchecked accessor. Only the upper triangle is stored, so the
// indices are swapped when r > c.
func (s *SymDense) at(r, c int) float64 {
	if r > c {
		r, c = c, r
	}
	return s.mat.Data[r*s.mat.Stride+c]
}

// SetSym sets the elements at (r,c) and (c,r) to the value v.
// It panics with ErrRowAccess or ErrColAccess when the index is out of range.
func (s *SymDense) SetSym(r, c int, v float64) {
	if r >= s.mat.N || r < 0 {
		panic(ErrRowAccess)
	}
	if c >= s.mat.N || c < 0 {
		panic(ErrColAccess)
	}
	s.set(r, c, v)
}

// set writes to the stored (upper-triangle) location for (r,c); a single
// write covers both symmetric positions.
func (s *SymDense) set(r, c int, v float64) {
	if r > c {
		r, c = c, r
	}
	s.mat.Data[r*s.mat.Stride+c] = v
}
// At returns the element at row r, column c.
// It panics with ErrRowAccess or ErrColAccess when the index is out of range.
func (t *TriDense) At(r, c int) float64 {
	if r >= t.mat.N || r < 0 {
		panic(ErrRowAccess)
	}
	if c >= t.mat.N || c < 0 {
		panic(ErrColAccess)
	}
	return t.at(r, c)
}

// at is the unchecked accessor. Elements in the zero half of the
// triangular matrix read as 0 without touching the backing data.
func (t *TriDense) at(r, c int) float64 {
	if t.mat.Uplo == blas.Upper {
		if r > c {
			return 0
		}
		return t.mat.Data[r*t.mat.Stride+c]
	}
	if r < c {
		return 0
	}
	return t.mat.Data[r*t.mat.Stride+c]
}

// SetTri sets the element at row r, column c to the value v.
// It panics if the location is outside the appropriate half of the matrix.
func (t *TriDense) SetTri(r, c int, v float64) {
	if r >= t.mat.N || r < 0 {
		panic(ErrRowAccess)
	}
	if c >= t.mat.N || c < 0 {
		panic(ErrColAccess)
	}
	if t.mat.Uplo == blas.Upper && r > c {
		panic("mat64: triangular set out of bounds")
	}
	if t.mat.Uplo == blas.Lower && r < c {
		panic("mat64: triangular set out of bounds")
	}
	t.set(r, c, v)
}

// set is the unchecked setter; callers must have validated the location.
func (t *TriDense) set(r, c int, v float64) {
	t.mat.Data[r*t.mat.Stride+c] = v
}
package sc
// JPverb is a reverb effect from sc3-plugins.
// See https://github.com/supercollider/sc3-plugins
// and http://doc.sccode.org/Classes/JPverb.html
// Nil parameter inputs are replaced with their documented defaults when
// Rate is called.
type JPverb struct {
	// In is the input signal.
	In Input
	// T60 is time for the reverb to decay 60db.
	// Does not effect early reflections. (0.1..60)
	T60 Input
	// Damp controls damping of high-frequencies as the reverb decays.
	// 0 is no damping, 1 is very strong damping (0..1)
	Damp Input
	// Size scales size of delay-lines within the reverberator,
	// producing the impression of a larger or smaller space.
	// Values below 1 can sound metallic. (0.5..5)
	Size Input
	// EarlyDiff controls shape of early reflections.
	// Values of 0.707 or more produce smooth exponential decay.
	// Lower values produce a slower build-up of echoes. (0..1)
	EarlyDiff Input
	// ModDepth is the depth of delay-line modulation.
	// Use in combination with ModFreq to set amount of chorusing
	// within the structure. (0..1)
	ModDepth Input
	// ModFreq is the frequency of delay-line modulation.
	// Use in combination with modDepth to set amount of chorusing
	// within the structure. (0..10)
	ModFreq Input
	// Low is the multiplier for the reverberation time within the low band. (0..1)
	Low Input
	// Mid is the multiplier for the reverberation time within the mid band. (0..1)
	Mid Input
	// High is the multiplier for the reverberation time within the high band. (0..1)
	High Input
	// LowCut is the frequency at which the crossover between
	// the low and mid bands of the reverb occurs. (100..6000)
	LowCut Input
	// HighCut is the frequency at which the crossover between
	// the mid and high bands of the reverb occurs. (1000..10000)
	HighCut Input
}
// defaults replaces every nil parameter with its default constant value.
// It is invoked from Rate before the ugen inputs are assembled.
func (jpv *JPverb) defaults() {
	if jpv.T60 == nil {
		jpv.T60 = C(1)
	}
	if jpv.Damp == nil {
		jpv.Damp = C(0)
	}
	if jpv.Size == nil {
		jpv.Size = C(1)
	}
	if jpv.EarlyDiff == nil {
		jpv.EarlyDiff = C(0.707)
	}
	if jpv.ModDepth == nil {
		jpv.ModDepth = C(0.1)
	}
	if jpv.ModFreq == nil {
		jpv.ModFreq = C(2)
	}
	if jpv.Low == nil {
		jpv.Low = C(1)
	}
	if jpv.Mid == nil {
		jpv.Mid = C(1)
	}
	if jpv.High == nil {
		jpv.High = C(1)
	}
	if jpv.LowCut == nil {
		jpv.LowCut = C(500)
	}
	if jpv.HighCut == nil {
		jpv.HighCut = C(2000)
	}
}
// Rate creates a new ugen at a specific rate.
// If rate is an unsupported value this method will cause a runtime panic.
// If the input signal is nil this method will panic.
func (jpv JPverb) Rate(rate int8) Input {
	CheckRate(rate)
	if jpv.In == nil {
		panic("JPverb requires an input signal")
	}
	(&jpv).defaults()
	// Derive a stereo pair from the input: for multichannel input use the
	// first and last channels; a plain input is duplicated onto both.
	var in1, in2 Input
	switch x := jpv.In.(type) {
	case Inputs:
		if len(x) == 0 {
			panic("JPverb requires an input signal")
		}
		in1, in2 = x[0], x[len(x)-1]
	default:
		in1, in2 = jpv.In, jpv.In
	}
	return NewInput("JPverbRaw", rate, 0, 2, in1, in2, jpv.T60, jpv.Damp, jpv.Size, jpv.EarlyDiff, jpv.ModDepth, jpv.ModFreq, jpv.Low, jpv.Mid, jpv.High, jpv.LowCut, jpv.HighCut)
}
// defJPverb is a synthdef that exposes the fields of the JPverb ugen.
// Each field becomes a named synthdef parameter with the same default the
// ugen itself would use.
func defJPverb(params Params) Ugen {
	var (
		in        = params.Add("in", 0)
		out       = params.Add("out", 0)
		t60       = params.Add("t60", 1)
		damp      = params.Add("damp", 0)
		size      = params.Add("size", 1)
		earlyDiff = params.Add("earlyDiff", 0.707)
		modDepth  = params.Add("modDepth", 0.1)
		modFreq   = params.Add("modFreq", 2)
		low       = params.Add("low", 1)
		mid       = params.Add("mid", 1)
		high      = params.Add("high", 1)
		lowcut    = params.Add("lowcut", 500)
		highcut   = params.Add("highcut", 2000)
	)
	// read a stereo signal from bus `in`, reverberate it and write to `out`
	return Out{
		Bus: out,
		Channels: JPverb{
			In: In{
				Bus:         in,
				NumChannels: 2,
			}.Rate(AR),
			T60:       t60,
			Damp:      damp,
			Size:      size,
			EarlyDiff: earlyDiff,
			ModDepth:  modDepth,
			ModFreq:   modFreq,
			Low:       low,
			Mid:       mid,
			High:      high,
			LowCut:    lowcut,
			HighCut:   highcut,
		}.Rate(AR),
	}.Rate(AR)
}
package cron
import (
"fmt"
"strings"
"time"
)
// Compile-time checks that DailySchedule implements Schedule and fmt.Stringer.
var (
	_ Schedule     = (*DailySchedule)(nil)
	_ fmt.Stringer = (*DailySchedule)(nil)
)
// WeeklyAtUTC returns a schedule that fires on every of the given days at the
// given time by hour, minute and second in UTC.
func WeeklyAtUTC(hour, minute, second int, days ...time.Weekday) Schedule {
	var mask uint
	for _, d := range days {
		mask |= 1 << uint(d)
	}
	return &DailySchedule{
		DayOfWeekMask: mask,
		TimeOfDayUTC:  time.Date(0, 0, 0, hour, minute, second, 0, time.UTC),
	}
}
// DailyAtUTC returns a schedule that fires every day at the given hour, minute and second in UTC.
func DailyAtUTC(hour, minute, second int) Schedule {
	return &DailySchedule{DayOfWeekMask: AllDaysMask, TimeOfDayUTC: time.Date(0, 0, 0, hour, minute, second, 0, time.UTC)}
}

// WeekdaysAtUTC returns a schedule that fires every week day at the given hour, minute and second in UTC.
func WeekdaysAtUTC(hour, minute, second int) Schedule {
	return &DailySchedule{DayOfWeekMask: WeekDaysMask, TimeOfDayUTC: time.Date(0, 0, 0, hour, minute, second, 0, time.UTC)}
}

// WeekendsAtUTC returns a schedule that fires every weekend day at the given hour, minute and second in UTC.
func WeekendsAtUTC(hour, minute, second int) Schedule {
	return &DailySchedule{DayOfWeekMask: WeekendDaysMask, TimeOfDayUTC: time.Date(0, 0, 0, hour, minute, second, 0, time.UTC)}
}
// DailySchedule is a schedule that fires every day that satisfies the DayOfWeekMask at the given TimeOfDayUTC.
type DailySchedule struct {
	DayOfWeekMask uint      // bitmask with bit (1 << weekday) set for each enabled day
	TimeOfDayUTC  time.Time // only the hour/minute/second components are used
}
// String implements fmt.Stringer, describing the scheduled time of day and
// the enabled weekdays ("every day" when the mask is empty).
func (ds DailySchedule) String() string {
	if ds.DayOfWeekMask > 0 {
		var days []string
		for _, d := range DaysOfWeek {
			if ds.checkDayOfWeekMask(d) {
				days = append(days, d.String())
			}
		}
		return fmt.Sprintf("%s on %s each week", ds.TimeOfDayUTC.Format(time.RFC3339), strings.Join(days, ", "))
	}
	return fmt.Sprintf("%s every day", ds.TimeOfDayUTC.Format(time.RFC3339))
}
// checkDayOfWeekMask reports whether the given weekday's bit is set in the
// schedule's day-of-week bitmask.
func (ds DailySchedule) checkDayOfWeekMask(day time.Weekday) bool {
	return ds.DayOfWeekMask&(uint(1)<<uint(day)) != 0
}
// Next implements Schedule. It returns the first instant strictly after
// `after` that matches the schedule's time of day on an enabled weekday,
// or Zero when no weekday bit is set in the mask.
func (ds DailySchedule) Next(after time.Time) time.Time {
	if after.IsZero() {
		after = Now()
	}
	// candidate at the scheduled time of day on `after`'s date
	todayInstance := time.Date(after.Year(), after.Month(), after.Day(), ds.TimeOfDayUTC.Hour(), ds.TimeOfDayUTC.Minute(), ds.TimeOfDayUTC.Second(), 0, time.UTC)
	for day := 0; day < 8; day++ {
		next := todayInstance.AddDate(0, 0, day) //the first run here it should be adding nothing, i.e. returning todayInstance ...
		if ds.checkDayOfWeekMask(next.Weekday()) && next.After(after) { //we're on a day ...
			return next
		}
	}
	return Zero
}
package semver
import (
"bytes"
"fmt"
"regexp"
"strings"
"github.com/blang/semver"
)
// queryOp is a relational operator used by version query bounds.
type queryOp int

// An arithmetic operation requires a hard version query.
// Placeholders are expanded to zero in these cases
const (
	queryEq queryOp = 0
	queryLt queryOp = 1
	queryGt queryOp = 2
	queryLe queryOp = 3
	queryGe queryOp = 4
)
// compare reports whether version a satisfies the relation `a op b`, where
// a is the candidate version and b is the query bound's version.
func (op queryOp) compare(a *semver.Version, b *semver.Version) bool {
	switch op {
	case queryEq:
		return a.EQ(*b)
	case queryLt:
		return a.LT(*b)
	case queryGt:
		// BUG FIX: was `b.GT(*a)` (b > a, i.e. a < b), inverting the test
		return a.GT(*b)
	case queryLe:
		// BUG FIX: was `b.LE(*a)` (b <= a, i.e. a >= b), inverting the test
		return a.LE(*b)
	case queryGe:
		return a.GE(*b)
	default:
		return false
	}
}
// Query selects versions: it can test a single version for membership and
// pick the best match out of a sorted list.
type Query interface {
	Matches(ver *semver.Version) bool
	FindBest(list List) *semver.Version
	Str() string
}

// singleBound is a one-sided constraint `op ver`, e.g. ">=1.2.0".
type singleBound struct {
	ver *semver.Version
	op  queryOp
}

// dualBound is an inclusive/exclusive version range.
// lower.op always queryGe or queryGt
// upper.op always queryLe or queryLt
type dualBound struct {
	lower *singleBound
	upper *singleBound
}

// queryList ORs several queries together.
type queryList []Query
// Matches reports whether ver satisfies this bound.
func (q *singleBound) Matches(ver *semver.Version) bool {
	return q.op.compare(ver, q.ver)
}
// FindBest returns the highest version in list that satisfies this bound,
// or nil if none does. list must be sorted in ascending order.
func (q *singleBound) FindBest(list List) *semver.Version {
	// assumes list is sorted
	if len(list) <= 0 {
		return nil
	}
	switch q.op {
	case queryEq:
		for _, ver := range list {
			if q.ver.EQ(*ver) {
				return ver
			}
		}
		return nil
	case queryLt, queryLe:
		// find the highest matching version
		for i := len(list) - 1; i >= 0; i-- {
			if q.Matches(list[i]) {
				return list[i]
			}
		}
		return nil
	case queryGt, queryGe:
		// check if highest version matches
		last := list[len(list)-1]
		if q.Matches(last) {
			return last
		}
		return nil
	default:
		return nil
	}
}
// Str renders the bound as "<op><version>", e.g. ">=1.2.0".
func (q *singleBound) Str() string {
	return fmt.Sprintf("%s%s", queryInv[q.op], q.ver.String())
}
// Matches reports whether ver lies within both the lower and upper bound.
func (q *dualBound) Matches(ver *semver.Version) bool {
	return q.lower.Matches(ver) && q.upper.Matches(ver)
}

// FindBest returns the highest version in list inside the range, or nil.
// list must be sorted in ascending order.
func (q *dualBound) FindBest(list List) *semver.Version {
	// assumes list is sorted
	if len(list) <= 0 {
		return nil
	}
	// find highest allowable that meets lower bound
	top := q.upper.FindBest(list)
	if top != nil && q.lower.Matches(top) {
		return top
	}
	return nil
}

// Str renders the range as "<lower> <upper>".
func (q *dualBound) Str() string {
	return fmt.Sprintf("%s %s", q.lower.Str(), q.upper.Str())
}
// Matches reports whether ver satisfies at least one sub-query (logical OR).
func (ql queryList) Matches(ver *semver.Version) bool {
	for _, q := range ql {
		if q.Matches(ver) {
			return true
		}
	}
	return false
}

// FindBest returns the highest version matched by any sub-query, or nil.
func (ql queryList) FindBest(list List) *semver.Version {
	res := make(List, 0, len(ql))
	for _, q := range ql {
		if ver := q.FindBest(list); ver != nil {
			res = append(res, ver)
		}
	}
	if len(res) == 0 {
		return nil
	}
	res.Sort()
	return res[len(res)-1]
}

// Str renders the sub-queries joined by " || ".
func (ql queryList) Str() string {
	var buf bytes.Buffer
	for _, q := range ql {
		buf.WriteString(q.Str())
		buf.WriteString(" || ")
	}
	res := buf.String()
	// drop the trailing " || " separator
	return res[:len(res)-4]
}
// trimX truncates str at the first wildcard character (x, X or *) and
// removes the trailing dot that the cut may leave behind.
func trimX(str string) string {
	if loc := anyMatch.FindStringIndex(str); loc != nil {
		str = str[:loc[0]]
	}
	return strings.TrimSuffix(str, ".")
}
// stripEqV removes a leading "=" and then a leading "v" from a version
// string, e.g. "=v1.2.3" becomes "1.2.3".
func stripEqV(str string) string {
	str = strings.TrimPrefix(str, "=")
	return strings.TrimPrefix(str, "v")
}
// Regular expressions recognizing the supported range syntaxes (comparator,
// plain/wildcard version, tilde, caret, hyphen range, space-ANDed pair),
// plus helpers for wildcards, separators and leading operators.
var cmpMatch = regexp.MustCompile(`^(>=|<=|>|<)v?([*xX]|[0-9]+)(\.([*xX]|[0-9]+)){0,2}(-[a-zA-Z0-9]+(\.[0-9]+)?)?$`)
var misMatch = regexp.MustCompile(`^=?v?(([*xX]|[0-9]+)(\.([*xX]|[0-9]+)){0,2})?(-[a-zA-Z0-9]+(\.[0-9]+)?)?$`)
var tildeMatch = regexp.MustCompile(`^~=?v?(([*xX]|[0-9]+)(\.([*xX]|[0-9]+)){0,2}(-[0-9a-zA-Z]+(\.[0-9]+)?)?)?$`)
var caretMatch = regexp.MustCompile(`^\^=?v?(([*xX]|[0-9]+)(\.([*xX]|[0-9]+)){0,2}(-[0-9a-zA-Z]+(\.[0-9]+)?)?)?$`)
var rangeMatch = regexp.MustCompile(`^[0-9]+(\.([*xX]|[0-9]+)){0,2}\s+-\s+[0-9]+(\.([*xX]|[0-9]+)){0,2}$`)
var andMatch = regexp.MustCompile(`^(>=|>)\s*[0-9]+(\.([*xX]|[0-9]+)){0,2}\s+(<=|<)\s*[0-9]+(\.([*xX]|[0-9]+)){0,2}$`)
var opMatch = regexp.MustCompile(`^(>=|<=|>|<)`)
var anyMatch = regexp.MustCompile(`[*xX]`)
var betMatch = regexp.MustCompile(`\s+-\s+`)
var spaceMatch = regexp.MustCompile(`\s+<`)
var orMatch = regexp.MustCompile(`\s+\|\|\s+`)

// queryMap maps operator strings to queryOp values; queryInv is its
// inverse, indexed by queryOp.
var queryMap = map[string]queryOp{
	"=":  queryEq,
	"<":  queryLt,
	">":  queryGt,
	"<=": queryLe,
	">=": queryGe,
}
var queryInv = [...]string{"=", "<", ">", "<=", ">="}
// parseIncompl parses a possibly incomplete version string ("1", "1.2",
// "1.2.x") by cutting at the first wildcard and zero-padding the missing
// minor/patch components.
func parseIncompl(str string) *semver.Version {
	str = trimX(str)
	if str == "" {
		str = "0"
	}
	// pad with ".0" until there are three dot-separated components
	str += strings.Repeat(".0", 2-strings.Count(str, "."))
	return Parse(str)
}
// parseCmpQuery parses a comparator expression such as ">=1.2" into a
// single bound with the corresponding operator.
func parseCmpQuery(str string) *singleBound {
	loc := opMatch.FindStringIndex(str)
	opStr := str[loc[0]:loc[1]]
	verStr := strings.Trim(str[loc[1]:], " ")
	return &singleBound{op: queryMap[opStr], ver: parseIncompl(verStr)}
}
// parseMisQuery parses a version with missing or wildcard components.
// A full three-component version is an exact match, fewer components
// become a half-open range over the missing parts, and an empty string
// matches everything (>=0.0.0).
func parseMisQuery(str string) Query {
	lower := parseIncompl(str)
	str = trimX(str)
	ver := strings.Split(str, ".")
	switch {
	case len(str) == 0:
		return &singleBound{op: queryGe, ver: lower}
	case len(ver) == 1:
		// "1" -> >=1.0.0 <2.0.0
		upper := &semver.Version{Major: lower.Major + 1}
		return &dualBound{
			lower: &singleBound{op: queryGe, ver: lower},
			upper: &singleBound{op: queryLt, ver: upper},
		}
	case len(ver) == 2:
		// "1.2" -> >=1.2.0 <1.3.0
		upper := &semver.Version{Major: lower.Major, Minor: lower.Minor + 1}
		return &dualBound{
			lower: &singleBound{op: queryGe, ver: lower},
			upper: &singleBound{op: queryLt, ver: upper},
		}
	case len(ver) == 3:
		return &singleBound{op: queryEq, ver: lower}
	default:
		return nil
	}
}
// parseTildeQuery parses a tilde range ("~1.2.3"): a complete version
// allows patch-level changes (>=1.2.3 <1.3.0); an incomplete one falls
// back to the missing-component rules.
func parseTildeQuery(str string) Query {
	// Parse the string once instead of switching on a second Parse call.
	lower := Parse(str)
	if lower == nil {
		return parseMisQuery(str)
	}
	upper := &semver.Version{Major: lower.Major, Minor: lower.Minor + 1}
	return &dualBound{
		lower: &singleBound{op: queryGe, ver: lower},
		upper: &singleBound{op: queryLt, ver: upper},
	}
}
// parseCaretQuery parses a caret range ("^1.2.3"): the upper bound is the
// next increment of the left-most non-zero component of the version.
func parseCaretQuery(str string) Query {
	lower := parseIncompl(str)
	str = trimX(str)
	ver := strings.Split(str, ".")
	switch {
	case len(str) == 0:
		return parseMisQuery(str)
	case len(ver) == 1:
		// "^1" -> >=1.0.0 <2.0.0
		upper := &semver.Version{Major: lower.Major + 1}
		return &dualBound{
			lower: &singleBound{op: queryGe, ver: lower},
			upper: &singleBound{op: queryLt, ver: upper},
		}
	case len(ver) == 2:
		// "^1.2" -> <2.0.0; "^0.2" -> <0.3.0
		upper := &semver.Version{}
		if lower.Major == 0 {
			upper.Minor = lower.Minor + 1
		} else {
			upper.Major = lower.Major + 1
		}
		return &dualBound{
			lower: &singleBound{op: queryGe, ver: lower},
			upper: &singleBound{op: queryLt, ver: upper},
		}
	case len(ver) == 3:
		// "^1.2.3" -> <2.0.0; "^0.2.3" -> <0.3.0; "^0.0.3" -> <0.0.4
		upper := &semver.Version{}
		if lower.Major == 0 {
			if lower.Minor == 0 {
				upper.Patch = lower.Patch + 1
			} else {
				upper.Minor = lower.Minor + 1
			}
		} else {
			upper.Major = lower.Major + 1
		}
		return &dualBound{
			lower: &singleBound{op: queryGe, ver: lower},
			upper: &singleBound{op: queryLt, ver: upper},
		}
	default:
		return nil
	}
}
// parseRangeQuery parses a hyphen range "1.2 - 2.3.4". The lower bound is
// inclusive; the upper bound is inclusive for a complete version and
// excludes the next major/minor version for an incomplete one.
func parseRangeQuery(str string) *dualBound {
	bounds := betMatch.Split(str, -1)
	lower := parseIncompl(bounds[0])
	upper := bounds[1]
	if Parse(upper) != nil {
		return &dualBound{
			lower: &singleBound{op: queryGe, ver: lower},
			upper: &singleBound{op: queryLe, ver: Parse(upper)},
		}
	}
	upper = trimX(upper)
	ver := parseIncompl(upper)
	// incomplete upper bound: exclude the next major/minor version
	switch strings.Count(upper, ".") {
	case 0:
		ver.Major++
	case 1:
		ver.Minor++
	}
	return &dualBound{
		lower: &singleBound{op: queryGe, ver: lower},
		upper: &singleBound{op: queryLt, ver: ver},
	}
}
// parseAndQuery parses a space-separated comparator pair such as
// ">=1.0 <2.0" into a dual bound.
func parseAndQuery(str string) *dualBound {
	bounds := spaceMatch.Split(str, -1)
	return &dualBound{
		lower: parseCmpQuery(bounds[0]),
		// the "<" consumed by spaceMatch's split is restored here
		upper: parseCmpQuery("<" + bounds[1]),
	}
}
// parseOrQuery parses "||"-separated alternatives; it returns nil when any
// alternative fails to parse.
func parseOrQuery(str string) queryList {
	queries := orMatch.Split(str, -1)
	ql := make(queryList, 0, len(queries))
	for _, query := range queries {
		if q := MakeQuery(query); q != nil {
			ql = append(ql, q)
		} else {
			return nil
		}
	}
	return ql
}
// MakeQuery parses an npm-style version-range expression (comparators,
// wildcard/incomplete versions, tilde, caret, hyphen ranges, space-ANDed
// pairs and "||"-ORed alternatives) into a Query.
// It returns nil for unrecognized syntax.
func MakeQuery(str string) Query {
	// should never be called if valid Version passed
	if Parse(str) != nil {
		return &singleBound{ver: Parse(str), op: queryEq}
	}
	switch {
	case cmpMatch.MatchString(str):
		return parseCmpQuery(str)
	case misMatch.MatchString(str):
		return parseMisQuery(stripEqV(str))
	case tildeMatch.MatchString(str):
		return parseTildeQuery(stripEqV(str[1:]))
	case caretMatch.MatchString(str):
		return parseCaretQuery(stripEqV(str[1:]))
	case rangeMatch.MatchString(str):
		return parseRangeQuery(str)
	case andMatch.MatchString(str):
		return parseAndQuery(str)
	case orMatch.MatchString(str):
		return parseOrQuery(str)
	default:
		return nil
	}
}
package main
import (
"fmt"
)
// position is a 2D grid coordinate. X grows eastward; Y grows southward
// (N decrements Y, S increments Y).
type position struct {
	X int
	Y int
}
// E returns the neighbor one cell east (X+1).
func (pos position) E() position {
	return position{pos.X + 1, pos.Y}
}

// SE returns the neighbor one cell south-east (X+1, Y+1).
func (pos position) SE() position {
	return position{pos.X + 1, pos.Y + 1}
}

// NE returns the neighbor one cell north-east (X+1, Y-1).
func (pos position) NE() position {
	return position{pos.X + 1, pos.Y - 1}
}

// N returns the neighbor one cell north (Y-1).
func (pos position) N() position {
	return position{pos.X, pos.Y - 1}
}

// S returns the neighbor one cell south (Y+1).
func (pos position) S() position {
	return position{pos.X, pos.Y + 1}
}

// W returns the neighbor one cell west (X-1).
func (pos position) W() position {
	return position{pos.X - 1, pos.Y}
}

// SW returns the neighbor one cell south-west (X-1, Y+1).
func (pos position) SW() position {
	return position{pos.X - 1, pos.Y + 1}
}

// NW returns the neighbor one cell north-west (X-1, Y-1).
func (pos position) NW() position {
	return position{pos.X - 1, pos.Y - 1}
}
// Distance returns the Manhattan (L1) distance between pos and to.
func (pos position) Distance(to position) int {
	deltaX := Abs(to.X - pos.X)
	deltaY := Abs(to.Y - pos.Y)
	return deltaX + deltaY
}
// MaxCardinalDist returns the Chebyshev distance between pos and to:
// the larger of the horizontal and vertical offsets.
func (pos position) MaxCardinalDist(to position) int {
	deltaX := Abs(to.X - pos.X)
	deltaY := Abs(to.Y - pos.Y)
	if deltaX > deltaY {
		return deltaX
	}
	return deltaY
}
// DistanceX returns the absolute horizontal offset between pos and to.
func (pos position) DistanceX(to position) int {
	deltaX := Abs(to.X - pos.X)
	return deltaX
}

// DistanceY returns the absolute vertical offset between pos and to.
func (pos position) DistanceY(to position) int {
	deltaY := Abs(to.Y - pos.Y)
	return deltaY
}
// direction is one of the 16 compass winds, enumerated counterclockwise
// starting at East, with NoDir as the zero value.
type direction int

const (
	NoDir direction = iota
	E
	ENE
	NE
	NNE
	N
	NNW
	NW
	WNW
	W
	WSW
	SW
	SSW
	S
	SSE
	SE
	ESE
)
// String implements fmt.Stringer, returning the compass abbreviation for
// the direction (the empty string for NoDir).
func (dir direction) String() (s string) {
	switch dir {
	case NoDir:
		s = ""
	case E:
		s = "E"
	case ENE:
		s = "ENE"
	case NE:
		s = "NE"
	case NNE:
		s = "NNE"
	case N:
		s = "N"
	case NNW:
		s = "NNW"
	case NW:
		s = "NW"
	case WNW:
		s = "WNW"
	case W:
		s = "W"
	case WSW:
		s = "WSW"
	case SW:
		s = "SW"
	case SSW:
		s = "SSW"
	case S:
		s = "S"
	case SSE:
		s = "SSE"
	case SE:
		s = "SE"
	case ESE:
		s = "ESE"
	}
	return s
}
// KeyToDir maps a movement action to its cardinal direction.
// Non-movement actions map to NoDir (the zero value).
func KeyToDir(k action) (dir direction) {
	switch k {
	case ActionW, ActionRunW:
		dir = W
	case ActionE, ActionRunE:
		dir = E
	case ActionS, ActionRunS:
		dir = S
	case ActionN, ActionRunN:
		dir = N
	}
	return dir
}
// To returns the cell one step from pos in direction dir. Secondary
// intercardinal directions collapse onto the nearest of the eight
// neighbors (e.g. ENE and ESE step east). NoDir returns pos unchanged.
func (pos position) To(dir direction) position {
	to := pos
	switch dir {
	case E, ENE, ESE:
		to = pos.E()
	case NE:
		to = pos.NE()
	case NNE, N, NNW:
		to = pos.N()
	case NW:
		to = pos.NW()
	case WNW, W, WSW:
		to = pos.W()
	case SW:
		to = pos.SW()
	case SSW, S, SSE:
		to = pos.S()
	case SE:
		to = pos.SE()
	}
	return to
}
// Dir returns the 16-wind compass direction pointing from `from` to pos.
// It panics when the two positions are equal.
func (pos position) Dir(from position) direction {
	deltaX := Abs(pos.X - from.X)
	deltaY := Abs(pos.Y - from.Y)
	switch {
	case pos.X > from.X && pos.Y == from.Y:
		return E
	case pos.X > from.X && pos.Y < from.Y:
		// north-east quadrant: refine by which offset dominates
		switch {
		case deltaX > deltaY:
			return ENE
		case deltaX == deltaY:
			return NE
		default:
			return NNE
		}
	case pos.X == from.X && pos.Y < from.Y:
		return N
	case pos.X < from.X && pos.Y < from.Y:
		switch {
		case deltaY > deltaX:
			return NNW
		case deltaX == deltaY:
			return NW
		default:
			return WNW
		}
	case pos.X < from.X && pos.Y == from.Y:
		return W
	case pos.X < from.X && pos.Y > from.Y:
		switch {
		case deltaX > deltaY:
			return WSW
		case deltaX == deltaY:
			return SW
		default:
			return SSW
		}
	case pos.X == from.X && pos.Y > from.Y:
		return S
	case pos.X > from.X && pos.Y > from.Y:
		switch {
		case deltaY > deltaX:
			return SSE
		case deltaX == deltaY:
			return SE
		default:
			return ESE
		}
	default:
		// only reachable when pos == from
		panic(fmt.Sprintf("internal error: invalid position:%+v-%+v", pos, from))
	}
}
// Parents appends to p the one or two neighbor cells of pos that precede it
// on a path arriving from the direction of `from`, and returns the slice.
func (pos position) Parents(from position, p []position) []position {
	switch pos.Dir(from) {
	case E:
		p = append(p, pos.W())
	case ENE:
		p = append(p, pos.W(), pos.SW())
	case NE:
		p = append(p, pos.SW())
	case NNE:
		p = append(p, pos.S(), pos.SW())
	case N:
		p = append(p, pos.S())
	case NNW:
		p = append(p, pos.S(), pos.SE())
	case NW:
		p = append(p, pos.SE())
	case WNW:
		p = append(p, pos.E(), pos.SE())
	case W:
		p = append(p, pos.E())
	case WSW:
		p = append(p, pos.E(), pos.NE())
	case SW:
		p = append(p, pos.NE())
	case SSW:
		p = append(p, pos.N(), pos.NE())
	case S:
		p = append(p, pos.N())
	case SSE:
		p = append(p, pos.N(), pos.NW())
	case SE:
		p = append(p, pos.NW())
	case ESE:
		p = append(p, pos.W(), pos.NW())
	}
	return p
}
// RandomNeighbor returns a random neighbor of pos; when diag is true the
// diagonal neighbors are also candidates.
func (pos position) RandomNeighbor(diag bool) position {
	if diag {
		return pos.RandomNeighborDiagonals()
	}
	return pos.RandomNeighborCardinal()
}
// RandomNeighborDiagonals returns a random neighbor of pos drawn from all
// eight cardinal and diagonal neighbors, with a weighted draw: 1/8 of the
// time from the four cardinals, 1/8 from E/W only, and 6/8 from the four
// diagonals.
func (pos position) RandomNeighborDiagonals() position {
	neighbors := [8]position{pos.E(), pos.W(), pos.N(), pos.S(), pos.NE(), pos.NW(), pos.SE(), pos.SW()}
	var r int
	switch RandInt(8) {
	case 0:
		r = RandInt(len(neighbors[0:4]))
	case 1:
		r = RandInt(len(neighbors[0:2]))
	default:
		// BUG FIX: the offset into the diagonal half of the array was
		// missing, so the diagonal neighbors (indices 4-7) built above
		// could never be returned.
		r = 4 + RandInt(len(neighbors[4:]))
	}
	return neighbors[r]
}
// RandomNeighborCardinal returns a random cardinal neighbor of pos.
// Horizontal moves are favored: half the time the draw is restricted to
// E/W, otherwise it is uniform over all four neighbors.
func (pos position) RandomNeighborCardinal() position {
	neighbors := [4]position{pos.E(), pos.W(), pos.N(), pos.S()}
	var r int
	switch RandInt(4) {
	case 0, 1:
		r = RandInt(len(neighbors[0:2]))
	default:
		r = RandInt(len(neighbors))
	}
	return neighbors[r]
}
// idxtopos converts a row-major linear map index back to a position.
func idxtopos(i int) position {
	return position{i % DungeonWidth, i / DungeonWidth}
}

// idx converts a position to its row-major linear index.
func (pos position) idx() int {
	return pos.Y*DungeonWidth + pos.X
}

// valid reports whether pos lies inside the dungeon bounds.
func (pos position) valid() bool {
	return pos.Y >= 0 && pos.Y < DungeonHeight && pos.X >= 0 && pos.X < DungeonWidth
}
// Laterals returns the two cells flanking pos on either side of the travel
// direction dir (empty for directions outside the eight main winds).
func (pos position) Laterals(dir direction) []position {
	switch dir {
	case E, ENE, ESE:
		return []position{pos.NE(), pos.SE()}
	case NE:
		return []position{pos.E(), pos.N()}
	case N, NNE, NNW:
		return []position{pos.NW(), pos.NE()}
	case NW:
		return []position{pos.W(), pos.N()}
	case W, WNW, WSW:
		return []position{pos.SW(), pos.NW()}
	case SW:
		return []position{pos.W(), pos.S()}
	case S, SSW, SSE:
		return []position{pos.SW(), pos.SE()}
	case SE:
		return []position{pos.S(), pos.E()}
	default:
		// should not happen
		return []position{}
	}
}
// InViewCone reports whether the position to, seen from from, falls
// within the view cone facing dir. The position itself, adjacent cells
// (distance <= 1) and the exact facing direction are always in the
// cone; otherwise the direction from from to to must be one of the four
// directions adjacent to dir.
func (dir direction) InViewCone(from, to position) bool {
	if to == from {
		return true
	}
	d := to.Dir(from)
	if d == dir || from.Distance(to) <= 1 {
		return true
	}
	var cone [4]direction
	switch dir {
	case E:
		cone = [4]direction{ESE, ENE, NE, SE}
	case NE:
		cone = [4]direction{ENE, NNE, N, E}
	case N:
		cone = [4]direction{NNE, NNW, NE, NW}
	case NW:
		cone = [4]direction{NNW, WNW, N, W}
	case W:
		cone = [4]direction{WNW, WSW, NW, SW}
	case SW:
		cone = [4]direction{WSW, SSW, W, S}
	case S:
		cone = [4]direction{SSW, SSE, SW, SE}
	case SE:
		cone = [4]direction{SSE, ESE, S, E}
	default:
		// secondary facing directions have no cone of their own
		return false
	}
	for _, c := range cone {
		if d == c {
			return true
		}
	}
	return false
}
// alternateDirs lists the eight concrete directions used as a random
// fallback when rotating a direction outside that set (see Left/Right).
var alternateDirs = []direction{E, NE, N, NW, W, SW, S, SE}
// Left returns the direction obtained by rotating dir 45 degrees
// counter-clockwise. Directions outside the eight handled ones fall
// back to a random entry of alternateDirs.
func (dir direction) Left() (d direction) {
	switch dir {
	case E:
		return NE
	case NE:
		return N
	case N:
		return NW
	case NW:
		return W
	case W:
		return SW
	case SW:
		return S
	case S:
		return SE
	case SE:
		return E
	}
	return alternateDirs[RandInt(len(alternateDirs))]
}
// Right returns the direction obtained by rotating dir 45 degrees
// clockwise (the inverse of Left). Directions outside the eight handled
// ones fall back to a random entry of alternateDirs.
func (dir direction) Right() (d direction) {
	switch dir {
	case E:
		d = SE
	case NE:
		d = E
	case N:
		d = NE
	case NW:
		d = N
	case W:
		d = NW
	case SW:
		d = W
	case S:
		d = SW
	case SE:
		d = S
	default:
		// secondary direction: pick a random concrete one
		d = alternateDirs[RandInt(len(alternateDirs))]
	}
	return d
} | pos.go | 0.653459 | 0.651895 | pos.go | starcoder |
package model
import (
"time"
)
// AggregationsConfig is used to configure aggregation behaviour.
// AggregationsConfig is used to configure aggregation behaviour.
type AggregationsConfig struct {
	// MemoryAggregationInterval is the length of a single interval, for
	// which the peak memory usage is computed.
	// Memory usage peaks are aggregated in multiples of this interval. In other words
	// there is one memory usage sample per interval (the maximum usage over that
	// interval).
	// Currently it is used as more of a counter for keeping the aggregate container state
	// relevant with the amount of samples gone in
	// MemoryAggregationInterval time.Duration

	// DaysToPreserveContainerState is the total number of days to preserve the
	// aggregate container state
	// DaysToPreserveContainerState int64

	// ThresholdMonitorTimeWindow is the time window for setting the local maxima of usage
	// of resources. This window helps in calculating a local maxima within it for a given
	// resource which in turn is used during the scale down to not recommend resources below it
	ThresholdMonitorTimeWindow time.Duration
	// ThresholdScaleDown is the scale down value multiple of the current usage of resource
	// above which the scale value is not altered
	ThresholdScaleDown float64
	// ThresholdScaleUp is the scale up value multiple of the current usage of resource
	// below which the scale value is not altered
	ThresholdScaleUp float64
	// ScaleDownSafetyFactor is the scale down safety lower margin of the current usage
	// which will act as the least value recommended during scale down
	ScaleDownSafetyFactor float64
	// ScaleUpFactor is the actual multiple by which the scale up recommendation of the
	// resource based on current usage is recommended
	ScaleUpFactor float64
	// ThresholdNumCrashes is the minimum number of crashes that is withstood before doubling
	// both CPU and Memory resource based on the current usage
	ThresholdNumCrashes int
	// UpdateVpaStatus flag suggests whether to update the VPA status with the newer
	// recommendation value or just keep it read-only in annotations
	UpdateVpaStatus bool
}

const (
	// DefaultThresholdMonitorTimeWindow is the default time window to get local maxima of CPU and memory usage till the current time
	DefaultThresholdMonitorTimeWindow = time.Minute * 30
	// DefaultThresholdScaleUp is the default threshold value beyond which VPA scale up should kick in
	DefaultThresholdScaleUp = 0.7
	// DefaultThresholdScaleDown is the default threshold value beyond which VPA scale down should kick in
	DefaultThresholdScaleDown = 0.3
	// DefaultScaleDownSafetyFactor is the default factor by which VPA recommender should suggest scale down based on current usage
	DefaultScaleDownSafetyFactor = 1.2
	// DefaultScaleUpFactor is the default scaling factor which needs to applied for resource scale up
	DefaultScaleUpFactor = 2.0
	// DefaultThresholdNumCrashes is the default total number of crashes to withstand before doubling both CPU and memory irrespective of usage
	DefaultThresholdNumCrashes = 3
	// DefaultUpdateVpaStatus is false by default. This enables read-only mode for the VPA recommender and prevents updating details in status
	DefaultUpdateVpaStatus = false
)
// GetMemoryAggregationWindowLength returns the total length of the memory usage history aggregated by VPA.
// func (a *AggregationsConfig) GetMemoryAggregationWindowLength() time.Duration {
// return a.MemoryAggregationInterval * time.Duration(a.DaysToPreserveContainerState)
// }
// NewAggregationsConfig creates a new AggregationsConfig from the
// supplied parameters. Callers pass positionally; see the Default*
// constants for the usual values.
func NewAggregationsConfig(
	thresholdMonitorTimeWindow time.Duration,
	thresholdScaleUp float64,
	thresholdScaleDown float64,
	scaleDownSafetyFactor float64,
	scaleUpFactor float64,
	thresholdNumCrashes int,
	updateVpaStatus bool) *AggregationsConfig {
	return &AggregationsConfig{
		ThresholdMonitorTimeWindow: thresholdMonitorTimeWindow,
		ThresholdScaleUp:           thresholdScaleUp,
		ThresholdScaleDown:         thresholdScaleDown,
		ScaleDownSafetyFactor:      scaleDownSafetyFactor,
		ScaleUpFactor:              scaleUpFactor,
		ThresholdNumCrashes:        thresholdNumCrashes,
		UpdateVpaStatus:            updateVpaStatus,
	}
}
// aggregationsConfig holds the process-wide configuration singleton;
// access it through GetAggregationsConfig.
var aggregationsConfig *AggregationsConfig

// GetAggregationsConfig gets the aggregations config. Initializes to default values if not initialized already.
// NOTE(review): the lazy initialization is not synchronized — concurrent
// first calls may race; confirm single-goroutine use at startup.
func GetAggregationsConfig() *AggregationsConfig {
	if aggregationsConfig == nil {
		aggregationsConfig = NewAggregationsConfig(DefaultThresholdMonitorTimeWindow, DefaultThresholdScaleUp, DefaultThresholdScaleDown, DefaultScaleDownSafetyFactor, DefaultScaleUpFactor, DefaultThresholdNumCrashes, DefaultUpdateVpaStatus)
	}
	return aggregationsConfig
}
// InitializeAggregationsConfig initializes the global aggregations configuration. Not thread-safe.
// Overwrites any previously initialized configuration.
func InitializeAggregationsConfig(config *AggregationsConfig) {
	aggregationsConfig = config
} | pkg/recommender/model/aggregations_config.go | 0.71113 | 0.412294 | aggregations_config.go | starcoder |
package packed
// Efficient sequential read/write of packed integers.
// BulkOperationPacked5 specializes BulkOperationPacked for 5 bits per value.
type BulkOperationPacked5 struct {
	*BulkOperationPacked
}

// newBulkOperationPacked5 returns a BulkOperation that decodes streams
// of packed 5-bit integers.
func newBulkOperationPacked5() BulkOperation {
	return &BulkOperationPacked5{newBulkOperationPacked(5)}
}
// decodeLongToInt unpacks 5-bit values from 64-bit blocks into int32s.
// Each iteration consumes 5 blocks (320 bits) and produces 64 values,
// read as consecutive big-endian 5-bit fields; bits left over at a
// block boundary are stitched together with the next block.
func (op *BulkOperationPacked5) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		// rem holds remBits (< 5) bits carried over from the previous block
		rem, remBits := uint64(0), uint(0)
		for k := 0; k < 5; k++ {
			b := uint64(blocks[blocksOffset])
			blocksOffset++
			consumed := uint(0)
			if remBits > 0 {
				// first value spans the block boundary
				need := 5 - remBits
				values[valuesOffset] = int32((rem << need) | (b >> (64 - need)))
				valuesOffset++
				consumed = need
			}
			// whole 5-bit fields contained in this block
			for consumed+5 <= 64 {
				values[valuesOffset] = int32((b >> (64 - consumed - 5)) & 31)
				valuesOffset++
				consumed += 5
			}
			remBits = 64 - consumed
			rem = b & ((1 << remBits) - 1)
		}
	}
}
// DecodeByteToInt unpacks 5-bit values from byte blocks into int32s.
// Each iteration consumes 5 bytes (40 bits) and produces 8 values, read
// as consecutive big-endian 5-bit fields; bits left over at a byte
// boundary are stitched together with the next byte.
func (op *BulkOperationPacked5) DecodeByteToInt(blocks []byte, values []int32, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		// rem holds remBits (< 5) bits carried over from the previous byte
		rem, remBits := uint64(0), uint(0)
		for k := 0; k < 5; k++ {
			b := uint64(blocks[blocksOffset])
			blocksOffset++
			consumed := uint(0)
			if remBits > 0 {
				// first value spans the byte boundary
				need := 5 - remBits
				values[valuesOffset] = int32((rem << need) | (b >> (8 - need)))
				valuesOffset++
				consumed = need
			}
			// whole 5-bit fields contained in this byte
			for consumed+5 <= 8 {
				values[valuesOffset] = int32((b >> (8 - consumed - 5)) & 31)
				valuesOffset++
				consumed += 5
			}
			remBits = 8 - consumed
			rem = b & ((1 << remBits) - 1)
		}
	}
}
// DecodeLongToLong unpacks 5-bit values from 64-bit blocks into int64s.
// Each iteration consumes 5 blocks (320 bits) and produces 64 values,
// read as consecutive big-endian 5-bit fields; bits left over at a
// block boundary are stitched together with the next block.
func (op *BulkOperationPacked5) DecodeLongToLong(blocks []int64, values []int64, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		// rem holds remBits (< 5) bits carried over from the previous block
		rem, remBits := uint64(0), uint(0)
		for k := 0; k < 5; k++ {
			b := uint64(blocks[blocksOffset])
			blocksOffset++
			consumed := uint(0)
			if remBits > 0 {
				// first value spans the block boundary
				need := 5 - remBits
				values[valuesOffset] = int64((rem << need) | (b >> (64 - need)))
				valuesOffset++
				consumed = need
			}
			// whole 5-bit fields contained in this block
			for consumed+5 <= 64 {
				values[valuesOffset] = int64((b >> (64 - consumed - 5)) & 31)
				valuesOffset++
				consumed += 5
			}
			remBits = 64 - consumed
			rem = b & ((1 << remBits) - 1)
		}
	}
}
// decodeByteToLong unpacks 5-bit values from byte blocks into int64s.
// Each iteration consumes 5 bytes (40 bits) and produces 8 values, read
// as consecutive big-endian 5-bit fields; bits left over at a byte
// boundary are stitched together with the next byte.
func (op *BulkOperationPacked5) decodeByteToLong(blocks []byte, values []int64, iterations int) {
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		// rem holds remBits (< 5) bits carried over from the previous byte
		rem, remBits := uint64(0), uint(0)
		for k := 0; k < 5; k++ {
			b := uint64(blocks[blocksOffset])
			blocksOffset++
			consumed := uint(0)
			if remBits > 0 {
				// first value spans the byte boundary
				need := 5 - remBits
				values[valuesOffset] = int64((rem << need) | (b >> (8 - need)))
				valuesOffset++
				consumed = need
			}
			// whole 5-bit fields contained in this byte
			for consumed+5 <= 8 {
				values[valuesOffset] = int64((b >> (8 - consumed - 5)) & 31)
				valuesOffset++
				consumed += 5
			}
			remBits = 8 - consumed
			rem = b & ((1 << remBits) - 1)
		}
	}
} | core/util/packed/bulkOperation5.go | 0.55447 | 0.769384 | bulkOperation5.go | starcoder |
package hbook
import "sort"
// indices for the 2D-binning overflows: the eight out-of-range regions
// surrounding the binned area, starting north-west and proceeding
// clockwise. Values start at 1 so they can be negated and distinguished
// from in-range bin indices (see coordToIndex/fill).
const (
	BngNW int = 1 + iota
	BngN
	BngNE
	BngE
	BngSE
	BngS
	BngSW
	BngW
)
// Binning2D describes the binning of a 2D histogram: the in-range bins,
// the distribution of all fills, the eight out-of-range distributions
// and the bin edges along each axis.
type Binning2D struct {
	Bins     []Bin2D    // in-range bins, row-major: index = iy*Nx + ix
	Dist     Dist2D     // distribution of every fill, in- and out-of-range
	Outflows [8]Dist2D  // out-of-range distributions, indexed by Bng* - 1
	XRange   Range      // overall range of the X axis
	YRange   Range      // overall range of the Y axis
	Nx       int        // number of bins along X
	Ny       int        // number of bins along Y
	XEdges   []Bin1D    // per-bin edges along X
	YEdges   []Bin1D    // per-bin edges along Y
}
// newBinning2D builds a uniform nx-by-ny binning covering
// [xlow, xhigh] x [ylow, yhigh]. It panics when either axis range is
// inverted/empty or when a bin count is not positive.
func newBinning2D(nx int, xlow, xhigh float64, ny int, ylow, yhigh float64) Binning2D {
	if xlow >= xhigh {
		panic(errInvalidXAxis)
	}
	if ylow >= yhigh {
		panic(errInvalidYAxis)
	}
	if nx <= 0 {
		panic(errEmptyXAxis)
	}
	if ny <= 0 {
		panic(errEmptyYAxis)
	}
	bng := Binning2D{
		Bins:   make([]Bin2D, nx*ny),
		XRange: Range{Min: xlow, Max: xhigh},
		YRange: Range{Min: ylow, Max: yhigh},
		Nx:     nx,
		Ny:     ny,
		XEdges: make([]Bin1D, nx),
		YEdges: make([]Bin1D, ny),
	}
	xwidth := bng.XRange.Width() / float64(bng.Nx)
	ywidth := bng.YRange.Width() / float64(bng.Ny)
	xmin := bng.XRange.Min
	ymin := bng.YRange.Min
	for ix := range bng.XEdges {
		xbin := &bng.XEdges[ix]
		xbin.Range.Min = xmin + float64(ix)*xwidth
		xbin.Range.Max = xmin + float64(ix+1)*xwidth
		for iy := range bng.YEdges {
			// note: the YEdges ranges are (re)assigned on every ix
			// iteration; harmless since the values are identical.
			ybin := &bng.YEdges[iy]
			ybin.Range.Min = ymin + float64(iy)*ywidth
			ybin.Range.Max = ymin + float64(iy+1)*ywidth
			// row-major layout: bin (ix, iy) lives at iy*nx + ix
			i := iy*nx + ix
			bin := &bng.Bins[i]
			bin.XRange.Min = xbin.Range.Min
			bin.XRange.Max = xbin.Range.Max
			bin.YRange.Min = ybin.Range.Min
			bin.YRange.Max = ybin.Range.Max
		}
	}
	return bng
}
// newBinning2DFromEdges builds a (possibly non-uniform) 2D binning from
// explicit, sorted edge slices: n edges describe n-1 bins per axis. It
// panics when an axis has fewer than two edges, is not sorted, or
// contains duplicate consecutive edges.
func newBinning2DFromEdges(xedges, yedges []float64) Binning2D {
	if len(xedges) <= 1 {
		panic(errShortXAxis)
	}
	if !sort.IsSorted(sort.Float64Slice(xedges)) {
		panic(errNotSortedXAxis)
	}
	if len(yedges) <= 1 {
		panic(errShortYAxis)
	}
	if !sort.IsSorted(sort.Float64Slice(yedges)) {
		panic(errNotSortedYAxis)
	}
	var (
		nx    = len(xedges) - 1
		ny    = len(yedges) - 1
		xlow  = xedges[0]
		xhigh = xedges[nx]
		ylow  = yedges[0]
		yhigh = yedges[ny]
	)
	bng := Binning2D{
		Bins:   make([]Bin2D, nx*ny),
		XRange: Range{Min: xlow, Max: xhigh},
		YRange: Range{Min: ylow, Max: yhigh},
		Nx:     nx,
		Ny:     ny,
		XEdges: make([]Bin1D, nx),
		YEdges: make([]Bin1D, ny),
	}
	for ix, xmin := range xedges[:nx] {
		xmax := xedges[ix+1]
		if xmin == xmax {
			// edges are sorted, so equality means a duplicate edge
			panic(errDupEdgesXAxis)
		}
		bng.XEdges[ix].Range.Min = xmin
		bng.XEdges[ix].Range.Max = xmax
		for iy, ymin := range yedges[:ny] {
			ymax := yedges[iy+1]
			if ymin == ymax {
				panic(errDupEdgesYAxis)
			}
			// row-major layout: bin (ix, iy) lives at iy*nx + ix
			i := iy*nx + ix
			bin := &bng.Bins[i]
			bin.XRange.Min = xmin
			bin.XRange.Max = xmax
			bin.YRange.Min = ymin
			bin.YRange.Max = ymax
		}
	}
	for iy, ymin := range yedges[:ny] {
		ymax := yedges[iy+1]
		bng.YEdges[iy].Range.Min = ymin
		bng.YEdges[iy].Range.Max = ymax
	}
	return bng
}
// entries returns the number of entries recorded in the overall distribution.
func (bng *Binning2D) entries() int64 {
	return bng.Dist.Entries()
}

// effEntries returns the effective number of entries in the overall distribution.
func (bng *Binning2D) effEntries() float64 {
	return bng.Dist.EffEntries()
}

// xMin returns the low edge of the X-axis
func (bng *Binning2D) xMin() float64 {
	return bng.XRange.Min
}

// xMax returns the high edge of the X-axis
func (bng *Binning2D) xMax() float64 {
	return bng.XRange.Max
}

// yMin returns the low edge of the Y-axis
func (bng *Binning2D) yMin() float64 {
	return bng.YRange.Min
}

// yMax returns the high edge of the Y-axis
func (bng *Binning2D) yMax() float64 {
	return bng.YRange.Max
}
// fill records the weighted sample (x, y, w): the global distribution
// is always updated, then the sample is routed to the matching in-range
// bin, to one of the eight outflow distributions, or (for the "gap"
// sentinel) recorded nowhere else.
func (bng *Binning2D) fill(x, y, w float64) {
	idx := bng.coordToIndex(x, y)
	bng.Dist.fill(x, y, w)
	if idx == len(bng.Bins) {
		// GAP bin
		return
	}
	if idx < 0 {
		// negative indices encode the outflow regions (see coordToIndex)
		bng.Outflows[-idx-1].fill(x, y, w)
		return
	}
	bng.Bins[idx].fill(x, y, w)
}
// coordToIndex maps (x, y) to a bin index: a non-negative value below
// len(bng.Bins) addresses an in-range bin (row-major, iy*Nx + ix);
// len(bng.Bins) flags the "gap" case (both axis lookups returned
// presumably-gap sentinels equal to Nx/Ny — confirm against
// Bin1Ds.IndexOf); negative values -Bng* encode the eight outflow
// regions.
func (bng *Binning2D) coordToIndex(x, y float64) int {
	ix := Bin1Ds(bng.XEdges).IndexOf(x)
	iy := Bin1Ds(bng.YEdges).IndexOf(y)
	switch {
	case ix == bng.Nx && iy == bng.Ny: // GAP
		return len(bng.Bins)
	case ix == OverflowBin1D && iy == OverflowBin1D:
		return -BngNE
	case ix == OverflowBin1D && iy == UnderflowBin1D:
		return -BngSE
	case ix == UnderflowBin1D && iy == UnderflowBin1D:
		return -BngSW
	case ix == UnderflowBin1D && iy == OverflowBin1D:
		return -BngNW
	case ix == OverflowBin1D:
		return -BngE
	case ix == UnderflowBin1D:
		return -BngW
	case iy == OverflowBin1D:
		return -BngN
	case iy == UnderflowBin1D:
		return -BngS
	}
	return iy*bng.Nx + ix
} | hbook/binning2d.go | 0.563378 | 0.562237 | binning2d.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// ExpressionEvaluationDetails models the evaluation of a (possibly
// nested) expression; kiota-generated serialization model.
type ExpressionEvaluationDetails struct {
	// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
	additionalData map[string]interface{}
	// Represents expression which has been evaluated.
	expression *string
	// Represents the details of the evaluation of the sub-expressions.
	expressionEvaluationDetails []ExpressionEvaluationDetailsable
	// Represents the value of the result of the current expression.
	expressionResult *bool
	// Defines the name of the property and the value of that property.
	propertyToEvaluate PropertyToEvaluateable
}
// NewExpressionEvaluationDetails instantiates a new expressionEvaluationDetails and sets the default values.
func NewExpressionEvaluationDetails() *ExpressionEvaluationDetails {
	result := &ExpressionEvaluationDetails{}
	result.SetAdditionalData(make(map[string]interface{}))
	return result
}

// CreateExpressionEvaluationDetailsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateExpressionEvaluationDetailsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) (i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
	return NewExpressionEvaluationDetails(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ExpressionEvaluationDetails) GetAdditionalData() map[string]interface{} {
	if m == nil {
		return nil
	}
	return m.additionalData
}

// GetExpression gets the expression property value. Represents expression which has been evaluated.
func (m *ExpressionEvaluationDetails) GetExpression() *string {
	if m == nil {
		return nil
	}
	return m.expression
}

// GetExpressionEvaluationDetails gets the expressionEvaluationDetails property value. Represents the details of the evaluation of the expression.
func (m *ExpressionEvaluationDetails) GetExpressionEvaluationDetails() []ExpressionEvaluationDetailsable {
	if m == nil {
		return nil
	}
	return m.expressionEvaluationDetails
}

// GetExpressionResult gets the expressionResult property value. Represents the value of the result of the current expression.
func (m *ExpressionEvaluationDetails) GetExpressionResult() *bool {
	if m == nil {
		return nil
	}
	return m.expressionResult
}
// GetFieldDeserializers returns the deserialization information for the
// current model: one parse callback per serialized field name. Each
// callback ignores absent (nil) values.
func (m *ExpressionEvaluationDetails) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
	res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
	res["expression"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetExpression(val)
		}
		return nil
	}
	res["expressionEvaluationDetails"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetCollectionOfObjectValues(CreateExpressionEvaluationDetailsFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			// re-type the parsed collection to the model interface
			res := make([]ExpressionEvaluationDetailsable, len(val))
			for i, v := range val {
				res[i] = v.(ExpressionEvaluationDetailsable)
			}
			m.SetExpressionEvaluationDetails(res)
		}
		return nil
	}
	res["expressionResult"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetBoolValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetExpressionResult(val)
		}
		return nil
	}
	res["propertyToEvaluate"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetObjectValue(CreatePropertyToEvaluateFromDiscriminatorValue)
		if err != nil {
			return err
		}
		if val != nil {
			m.SetPropertyToEvaluate(val.(PropertyToEvaluateable))
		}
		return nil
	}
	return res
}
// GetPropertyToEvaluate gets the propertyToEvaluate property value. Defines the name of the property and the value of that property.
func (m *ExpressionEvaluationDetails) GetPropertyToEvaluate() PropertyToEvaluateable {
	if m == nil {
		return nil
	}
	return m.propertyToEvaluate
}
// Serialize writes the current object's fields to the given
// serialization writer, stopping at the first error. additionalData is
// written last so explicit fields take precedence.
func (m *ExpressionEvaluationDetails) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
	{
		err := writer.WriteStringValue("expression", m.GetExpression())
		if err != nil {
			return err
		}
	}
	if m.GetExpressionEvaluationDetails() != nil {
		// re-type the collection to the serialization interface
		cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetExpressionEvaluationDetails()))
		for i, v := range m.GetExpressionEvaluationDetails() {
			cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
		}
		err := writer.WriteCollectionOfObjectValues("expressionEvaluationDetails", cast)
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteBoolValue("expressionResult", m.GetExpressionResult())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteObjectValue("propertyToEvaluate", m.GetPropertyToEvaluate())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteAdditionalData(m.GetAdditionalData())
		if err != nil {
			return err
		}
	}
	return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ExpressionEvaluationDetails) SetAdditionalData(value map[string]interface{}) {
	if m == nil {
		return
	}
	m.additionalData = value
}

// SetExpression sets the expression property value. Represents expression which has been evaluated.
func (m *ExpressionEvaluationDetails) SetExpression(value *string) {
	if m == nil {
		return
	}
	m.expression = value
}

// SetExpressionEvaluationDetails sets the expressionEvaluationDetails property value. Represents the details of the evaluation of the expression.
func (m *ExpressionEvaluationDetails) SetExpressionEvaluationDetails(value []ExpressionEvaluationDetailsable) {
	if m == nil {
		return
	}
	m.expressionEvaluationDetails = value
}

// SetExpressionResult sets the expressionResult property value. Represents the value of the result of the current expression.
func (m *ExpressionEvaluationDetails) SetExpressionResult(value *bool) {
	if m == nil {
		return
	}
	m.expressionResult = value
}
// SetPropertyToEvaluate sets the propertyToEvaluate property value. Defines the name of the property and the value of that property.
func (m *ExpressionEvaluationDetails) SetPropertyToEvaluate(value PropertyToEvaluateable)() {
if m != nil {
m.propertyToEvaluate = value
}
} | models/expression_evaluation_details.go | 0.702428 | 0.491639 | expression_evaluation_details.go | starcoder |
package collections
import "fmt"
/*
Compare - This method will compare two collections to make sure they
are the same. The collection receiver is the master and represent the correct
data, the collection passed in as toTest represents the one we need to test.
*/
func (r *Collection) Compare(toTest *Collection) (bool, int, []string) {
	// Delegate to the package-level Compare with the receiver as master.
	return Compare(r, toTest)
}
/*
Compare - This function will compare two collections to make sure they
are the same. Collection correct is the master and represent the correct
data, collection toTest represents the one we need to test.
*/
// Compare checks the toTest collection field by field against the
// master collection correct. It returns whether they match, the number
// of mismatches found, and a human-readable line per field checked
// ("++" prefix for matches, "--" for mismatches).
func Compare(correct, toTest *Collection) (bool, int, []string) {
	problemsFound := 0
	details := make([]string, 0)

	// Check ID Value
	if toTest.ID != correct.ID {
		problemsFound++
		str := fmt.Sprintf("-- IDs Do Not Match: %s | %s", correct.ID, toTest.ID)
		details = append(details, str)
	} else {
		str := fmt.Sprintf("++ IDs Match: %s | %s", correct.ID, toTest.ID)
		details = append(details, str)
	}

	// Check Title Value
	if toTest.Title != correct.Title {
		problemsFound++
		str := fmt.Sprintf("-- Titles Do Not Match: %s | %s", correct.Title, toTest.Title)
		details = append(details, str)
	} else {
		str := fmt.Sprintf("++ Titles Match: %s | %s", correct.Title, toTest.Title)
		details = append(details, str)
	}

	// Check Description Value
	if toTest.Description != correct.Description {
		problemsFound++
		str := fmt.Sprintf("-- Descriptions Do Not Match: %s | %s", correct.Description, toTest.Description)
		details = append(details, str)
	} else {
		str := fmt.Sprintf("++ Descriptions Match: %s | %s", correct.Description, toTest.Description)
		details = append(details, str)
	}

	// Check Can Read Value
	if toTest.CanRead != correct.CanRead {
		problemsFound++
		str := fmt.Sprintf("-- Can Read Values Do Not Match: %t | %t", correct.CanRead, toTest.CanRead)
		details = append(details, str)
	} else {
		str := fmt.Sprintf("++ Can Read Values Match: %t | %t", correct.CanRead, toTest.CanRead)
		details = append(details, str)
	}

	// Check Can Write Value
	if toTest.CanWrite != correct.CanWrite {
		problemsFound++
		str := fmt.Sprintf("-- Can Write Values Do Not Match: %t | %t", correct.CanWrite, toTest.CanWrite)
		details = append(details, str)
	} else {
		str := fmt.Sprintf("++ Can Write Values Match: %t | %t", correct.CanWrite, toTest.CanWrite)
		details = append(details, str)
	}

	// Check Media Type Property Length
	// BUG FIX: the messages previously passed the []string slices
	// themselves to the %d verbs ("Lengths ... %d | %d", correct.MediaTypes,
	// toTest.MediaTypes), which prints %!d(string=...) garbage instead of
	// the lengths. Pass len(...) as the message promises.
	if len(toTest.MediaTypes) != len(correct.MediaTypes) {
		problemsFound++
		str := fmt.Sprintf("-- Media Type Lengths Do Not Match: %d | %d", len(correct.MediaTypes), len(toTest.MediaTypes))
		details = append(details, str)
	} else {
		str := fmt.Sprintf("++ Media Type Lengths Match: %d | %d", len(correct.MediaTypes), len(toTest.MediaTypes))
		details = append(details, str)

		// If lengths are the same, then check each value
		for index := range correct.MediaTypes {
			if toTest.MediaTypes[index] != correct.MediaTypes[index] {
				problemsFound++
				str := fmt.Sprintf("-- Media Types Do Not Match: %s | %s", correct.MediaTypes[index], toTest.MediaTypes[index])
				details = append(details, str)
			} else {
				str := fmt.Sprintf("++ Media Types Match: %s | %s", correct.MediaTypes[index], toTest.MediaTypes[index])
				details = append(details, str)
			}
		}
	}

	if problemsFound > 0 {
		return false, problemsFound, details
	}
	return true, 0, details
} | objects/taxii/collections/compare.go | 0.78789 | 0.492066 | compare.go | starcoder |
package expect
import (
. "fmt"
"reflect"
)
// Have is one link in a fluent assertion chain over a single value.
type Have struct {
	Else   *Else       // chain terminator; records whether any assertion failed
	And    *Have       // self-reference so assertions chain fluently (h.And.Len(...))
	t      T           // test handle used for fatal/reporting calls
	actual interface{} // the value under assertion
	assert bool        // true for positive assertions, false for negated ones
}

// newHave builds a Have around `actual` with the given assertion
// polarity, wiring And back to itself so call chains read naturally.
func newHave(t T, e *Else, actual interface{}, assert bool) *Have {
	have := &Have{
		Else:   e,
		t:      t,
		actual: actual,
		assert: assert,
	}
	have.And = have
	return have
}
// Len asserts that the value has length of the given number.
// Valid only for Array, Slice, Map or String; any other kind is fatal.
func (h *Have) Len(i int) *Have {
	msg := h.msg(Sprintf("length of %v", i))
	// length is a package helper; ok is false for unsupported kinds.
	if l, ok := length(h.actual); ok {
		// "l == i != h.assert" XORs the comparison result with the
		// assertion polarity: fail when the outcome differs from what
		// is being asserted.
		if l == i != h.assert {
			h.fail(2, msg)
		}
	} else {
		h.t.Fatal(invMsg("Array, Slice, Map or String"))
	}
	return h
}
// Cap asserts that the value has capacity of the given number.
// Valid only for Array, Slice or Chan; any other kind is fatal.
func (h *Have) Cap(i int) *Have {
	// FIX: this previously called Sprint with a format string, which
	// rendered the literal "%v" plus "%!(EXTRA int=...)" in failure
	// messages; Sprintf is required to interpolate i.
	msg := h.msg(Sprintf("capacity of %v", i))
	switch reflect.TypeOf(h.actual).Kind() {
	case reflect.Array, reflect.Slice, reflect.Chan:
		// XOR the comparison with the assertion polarity, matching Len.
		if reflect.ValueOf(h.actual).Cap() == i != h.assert {
			h.fail(2, msg)
		}
	default:
		h.t.Fatal(invMsg("Array, Slice or Chan"))
	}
	return h
}
// Key asserts that `key` exists on the given Map; an optional second
// argument also asserts the value stored under that key.
func (h *Have) Key(args ...interface{}) *Have {
	// Test also value
	testVal := len(args) > 1
	msg := Sprintf("key: %v", args[0])
	if testVal {
		msg += Sprintf(" with value: %v", args[1])
	}
	msg = h.msg(msg)
	switch reflect.TypeOf(h.actual).Kind() {
	case reflect.Map:
		v := reflect.ValueOf(h.actual)
		k := v.MapIndex(reflect.ValueOf(args[0]))
		// NOTE(review): when a value is supplied, key existence is
		// required regardless of polarity and h.assert is applied only
		// to the value comparison — confirm this asymmetry is intended.
		if (testVal && k.IsValid()) || k.IsValid() == h.assert {
			// Compare value
			if testVal && reflect.DeepEqual(k.Interface(), args[1]) != h.assert {
				h.fail(2, msg)
			}
		} else {
			h.fail(2, msg)
		}
	default:
		h.t.Fatal(invMsg("Map"))
	}
	return h
}

// Keys asserts that every one of `keys` exists on the given Map
// (or none of them, under negative polarity).
func (h *Have) Keys(args ...interface{}) *Have {
	msg := h.msg(Sprintf("keys: %v", args))
	switch reflect.TypeOf(h.actual).Kind() {
	case reflect.Map:
		v := reflect.ValueOf(h.actual)
		for _, k := range args {
			vk := v.MapIndex(reflect.ValueOf(k))
			// Each key's presence is checked against the polarity.
			if vk.IsValid() != h.assert {
				h.fail(2, msg)
			}
		}
	default:
		h.t.Fatal(invMsg("Map"))
	}
	return h
}

// Field asserts that `field` exists on the given Struct, and optionally
// that it holds the given value (mirrors Key for structs).
func (h *Have) Field(s string, args ...interface{}) *Have {
	// Test also value
	testVal := len(args) > 0
	msg := Sprintf("field: %v", s)
	if testVal {
		msg += Sprintf(" with value: %v", args[0])
	}
	msg = h.msg(msg)
	switch reflect.TypeOf(h.actual).Kind() {
	case reflect.Struct:
		v := reflect.ValueOf(h.actual)
		f := v.FieldByName(s)
		// NOTE(review): same polarity asymmetry as Key — when a value
		// is supplied, field existence itself is required.
		if (testVal && f.IsValid()) || f.IsValid() == h.assert {
			// Compare value
			if testVal && reflect.DeepEqual(f.Interface(), args[0]) != h.assert {
				h.fail(2, msg)
			}
		} else {
			h.fail(2, msg)
		}
	default:
		h.t.Fatal(invMsg("Struct"))
	}
	return h
}

// Fields asserts that every one of `fields` exists on the given Struct.
func (h *Have) Fields(args ...string) *Have {
	msg := h.msg(Sprintf("fields: %v", args))
	switch reflect.TypeOf(h.actual).Kind() {
	case reflect.Struct:
		v := reflect.ValueOf(h.actual)
		for _, f := range args {
			if v.FieldByName(f).IsValid() != h.assert {
				h.fail(2, msg)
			}
		}
	default:
		h.t.Fatal(invMsg("Struct"))
	}
	return h
}

// Method asserts that `method` exists on the given struct/ptr. Note a
// value receiver's methods are not visible through the Struct kind
// unless promoted; pointer values see the full method set.
func (h *Have) Method(m string) *Have {
	msg := h.msg(Sprintf("method: %v", m))
	switch reflect.TypeOf(h.actual).Kind() {
	case reflect.Struct, reflect.Ptr:
		v := reflect.ValueOf(h.actual)
		if v.MethodByName(m).IsValid() != h.assert {
			h.fail(2, msg)
		}
	default:
		h.t.Fatal(invMsg("Struct or Ptr"))
	}
	return h
}
func (h *Have) fail(callers int, msg string) {
h.Else.failed = true
fail(h.t, callers, msg)
}
func (h *Have) msg(s string) string {
return errMsg("to have")(h.actual, s, h.assert)
} | have.go | 0.647241 | 0.554169 | have.go | starcoder |
package matcher
import (
"errors"
"fmt"
"reflect"
"regexp"
"github.com/stretchr/testify/assert"
"github.com/swaggest/assertjson"
)
// Matcher determines if the actual matches the expectation.
type Matcher interface {
	// Match reports whether actual satisfies the expectation; the
	// error is for evaluation failures, not for plain mismatches.
	Match(actual interface{}) (bool, error)
	// Expected returns a human-readable description of the expectation.
	Expected() string
}

var _ Matcher = (*ExactMatcher)(nil)

// ExactMatcher matches by exact string.
type ExactMatcher struct {
	expected interface{}
}

// Expected returns the expectation.
func (m ExactMatcher) Expected() string {
	// strVal is a package helper: non-nil when the value is
	// string-like, nil otherwise — TODO confirm its exact contract.
	if v := strVal(m.expected); v != nil {
		return *v
	}
	return fmt.Sprintf("%+v", m.expected)
}

// Match determines if the actual is expected.
func (m ExactMatcher) Match(actual interface{}) (bool, error) {
	return assert.ObjectsAreEqual(m.expected, actual), nil
}

var _ Matcher = (*JSONMatcher)(nil)

// JSONMatcher matches by json with <ignore-diff> support.
type JSONMatcher struct {
	expected string
}

// Expected returns the expectation.
func (m JSONMatcher) Expected() string {
	return m.expected
}

// Match determines if the actual is expected.
func (m JSONMatcher) Match(actual interface{}) (bool, error) {
	// jsonVal renders the value as JSON bytes (package helper).
	actualBytes, err := jsonVal(actual)
	if err != nil {
		return false, err
	}
	// FailNotEqual returns nil on semantic JSON equality.
	return assertjson.FailNotEqual([]byte(m.expected), actualBytes) == nil, nil
}
var _ Matcher = (*RegexMatcher)(nil)

// RegexMatcher matches by regex.
type RegexMatcher struct {
	regexp *regexp.Regexp
}

// Expected returns the expectation.
func (m RegexMatcher) Expected() string {
	return m.regexp.String()
}

// Match determines if the actual is expected. Non-string-like values
// never match.
func (m RegexMatcher) Match(actual interface{}) (bool, error) {
	if v := strVal(actual); v != nil {
		return m.regexp.MatchString(*v), nil
	}
	return false, nil
}

var _ Matcher = (*LenMatcher)(nil)

// LenMatcher matches by the length of the value.
type LenMatcher struct {
	expected int
}

// Match determines if the actual is expected.
func (m LenMatcher) Match(actual interface{}) (_ bool, err error) {
	if actual == nil {
		return false, nil
	}
	// reflect's Len panics for kinds without a length; convert that
	// panic into an error instead of crashing the caller.
	defer func() {
		if r := recover(); r != nil {
			err = errors.New(recovered(r)) // nolint: goerr113
		}
	}()
	val := reflect.ValueOf(actual)
	if val.Type().Kind() == reflect.Ptr {
		// Dereference pointers and match against the pointee.
		return m.Match(val.Elem().Interface())
	}
	return val.Len() == m.expected, nil
}

// Expected returns the expectation.
func (m LenMatcher) Expected() string {
	return fmt.Sprintf("len is %d", m.expected)
}

var _ Matcher = (*EmptyMatcher)(nil)

// EmptyMatcher checks whether the value is empty (via the package-level
// isEmpty helper).
type EmptyMatcher struct{}

// Match determines if the actual is expected.
func (EmptyMatcher) Match(actual interface{}) (bool, error) {
	return isEmpty(actual), nil
}

// Expected returns the expectation.
func (EmptyMatcher) Expected() string {
	return "is empty"
}

var _ Matcher = (*NotEmptyMatcher)(nil)

// NotEmptyMatcher checks whether the value is not empty.
type NotEmptyMatcher struct{}

// Match determines if the actual is expected.
func (NotEmptyMatcher) Match(actual interface{}) (bool, error) {
	return !isEmpty(actual), nil
}

// Expected returns the expectation.
func (NotEmptyMatcher) Expected() string {
	return "is not empty"
}

var _ Matcher = (*Callback)(nil)

// Callback matches by calling a function that lazily produces the
// real matcher; the function is invoked on every use.
type Callback func() Matcher

// Expected returns the expectation.
func (m Callback) Expected() string {
	return m().Expected()
}

// Match determines if the actual is expected.
func (m Callback) Match(actual interface{}) (bool, error) {
	return m().Match(actual)
}

// Matcher returns the matcher.
func (m Callback) Matcher() Matcher {
	return m()
}
// Exact matches two objects by their exact values.
func Exact(expected interface{}) ExactMatcher {
	return ExactMatcher{expected: expected}
}

// Exactf matches two strings by the formatted expectation.
func Exactf(expected string, args ...interface{}) ExactMatcher {
	return ExactMatcher{expected: fmt.Sprintf(expected, args...)}
}

// JSON matches two json strings with <ignore-diff> support.
// NOTE: panics if `expected` cannot be rendered as JSON — intended for
// test setup where a bad expectation is a programmer error.
func JSON(expected interface{}) JSONMatcher {
	ex, err := jsonVal(expected)
	if err != nil {
		panic(err)
	}
	return JSONMatcher{expected: string(ex)}
}

// RegexPattern matches two strings by using regex.
// Panics (via MustCompile) on an invalid pattern.
func RegexPattern(pattern string) RegexMatcher {
	return RegexMatcher{regexp: regexp.MustCompile(pattern)}
}

// Regex matches two strings by using regex.
func Regex(regexp *regexp.Regexp) RegexMatcher {
	return RegexMatcher{regexp: regexp}
}

// Len matches by the length of the value.
func Len(expected int) LenMatcher {
	return LenMatcher{expected: expected}
}

// IsEmpty checks whether the value is empty.
func IsEmpty() EmptyMatcher {
	return EmptyMatcher{}
}

// IsNotEmpty checks whether the value is not empty.
func IsNotEmpty() NotEmptyMatcher {
	return NotEmptyMatcher{}
}
func match(v interface{}) Matcher {
switch val := v.(type) {
case Matcher:
return val
case func() Matcher:
return Callback(val)
case *regexp.Regexp:
return Regex(val)
case fmt.Stringer:
return Exact(val.String())
}
return Exact(v)
}
// Match returns a matcher according to its type.
func Match(v interface{}) Matcher {
return match(v)
} | matcher.go | 0.866076 | 0.553204 | matcher.go | starcoder |
package main
import (
. "github.com/asuahsahua/advent2019/cmd/common"
"reflect"
"regexp"
"strconv"
)
// --- Day 12: The N-Body Problem ---
// The space near Jupiter is not a very safe place; you need to be careful of a
// big distracting red spot, extreme radiation, and a whole lot of moons
// swirling around.
// You decide to start by tracking the four largest moons: Io, Europa, Ganymede,
// and Callisto. After a brief scan, you calculate the position of each moon
// (your puzzle input).
// inputRex captures the signed x, y, z integers of one moon line.
var inputRex = regexp.MustCompile(`<x=(-?\d+), y=(-?\d+), z=(-?\d+)>`)

var PuzzleInput = `<x=-4, y=3, z=15>
<x=-11, y=-10, z=13>
<x=2, y=2, z=18>
<x=7, y=-1, z=0>`

// (to parse the puzzle input into points)
// ParseInput extracts every "<x=.., y=.., z=..>" triple from str,
// returning one Point3D per match. PanicIfErr guards the Atoi calls,
// which cannot fail for strings the regex has already matched.
func ParseInput(str string) []Point3D {
	// <x=-1, y=0, z=2>
	points := make([]Point3D, 0)
	for _, match := range inputRex.FindAllStringSubmatch(str, -1) {
		X, xerr := strconv.Atoi(match[1])
		PanicIfErr(xerr)
		Y, yerr := strconv.Atoi(match[2])
		PanicIfErr(yerr)
		Z, zerr := strconv.Atoi(match[3])
		PanicIfErr(zerr)
		points = append(points, Point3D{
			X: X,
			Y: Y,
			Z: Z,
		})
	}
	return points
}

// You just need to simulate their motion so you can avoid them.
// Each moon has a 3-dimensional position (x, y, and z) and a 3-dimensional
// velocity. The position of each moon is given in your scan; the x, y, and z
// velocity of each moon starts at 0.
type Moon struct {
	Position Point3D
	Velocity Point3D
}

// NewMoon creates a moon at the given position with zero velocity.
func NewMoon(position Point3D) *Moon { // :sparkles:
	return &Moon{
		Position: position,
		Velocity: Point3D{},
	}
}

// Clone returns an independent copy of the moon.
func (m *Moon) Clone() *Moon {
	return &Moon{
		Position: m.Position,
		Velocity: m.Velocity,
	}
}

// Simulate the motion of the moons in time steps: within each step,
// gravity updates all velocities first, then the new velocities update
// all positions.
func (jup *Jupiter) StepTime(times int) {
	// Within each time step...
	for i := 0; i < times; i++ {
		jup.UpdateVelocities()
		jup.UpdatePositions()
		// Time progresses by one step once all of the positions are updated.
		jup.Time++
	}
}
// First, update the velocity of every moon by applying gravity.
// Instead of iterating over all O(n^2) moon pairs, this buckets moon
// coordinates per axis and derives each moon's pull from the counts.
func (jup *Jupiter) UpdateVelocities() {
	// To apply gravity, consider every pair of moons.
	// On each axis (x, y, and z), the velocity of each moon changes by exactly
	// +1 or -1 to pull the moons together
	countX := jup.Moons.CountBy(func(moon *Moon) int { return moon.Position.X })
	countY := jup.Moons.CountBy(func(moon *Moon) int { return moon.Position.Y })
	countZ := jup.Moons.CountBy(func(moon *Moon) int { return moon.Position.Z })
	// Small function to return a delta-velocity from a current position component
	// and count distribution for that component: +1 per moon ahead of
	// us, -1 per moon behind us, 0 for moons at the same coordinate.
	deltaV := func(posValue int, counts map[int]int) int {
		totalChange := 0
		for otherPos, count := range counts {
			if otherPos > posValue {
				totalChange += count
			} else if otherPos < posValue {
				totalChange -= count
			} // otherwise don't change anything
		}
		return totalChange
	}
	// (for each moon...)
	for _, moon := range jup.Moons.moons {
		moon.Velocity.X += deltaV(moon.Position.X, countX)
		moon.Velocity.Y += deltaV(moon.Position.Y, countY)
		moon.Velocity.Z += deltaV(moon.Position.Z, countZ)
	}
}

// CountBy counts the moons into buckets keyed by the callback's result.
func (m Moons) CountBy(by func(*Moon) int) map[int]int {
	counts := make(map[int]int, 0)
	for _, moon := range m.moons {
		counts[by(moon)]++
	}
	return counts
}
// Then, once all moons' velocities have been updated, update the position
// of every moon by applying velocity.
func (jup *Jupiter) UpdatePositions() {
	for _, moon := range jup.Moons.moons {
		moon.Position = moon.Position.Add(moon.Velocity)
	}
}

// TotalEnergy sums the energy of every moon in the system.
func (jup Jupiter) TotalEnergy() int {
	totalEnergy := 0
	for _, moon := range jup.Moons.moons {
		totalEnergy += moon.Energy()
	}
	return totalEnergy
}

// The total energy for a single moon is its potential energy multiplied by
// its kinetic energy.
func (m Moon) Energy() int {
	return m.PotentialEnergy() * m.KineticEnergy()
}

// Potential energy is the sum of the absolute values of its position coordinates.
func (m Moon) PotentialEnergy() int {
	return m.Position.AbsSum()
}

// Kinetic energy is the sum of the absolute values of its velocity coordinates.
func (m Moon) KineticEnergy() int {
	return m.Velocity.AbsSum()
}
// Moons is a simple collection wrapper around a slice of moons.
type Moons struct {
	moons []*Moon
}

// NewMoons returns an empty collection.
func NewMoons() *Moons {
	return &Moons{
		moons: make([]*Moon, 0),
	}
}

// Add appends a moon to the collection.
func (m *Moons) Add(moon *Moon) {
	// maybe should check for collisions
	m.moons = append(m.moons, moon)
}

// Clone deep-copies the collection (each moon is cloned).
func (m *Moons) Clone() *Moons {
	moons := make([]*Moon, 0)
	for _, moon := range m.moons {
		moons = append(moons, moon.Clone())
	}
	return &Moons{
		moons: moons,
	}
}

// mapp runs the given callback on each moon (`map` is reserved) and
// returns the callback applied to every position and every velocity,
// in collection order.
func (m *Moons) mapp(cb func(Point3D) int) ([]int, []int) {
	pvals := make([]int, 0)
	vvals := make([]int, 0)
	for _, moon := range m.moons {
		pvals = append(pvals, cb(moon.Position))
		vvals = append(vvals, cb(moon.Velocity))
	}
	return pvals, vvals
}

// Jupiter is the whole simulated system: its moons plus the number of
// steps simulated so far.
type Jupiter struct {
	Moons *Moons
	Time  int
}

// NewJupiter returns an empty system at time zero.
func NewJupiter() *Jupiter {
	return &Jupiter{
		Moons: NewMoons(),
		Time:  0,
	}
}

// NewJupiterStr builds a system from puzzle-input text, one moon per
// parsed position, all starting at rest.
func NewJupiterStr(input string) *Jupiter {
	j := NewJupiter()
	for _, pos := range ParseInput(input) {
		j.Moons.Add(NewMoon(pos))
	}
	return j
}

// Clone deep-copies the system so simulations can run independently.
func (jup *Jupiter) Clone() *Jupiter {
	clone := NewJupiter()
	clone.Time = jup.Time
	clone.Moons = jup.Moons.Clone()
	return clone
}
// FindMoonPeriodCallback finds the number of steps that it takes for the
// callback on each moon to match up again.
// For example, if you wanted to find the period for the x-axis, the callback would be:
//	func (m *Moon) { return m.Position.X }
// NOTE(review): the return value is clone.Time, which includes the
// receiver's starting Time, and the velocity check compares against
// all-zeros — this assumes the receiver is in its initial scanned
// state (Time 0, velocities zero), as FindMoonPeriod guarantees.
func (jup *Jupiter) FindMoonPeriodCallback(cb func(Point3D) int) int {
	clone := jup.Clone()
	startP, _ := clone.Moons.mapp(cb)
	zerosV := make([]int, len(startP)) // need the component
	for {
		clone.StepTime(1)
		currP, currV := clone.Moons.mapp(cb)
		if reflect.DeepEqual(currP, startP) && reflect.DeepEqual(currV, zerosV) {
			return clone.Time
		} // otherwise continue
	}
}

// FindMoonPeriod finds the number of steps until each moon is in their original
// location. The three axes evolve independently, so the full period is
// the least common multiple of the per-axis periods.
func (jup *Jupiter) FindMoonPeriod() int {
	// Find the period of each of the position components
	xPeriod := jup.FindMoonPeriodCallback(func(p Point3D) int { return p.X })
	yPeriod := jup.FindMoonPeriodCallback(func(p Point3D) int { return p.Y })
	zPeriod := jup.FindMoonPeriodCallback(func(p Point3D) int { return p.Z })
	return LCM(xPeriod, yPeriod, zPeriod)
}
func main() {
// What is the total energy in the system after simulating the moons given in
// your scan for 1000 steps?
jupiter := NewJupiterStr(PuzzleInput)
jupiter.StepTime(1000)
Part1("%d", jupiter.TotalEnergy())
// How many steps does it take to reach the first state that exactly matches a
// previous state?
jup2 := NewJupiterStr(PuzzleInput)
Part2("%d", jup2.FindMoonPeriod())
} | cmd/day12/main.go | 0.737158 | 0.499939 | main.go | starcoder |
package main
import (
"fmt"
"image/color"
"log"
"github.com/alexdesi/collisions/detector"
"github.com/alexdesi/collisions/shapes2D"
"github.com/alexdesi/collisions/vectors"
"github.com/alexdesi/collisions/wren"
"github.com/hajimehoshi/ebiten/v2"
)
// Game implements ebiten.Game for the two-sphere collision demo.
type Game struct{}

// Update advances the simulation one tick: integrate each sphere's
// position by its velocity, resolve a sphere-sphere impact if one is
// detected, then reflect velocities off the screen borders.
func (g *Game) Update() error {
	// Actual vel for sp1
	sp1.X = sp1.X + sp1.Velocity[0]
	sp2.X = sp2.X + sp2.Velocity[0]
	sp1.Y = sp1.Y + sp1.Velocity[1]
	sp2.Y = sp2.Y + sp2.Velocity[1]
	if detector.DetectCollision(sp1, sp2) {
		// wren.Impact mutates both spheres' velocities in place.
		wren.Impact(&sp1, &sp2)
	}
	// borderVector holds per-axis +/-1 multipliers used to reflect the
	// velocity component that hit a border — presumably {−1,1} style;
	// confirm against the detector package.
	borderVector, borderCollision := detector.DetectCollisionWithBorders(sp1)
	if borderCollision {
		sp1.Velocity = vectors.Vector{sp1.Velocity[0] * borderVector[0], sp1.Velocity[1] * borderVector[1]}
	}
	borderVector, borderCollision = detector.DetectCollisionWithBorders(sp2)
	if borderCollision {
		sp2.Velocity = vectors.Vector{sp2.Velocity[0] * borderVector[0], sp2.Velocity[1] * borderVector[1]}
	}
	return nil
}

// Draw renders both spheres as red circles.
func (g *Game) Draw(screen *ebiten.Image) {
	red := color.RGBA{0xff, 0, 0, 0xff}
	shapes2D.DrawCircle(screen, int(sp1.X), int(sp1.Y), int(sp1.Radius), red)
	// drawCircle(screen, int(sp1.X), int(sp1.Y), 2, red)
	// ebitenutil.DrawLine(screen, sp1.X, sp1.Y, sp1.X+sp1.Velocity[0]*100, sp1.Y+sp1.Velocity[1]*100, red)
	shapes2D.DrawCircle(screen, int(sp2.X), int(sp2.Y), int(sp2.Radius), red)
	// drawCircle(screen, int(sp2.X), int(sp2.Y), 2, red)
	// ebitenutil.DrawLine(screen, sp2.X, sp2.Y, sp2.X+sp2.Velocity[0]*100, sp2.Y+sp2.Velocity[1]*100, red)
	// drawVector(screen, Un)
	// drawVector(screen, Ut)
}

// Layout fixes the logical resolution at 640x480 regardless of the
// outside (window) size.
func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {
	return 640, 480
}

// The two simulated spheres; package-level because Update/Draw mutate
// them directly each frame.
var sp1 = wren.Sphere{X: 100, Y: 340, Velocity: vectors.Vector{3, 1}, Mass: 1, Radius: 20}
var sp2 = wren.Sphere{X: 300, Y: 240, Velocity: vectors.Vector{-1, 0}, Mass: 2, Radius: 100}
func main() {
fmt.Println("Elastic collision")
fmt.Println(sp1)
ebiten.SetWindowSize(640, 480)
ebiten.SetWindowTitle("Elastic collisions of two spheres")
if err := ebiten.RunGame(&Game{}); err != nil {
log.Fatal(err)
}
} | main.go | 0.581184 | 0.436022 | main.go | starcoder |
package view
import (
"fmt"
"unicode/utf8"
termbox "github.com/nsf/termbox-go"
"github.com/borkshop/bork/internal/point"
)
// Grid represents a sized buffer of terminal cells.
type Grid struct {
	Size point.Point    // width (X) and height (Y) in cells
	Data []termbox.Cell // row-major: index = y*Size.X + x
}

// MakeGrid makes a new Grid with the given size.
func MakeGrid(sz point.Point) Grid {
	g := Grid{Size: sz}
	g.Data = make([]termbox.Cell, sz.X*sz.Y)
	return g
}

// Resize update the grid size, growing Data capacity or truncating its length
// as needed. Existing cell contents are not repositioned.
func (g *Grid) Resize(sz point.Point) {
	g.Size = sz
	if n := sz.X * sz.Y; n > cap(g.Data) {
		g.Data = make([]termbox.Cell, n)
	} else {
		g.Data = g.Data[:n]
	}
}

// Get returns the cell at (x, y). No bounds checking beyond the slice's.
func (g Grid) Get(x, y int) termbox.Cell {
	return g.Data[y*g.Size.X+x]
}

// Set sets a cell in the grid, overwriting rune and both attributes.
func (g Grid) Set(x, y int, ch rune, fg, bg termbox.Attribute) {
	g.Data[y*g.Size.X+x] = termbox.Cell{Ch: ch, Fg: fg, Bg: bg}
}

// Merge merges data into a cell in the grid: zero-valued arguments
// leave the corresponding part of the existing cell untouched.
func (g Grid) Merge(x, y int, ch rune, fg, bg termbox.Attribute) {
	i := y*g.Size.X + x
	if ch != 0 {
		g.Data[i].Ch = ch
	}
	if fg != 0 {
		g.Data[i].Fg = fg
	}
	if bg != 0 {
		g.Data[i].Bg = bg
	}
}
// Copy copies another grid into this one, centered and clipped as necessary.
// When og is smaller, it is centered inside g via a positive offset;
// when og is larger, its center region is clipped to fit.
func (g Grid) Copy(og Grid) {
	diff := g.Size.Sub(og.Size)
	offset := diff.Div(2)
	// ix..nx is the source column window; shrink it when the source is
	// wider than the destination.
	ix, nx := 0, og.Size.X
	if diff.X < 0 {
		ix = -offset.X
		nx = ix + g.Size.X
	}
	// y is the starting source row; when the source is taller, skip its
	// top rows and cancel the (negative) vertical offset.
	y := 0
	if diff.Y < 0 {
		y = -offset.Y
		offset.Y = -y
	}
	offset = offset.Max(point.Zero).Min(g.Size)
	for yi := 0; yi < g.Size.Y && y < og.Size.Y; y, yi = y+1, yi+1 {
		x := ix
		i := (yi+offset.Y)*g.Size.X + offset.X
		j := y*og.Size.X + x
		for ; x < nx; x++ {
			c := og.Data[j]
			g.Data[i] = c
			i++
			j++
		}
	}
}
// WriteString writes a string into the grid at the given position, returning
// how many cells were affected. Only cell runes are written; attributes
// are left untouched. Output is clipped at the grid's right edge.
func (g Grid) WriteString(x, y int, mess string, args ...interface{}) int {
	if len(args) > 0 {
		mess = fmt.Sprintf(mess, args...)
	}
	i := y*g.Size.X + x
	j := i
	for ; len(mess) > 0 && x < g.Size.X; x, j = x+1, j+1 {
		r, n := utf8.DecodeRuneInString(mess)
		mess = mess[n:]
		g.Data[j].Ch = r
	}
	return j - i
}

// WriteStringRTL is like WriteString except it goes Right-To-Left (in both the
// string and the grid): the string's last rune lands at (x, y) and
// writing proceeds leftward, clipped at column 0.
func (g Grid) WriteStringRTL(x, y int, mess string, args ...interface{}) int {
	if len(args) > 0 {
		mess = fmt.Sprintf(mess, args...)
	}
	i := y*g.Size.X + x
	j := i
	for ; len(mess) > 0 && x >= 0; x, j = x-1, j-1 {
		r, n := utf8.DecodeLastRuneInString(mess)
		mess = mess[:len(mess)-n]
		g.Data[j].Ch = r
	}
	// j ran downward, so i-j would be the count; the negative j-i
	// mirrors WriteString's convention on the reversed walk.
	return j - i
}
// Lines returns a slice of row strings from the grid, filling in any
// zero runes with the given one.
func (g Grid) Lines(fillZero rune) []string {
lines := make([]string, g.Size.Y)
line := make([]rune, g.Size.X)
for y, i := 0, 0; y < g.Size.Y; y++ {
for x := 0; x < g.Size.X; x++ {
if ch := g.Data[i].Ch; ch != 0 {
line[x] = ch
} else {
line[x] = fillZero
}
i++
}
lines[y] = string(line)
}
return lines
} | internal/view/grid.go | 0.657868 | 0.428592 | grid.go | starcoder |
package ahocorasick
const (
	// TABLE_WIDTH is the page size of the Matcher's paged node table.
	TABLE_WIDTH = 1024
)

// A node in the trie structure used to implement Aho-Corasick
type node struct {
	b []byte // The blice at this node

	output bool // True means this node represents a blice that should
	// be output when matching
	index int // index into original dictionary if output is true

	// The use of fixed size arrays is space-inefficient but fast for
	// lookups.

	child []int32 // A non-nil entry in this array means that the
	// index represents a byte value which can be
	// appended to the current node. Blices in the
	// trie are built up byte by byte through these
	// child node pointers.

	suffix int32 // Pointer to the longest possible strict suffix of
	// this node

	fail int32 // Pointer to the next node which is in the dictionary
	// which can be reached from here following suffixes. Called fail
	// because it is used to fallback in the trie when a match fails.
}

// Matcher is returned by NewMatcher and contains a list of blices to
// match against. Nodes live in a paged table addressed by int32 index;
// index 0 means "no node" and index 1 is the root.
type Matcher struct {
	root *node // Points to trie[0]

	table     [][]*node
	tableSize int32
}

// tableGet returns the node stored at table index i.
func (m *Matcher) tableGet(i int32) *node {
	return m.table[i/TABLE_WIDTH][i%TABLE_WIDTH]
}

// tableSet stores n at table index i, growing the table by one page
// when needed (indices are only ever appended sequentially).
func (m *Matcher) tableSet(i int32, n *node) {
	if i/TABLE_WIDTH >= int32(len(m.table)) {
		m.table = append(m.table, make([]*node, TABLE_WIDTH))
	}
	m.table[i/TABLE_WIDTH][i%TABLE_WIDTH] = n
}

// getChild returns the table index of the child reached by byte i,
// or 0 when there is no such child (child arrays are lazily allocated).
func (n *node) getChild(i byte) int32 {
	if n.child == nil {
		return 0
	}
	return n.child[i]
}

// setChild records v as the child reached by byte i, allocating the
// 256-entry child array on first use.
func (n *node) setChild(i byte, v int32) {
	if n.child == nil {
		n.child = make([]int32, 256)
	}
	n.child[i] = v
}

// findBlice looks for a blice in the trie starting from the root and
// returns the table index of the node representing the end of the blice. If
// the blice is not found it returns 0.
func (m *Matcher) findBlice(blice []byte) int32 {
	i := int32(1)
	for _, b := range blice {
		i = m.tableGet(i).getChild(b)
		if i == 0 {
			break
		}
	}
	return i
}
// buildTrie builds the fundamental trie structure from a set of
// blices, then fills in each node's fail and suffix links.
func (m *Matcher) buildTrie(dictionary [][]byte) {
	m.table = [][]*node{}

	m.root = &node{}
	// Index 0 is reserved as the "no node" sentinel; the root is 1.
	m.tableSet(0, nil)
	m.tableSet(1, m.root)
	m.tableSize = 2

	// This loop builds the nodes in the trie by following through
	// each dictionary entry building the children pointers.
	for i, blice := range dictionary {
		n := m.root
		var path []byte
		for _, b := range blice {
			path = append(path, b)
			c := m.tableGet(n.getChild(b))
			if c == nil {
				c = &node{}
				m.tableSet(m.tableSize, c)
				n.setChild(b, m.tableSize)
				m.tableSize += 1
				c.b = make([]byte, len(path))
				copy(c.b, path)

				// Nodes directly under the root node will have the
				// root as their fail point as there are no suffixes
				// possible.
				if len(path) == 1 {
					c.fail = 1
				}

				c.suffix = 1
			}
			n = c
		}

		// The last value of n points to the node representing a
		// dictionary entry
		n.output = true
		n.index = i
	}

	// Second pass: walk every node (order does not matter because the
	// links are computed from each node's full byte path, not from its
	// parent's links) and resolve fail/suffix via findBlice.
	stack := []*node{m.root}
	for len(stack) > 0 {
		n := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		for i := 0; i <= 255; i++ {
			ci := n.getChild(byte(i))
			if ci != 0 {
				c := m.tableGet(ci)
				stack = append(stack, c)

				// fail = longest proper suffix of c.b that is a node.
				for j := 1; j < len(c.b); j++ {
					c.fail = m.findBlice(c.b[j:])
					if c.fail != 0 {
						break
					}
				}

				if c.fail == 0 {
					c.fail = 1
				}

				// suffix = longest proper suffix of c.b that is a
				// complete dictionary entry (output node).
				for j := 1; j < len(c.b); j++ {
					si := m.findBlice(c.b[j:])
					if si != 0 && m.tableGet(si).output {
						c.suffix = si
						break
					}
				}
			}
		}
	}
}
// NewMatcher creates a new Matcher used to match against a set of
// blices.
func NewMatcher(dictionary [][]byte) *Matcher {
	matcher := &Matcher{}
	matcher.buildTrie(dictionary)
	return matcher
}

// NewStringMatcher creates a new Matcher used to match against a set
// of strings (this is a helper to make initialization easy).
func NewStringMatcher(dictionary []string) *Matcher {
	blices := make([][]byte, 0, len(dictionary))
	for _, entry := range dictionary {
		blices = append(blices, []byte(entry))
	}

	matcher := &Matcher{}
	matcher.buildTrie(blices)
	return matcher
}
// Match searches in for blices and returns all the blices found as
// indexes into the original dictionary
func (m *Matcher) Match(in []byte) []int {
marks := make(map[int32]bool)
var hits []int
ni := int32(1)
n := m.root
for _, b := range in {
if ni != 1 && n.getChild(b) == 0 {
ni = n.fail
n = m.tableGet(n.fail)
}
fi := n.getChild(b)
if fi != 0 {
f := m.tableGet(fi)
ni = fi
n = f
_, marked := marks[fi]
if f.output && !marked {
hits = append(hits, f.index)
marks[fi] = true
}
for f.suffix != 1 {
fi = f.suffix
f = m.tableGet(fi)
_, marked := marks[fi]
if !marked {
hits = append(hits, f.index)
marks[fi] = true
} else {
// There's no point working our way up the
// suffixes if it's been done before for this call
// to Match. The matches are already in hits.
break
}
}
}
}
return hits
} | ahocorasick.go | 0.672009 | 0.496399 | ahocorasick.go | starcoder |
package gosfmt
// Int63r generates a pseudo random int64 between low and high (inclusive).
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  random int64
func (r *Rand) Int63r(low, high int64) int64 {
	return r.Int63()%(high-low+1) + low
}

// Int63s generates pseudo random integers between low and high.
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  values -- slice to be filled with len(values) numbers
func (r *Rand) Int63s(values []int64, low, high int64) {
	if len(values) < 1 {
		return
	}
	for i := 0; i < len(values); i++ {
		values[i] = r.Int63r(low, high)
	}
}

// Int63Shuffle shuffles a slice of integers in place (Fisher-Yates).
func (r *Rand) Int63Shuffle(values []int64) {
	for i := len(values) - 1; i > 0; i-- {
		// FIX: the candidate index must span [0, i]; the previous
		// "% i" excluded j == i, so the element at position i could
		// never stay put at each step, biasing the permutation.
		j := r.Int() % (i + 1)
		values[i], values[j] = values[j], values[i]
	}
}
// Uint32r generates a pseudo random uint32 between low and high (inclusive).
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  random uint32
func (r *Rand) Uint32r(low, high uint32) uint32 {
	return r.Uint32()%(high-low+1) + low
}

// Uint32s generates pseudo random integers between low and high.
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  values -- slice to be filled with len(values) numbers
func (r *Rand) Uint32s(values []uint32, low, high uint32) {
	if len(values) < 1 {
		return
	}
	for i := 0; i < len(values); i++ {
		values[i] = r.Uint32r(low, high)
	}
}

// Uint32Shuffle shuffles a slice of integers in place (Fisher-Yates).
func (r *Rand) Uint32Shuffle(values []uint32) {
	for i := len(values) - 1; i > 0; i-- {
		// FIX: index must span [0, i]; "% i" biased the shuffle by
		// never letting position i keep its element at each step.
		j := r.Int() % (i + 1)
		values[i], values[j] = values[j], values[i]
	}
}
// Uint64r generates a pseudo random uint64 between low and high (inclusive).
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  random uint64
func (r *Rand) Uint64r(low, high uint64) uint64 {
	return r.Uint64()%(high-low+1) + low
}

// Uint64s generates pseudo random integers between low and high.
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  values -- slice to be filled with len(values) numbers
func (r *Rand) Uint64s(values []uint64, low, high uint64) {
	if len(values) < 1 {
		return
	}
	for i := 0; i < len(values); i++ {
		values[i] = r.Uint64r(low, high)
	}
}

// Uint64Shuffle shuffles a slice of integers in place (Fisher-Yates).
func (r *Rand) Uint64Shuffle(values []uint64) {
	for i := len(values) - 1; i > 0; i-- {
		// FIX: index must span [0, i]; "% i" biased the shuffle by
		// never letting position i keep its element at each step.
		j := r.Int() % (i + 1)
		values[i], values[j] = values[j], values[i]
	}
}
// Int31r generates a pseudo random int32 between low and high (inclusive).
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  random int32
func (r *Rand) Int31r(low, high int32) int32 {
	return r.Int31()%(high-low+1) + low
}

// Int31s generates pseudo random integers between low and high.
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  values -- slice to be filled with len(values) numbers
func (r *Rand) Int31s(values []int32, low, high int32) {
	if len(values) < 1 {
		return
	}
	for i := 0; i < len(values); i++ {
		values[i] = r.Int31r(low, high)
	}
}

// Int31Shuffle shuffles a slice of integers in place (Fisher-Yates).
func (r *Rand) Int31Shuffle(values []int32) {
	for i := len(values) - 1; i > 0; i-- {
		// FIX: index must span [0, i]; "% i" biased the shuffle by
		// never letting position i keep its element at each step.
		j := r.Int() % (i + 1)
		values[i], values[j] = values[j], values[i]
	}
}
// Intr generates a pseudo random integer between low and high (inclusive).
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  random integer
func (r *Rand) Intr(low, high int) int {
	return r.Int()%(high-low+1) + low
}

// Ints generates pseudo random integers between low and high.
//	Input:
//	  low  -- lower limit
//	  high -- upper limit
//	Output:
//	  values -- slice to be filled with len(values) numbers
func (r *Rand) Ints(values []int, low, high int) {
	if len(values) < 1 {
		return
	}
	for i := 0; i < len(values); i++ {
		values[i] = r.Intr(low, high)
	}
}

// IntShuffle shuffles a slice of integers in place (Fisher-Yates).
func (r *Rand) IntShuffle(values []int) {
	for i := len(values) - 1; i > 0; i-- {
		// FIX: index must span [0, i]; "% i" biased the shuffle by
		// never letting position i keep its element at each step.
		j := r.Int() % (i + 1)
		values[i], values[j] = values[j], values[i]
	}
}
// Float64r generates a pseudo random real number between low and high; i.e. in [low, high)
//	Input:
//	  low  -- lower limit (closed)
//	  high -- upper limit (open)
//	Output:
//	  random float64
func (r *Rand) Float64r(low, high float64) float64 {
	return low + (high-low)*r.Float64()
}

// Float64s generates pseudo random real numbers between low and high; i.e. in [low, high)
//	Input:
//	  low  -- lower limit (closed)
//	  high -- upper limit (open)
//	Output:
//	  values -- slice to be filled with len(values) numbers
func (r *Rand) Float64s(values []float64, low, high float64) {
	for i := 0; i < len(values); i++ {
		values[i] = low + (high-low)*r.Float64()
	}
}

// Float64Shuffle shuffles a slice of float point numbers in place
// (Fisher-Yates).
func (r *Rand) Float64Shuffle(values []float64) {
	for i := len(values) - 1; i > 0; i-- {
		// FIX: index must span [0, i]; "% i" biased the shuffle by
		// never letting position i keep its element at each step.
		j := r.Int() % (i + 1)
		values[i], values[j] = values[j], values[i]
	}
}
// Float32r generates a pseudo random real number between low and high; i.e. in [low, high)
//	Input:
//	  low  -- lower limit (closed)
//	  high -- upper limit (open)
//	Output:
//	  random float32
func (r *Rand) Float32r(low, high float32) float32 {
	return low + (high-low)*r.Float32()
}

// Float32s generates pseudo random real numbers between low and high; i.e. in [low, high)
//	Input:
//	  low  -- lower limit (closed)
//	  high -- upper limit (open)
//	Output:
//	  values -- slice to be filled with len(values) numbers
func (r *Rand) Float32s(values []float32, low, high float32) {
	for i := 0; i < len(values); i++ {
		values[i] = low + (high-low)*r.Float32()
	}
}

// Float32Shuffle shuffles a slice of float point numbers in place
// (Fisher-Yates).
func (r *Rand) Float32Shuffle(values []float32) {
	for i := len(values) - 1; i > 0; i-- {
		// FIX: index must span [0, i]; "% i" biased the shuffle by
		// never letting position i keep its element at each step.
		j := r.Int() % (i + 1)
		values[i], values[j] = values[j], values[i]
	}
}
// FlipCoin generates a Bernoulli variable; throw a coin with probability p
func (r *Rand) FlipCoin(p float64) bool {
if p == 1.0 {
return true
}
if p == 0.0 {
return false
}
if r.Float64() <= p {
return true
}
return false
} | rand_gosl.go | 0.511229 | 0.531817 | rand_gosl.go | starcoder |
package main
import (
"fmt"
"math"
)
// world models a planet as a sphere of the given radius (km).
type world struct {
	radius float64
}
func rad(deg float64) float64 {
return deg * math.Pi / 180
}
// location is a point on a sphere in decimal degrees.
type location struct {
	lat, long float64
}

// coordinate is a degrees/minutes/seconds angle with a hemisphere rune
// ('N', 'S', 'E' or 'W'; lowercase also accepted by decimal()).
type coordinate struct {
	d, m, s float64
	h rune
}
// distance returns the great-circle distance between p1 and p2 on w,
// computed with the Spherical Law of Cosines.
func (w world) distance(p1, p2 location) float64 {
	sinLat1, cosLat1 := math.Sincos(rad(p1.lat))
	sinLat2, cosLat2 := math.Sincos(rad(p2.lat))
	cosDeltaLong := math.Cos(rad(p1.long - p2.long))
	return w.radius * math.Acos(sinLat1*sinLat2+cosLat1*cosLat2*cosDeltaLong)
}
// newLocation builds a location from latitude and longitude given in
// d/m/s coordinate form.
func newLocation(lat, long coordinate) *location {
	return &location{
		lat:  lat.decimal(),
		long: long.decimal(),
	}
}
// decimal converts the d/m/s coordinate to signed decimal degrees:
// negative for the southern and western hemispheres.
func (c coordinate) decimal() float64 {
	degrees := c.d + c.m/60 + c.s/3600
	if c.h == 'S' || c.h == 'W' || c.h == 's' || c.h == 'w' {
		return -degrees
	}
	return degrees
}
// main prints great-circle distances between rover landing sites on Mars
// and between a few landmarks on Earth.
func main() {
	// Radii in km; only Earth and Mars are used below.
	// mercury := world{2439.7}
	// venus := world{6051.8}
	earth := world{6371.0}
	mars := world{3389.5}
	// jupiter := world{69911}
	// saturn := world{58232}
	// uranus := world{25362}
	// neptune := world{24622}
	// Mars rover landing sites (d/m/s with hemisphere).
	opportunity := newLocation(coordinate{1, 56, 46.3, 'S'}, coordinate{354, 28, 24.2, 'E'})
	spirit := newLocation(coordinate{14, 34, 6.2, 'S'}, coordinate{175, 28, 21.5, 'E'})
	curiosity := newLocation(coordinate{1, 56, 46.3, 'S'}, coordinate{137, 26, 30.1, 'E'})
	insight := newLocation(coordinate{4, 30, 0.0, 'N'}, coordinate{135, 54, 0, 'E'})
	fmt.Println("Distance (km) between Spirit and Opportunity (farthest)", mars.distance(*opportunity, *spirit))
	fmt.Println("Distance (km) between Spirit and Insight", mars.distance(*insight, *spirit))
	fmt.Println("Distance (km) between Opportunity and Curiosity", mars.distance(*opportunity, *curiosity))
	fmt.Println("Distance (km) between Opportunity and Insight", mars.distance(*opportunity, *insight))
	fmt.Println("Distance (km) between Curiosity and Insight (closest)", mars.distance(*curiosity, *insight))
	// Earth landmarks.
	london := newLocation(coordinate{51, 30, 0, 'N'}, coordinate{0, 8, 0, 'W'})
	paris := newLocation(coordinate{48, 51, 0, 'N'}, coordinate{2, 21, 0, 'E'})
	eugene := newLocation(coordinate{44, 3, 7.449, 'N'}, coordinate{123, 5, 12.313, 'W'})
	washingtonDC := newLocation(coordinate{38, 54, 25.892, 'N'}, coordinate{77, 2, 12.735, 'W'})
	// Martian mountains, measured against the Earth sphere below —
	// presumably intentional for comparison; NOTE(review): confirm.
	mtSharp := newLocation(coordinate{5, 4, 0, 'S'}, coordinate{137, 51, 0, 'E'})
	olympusMons := newLocation(coordinate{18, 39, 0, 'N'}, coordinate{226, 12, 0, 'E'})
	fmt.Println("Distance (km) between London and France", earth.distance(*london, *paris))
	fmt.Println("Distance (km) between Eugene and Washington, D.C.", earth.distance(*eugene, *washingtonDC))
	fmt.Println("Distance (km) between Mount Sharp and Olympus Mons", earth.distance(*mtSharp, *olympusMons))
}
// Package slicetype implements data types and utilities to describe
// Bigslice types: Slices, Frames, and Tasks all carry
// slicetype.Types.
package slicetype
import (
"fmt"
"reflect"
"strings"
)
// A Type is the type of a set of columns.
type Type interface {
	// NumOut returns the number of columns.
	NumOut() int
	// Out returns the data type of the ith column.
	Out(i int) reflect.Type
	// Prefix returns the number of columns in the type
	// which are considered the type's prefix. A type's
	// prefix is the set of columns which are considered
	// the type's key columns for operations like reduce.
	Prefix() int
}
// typeSlice is the basic Type implementation: a list of column types
// with a fixed prefix of 1.
type typeSlice []reflect.Type

// New returns a new Type using the provided column types.
func New(types ...reflect.Type) Type {
	return typeSlice(types)
}

func (t typeSlice) NumOut() int { return len(t) }
func (t typeSlice) Out(i int) reflect.Type { return t[i] }
// Prefix is always 1: only the first column is the key column.
func (t typeSlice) Prefix() int { return 1 }
// Assignable reports whether every column type of in can be assigned to
// the corresponding column of out; both types must have the same arity.
func Assignable(in, out Type) bool {
	n := in.NumOut()
	if n != out.NumOut() {
		return false
	}
	for i := 0; i < n; i++ {
		if !in.Out(i).AssignableTo(out.Out(i)) {
			return false
		}
	}
	return true
}
// Columns returns a slice of column types from the provided type.
// When typ is already a typeSlice it is returned directly.
func Columns(typ Type) []reflect.Type {
	if cols, ok := typ.(typeSlice); ok {
		return cols
	}
	cols := make([]reflect.Type, typ.NumOut())
	for i := 0; i < len(cols); i++ {
		cols[i] = typ.Out(i)
	}
	return cols
}
// Concat returns a Type whose columns are the concatenation of the
// columns of the given types, in order.
func Concat(types ...Type) Type {
	var combined typeSlice
	for _, typ := range types {
		combined = append(combined, Columns(typ)...)
	}
	return combined
}
// String renders typ in the form "slice[prefix]col1,col2,...".
func String(typ Type) string {
	names := make([]string, 0, typ.NumOut())
	for i := 0; i < typ.NumOut(); i++ {
		names = append(names, typ.Out(i).String())
	}
	return fmt.Sprintf("slice[%d]%s", typ.Prefix(), strings.Join(names, ","))
}
// appendType is the concatenation of two Types; t1's columns come first.
type appendType struct {
	t1, t2 Type
}

func (a appendType) NumOut() int { return a.t1.NumOut() + a.t2.NumOut() }

func (a appendType) Out(i int) reflect.Type {
	if n := a.t1.NumOut(); i >= n {
		return a.t2.Out(i - n)
	}
	return a.t1.Out(i)
}

// Prefix is inherited from the first type.
func (a appendType) Prefix() int { return a.t1.Prefix() }

// Append returns the Type whose columns are those of t1 followed by
// those of t2, evaluated lazily.
func Append(t1, t2 Type) Type {
	return appendType{t1, t2}
}
// sliceType is a view of columns [i, j) of an underlying Type.
type sliceType struct {
	t    Type
	i, j int
}

func (s sliceType) NumOut() int { return s.j - s.i }

func (s sliceType) Out(i int) reflect.Type {
	if i >= s.j-s.i {
		panic("invalid index")
	}
	return s.t.Out(s.i + i)
}

// BUG(marius): prefixes are lost when slicing a type.
func (s sliceType) Prefix() int {
	// TODO(marius): figure out how to properly compute
	// prefixes for appended types and sliced types.
	// This is currently only used in places which do not
	// accept prefixes anyway.
	return 1
}

// Slice returns the Type comprising columns [i, j) of t.
// It panics when the range is out of bounds.
func Slice(t Type, i, j int) Type {
	if i < 0 || i > t.NumOut() || j < i || j > t.NumOut() {
		panic("slice: invalid argument")
	}
	return sliceType{t, i, j}
}
// Signature returns a Go function signature for a function that takes the
// provided arguments and returns the provided values.
func Signature(arg, ret Type) string {
args := make([]string, arg.NumOut())
for i := range args {
args[i] = arg.Out(i).String()
}
rets := make([]string, ret.NumOut())
for i := range rets {
rets[i] = ret.Out(i).String()
}
var b strings.Builder
b.WriteString("func(")
b.WriteString(strings.Join(args, ", "))
b.WriteString(")")
switch len(rets) {
case 0:
case 1:
b.WriteString(" ")
b.WriteString(rets[0])
default:
b.WriteString(" (")
b.WriteString(strings.Join(rets, ", "))
b.WriteString(")")
}
return b.String()
} | slicetype/slicetype.go | 0.581897 | 0.547948 | slicetype.go | starcoder |
package function
import (
"github.com/searKing/golang/go/error/exception"
"github.com/searKing/golang/go/util/class"
"github.com/searKing/golang/go/util/object"
)
// Function represents a function that accepts one argument and produces a
// result, mirroring java.util.function.Function.
type Function interface {
	// Apply applies this function to the given argument t and returns
	// the function result.
	Apply(t interface{}) interface{}
	// Compose returns a composed function that first applies the before
	// function to its input, and then applies this function to the
	// result. Panics (via object.RequireNonNil) when before is nil.
	//
	// See also AndThen.
	Compose(before Function) Function
	// AndThen returns a composed function that first applies this
	// function to its input, and then applies the given function to the
	// result. Panics (via object.RequireNonNil) when the argument is nil.
	//
	// See also Compose.
	AndThen(before Function) Function
}
// Identity returns a Function that always returns its input argument
// unchanged.
func Identity() Function {
	return FunctionFunc(func(t interface{}) interface{} { return t })
}
// FunctionFunc adapts an ordinary func into a Function.
type FunctionFunc func(t interface{}) interface{}

// Apply calls f(t).
func (f FunctionFunc) Apply(t interface{}) interface{} { return f(t) }

// Compose returns a function that applies before first, then f.
func (f FunctionFunc) Compose(before Function) Function {
	object.RequireNonNil(before)
	composed := func(t interface{}) interface{} {
		return f.Apply(before.Apply(t))
	}
	return FunctionFunc(composed)
}

// AndThen returns a function that applies f first, then after.
func (f FunctionFunc) AndThen(after Function) Function {
	object.RequireNonNil(after)
	chained := func(t interface{}) interface{} {
		return after.Apply(f.Apply(t))
	}
	return FunctionFunc(chained)
}
// AbstractFunction is an embeddable base that provides Compose and
// AndThen in terms of the embedder's Apply; the embedder must override
// Apply (the base implementation panics).
type AbstractFunction struct {
	class.Class
}

// Apply must be overridden by the deriving type; calling the base
// implementation is a programming error.
func (f *AbstractFunction) Apply(t interface{}) interface{} {
	panic(exception.NewIllegalStateException1("called wrong Apply method"))
}

// Compose returns a function applying before first, then the derived
// type's Apply (resolved dynamically via GetDerivedElse).
func (f *AbstractFunction) Compose(before Function) Function {
	object.RequireNonNil(before)
	return FunctionFunc(func(t interface{}) interface{} {
		return f.GetDerivedElse(f).(Function).Apply(before.Apply(t))
	})
}

// AndThen returns a function applying the derived type's Apply first,
// then after.
func (f *AbstractFunction) AndThen(after Function) Function {
	object.RequireNonNil(after)
	return FunctionFunc(func(t interface{}) interface{} {
		return after.Apply(f.GetDerivedElse(f).(Function).Apply(t))
	})
}
package core
import (
"bytes"
"reflect"
"strings"
"time"
)
// LogicalOperator joins adjacent conditions (AND/OR); ComparisonOperator
// selects a comparison; Type tags the normalized runtime type of a value.
type LogicalOperator int
type ComparisonOperator int
type Type int
const (
	OR LogicalOperator = iota
	AND
	UnknownLogical
	// NOTE: iota does not reset within a single const block, so Equals
	// continues counting after UnknownLogical. The absolute values are
	// irrelevant here — these constants are only used as map keys and
	// in equality comparisons, so only distinctness matters.
	Equals ComparisonOperator = iota
	NotEquals
	LessThan
	GreaterThan
	LessThanOrEqual
	GreaterThanOrEqual
	Contains
	NotContains
	Unary
	NotUnary
	UnknownComparator
	String Type = iota
	Nil
	Int
	Int64
	Uint
	Float64
	Complex128
	Bool
	Time
	Today
	Array
	Unknown
)
// KindToType maps reflect kinds to normalized Type tags; slices and maps
// are both treated as Array.
var KindToType = map[reflect.Kind]Type{
	reflect.String: String,
	reflect.Int: Int,
	reflect.Int64: Int64,
	reflect.Uint: Uint,
	reflect.Float64: Float64,
	reflect.Complex128: Complex128,
	reflect.Bool: Bool,
	reflect.Array: Array,
	reflect.Slice: Array,
	reflect.Map: Array,
}
// TypeOperations holds, per normalized Type, the primitive Equals and
// LessThan resolvers. All other ordering comparisons (>, <=, >=) are
// derived from these two by the *Comparison functions below. Each
// resolver assumes both operands have already been converted to the
// matching Go type by convertToSameType.
var TypeOperations = map[Type]map[ComparisonOperator]ConditionResolver{
	String: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(string) == right.(string) },
		LessThan: func(left, right interface{}) bool { return left.(string) < right.(string) },
	},
	Nil: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left == nil && right == nil },
		LessThan: func(left, right interface{}) bool { return false },
	},
	Int: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(int) == right.(int) },
		LessThan: func(left, right interface{}) bool { return left.(int) < right.(int) },
	},
	Int64: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(int64) == right.(int64) },
		LessThan: func(left, right interface{}) bool { return left.(int64) < right.(int64) },
	},
	Uint: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(uint) == right.(uint) },
		LessThan: func(left, right interface{}) bool { return left.(uint) < right.(uint) },
	},
	Float64: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(float64) == right.(float64) },
		LessThan: func(left, right interface{}) bool { return left.(float64) < right.(float64) },
	},
	// Complex numbers have no total ordering; LessThan is always false.
	Complex128: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(complex128) == right.(complex128) },
		LessThan: func(left, right interface{}) bool { return false },
	},
	Bool: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(bool) == right.(bool) },
		LessThan: func(left, right interface{}) bool { return false },
	},
	// Times compare with second precision via Unix().
	Time: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return left.(time.Time).Unix() == right.(time.Time).Unix() },
		LessThan: func(left, right interface{}) bool { return left.(time.Time).Unix() < right.(time.Time).Unix() },
	},
	// Today compares calendar days (year + day-of-year) only.
	Today: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool {
			l, r := left.(time.Time), right.(time.Time)
			return l.YearDay() == r.YearDay() && l.Year() == r.Year()
		},
		LessThan: func(left, right interface{}) bool {
			l, r := left.(time.Time), right.(time.Time)
			if l.Year() > r.Year() {
				return false
			}
			if l.Year() < r.Year() {
				return true
			}
			return l.YearDay() < r.YearDay()
		},
	},
	// Arrays: equality is deep equality; ordering compares lengths.
	Array: map[ComparisonOperator]ConditionResolver{
		Equals: func(left, right interface{}) bool { return reflect.DeepEqual(left, right) },
		LessThan: func(left, right interface{}) bool { return reflect.ValueOf(left).Len() < reflect.ValueOf(right).Len() },
	},
}
// ConditionResolver resolves a single comparison between two already
// resolved values.
type ConditionResolver func(left, right interface{}) bool

// ConditionLookup maps every comparison operator to its resolver.
// Condition.IsTrue indexes this map directly, so every operator that can
// appear in a Condition must be present.
var ConditionLookup = map[ComparisonOperator]ConditionResolver{
	Unary:              UnaryComparison,
	NotUnary:           NotUnaryComparison,
	Equals:             EqualsComparison,
	NotEquals:          NotEqualsComparison,
	LessThan:           LessThanComparison,
	GreaterThan:        GreaterThanComparison,
	LessThanOrEqual:    LessThanOrEqualComparison,
	GreaterThanOrEqual: GreaterThanOrEqualComparison,
	Contains:           ContainsComparison,
	// Fix: NotContains was missing, so evaluating a NotContains
	// condition called a nil function and panicked.
	NotContains: NotContainsComparison,
}
// Completable is a Verifiable whose right-hand side and operator can be
// filled in after construction (see ConditionGroup.Complete).
type Completable interface {
	Complete(value Value, operator ComparisonOperator)
	Verifiable
}

// Verifiable can be evaluated against a data map and can be inverted.
type Verifiable interface {
	IsTrue(data map[string]interface{}) bool
	Inverse()
}
// ConditionGroup represents a group of conditions joined by logical
// operators: joins[i] joins conditions[i] and conditions[i+1].
type ConditionGroup struct {
	conditions []*Condition
	joins []LogicalOperator
	inverse bool
}

// Inverse negates the result of the whole group.
func (g *ConditionGroup) Inverse() {
	g.inverse = true
}

// Complete fills in the right-hand side and operator of every condition.
// The value receiver is safe here: the slice header is copied but the
// *Condition pointers are shared with the original group.
func (g ConditionGroup) Complete(value Value, operator ComparisonOperator) {
	for _, condition := range g.conditions {
		condition.right = value
		condition.operator = operator
	}
}

// IsTrue evaluates the conditions left to right with short-circuiting:
// a true condition followed by OR (or at the end) makes the group true;
// a false condition joined by AND skips ahead past its AND-chain to the
// next OR. The final result is inverted when Inverse was called.
func (g *ConditionGroup) IsTrue(data map[string]interface{}) bool {
	l := len(g.conditions) - 1
	if l == 0 {
		// Single condition: no joins to consider.
		return g.realReturn(g.conditions[0].IsTrue(data))
	}
	for i := 0; i <= l; i++ {
		if g.conditions[i].IsTrue(data) {
			if i == l || g.joins[i] == OR {
				return g.realReturn(true)
			}
		} else if i != l && g.joins[i] == AND {
			// The AND-chain is dead; fast-forward to the next OR join.
			for ; i < l; i++ {
				if g.joins[i] == OR {
					break
				}
			}
		}
	}
	return g.realReturn(false)
}

// realReturn applies the group's inversion flag to the raw result.
func (g *ConditionGroup) realReturn(b bool) bool {
	if g.inverse {
		return !b
	}
	return b
}
// TrueCondition always evaluates to true (or always false once inverted).
type TrueCondition struct {
	inverse bool
}

// IsTrue ignores data and reports the (possibly inverted) constant.
func (t *TrueCondition) IsTrue(data map[string]interface{}) bool {
	return !t.inverse
}

// Inverse flips the condition to always-false.
func (t *TrueCondition) Inverse() {
	t.inverse = true
}
// Condition represents a single comparison (such as x == y).
type Condition struct {
	left Value
	operator ComparisonOperator
	right Value
}

// IsTrue resolves both sides against data and applies the operator's
// resolver from ConditionLookup.
func (c *Condition) IsTrue(data map[string]interface{}) bool {
	left := c.left.ResolveWithNil(data)
	var right interface{}
	if c.right != nil {
		// Unary operators have no right-hand side; right stays nil.
		right = c.right.ResolveWithNil(data)
	}
	return ConditionLookup[c.operator](left, right)
}
// UnaryComparison reports the "truthiness" of left; right is ignored.
// nil is false, booleans are themselves, strings and byte slices are
// true when non-empty, and every other value is true.
func UnaryComparison(left, right interface{}) bool {
	switch v := left.(type) {
	case nil:
		return false
	case bool:
		return v
	case string:
		return len(v) > 0
	case []byte:
		return len(v) > 0
	}
	return true
}
// NotUnaryComparison is the negation of UnaryComparison.
func NotUnaryComparison(left, right interface{}) bool {
	return !UnaryComparison(left, right)
}

// EqualsComparison reports whether left equals right after both are
// converted to a common Type. The special marker "liquid:empty" on the
// right compares left's length against zero. Unconvertible pairs are
// never equal.
func EqualsComparison(left, right interface{}) bool {
	if s, ok := right.(string); ok && s == "liquid:empty" {
		if n, ok := ToLength(left); ok {
			return n == 0
		}
		return false
	}
	var t Type
	if left, right, t = convertToSameType(left, right); t == Unknown {
		return false
	}
	return TypeOperations[t][Equals](left, right)
}

// NotEqualsComparison is the negation of EqualsComparison.
func NotEqualsComparison(left, right interface{}) bool {
	return !EqualsComparison(left, right)
}

// LessThanComparison reports left < right; unconvertible pairs are false.
func LessThanComparison(left, right interface{}) bool {
	var t Type
	if left, right, t = convertToSameType(left, right); t == Unknown {
		return false
	}
	return TypeOperations[t][LessThan](left, right)
}

// LessThanOrEqualComparison reports left <= right, derived from the
// primitive Equals and LessThan resolvers.
func LessThanOrEqualComparison(left, right interface{}) bool {
	var t Type
	if left, right, t = convertToSameType(left, right); t == Unknown {
		return false
	}
	return TypeOperations[t][Equals](left, right) || TypeOperations[t][LessThan](left, right)
}

// GreaterThanComparison reports left > right (neither equal nor less).
// Note: unconvertible pairs are false for every comparison, including
// this one — deliberate "unknown compares false" semantics.
func GreaterThanComparison(left, right interface{}) bool {
	var t Type
	if left, right, t = convertToSameType(left, right); t == Unknown {
		return false
	}
	return !TypeOperations[t][Equals](left, right) && !TypeOperations[t][LessThan](left, right)
}

// GreaterThanOrEqualComparison reports left >= right (not less).
func GreaterThanOrEqualComparison(left, right interface{}) bool {
	var t Type
	if left, right, t = convertToSameType(left, right); t == Unknown {
		return false
	}
	return !TypeOperations[t][LessThan](left, right)
}
// ContainsComparison reports whether left contains right: substring for
// strings/bytes, element membership for slices and arrays (fast paths
// for []string and []int, reflection otherwise), and key presence for
// maps. Anything else is false.
func ContainsComparison(left, right interface{}) bool {
	if s, ok := left.(string); ok {
		return strings.Contains(s, ToString(right))
	}
	if b, ok := left.([]byte); ok {
		return bytes.Contains(b, ToBytes(right))
	}
	// Fast path: string slice membership.
	if strs, ok := left.([]string); ok {
		needle := ToString(right)
		for i, l := 0, len(strs); i < l; i++ {
			if strs[i] == needle {
				return true
			}
		}
		return false
	}
	// Fast path: int slice membership.
	if n, ok := left.([]int); ok {
		needle, ok := ToInt(right)
		if ok == false {
			return false
		}
		for i, l := 0, len(n); i < l; i++ {
			if n[i] == needle {
				return true
			}
		}
		return false
	}
	// General slices/arrays: element-wise EqualsComparison.
	value := reflect.ValueOf(left)
	kind := value.Kind()
	if kind == reflect.Array || kind == reflect.Slice {
		l := value.Len()
		if l == 0 {
			return false
		}
		for i := 0; i < l; i++ {
			if EqualsComparison(value.Index(i).Interface(), right) {
				return true
			}
		}
		return false
	}
	// Maps: contains means "has key"; the key type must match exactly
	// (bytes are coerced to string first).
	if kind == reflect.Map {
		if value.Len() == 0 {
			return false
		}
		if b, ok := right.([]byte); ok {
			right = string(b)
		}
		rightValue := reflect.ValueOf(right)
		if rightValue.Type() == value.Type().Key() {
			return value.MapIndex(rightValue).IsValid()
		}
		return false
	}
	return false
}

// NotContainsComparison is the negation of ContainsComparison.
func NotContainsComparison(left, right interface{}) bool {
	return !ContainsComparison(left, right)
}
// convertToSameType coerces left and right to a common Go type and
// returns them together with the normalized Type tag; Unknown signals an
// incomparable pair. Strings/bytes dominate (the other side is
// stringified), then matching reflect kinds, then numeric widening.
func convertToSameType(left, right interface{}) (interface{}, interface{}, Type) {
	// Rely on the callers' resolvers to handle nil properly.
	if left == nil || right == nil {
		return left, right, Nil
	}
	// A string on either side forces string comparison (with the special
	// "today"/"now" handling in convertStringsToSameType).
	if s, ok := left.(string); ok {
		return convertStringsToSameType(s, right)
	} else if s, ok := right.(string); ok {
		return convertStringsToSameType(s, left)
	}
	if b, ok := left.([]byte); ok {
		return convertStringsToSameType(string(b), right)
	} else if b, ok := right.([]byte); ok {
		return convertStringsToSameType(string(b), left)
	}
	leftValue, rightValue := reflect.ValueOf(left), reflect.ValueOf(right)
	leftKind, rightKind := leftValue.Kind(), rightValue.Kind()
	// Same kind with a known mapping: compare as-is.
	if leftKind == rightKind {
		if t, ok := KindToType[leftKind]; ok {
			return left, right, t
		}
	}
	// Mixed numeric kinds: widen to a common numeric type.
	if left, right, t := convertNumbersToSameType(leftValue, leftKind, rightValue, rightKind); t != Unknown {
		return left, right, t
	}
	return left, right, Unknown
}

// convertStringsToSameType compares a string a against b. The literal
// strings "today" and "now" are treated as the current time when b is a
// time.Time; otherwise b is stringified.
// NOTE(review): when a string sits on the right, callers swap the
// operands, so ordering comparisons against mixed types may be reversed
// — confirm this is intended.
func convertStringsToSameType(a string, b interface{}) (interface{}, interface{}, Type) {
	if a == "today" {
		if t, ok := b.(time.Time); ok {
			return Now(), t, Today
		}
	} else if a == "now" {
		if t, ok := b.(time.Time); ok {
			return Now(), t, Time
		}
	}
	return a, ToString(b), String
}

// convertNumbersToSameType widens mixed int/float operands to int64,
// float64 or complex128; any other combination yields Unknown.
func convertNumbersToSameType(leftValue reflect.Value, leftKind reflect.Kind, rightValue reflect.Value, rightKind reflect.Kind) (interface{}, interface{}, Type) {
	if isInt(leftKind) {
		if isInt(rightKind) {
			return leftValue.Int(), rightValue.Int(), Int64
		} else if isFloat(rightKind) {
			return float64(leftValue.Int()), rightValue.Float(), Float64
		}
	} else if isFloat(leftKind) {
		if isInt(rightKind) {
			return leftValue.Float(), float64(rightValue.Int()), Float64
		} else if isFloat(rightKind) {
			return leftValue.Float(), rightValue.Float(), Float64
		}
	} else if isComplex(leftKind) && isComplex(rightKind) {
		return leftValue.Complex(), rightValue.Complex(), Complex128
	}
	return nil, nil, Unknown
}
func isInt(kind reflect.Kind) bool {
return kind == reflect.Int || kind == reflect.Int8 || kind == reflect.Int16 || kind == reflect.Int32 || kind == reflect.Int64
}
func isFloat(kind reflect.Kind) bool {
return kind == reflect.Float64 || kind == reflect.Float32
}
func isComplex(kind reflect.Kind) bool {
return kind == reflect.Complex128 || kind == reflect.Complex64
} | core/condition.go | 0.628065 | 0.503723 | condition.go | starcoder |
package game
import (
"fmt"
"image/color"
"math"
"math/rand"
"github.com/hajimehoshi/ebiten/v2"
"gonum.org/v1/gonum/mat"
)
// TetrominoShape enumerates the seven classic tetromino shapes.
type TetrominoShape int
const (
	ShapeI TetrominoShape = iota
	ShapeJ
	ShapeL
	ShapeS
	ShapeZ
	ShapeT
	ShapeO
)
// Tetromino represents the piece containing the individual tiles making
// up the shape and its tile colors: the board position (x, y), the shape
// tag, the shared tile color, the 0/1 occupancy matrix and the set of
// live Tile objects.
type Tetromino struct {
	x, y int
	shape TetrominoShape
	color color.Color
	matrix *mat.Dense
	tiles map[*Tile]struct{}
}
// NewRandomTetromino creates a new Tetromino with a randomly chosen
// shape and its Tiles.
// NOTE(review): rand.Intn(math.MaxInt) % 7 is slightly biased versus
// rand.Intn(7); left as-is because math.MaxInt is this file's only use
// of the math import.
func NewRandomTetromino() *Tetromino {
	randShape := TetrominoShape(rand.Intn(math.MaxInt) % 7)
	return NewTetromino(randShape)
}
// NewTetromino creates a Tetromino of the given shape at the board
// origin and builds its Tiles; it panics on an unhandled shape value.
func NewTetromino(shape TetrominoShape) *Tetromino {
	tetro := &Tetromino{
		x: 0,
		y: 0,
		shape: shape,
		tiles: map[*Tile]struct{}{},
	}
	// Each init function sets the color and occupancy matrix, then
	// materializes the tiles.
	switch shape {
	case ShapeI:
		tetro.initTetrominoI()
	case ShapeJ:
		tetro.initTetrominoJ()
	case ShapeL:
		tetro.initTetrominoL()
	case ShapeS:
		tetro.initTetrominoS()
	case ShapeZ:
		tetro.initTetrominoZ()
	case ShapeT:
		tetro.initTetrominoT()
	case ShapeO:
		tetro.initTetrominoO()
	default:
		panic(fmt.Errorf("unhandled shape: %v", shape))
	}
	return tetro
}
// Update advances the animation state of every tile, returning the first
// error reported by a tile, if any.
func (t *Tetromino) Update() error {
	for tile := range t.tiles {
		if err := tile.Update(); err != nil {
			return err
		}
	}
	return nil
}
// Draw renders every tile of this Tetromino onto the given boardImage.
func (t *Tetromino) Draw(boardImage *ebiten.Image) {
	for tile := range t.tiles {
		tile.Draw(boardImage)
	}
}
// IsOutOfBounds reports whether any tile lies outside a board of
// boardCols x boardRows cells.
func (t *Tetromino) IsOutOfBounds(boardCols, boardRows int) bool {
	for tile := range t.tiles {
		x, y := tile.Pos()
		if x < 0 || y < 0 || x >= boardCols || y >= boardRows {
			return true
		}
	}
	return false
}
// IsMoving reports whether any tile of the piece is still animating.
func (t *Tetromino) IsMoving() bool {
	for tile := range t.tiles {
		if tile.IsMoving() {
			return true
		}
	}
	return false
}
// ShiftPositionBy moves the Tetromino origin and all of its tiles by the
// given delta X/Y.
func (t *Tetromino) ShiftPositionBy(dx, dy int) {
	t.x, t.y = t.x+dx, t.y+dy
	t.shiftTilesBy(dx, dy)
}

// shiftTilesBy moves every tile by the given delta X/Y without touching
// the piece origin.
func (t *Tetromino) shiftTilesBy(dx, dy int) {
	for tile := range t.tiles {
		tile.x, tile.y = tile.x+dx, tile.y+dy
	}
}
// RotatePositionCW rotates the piece ClockWise by rotating the occupancy
// matrix and rebuilding the tiles at the piece's position.
func (t *Tetromino) RotatePositionCW() {
	t.matrix = RotateMatrixCW(t.matrix)
	t.updateTilesFromMatrix()
	// put tiles back in position of Tetromino
	t.shiftTilesBy(t.x, t.y)
}

// RotatePositionCCW rotates the piece CounterClockWise, analogously to
// RotatePositionCW.
func (t *Tetromino) RotatePositionCCW() {
	t.matrix = RotateMatrixCCW(t.matrix)
	t.updateTilesFromMatrix()
	// put tiles back in position of Tetromino
	t.shiftTilesBy(t.x, t.y)
}
// MatrixDims returns the raw matrix dimensions of the shape, or (0, 0)
// when no matrix has been initialized.
func (t *Tetromino) MatrixDims() (r int, c int) {
	if t.matrix == nil {
		return 0, 0
	}
	return t.matrix.Dims()
}

// MatrixData returns the raw row-major matrix data of the shape, or nil
// when no matrix has been initialized.
func (t *Tetromino) MatrixData() []float64 {
	if t.matrix == nil {
		return nil
	}
	return t.matrix.RawMatrix().Data
}
// MatrixTilePositions returns the set of occupied tile positions of the
// shape, with y measured upward from the bottom row of the matrix.
func (t *Tetromino) MatrixTilePositions() map[TilePosition]struct{} {
	rows, cols := t.MatrixDims()
	positions := map[TilePosition]struct{}{}
	rData := t.MatrixData()
	for r := 0; r < rows; r++ {
		// Use the bottom of the matrix as the Y origin instead of the top.
		y := rows - r - 1
		rowMult := r * cols
		for x := 0; x < cols; x++ {
			// A cell value of 1 marks an occupied tile.
			// (Removed the dead tIndex counter that was incremented but
			// never read.)
			if rData[rowMult+x] == 1 {
				positions[TilePosition{x: x, y: y}] = struct{}{}
			}
		}
	}
	return positions
}
// stopAnimation halts the animation of every tile in the piece.
func (t *Tetromino) stopAnimation() {
	for tile := range t.tiles {
		tile.stopAnimation()
	}
}
// updateTilesFromMatrix rebuilds the tiles map from the occupancy
// matrix, discarding any existing tiles. New tiles are created at
// matrix-local coordinates; callers re-shift them to the piece position.
func (t *Tetromino) updateTilesFromMatrix() {
	t.tiles = map[*Tile]struct{}{}
	tPositions := t.MatrixTilePositions()
	for tPos := range tPositions {
		tile := &Tile{
			x: tPos.x,
			y: tPos.y,
			tetro: t,
			poppingCount: maxPoppingCount,
		}
		t.tiles[tile] = struct{}{}
	}
}
// initalize matrix shape as I piece
func (t *Tetromino) initTetrominoI() {
// Tealish #00A691
t.color = color.RGBA{0x00, 0xA6, 0x91, 0xff}
dI := []float64{
1,
1,
1,
1,
}
t.matrix = mat.NewDense(4, 1, dI)
t.updateTilesFromMatrix()
}
// initalize matrix shape as J piece
func (t *Tetromino) initTetrominoJ() {
// Purplish #AD42EB
t.color = color.RGBA{0xAD, 0x42, 0xEB, 0xff}
dJ := []float64{
0, 1,
0, 1,
1, 1,
}
t.matrix = mat.NewDense(3, 2, dJ)
t.updateTilesFromMatrix()
}
// initalize matrix shape as L piece
func (t *Tetromino) initTetrominoL() {
// Greenish #68FA3F
t.color = color.RGBA{0x68, 0xFA, 0x3F, 0xff}
dL := []float64{
1, 0,
1, 0,
1, 1,
}
t.matrix = mat.NewDense(3, 2, dL)
t.updateTilesFromMatrix()
}
// initalize matrix shape as S piece
func (t *Tetromino) initTetrominoS() {
// Pinkish #FF2FA8
t.color = color.RGBA{0xFF, 0x2F, 0xA8, 0xff}
dS := []float64{
0, 1, 1,
1, 1, 0,
}
t.matrix = mat.NewDense(2, 3, dS)
t.updateTilesFromMatrix()
}
// initalize matrix shape as Z piece
func (t *Tetromino) initTetrominoZ() {
// Cyanish #00FFFA
t.color = color.RGBA{0x00, 0xFF, 0xFA, 0xff}
dZ := []float64{
1, 1, 0,
0, 1, 1,
}
t.matrix = mat.NewDense(2, 3, dZ)
t.updateTilesFromMatrix()
}
// initalize matrix shape as T piece
func (t *Tetromino) initTetrominoT() {
// Orangeish #FF8500
t.color = color.RGBA{0xFF, 0x85, 0x00, 0xff}
dT := []float64{
1, 1, 1,
0, 1, 0,
}
t.matrix = mat.NewDense(2, 3, dT)
t.updateTilesFromMatrix()
}
// initalize matrix shape as Square piece
func (t *Tetromino) initTetrominoO() {
// Reddish #FF1E1E
t.color = color.RGBA{0xFF, 0x1E, 0x1E, 0xff}
dSq := []float64{
1, 1,
1, 1,
}
t.matrix = mat.NewDense(2, 2, dSq)
t.updateTilesFromMatrix()
} | game/tetro.go | 0.773815 | 0.529446 | tetro.go | starcoder |
package main
/*
1. think of pre-processing steps: sort, arrange the data, index the data, prefix sums!
2. split into small functions which you will implement later
3. solution scanning and offer alternatives (always talk about complexity in space and time)
1. pattern matching (find similar problems)
2. simplify and generalize (start with a simpler problem)
3. iterate through programming paradigms (greedy, divide and conquer, dynamic programming)
4. iterate through all data structures (lists, arrays, stacks, queues, heap, hash, tree, trie, bloom filter, union_find)
5. try free primitive and see if you make progress (sorting, bfs, dfs, strongly connected components, shortest path)
4. BUD optimisation:
1. bottleneck
2. unnecessary work
3. duplicate work
5. identify pain points: array indices, loop termination conditions.
*/
import "fmt"
// Solution returns the starting index of the contiguous slice (length
// >= 2) of A with the minimal average.
func Solution(A []int) int {
	// recur indexes pref[p-1] for interior windows, so inputs shorter
	// than two elements would panic; guard them explicitly.
	if len(A) < 2 {
		return 0
	}
	pref := prefixes(A)
	_, minp := recur(pref, 0, len(A)-1)
	return minp
}
// recur recursively narrows the window [p, q] toward the sub-window with
// the minimal average, returning that minimum and its starting index.
//
// NOTE(review): the fmt.Println below looks like leftover debug output;
// removing it would also require dropping the then-unused "fmt" import.
func recur(pref []int, p, q int) (min float64, minp int) {
	// Base case: a two-element window.
	if p + 1 == q {
		return avg(pref, p, q), p
	}
	all := avg(pref, p, q)
	left := avg(pref, p, q - 1)
	right := avg(pref, p + 1, q)
	fmt.Println(">>>>>", p, q, all, left, right)
	if all < minimum(left, right) {
		return all, p
	} else if left < minimum(all, right) {
		return recur(pref, p, q - 1)
	} else if right < minimum(all, left) {
		return recur(pref, p + 1, q)
	} else { // all equal
		return all, p
	}
}
func prefixes(A []int) []int {
prefixes := make([]int, len(A))
for i, a := range A {
prefixes[i] = a
if i > 0 {
prefixes[i] += prefixes[i-1]
}
}
return prefixes
}
// avg returns the arithmetic mean of A[p..q] given the prefix sums pref.
func avg(pref []int, p, q int) float64 {
	total := pref[q]
	if p > 0 {
		total -= pref[p-1]
	}
	return float64(total) / float64(q-p+1)
}
// minimum returns the smaller of l and r (r when they are equal,
// matching the original comparison direction).
func minimum(l, r float64) float64 {
	if !(l < r) {
		return r
	}
	return l
}
/*
func Solution(A []int) int {
prefixes := make([]int, len(A))
for i, a := range A {
prefixes[i] = a
if i > 0 {
prefixes[i] += prefixes[i-1]
}
}
p, q := 0, len(A) -1
min := prefixes(q)
minp := 0
for p < q {
avg := (prefixes[q] - prefixes[p]) / (q - p + 1)
if avg < min {
min = avg
minp = p
}
lavg :=
ravg :=
}
return 0
}
*/
func main() {} | go/interview/codility/5_4_min_avg_two_slice/main.go | 0.605799 | 0.657098 | main.go | starcoder |
package man
import (
. "github.com/gocircuit/circuit/gocircuit.org/render"
)
// RenderMetaphorPage renders the "Programming metaphor" documentation
// page as a complete HTML document.
func RenderMetaphorPage() string {
	return RenderHtml("Programming metaphor", Render(metaphorBody, nil))
}

// metaphorBody is the raw HTML body of the page, passed through the
// Render template pipeline above.
const metaphorBody = `
<h1>Programming metaphor </h1>
<p>The purpose of each circuit server is to host a collection of control
primitives, called <em>elements</em>, on behalf of the user. On each server the
hosted elements are organized in a hierarchy (similarly to the file system in
Apache Zookeeper), whose nodes are called <em>anchors</em>. Anchors (akin to file
system directories) have names and each anchor can host one circuit element or
be empty.
<p>The hierarchies of all servers are logically unified by a global circuit root
anchor, whose children are the individual circuit server hierarchies. A
typical anchor path looks like this
<pre>
	/X317c2314a386a9db/hi/charlie
</pre>
<p>The first component of the path is the ID of the circuit server hosting the leaf anchor.
<p>Except for the circuit root anchor (which does not correspond to any
particular circuit server), all other anchors can store a <em>process</em> or a
<em>channel</em> element, at most one, and additionally can have any number of sub-
anchors. In a way, anchors are like directories that can have any number of
subdirectories, but at most one file.
<p>Creating and interacting with circuit elements is the mechanism through which
the user controls and reflects on their distributed application.
This can be accomplished by means of the included Go client library, or using
the command-line tool embodied in the circuit executable itself.
<p>Process elements are used to execute, monitor and synchronize OS-level
processes at the hosting circuit server. They allow visibility and control
over OS processes from any machine in the circuit cluster, regardless
of the physical location of the underlying OS process.
<p>Channel elements are a synchronization primitive, similar to the channels in Go,
whose send and receive sides are accessible from any location in the
circuit cluster, while their data structure lives on the circuit server hosting
their anchor.
`
package main
import (
"encoding/json"
"errors"
"io/ioutil"
"strings"
"time"
"github.com/piquette/finance-go/quote"
)
// Portfolio instance variables must begin with capital letter to be exported by json
type Portfolio struct {
	Name string
	// Must contain `.json`
	Directory string
	// the sum Equity and Cash
	Value float64
	// The sum of all stocks cost
	Cost float64
	// The sum of all stocks equity
	Equity float64
	// TotalGainLoss equals Equity - Cost
	TotalGainLoss float64
	// TotalGainLossPrcnt is the percentage form of TotalGainLoss
	TotalGainLossPrcnt float64
	// amount of uninvested cash
	Cash float64
	// stock ticker is key, Stock object is value
	Positions map[string]Stock
	// date is key, list of entry object is value
	// date format is MM-DD-YYYY (Go layout "01-02-2006", see Buy)
	History map[string][]Entry
}
// Stock instance variables must begin with capital letter to be exported by json
type Stock struct {
	// CompanyName string
	Shares float64
	// Average price each share was bought at
	AvgPrice float64
	// retrieved from GoogleFinance/yahoo finance
	LatestPrice float64
	// Amount of Cash invested in stock
	TotalCost float64
	// latest price times the number of shares
	Equity float64
	// GainLoss is the Equity - TotalCost
	GainLoss float64
	// GainLossPrcnt is the percentage of earning/loss per share
	// GainLossPrcnt = [(LatestPrice / AvgPrice) - 1] * 100
	GainLossPrcnt float64
	// PrcntOfPort is how much stock equity makes up from total portfolio value
	PrcntOfPort float64
}
// Entry instance variables must begin with capital letter to be exported by json.
// Entry records one transaction in the portfolio history.
type Entry struct {
	// Type of operation ("Buy", "Sell", "Deposit", "Withdraw")
	Type string
	// Ticker symbol, or "N/A" for cash operations (see Deposit/Withdraw)
	Ticker string
	// Number of shares traded; for cash operations this holds the cash amount
	Shares float64
	// Price per share; 1.0 for cash operations
	UnitPrice float64
	// Shares * UnitPrice
	OrderTotal float64
}
// getStock returns a pointer to a copy of the position stored under ticker.
// Because maps yield copies, callers must write the (possibly modified)
// value back into port.Positions for changes to persist.
func (port *Portfolio) getStock(ticker string) *Stock {
	s := port.Positions[ticker]
	return &s
}
// Buy purchases `shares` of `ticker` at `price` per share.
// It deducts the order total from cash, recomputes the position's average
// price, records a Buy entry under today's date (MM-DD-YYYY) and refreshes
// derived portfolio figures. Panics (via Check) on non-positive arguments
// or insufficient funds.
func (port *Portfolio) Buy(ticker string, shares float64, price float64) {
	port.RefreshData()
	// Reject nonsensical orders up front; Check panics, halting the order.
	if shares <= 0 || price <= 0 {
		Check(errors.New("Can't execute order: shares and price must be positive"))
	}
	orderTotal := shares * price
	// Ensure there is enough cash to cover the order.
	if port.Cash < orderTotal {
		Check(errors.New("Can't execute order: insufficient funds"))
	}
	// Work on a copy of the position; written back below.
	stock := port.getStock(ticker)
	stock.Shares += shares
	stock.TotalCost += orderTotal
	// New average price over all shares now held.
	stock.AvgPrice = stock.TotalCost / stock.Shares
	port.Positions[ticker] = *stock
	port.Cash -= orderTotal
	// Record the transaction under today's date.
	today := time.Now().Format("01-02-2006")
	newEntry := Entry{"Buy", ticker, shares, price, orderTotal}
	port.History[today] = append(port.History[today], newEntry)
	port.RefreshData()
}
// Sell disposes of `shares` of `ticker` at `price` per share.
// It credits the order total to cash, reduces the position's cost basis to
// cover only the remaining shares, records a Sell entry under today's date
// (MM-DD-YYYY) and refreshes derived figures. Panics (via Check) on
// non-positive arguments or when selling more shares than are held.
func (port *Portfolio) Sell(ticker string, shares float64, price float64) {
	port.RefreshData()
	// Reject nonsensical orders up front; Check panics, halting the order.
	if shares <= 0 || price <= 0 {
		Check(errors.New("Can't execute order: shares and price must be positive"))
	}
	orderTotal := shares * price
	// Work on a copy of the position; written back below.
	stock := port.getStock(ticker)
	// Cannot sell more shares than are currently held.
	if stock.Shares < shares {
		Check(errors.New("Can't sell non-existent shares"))
	}
	stock.Shares -= shares
	// Cost basis now reflects only the remaining shares.
	stock.TotalCost = stock.Shares * stock.AvgPrice
	port.Positions[ticker] = *stock
	port.Cash += orderTotal
	// Record the transaction under today's date.
	today := time.Now().Format("01-02-2006")
	newEntry := Entry{"Sell", ticker, shares, price, orderTotal}
	port.History[today] = append(port.History[today], newEntry)
	port.RefreshData()
}
// Deposit adds cash to the portfolio's balance and total value, records a
// Deposit entry under today's date (MM-DD-YYYY), and refreshes derived
// figures. Panics (via Check) when the amount is zero or negative.
func (port *Portfolio) Deposit(cash float64) {
	if cash <= 0 {
		Check(errors.New("Invalid deposit amount, zero or negative"))
	}
	port.Cash += cash
	port.Value += cash
	// Log the deposit; cash operations use ticker "N/A" and unit price 1.0.
	today := time.Now().Format("01-02-2006")
	entry := Entry{"Deposit", "N/A", cash, 1.0, cash}
	port.History[today] = append(port.History[today], entry)
	port.RefreshData()
}
// Withdraw removes cash from the portfolio's balance and total value,
// records a Withdraw entry under today's date (MM-DD-YYYY), and refreshes
// derived figures. Panics (via Check) when the amount is zero, negative,
// or exceeds the available cash balance.
func (port *Portfolio) Withdraw(cash float64) {
	if cash <= 0 {
		// Fixed typo in the error message ("witdraw" -> "withdraw").
		Check(errors.New("Invalid withdraw amount, zero or negative"))
	}
	if cash > port.Cash {
		Check(errors.New("Withdraw amount can't exceed portfolio balance"))
	}
	port.Cash -= cash
	port.Value -= cash
	// Log the withdrawal; cash operations use ticker "N/A" and unit price 1.0.
	today := time.Now().Format("01-02-2006")
	newEntry := Entry{"Withdraw", "N/A", cash, 1.0, cash}
	port.History[today] = append(port.History[today], newEntry)
	port.RefreshData()
}
// RefreshData recalculates all derived figures: each position's latest
// price, equity and gain/loss, the portfolio-level totals, and each
// position's fraction of total portfolio value. Positions whose share
// count has dropped to zero are removed. Divisions are guarded so an
// empty cost basis or zero value yields 0 instead of +Inf/NaN.
func (port *Portfolio) RefreshData() {
	// Nothing to recompute for a portfolio without positions.
	if len(port.Positions) == 0 {
		return
	}
	totalCost := 0.0
	totalEquity := 0.0
	// First pass: refresh per-stock figures and accumulate totals.
	// Deleting from a map while ranging over it is safe in Go.
	for ticker := range port.Positions {
		currentStock := port.getStock(ticker)
		if currentStock.Shares == 0 {
			// Position fully sold; drop it.
			delete(port.Positions, ticker)
			continue
		}
		// Latest market price from the finance quote package.
		currentStock.LatestPrice = getStockData(ticker)
		currentStock.Equity = currentStock.Shares * currentStock.LatestPrice
		// Fractional gain/loss; guard against a zero average price.
		if currentStock.AvgPrice != 0 {
			currentStock.GainLossPrcnt = (currentStock.LatestPrice / currentStock.AvgPrice) - 1
		} else {
			currentStock.GainLossPrcnt = 0
		}
		currentStock.GainLoss = currentStock.Equity - currentStock.TotalCost
		port.Positions[ticker] = *currentStock
		totalEquity += currentStock.Equity
		totalCost += currentStock.TotalCost
	}
	// Portfolio-level aggregates.
	port.Cost = totalCost
	port.Equity = totalEquity
	port.TotalGainLoss = port.Equity - port.Cost
	if port.Cost != 0 {
		port.TotalGainLossPrcnt = (port.Equity / port.Cost) - 1
	} else {
		port.TotalGainLossPrcnt = 0
	}
	port.Value = port.Cash + totalEquity
	// Second pass: each position's fraction of total portfolio value,
	// which is only known once port.Value has been computed above.
	for ticker := range port.Positions {
		currentStock := port.getStock(ticker)
		if port.Value != 0 {
			currentStock.PrcntOfPort = currentStock.Equity / port.Value
		} else {
			currentStock.PrcntOfPort = 0
		}
		port.Positions[ticker] = *currentStock
	}
}
// StoreData serializes the Portfolio as JSON into ./portfolio/<Directory>,
// overwriting any previous contents. Directory is assumed to contain `.json`.
// Panics (via Check) if marshalling or writing fails — the marshal error
// was previously discarded.
func (port *Portfolio) StoreData() {
	data, err := json.Marshal(port)
	Check(err)
	Check(ioutil.WriteFile("./portfolio/"+port.Directory, data, 0644))
}
// Check function is exported
func Check(e error) {
if e != nil {
panic(e)
}
}
// getStockData fetches the latest regular-market price for ticker
// (case-insensitive) from the finance-go quote package.
// Panics (via Check) on lookup errors or when no quote is returned.
func getStockData(ticker string) float64 {
	q, err := quote.Get(strings.ToUpper(ticker))
	Check(err)
	// Guard against a nil quote returned without an error (unknown symbol).
	if q == nil {
		Check(errors.New("Stock does not exist"))
	}
	return q.RegularMarketPrice
}
// TODO: add Clear(): erases everything on json and writes `{}`
package schema
const v2841 = `{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "VES Event Listener",
"type": "object",
"properties": {
"event": {
"$ref": "#/definitions/event"
},
"eventList": {
"$ref": "#/definitions/eventList"
}
},
"definitions": {
"schemaHeaderBlock": {
"description": "schema date, version, author and associated API",
"type": "object",
"properties": {
"associatedApi": {
"description": "VES Event Listener",
"type": "string"
},
"lastUpdatedBy": {
"description": "re2947",
"type": "string"
},
"schemaDate": {
"description": "September 19, 2017",
"type": "string"
},
"schemaVersion": {
"description": "28.4.1",
"type": "number"
}
}
},
"schemaLicenseAndCopyrightNotice": {
"description": "Copyright (c) 2017, AT&T Intellectual Property. All rights reserved",
"type": "object",
"properties": {
"apacheLicense2.0": {
"description": "Licensed under the Apache License, Version 2.0 (the 'License'); you may not use this file except in compliance with the License. You may obtain a copy of the License at:",
"type": "string"
},
"licenseUrl": {
"description": "http://www.apache.org/licenses/LICENSE-2.0",
"type": "string"
},
"asIsClause": {
"description": "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"type": "string"
},
"permissionsAndLimitations": {
"description": "See the License for the specific language governing permissions and limitations under the License.",
"type": "string"
}
}
},
"codecsInUse": {
"description": "number of times an identified codec was used over the measurementInterval",
"type": "object",
"properties": {
"codecIdentifier": {
"type": "string"
},
"numberInUse": {
"type": "integer"
}
},
"required": [
"codecIdentifier",
"numberInUse"
]
},
"command": {
"description": "command from an event collector toward an event source",
"type": "object",
"properties": {
"commandType": {
"type": "string",
"enum": [
"heartbeatIntervalChange",
"measurementIntervalChange",
"provideThrottlingState",
"throttlingSpecification"
]
},
"eventDomainThrottleSpecification": {
"$ref": "#/definitions/eventDomainThrottleSpecification"
},
"heartbeatInterval": {
"type": "integer"
},
"measurementInterval": {
"type": "integer"
}
},
"required": [
"commandType"
]
},
"commandList": {
"description": "array of commands from an event collector toward an event source",
"type": "array",
"items": {
"$ref": "#/definitions/command"
},
"minItems": 0
},
"commonEventHeader": {
"description": "fields common to all events",
"type": "object",
"properties": {
"domain": {
"description": "the eventing domain associated with the event",
"type": "string",
"enum": [
"fault",
"heartbeat",
"measurementsForVfScaling",
"mobileFlow",
"other",
"sipSignaling",
"stateChange",
"syslog",
"thresholdCrossingAlert",
"voiceQuality"
]
},
"eventId": {
"description": "event key that is unique to the event source",
"type": "string"
},
"eventName": {
"description": "unique event name",
"type": "string"
},
"eventType": {
"description": "for example - applicationVnf, guestOS, hostOS, platform",
"type": "string"
},
"internalHeaderFields": {
"$ref": "#/definitions/internalHeaderFields"
},
"lastEpochMicrosec": {
"description": "the latest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
"type": "number"
},
"nfcNamingCode": {
"description": "3 character network function component type, aligned with vfc naming standards",
"type": "string"
},
"nfNamingCode": {
"description": "4 character network function type, aligned with vnf naming standards",
"type": "string"
},
"priority": {
"description": "processing priority",
"type": "string",
"enum": [
"High",
"Medium",
"Normal",
"Low"
]
},
"reportingEntityId": {
"description": "UUID identifying the entity reporting the event, for example an OAM VM; must be populated by the ATT enrichment process",
"type": "string"
},
"reportingEntityName": {
"description": "name of the entity reporting the event, for example, an EMS name; may be the same as sourceName",
"type": "string"
},
"sequence": {
"description": "ordering of events communicated by an event source instance or 0 if not needed",
"type": "integer"
},
"sourceId": {
"description": "UUID identifying the entity experiencing the event issue; must be populated by the ATT enrichment process",
"type": "string"
},
"sourceName": {
"description": "name of the entity experiencing the event issue",
"type": "string"
},
"startEpochMicrosec": {
"description": "the earliest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
"type": "number"
},
"version": {
"description": "version of the event header",
"type": "number"
}
},
"required": [
"domain",
"eventId",
"eventName",
"lastEpochMicrosec",
"priority",
"reportingEntityName",
"sequence",
"sourceName",
"startEpochMicrosec",
"version"
]
},
"counter": {
"description": "performance counter",
"type": "object",
"properties": {
"criticality": {
"type": "string",
"enum": [
"CRIT",
"MAJ"
]
},
"name": {
"type": "string"
},
"thresholdCrossed": {
"type": "string"
},
"value": {
"type": "string"
}
},
"required": [
"criticality",
"name",
"thresholdCrossed",
"value"
]
},
"cpuUsage": {
"description": "usage of an identified CPU",
"type": "object",
"properties": {
"cpuIdentifier": {
"description": "cpu identifer",
"type": "string"
},
"cpuIdle": {
"description": "percentage of CPU time spent in the idle task",
"type": "number"
},
"cpuUsageInterrupt": {
"description": "percentage of time spent servicing interrupts",
"type": "number"
},
"cpuUsageNice": {
"description": "percentage of time spent running user space processes that have been niced",
"type": "number"
},
"cpuUsageSoftIrq": {
"description": "percentage of time spent handling soft irq interrupts",
"type": "number"
},
"cpuUsageSteal": {
"description": "percentage of time spent in involuntary wait which is neither user, system or idle time and is effectively time that went missing",
"type": "number"
},
"cpuUsageSystem": {
"description": "percentage of time spent on system tasks running the kernel",
"type": "number"
},
"cpuUsageUser": {
"description": "percentage of time spent running un-niced user space processes",
"type": "number"
},
"cpuWait": {
"description": "percentage of CPU time spent waiting for I/O operations to complete",
"type": "number"
},
"percentUsage": {
"description": "aggregate cpu usage of the virtual machine on which the VNFC reporting the event is running",
"type": "number"
}
},
"required": [
"cpuIdentifier",
"percentUsage"
]
},
"diskUsage": {
"description": "usage of an identified disk",
"type": "object",
"properties": {
"diskIdentifier": {
"description": "disk identifier",
"type": "string"
},
"diskIoTimeAvg": {
"description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the average over the measurement interval",
"type": "number"
},
"diskIoTimeLast": {
"description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the last value measurement within the measurement interval",
"type": "number"
},
"diskIoTimeMax": {
"description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the maximum value measurement within the measurement interval",
"type": "number"
},
"diskIoTimeMin": {
"description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the minimum value measurement within the measurement interval",
"type": "number"
},
"diskMergedReadAvg": {
"description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the average measurement within the measurement interval",
"type": "number"
},
"diskMergedReadLast": {
"description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the last value measurement within the measurement interval",
"type": "number"
},
"diskMergedReadMax": {
"description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the maximum value measurement within the measurement interval",
"type": "number"
},
"diskMergedReadMin": {
"description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the minimum value measurement within the measurement interval",
"type": "number"
},
"diskMergedWriteAvg": {
"description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the average measurement within the measurement interval",
"type": "number"
},
"diskMergedWriteLast": {
"description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the last value measurement within the measurement interval",
"type": "number"
},
"diskMergedWriteMax": {
"description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the maximum value measurement within the measurement interval",
"type": "number"
},
"diskMergedWriteMin": {
"description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the minimum value measurement within the measurement interval",
"type": "number"
},
"diskOctetsReadAvg": {
"description": "number of octets per second read from a disk or partition; provide the average measurement within the measurement interval",
"type": "number"
},
"diskOctetsReadLast": {
"description": "number of octets per second read from a disk or partition; provide the last measurement within the measurement interval",
"type": "number"
},
"diskOctetsReadMax": {
"description": "number of octets per second read from a disk or partition; provide the maximum measurement within the measurement interval",
"type": "number"
},
"diskOctetsReadMin": {
"description": "number of octets per second read from a disk or partition; provide the minimum measurement within the measurement interval",
"type": "number"
},
"diskOctetsWriteAvg": {
"description": "number of octets per second written to a disk or partition; provide the average measurement within the measurement interval",
"type": "number"
},
"diskOctetsWriteLast": {
"description": "number of octets per second written to a disk or partition; provide the last measurement within the measurement interval",
"type": "number"
},
"diskOctetsWriteMax": {
"description": "number of octets per second written to a disk or partition; provide the maximum measurement within the measurement interval",
"type": "number"
},
"diskOctetsWriteMin": {
"description": "number of octets per second written to a disk or partition; provide the minimum measurement within the measurement interval",
"type": "number"
},
"diskOpsReadAvg": {
"description": "number of read operations per second issued to the disk; provide the average measurement within the measurement interval",
"type": "number"
},
"diskOpsReadLast": {
"description": "number of read operations per second issued to the disk; provide the last measurement within the measurement interval",
"type": "number"
},
"diskOpsReadMax": {
"description": "number of read operations per second issued to the disk; provide the maximum measurement within the measurement interval",
"type": "number"
},
"diskOpsReadMin": {
"description": "number of read operations per second issued to the disk; provide the minimum measurement within the measurement interval",
"type": "number"
},
"diskOpsWriteAvg": {
"description": "number of write operations per second issued to the disk; provide the average measurement within the measurement interval",
"type": "number"
},
"diskOpsWriteLast": {
"description": "number of write operations per second issued to the disk; provide the last measurement within the measurement interval",
"type": "number"
},
"diskOpsWriteMax": {
"description": "number of write operations per second issued to the disk; provide the maximum measurement within the measurement interval",
"type": "number"
},
"diskOpsWriteMin": {
"description": "number of write operations per second issued to the disk; provide the minimum measurement within the measurement interval",
"type": "number"
},
"diskPendingOperationsAvg": {
"description": "queue size of pending I/O operations per second; provide the average measurement within the measurement interval",
"type": "number"
},
"diskPendingOperationsLast": {
"description": "queue size of pending I/O operations per second; provide the last measurement within the measurement interval",
"type": "number"
},
"diskPendingOperationsMax": {
"description": "queue size of pending I/O operations per second; provide the maximum measurement within the measurement interval",
"type": "number"
},
"diskPendingOperationsMin": {
"description": "queue size of pending I/O operations per second; provide the minimum measurement within the measurement interval",
"type": "number"
},
"diskTimeReadAvg": {
"description": "milliseconds a read operation took to complete; provide the average measurement within the measurement interval",
"type": "number"
},
"diskTimeReadLast": {
"description": "milliseconds a read operation took to complete; provide the last measurement within the measurement interval",
"type": "number"
},
"diskTimeReadMax": {
"description": "milliseconds a read operation took to complete; provide the maximum measurement within the measurement interval",
"type": "number"
},
"diskTimeReadMin": {
"description": "milliseconds a read operation took to complete; provide the minimum measurement within the measurement interval",
"type": "number"
},
"diskTimeWriteAvg": {
"description": "milliseconds a write operation took to complete; provide the average measurement within the measurement interval",
"type": "number"
},
"diskTimeWriteLast": {
"description": "milliseconds a write operation took to complete; provide the last measurement within the measurement interval",
"type": "number"
},
"diskTimeWriteMax": {
"description": "milliseconds a write operation took to complete; provide the maximum measurement within the measurement interval",
"type": "number"
},
"diskTimeWriteMin": {
"description": "milliseconds a write operation took to complete; provide the minimum measurement within the measurement interval",
"type": "number"
}
},
"required": [
"diskIdentifier"
]
},
"endOfCallVqmSummaries": {
"description": "provides end of call voice quality metrics",
"type": "object",
"properties": {
"adjacencyName": {
"description": " adjacency name",
"type": "string"
},
"endpointDescription": {
"description": "Either Caller or Callee",
"type": "string",
"enum": [
"Caller",
"Callee"
]
},
"endpointJitter": {
"description": "",
"type": "number"
},
"endpointRtpOctetsDiscarded": {
"description": "",
"type": "number"
},
"endpointRtpOctetsReceived": {
"description": "",
"type": "number"
},
"endpointRtpOctetsSent": {
"description": "",
"type": "number"
},
"endpointRtpPacketsDiscarded": {
"description": "",
"type": "number"
},
"endpointRtpPacketsReceived": {
"description": "",
"type": "number"
},
"endpointRtpPacketsSent": {
"description": "",
"type": "number"
},
"localJitter": {
"description": "",
"type": "number"
},
"localRtpOctetsDiscarded": {
"description": "",
"type": "number"
},
"localRtpOctetsReceived": {
"description": "",
"type": "number"
},
"localRtpOctetsSent": {
"description": "",
"type": "number"
},
"localRtpPacketsDiscarded": {
"description": "",
"type": "number"
},
"localRtpPacketsReceived": {
"description": "",
"type": "number"
},
"localRtpPacketsSent": {
"description": "",
"type": "number"
},
"mosCqe": {
"description": "1-5 1dp",
"type": "number"
},
"packetsLost": {
"description": "",
"type": "number"
},
"packetLossPercent": {
"description": "Calculated percentage packet loss based on Endpoint RTP packets lost (as reported in RTCP) and Local RTP packets sent. Direction is based on Endpoint description (Caller, Callee). Decimal (2 dp)",
"type": "number"
},
"rFactor": {
"description": "0-100",
"type": "number"
},
"roundTripDelay": {
"description": "millisecs",
"type": "number"
}
},
"required": [
"adjacencyName",
"endpointDescription"
]
},
"event": {
"description": "the root level of the common event format",
"type": "object",
"properties": {
"commonEventHeader": {
"$ref": "#/definitions/commonEventHeader"
},
"faultFields": {
"$ref": "#/definitions/faultFields"
},
"heartbeatFields": {
"$ref": "#/definitions/heartbeatFields"
},
"measurementsForVfScalingFields": {
"$ref": "#/definitions/measurementsForVfScalingFields"
},
"mobileFlowFields": {
"$ref": "#/definitions/mobileFlowFields"
},
"otherFields": {
"$ref": "#/definitions/otherFields"
},
"sipSignalingFields": {
"$ref": "#/definitions/sipSignalingFields"
},
"stateChangeFields": {
"$ref": "#/definitions/stateChangeFields"
},
"syslogFields": {
"$ref": "#/definitions/syslogFields"
},
"thresholdCrossingAlertFields": {
"$ref": "#/definitions/thresholdCrossingAlertFields"
},
"voiceQualityFields": {
"$ref": "#/definitions/voiceQualityFields"
}
},
"required": [
"commonEventHeader"
]
},
"eventDomainThrottleSpecification": {
"description": "specification of what information to suppress within an event domain",
"type": "object",
"properties": {
"eventDomain": {
"description": "Event domain enum from the commonEventHeader domain field",
"type": "string"
},
"suppressedFieldNames": {
"description": "List of optional field names in the event block that should not be sent to the Event Listener",
"type": "array",
"items": {
"type": "string"
}
},
"suppressedNvPairsList": {
"description": "Optional list of specific NvPairsNames to suppress within a given Name-Value Field",
"type": "array",
"items": {
"$ref": "#/definitions/suppressedNvPairs"
}
}
},
"required": [
"eventDomain"
]
},
"eventDomainThrottleSpecificationList": {
"description": "array of eventDomainThrottleSpecifications",
"type": "array",
"items": {
"$ref": "#/definitions/eventDomainThrottleSpecification"
},
"minItems": 0
},
"eventList": {
"description": "array of events",
"type": "array",
"items": {
"$ref": "#/definitions/event"
}
},
"eventThrottlingState": {
"description": "reports the throttling in force at the event source",
"type": "object",
"properties": {
"eventThrottlingMode": {
"description": "Mode the event manager is in",
"type": "string",
"enum": [
"normal",
"throttled"
]
},
"eventDomainThrottleSpecificationList": {
"$ref": "#/definitions/eventDomainThrottleSpecificationList"
}
},
"required": [
"eventThrottlingMode"
]
},
"faultFields": {
"description": "fields specific to fault events",
"type": "object",
"properties": {
"alarmAdditionalInformation": {
"description": "additional alarm information",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"alarmCondition": {
"description": "alarm condition reported by the device",
"type": "string"
},
"alarmInterfaceA": {
"description": "card, port, channel or interface name of the device generating the alarm",
"type": "string"
},
"eventCategory": {
"description": "Event category, for example: license, link, routing, security, signaling",
"type": "string"
},
"eventSeverity": {
"description": "event severity",
"type": "string",
"enum": [
"CRITICAL",
"MAJOR",
"MINOR",
"WARNING",
"NORMAL"
]
},
"eventSourceType": {
"description": "type of event source; examples: card, host, other, port, portThreshold, router, slotThreshold, switch, virtualMachine, virtualNetworkFunction",
"type": "string"
},
"faultFieldsVersion": {
"description": "version of the faultFields block",
"type": "number"
},
"specificProblem": {
"description": "short description of the alarm or problem",
"type": "string"
},
"vfStatus": {
"description": "virtual function status enumeration",
"type": "string",
"enum": [
"Active",
"Idle",
"Preparing to terminate",
"Ready to terminate",
"Requesting termination"
]
}
},
"required": [
"alarmCondition",
"eventSeverity",
"eventSourceType",
"faultFieldsVersion",
"specificProblem",
"vfStatus"
]
},
"featuresInUse": {
"description": "number of times an identified feature was used over the measurementInterval",
"type": "object",
"properties": {
"featureIdentifier": {
"type": "string"
},
"featureUtilization": {
"type": "integer"
}
},
"required": [
"featureIdentifier",
"featureUtilization"
]
},
"field": {
"description": "name value pair",
"type": "object",
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
},
"required": [
"name",
"value"
]
},
"filesystemUsage": {
"description": "disk usage of an identified virtual machine in gigabytes and/or gigabytes per second",
"type": "object",
"properties": {
"blockConfigured": {
"type": "number"
},
"blockIops": {
"type": "number"
},
"blockUsed": {
"type": "number"
},
"ephemeralConfigured": {
"type": "number"
},
"ephemeralIops": {
"type": "number"
},
"ephemeralUsed": {
"type": "number"
},
"filesystemName": {
"type": "string"
}
},
"required": [
"blockConfigured",
"blockIops",
"blockUsed",
"ephemeralConfigured",
"ephemeralIops",
"ephemeralUsed",
"filesystemName"
]
},
"gtpPerFlowMetrics": {
"description": "Mobility GTP Protocol per flow metrics",
"type": "object",
"properties": {
"avgBitErrorRate": {
"description": "average bit error rate",
"type": "number"
},
"avgPacketDelayVariation": {
"description": "Average packet delay variation or jitter in milliseconds for received packets: Average difference between the packet timestamp and time received for all pairs of consecutive packets",
"type": "number"
},
"avgPacketLatency": {
"description": "average delivery latency",
"type": "number"
},
"avgReceiveThroughput": {
"description": "average receive throughput",
"type": "number"
},
"avgTransmitThroughput": {
"description": "average transmit throughput",
"type": "number"
},
"durConnectionFailedStatus": {
"description": "duration of failed state in milliseconds, computed as the cumulative time between a failed echo request and the next following successful error request, over this reporting interval",
"type": "number"
},
"durTunnelFailedStatus": {
"description": "Duration of errored state, computed as the cumulative time between a tunnel error indicator and the next following non-errored indicator, over this reporting interval",
"type": "number"
},
"flowActivatedBy": {
"description": "Endpoint activating the flow",
"type": "string"
},
"flowActivationEpoch": {
"description": "Time the connection is activated in the flow (connection) being reported on, or transmission time of the first packet if activation time is not available",
"type": "number"
},
"flowActivationMicrosec": {
"description": "Integer microseconds for the start of the flow connection",
"type": "number"
},
"flowActivationTime": {
"description": "time the connection is activated in the flow being reported on, or transmission time of the first packet if activation time is not available; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
"type": "string"
},
"flowDeactivatedBy": {
"description": "Endpoint deactivating the flow",
"type": "string"
},
"flowDeactivationEpoch": {
"description": "Time for the start of the flow connection, in integer UTC epoch time aka UNIX time",
"type": "number"
},
"flowDeactivationMicrosec": {
"description": "Integer microseconds for the start of the flow connection",
"type": "number"
},
"flowDeactivationTime": {
"description": "Transmission time of the first packet in the flow connection being reported on; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
"type": "string"
},
"flowStatus": {
"description": "connection status at reporting time as a working / inactive / failed indicator value",
"type": "string"
},
"gtpConnectionStatus": {
"description": "Current connection state at reporting time",
"type": "string"
},
"gtpTunnelStatus": {
"description": "Current tunnel state at reporting time",
"type": "string"
},
"ipTosCountList": {
"description": "array of key: value pairs where the keys are drawn from the IP Type-of-Service identifiers which range from '0' to '255', and the values are the count of packets that had those ToS identifiers in the flow",
"type": "array",
"items": {
"type": "array",
"items": [
{
"type": "string"
},
{
"type": "number"
}
]
}
},
"ipTosList": {
"description": "Array of unique IP Type-of-Service values observed in the flow where values range from '0' to '255'",
"type": "array",
"items": {
"type": "string"
}
},
"largePacketRtt": {
"description": "large packet round trip time",
"type": "number"
},
"largePacketThreshold": {
"description": "large packet threshold being applied",
"type": "number"
},
"maxPacketDelayVariation": {
"description": "Maximum packet delay variation or jitter in milliseconds for received packets: Maximum of the difference between the packet timestamp and time received for all pairs of consecutive packets",
"type": "number"
},
"maxReceiveBitRate": {
"description": "maximum receive bit rate",
"type": "number"
},
"maxTransmitBitRate": {
"description": "maximum transmit bit rate",
"type": "number"
},
"mobileQciCosCountList": {
"description": "array of key: value pairs where the keys are drawn from LTE QCI or UMTS class of service strings, and the values are the count of packets that had those strings in the flow",
"type": "array",
"items": {
"type": "array",
"items": [
{
"type": "string"
},
{
"type": "number"
}
]
}
},
"mobileQciCosList": {
"description": "Array of unique LTE QCI or UMTS class-of-service values observed in the flow",
"type": "array",
"items": {
"type": "string"
}
},
"numActivationFailures": {
"description": "Number of failed activation requests, as observed by the reporting node",
"type": "number"
},
"numBitErrors": {
"description": "number of errored bits",
"type": "number"
},
"numBytesReceived": {
"description": "number of bytes received, including retransmissions",
"type": "number"
},
"numBytesTransmitted": {
"description": "number of bytes transmitted, including retransmissions",
"type": "number"
},
"numDroppedPackets": {
"description": "number of received packets dropped due to errors per virtual interface",
"type": "number"
},
"numGtpEchoFailures": {
"description": "Number of Echo request path failures where failed paths are defined in 3GPP TS 29.281 sec 7.2.1 and 3GPP TS 29.060 sec. 11.2",
"type": "number"
},
"numGtpTunnelErrors": {
"description": "Number of tunnel error indications where errors are defined in 3GPP TS 29.281 sec 7.3.1 and 3GPP TS 29.060 sec. 11.1",
"type": "number"
},
"numHttpErrors": {
"description": "Http error count",
"type": "number"
},
"numL7BytesReceived": {
"description": "number of tunneled layer 7 bytes received, including retransmissions",
"type": "number"
},
"numL7BytesTransmitted": {
"description": "number of tunneled layer 7 bytes transmitted, excluding retransmissions",
"type": "number"
},
"numLostPackets": {
"description": "number of lost packets",
"type": "number"
},
"numOutOfOrderPackets": {
"description": "number of out-of-order packets",
"type": "number"
},
"numPacketErrors": {
"description": "number of errored packets",
"type": "number"
},
"numPacketsReceivedExclRetrans": {
"description": "number of packets received, excluding retransmission",
"type": "number"
},
"numPacketsReceivedInclRetrans": {
"description": "number of packets received, including retransmission",
"type": "number"
},
"numPacketsTransmittedInclRetrans": {
"description": "number of packets transmitted, including retransmissions",
"type": "number"
},
"numRetries": {
"description": "number of packet retries",
"type": "number"
},
"numTimeouts": {
"description": "number of packet timeouts",
"type": "number"
},
"numTunneledL7BytesReceived": {
"description": "number of tunneled layer 7 bytes received, excluding retransmissions",
"type": "number"
},
"roundTripTime": {
"description": "round trip time",
"type": "number"
},
"tcpFlagCountList": {
"description": "array of key: value pairs where the keys are drawn from TCP Flags and the values are the count of packets that had that TCP Flag in the flow",
"type": "array",
"items": {
"type": "array",
"items": [
{
"type": "string"
},
{
"type": "number"
}
]
}
},
"tcpFlagList": {
"description": "Array of unique TCP Flags observed in the flow",
"type": "array",
"items": {
"type": "string"
}
},
"timeToFirstByte": {
"description": "Time in milliseconds between the connection activation and first byte received",
"type": "number"
}
},
"required": [
"avgBitErrorRate",
"avgPacketDelayVariation",
"avgPacketLatency",
"avgReceiveThroughput",
"avgTransmitThroughput",
"flowActivationEpoch",
"flowActivationMicrosec",
"flowDeactivationEpoch",
"flowDeactivationMicrosec",
"flowDeactivationTime",
"flowStatus",
"maxPacketDelayVariation",
"numActivationFailures",
"numBitErrors",
"numBytesReceived",
"numBytesTransmitted",
"numDroppedPackets",
"numL7BytesReceived",
"numL7BytesTransmitted",
"numLostPackets",
"numOutOfOrderPackets",
"numPacketErrors",
"numPacketsReceivedExclRetrans",
"numPacketsReceivedInclRetrans",
"numPacketsTransmittedInclRetrans",
"numRetries",
"numTimeouts",
"numTunneledL7BytesReceived",
"roundTripTime",
"timeToFirstByte"
]
},
"heartbeatFields": {
"description": "optional field block for fields specific to heartbeat events",
"type": "object",
"properties": {
"additionalFields": {
"description": "additional heartbeat fields if needed",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"heartbeatFieldsVersion": {
"description": "version of the heartbeatFields block",
"type": "number"
},
"heartbeatInterval": {
"description": "current heartbeat interval in seconds",
"type": "integer"
}
},
"required": [
"heartbeatFieldsVersion",
"heartbeatInterval"
]
},
"internalHeaderFields": {
"description": "enrichment fields for internal VES Event Listener service use only, not supplied by event sources",
"type": "object"
},
"jsonObject": {
"description": "json object schema, name and other meta-information along with one or more object instances",
"type": "object",
"properties": {
"objectInstances": {
"description": "one or more instances of the jsonObject",
"type": "array",
"items": {
"$ref": "#/definitions/jsonObjectInstance"
}
},
"objectName": {
"description": "name of the JSON Object",
"type": "string"
},
"objectSchema": {
"description": "json schema for the object",
"type": "string"
},
"objectSchemaUrl": {
"description": "Url to the json schema for the object",
"type": "string"
},
        "nfSubscribedObjectName": {
          "description": "name of the object associated with the nfSubscriptionId",
          "type": "string"
        },
"nfSubscriptionId": {
"description": "identifies an openConfig telemetry subscription on a network function, which configures the network function to send complex object data associated with the jsonObject",
"type": "string"
}
},
"required": [
"objectInstances",
"objectName"
]
},
"jsonObjectInstance": {
"description": "meta-information about an instance of a jsonObject along with the actual object instance",
"type": "object",
"properties": {
"objectInstance": {
"description": "an instance conforming to the jsonObject schema",
"type": "object"
},
"objectInstanceEpochMicrosec": {
"description": "the unix time aka epoch time associated with this objectInstance--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
"type": "number"
},
"objectKeys": {
"description": "an ordered set of keys that identifies this particular instance of jsonObject",
"type": "array",
"items": {
"$ref": "#/definitions/key"
}
}
},
"required": [
"objectInstance"
]
},
"key": {
"description": "tuple which provides the name of a key along with its value and relative order",
"type": "object",
"properties": {
"keyName": {
"description": "name of the key",
"type": "string"
},
"keyOrder": {
"description": "relative sequence or order of the key with respect to other keys",
"type": "integer"
},
"keyValue": {
"description": "value of the key",
"type": "string"
}
},
"required": [
"keyName"
]
},
"latencyBucketMeasure": {
"description": "number of counts falling within a defined latency bucket",
"type": "object",
"properties": {
"countsInTheBucket": {
"type": "number"
},
"highEndOfLatencyBucket": {
"type": "number"
},
"lowEndOfLatencyBucket": {
"type": "number"
}
},
"required": [
"countsInTheBucket"
]
},
"measurementsForVfScalingFields": {
"description": "measurementsForVfScaling fields",
"type": "object",
"properties": {
"additionalFields": {
"description": "additional name-value-pair fields",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"additionalMeasurements": {
"description": "array of named name-value-pair arrays",
"type": "array",
"items": {
"$ref": "#/definitions/namedArrayOfFields"
}
},
"additionalObjects": {
"description": "array of JSON objects described by name, schema and other meta-information",
"type": "array",
"items": {
"$ref": "#/definitions/jsonObject"
}
},
"codecUsageArray": {
"description": "array of codecs in use",
"type": "array",
"items": {
"$ref": "#/definitions/codecsInUse"
}
},
"concurrentSessions": {
"description": "peak concurrent sessions for the VM or VNF over the measurementInterval",
"type": "integer"
},
"configuredEntities": {
"description": "over the measurementInterval, peak total number of: users, subscribers, devices, adjacencies, etc., for the VM, or subscribers, devices, etc., for the VNF",
"type": "integer"
},
"cpuUsageArray": {
"description": "usage of an array of CPUs",
"type": "array",
"items": {
"$ref": "#/definitions/cpuUsage"
}
},
"diskUsageArray": {
"description": "usage of an array of disks",
"type": "array",
"items": {
"$ref": "#/definitions/diskUsage"
}
},
"featureUsageArray": {
"description": "array of features in use",
"type": "array",
"items": {
"$ref": "#/definitions/featuresInUse"
}
},
"filesystemUsageArray": {
"description": "filesystem usage of the VM on which the VNFC reporting the event is running",
"type": "array",
"items": {
"$ref": "#/definitions/filesystemUsage"
}
},
"latencyDistribution": {
"description": "array of integers representing counts of requests whose latency in milliseconds falls within per-VNF configured ranges",
"type": "array",
"items": {
"$ref": "#/definitions/latencyBucketMeasure"
}
},
"meanRequestLatency": {
"description": "mean seconds required to respond to each request for the VM on which the VNFC reporting the event is running",
"type": "number"
},
"measurementInterval": {
"description": "interval over which measurements are being reported in seconds",
"type": "number"
},
"measurementsForVfScalingVersion": {
"description": "version of the measurementsForVfScaling block",
"type": "number"
},
"memoryUsageArray": {
"description": "memory usage of an array of VMs",
"type": "array",
"items": {
"$ref": "#/definitions/memoryUsage"
}
},
"numberOfMediaPortsInUse": {
"description": "number of media ports in use",
"type": "integer"
},
"requestRate": {
"description": "peak rate of service requests per second to the VNF over the measurementInterval",
"type": "number"
},
"vnfcScalingMetric": {
"description": "represents busy-ness of the VNF from 0 to 100 as reported by the VNFC",
"type": "integer"
},
"vNicPerformanceArray": {
"description": "usage of an array of virtual network interface cards",
"type": "array",
"items": {
"$ref": "#/definitions/vNicPerformance"
}
}
},
"required": [
"measurementInterval",
"measurementsForVfScalingVersion"
]
},
"memoryUsage": {
"description": "memory usage of an identified virtual machine",
"type": "object",
"properties": {
"memoryBuffered": {
"description": "kibibytes of temporary storage for raw disk blocks",
"type": "number"
},
"memoryCached": {
"description": "kibibytes of memory used for cache",
"type": "number"
},
"memoryConfigured": {
"description": "kibibytes of memory configured in the virtual machine on which the VNFC reporting the event is running",
"type": "number"
},
"memoryFree": {
"description": "kibibytes of physical RAM left unused by the system",
"type": "number"
},
"memorySlabRecl": {
"description": "the part of the slab that can be reclaimed such as caches measured in kibibytes",
"type": "number"
},
"memorySlabUnrecl": {
"description": "the part of the slab that cannot be reclaimed even when lacking memory measured in kibibytes",
"type": "number"
},
"memoryUsed": {
"description": "total memory minus the sum of free, buffered, cached and slab memory measured in kibibytes",
"type": "number"
},
"vmIdentifier": {
"description": "virtual machine identifier associated with the memory metrics",
"type": "string"
}
},
"required": [
"memoryFree",
"memoryUsed",
"vmIdentifier"
]
},
"mobileFlowFields": {
"description": "mobileFlow fields",
"type": "object",
"properties": {
"additionalFields": {
"description": "additional mobileFlow fields if needed",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"applicationType": {
"description": "Application type inferred",
"type": "string"
},
"appProtocolType": {
"description": "application protocol",
"type": "string"
},
"appProtocolVersion": {
"description": "application protocol version",
"type": "string"
},
"cid": {
"description": "cell id",
"type": "string"
},
"connectionType": {
"description": "Abbreviation referencing a 3GPP reference point e.g., S1-U, S11, etc",
"type": "string"
},
"ecgi": {
"description": "Evolved Cell Global Id",
"type": "string"
},
"flowDirection": {
"description": "Flow direction, indicating if the reporting node is the source of the flow or destination for the flow",
"type": "string"
},
"gtpPerFlowMetrics": {
"$ref": "#/definitions/gtpPerFlowMetrics"
},
"gtpProtocolType": {
"description": "GTP protocol",
"type": "string"
},
"gtpVersion": {
"description": "GTP protocol version",
"type": "string"
},
"httpHeader": {
"description": "HTTP request header, if the flow connects to a node referenced by HTTP",
"type": "string"
},
"imei": {
"description": "IMEI for the subscriber UE used in this flow, if the flow connects to a mobile device",
"type": "string"
},
"imsi": {
"description": "IMSI for the subscriber UE used in this flow, if the flow connects to a mobile device",
"type": "string"
},
"ipProtocolType": {
"description": "IP protocol type e.g., TCP, UDP, RTP...",
"type": "string"
},
"ipVersion": {
"description": "IP protocol version e.g., IPv4, IPv6",
"type": "string"
},
"lac": {
"description": "location area code",
"type": "string"
},
"mcc": {
"description": "mobile country code",
"type": "string"
},
"mnc": {
"description": "mobile network code",
"type": "string"
},
"mobileFlowFieldsVersion": {
"description": "version of the mobileFlowFields block",
"type": "number"
},
"msisdn": {
"description": "MSISDN for the subscriber UE used in this flow, as an integer, if the flow connects to a mobile device",
"type": "string"
},
"otherEndpointIpAddress": {
"description": "IP address for the other endpoint, as used for the flow being reported on",
"type": "string"
},
"otherEndpointPort": {
"description": "IP Port for the reporting entity, as used for the flow being reported on",
"type": "integer"
},
"otherFunctionalRole": {
"description": "Functional role of the other endpoint for the flow being reported on e.g., MME, S-GW, P-GW, PCRF...",
"type": "string"
},
"rac": {
"description": "routing area code",
"type": "string"
},
"radioAccessTechnology": {
"description": "Radio Access Technology e.g., 2G, 3G, LTE",
"type": "string"
},
"reportingEndpointIpAddr": {
"description": "IP address for the reporting entity, as used for the flow being reported on",
"type": "string"
},
"reportingEndpointPort": {
"description": "IP port for the reporting entity, as used for the flow being reported on",
"type": "integer"
},
"sac": {
"description": "service area code",
"type": "string"
},
"samplingAlgorithm": {
"description": "Integer identifier for the sampling algorithm or rule being applied in calculating the flow metrics if metrics are calculated based on a sample of packets, or 0 if no sampling is applied",
"type": "integer"
},
"tac": {
"description": "transport area code",
"type": "string"
},
"tunnelId": {
"description": "tunnel identifier",
"type": "string"
},
"vlanId": {
"description": "VLAN identifier used by this flow",
"type": "string"
}
},
"required": [
"flowDirection",
"gtpPerFlowMetrics",
"ipProtocolType",
"ipVersion",
"mobileFlowFieldsVersion",
"otherEndpointIpAddress",
"otherEndpointPort",
"reportingEndpointIpAddr",
"reportingEndpointPort"
]
},
"namedArrayOfFields": {
"description": "an array of name value pairs along with a name for the array",
"type": "object",
"properties": {
"name": {
"type": "string"
},
"arrayOfFields": {
"description": "array of name value pairs",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
}
},
"required": [
"name",
"arrayOfFields"
]
},
"otherFields": {
"description": "fields for events belonging to the 'other' domain of the commonEventHeader domain enumeration",
"type": "object",
"properties": {
"hashOfNameValuePairArrays": {
"description": "array of named name-value-pair arrays",
"type": "array",
"items": {
"$ref": "#/definitions/namedArrayOfFields"
}
},
"jsonObjects": {
"description": "array of JSON objects described by name, schema and other meta-information",
"type": "array",
"items": {
"$ref": "#/definitions/jsonObject"
}
},
"nameValuePairs": {
"description": "array of name-value pairs",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"otherFieldsVersion": {
"description": "version of the otherFields block",
"type": "number"
}
},
"required": [
"otherFieldsVersion"
]
},
"requestError": {
"description": "standard request error data structure",
"type": "object",
"properties": {
"messageId": {
"description": "Unique message identifier of the format ABCnnnn where ABC is either SVC for Service Exceptions or POL for Policy Exception",
"type": "string"
},
"text": {
"description": "Message text, with replacement variables marked with %n, where n is an index into the list of <variables> elements, starting at 1",
"type": "string"
},
"url": {
"description": "Hyperlink to a detailed error resource e.g., an HTML page for browser user agents",
"type": "string"
},
"variables": {
"description": "List of zero or more strings that represent the contents of the variables used by the message text",
"type": "string"
}
},
"required": [
"messageId",
"text"
]
},
"sipSignalingFields": {
"description": "sip signaling fields",
"type": "object",
"properties": {
"additionalInformation": {
"description": "additional sip signaling fields if needed",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"compressedSip": {
"description": "the full SIP request/response including headers and bodies",
"type": "string"
},
"correlator": {
"description": "this is the same for all events on this call",
"type": "string"
},
"localIpAddress": {
"description": "IP address on VNF",
"type": "string"
},
"localPort": {
"description": "port on VNF",
"type": "string"
},
"remoteIpAddress": {
"description": "IP address of peer endpoint",
"type": "string"
},
"remotePort": {
"description": "port of peer endpoint",
"type": "string"
},
"sipSignalingFieldsVersion": {
"description": "version of the sipSignalingFields block",
"type": "number"
},
"summarySip": {
"description": "the SIP Method or Response (‘INVITE’, ‘200 OK’, ‘BYE’, etc)",
"type": "string"
},
"vendorVnfNameFields": {
"$ref": "#/definitions/vendorVnfNameFields"
}
},
"required": [
"correlator",
"localIpAddress",
"localPort",
"remoteIpAddress",
"remotePort",
"sipSignalingFieldsVersion",
"vendorVnfNameFields"
]
},
"stateChangeFields": {
"description": "stateChange fields",
"type": "object",
"properties": {
"additionalFields": {
"description": "additional stateChange fields if needed",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"newState": {
"description": "new state of the entity",
"type": "string",
"enum": [
"inService",
"maintenance",
"outOfService"
]
},
"oldState": {
"description": "previous state of the entity",
"type": "string",
"enum": [
"inService",
"maintenance",
"outOfService"
]
},
"stateChangeFieldsVersion": {
"description": "version of the stateChangeFields block",
"type": "number"
},
"stateInterface": {
"description": "card or port name of the entity that changed state",
"type": "string"
}
},
"required": [
"newState",
"oldState",
"stateChangeFieldsVersion",
"stateInterface"
]
},
"suppressedNvPairs": {
"description": "List of specific NvPairsNames to suppress within a given Name-Value Field for event Throttling",
"type": "object",
"properties": {
"nvPairFieldName": {
"description": "Name of the field within which are the nvpair names to suppress",
"type": "string"
},
"suppressedNvPairNames": {
"description": "Array of nvpair names to suppress within the nvpairFieldName",
"type": "array",
"items": {
"type": "string"
}
}
},
"required": [
"nvPairFieldName",
"suppressedNvPairNames"
]
},
"syslogFields": {
"description": "sysLog fields",
"type": "object",
"properties": {
"additionalFields": {
"description": "additional syslog fields if needed provided as name=value delimited by a pipe ‘|’ symbol, for example: 'name1=value1|name2=value2|…'",
"type": "string"
},
"eventSourceHost": {
"description": "hostname of the device",
"type": "string"
},
"eventSourceType": {
"description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction",
"type": "string"
},
"syslogFacility": {
"description": "numeric code from 0 to 23 for facility--see table in documentation",
"type": "integer"
},
"syslogFieldsVersion": {
"description": "version of the syslogFields block",
"type": "number"
},
"syslogMsg": {
"description": "syslog message",
"type": "string"
},
"syslogPri": {
"description": "0-192 combined severity and facility",
"type": "integer"
},
"syslogProc": {
"description": "identifies the application that originated the message",
"type": "string"
},
"syslogProcId": {
"description": "a change in the value of this field indicates a discontinuity in syslog reporting",
"type": "number"
},
"syslogSData": {
"description": "syslog structured data consisting of a structured data Id followed by a set of key value pairs",
"type": "string"
},
"syslogSdId": {
"description": "0-32 char in format name@number for example ourSDID@32473",
"type": "string"
},
        "syslogSev": {
          "description": "numerical Code for severity derived from syslogPri as remainder of syslogPri / 8",
          "type": "string",
"enum": [
"Alert",
"Critical",
"Debug",
"Emergency",
"Error",
"Info",
"Notice",
"Warning"
]
},
"syslogTag": {
"description": "msgId indicating the type of message such as TCPOUT or TCPIN; NILVALUE should be used when no other value can be provided",
"type": "string"
},
"syslogVer": {
"description": "IANA assigned version of the syslog protocol specification - typically 1",
"type": "number"
}
},
"required": [
"eventSourceType",
"syslogFieldsVersion",
"syslogMsg",
"syslogTag"
]
},
"thresholdCrossingAlertFields": {
"description": "fields specific to threshold crossing alert events",
"type": "object",
"properties": {
"additionalFields": {
"description": "additional threshold crossing alert fields if needed",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"additionalParameters": {
"description": "performance counters",
"type": "array",
"items": {
"$ref": "#/definitions/counter"
}
},
"alertAction": {
"description": "Event action",
"type": "string",
"enum": [
"CLEAR",
"CONT",
"SET"
]
},
"alertDescription": {
"description": "Unique short alert description such as IF-SHUB-ERRDROP",
"type": "string"
},
"alertType": {
"description": "Event type",
"type": "string",
"enum": [
"CARD-ANOMALY",
"ELEMENT-ANOMALY",
"INTERFACE-ANOMALY",
"SERVICE-ANOMALY"
]
},
"alertValue": {
"description": "Calculated API value (if applicable)",
"type": "string"
},
"associatedAlertIdList": {
"description": "List of eventIds associated with the event being reported",
"type": "array",
"items": {
"type": "string"
}
},
"collectionTimestamp": {
"description": "Time when the performance collector picked up the data; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
"type": "string"
},
"dataCollector": {
"description": "Specific performance collector instance used",
"type": "string"
},
"elementType": {
"description": "type of network element - internal ATT field",
"type": "string"
},
"eventSeverity": {
"description": "event severity or priority",
"type": "string",
"enum": [
"CRITICAL",
"MAJOR",
"MINOR",
"WARNING",
"NORMAL"
]
},
"eventStartTimestamp": {
"description": "Time closest to when the measurement was made; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
"type": "string"
},
"interfaceName": {
"description": "Physical or logical port or card (if applicable)",
"type": "string"
},
"networkService": {
"description": "network name - internal ATT field",
"type": "string"
},
"possibleRootCause": {
"description": "Reserved for future use",
"type": "string"
},
"thresholdCrossingFieldsVersion": {
"description": "version of the thresholdCrossingAlertFields block",
"type": "number"
}
},
"required": [
"additionalParameters",
"alertAction",
"alertDescription",
"alertType",
"collectionTimestamp",
"eventSeverity",
"eventStartTimestamp",
"thresholdCrossingFieldsVersion"
]
},
"vendorVnfNameFields": {
"description": "provides vendor, vnf and vfModule identifying information",
"type": "object",
"properties": {
"vendorName": {
"description": "VNF vendor name",
"type": "string"
},
"vfModuleName": {
"description": "ASDC vfModuleName for the vfModule generating the event",
"type": "string"
},
"vnfName": {
"description": "ASDC modelName for the VNF generating the event",
"type": "string"
}
},
"required": [
"vendorName"
]
},
"vNicPerformance": {
"description": "describes the performance and errors of an identified virtual network interface card",
"type": "object",
"properties": {
"receivedBroadcastPacketsAccumulated": {
"description": "Cumulative count of broadcast packets received as read at the end of the measurement interval",
"type": "number"
},
"receivedBroadcastPacketsDelta": {
"description": "Count of broadcast packets received within the measurement interval",
"type": "number"
},
"receivedDiscardedPacketsAccumulated": {
"description": "Cumulative count of discarded packets received as read at the end of the measurement interval",
"type": "number"
},
"receivedDiscardedPacketsDelta": {
"description": "Count of discarded packets received within the measurement interval",
"type": "number"
},
"receivedErrorPacketsAccumulated": {
"description": "Cumulative count of error packets received as read at the end of the measurement interval",
"type": "number"
},
"receivedErrorPacketsDelta": {
"description": "Count of error packets received within the measurement interval",
"type": "number"
},
"receivedMulticastPacketsAccumulated": {
"description": "Cumulative count of multicast packets received as read at the end of the measurement interval",
"type": "number"
},
"receivedMulticastPacketsDelta": {
"description": "Count of multicast packets received within the measurement interval",
"type": "number"
},
"receivedOctetsAccumulated": {
"description": "Cumulative count of octets received as read at the end of the measurement interval",
"type": "number"
},
"receivedOctetsDelta": {
"description": "Count of octets received within the measurement interval",
"type": "number"
},
"receivedTotalPacketsAccumulated": {
"description": "Cumulative count of all packets received as read at the end of the measurement interval",
"type": "number"
},
"receivedTotalPacketsDelta": {
"description": "Count of all packets received within the measurement interval",
"type": "number"
},
"receivedUnicastPacketsAccumulated": {
"description": "Cumulative count of unicast packets received as read at the end of the measurement interval",
"type": "number"
},
"receivedUnicastPacketsDelta": {
"description": "Count of unicast packets received within the measurement interval",
"type": "number"
},
"transmittedBroadcastPacketsAccumulated": {
"description": "Cumulative count of broadcast packets transmitted as read at the end of the measurement interval",
"type": "number"
},
"transmittedBroadcastPacketsDelta": {
"description": "Count of broadcast packets transmitted within the measurement interval",
"type": "number"
},
"transmittedDiscardedPacketsAccumulated": {
"description": "Cumulative count of discarded packets transmitted as read at the end of the measurement interval",
"type": "number"
},
"transmittedDiscardedPacketsDelta": {
"description": "Count of discarded packets transmitted within the measurement interval",
"type": "number"
},
"transmittedErrorPacketsAccumulated": {
"description": "Cumulative count of error packets transmitted as read at the end of the measurement interval",
"type": "number"
},
"transmittedErrorPacketsDelta": {
"description": "Count of error packets transmitted within the measurement interval",
"type": "number"
},
"transmittedMulticastPacketsAccumulated": {
"description": "Cumulative count of multicast packets transmitted as read at the end of the measurement interval",
"type": "number"
},
"transmittedMulticastPacketsDelta": {
"description": "Count of multicast packets transmitted within the measurement interval",
"type": "number"
},
"transmittedOctetsAccumulated": {
"description": "Cumulative count of octets transmitted as read at the end of the measurement interval",
"type": "number"
},
"transmittedOctetsDelta": {
"description": "Count of octets transmitted within the measurement interval",
"type": "number"
},
"transmittedTotalPacketsAccumulated": {
"description": "Cumulative count of all packets transmitted as read at the end of the measurement interval",
"type": "number"
},
"transmittedTotalPacketsDelta": {
"description": "Count of all packets transmitted within the measurement interval",
"type": "number"
},
"transmittedUnicastPacketsAccumulated": {
"description": "Cumulative count of unicast packets transmitted as read at the end of the measurement interval",
"type": "number"
},
"transmittedUnicastPacketsDelta": {
"description": "Count of unicast packets transmitted within the measurement interval",
"type": "number"
},
        "valuesAreSuspect": {
          "description": "Indicates whether vNicPerformance values are likely inaccurate due to counter overflow or other conditions",
          "type": "string",
"enum": [
"true",
"false"
]
},
"vNicIdentifier": {
"description": "vNic identification",
"type": "string"
}
},
"required": [
"valuesAreSuspect",
"vNicIdentifier"
]
},
"voiceQualityFields": {
"description": "provides statistics related to customer facing voice products",
"type": "object",
"properties": {
"additionalInformation": {
"description": "additional voice quality fields if needed",
"type": "array",
"items": {
"$ref": "#/definitions/field"
}
},
"calleeSideCodec": {
"description": "callee codec for the call",
"type": "string"
},
"callerSideCodec": {
"description": "caller codec for the call",
"type": "string"
},
"correlator": {
"description": "this is the same for all events on this call",
"type": "string"
},
"endOfCallVqmSummaries": {
"$ref": "#/definitions/endOfCallVqmSummaries"
},
"phoneNumber": {
"description": "phone number associated with the correlator",
"type": "string"
},
"midCallRtcp": {
"description": "Base64 encoding of the binary RTCP data excluding Eth/IP/UDP headers",
"type": "string"
},
"vendorVnfNameFields": {
"$ref": "#/definitions/vendorVnfNameFields"
},
"voiceQualityFieldsVersion": {
"description": "version of the voiceQualityFields block",
"type": "number"
}
},
"required": [
"calleeSideCodec",
"callerSideCodec",
"correlator",
"midCallRtcp",
"vendorVnfNameFields",
"voiceQualityFieldsVersion"
]
}
}
}`
// _v2841 caches the parsed VES v28.4.1 schema, built once at package init.
var _v2841 *JSONSchema
// init parses the embedded schema text (v2841) into a JSONSchema.
// A parse failure is a programming/packaging error, so it panics rather
// than returning an error, guaranteeing V2841 never yields nil.
func init() {
	sch, err := NewSchemaFromBytes([]byte(v2841))
	if err != nil {
		panic(err)
	}
	_v2841 = sch
}
// V2841 returns the VES Schema v28.4.1 (VES v5.4.1) parsed at package
// initialization. The returned schema is shared; callers must not mutate it.
func V2841() *JSONSchema {
	return _v2841
}
package m32
// Quaternions
/*
Resources:
https://answers.unity.com/questions/467614/what-is-the-source-code-of-quaternionlookrotation.html
*/
import (
"github.com/go-gl/mathgl/mgl32"
)
type (
	// Quat is an alias for mgl32.Quat, re-exported so package users can
	// work with quaternions without importing mgl32 directly.
	Quat = mgl32.Quat
)
// QuatLookAt returns a quaternion that orients the forward axis toward dir,
// using up to resolve the roll. Implementation ported from GLM
// (glm/gtc/quaternion.inl, quatLookAt + quat_cast): it first builds an
// orthonormal basis from dir/up, then converts that rotation matrix to a
// quaternion with Shepperd's method, picking the largest of the four squared
// components to keep the square root numerically stable.
// NOTE(review): dir is assumed non-zero and not parallel to up — TODO confirm
// callers guarantee this; a degenerate basis would produce NaNs.
func QuatLookAt(dir, up vec3) quat {
	var (
		// Basis vectors of the rotation matrix (rows m0*, m1*, m2*):
		// v  = normalized forward, v2 = right (up x forward), v3 = recomputed up.
		v = dir.Normalize()
		v2 = up.Cross(v).Normalize()
		v3 = v.Cross(v2)
		m00, m01, m02 = v2[0], v2[1], v2[2]
		m10, m11, m12 = v3[0], v3[1], v3[2]
		m20, m21, m22 = v[0], v[1], v[2]
	)
	// Shepperd's method: 4*q_i^2 - 1 for each quaternion component,
	// derived from the matrix diagonal.
	fourXSquaredMinus1 := m00 - m11 - m22
	fourYSquaredMinus1 := m11 - m00 - m22
	fourZSquaredMinus1 := m22 - m00 - m11
	fourWSquaredMinus1 := m00 + m11 + m22

	// Select the largest squared component (0=w, 1=x, 2=y, 3=z) so the
	// sqrt below operates on the biggest value, avoiding precision loss
	// and division by a near-zero component.
	biggestIndex := 0
	fourBiggestSquaredMinus1 := fourWSquaredMinus1
	if fourXSquaredMinus1 > fourBiggestSquaredMinus1 {
		fourBiggestSquaredMinus1 = fourXSquaredMinus1
		biggestIndex = 1
	}
	if fourYSquaredMinus1 > fourBiggestSquaredMinus1 {
		fourBiggestSquaredMinus1 = fourYSquaredMinus1
		biggestIndex = 2
	}
	if fourZSquaredMinus1 > fourBiggestSquaredMinus1 {
		fourBiggestSquaredMinus1 = fourZSquaredMinus1
		biggestIndex = 3
	}

	// biggestVal = the chosen component; the other three are recovered from
	// off-diagonal matrix sums/differences scaled by mult = 1/(4*biggestVal).
	biggestVal := Sqrt(fourBiggestSquaredMinus1+1) * 0.5
	mult := 0.25 / biggestVal

	switch biggestIndex {
	case 0: // w is largest
		return quat{
			W: biggestVal,
			V: vec3{(m12 - m21) * mult, (m20 - m02) * mult, (m01 - m10) * mult},
		}
	case 1: // x is largest
		return quat{
			W: (m12 - m21) * mult,
			V: vec3{biggestVal, (m01 + m10) * mult, (m20 + m02) * mult},
		}
	case 2: // y is largest
		return quat{
			W: (m20 - m02) * mult,
			V: vec3{(m01 + m10) * mult, biggestVal, (m12 + m21) * mult},
		}
	case 3: // z is largest
		return quat{
			W: (m01 - m10) * mult,
			V: vec3{(m20 + m02) * mult, (m12 + m21) * mult, biggestVal},
		}
	}
	// Unreachable: biggestIndex is always 0..3; identity keeps the compiler happy.
	return mgl32.QuatIdent()
}
// QuatEuler returns a quaternion based on those euler angles
// https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
func QuatEuler(x, y, z float32) quat {
cy, sy := Sincos(z * 0.5)
cp, sp := Sincos(y * 0.5)
cr, sr := Sincos(x * 0.5)
return quat{
W: cy*cp*cr + sy*sp*sr,
V: vec3{
cy*cp*sr - sy*sp*cr,
sy*cp*sr + cy*sp*cr,
sy*cp*cr - cy*sp*sr,
},
}
} | m32/quat.go | 0.792464 | 0.455259 | quat.go | starcoder |
package iso20022
// Posting to an account that results in an increase or decrease to a balance.
// EntryTransaction1 is an ISO 20022 message building block; optional members
// are populated lazily through the Add*/Set* helpers defined below.
type EntryTransaction1 struct {

	// Set of elements providing the identification of the underlying transaction.
	References *TransactionReferences1 `xml:"Refs,omitempty"`

	// Set of elements providing details information on the original amount.
	//
	// Usage: This component (on transaction level) should be used in case booking is for a single transaction and the original amount is different from the entry amount. It can also be used in case individual original amounts are provided in case of a batch or aggregate booking.
	AmountDetails *AmountAndCurrencyExchange2 `xml:"AmtDtls,omitempty"`

	// Set of elements used to indicate when the booked funds will become available, ie can be accessed and start generating interest.
	//
	// Usage : this type of info is eg used in US, and is linked to particular instruments, such as cheques.
	// Example : When a cheque is deposited, it will be booked on the deposit day, but the funds will only be accessible as of the indicated availability day (according to national banking regulations).
	Availability []*CashBalanceAvailability1 `xml:"Avlbty,omitempty"`

	// Set of elements to fully identify the type of underlying transaction resulting in an entry.
	BankTransactionCode *BankTransactionCodeStructure1 `xml:"BkTxCd,omitempty"`

	// Provides information on the charges included in the entry amount.
	//
	// Usage : This component (on transaction level) can be used in case the booking is for a single transaction, and charges are included in the entry amount. It can also be used in case individual charge amounts are applied to individual transactions in case of a batch or aggregate amount booking.
	Charges []*ChargesInformation3 `xml:"Chrgs,omitempty"`

	// Set of elements providing details on the interest amount included in the entry amount.
	//
	// Usage : This component (on transaction level) can be used in case the booking is for a single transaction, and interest amount is included in the entry amount. It can also be used in case individual interest amounts are applied to individual transactions in case of a batch or aggregate amount booking.
	Interest []*TransactionInterest1 `xml:"Intrst,omitempty"`

	// Set of elements identifying the parties related to the underlying transaction.
	RelatedParties *TransactionParty1 `xml:"RltdPties,omitempty"`

	// Set of elements identifying the agents related to the underlying transaction.
	RelatedAgents *TransactionAgents1 `xml:"RltdAgts,omitempty"`

	// Underlying reason for the payment transaction, eg, a charity payment, or a commercial agreement between the creditor and the debtor.
	//
	// Usage: purpose is used by the end-customers, ie originating party, initiating party, debtor, creditor, final party, to provide information concerning the nature of the payment transaction. Purpose is a content element, which is not used for processing by any of the agents involved in the payment chain.
	Purpose *Purpose1Choice `xml:"Purp,omitempty"`

	// Information related to the handling of the remittance information by any of the agents in the transaction processing chain.
	RelatedRemittanceInformation []*RemittanceLocation1 `xml:"RltdRmtInf,omitempty"`

	// Information that enables the matching, ie, reconciliation, of a payment with the items that the payment is intended to settle, eg, commercial invoices in an account receivable system.
	RemittanceInformation *RemittanceInformation1 `xml:"RmtInf,omitempty"`

	// Set of elements identifying the dates related to the underlying transactions.
	RelatedDates *TransactionDates1 `xml:"RltdDts,omitempty"`

	// Set of elements identifying the price information related to the underlying transaction.
	RelatedPrice *TransactionPrice1Choice `xml:"RltdPric,omitempty"`

	// Identifies related quantities (eg of securities) in the underlying transaction.
	RelatedQuantities []*TransactionQuantities1Choice `xml:"RltdQties,omitempty"`

	// Identification of a security, as assigned under a formal or proprietary identification scheme.
	FinancialInstrumentIdentification *SecurityIdentification4Choice `xml:"FinInstrmId,omitempty"`

	// Amount of money due to the government or tax authority, according to various pre-defined parameters such as thresholds or income.
	Tax *TaxInformation2 `xml:"Tax,omitempty"`

	// Set of elements specifying the return information.
	ReturnInformation *ReturnReasonInformation5 `xml:"RtrInf,omitempty"`

	// Set of elements identifying the underlying corporate action.
	CorporateAction *CorporateAction1 `xml:"CorpActn,omitempty"`

	// Safekeeping or investment account. A safekeeping account is an account on which a securities entry is made. An investment account is an account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family.
	SafekeepingAccount *CashAccount7 `xml:"SfkpgAcct,omitempty"`

	// Further details on the transaction details.
	AdditionalTransactionInformation *Max500Text `xml:"AddtlTxInf,omitempty"`
}
func (e *EntryTransaction1) AddReferences() *TransactionReferences1 {
e.References = new(TransactionReferences1)
return e.References
}
func (e *EntryTransaction1) AddAmountDetails() *AmountAndCurrencyExchange2 {
e.AmountDetails = new(AmountAndCurrencyExchange2)
return e.AmountDetails
}
func (e *EntryTransaction1) AddAvailability() *CashBalanceAvailability1 {
newValue := new (CashBalanceAvailability1)
e.Availability = append(e.Availability, newValue)
return newValue
}
func (e *EntryTransaction1) AddBankTransactionCode() *BankTransactionCodeStructure1 {
e.BankTransactionCode = new(BankTransactionCodeStructure1)
return e.BankTransactionCode
}
func (e *EntryTransaction1) AddCharges() *ChargesInformation3 {
newValue := new (ChargesInformation3)
e.Charges = append(e.Charges, newValue)
return newValue
}
func (e *EntryTransaction1) AddInterest() *TransactionInterest1 {
newValue := new (TransactionInterest1)
e.Interest = append(e.Interest, newValue)
return newValue
}
func (e *EntryTransaction1) AddRelatedParties() *TransactionParty1 {
e.RelatedParties = new(TransactionParty1)
return e.RelatedParties
}
func (e *EntryTransaction1) AddRelatedAgents() *TransactionAgents1 {
e.RelatedAgents = new(TransactionAgents1)
return e.RelatedAgents
}
func (e *EntryTransaction1) AddPurpose() *Purpose1Choice {
e.Purpose = new(Purpose1Choice)
return e.Purpose
}
func (e *EntryTransaction1) AddRelatedRemittanceInformation() *RemittanceLocation1 {
newValue := new (RemittanceLocation1)
e.RelatedRemittanceInformation = append(e.RelatedRemittanceInformation, newValue)
return newValue
}
func (e *EntryTransaction1) AddRemittanceInformation() *RemittanceInformation1 {
e.RemittanceInformation = new(RemittanceInformation1)
return e.RemittanceInformation
}
func (e *EntryTransaction1) AddRelatedDates() *TransactionDates1 {
e.RelatedDates = new(TransactionDates1)
return e.RelatedDates
}
func (e *EntryTransaction1) AddRelatedPrice() *TransactionPrice1Choice {
e.RelatedPrice = new(TransactionPrice1Choice)
return e.RelatedPrice
}
func (e *EntryTransaction1) AddRelatedQuantities() *TransactionQuantities1Choice {
newValue := new (TransactionQuantities1Choice)
e.RelatedQuantities = append(e.RelatedQuantities, newValue)
return newValue
}
func (e *EntryTransaction1) AddFinancialInstrumentIdentification() *SecurityIdentification4Choice {
e.FinancialInstrumentIdentification = new(SecurityIdentification4Choice)
return e.FinancialInstrumentIdentification
}
func (e *EntryTransaction1) AddTax() *TaxInformation2 {
e.Tax = new(TaxInformation2)
return e.Tax
}
func (e *EntryTransaction1) AddReturnInformation() *ReturnReasonInformation5 {
e.ReturnInformation = new(ReturnReasonInformation5)
return e.ReturnInformation
}
func (e *EntryTransaction1) AddCorporateAction() *CorporateAction1 {
e.CorporateAction = new(CorporateAction1)
return e.CorporateAction
}
func (e *EntryTransaction1) AddSafekeepingAccount() *CashAccount7 {
e.SafekeepingAccount = new(CashAccount7)
return e.SafekeepingAccount
}
func (e *EntryTransaction1) SetAdditionalTransactionInformation(value string) {
e.AdditionalTransactionInformation = (*Max500Text)(&value)
} | EntryTransaction1.go | 0.923342 | 0.658198 | EntryTransaction1.go | starcoder |
package text
import (
"image/color"
"math"
"github.com/gopherd/plot/font"
"github.com/gopherd/plot/vg"
)
// Handler parses, formats and renders text. Implementations handle a
// particular text dialect (plain text, Markdown, LaTeX, ...).
type Handler interface {
	// Cache returns the cache of fonts used by the text handler.
	Cache() *font.Cache

	// Extents returns the Extents of a font.
	Extents(fnt font.Font) font.Extents

	// Lines splits a given block of text into separate lines.
	Lines(txt string) []string

	// Box returns the bounding box of the given non-multiline text where:
	//  - width is the horizontal space from the origin.
	//  - height is the vertical space above the baseline.
	//  - depth is the vertical space below the baseline, a positive number.
	Box(txt string, fnt font.Font) (width, height, depth vg.Length)

	// Draw renders the given text with the provided style at position pt
	// on the canvas.
	Draw(c vg.Canvas, txt string, sty Style, pt vg.Point)
}
// XAlignment specifies text alignment in the X direction. Three preset
// options are available, but an arbitrary alignment
// can also be specified using XAlignment(desired number).
type XAlignment float64

const (
	// XLeft aligns the left edge of the text with the specified location.
	XLeft XAlignment = 0
	// XCenter aligns the horizontal center of the text with the specified location.
	XCenter XAlignment = -0.5
	// XRight aligns the right edge of the text with the specified location.
	XRight XAlignment = -1
)

// YAlignment specifies text alignment in the Y direction. Three preset
// options are available, but an arbitrary alignment
// can also be specified using YAlignment(desired number).
type YAlignment float64

const (
	// YTop aligns the top of of the text with the specified location.
	YTop YAlignment = -1
	// YCenter aligns the vertical center of the text with the specified location.
	YCenter YAlignment = -0.5
	// YBottom aligns the bottom of the text with the specified location.
	YBottom YAlignment = 0
)

// Position specifies the text position: -1 for left/bottom, 0 for center,
// +1 for top/right.
const (
	PosLeft   = -1
	PosBottom = -1
	PosCenter = 0
	PosTop    = +1
	PosRight  = +1
)

// Style describes what text will look like.
type Style struct {
	// Color is the text color.
	Color color.Color

	// Font is the font description.
	Font font.Font

	// Rotation is the text rotation in radians, performed around the axis
	// defined by XAlign and YAlign.
	Rotation float64

	// XAlign and YAlign specify the alignment of the text.
	XAlign XAlignment
	YAlign YAlignment

	// Handler parses and formats text according to a given
	// dialect (Markdown, LaTeX, plain, ...)
	// The default is a plain text handler.
	Handler Handler
}
// FontExtents returns the extents of this Style's font.
func (s Style) FontExtents() font.Extents {
	ext := s.Handler.Extents(s.Font)
	return ext
}

// Width returns the width of lines of text
// when using the given font before any text rotation is applied.
func (s Style) Width(txt string) (max vg.Length) {
	width, _ := s.box(txt)
	return width
}

// Height returns the height of the text when using
// the given font before any text rotation is applied.
func (s Style) Height(txt string) vg.Length {
	_, height := s.box(txt)
	return height
}
// box returns the bounding box of a possibly multi-line text:
// the widest line and the total height including inter-line gaps.
func (s Style) box(txt string) (w, h vg.Length) {
	lines := s.Handler.Lines(txt)
	extents := s.FontExtents()
	// Vertical space inserted between two consecutive lines.
	gap := extents.Height - extents.Ascent - extents.Descent
	for i, line := range lines {
		lineW, lineH, lineD := s.Handler.Box(line, s.Font)
		if lineW > w {
			w = lineW
		}
		if i > 0 {
			h += gap
		}
		h += lineH + lineD
	}
	return w, h
}
// Rectangle returns a rectangle giving the bounds of
// this text assuming that it is drawn at (0, 0).
// The box is shifted by the style's X/Y alignment, rotated by the style's
// Rotation, and the axis-aligned bounding box of the four rotated corners
// is returned.
func (s Style) Rectangle(txt string) vg.Rectangle {
	e := s.Handler.Extents(s.Font)
	w, h := s.box(txt)
	desc := vg.Length(e.Height - e.Ascent) // descent + linegap

	// Alignment offsets shift the box relative to the draw origin.
	xoff := vg.Length(s.XAlign) * w
	yoff := vg.Length(s.YAlign)*h - desc

	// lower left corner
	p1 := rotatePoint(s.Rotation, vg.Point{X: xoff, Y: yoff})
	// upper left corner
	p2 := rotatePoint(s.Rotation, vg.Point{X: xoff, Y: h + yoff})
	// lower right corner
	p3 := rotatePoint(s.Rotation, vg.Point{X: w + xoff, Y: yoff})
	// upper right corner
	p4 := rotatePoint(s.Rotation, vg.Point{X: w + xoff, Y: h + yoff})

	return vg.Rectangle{
		Max: vg.Point{
			X: max(p1.X, p2.X, p3.X, p4.X),
			Y: max(p1.Y, p2.Y, p3.Y, p4.Y),
		},
		Min: vg.Point{
			X: min(p1.X, p2.X, p3.X, p4.X),
			Y: min(p1.Y, p2.Y, p3.Y, p4.Y),
		},
	}
}
// rotatePoint applies rotation theta (in radians) about the origin to point p.
func rotatePoint(theta float64, p vg.Point) vg.Point {
	if theta == 0 {
		return p
	}
	sin, cos := math.Sincos(theta)
	px, py := float64(p.X), float64(p.Y)
	// Standard 2D rotation: (x,y) -> (x cosT - y sinT, x sinT + y cosT).
	return vg.Point{
		X: vg.Length(px*cos - py*sin),
		Y: vg.Length(px*sin + py*cos),
	}
}
func max(d ...vg.Length) vg.Length {
o := vg.Length(math.Inf(-1))
for _, dd := range d {
if dd > o {
o = dd
}
}
return o
}
func min(d ...vg.Length) vg.Length {
o := vg.Length(math.Inf(1))
for _, dd := range d {
if dd < o {
o = dd
}
}
return o
} | text/text.go | 0.821331 | 0.530115 | text.go | starcoder |
package permprom
import (
"regexp"
"strconv"
"strings"
)
// BillionDance returns the order of the programs after one billion dances.
//
// Rather than simulating all 1,000,000,000 dances, it records the program
// order after each dance until the order produced by the very first dance
// repeats. From then on the sequence of orders is periodic, so the final
// order is read straight out of the recorded cycle.
func BillionDance(content string, n int) string {
	lines := strings.Split(content, ",")
	// Preallocate capacity only: make([]Move, len(lines)) followed by append
	// would leave len(lines) zero-value moves in front of the real ones.
	moves := make([]Move, 0, len(lines))
	for _, line := range lines {
		moves = append(moves, parseLine(line))
	}
	firstDance := ""
	historyDances := []string{}
	p := InitializePrograms(n)
	// Dance until the order seen after the first dance shows up again; the
	// history then holds exactly one full cycle.
	for i := 1; i <= 10000000000; i++ {
		p.Dance(moves)
		if i == 1 {
			firstDance = string(p.progs)
			historyDances = append(historyDances, firstDance)
		} else {
			currentDance := string(p.progs)
			if currentDance == firstDance {
				break
			}
			historyDances = append(historyDances, currentDance)
		}
	}
	// Index of the billionth dance inside the recorded cycle. A remainder of
	// zero means it coincides with the last recorded dance; otherwise shift
	// by one to convert the 1-based dance count to a 0-based slice index.
	lastDance := 10000000000 % len(historyDances)
	if lastDance == 0 {
		lastDance = len(historyDances) - 1
	} else {
		lastDance--
	}
	return historyDances[lastDance]
}
// Dance executes a series of dance moves on the programs, in order.
// Moves with an unknown (or empty) name are ignored.
func (p *Programs) Dance(moves []Move) {
	for _, m := range moves {
		switch m.name {
		case "Spin":
			p.Spin(m.size)
		case "Exchange":
			p.Exchange(m.pos1, m.pos2)
		case "Partner":
			p.Partner(m.prog1, m.prog2)
		}
	}
}
// Dance parses the comma-separated move list in content and returns the
// order of n programs after dancing it once.
func Dance(content string, n int) string {
	lines := strings.Split(content, ",")
	// Capacity-only allocation: a length-n make plus append would prepend
	// len(lines) zero-value moves before the parsed ones.
	moves := make([]Move, 0, len(lines))
	for _, line := range lines {
		moves = append(moves, parseLine(line))
	}
	p := InitializePrograms(n)
	p.Dance(moves)
	return string(p.progs)
}
// InitializePrograms returns a Programs value holding the first n lower-case
// letters ('a', 'b', ...) in order.
func InitializePrograms(n int) Programs {
	progs := make([]rune, n)
	for i := range progs {
		progs[i] = rune('a' + i)
	}
	return Programs{progs}
}
// Spin makes x programs move from the end to the front, keeping their
// relative order (a rotation of the whole line).
func (p *Programs) Spin(x int) {
	n := len(p.progs)
	// Build the rotated order in a fresh slice. The previous version
	// appended the front onto a sub-slice of p.progs, which only worked
	// because append happened to reallocate; writing through a shared
	// backing array is fragile.
	rotated := make([]rune, 0, n)
	rotated = append(rotated, p.progs[n-x:]...)
	rotated = append(rotated, p.progs[:n-x]...)
	copy(p.progs, rotated)
}
// Exchange makes the programs at positions pos1 and pos2 swap places.
// No bounds checking is done; out-of-range positions panic.
func (p *Programs) Exchange(pos1 int, pos2 int) {
	// Parallel assignment swaps in place without a temporary.
	p.progs[pos1], p.progs[pos2] = p.progs[pos2], p.progs[pos1]
}
// Partner makes the programs named p1 and p2 swap places.
// If either name is absent, Exchange is called with -1 and panics.
func (p *Programs) Partner(p1 rune, p2 rune) {
	pos1, pos2 := -1, -1
	for i, r := range p.progs {
		if r == p1 {
			pos1 = i
		}
		if r == p2 {
			pos2 = i
		}
		// Stop as soon as both programs have been located. The previous
		// guard ((pos1 > 0 || pos2 > 0) && pos1*pos2 > 0) never fired when
		// either program sat at index 0, so the loop always ran to the end.
		if pos1 >= 0 && pos2 >= 0 {
			break
		}
	}
	p.Exchange(pos1, pos2)
}
// parseLine decodes one textual dance move ("sN", "xA/B" or "pX/Y") into a
// Move. Unrecognized input yields a zero-value Move (empty name), which the
// dance loop ignores.
func parseLine(line string) Move {
	name := ""
	size := 0
	pos1 := 0
	pos2 := 0
	var prog1 rune
	var prog2 rune
	// Three alternatives, one per move kind, with named capture groups:
	//   s<size>            -> Spin
	//   x<pos1>/<pos2>     -> Exchange
	//   p<prog1>/<prog2>   -> Partner
	re := regexp.MustCompile(`s(?P<size>\d+)|x(?P<pos1>\d+)\/(?P<pos2>\d+)|p(?P<prog1>[a-z])\/(?P<prog2>[a-z])`)
	groupNames := re.SubexpNames()
	for _, match := range re.FindAllStringSubmatch(line, -1) {
		for groupIdx, groupValue := range match {
			// Map each submatch back to its named group; groups belonging
			// to the non-matching alternatives are empty and skipped.
			groupName := groupNames[groupIdx]
			switch {
			case groupName == "size" && groupValue != "":
				name = "Spin"
				size, _ = strconv.Atoi(groupValue)
			case groupName == "pos1" && groupValue != "":
				name = "Exchange"
				pos1, _ = strconv.Atoi(groupValue)
			case groupName == "pos2" && groupValue != "":
				pos2, _ = strconv.Atoi(groupValue)
			case groupName == "prog1" && groupValue != "":
				name = "Partner"
				prog1 = rune(groupValue[0])
			case groupName == "prog2" && groupValue != "":
				prog2 = rune(groupValue[0])
			}
		}
	}
	m := Move{name, size, pos1, pos2, prog1, prog2}
	return m
}
// Programs represents the list of programs participating in the dance,
// one rune per program, in their current standing order.
type Programs struct {
	progs []rune
}
// Move represents a single parsed dance move. The name field ("Spin",
// "Exchange" or "Partner") selects which of the remaining fields are used.
// NOTE: field order is load-bearing — parseLine builds Move with a
// positional composite literal.
type Move struct {
	name  string // move kind
	size  int    // Spin: how many programs rotate to the front
	pos1  int    // Exchange: first position
	pos2  int    // Exchange: second position
	prog1 rune   // Partner: first program name
	prog2 rune   // Partner: second program name
}
package godel
import (
"github.com/go-gl/mathgl/mgl64"
"math"
)
type Projection interface {
Projection(wicth, height float64) mgl64.Mat4
}
type AutoPerspective struct {
Yfov float64
ZNear float64
ZFar float64
}
func NewAutoPerspective(yfov float64, ZNear float64, ZFar float64) *AutoPerspective {
return &AutoPerspective{Yfov: yfov, ZNear: ZNear, ZFar: ZFar}
}
func (s AutoPerspective) Projection(wicth, height float64) mgl64.Mat4 {
return mgl64.Perspective(s.Yfov, wicth / height, s.ZNear, s.ZFar)
}
type Perspective struct {
AspectRatio float64
Yfov float64
ZNear float64
ZFar float64
}
func NewPerspective(aspectRatio float64, yfov float64, ZNear float64, ZFar float64) *Perspective {
return &Perspective{AspectRatio: aspectRatio, Yfov: yfov, ZNear: ZNear, ZFar: ZFar}
}
func (s Perspective) Projection(wicth, height float64) mgl64.Mat4 {
return mgl64.Perspective(s.Yfov, s.AspectRatio, s.ZNear, s.ZFar)
}
type AutoInfPerspective struct {
Yfov float64
ZNear float64
}
func NewAutoInfPerspective(yfov float64, ZNear float64) *AutoInfPerspective {
return &AutoInfPerspective{Yfov: yfov, ZNear: ZNear}
}
func (s AutoInfPerspective) Projection(wicth, height float64) mgl64.Mat4 {
const e = 0.000001
f := 1. / math.Tan(float64(s.Yfov)/2.0)
return mgl64.Mat4{f / (wicth / height), 0, 0, 0, 0, f, 0, 0, 0, 0, -1 + e, -1, 0, 0, -s.ZNear, 0}
}
type InfPerspective struct {
AspectRatio float64
Yfov float64
ZNear float64
}
func NewInfPerspective(aspectRatio float64, yfov float64, ZNear float64) *InfPerspective {
return &InfPerspective{AspectRatio: aspectRatio, Yfov: yfov, ZNear: ZNear}
}
func (s InfPerspective) Projection(wicth, height float64) mgl64.Mat4 {
const e = 0.000001
f := 1. / math.Tan(float64(s.Yfov)/2.0)
return mgl64.Mat4{f / s.AspectRatio, 0, 0, 0, 0, f, 0, 0, 0, 0, -1 + e, -1, 0, 0, -s.ZNear, 0}
}
type Orthographic struct {
XMag float64
YMag float64
ZNear float64
ZFar float64
}
func NewOrthographic(XMag float64, YMag float64, ZNear float64, ZFar float64) *Orthographic {
return &Orthographic{XMag: XMag, YMag: YMag, ZNear: ZNear, ZFar: ZFar}
}
func (s Orthographic) Projection(wicth, height float64) mgl64.Mat4 {
return mgl64.Ortho(-s.XMag, s.XMag, -s.YMag, s.YMag, s.ZNear, s.ZFar)
}
type AutoOrthographic struct {
ZNear float64
ZFar float64
}
func (s AutoOrthographic) Projection(wicth, height float64) mgl64.Mat4 {
xmag := wicth/2
ymag := height/2
return mgl64.Ortho(-xmag, xmag, -ymag, ymag, s.ZNear, s.ZFar)
} | iProjection.go | 0.85446 | 0.606324 | iProjection.go | starcoder |
package geotiff
// https://github.com/geotiffjs/geotiff.js/
import (
"fmt"
"os"
)
// GeoTIFF represents a GeoTIFF file. The file handle stays open so that
// GetSlice can read further data on demand.
type GeoTIFF struct {
	source       *os.File //The datasource to read from.
	littleEndian bool     //Whether the image uses little endian.
	bigTiff      bool     //Whether the image uses bigTIFF conventions.
	firstIFDOffset uint   //The numeric byte-offset from the start of the image to the first IFD.
}
// FromFile instantiates a new GeoTIFF from a file by parsing its header:
// byte order mark, TIFF/BigTIFF magic number and the offset of the first IFD.
// The file handle is kept open on success (for GetSlice) and closed on every
// error path.
func FromFile(filePath string) (*GeoTIFF, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("cannot open GeoTIFF file %v due to %v", filePath, err)
	}
	headerData := make([]byte, 1024, 1024)
	// NOTE(review): Read may legally return fewer than 1024 bytes with a nil
	// error; confirm headers always arrive in one read or switch to io.ReadFull.
	_, err = file.Read(headerData)
	if err != nil {
		file.Close()
		return nil, fmt.Errorf("cannot read from GeoTIFF file %v due to %v", filePath, err)
	}

	// Byte Order Mark: "II" (0x4949) = little endian, "MM" (0x4d4d) = big endian.
	dataView := DataView(headerData)
	bom := dataView.Uint16(0, false)
	var littleEndian bool
	if bom == 0x4949 {
		littleEndian = true
	} else if bom == 0x4d4d {
		littleEndian = false
	} else {
		file.Close()
		return nil, fmt.Errorf("invalid Byte Order Value - BOM")
	}

	// Magic number 42 marks classic TIFF, 43 marks BigTIFF (which must use
	// 8-byte offsets).
	magicNumber := dataView.Uint16(2, littleEndian)
	var bigTiff bool
	if magicNumber == 42 {
		bigTiff = false
	} else if magicNumber == 43 {
		bigTiff = true
		offsetByteSize := dataView.Uint16(4, littleEndian)
		if offsetByteSize != 8 {
			file.Close()
			return nil, fmt.Errorf("unsupported offset byte-size %v instead of 8", offsetByteSize)
		}
	} else {
		file.Close()
		return nil, fmt.Errorf("invalid magic number: %v", magicNumber)
	}

	// BigTIFF stores the first IFD offset as a uint64 at byte 8; classic
	// TIFF as a uint32 at byte 4.
	var firstIFDOffset uint
	if bigTiff {
		firstIFDOffset = uint(dataView.Uint64(8, littleEndian))
	} else {
		firstIFDOffset = uint(dataView.Uint32(4, littleEndian))
	}
	geoTiff := GeoTIFF{file, littleEndian, bigTiff, firstIFDOffset}
	return &geoTiff, nil
}
// GetSlice returns a DataSlice of data from the GeoTIFF file
func (g *GeoTIFF) GetSlice(offset uint, size uint) (*DataSlice, error) {
buffer := make([]byte, size)
_, err := g.source.ReadAt(buffer, int64(offset))
if err != nil {
return nil, fmt.Errorf("cannot read data from file due to %v", err)
}
dataSlice := NewDataSlice(buffer, offset, g.littleEndian, g.bigTiff)
return dataSlice, nil
} | pkg/geotiff/geotiff.go | 0.637934 | 0.456046 | geotiff.go | starcoder |
package imputation
import (
"errors"
"fmt"
"log"
"sort"
"time"
)
// TimeSeries stores a time series: observations keyed by date. Dates are
// expressed in ISO 8601 format as illustrated in the constants below.
type TimeSeries map[string]float64

// Assume 3 formattings for the dates: daily, monthly, and yearly
// (Go reference-time layouts).
const (
	dayfmt   = "2006-01-02" // e.g. 2021-07-09
	monthfmt = "2006-01"    // e.g. 2021-07
	yearfmt  = "2006"       // e.g. 2021
)
// The diff function returns the minimum gap (will change to GCD) between
// consecutive data points that all share the same date format, expressed as
// a (years, months, days) step suitable for time.AddDate.
// Side effect: keys is sorted in place.
// NOTE(review): the component deltas (y2-y1, M2-M1, d2-d1) can be negative
// for pairs that wrap a month/year boundary — confirm callers tolerate that.
// TODO(eftekhari-mhs): add OK, ERR return values and handle errors.
func diff(keys []string, dateFormat string) (int, int, int) {
	year, month, day := 0, 0, 0
	duration := 1000 //A large number (sentinel; any real gap is smaller).
	sort.Strings(keys)
	for i := 0; i < len(keys)-1; i++ {
		// Parse errors are ignored; failed parses yield the zero time.
		start, _ := time.Parse(dateFormat, keys[i])
		end, _ := time.Parse(dateFormat, keys[i+1])
		// Calculate total number of days.
		if delta := int(end.Sub(start).Hours() / 24); duration > delta { //TODO: instead of min use GCD
			duration = delta
			y1, M1, d1 := start.Date()
			y2, M2, d2 := end.Date()
			year = int(y2 - y1)
			month = int(M2 - M1)
			day = int(d2 - d1)
		}
	}
	return year, month, day
}
// FillMean returns TimeSeries with missing datapoints with value = mean(value of existing points).
// TODO(eftekhari-mhs): Handle error cases.
func FillMean(ts TimeSeries) (TimeSeries, error) {
if len(ts) < 3 {
log.Printf("not enough data to impute")
return ts, nil
}
keys := make([]string, 0)
var mean float64 = 0
for k, v := range ts {
keys = append(keys, k)
mean += v
}
mean = mean / float64(len(keys))
var parseFormat string
switch len(keys[0]) {
case 4:
parseFormat = yearfmt
case 7:
parseFormat = monthfmt
case 10:
parseFormat = dayfmt
}
if parseFormat == "" {
return ts, errors.New("date format is not ISO 8601")
}
yStep, mStep, dStep := diff(keys, parseFormat)
log.Printf("Step is equal to : %v years, %v months, %v days \n", yStep, mStep, dStep)
sort.Strings(keys)
//TODO(eftekhari-mhs): Handle errors.
startDate, _ := time.Parse(parseFormat, keys[0])
endDate, _ := time.Parse(parseFormat, keys[len(keys)-1])
for d := startDate; d.After(endDate) == false; d = d.AddDate(yStep, mStep, dStep) {
if _, ok := ts[fmt.Sprint(d.Format(parseFormat))]; !ok {
ts[fmt.Sprint(d.Format(parseFormat))] = mean
}
}
return ts, nil
} | imputation/ts_impute.go | 0.503906 | 0.474327 | ts_impute.go | starcoder |
package gxqueue
const (
	// fastGrowThreshold is the capacity above which growth slows from
	// doubling to +25% per grow (see grow).
	fastGrowThreshold = 1024
)

// CircularUnboundedQueue is a circular structure and will grow automatically if it exceeds the capacity.
// One slot of data is always kept free so that head == tail unambiguously
// means "empty".
// CircularUnboundedQueue is not thread-safe.
type CircularUnboundedQueue struct {
	data []interface{}
	head, tail int // head: index of the front element; tail: one past the back
	icap int // initial capacity
	quota int // specify the maximum size of the queue, setting to 0 denotes unlimited.
}
// NewCircularUnboundedQueue creates a queue with the given initial capacity
// and no upper bound on its size.
func NewCircularUnboundedQueue(capacity int) *CircularUnboundedQueue {
	return NewCircularUnboundedQueueWithQuota(capacity, 0)
}

// NewCircularUnboundedQueueWithQuota creates a queue with the given initial
// capacity and a maximum size of quota elements (0 means unlimited).
// It panics on negative arguments.
func NewCircularUnboundedQueueWithQuota(capacity, quota int) *CircularUnboundedQueue {
	switch {
	case capacity < 0:
		panic("capacity should be greater than zero")
	case quota < 0:
		panic("quota should be greater or equal to zero")
	}
	if quota != 0 && capacity > quota {
		capacity = quota
	}
	// One extra slot distinguishes a full ring from an empty one.
	return &CircularUnboundedQueue{
		data:  make([]interface{}, capacity+1),
		icap:  capacity,
		quota: quota,
	}
}
// IsEmpty reports whether the queue holds no elements
// (head meeting tail marks the empty ring).
func (q *CircularUnboundedQueue) IsEmpty() bool {
	return q.head == q.tail
}
// Push appends t at the tail, growing the ring when it is full. It reports
// false only when the queue is full and already at its quota.
func (q *CircularUnboundedQueue) Push(t interface{}) bool {
	next := (q.tail + 1) % len(q.data)
	if next == q.head {
		// Ring is full: try to enlarge the backing array first.
		if !q.grow() {
			return false
		}
		// grow rewrote head/tail and data; recompute the insertion point.
		next = (q.tail + 1) % len(q.data)
	}
	q.data[q.tail] = t
	q.tail = next
	return true
}
// Pop removes and returns the element at the head of the queue.
// It panics when the queue is empty.
func (q *CircularUnboundedQueue) Pop() interface{} {
	if q.IsEmpty() {
		panic("queue has no element")
	}
	front := q.data[q.head]
	q.head = (q.head + 1) % len(q.data)
	return front
}
// Peek returns the element at the head of the queue without removing it.
// It panics when the queue is empty.
func (q *CircularUnboundedQueue) Peek() interface{} {
	if q.IsEmpty() {
		panic("queue has no element")
	}
	return q.data[q.head]
}
// Cap returns the current capacity: one less than the backing array, since
// a slot is kept free to tell a full ring from an empty one.
func (q *CircularUnboundedQueue) Cap() int {
	return len(q.data) - 1
}
// Len returns the number of elements currently in the queue.
func (q *CircularUnboundedQueue) Len() int {
	if q.tail >= q.head {
		return q.tail - q.head
	}
	// The occupied region wraps around the end of the backing array.
	return q.tail + len(q.data) - q.head
}
// Reset discards all elements and shrinks the backing array back to the
// initial capacity.
func (q *CircularUnboundedQueue) Reset() {
	q.data = make([]interface{}, q.icap+1)
	q.head, q.tail = 0, 0
}

// InitialCap returns the capacity the queue was created with.
func (q *CircularUnboundedQueue) InitialCap() int {
	return q.icap
}
func (q *CircularUnboundedQueue) grow() bool {
oldcap := q.Cap()
if oldcap == 0 {
oldcap++
}
var newcap int
if oldcap < fastGrowThreshold {
newcap = oldcap * 2
} else {
newcap = oldcap + oldcap/4
}
if q.quota != 0 && newcap > q.quota {
newcap = q.quota
}
if newcap == q.Cap() {
return false
}
newdata := make([]interface{}, newcap+1)
copy(newdata[0:], q.data[q.head:])
if q.head > q.tail {
copy(newdata[len(q.data)-q.head:], q.data[:q.head-1])
}
q.head, q.tail = 0, q.Cap()
q.data = newdata
return true
} | container/queue/circular_unbounded_queue.go | 0.773858 | 0.430686 | circular_unbounded_queue.go | starcoder |
package mnist
import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)
const (
	// ImageWidth is a pixel width of image.
	ImageWidth = 28
	// ImageHeight is a pixel height of image.
	ImageHeight = 28
	// TrainSize is a size of training dataset.
	TrainSize = 60000
	// EvalSize is a size of evaluation dataset.
	EvalSize = 10000
)

// Image is a handwritten image: its class label plus 28x28 grayscale pixels
// stored row-major in Buffer.
type Image struct {
	Label  uint8
	Buffer [ImageWidth * ImageHeight]byte
}
// At returns the pixel value at (x, y); pixels are stored row-major.
func (image *Image) At(x, y uint) byte {
	idx := ImageWidth*y + x
	return image.Buffer[idx]
}
// Print writes a coarse ASCII rendering of the image to stdout:
// '#' for bright pixels (>200), '+' for mid-range (>100), ' ' otherwise.
func (image *Image) Print() {
	for y := uint(0); y < ImageHeight; y++ {
		for x := uint(0); x < ImageWidth; x++ {
			switch v := image.At(x, y); {
			case v > 200:
				fmt.Print("#")
			case v > 100:
				fmt.Print("+")
			default:
				fmt.Print(" ")
			}
		}
		fmt.Println()
	}
}
// Scanner is file handler of MNIST dataset: paired image and label files,
// plus the most recently loaded Image (set by Next, read via Image).
type Scanner struct {
	image *os.File
	label *os.File
	next  *Image
}
// Next reports whether another image exists and, if so, loads it so that
// Image returns it. Any read failure (including EOF) ends the scan.
func (scanner *Scanner) Next() bool {
	image := new(Image)
	// io.ReadFull guarantees the whole 28x28 pixel buffer is filled; a
	// plain Read may legally return a partial buffer with a nil error.
	if _, err := io.ReadFull(scanner.image, image.Buffer[:]); err != nil {
		return false
	}
	if err := binary.Read(scanner.label, binary.BigEndian, &image.Label); err != nil {
		return false
	}
	scanner.next = image
	return true
}
// Image returns the Image loaded by the last successful Next().
func (scanner *Scanner) Image() *Image {
	return scanner.next
}

// Close releases both underlying dataset files; close errors are ignored.
func (scanner *Scanner) Close() {
	scanner.image.Close()
	scanner.label.Close()
}
// Open open
func Open(image, label string) (*Scanner, error) {
imageFile, err := os.Open(image)
if err != nil {
return nil, err
}
labelFile, err := os.Open(label)
if err != nil {
return nil, err
}
// skip first 4 byte (header).
for i := 0; i < 4; i++ {
var v uint32
binary.Read(imageFile, binary.BigEndian, &v)
}
// skip first 2 byte (header).
for i := 0; i < 2; i++ {
var v uint32
binary.Read(labelFile, binary.BigEndian, &v)
}
sc := new(Scanner)
sc.image = imageFile
sc.label = labelFile
return sc, nil
} | mnist/mnist.go | 0.671471 | 0.430746 | mnist.go | starcoder |
package require
import "github.com/instana/testify/assert"
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorAs(t, err, target, msgAndArgs...) {
return
}
t.FailNow()
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorAsf(t, err, target, msg, args...) {
return
}
t.FailNow()
}
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorIs(t, err, target, msgAndArgs...) {
return
}
t.FailNow()
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorIsf(t, err, target, msg, args...) {
return
}
t.FailNow()
}
// NotErrorIs asserts that at none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotErrorIs(t, err, target, msgAndArgs...) {
return
}
t.FailNow()
}
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotErrorIsf(t, err, target, msg, args...) {
return
}
t.FailNow()
} | require/require_go1.13.go | 0.637934 | 0.406037 | require_go1.13.go | starcoder |
package models
import (
"fmt"
"log"
"time"
gongsim_models "github.com/fullstack-lang/gongsim/go/models"
gosatellite "github.com/joshuaferrara/go-satellite"
)
// Satellite is a moving object propagated from a two-line element set (TLE).
// swagger:model satellite
type Satellite struct {
	MovingObject // concept

	// TLE lines 1 and 2; parsed by InitFromTLE.
	Line1 string
	Line2 string

	// Agent
	// swagger:ignore
	gongsim_models.Agent

	Name string

	// the reference simulation (SGP4 propagation model built from the TLE)
	gosatellite gosatellite.Satellite

	// NOTE(review): VerticalSpeed is not written in the visible code —
	// confirm where it is computed.
	VerticalSpeed float64

	// time stamps of the satellite (last propagation time and its string form)
	timestamp       time.Time
	Timestampstring string
}
// Register appends the satellite to the gongsim agents singleton and returns
// it so calls can be chained.
func (satellite *Satellite) Register() (res *Satellite) {
	gongsim_models.AppendToSingloton(satellite)
	return satellite
}
// InitFromTLE parses Line1/Line2 (WGS72 gravity model) into the wrapped
// go-satellite propagator and returns the satellite for chaining.
func (satellite *Satellite) InitFromTLE() (res *Satellite) {
	satellite.gosatellite = gosatellite.TLEToSat(satellite.Line1, satellite.Line2, "wgs72")
	return satellite
}
// FireNextEvent pops the next queued event for this satellite and handles it.
//
// An UpdateState event is periodic: it is re-queued at fire time + period and
// the satellite is propagated with SGP4 to refresh Lat/Lng/Level at the fire
// time. An Order event is only logged. Any other event type is a programming
// error and panics.
func (satellite *Satellite) FireNextEvent() {
	event, _ := satellite.GetNextEventAndRemoveIt()
	switch ev := event.(type) {
	case *gongsim_models.UpdateState:
		// Periodic event: schedule the next occurrence before processing.
		ev.SetFireTime(ev.GetFireTime().Add(ev.Period))
		satellite.QueueEvent(ev)
		fireTime := ev.GetFireTime()
		// Sidereal angle of Greenwich at the fire time, needed to turn ECI
		// coordinates into latitude/longitude.
		julianDay := gosatellite.JDay(fireTime.Year(),
			int(fireTime.Month()),
			fireTime.Day(),
			fireTime.Hour(),
			fireTime.Minute(),
			fireTime.Second())
		thetaG := gosatellite.ThetaG_JD(julianDay)
		// SGP4 propagation; the velocity vector is not used.
		position, _ := gosatellite.Propagate(satellite.gosatellite,
			fireTime.Year(),
			int(fireTime.Month()),
			fireTime.Day(),
			fireTime.Hour(),
			fireTime.Minute(),
			fireTime.Second())
		// Convert ECI position to altitude and lat/lng (radians).
		computedAltitude, _, latLngRad := gosatellite.ECIToLLA(position, thetaG)
		// 3280.84 ft per km — assumes ECIToLLA reports altitude in km; TODO confirm.
		satellite.Level = computedAltitude * 3280.84
		latLngDeg := gosatellite.LatLongDeg(latLngRad)
		satellite.Lat = latLngDeg.Latitude
		satellite.Lng = latLngDeg.Longitude
		satellite.timestamp = fireTime
		satellite.Timestampstring = satellite.timestamp.String()
	case *Order:
		log.Printf("satellite %s receives order %s at %s", satellite.Name, ev.OrderMessage, ev.GetFireTime())
	default:
		// Unknown event types indicate a programming error.
		err := fmt.Sprintf("unknown event type %T", event)
		log.Panic(err)
	}
}
// functions to satisfy the visual interface for track
func (satellite *Satellite) GetLat() float64 { return satellite.Lat }
func (satellite *Satellite) GetLng() float64 { return satellite.Lng }
func (satellite *Satellite) GetHeading() float64 { return satellite.Heading }
// speed is displayed in tens of nautical miles
// 18.52 = 10 × 1.852 (km/h per knot) — assumes Speed is km/h; TODO confirm.
func (satellite *Satellite) GetSpeed() float64 {
	return satellite.Speed / 18.52
}
func (satellite *Satellite) GetVerticalSpeed() float64 { return satellite.VerticalSpeed }
func (satellite *Satellite) GetLevel() float64 { return satellite.Level }
func (satellite *Satellite) GetName() (name string) { return satellite.Name }
// specific
// func (satellite *Satellite) GetColorEnum() ColorEnum { return GREY }
func (satellite *Satellite) GetDisplay() bool { return true }
func (*Satellite) GetLayerGroupName() (name string) { return string(Satellite_) } | go/models/satellite.go | 0.755907 | 0.532182 | satellite.go | starcoder |
package eqn
import (
"math"
)
// Eval implementations for leaf nodes: each returns the value the node stands
// for at time t, given state vector x, per-equation constants c and system
// values s.
func (*Time) Eval(t float64, x, c, s []float64) float64 { return t }
func (v *Var) Eval(t float64, x, c, s []float64) float64 { return x[v.P] }
func (dv *DVar) Eval(t float64, x, c, s []float64) float64 { return x[dv.I] }
func (cnst *Constant) Eval(t float64, x, c, s []float64) float64 { return c[cnst.P] }
func (cnst *ConstantF) Eval(t float64, x, c, s []float64) float64 { return cnst.F }
func (sys *System) Eval(t float64, x, c, s []float64) float64 { return s[sys.P] }
// Unary operator nodes: evaluate the child expression, then apply the math op.
func (u *Neg) Eval(t float64, x, c, s []float64) float64 {
	return -1. * u.C.Eval(t, x, c, s)
}
func (u *Abs) Eval(t float64, x, c, s []float64) float64 {
	return math.Abs(u.C.Eval(t, x, c, s))
}
func (u *Sqrt) Eval(t float64, x, c, s []float64) float64 {
	return math.Sqrt(u.C.Eval(t, x, c, s))
}
func (u *Sin) Eval(t float64, x, c, s []float64) float64 {
	return math.Sin(u.C.Eval(t, x, c, s))
}
func (u *Cos) Eval(t float64, x, c, s []float64) float64 {
	return math.Cos(u.C.Eval(t, x, c, s))
}
func (u *Tan) Eval(t float64, x, c, s []float64) float64 {
	return math.Tan(u.C.Eval(t, x, c, s))
}
func (u *Exp) Eval(t float64, x, c, s []float64) float64 {
	return math.Exp(u.C.Eval(t, x, c, s))
}
// NOTE(review): Log evaluates math.Log1p, i.e. ln(1+x), not math.Log —
// possibly deliberate to avoid -Inf at zero; confirm against intended semantics.
func (u *Log) Eval(t float64, x, c, s []float64) float64 {
	return math.Log1p(u.C.Eval(t, x, c, s))
}
// Power and division nodes: integer exponent, float exponent, expression
// exponent, and quotient of two child expressions respectively.
func (u *PowI) Eval(t float64, x, c, s []float64) float64 {
	return math.Pow(u.Base.Eval(t, x, c, s), float64(u.Power))
}
func (u *PowF) Eval(t float64, x, c, s []float64) float64 {
	return math.Pow(u.Base.Eval(t, x, c, s), u.Power)
}
func (n *PowE) Eval(t float64, x, c, s []float64) float64 {
	return math.Pow(n.Base.Eval(t, x, c, s), n.Power.Eval(t, x, c, s))
}
func (n *Div) Eval(t float64, x, c, s []float64) float64 {
	return n.Numer.Eval(t, x, c, s) / n.Denom.Eval(t, x, c, s)
}
// Eval sums the evaluations of all non-nil children of the addition node.
func (n *Add) Eval(t float64, x, c, s []float64) float64 {
	sum := 0.0
	for _, child := range n.CS {
		if child != nil {
			sum += child.Eval(t, x, c, s)
		}
	}
	return sum
}
// Eval multiplies the evaluations of all non-nil children of the product node.
func (n *Mul) Eval(t float64, x, c, s []float64) float64 {
	product := 1.0
	for _, child := range n.CS {
		if child != nil {
			product *= child.Eval(t, x, c, s)
		}
	}
	return product
}
// RK4 performs one classic 4th-order Runge-Kutta step of size (tj - ti) for
// the system of equations e, writing the per-equation INCREMENT (not the new
// state) into x_out.
// x_out is the return here, pass in so we don't have to allocate each time
//
// NOTE(review): at most 32 equations are supported (stage buffer below);
// len(e) > 32 would index out of range.
// NOTE(review): all four stage evaluations pass ti as the time argument;
// textbook RK4 uses ti+h/2 and ti+h for the later stages, which only matters
// for explicitly time-dependent equations (Time nodes) — confirm intent.
func RK4(e []Eqn, c [][]float64, s []float64, ti, tj float64, x_in, x_tmp, x_out []float64) {
	var k [32][4]float64 // that 32 seems pretty arbitrary... XXX TODO
	L := len(e)
	h := tj - ti
	// Stage 1: slopes at the starting state.
	for i := 0; i < L; i++ {
		k[i][0] = e[i].Eval(ti, x_in, c[i], s)
	}
	// Stage 2: slopes at the half-step state predicted by stage 1.
	for i := 0; i < L; i++ {
		x_tmp[i] = x_in[i] + (h * k[i][0] / 2.0)
	}
	for i := 0; i < L; i++ {
		k[i][1] = e[i].Eval(ti, x_tmp, c[i], s)
	}
	// Stage 3: slopes at the half-step state predicted by stage 2.
	for i := 0; i < L; i++ {
		x_tmp[i] = x_in[i] + (h * k[i][1] / 2.0)
	}
	for i := 0; i < L; i++ {
		k[i][2] = e[i].Eval(ti, x_tmp, c[i], s)
	}
	// Stage 4: slopes at the full-step state predicted by stage 3.
	for i := 0; i < L; i++ {
		x_tmp[i] = x_in[i] + (h * k[i][2])
	}
	for i := 0; i < L; i++ {
		k[i][3] = e[i].Eval(ti, x_tmp, c[i], s)
	}
	// Weighted combination of the four slopes gives the step increment.
	for i := 0; i < L; i++ {
		x_out[i] = ((k[i][0] + 2.0*k[i][1] + 2.0*k[i][2] + k[i][3]) * (h / 6.0))
	}
	return
}
// PRK4 performs a partial RK4 step for a single equation e of variable xn,
// returning the increment for x[xn] only. The other state entries are held at
// the midpoint between x_in and x_out (a previously computed full step).
//
// NOTE(review): as in RK4, every stage is evaluated at time ti — confirm this
// is intended for time-dependent equations.
func PRK4(xn int, e Eqn, c, s []float64, ti, tj float64, x_in, x_out, x_tmp []float64) float64 {
	var k [4]float64
	L := len(x_in)
	h := tj - ti
	// Freeze all variables at the midpoint of the previously computed step.
	for i := 0; i < L; i++ {
		mid := (0.5 * (x_out[i] - x_in[i]))
		x_tmp[i] = x_in[i] + mid
	}
	// Four RK4 stages, varying only the xn entry of the state.
	k[0] = e.Eval(ti, x_in, c, s)
	x_tmp[xn] = x_in[xn] + (h * k[0] / 2.0)
	k[1] = e.Eval(ti, x_tmp, c, s)
	x_tmp[xn] = x_in[xn] + (h * k[1] / 2.0)
	k[2] = e.Eval(ti, x_tmp, c, s)
	x_tmp[xn] = x_in[xn] + (h * k[2])
	k[3] = e.Eval(ti, x_tmp, c, s)
	return ((k[0] + 2.0*k[1] + 2.0*k[2] + k[3]) * (h / 6.0))
}
// func PrintPRK4(xn int, e Eqn, ti, to float64, x_in, x_out, x_tmp, c, s []float64) float64 {
// var k [4]float64
// L := len(x_in)
// h := to - ti
// for i := 0; i < L; i++ {
// x_tmp[i] = x_in[i] + (0.5 * (x_out[i] - x_in[i]))
// }
// fmt.Printf("in: %v\n", x_in)
// fmt.Printf("out: %v\n", x_out)
// fmt.Printf("tmp: %v\n", x_tmp)
// k[0] = e.Eval(ti, x_in, c, s)
// x_tmp[xn] = x_in[xn] + (h * k[0] / 2.0)
// fmt.Printf("tmp: %v\n", x_tmp)
// k[1] = e.Eval(ti, x_tmp, c, s)
// x_tmp[xn] = x_in[xn] + (h * k[1] / 2.0)
// fmt.Printf("tmp: %v\n", x_tmp)
// k[2] = e.Eval(ti, x_tmp, c, s)
// x_tmp[xn] = x_in[xn] + (h * k[2])
// fmt.Printf("tmp: %v\n", x_tmp)
// k[3] = e.Eval(ti, x_tmp, c, s)
// fmt.Printf("k: %v\n", k)
// ans := ((k[0] + 2.0*k[1] + 2.0*k[2] + k[3]) * (h / 6.0))
// fmt.Printf("ans: %.4f => %.4f\n\n", ans, x_out[xn]-x_in[xn])
// return ans
// }
package servo
import (
"math"
"sync"
"time"
)
// floatEpsilon is the distance below which two points are considered equal.
const floatEpsilon = 0.001

// distance returns the Euclidean distance between (x0, y0) and (x1, y1).
// math.Hypot is used instead of Sqrt(Pow+Pow): it avoids unnecessary
// overflow/underflow in the intermediate squares.
func distance(x0, y0, x1, y1 float64) float64 {
	return math.Hypot(x1-x0, y1-y0)
}
//PercentPoint - a point percent values of XY
// Both coordinates are fractions in [0, 1] of the field's extent.
type PercentPoint struct {
	X float64
	Y float64
}
//FieldXY is a two-dimensional field that controls two servos (one for X, and one for Y axes)
// The embedded mutex guards the current*/target* fields, which are shared
// between the caller and the background tick goroutine.
type FieldXY struct {
	ServoX *Servo
	ServoY *Servo
	// FlipX/FlipY mirror the corresponding axis before driving the servo.
	FlipX bool
	FlipY bool
	// CurrentPercentPointCh receives the (unflipped) point on every SetPoint;
	// sends are non-blocking, so slow consumers miss updates.
	CurrentPercentPointCh chan PercentPoint
	sync.Mutex
	currentX float64
	currentY float64
	targetX float64
	targetY float64
	// cancelNoiseCh stops a running random-movement goroutine.
	cancelNoiseCh chan struct{}
}
// tick advances the current position one step toward the target position.
// It is called periodically by the goroutine started in NewFieldXY, producing
// a smooth movement by splitting the remaining distance into ~1/100-unit steps.
func (f *FieldXY) tick() {
	f.Lock()
	currentX := f.currentX
	currentY := f.currentY
	targetX := f.targetX
	targetY := f.targetY
	f.Unlock()
	d := distance(currentX, currentY, targetX, targetY)
	if d < floatEpsilon {
		// Already (practically) at the target — nothing to do.
		return
	}
	// One step per 1/100 of a unit of remaining distance, at least one step.
	// d is a Euclidean distance and therefore non-negative, so the previous
	// stepCount < 0 sign-flip branch was dead code and has been removed.
	stepCount := int(d * 100)
	if stepCount < 1 {
		stepCount = 1
	}
	dX := (targetX - currentX) / float64(stepCount)
	dY := (targetY - currentY) / float64(stepCount)
	f.SetPoint(currentX+dX, currentY+dY)
}
// SetPoint moves servos to a single point on the field
// It records (x, y) as the current position, publishes the UNFLIPPED point on
// CurrentPercentPointCh (non-blocking — dropped if no reader is waiting), and
// only then applies axis flips before driving the servos.
func (f *FieldXY) SetPoint(x, y float64) {
	f.Lock()
	f.currentX = x
	f.currentY = y
	f.Unlock()
	// Non-blocking publish: observers that are not ready simply miss updates.
	select {
	case f.CurrentPercentPointCh <- PercentPoint{
		X: x,
		Y: y,
	}:
	default:
	}
	// Mirror axes as configured; flips affect only the physical servos, not
	// the stored/published coordinates above.
	if f.FlipX {
		x = 1 - x
	}
	if f.FlipY {
		y = 1 - y
	}
	f.ServoX.SetPercent(x)
	f.ServoY.SetPercent(y)
}
// LineTo sets the target point; the background tick loop then moves the
// current position toward it smoothly.
func (f *FieldXY) LineTo(x, y float64) {
	f.Lock()
	defer f.Unlock()
	f.targetX = x
	f.targetY = y
}
// RunAway from the point
// Moves the dot away from (x, y) so that it sits at least radius away, while
// staying inside the field. All math below is done in a 4x3 coordinate space
// (percent coordinates scaled by 4 horizontally and 3 vertically); the result
// is scaled back at the end.
// NOTE(review): if the dot coincides exactly with (x, y) the kaX/kaY division
// below is 0/0 = NaN — confirm callers can never hit that case.
func (f *FieldXY) RunAway(x, y, radius float64, alwaysStayOnRadius bool) {
	f.Lock()
	dotX := f.currentX * 4
	dotY := f.currentY * 3
	f.Unlock()
	const W = 1.0 * 4
	const H = 1.0 * 3
	x = x * 4
	y = y * 3
	keepAwayR := radius * 4
	// closest point from the current laser position to the "keep away" circle
	kaX := x + (keepAwayR*(dotX-x))/math.Sqrt(math.Pow(dotX-x, 2)+math.Pow(dotY-y, 2))
	kaY := y + (keepAwayR*(dotY-y))/math.Sqrt(math.Pow(dotX-x, 2)+math.Pow(dotY-y, 2))
	// Move onto the circle when the dot is inside it (or always, if pinned).
	if !alwaysStayOnRadius && distance(x, y, dotX, dotY) < distance(x, y, kaX, kaY) {
		dotX = kaX
		dotY = kaY
	}
	// pushed out of canvas
	// If the circle point left the field, fall back to the nearest
	// intersection of the keep-away circle with the field border.
	if dotX < 0 || dotX > W || dotY < 0 || dotY > H {
		intersections := [][]float64{}
		// pushed to the top
		if y-keepAwayR < 0 {
			dx := math.Sqrt(math.Pow(keepAwayR, 2) - math.Pow(y, 2))
			if 0 <= x+dx && x+dx <= W {
				intersections = append(intersections, []float64{x + dx, 0})
			}
			if 0 <= x-dx && x-dx <= W {
				intersections = append(intersections, []float64{x - dx, 0})
			}
		}
		// pushed to the bottom
		if y+keepAwayR > H {
			dx := math.Sqrt(math.Pow(keepAwayR, 2) - math.Pow(H-y, 2))
			if 0 <= x+dx && x+dx <= W {
				intersections = append(intersections, []float64{x + dx, H})
			}
			if 0 <= x-dx && x-dx <= W {
				intersections = append(intersections, []float64{x - dx, H})
			}
		}
		// pushed to the left
		if x-keepAwayR < 0 {
			dy := math.Sqrt(math.Pow(keepAwayR, 2) - math.Pow(x, 2))
			if 0 <= y+dy && y+dy <= H {
				intersections = append(intersections, []float64{0, y + dy})
			}
			if 0 <= y-dy && y-dy <= H {
				intersections = append(intersections, []float64{0, y - dy})
			}
		}
		// pushed to the right
		if x+keepAwayR > W {
			dy := math.Sqrt(math.Pow(keepAwayR, 2) - math.Pow(W-x, 2))
			if 0 <= y+dy && y+dy <= H {
				intersections = append(intersections, []float64{W, y + dy})
			}
			if 0 <= y-dy && y-dy <= H {
				intersections = append(intersections, []float64{W, y - dy})
			}
		}
		// Pick the border intersection closest to the dot's current position.
		if len(intersections) > 0 {
			minDistance := keepAwayR
			closestPoint := intersections[0]
			for _, v := range intersections {
				d := distance(dotX, dotY, v[0], v[1])
				if d < minDistance {
					minDistance = d
					closestPoint = v
				}
			}
			dotX = closestPoint[0]
			dotY = closestPoint[1]
		}
	}
	// Scale back to percent coordinates and glide there.
	f.LineTo(dotX/4, dotY/3)
}
// SetRandomMovements periodically nudges the dot by step in a pseudo-random
// direction every interval, cancelling any previously started noise loop.
// step=0 to disable random movements
func (f *FieldXY) SetRandomMovements(step float64, interval time.Duration) {
	// cancel previous noise motions if running
	select {
	case f.cancelNoiseCh <- struct{}{}:
	default:
	}
	if step > 0 {
		go func() {
			// BUG FIX: the ticker period was hard-coded to 3s, silently
			// ignoring the interval argument.
			ticker := time.NewTicker(interval)
			defer ticker.Stop()
			for {
				select {
				case <-f.cancelNoiseCh:
					return
				case <-ticker.C:
					f.MoveRandom(step)
				}
			}
		}()
	}
}
// MoveRandom - move
// Nudges the current point by step on each axis, bouncing off the [0, 1]
// borders. The direction is chosen from the wall-clock nanosecond parity —
// a cheap pseudo-random source, not statistically uniform.
func (f *FieldXY) MoveRandom(step float64) {
	f.Lock()
	x := f.currentX
	y := f.currentY
	f.Unlock()
	// X axis: bounce off borders, otherwise pick a pseudo-random direction.
	if x+step > 1 {
		x -= step
	} else if x-step < 0 {
		x += step
	} else if time.Now().Nanosecond()%2 == 0 {
		x += step
	} else {
		x -= step
	}
	// Y axis: same policy, sampled independently.
	if y+step > 1 {
		y -= step
	} else if y-step < 0 {
		y += step
	} else if time.Now().Nanosecond()%2 == 0 {
		y += step
	} else {
		y -= step
	}
	f.SetPoint(x, y)
}
// NewFieldXY creates new FieldXY
func NewFieldXY(servoX, servoY *Servo, flipX, flipY bool) *FieldXY {
fieldXY := &FieldXY{
ServoX: servoX,
ServoY: servoY,
FlipX: flipX,
FlipY: flipY,
CurrentPercentPointCh: make(chan PercentPoint),
cancelNoiseCh: make(chan struct{}),
}
ticker := time.NewTicker(time.Second / 200)
go func() {
for {
select {
case <-ticker.C:
fieldXY.tick()
}
}
}()
return fieldXY
} | pkg/servo/fieldxy.go | 0.707506 | 0.496094 | fieldxy.go | starcoder |
package MailSlurpClient
import (
"time"
)
// WaitForConditions Conditions that a `waitForXEmails` endpoint operates on. The methods wait until given conditions are met or a timeout is reached. If the conditions are met without needing to wait the results will be returned immediately. Can include `unreadOnly` to ignore already read emails that were returned in an API call or viewing in the dashboard. Can also include matches for emails containing `from`, `subject`, `hasAttachments` etc.
// Per the json tags, only InboxId and Timeout are always serialized; every
// other field is omitted when empty.
type WaitForConditions struct {
	// ISO Date Time latest time of email to consider. Filter for matching emails that were received before this date
	Before time.Time `json:"before,omitempty"`
	// Number of results that should match conditions. Either exactly or at least this amount based on the `countType`. If count condition is not met and the timeout has not been reached the `waitFor` method will retry the operation.
	Count int32 `json:"count,omitempty"`
	// How should the found count be compared to the expected count.
	CountType string `json:"countType,omitempty"`
	// Max time in milliseconds to wait between retries if a `timeout` is specified.
	DelayTimeout int64 `json:"delayTimeout,omitempty"`
	// ID of inbox to search within and apply conditions to. Essentially filtering the emails found to give a count.
	InboxId string `json:"inboxId"`
	// Conditions that should be matched for an email to qualify for results. Each condition will be applied in order to each email within an inbox to filter a result list of matching emails you are waiting for.
	Matches []MatchOption `json:"matches,omitempty"`
	// ISO Date Time earliest time of email to consider. Filter for matching emails that were received after this date
	Since time.Time `json:"since,omitempty"`
	// Direction to sort matching emails by created time
	SortDirection string `json:"sortDirection,omitempty"`
	// Max time in milliseconds to retry the `waitFor` operation until conditions are met.
	Timeout int64 `json:"timeout"`
	// Apply conditions only to **unread** emails. All emails begin with `read=false`. An email is marked `read=true` when an `EmailDto` representation of it has been returned to the user at least once. For example you have called `getEmail` or `waitForLatestEmail` etc., or you have viewed the email in the dashboard.
	UnreadOnly bool `json:"unreadOnly,omitempty"`
}
package fixed
import (
"encoding/binary"
"errors"
)
// ErrOverflow is returned when a fixed-point operation overflows.
var ErrOverflow = errors.New("overflow")
// String implements fmt.Stringer using the internal formatter.
func (x Fixed) String() string {
	return x.format()
}
// New creates a Fixed from an int value.
func New(val int) Fixed {
	return fixed(int64(val))
}
// New64 creates a Fixed from an int64 value.
func New64(val int64) Fixed {
	return fixed(val)
}
// From creates a Fixed from a float64 value.
func From(val float64) Fixed {
	return from(val)
}
// One is the fixed-point representation of 1; Zero is the zero value.
var One = fixedOne
var Zero = Fixed{}
// Abs returns the absolute value of x.
func (x Fixed) Abs() Fixed {
	return x.abs()
}
// Neg returns x with its sign flipped.
func (x Fixed) Neg() Fixed {
	return x.neg()
}
// Floor returns the largest integer <= x.
func (x Fixed) Floor() int64 {
	return x.floor()
}
// Ceil returns the smallest integer >= x.
func (x Fixed) Ceil() int64 {
	return x.ceil()
}
// Round returns x rounded to the nearest integer.
func (x Fixed) Round() int64 {
	return x.round()
}
// Float converts x to a float64 approximation.
func (x Fixed) Float() float64 {
	return x.float()
}
// Mul returns x * y.
func (x Fixed) Mul(y Fixed) Fixed {
	return mul(x, y)
}
// Div returns x / y.
func (x Fixed) Div(y Fixed) Fixed {
	return div(x, y)
}
// Add returns x + y.
func (x Fixed) Add(y Fixed) Fixed {
	return add(x, y)
}
// Sub returns x - y.
func (x Fixed) Sub(y Fixed) Fixed {
	return sub(x, y)
}
// LessThan compares fixed values and returns true if x < y.
func (x Fixed) LessThan(y Fixed) bool {
	return x.less(y)
}
// GreaterThan compares fixed values and returns true if x > y
func (x Fixed) GreaterThan(y Fixed) bool {
	return x.greater(y)
}
// EqualTo compares fixed values and returns true if x == y
func (x Fixed) EqualTo(y Fixed) bool {
	return x.equal(y)
}
// DivUint64 creates a new Fixed equal to p/q (unsigned operands).
func DivUint64(p, q uint64) Fixed {
	return udiv(ufixed(p), ufixed(q))
}
// Div64 creates new Fixed equal to p/q signed result
func Div64(p, q int64) Fixed {
	return div(fixed(p), fixed(q))
}
// FracFromBytes takes only fractional part from bytes array and return fixed value
// The first 8 bytes are read little-endian and masked to the fraction bits.
func FracFromBytes(x []byte) Fixed {
	return rawfixed(int64(binary.LittleEndian.Uint64(x)) & fracMask)
}
// FromBytes creates fixed value from bytes array
// Layout: 16 bytes little-endian, low word first then high word (inverse of
// Bytes). Panics if len(x) < 16.
func FromBytes(x []byte) Fixed {
	return Fixed{lo: binary.LittleEndian.Uint64(x[:8]), hi: binary.LittleEndian.Uint64(x[8:])}
}
// Bytes converts fixed value into bytes array
// Produces the 16-byte little-endian lo/hi layout that FromBytes consumes.
func (x Fixed) Bytes() []byte {
	b := [16]byte{}
	binary.LittleEndian.PutUint64(b[:8], x.lo)
	binary.LittleEndian.PutUint64(b[8:], x.hi)
	return b[:]
}
// BinCDF returns the binomial cumulative distribution P(X <= x) for n trials
// with success probability p. Arguments below 0 clamp to Zero; at or above n
// they clamp to One.
func BinCDF(n int, p Fixed, x int) Fixed {
	// Delegate to the int64 variant so the two implementations stay in sync.
	return BinCDF64(int64(n), p, int64(x))
}
func BinCDF64(n int64, p Fixed, x int64) Fixed {
if x < 0 {
return Zero
} else if x >= n {
return One
} else {
return incomplete(n-x, x+1, oneValue-p.fixed56())
}
} | fixed.go | 0.770724 | 0.405566 | fixed.go | starcoder |
package curves
import (
"math"
)
// EaseInQuad eases in a Quad transition (p^2).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInQuad(completed float64) float64 {
	return math.Pow(completed, 2)
}

// EaseOutQuad eases out a Quad transition: the mirror image of EaseInQuad.
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutQuad(completed float64) float64 {
	mirrored := EaseInQuad(1 - completed)
	return 1 - mirrored
}

// EaseInOutQuad eases in during the first half and out during the second.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutQuad(completed float64) float64 {
	if completed >= 0.5 {
		return 1 - EaseInQuad((completed*-2)+2)/2
	}
	return EaseInQuad(completed*2) / 2
}
// EaseInCubic eases in a Cubic transition (p^3).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInCubic(completed float64) float64 {
	return math.Pow(completed, 3)
}
// EaseOutCubic eases out a Cubic transition (mirror of EaseInCubic).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutCubic(completed float64) float64 {
	return 1 - EaseInCubic(1-completed)
}
// EaseInOutCubic eases in and out a Cubic transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutCubic(completed float64) float64 {
	if completed < 0.5 {
		return EaseInCubic(completed*2) / 2
	}
	return 1 - EaseInCubic((completed*-2)+2)/2
}
// EaseInQuart eases in a Quart transition (p^4).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInQuart(completed float64) float64 {
	return math.Pow(completed, 4)
}
// EaseOutQuart eases out a Quart transition (mirror of EaseInQuart).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutQuart(completed float64) float64 {
	return 1 - EaseInQuart(1-completed)
}
// EaseInOutQuart eases in and out a Quart transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutQuart(completed float64) float64 {
	if completed < 0.5 {
		return EaseInQuart(completed*2) / 2
	}
	return 1 - EaseInQuart((completed*-2)+2)/2
}
// EaseInQuint eases in a Quint transition (p^5).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInQuint(completed float64) float64 {
	return math.Pow(completed, 5)
}
// EaseOutQuint eases out a Quint transition (mirror of EaseInQuint).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutQuint(completed float64) float64 {
	return 1 - EaseInQuint(1-completed)
}
// EaseInOutQuint eases in and out a Quint transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutQuint(completed float64) float64 {
	if completed < 0.5 {
		return EaseInQuint(completed*2) / 2
	}
	return 1 - EaseInQuint((completed*-2)+2)/2
}
// EaseInExpo eases in a Expo transition.
// Note: this is p^6 — jQuery UI generates its "Expo" easing as the sixth
// power, not as 2^(10(p-1)).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInExpo(completed float64) float64 {
	return math.Pow(completed, 6)
}
// EaseOutExpo eases out a Expo transition (mirror of EaseInExpo).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutExpo(completed float64) float64 {
	return 1 - EaseInExpo(1-completed)
}
// EaseInOutExpo eases in and out a Expo transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutExpo(completed float64) float64 {
	if completed < 0.5 {
		return EaseInExpo(completed*2) / 2
	}
	return 1 - EaseInExpo((completed*-2)+2)/2
}
// EaseInSine eases in a Sine transition (1 - cos(p*pi/2)).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInSine(completed float64) float64 {
	return 1 - math.Cos(completed*math.Pi/2)
}
// EaseOutSine eases out a Sine transition (mirror of EaseInSine).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutSine(completed float64) float64 {
	return 1 - EaseInSine(1-completed)
}
// EaseInOutSine eases in and out a Sine transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutSine(completed float64) float64 {
	if completed < 0.5 {
		return EaseInSine(completed*2) / 2
	}
	return 1 - EaseInSine((completed*-2)+2)/2
}
// EaseInCirc eases in a Circ transition (1 - sqrt(1 - p^2)).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInCirc(completed float64) float64 {
	return 1 - math.Sqrt(1-completed*completed)
}
// EaseOutCirc eases out a Circ transition (mirror of EaseInCirc).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutCirc(completed float64) float64 {
	return 1 - EaseInCirc(1-completed)
}
// EaseInOutCirc eases in and out a Circ transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutCirc(completed float64) float64 {
	if completed < 0.5 {
		return EaseInCirc(completed*2) / 2
	}
	return 1 - EaseInCirc((completed*-2)+2)/2
}
// EaseInElastic eases in a Elastic transition.
// The endpoints are returned exactly to avoid floating-point drift in the
// damped-sine formula.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInElastic(completed float64) float64 {
	if completed == 0 || completed == 1 {
		return completed
	}
	return -math.Pow(2, 8*(completed-1)) * math.Sin(((completed-1)*80-7.5)*math.Pi/15)
}
// EaseOutElastic eases out a Elastic transition (mirror of EaseInElastic).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutElastic(completed float64) float64 {
	return 1 - EaseInElastic(1-completed)
}
// EaseInOutElastic eases in and out a Elastic transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutElastic(completed float64) float64 {
	if completed < 0.5 {
		return EaseInElastic(completed*2) / 2
	}
	return 1 - EaseInElastic((completed*-2)+2)/2
}
// EaseInBack eases in a Back transition (p^2*(3p - 2): overshoots below 0
// before accelerating in).
// See http://jqueryui.com/easing/ for curve in action.
func EaseInBack(completed float64) float64 {
	return completed * completed * (3*completed - 2)
}
// EaseOutBack eases out a Back transition (mirror of EaseInBack).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutBack(completed float64) float64 {
	return 1 - EaseInBack(1-completed)
}
// EaseInOutBack eases in and out a Back transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutBack(completed float64) float64 {
	if completed < 0.5 {
		return EaseInBack(completed*2) / 2
	}
	return 1 - EaseInBack((completed*-2)+2)/2
}
// EaseInBounce eases in a Bounce transition.
// The loop finds which of the four bounce segments `completed` falls into
// (segment boundaries at (2^b - 1)/11), then evaluates that segment's
// parabola with the classic 7.5625 bounce coefficient.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInBounce(completed float64) float64 {
	bounce := float64(3)
	var pow2 float64
	for pow2 = math.Pow(2, bounce); completed < ((pow2 - 1) / 11); pow2 = math.Pow(2, bounce) {
		bounce--
	}
	return 1/math.Pow(4, 3-bounce) - 7.5625*math.Pow((pow2*3-2)/22-completed, 2)
}
// EaseOutBounce eases out a Bounce transition (mirror of EaseInBounce).
// See http://jqueryui.com/easing/ for curve in action.
func EaseOutBounce(completed float64) float64 {
	return 1 - EaseInBounce(1-completed)
}
// EaseInOutBounce eases in and out a Bounce transition.
// See http://jqueryui.com/easing/ for curve in action.
func EaseInOutBounce(completed float64) float64 {
	if completed < 0.5 {
		return EaseInBounce(completed*2) / 2
	}
	return 1 - EaseInBounce((completed*-2)+2)/2
}
package max
import (
"fmt"
"github.com/davecgh/go-spew/spew"
)
// entry pairs a recorded value with the index it was recorded at.
type entry struct {
	value int32
	index int
}
// window is a circular buffer which keeps track of the maximum value observed in a particular time.
// Based on the "ascending minima algorithm" (http://web.archive.org/web/20120805114719/http://home.tiac.net/~cri/2001/slidingmin.html).
type window struct {
	// maxima holds candidate maxima in descending value order; first is the
	// offset of the oldest candidate and length the number of candidates.
	maxima []entry
	first, length int
}
// newWindow creates a sliding-maximum window buffer covering size
// consecutive indexes.
func newWindow(size int) *window {
	w := &window{maxima: make([]entry, size)}
	return w
}
// Record records a value for a monotonically increasing index.
// The window spans the last len(maxima) indexes; older candidates are evicted.
func (m *window) Record(index int, v int32) {
	// Step One: Remove any elements where v > element.
	// An element that's lower than the new element can never influence the
	// maximum again, because the new element is both larger _and_ more
	// recent than it.
	// Search backwards because that way we can delete by just decrementing length.
	// The elements are guaranteed to be in descending order as described in Step Three.
	for ; m.length > 0; m.length-- {
		if v < m.maxima[m.index(m.first+m.length-1)].value {
			// The elements are sorted, no point continuing.
			break
		}
	}
	// Step Two: Remove out of date elements from front of array.
	// We only ever add at end of list, so the indexes are in ascending order,
	// therefore the oldest are always first.
	for m.length > 0 && index-m.maxima[m.first].index >= len(m.maxima) {
		m.length--
		m.first++
		// Circle around the buffer if necessary.
		if m.first == len(m.maxima) {
			m.first = 0
		}
	}
	// Step 2b: To be defensive against multiple values being recorded against
	// the same index, if the last index is the same as this one, we'll pick the largest.
	if m.length > 0 {
		if last := m.maxima[m.index(m.first+m.length-1)]; last.index == index {
			if last.value > v {
				v = last.value
			}
			// Remove last element because we'll add it back in Step Three.
			m.length--
		}
	}
	// Step Three: Add the new value to the end (which maintains sorted order
	// since we removed any lesser values above, so value we're appending is
	// always smallest value in list).
	m.maxima[m.index(m.first+m.length)] = entry{index: index, value: v}
	m.length++
	// We removed any items from the list in Step Two that were added more than
	// len(maxima) ago, so length can never be larger than len(maxima).
	if m.length > len(m.maxima) {
		panic(fmt.Sprintf("length %d exceeded buffer size %d. This should be impossible. Current state: %v", m.length, len(m.maxima), spew.Sdump(m)))
	}
}
// Current returns the current maximum value observed.
// NOTE(review): before any Record call (length == 0) this yields the zero
// value stored in the first slot — confirm callers always Record first.
func (m *window) Current() int32 {
	return m.maxima[m.first].value
}
// index wraps a logical position onto the circular buffer.
func (m *window) index(i int) int {
	return i % len(m.maxima)
}
package expr
import (
"reflect"
"time"
)
// ComparisonOperator is a comparison operator.
type ComparisonOperator uint8
const (
	// ComparisonNone is the zero value: no operator set.
	ComparisonNone ComparisonOperator = iota
	// ComparisonCustom is paired with Comparison.custom, the operator string.
	ComparisonCustom
	ComparisonEqual
	ComparisonNotEqual
	ComparisonLessThan
	ComparisonGreaterThan
	ComparisonLessThanOrEqualTo
	ComparisonGreaterThanOrEqualTo
	ComparisonBetween
	ComparisonNotBetween
	ComparisonIn
	ComparisonNotIn
	ComparisonIs
	ComparisonIsNot
	ComparisonLike
	ComparisonNotLike
	ComparisonRegexp
	ComparisonNotRegexp
)
// Comparison represents the relationship between values.
// It pairs an operator (plus an optional custom operator string) with the
// right-hand value(s) of the comparison.
type Comparison struct {
	op ComparisonOperator
	custom string // The custom operator when not empty.
	value interface{}
}
// CustomOperator returns the custom operator of the comparison.
// It is empty unless the comparison was built with Op.
func (c *Comparison) CustomOperator() string {
	return c.custom
}
// Operator returns the ComparisonOperator.
func (c *Comparison) Operator() ComparisonOperator {
	return c.op
}
// Value returns the value of the comparison.
func (c *Comparison) Value() interface{} {
	return c.value
}
// newComparison builds a Comparison with a predefined operator.
func newComparison(op ComparisonOperator, v interface{}) *Comparison {
	return &Comparison{op: op, value: v}
}

// newCustomComparison builds a Comparison carrying a caller-supplied
// operator string (Operator() reports ComparisonCustom).
func newCustomComparison(op string, v interface{}) *Comparison {
	return &Comparison{op: ComparisonCustom, custom: op, value: v}
}
// Gte is a comparison that means: is greater than or equal to the value.
func Gte(value interface{}) *Comparison {
	return newComparison(ComparisonGreaterThanOrEqualTo, value)
}
// Lte is a comparison that means: is less than or equal to the value.
func Lte(value interface{}) *Comparison {
	return newComparison(ComparisonLessThanOrEqualTo, value)
}
// Eq is a comparison that means: is equal to the value.
func Eq(value interface{}) *Comparison {
	return newComparison(ComparisonEqual, value)
}
// NotEq is a comparison that means: is not equal to the value.
func NotEq(value interface{}) *Comparison {
	return newComparison(ComparisonNotEqual, value)
}
// Gt is a comparison that means: is greater than the value.
func Gt(value interface{}) *Comparison {
	return newComparison(ComparisonGreaterThan, value)
}
// Lt is a comparison that means: is less than the value.
func Lt(value interface{}) *Comparison {
	return newComparison(ComparisonLessThan, value)
}
// In is a comparison that means: is any of the values.
// The variadic values are normalized via toInterfaceArray.
func In(value ...interface{}) *Comparison {
	return newComparison(ComparisonIn, toInterfaceArray(value))
}
// NotIn is a comparison that means: is none of the values.
func NotIn(value ...interface{}) *Comparison {
	return newComparison(ComparisonNotIn, toInterfaceArray(value))
}
// AnyOf is a comparison that means: is any of the values of the slice.
// The slice argument is expanded element by element via toInterfaceArray.
func AnyOf(value interface{}) *Comparison {
	return newComparison(ComparisonIn, toInterfaceArray(value))
}
// NotAnyOf is a comparison that means: is none of the values of the slice.
func NotAnyOf(value interface{}) *Comparison {
	return newComparison(ComparisonNotIn, toInterfaceArray(value))
}
// After is a comparison that means: is after the time.
func After(value time.Time) *Comparison {
	return newComparison(ComparisonGreaterThan, value)
}
// Before is a comparison that means: is before the time.
func Before(value time.Time) *Comparison {
	return newComparison(ComparisonLessThan, value)
}
// OnOrAfter is a comparison that means: is on or after the time.
func OnOrAfter(value time.Time) *Comparison {
	return newComparison(ComparisonGreaterThanOrEqualTo, value)
}
// OnOrBefore is a comparison that means: is on or before the time.
func OnOrBefore(value time.Time) *Comparison {
	return newComparison(ComparisonLessThanOrEqualTo, value)
}
// Between is a comparison that means: is between lower and upper bound.
// The two bounds are stored as a 2-element slice.
func Between(lower, upper interface{}) *Comparison {
	return newComparison(ComparisonBetween, []interface{}{lower, upper})
}
// NotBetween is a comparison that means: is not between lower and upper bound.
func NotBetween(lower, upper interface{}) *Comparison {
	return newComparison(ComparisonNotBetween, []interface{}{lower, upper})
}
// Is is a comparison that means: is equivalent to nil, true or false.
func Is(value interface{}) *Comparison {
	return newComparison(ComparisonIs, value)
}
// IsNot is a comparison that means: is not equivalent to nil, true nor false.
func IsNot(value interface{}) *Comparison {
	return newComparison(ComparisonIsNot, value)
}
// IsNull is a comparison that means: is equivalent to nil.
func IsNull() *Comparison {
	return newComparison(ComparisonIs, nil)
}
// IsNotNull is a comparison that means: is not equivalent to nil.
func IsNotNull() *Comparison {
	return newComparison(ComparisonIsNot, nil)
}
// Like is a comparison that checks whether the reference matches the wildcard
// of the value.
func Like(value string) *Comparison {
	return newComparison(ComparisonLike, value)
}
// NotLike is a comparison that checks whether the reference does not match the
// wildcard of the value.
func NotLike(value string) *Comparison {
	return newComparison(ComparisonNotLike, value)
}
// Regexp is a comparison that checks whether the reference matches the regular
// expression.
func Regexp(value string) *Comparison {
	return newComparison(ComparisonRegexp, value)
}
// NotRegexp is a comparison that checks whether the reference does not match
// the regular expression.
func NotRegexp(value string) *Comparison {
	return newComparison(ComparisonNotRegexp, value)
}
// Op returns a comparison with the custom operator.
// Operator() will report ComparisonCustom; the operator string is available
// via CustomOperator().
func Op(op string, value interface{}) *Comparison {
	return newCustomComparison(op, value)
}
// toInterfaceArray normalizes an arbitrary value into a flat []interface{}:
// pointers are dereferenced (recursively), slices are expanded element by
// element, and anything else is wrapped in a one-element slice.
func toInterfaceArray(v interface{}) []interface{} {
	rv := reflect.ValueOf(v)
	kind := rv.Type().Kind()
	if kind == reflect.Ptr {
		// Follow the pointer and normalize whatever it points at.
		return toInterfaceArray(rv.Elem().Interface())
	}
	if kind == reflect.Slice {
		out := make([]interface{}, rv.Len())
		for i := range out {
			out[i] = rv.Index(i).Interface()
		}
		return out
	}
	// Any other kind: wrap as a single-element slice.
	return []interface{}{v}
}
package models
// There are two decreasing phases in this pattern that use the same price
// calculations, so we are going to make an embeddable type for them.
type smallSpikeDecreasingBase struct {
	phaseCoreAuto
}

// AdjustPriceMultiplier applies one step of decay to the price multiplier.
// isMin selects the lower bound of the range, which decays faster (0.05
// total) than the upper bound (0.03).
func (phase *smallSpikeDecreasingBase) AdjustPriceMultiplier(
	factor float32, isMin bool,
) float32 {
	if isMin {
		// In order to match the EXACT calculations from the game, we need to
		// subtract both 0.02 and 0.03 discretely; otherwise we end up with a
		// SLIGHTLY different float value that can result in a price different
		// from what the game would yield.
		return factor - 0.02 - 0.03
	}
	return factor - 0.03
}

// BasePriceMultiplier returns the base multiplier range (0.4 to 0.9) used
// for every sub-period of a decreasing phase.
func (phase *smallSpikeDecreasingBase) BasePriceMultiplier(int) (
	min float32, max float32,
) {
	return 0.4, 0.9
}
// DECREASING PHASE 1
//
// smallSpikeDecreasing1 is the opening decreasing phase; its price math
// lives on the embedded smallSpikeDecreasingBase.
type smallSpikeDecreasing1 struct {
	smallSpikeDecreasingBase
}

// Name returns the display name of this phase.
func (phase *smallSpikeDecreasing1) Name() string {
	return "steady decrease"
}

// PossibleLengths reports that this phase may last anywhere from 0 through
// 7 sub-periods.
func (phase *smallSpikeDecreasing1) PossibleLengths(
	[]PatternPhase,
) (possibilities []int) {
	phase.PossibilitiesComplete()
	lengths := make([]int, 8)
	for i := range lengths {
		lengths[i] = i
	}
	return lengths
}

// MaxLength returns the maximum number of sub-periods this phase can span.
func (phase *smallSpikeDecreasing1) MaxLength() int {
	return 7
}

// Duplicate returns a copy of this phase.
func (phase *smallSpikeDecreasing1) Duplicate() phaseImplement {
	clone := &smallSpikeDecreasing1{
		smallSpikeDecreasingBase{phase.phaseCoreAuto},
	}
	return clone
}
// INCREASING PHASE
//
// smallSpikeIncreasing is the rising phase of the small-spike pattern,
// including the spike itself.
type smallSpikeIncreasing struct {
	phaseCoreAuto
}

// IsSpike reports whether subPeriod is part of the spike (sub-period 2 and
// later). This pattern never produces a "big" spike, so isBig is always false.
func (phase *smallSpikeIncreasing) IsSpike(subPeriod int) (isSpike bool, isBig bool) {
	return subPeriod >= 2, false
}

// FinalPriceAdjustment returns the amount added to the computed price for
// the given sub-period: -1 for sub-periods 2 and 4, 0 otherwise.
// NOTE(review): the original comment spoke of "period 3 and 5" while the
// code adjusts 2 and 4 — likely 1- vs 0-based wording; confirm.
func (phase *smallSpikeIncreasing) FinalPriceAdjustment(subPeriod int) int {
	switch subPeriod {
	case 2, 4:
		return -1
	}
	return 0
}

// BasePriceMultiplier returns the multiplier range for subPeriod: the
// pre-spike range (0.9-1.4) for the first two sub-periods, the spike range
// (1.4-2.0) afterwards.
func (phase *smallSpikeIncreasing) BasePriceMultiplier(
	subPeriod int,
) (min float32, max float32) {
	if subPeriod >= 2 {
		return 1.4, 2.0
	}
	return 0.9, 1.4
}

// Name returns the display name of this phase.
// NOTE(review): "hasSpikeAny" reads like a search-and-replace artifact
// (perhaps "small spike"?) — confirm the intended string before changing it.
func (phase *smallSpikeIncreasing) Name() string {
	return "small hasSpikeAny"
}

// PossibleLengths reports the single allowed length of this phase: 5.
func (phase *smallSpikeIncreasing) PossibleLengths(
	[]PatternPhase,
) (possibilities []int) {
	phase.PossibilitiesComplete()
	return []int{5}
}

// MaxLength returns the maximum number of sub-periods this phase can span.
func (phase *smallSpikeIncreasing) MaxLength() int {
	return 5
}

// Duplicate returns a copy of this phase.
func (phase *smallSpikeIncreasing) Duplicate() phaseImplement {
	clone := &smallSpikeIncreasing{phase.phaseCoreAuto}
	return clone
}
// DECREASING PHASE 2 (the original section header said "PHASE 1" — a
// copy/paste slip; this is the closing decreasing phase).
//
// smallSpikeDecreasing2 shares its price math with phase 1 via the
// embedded smallSpikeDecreasingBase.
type smallSpikeDecreasing2 struct {
	smallSpikeDecreasingBase
}

// Name returns the display name of this phase.
func (phase *smallSpikeDecreasing2) Name() string {
	return "steady decrease"
}

// PossibleLengths reports the single allowed length: whatever remains of
// the 7-sub-period budget shared with phase 0.
func (phase *smallSpikeDecreasing2) PossibleLengths(
	phases []PatternPhase,
) (possibilities []int) {
	phase.PossibilitiesComplete()
	remaining := 7 - phases[0].Length()
	return []int{remaining}
}

// MaxLength returns the maximum number of sub-periods this phase can span.
func (phase *smallSpikeDecreasing2) MaxLength() int {
	return 7
}

// Duplicate returns a copy of this phase.
func (phase *smallSpikeDecreasing2) Duplicate() phaseImplement {
	clone := &smallSpikeDecreasing2{
		smallSpikeDecreasingBase{phase.phaseCoreAuto},
	}
	return clone
}
// Generates a new set of fluctuating phases to branch possible weeks off of.
func smallSpikeProgression(ticker *PriceTicker) []PatternPhase {
phases := []PatternPhase{
&patternPhaseAuto{phaseImplement: new(smallSpikeDecreasing1)},
&patternPhaseAuto{phaseImplement: new(smallSpikeIncreasing)},
&patternPhaseAuto{phaseImplement: new(smallSpikeDecreasing2)},
}
for _, thisPhase := range phases {
thisPhase.SetTicker(ticker)
}
return phases
} | models/phasesSmallSpike.go | 0.79546 | 0.509825 | phasesSmallSpike.go | starcoder |
package spec
import (
"bytes"
"encoding/json"
"fmt"
"math/big"
"github.com/attestantio/go-execution-client/types"
"github.com/pkg/errors"
)
// Transaction is a tagged union covering all supported transaction types.
// Type selects which one of the three typed pointers below is populated;
// the accessor methods on Transaction dispatch on it.
type Transaction struct {
	Type TransactionType
	Type0Transaction *Type0Transaction
	Type1Transaction *Type1Transaction
	Type2Transaction *Type2Transaction
}

// transactionTypeJSON is a minimal struct used to peek at only the "type"
// field of a transaction's JSON before decoding the full payload.
type transactionTypeJSON struct {
	Type TransactionType `json:"type"`
}
// MarshalJSON implements json.Marshaler by delegating to the concrete
// typed transaction selected by t.Type.
func (t *Transaction) MarshalJSON() ([]byte, error) {
	var inner interface{}
	switch t.Type {
	case TransactionType0:
		inner = t.Type0Transaction
	case TransactionType1:
		inner = t.Type1Transaction
	case TransactionType2:
		inner = t.Type2Transaction
	default:
		return nil, fmt.Errorf("unhandled transaction type %v", t.Type)
	}
	return json.Marshal(inner)
}

// UnmarshalJSON implements json.Unmarshaler. It first decodes only the
// "type" field, then decodes the full input into the matching typed
// transaction.
func (t *Transaction) UnmarshalJSON(input []byte) error {
	var header transactionTypeJSON
	if err := json.Unmarshal(input, &header); err != nil {
		return errors.Wrap(err, "invalid JSON")
	}
	t.Type = header.Type
	var target interface{}
	switch t.Type {
	case TransactionType0:
		t.Type0Transaction = &Type0Transaction{}
		target = t.Type0Transaction
	case TransactionType1:
		t.Type1Transaction = &Type1Transaction{}
		target = t.Type1Transaction
	case TransactionType2:
		t.Type2Transaction = &Type2Transaction{}
		target = t.Type2Transaction
	default:
		return fmt.Errorf("unhandled transaction type %v", header.Type)
	}
	return json.Unmarshal(input, target)
}
// AccessList returns the access list of the transaction, or nil for
// transaction types that do not carry one (type 0).
func (t *Transaction) AccessList() []*AccessListEntry {
	switch t.Type {
	case TransactionType0:
		// Legacy transactions have no access list.
		return nil
	case TransactionType1:
		return t.Type1Transaction.AccessList
	case TransactionType2:
		return t.Type2Transaction.AccessList
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}
// BlockHash returns the hash of the block containing the transaction, or
// nil if the transaction is not included in a block.
func (t *Transaction) BlockHash() *types.Hash {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.BlockHash
	case TransactionType1:
		return t.Type1Transaction.BlockHash
	case TransactionType2:
		return t.Type2Transaction.BlockHash
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// BlockNumber returns the number of the block containing the transaction,
// or nil if the transaction is not included in a block.
func (t *Transaction) BlockNumber() *uint32 {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.BlockNumber
	case TransactionType1:
		return t.Type1Transaction.BlockNumber
	case TransactionType2:
		return t.Type2Transaction.BlockNumber
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// From returns the sender address of the transaction.
func (t *Transaction) From() types.Address {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.From
	case TransactionType1:
		return t.Type1Transaction.From
	case TransactionType2:
		return t.Type2Transaction.From
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// Gas returns the gas limit of the transaction.
func (t *Transaction) Gas() uint32 {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.Gas
	case TransactionType1:
		return t.Type1Transaction.Gas
	case TransactionType2:
		return t.Type2Transaction.Gas
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// GasPrice returns the gas price of the transaction. It is 0 for
// transaction types without an individual gas price (type 2, which prices
// gas via max fee / max priority fee instead).
func (t *Transaction) GasPrice() uint64 {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.GasPrice
	case TransactionType1:
		return t.Type1Transaction.GasPrice
	case TransactionType2:
		return 0
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// Hash returns the hash of the transaction.
func (t *Transaction) Hash() types.Hash {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.Hash
	case TransactionType1:
		return t.Type1Transaction.Hash
	case TransactionType2:
		return t.Type2Transaction.Hash
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}
// Input returns the input data of the transaction.
func (t *Transaction) Input() []byte {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.Input
	case TransactionType1:
		return t.Type1Transaction.Input
	case TransactionType2:
		return t.Type2Transaction.Input
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// MaxFeePerGas returns the maximum fee per gas paid by the transaction.
// It is 0 for types that do not support it (type 0 and type 1).
func (t *Transaction) MaxFeePerGas() uint64 {
	switch t.Type {
	case TransactionType0, TransactionType1:
		return 0
	case TransactionType2:
		return t.Type2Transaction.MaxFeePerGas
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// MaxPriorityFeePerGas returns the maximum priority fee per gas paid by
// the transaction. It is 0 for types that do not support it (type 0 and
// type 1).
func (t *Transaction) MaxPriorityFeePerGas() uint64 {
	switch t.Type {
	case TransactionType0, TransactionType1:
		return 0
	case TransactionType2:
		return t.Type2Transaction.MaxPriorityFeePerGas
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// Nonce returns the nonce of the transaction.
func (t *Transaction) Nonce() uint64 {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.Nonce
	case TransactionType1:
		return t.Type1Transaction.Nonce
	case TransactionType2:
		return t.Type2Transaction.Nonce
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// R returns the R portion of the transaction's signature.
func (t *Transaction) R() *big.Int {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.R
	case TransactionType1:
		return t.Type1Transaction.R
	case TransactionType2:
		return t.Type2Transaction.R
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}

// S returns the S portion of the transaction's signature.
func (t *Transaction) S() *big.Int {
	switch t.Type {
	case TransactionType0:
		return t.Type0Transaction.S
	case TransactionType1:
		return t.Type1Transaction.S
	case TransactionType2:
		return t.Type2Transaction.S
	}
	panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}
// To returns the recipient of the transaction.
// This can be nil, for example on contract creation.
func (t *Transaction) To() *types.Address {
switch t.Type {
case TransactionType0:
return t.Type0Transaction.To
case TransactionType1:
return t.Type1Transaction.To
case TransactionType2:
return t.Type2Transaction.To
default:
panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}
}
// TransactionIndex returns the index of the transaction in its block.
// This value can be nil, if the transaction is not included in a block.
func (t *Transaction) TransactionIndex() *uint32 {
switch t.Type {
case TransactionType0:
return t.Type0Transaction.TransactionIndex
case TransactionType1:
return t.Type1Transaction.TransactionIndex
case TransactionType2:
return t.Type2Transaction.TransactionIndex
default:
panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}
}
// V returns the V portion of the signature of the transaction.
func (t *Transaction) V() *big.Int {
switch t.Type {
case TransactionType0:
return t.Type0Transaction.V
case TransactionType1:
return t.Type1Transaction.V
case TransactionType2:
return t.Type2Transaction.V
default:
panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}
}
// Value returns the value of the transaction.
func (t *Transaction) Value() *big.Int {
switch t.Type {
case TransactionType0:
return t.Type0Transaction.Value
case TransactionType1:
return t.Type1Transaction.Value
case TransactionType2:
return t.Type2Transaction.Value
default:
panic(fmt.Errorf("unhandled transaction type %s", t.Type))
}
}
// String returns a string version of the structure.
func (t *Transaction) String() string {
data, err := json.Marshal(t)
if err != nil {
return fmt.Sprintf("ERR: %v", err)
}
return string(bytes.TrimSuffix(data, []byte("\n")))
} | spec/transaction.go | 0.736021 | 0.432243 | transaction.go | starcoder |
package day_02
const input = `
forward 1
forward 8
down 9
forward 3
down 6
down 1
down 1
forward 5
forward 5
forward 8
down 6
forward 7
down 3
down 4
up 1
forward 7
forward 5
forward 8
down 1
forward 9
forward 9
forward 7
up 8
forward 5
forward 9
down 3
forward 2
down 5
down 4
up 2
up 5
forward 3
forward 8
down 9
down 1
forward 2
forward 6
up 2
up 9
forward 8
down 7
forward 2
up 5
forward 7
down 9
forward 9
forward 5
down 8
down 6
forward 2
up 4
down 2
down 9
forward 4
down 4
forward 9
down 8
down 7
down 4
forward 6
forward 9
up 7
down 2
up 6
down 8
forward 3
forward 9
forward 7
down 1
forward 1
forward 7
forward 5
down 4
down 1
forward 4
forward 9
forward 6
down 5
down 1
forward 9
down 6
down 4
forward 8
up 8
forward 9
up 9
up 9
up 7
forward 4
down 4
forward 4
forward 2
forward 2
forward 9
up 4
forward 4
forward 3
forward 5
down 3
up 4
forward 3
forward 5
forward 9
forward 7
down 1
forward 4
down 5
up 6
down 9
forward 8
down 1
forward 3
down 2
up 8
down 5
down 8
forward 5
down 6
forward 6
down 7
up 5
forward 8
forward 5
forward 7
up 3
down 6
up 9
forward 1
forward 7
forward 3
forward 8
up 4
up 9
down 5
forward 5
forward 4
forward 4
down 9
up 6
forward 5
forward 2
down 6
down 2
forward 9
down 7
forward 8
down 4
forward 8
forward 8
up 7
up 5
forward 6
forward 4
up 4
forward 2
up 3
down 8
forward 1
forward 8
forward 8
forward 4
up 7
forward 2
down 8
forward 1
down 1
down 4
up 3
forward 8
forward 1
down 9
up 5
down 5
forward 9
forward 4
up 7
down 2
down 4
forward 1
forward 6
up 8
up 1
forward 8
down 5
down 5
forward 9
down 8
forward 9
down 1
up 7
forward 8
up 2
down 2
up 1
up 9
forward 6
down 5
forward 1
down 2
forward 5
down 3
up 8
down 2
down 8
forward 7
forward 2
up 3
forward 3
down 1
forward 4
down 7
up 5
forward 8
forward 8
up 6
up 4
up 4
down 7
forward 2
forward 1
forward 6
up 3
forward 3
forward 9
down 4
forward 2
down 9
down 8
forward 5
forward 2
forward 4
forward 5
forward 7
down 4
up 5
down 7
forward 4
up 7
down 9
down 4
down 3
forward 8
down 5
forward 8
forward 8
down 3
down 8
down 2
forward 4
forward 5
down 7
down 7
down 6
up 9
down 9
down 5
forward 4
down 1
down 7
up 4
down 8
up 5
up 1
down 9
down 7
forward 9
forward 7
down 6
up 2
forward 1
down 8
up 2
forward 8
down 2
forward 2
down 9
forward 2
forward 3
forward 7
up 3
up 3
up 3
forward 5
up 9
up 1
down 9
down 4
down 5
up 9
up 7
down 9
forward 3
down 5
down 6
down 3
up 6
forward 8
up 6
up 8
down 4
forward 8
down 8
forward 7
up 1
forward 4
down 2
forward 7
down 6
up 8
forward 1
down 8
down 4
forward 7
forward 2
up 4
forward 9
forward 9
down 8
up 1
down 3
forward 7
down 9
forward 2
forward 5
down 4
down 8
down 3
up 1
down 2
up 3
forward 1
forward 5
forward 3
down 2
up 2
forward 7
down 6
forward 3
down 8
forward 5
forward 4
up 2
forward 4
up 2
down 4
forward 8
forward 5
down 1
forward 6
down 2
down 5
up 8
forward 5
down 2
up 5
down 5
forward 4
forward 3
forward 8
down 9
up 3
forward 4
forward 2
forward 2
forward 2
down 4
forward 8
forward 5
up 3
down 1
forward 3
up 5
forward 8
down 6
forward 6
down 9
forward 6
up 5
down 6
up 9
forward 8
down 2
forward 9
down 7
down 5
down 4
forward 3
forward 2
forward 1
forward 7
down 3
forward 4
up 6
down 5
down 4
down 8
down 4
up 6
forward 3
down 4
down 6
forward 9
forward 6
up 4
down 2
down 7
forward 2
forward 9
down 4
down 8
down 3
down 4
forward 9
forward 4
forward 1
down 7
forward 2
up 1
forward 7
down 7
forward 7
forward 5
up 8
down 4
up 7
up 2
up 7
up 8
down 9
forward 8
forward 8
down 3
forward 9
down 3
up 7
down 1
down 9
forward 2
up 4
down 2
forward 2
up 5
up 5
up 1
forward 7
up 5
down 3
up 8
down 9
down 7
up 4
down 8
down 4
forward 8
up 6
down 2
down 4
forward 7
forward 8
forward 8
forward 1
down 4
down 4
forward 4
down 7
forward 3
down 3
down 6
down 7
down 6
forward 8
down 4
down 7
down 1
down 7
down 5
down 2
up 5
forward 9
down 3
down 4
down 4
forward 5
down 9
forward 1
up 1
up 1
down 3
forward 8
up 6
up 6
down 1
up 2
down 7
down 9
up 7
forward 7
down 6
down 5
down 5
up 8
forward 7
down 1
down 6
forward 4
forward 5
forward 5
forward 2
down 5
up 6
down 5
forward 1
down 9
up 4
down 7
down 8
down 5
down 8
forward 7
forward 2
up 3
down 7
forward 1
forward 4
forward 5
forward 1
forward 7
down 6
forward 9
forward 8
down 5
forward 5
forward 8
forward 3
up 1
forward 6
forward 9
forward 8
down 3
forward 8
forward 7
down 1
forward 9
down 9
forward 6
forward 4
forward 2
forward 9
down 7
down 6
forward 4
forward 1
forward 3
forward 9
up 4
down 5
forward 6
down 8
up 5
down 9
down 6
forward 7
down 9
forward 6
forward 5
down 1
down 4
up 6
forward 2
down 8
down 5
up 5
forward 6
forward 5
down 8
down 4
down 3
down 8
forward 3
forward 9
up 6
down 9
down 1
forward 7
forward 5
down 5
down 1
forward 7
forward 3
up 6
forward 1
up 9
forward 1
down 4
down 3
down 3
up 6
forward 9
forward 5
up 9
up 5
forward 5
forward 7
forward 9
down 8
forward 2
down 7
down 7
forward 3
down 2
up 1
down 4
down 6
down 8
forward 8
forward 6
up 3
forward 9
down 8
up 7
forward 6
forward 9
up 8
forward 3
down 9
forward 3
forward 4
down 3
down 2
forward 2
down 5
down 9
down 2
down 7
down 4
down 6
forward 5
up 1
forward 9
forward 2
up 8
forward 8
down 3
forward 7
down 5
forward 5
down 5
down 9
down 9
down 6
down 5
down 4
up 6
forward 9
down 2
down 5
up 3
forward 8
forward 1
down 4
down 8
forward 6
forward 7
up 2
forward 1
forward 2
down 7
down 6
up 7
forward 7
down 6
down 6
down 8
forward 8
up 5
up 9
forward 8
forward 1
down 6
down 6
up 7
forward 6
up 5
forward 7
down 1
forward 1
forward 9
up 7
down 3
forward 4
down 6
down 6
up 5
up 7
down 1
up 2
down 8
down 1
forward 2
down 3
forward 8
forward 2
up 2
down 7
forward 5
forward 7
down 2
up 1
down 1
down 6
down 4
up 1
forward 2
forward 7
forward 8
down 1
forward 6
down 5
down 8
up 6
down 7
forward 6
down 8
down 6
down 9
forward 8
down 9
down 6
up 2
down 2
down 5
down 5
up 8
forward 6
forward 7
up 4
down 5
up 5
forward 6
forward 8
up 6
up 7
up 3
up 9
down 6
forward 3
forward 3
down 6
down 8
down 2
down 2
up 7
up 6
forward 5
forward 4
down 2
down 3
forward 8
down 9
forward 3
down 8
down 8
forward 9
forward 7
down 8
down 7
up 2
down 8
down 1
down 7
up 7
forward 3
forward 5
up 1
down 9
forward 3
down 4
down 5
down 3
down 8
up 7
forward 4
down 6
forward 9
forward 9
forward 1
up 6
up 8
forward 6
down 6
down 6
forward 1
up 4
down 2
forward 8
forward 6
down 2
down 9
down 6
down 4
forward 5
down 5
down 7
down 6
forward 8
down 8
down 2
up 2
up 2
down 3
forward 2
down 2
down 5
down 3
up 5
down 8
forward 8
down 8
down 4
down 3
forward 7
forward 1
forward 1
down 7
down 6
down 2
up 9
up 7
down 9
forward 1
down 3
down 4
down 7
forward 6
down 8
forward 3
down 6
forward 4
down 3
down 5
down 4
forward 4
up 4
up 1
up 2
down 3
forward 6
up 6
forward 8
forward 9
forward 3
forward 4
forward 2
forward 8
forward 7
up 4
down 5
forward 8
forward 6
down 2
forward 3
down 5
down 8
forward 3
forward 4
down 3
down 9
down 6
up 6
down 1
down 8
forward 7
down 3
forward 8
forward 4
down 2
up 7
down 5
forward 1
forward 7
forward 1
forward 3
down 8
down 7
forward 5
forward 3
down 1
forward 7
down 4
down 3
down 6
down 9
forward 6
down 1
forward 3
forward 3
forward 5
forward 9
up 3
up 6
forward 7
up 5
up 9
down 2
down 5
up 5
forward 4
forward 4
forward 6
up 2
down 9
down 4
down 2
forward 7
down 3
up 4
up 6
forward 7
forward 1
forward 1
down 7
forward 5
forward 4
up 6
forward 4
forward 2
forward 6
up 1
up 5
forward 8
up 2
forward 3
forward 5
up 9
down 4
forward 1
up 7
down 5
forward 7
forward 2
forward 8
down 8
down 2
forward 2
forward 4
forward 8
forward 1
forward 8
forward 2
down 9
forward 7
down 7
down 5
up 9
forward 5
down 1
down 9
down 2
forward 6
down 8
up 9
forward 5
down 2
forward 1
up 4
forward 1
down 9
up 3
down 3
down 2
forward 9
down 5
forward 4
down 1
forward 4
down 8
down 3
forward 8
forward 2
forward 3
down 1
forward 1
down 2
forward 6
up 3
up 5
up 9
forward 9
down 5
down 5
forward 4
up 6
down 7
down 2
forward 8
forward 6
forward 6
up 6
down 7
forward 8
`
package ram
import "github.com/jameycribbs/hare/dberr"
// Ram is an in-memory datastore. It holds a map from table name to that
// table's contents; all operations on the datastore go through it.
type Ram struct {
	tables map[string]*table
}
// New builds a Ram datastore pre-populated from seedData, a map of
// table name -> (record id -> encoded record).
func New(seedData map[string]map[int]string) (*Ram, error) {
	ram := &Ram{}
	if err := ram.init(seedData); err != nil {
		return nil, err
	}
	return ram, nil
}

// Close releases the datastore by dropping the table map.
func (ram *Ram) Close() error {
	ram.tables = nil
	return nil
}
// CreateTable adds a new, empty table under tableName.
// It returns dberr.ErrTableExists if the name is already taken.
func (ram *Ram) CreateTable(tableName string) error {
	if ram.TableExists(tableName) {
		return dberr.ErrTableExists
	}
	ram.tables[tableName] = newTable()
	return nil
}

// DeleteRec removes the record with the given id from tableName.
func (ram *Ram) DeleteRec(tableName string, id int) error {
	tbl, err := ram.getTable(tableName)
	if err != nil {
		return err
	}
	return tbl.deleteRec(id)
}

// GetLastID returns the greatest record id present in tableName.
func (ram *Ram) GetLastID(tableName string) (int, error) {
	tbl, err := ram.getTable(tableName)
	if err != nil {
		return 0, err
	}
	return tbl.getLastID(), nil
}

// IDs returns the ids of every record in tableName.
func (ram *Ram) IDs(tableName string) ([]int, error) {
	tbl, err := ram.getTable(tableName)
	if err != nil {
		return nil, err
	}
	return tbl.ids(), nil
}
// InsertRec stores rec under id in tableName.
// It returns dberr.ErrIDExists if the id is already in use.
func (ram *Ram) InsertRec(tableName string, id int, rec []byte) error {
	tbl, err := ram.getTable(tableName)
	if err != nil {
		return err
	}
	if tbl.recExists(id) {
		return dberr.ErrIDExists
	}
	tbl.writeRec(id, rec)
	return nil
}

// ReadRec fetches the record stored under id in tableName.
func (ram *Ram) ReadRec(tableName string, id int) ([]byte, error) {
	tbl, err := ram.getTable(tableName)
	if err != nil {
		return nil, err
	}
	return tbl.readRec(id)
}

// RemoveTable drops tableName from the datastore.
// It returns dberr.ErrNoTable if the table does not exist.
func (ram *Ram) RemoveTable(tableName string) error {
	if !ram.TableExists(tableName) {
		return dberr.ErrNoTable
	}
	delete(ram.tables, tableName)
	return nil
}
// TableExists reports whether tableName is present in the datastore.
func (ram *Ram) TableExists(tableName string) bool {
	_, found := ram.tables[tableName]
	return found
}

// TableNames returns the name of every table in the datastore
// (in map-iteration, i.e. unspecified, order).
func (ram *Ram) TableNames() []string {
	var names []string
	for tableName := range ram.tables {
		names = append(names, tableName)
	}
	return names
}

// UpdateRec overwrites the record stored under id in tableName with rec.
// It returns dberr.ErrNoRecord if no record with that id exists.
func (ram *Ram) UpdateRec(tableName string, id int, rec []byte) error {
	tbl, err := ram.getTable(tableName)
	if err != nil {
		return err
	}
	if !tbl.recExists(id) {
		return dberr.ErrNoRecord
	}
	tbl.writeRec(id, rec)
	return nil
}
//******************************************************************************
// UNEXPORTED METHODS
//******************************************************************************
func (ram *Ram) getTable(tableName string) (*table, error) {
table, ok := ram.tables[tableName]
if !ok {
return nil, dberr.ErrNoTable
}
return table, nil
}
func (ram *Ram) getTables() ([]string, error) {
var tableNames []string
for name := range ram.tables {
tableNames = append(tableNames, name)
}
return tableNames, nil
}
func (ram *Ram) init(seedData map[string]map[int]string) error {
ram.tables = make(map[string]*table)
for tableName, tableData := range seedData {
ram.tables[tableName] = newTable()
for id, rec := range tableData {
if err := ram.InsertRec(tableName, id, []byte(rec)); err != nil {
return err
}
}
}
return nil
} | datastores/ram/ram.go | 0.617167 | 0.471223 | ram.go | starcoder |
package expr
import (
"fmt"
"math"
"reflect"
)
// Env maps variable names to their runtime values.
type Env map[Var]interface{}

// runtimePanic is the panic payload used for expected evaluation failures
// (undefined variables, unsupported operators/types). SafetyEvalBool
// converts it into an error; any other panic value is re-raised untouched.
type runtimePanic string

// SafetyEvalBool evaluates expr against env and coerces the result to a
// bool. Expected evaluation failures (runtimePanic) come back as
// (false, err); a nil expr yields (false, nil).
func SafetyEvalBool(expr Expr, env Env) (value bool, err error) {
	defer func() {
		switch x := recover().(type) {
		case nil:
			// no panic
		case runtimePanic:
			// Expected failure: surface it through the named returns.
			value = false
			err = fmt.Errorf("%s", x)
		default:
			// unexpected panic: resume state of panic.
			panic(x)
		}
	}()
	if expr == nil {
		return false, nil
	}
	value = ConvertToBool(expr.Eval(env))
	return
}
// Eval resolves a variable: "true" and "false" are built-in literals,
// anything else is looked up in env. An unknown name raises a runtimePanic.
func (v Var) Eval(env Env) reflect.Value {
	if v == "true" {
		return reflect.ValueOf(true)
	}
	if v == "false" {
		return reflect.ValueOf(false)
	}
	if bound, ok := env[v]; ok {
		return reflect.ValueOf(bound)
	}
	panic(runtimePanic(fmt.Sprintf("undefined variable: %s", v)))
}

// Eval returns the literal's wrapped value; the environment is ignored.
func (l literal) Eval(_ Env) reflect.Value {
	return reflect.ValueOf(l.value)
}
// Eval applies the unary operator to the evaluated operand. The operator
// is validated first, so an unsupported operator panics before the operand
// is evaluated (matching the original control flow).
func (u unary) Eval(env Env) reflect.Value {
	var apply func(reflect.Value) reflect.Value
	switch u.op {
	case "+":
		apply = unaryPlus
	case "-":
		apply = unaryMinus
	case "!":
		apply = logicalNegation
	case "~":
		apply = bitwiseComplement
	default:
		panic(runtimePanic(fmt.Sprintf("unsupported unary operator: %q", u.op)))
	}
	return apply(u.x.Eval(env))
}
// Eval applies the binary operator to the evaluated operands.
// Both "=" and "==" map to the same equality comparison.
// NOTE(review): "&&" and "||" evaluate BOTH operands before combining —
// there is no short-circuiting, so e.g. `false && undefinedVar` still
// panics on the undefined variable. Confirm this is intended.
func (b binary) Eval(env Env) reflect.Value {
	switch b.op {
	case "+":
		return addition(b.x.Eval(env), b.y.Eval(env))
	case "-":
		return subtraction(b.x.Eval(env), b.y.Eval(env))
	case "*":
		return multiplication(b.x.Eval(env), b.y.Eval(env))
	case "/":
		return division(b.x.Eval(env), b.y.Eval(env))
	case "%":
		return modulus(b.x.Eval(env), b.y.Eval(env))
	case "&":
		return bitwiseAnd(b.x.Eval(env), b.y.Eval(env))
	case "&&":
		return logicalAnd(b.x.Eval(env), b.y.Eval(env))
	case "|":
		return bitwiseOr(b.x.Eval(env), b.y.Eval(env))
	case "||":
		return logicalOr(b.x.Eval(env), b.y.Eval(env))
	case "=", "==":
		return comparisonEqual(b.x.Eval(env), b.y.Eval(env))
	case ">":
		return comparisonGreater(b.x.Eval(env), b.y.Eval(env))
	case ">=":
		return comparisonGreaterOrEqual(b.x.Eval(env), b.y.Eval(env))
	case "<":
		return comparisonLess(b.x.Eval(env), b.y.Eval(env))
	case "<=":
		return comparisonLessOrEqual(b.x.Eval(env), b.y.Eval(env))
	case "!=":
		return comparisonNotEqual(b.x.Eval(env), b.y.Eval(env))
	}
	panic(runtimePanic(fmt.Sprintf("unsupported binary operator: %q", b.op)))
}
// Eval invokes one of the built-in math functions (pow, sin, sqrt) on the
// evaluated, float-coerced arguments. An unknown function name raises a
// runtimePanic; a missing argument panics on the slice index, as before.
func (c call) Eval(env Env) reflect.Value {
	arg := func(i int) float64 { return ConvertToFloat(c.args[i].Eval(env)) }
	switch c.fn {
	case "pow":
		return reflect.ValueOf(math.Pow(arg(0), arg(1)))
	case "sin":
		return reflect.ValueOf(math.Sin(arg(0)))
	case "sqrt":
		return reflect.ValueOf(math.Sqrt(arg(0)))
	}
	panic(runtimePanic(fmt.Sprintf("unsupported function call: %s", c.fn)))
}
// ConvertToBool coerces a bool/int/uint/float reflect.Value to bool
// (numbers are true iff nonzero). Other kinds raise a runtimePanic.
func ConvertToBool(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Bool:
		return v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() != 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return v.Uint() != 0
	case reflect.Float32, reflect.Float64:
		return v.Float() != 0
	}
	panic(runtimePanic(fmt.Sprintf("cannot convert data type: %s to bool", v.Kind().String())))
}

// ConvertToInt coerces a reflect.Value to int64: bools become 0/1, uints
// and floats go through a plain Go conversion (floats truncate).
func ConvertToInt(v reflect.Value) int64 {
	switch v.Kind() {
	case reflect.Bool:
		if v.Bool() {
			return 1
		}
		return 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return int64(v.Uint())
	case reflect.Float32, reflect.Float64:
		return int64(v.Float())
	}
	panic(runtimePanic(fmt.Sprintf("cannot convert data type: %s to int", v.Kind().String())))
}

// ConvertToUint coerces a reflect.Value to uint64: bools become 0/1, ints
// and floats go through a plain Go conversion (negative ints wrap).
func ConvertToUint(v reflect.Value) uint64 {
	switch v.Kind() {
	case reflect.Bool:
		if v.Bool() {
			return 1
		}
		return 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return uint64(v.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return v.Uint()
	case reflect.Float32, reflect.Float64:
		return uint64(v.Float())
	}
	panic(runtimePanic(fmt.Sprintf("cannot convert data type: %s to uint", v.Kind().String())))
}

// ConvertToFloat coerces a reflect.Value to float64; bools become 0/1.
func ConvertToFloat(v reflect.Value) float64 {
	switch v.Kind() {
	case reflect.Bool:
		if v.Bool() {
			return 1
		}
		return 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(v.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return float64(v.Uint())
	case reflect.Float32, reflect.Float64:
		return v.Float()
	}
	panic(runtimePanic(fmt.Sprintf("cannot convert data type: %s to float", v.Kind().String())))
}
// unaryPlus is the identity operation: +x == x for every supported kind.
func unaryPlus(v reflect.Value) reflect.Value {
	return v
}

// unaryMinus negates a numeric value; bools pass through unchanged.
// NOTE(review): negating an unsigned value wraps modulo 2^64 rather than
// producing a signed result — confirm that is the intended semantics.
func unaryMinus(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Bool:
		return v
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return reflect.ValueOf(-v.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return reflect.ValueOf(-v.Uint())
	case reflect.Float32, reflect.Float64:
		return reflect.ValueOf(-v.Float())
	}
	panic(runtimePanic(fmt.Sprintf("unary minus not support type: %s", v.Kind().String())))
}

// logicalNegation returns the boolean NOT of the coerced value.
func logicalNegation(v reflect.Value) reflect.Value {
	negated := !ConvertToBool(v)
	return reflect.ValueOf(negated)
}

// bitwiseComplement returns ^x for integer kinds and !x for bools; floats
// are rejected with a runtimePanic.
func bitwiseComplement(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Bool:
		return reflect.ValueOf(!v.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return reflect.ValueOf(^v.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return reflect.ValueOf(^v.Uint())
	case reflect.Float32, reflect.Float64:
		panic(runtimePanic("cannot eval ~ for float"))
	}
	panic(runtimePanic(fmt.Sprintf("bitwise complement not support type: %s", v.Kind().String())))
}
func typeLevel(k reflect.Kind) int {
switch k {
case reflect.Float32, reflect.Float64:
return 4
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return 3
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return 2
case reflect.Bool:
return 1
default:
return 0
}
}
func typeAscend(a reflect.Kind, b reflect.Kind) reflect.Kind {
if typeLevel(a) >= typeLevel(b) {
return a
} else {
return b
}
}
func addition(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) + ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) + ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) + ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) + ConvertToInt(right)
return reflect.ValueOf(r != 0)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support addition", left.Kind().String(), right.Kind().String())))
}
}
func subtraction(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) - ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) - ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) - ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) - ConvertToInt(right)
return reflect.ValueOf(r != 0)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support subtraction", left.Kind().String(), right.Kind().String())))
}
}
// multiplication computes left * right on the ascended kind (see
// typeAscend). Bool operands multiply via their integer forms; the result
// is true iff the product is non-zero (i.e. logical AND for bools).
// Unsupported kinds panic via runtimePanic.
func multiplication(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) * ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) * ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) * ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) * ConvertToInt(right)
return reflect.ValueOf(r != 0)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support multiplication", left.Kind().String(), right.Kind().String())))
}
}
// division computes left / right on the ascended kind (see typeAscend).
// Integer division truncates; a zero divisor triggers a runtime panic for
// the integer and bool cases, while float division follows IEEE-754
// (±Inf / NaN). Bool operands divide via their integer forms, so a false
// right operand panics (divide by zero). Unsupported kinds panic via
// runtimePanic.
func division(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) / ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) / ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) / ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) / ConvertToInt(right)
return reflect.ValueOf(r != 0)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support division", left.Kind().String(), right.Kind().String())))
}
}
// modulus computes left % right on the ascended integer kind (uint > int >
// bool; see typeAscend). Floats are not supported — Go's % operator is
// undefined for them — so a float operand falls through to the panic.
// Bool operands take the remainder via their integer forms; the result is
// true iff it is non-zero. A zero divisor panics at runtime.
func modulus(left reflect.Value, right reflect.Value) reflect.Value {
	k := typeAscend(left.Kind(), right.Kind())
	switch k {
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		r := ConvertToUint(left) % ConvertToUint(right)
		return reflect.ValueOf(r)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		r := ConvertToInt(left) % ConvertToInt(right)
		return reflect.ValueOf(r)
	case reflect.Bool:
		r := ConvertToInt(left) % ConvertToInt(right)
		return reflect.ValueOf(r != 0)
	default:
		// Fixed: the message previously said "division" (copy-paste from
		// the division helper); this operation is modulus.
		panic(runtimePanic(fmt.Sprintf("type %s and %s not support modulus", left.Kind().String(), right.Kind().String())))
	}
}
// bitwiseAnd computes left & right on the ascended integer kind. Floats
// are unsupported and fall through to the panic. Note the bool case is a
// logical AND (&&) on the converted booleans, not an integer bit op.
func bitwiseAnd(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) & ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) & ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToBool(left) && ConvertToBool(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support bitwise and", left.Kind().String(), right.Kind().String())))
}
}
// bitwiseOr computes left | right on the ascended integer kind. Floats
// are unsupported and fall through to the panic. Note the bool case is a
// logical OR (||) on the converted booleans, not an integer bit op.
func bitwiseOr(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) | ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) | ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToBool(left) || ConvertToBool(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support bitwise or", left.Kind().String(), right.Kind().String())))
}
}
// logicalAnd coerces both operands to bool and returns their conjunction
// boxed in a reflect.Value.
func logicalAnd(left reflect.Value, right reflect.Value) reflect.Value {
	return reflect.ValueOf(ConvertToBool(left) && ConvertToBool(right))
}
// logicalOr coerces both operands to bool and returns their disjunction
// boxed in a reflect.Value.
func logicalOr(left reflect.Value, right reflect.Value) reflect.Value {
	return reflect.ValueOf(ConvertToBool(left) || ConvertToBool(right))
}
// comparisonEqual reports left == right on the ascended kind (see
// typeAscend). Floats compare exactly; bools compare via their integer
// forms. Unsupported kinds panic via runtimePanic.
func comparisonEqual(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) == ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) == ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) == ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) == ConvertToInt(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support comparison equal", left.Kind().String(), right.Kind().String())))
}
}
// comparisonNotEqual reports left != right on the ascended kind (see
// typeAscend). Bools compare via their integer forms. Unsupported kinds
// panic via runtimePanic.
func comparisonNotEqual(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) != ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) != ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) != ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) != ConvertToInt(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support comparison not equal", left.Kind().String(), right.Kind().String())))
}
}
// comparisonGreater reports left > right on the ascended kind (see
// typeAscend). Bools compare via their integer forms (true > false).
// Unsupported kinds panic via runtimePanic.
func comparisonGreater(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) > ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) > ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) > ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) > ConvertToInt(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support comparison greater", left.Kind().String(), right.Kind().String())))
}
}
// comparisonGreaterOrEqual reports left >= right on the ascended kind
// (see typeAscend). Bools compare via their integer forms. Unsupported
// kinds panic via runtimePanic.
func comparisonGreaterOrEqual(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) >= ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) >= ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) >= ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) >= ConvertToInt(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support comparison greater or equal", left.Kind().String(), right.Kind().String())))
}
}
// comparisonLess reports left < right on the ascended kind (see
// typeAscend). Bools compare via their integer forms. Unsupported kinds
// panic via runtimePanic.
func comparisonLess(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) < ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) < ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) < ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) < ConvertToInt(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support comparison less", left.Kind().String(), right.Kind().String())))
}
}
// comparisonLessOrEqual reports left <= right on the ascended kind (see
// typeAscend). Bools compare via their integer forms. Unsupported kinds
// panic via runtimePanic.
func comparisonLessOrEqual(left reflect.Value, right reflect.Value) reflect.Value {
k := typeAscend(left.Kind(), right.Kind())
switch k {
case reflect.Float32, reflect.Float64:
r := ConvertToFloat(left) <= ConvertToFloat(right)
return reflect.ValueOf(r)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
r := ConvertToUint(left) <= ConvertToUint(right)
return reflect.ValueOf(r)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
r := ConvertToInt(left) <= ConvertToInt(right)
return reflect.ValueOf(r)
case reflect.Bool:
r := ConvertToInt(left) <= ConvertToInt(right)
return reflect.ValueOf(r)
default:
panic(runtimePanic(fmt.Sprintf("type %s and %s not support comparison less or equal", left.Kind().String(), right.Kind().String())))
}
} | app/service/live/broadcast-proxy/expr/eval.go | 0.571049 | 0.455259 | eval.go | starcoder |
package fp
// Find returns the first element of the receiver for which the predicate
// p holds, wrapped in the matching Option type; when nothing matches, the
// corresponding None* sentinel is returned. One copy of the method is
// stamped out per supported element type (generated boilerplate).
func (a BoolArray) Find(p func(bool) bool) BoolOption {
for _, e := range a {
if p(e) { return MkBoolOption(e) }
}
return NoneBool}
func (a StringArray) Find(p func(string) bool) StringOption {
for _, e := range a {
if p(e) { return MkStringOption(e) }
}
return NoneString}
func (a IntArray) Find(p func(int) bool) IntOption {
for _, e := range a {
if p(e) { return MkIntOption(e) }
}
return NoneInt}
func (a Int64Array) Find(p func(int64) bool) Int64Option {
for _, e := range a {
if p(e) { return MkInt64Option(e) }
}
return NoneInt64}
func (a ByteArray) Find(p func(byte) bool) ByteOption {
for _, e := range a {
if p(e) { return MkByteOption(e) }
}
return NoneByte}
func (a RuneArray) Find(p func(rune) bool) RuneOption {
for _, e := range a {
if p(e) { return MkRuneOption(e) }
}
return NoneRune}
func (a Float32Array) Find(p func(float32) bool) Float32Option {
for _, e := range a {
if p(e) { return MkFloat32Option(e) }
}
return NoneFloat32}
func (a Float64Array) Find(p func(float64) bool) Float64Option {
for _, e := range a {
if p(e) { return MkFloat64Option(e) }
}
return NoneFloat64}
func (a AnyArray) Find(p func(Any) bool) AnyOption {
for _, e := range a {
if p(e) { return MkAnyOption(e) }
}
return NoneAny}
func (a Tuple2Array) Find(p func(Tuple2) bool) Tuple2Option {
for _, e := range a {
if p(e) { return MkTuple2Option(e) }
}
return NoneTuple2}
func (a BoolArrayArray) Find(p func([]bool) bool) BoolArrayOption {
for _, e := range a {
if p(e) { return MkBoolArrayOption(e) }
}
return NoneBoolArray}
func (a StringArrayArray) Find(p func([]string) bool) StringArrayOption {
for _, e := range a {
if p(e) { return MkStringArrayOption(e) }
}
return NoneStringArray}
func (a IntArrayArray) Find(p func([]int) bool) IntArrayOption {
for _, e := range a {
if p(e) { return MkIntArrayOption(e) }
}
return NoneIntArray}
func (a Int64ArrayArray) Find(p func([]int64) bool) Int64ArrayOption {
for _, e := range a {
if p(e) { return MkInt64ArrayOption(e) }
}
return NoneInt64Array}
func (a ByteArrayArray) Find(p func([]byte) bool) ByteArrayOption {
for _, e := range a {
if p(e) { return MkByteArrayOption(e) }
}
return NoneByteArray}
func (a RuneArrayArray) Find(p func([]rune) bool) RuneArrayOption {
for _, e := range a {
if p(e) { return MkRuneArrayOption(e) }
}
return NoneRuneArray}
func (a Float32ArrayArray) Find(p func([]float32) bool) Float32ArrayOption {
for _, e := range a {
if p(e) { return MkFloat32ArrayOption(e) }
}
return NoneFloat32Array}
func (a Float64ArrayArray) Find(p func([]float64) bool) Float64ArrayOption {
for _, e := range a {
if p(e) { return MkFloat64ArrayOption(e) }
}
return NoneFloat64Array}
func (a AnyArrayArray) Find(p func([]Any) bool) AnyArrayOption {
for _, e := range a {
if p(e) { return MkAnyArrayOption(e) }
}
return NoneAnyArray}
func (a Tuple2ArrayArray) Find(p func([]Tuple2) bool) Tuple2ArrayOption {
for _, e := range a {
if p(e) { return MkTuple2ArrayOption(e) }
}
return NoneTuple2Array} | fp/bootstrap_array_find.go | 0.73782 | 0.446193 | bootstrap_array_find.go | starcoder |
package dali
import (
"database/sql"
"fmt"
"reflect"
)
// One executes the query that should return rows and loads the
// resulting data from the first row into dest which must be a struct.
// Only fields that match the column names (after filtering through
// the mapperFunc) are filled. One returns sql.ErrNoRows if there are
// no rows.
func (q *Query) One(dest interface{}) error {
	ptr := reflect.ValueOf(dest)
	// dest must be a non-nil pointer whose pointee is a struct.
	if ptr.Kind() != reflect.Ptr {
		panic("dali: dest must be a pointer to a struct")
	}
	target := reflect.Indirect(ptr)
	if target.Kind() != reflect.Struct {
		panic("dali: dest must be a pointer to a struct")
	}
	// loadStruct yields sql.ErrNoRows when the query matches no rows.
	return q.loadStruct(target)
}
// All executes the query that should return rows, and loads the
// resulting data into dest which must be a slice of structs.
// Only fields that match the column names (after filtering through
// the mapperFunc) are filled.
func (q *Query) All(dest interface{}) error {
const errMsg = "dali: dest must be a pointer to a slice of structs or pointers to structs"
destv := reflect.ValueOf(dest)
if destv.Kind() != reflect.Ptr {
panic(errMsg)
}
slicev := reflect.Indirect(destv)
if slicev.Kind() != reflect.Slice {
panic(errMsg)
}
// Determine the element type, stripping at most one level of pointer
// and remembering whether the elements sit behind a pointer.
elemt := slicev.Type().Elem()
isPtr := false
if isPtr = elemt.Kind() == reflect.Ptr; isPtr {
elemt = elemt.Elem()
}
switch elemt.Kind() {
case reflect.Ptr:
// Still a pointer after stripping one level, i.e. the slice held **T.
panic("dali: a pointer to a pointer is not allowed as an element of dest")
case reflect.Struct:
return q.loadStructs(slicev, elemt, isPtr)
}
// Element is neither a struct nor a pointer to one.
panic(errMsg)
}
// loadStruct fills the single struct v from the first matched row.
func (q *Query) loadStruct(v reflect.Value) error { return q.load(v, v.Type(), true, false) }

// loadStructs appends one elemt value (optionally behind a pointer when
// isPtr is set) to the slice slicev per matched row.
func (q *Query) loadStructs(slicev reflect.Value, elemt reflect.Type, isPtr bool) error {
return q.load(slicev, elemt, false, isPtr)
}
// load drives the row scanning shared by One (loadJustOne=true, v is a
// struct) and All (v is a slice of elemt, optionally behind pointers).
// Result-set columns are matched by name against the struct's mapped
// columns; unmatched columns are scanned into throwaway targets. Returns
// sql.ErrNoRows in single-row mode when nothing matched.
func (q *Query) load(v reflect.Value, elemt reflect.Type, loadJustOne, isPtr bool) error {
rows, err := q.Rows()
if err != nil {
return err
}
defer rows.Close()
rowCols, err := rows.Columns()
if err != nil {
return err
}
// For each result column, find the index path of the matching struct
// field; nil means the column has no counterpart in the struct.
cols, indexes := colNamesAndFieldIndexes(elemt, false)
fieldIndexes := make([][]int, len(rowCols))
for coln, rowCol := range rowCols {
var index []int
for i, col := range cols {
if rowCol == col {
index = indexes[i]
break
}
}
fieldIndexes[coln] = index
}
fields := make([]interface{}, len(fieldIndexes))
// Single-row mode starts with the ErrNoRows sentinel and clears it once
// a row has actually been loaded.
err = nil
if loadJustOne {
err = sql.ErrNoRows
}
for rows.Next() {
elemvptr := reflect.New(elemt)
elemv := reflect.Indirect(elemvptr)
noMatch := true
for i, index := range fieldIndexes {
if index == nil {
// No struct field for this column: scan into a dummy target.
fields[i] = new(interface{})
continue
}
noMatch = false
fields[i] = elemv.FieldByIndex(index).Addr().Interface()
}
if noMatch {
return fmt.Errorf("dali: no match between columns and struct fields")
}
if err := rows.Scan(fields...); err != nil {
return err
}
if loadJustOne {
// v is a struct.
v.Set(elemv)
err = nil
break
// Otherwise, v must be a slice.
} else if isPtr {
v.Set(reflect.Append(v, elemvptr))
} else {
v.Set(reflect.Append(v, elemv))
}
}
if err := rows.Err(); err != nil {
return err
}
// nil in multi-row mode; nil or the ErrNoRows sentinel in single-row mode.
return err
}
// ScanAllRows executes the query that is expected to return rows.
// It copies the columns from the matched rows into the slices
// pointed at by dests.
func (q *Query) ScanAllRows(dests ...interface{}) error {
// Validate every destination up front, caching the dereferenced slice
// values and their element types.
slicevals := make([]reflect.Value, len(dests))
elemtypes := make([]reflect.Type, len(dests))
// NOTE(review): the loop variable shadows the variadic parameter here;
// it works, but a distinct name would read better.
for i, dests := range dests {
destv := reflect.ValueOf(dests)
if destv.Kind() != reflect.Ptr {
panic("dali: dests must be a pointer to a slice")
}
slicevals[i] = reflect.Indirect(destv)
if slicevals[i].Kind() != reflect.Slice {
panic("dali: dests must be a pointer to a slice")
}
elemtypes[i] = slicevals[i].Type().Elem()
}
rows, err := q.Rows()
if err != nil {
return err
}
defer rows.Close()
elemvptrs := make([]reflect.Value, len(dests))
args := make([]interface{}, len(dests))
for rows.Next() {
// Fresh addressable element per column so Scan can write into it.
for i := range args {
elemvptrs[i] = reflect.New(elemtypes[i])
args[i] = elemvptrs[i].Interface()
}
if err := rows.Scan(args...); err != nil {
return err
}
// Append the scanned values to their destination slices.
for i := range args {
slicevals[i].Set(reflect.Append(slicevals[i], reflect.Indirect(elemvptrs[i])))
}
}
return rows.Err()
} | loading.go | 0.551332 | 0.413063 | loading.go | starcoder |
package sql
import (
"github.com/liquidata-inc/dolt/go/store/types"
)
// DoltToSQLType maps each supported noms value kind to the SQL type name
// used to render it; unsigned ints render as "INT UNSIGNED".
var DoltToSQLType = map[types.NomsKind]string{
types.StringKind: VARCHAR,
types.BoolKind: BOOL,
types.FloatKind: FLOAT_TYPE,
types.IntKind: INT,
types.UintKind: INT + " " + UNSIGNED,
types.UUIDKind: UUID,
}
// TypeConversionFn is a function that converts one noms value to another of a different type in a guaranteed fashion,
// i.e. any conversion that could possibly fail (e.g. string -> int) are not allowed. Only SQL-safe conversions are
// allowed, even if they are guaranteed to be safe, so that e.g. there is no way to convert a numeric type to a string.
// These kinds of conversions must be explicit in SQL.
type TypeConversionFn func(types.Value) types.Value
// convFuncMap[src][dst] is the guaranteed-safe conversion from kind src to
// kind dst; an absent entry means no SQL-safe conversion exists. Every
// kind converts to itself (identity) and to/from the null kind (nil).
var convFuncMap = map[types.NomsKind]map[types.NomsKind]TypeConversionFn{
types.StringKind: {
types.StringKind: identityConvFunc,
types.NullKind: convToNullFunc,
},
types.UUIDKind: {
types.UUIDKind: identityConvFunc,
types.NullKind: convToNullFunc,
},
types.UintKind: {
types.UintKind: identityConvFunc,
types.IntKind: convUintToInt,
types.FloatKind: convUintToFloat,
types.NullKind: convToNullFunc,
},
types.IntKind: {
types.UintKind: convIntToUint,
types.IntKind: identityConvFunc,
types.FloatKind: convIntToFloat,
types.NullKind: convToNullFunc,
},
types.FloatKind: {
types.FloatKind: identityConvFunc,
types.NullKind: convToNullFunc,
},
types.BoolKind: {
types.BoolKind: identityConvFunc,
types.NullKind: convToNullFunc,
},
types.NullKind: {
types.StringKind: convToNullFunc,
types.UUIDKind: convToNullFunc,
types.UintKind: convToNullFunc,
types.IntKind: convToNullFunc,
types.FloatKind: convToNullFunc,
types.BoolKind: convToNullFunc,
types.NullKind: convToNullFunc,
},
}
// GetTypeConversionFn takes in a source kind and a destination kind and returns a TypeConversionFn which can convert
// values of the source kind to values of the destination kind in a type-safe manner, or nil if no such conversion is
// possible.
func GetTypeConversionFn(srcKind, destKind types.NomsKind) TypeConversionFn {
	destMap, found := convFuncMap[srcKind]
	if !found {
		// Unknown source kind: no conversion available.
		return nil
	}
	// Missing destination entries yield the zero (nil) TypeConversionFn.
	return destMap[destKind]
}
// identityConvFunc returns its argument unchanged (same-kind conversions).
func identityConvFunc(value types.Value) types.Value {
return value
}

// convToNullFunc discards its argument and yields nil: conversions to or
// from the null kind always produce SQL NULL.
var convToNullFunc = func(types.Value) types.Value {
return nil
}
// convUintToInt reinterprets a noms Uint as an Int; nil passes through.
// NOTE(review): values above math.MaxInt64 wrap negative — confirm the
// conversion map only pairs kinds where this is acceptable.
func convUintToInt(val types.Value) types.Value {
	if val == nil {
		return nil
	}
	return types.Int(int64(uint64(val.(types.Uint))))
}
// convUintToFloat widens a noms Uint to a Float; nil passes through.
// NOTE(review): values beyond 2^53 lose precision in float64 — confirm.
func convUintToFloat(val types.Value) types.Value {
	if val == nil {
		return nil
	}
	return types.Float(float64(uint64(val.(types.Uint))))
}
// convIntToUint reinterprets a noms Int as a Uint; nil passes through.
// NOTE(review): negative values wrap to large unsigned numbers — confirm
// the conversion map only pairs kinds where this is acceptable.
func convIntToUint(val types.Value) types.Value {
	if val == nil {
		return nil
	}
	return types.Uint(uint64(int64(val.(types.Int))))
}
// convIntToFloat widens a noms Int to a Float; nil passes through.
// NOTE(review): int64 values beyond 2^53 lose precision in float64.
func convIntToFloat(val types.Value) types.Value {
if val == nil {
return nil
}
n := int64(val.(types.Int))
return types.Float(float64(n))
} | go/libraries/doltcore/sql/sqltypes.go | 0.706292 | 0.498291 | sqltypes.go | starcoder |
package fwt
import (
"github.com/dolthub/dolt/go/libraries/doltcore/row"
"github.com/dolthub/dolt/go/libraries/doltcore/table/pipeline"
"github.com/dolthub/dolt/go/store/types"
)
// TooLongBehavior determines how the FWTTransformer should behave when it encounters a column that is longer than what
// it expected
type TooLongBehavior int
const (
// ErrorWhenTooLong treats each row containing a column that is longer than expected as a bad row
ErrorWhenTooLong TooLongBehavior = iota
// SkipRowWhenTooLong skips any rows that have columns that are longer than expected
SkipRowWhenTooLong
// TruncateWhenTooLong will cut off the end of columns that are too long
TruncateWhenTooLong
// HashFillWhenTooLong will result in ######### being printed in place of the columns that are longer than expected.
HashFillWhenTooLong
// PrintAllWhenTooLong will print the entire column for every row. When this happens results will not be valid
// fixed width text files
PrintAllWhenTooLong
)
// FWTTransformer transforms columns to be of fixed width.
type FWTTransformer struct {
fwtSch *FWTSchema // fixed-width schema: per-tag widths, max runes, and no-fit filler strings
colBuffs map[uint64][]rune // reusable per-column rune buffers, sized to each tag's max runes
tooLngBhv TooLongBehavior // policy applied when a value exceeds its column width
}
// NewFWTTransform creates a new FWTTransformer from a FWTSchema and a TooLongBehavior
func NewFWTTransformer(fwtSch *FWTSchema, tooLngBhv TooLongBehavior) *FWTTransformer {
	// Pre-allocate one reusable rune buffer per column, sized to the
	// widest value the schema allows for that tag.
	buffers := make(map[uint64][]rune, fwtSch.Sch.GetAllCols().Size())
	for tag, maxRunes := range fwtSch.TagToMaxRunes {
		buffers[tag] = make([]rune, maxRunes)
	}
	return &FWTTransformer{fwtSch: fwtSch, colBuffs: buffers, tooLngBhv: tooLngBhv}
}
// Transform takes in a row and transforms it so that it's columns are of the correct width.
func (fwtTr *FWTTransformer) Transform(r row.Row, props pipeline.ReadableMap) ([]*pipeline.TransformedRowResult, string) {
sch := fwtTr.fwtSch.Sch
destFields := make(row.TaggedValues)
// Pad (or shrink, per tooLngBhv) every column to its configured width.
for tag, colWidth := range fwtTr.fwtSch.TagToWidth {
buf := fwtTr.colBuffs[tag]
if colWidth != 0 {
val, _ := r.GetColVal(tag)
if types.IsNull(val) {
// don't assign a value for nil columns
continue
}
str := string(val.(types.String))
strWidth := StringWidth(str)
if strWidth > colWidth {
// Value is wider than its column: apply the configured policy.
switch fwtTr.tooLngBhv {
case ErrorWhenTooLong:
col, _ := sch.GetAllCols().GetByTag(tag)
return nil, "Value for " + col.Name + " too long."
case SkipRowWhenTooLong:
return nil, ""
case TruncateWhenTooLong:
// NOTE(review): slices by bytes while widths come from
// StringWidth — may split a multi-byte rune; confirm inputs.
str = str[0:colWidth]
case HashFillWhenTooLong:
str = fwtTr.fwtSch.NoFitStrs[tag]
case PrintAllWhenTooLong:
break
}
}
strWidth = StringWidth(str)
if strWidth > colWidth {
// Still too wide (PrintAllWhenTooLong): emit the full value.
buf = []rune(str)
} else {
n := copy(buf, []rune(str))
// Character widths are tricky. Always overwrite from where we left off to the end of the buffer to clear it.
for i := 0; n+i < len(buf); i++ {
buf[n+i] = ' '
}
}
}
destFields[tag] = types.String(buf)
}
var err error
r, err = row.New(r.Format(), sch, destFields)
if err != nil {
return nil, err.Error()
}
return []*pipeline.TransformedRowResult{{RowData: r}}, ""
} | go/libraries/doltcore/table/untyped/fwt/fwt_transform.go | 0.683314 | 0.418043 | fwt_transform.go | starcoder |
package iws
// AdrRecord is the container tag for the majority of the response data from DMDC's Identity Web Services: Real-time Broker Service REST API
type AdrRecord struct {
// The identifier that is used to represent the person within a Department of Defense Electronic Data Interchange. Externally the EDI-PI is referred to as the DoD ID, or the DoD ID Number. XML Tag - dodEdiPersonId
Edipi *uint64 `xml:"DOD_EDI_PN_ID,omitempty"` // <xsd:element minOccurs="0" name="DOD_EDI_PN_ID" type="tns:DOD_EDI_PN_ID"/>
// The identifier that is used to represent cross-reference between a person's Department of Defense Electronic Data Interchange identifiers. If the code is invalidated, this is the new DoD EDI PN ID to use instead of the current one. This ID will be zero unless the INVL_DEPI_NTFCN_CD is Y.
EdipiXRef *uint64 `xml:"DOD_EDI_PN_XR_ID,omitempty"` // <xsd:element minOccurs="0" name="DOD_EDI_PN_XR_ID" type="tns:DOD_EDI_PN_XR_ID"/>
// The date the customer ended their association with ADR/ADW. - Date format is YYYYMMDD
CstrAscEndDt string `xml:"CSTR_ASC_END_DT,omitempty"` // <xsd:element minOccurs="0" name="CSTR_ASC_END_DT" type="tns:CSTR_ASC_END_DT"/>
// The code that represents the reason that the customer's association with ADR/ADW ended or is expected to end (see PN_LOSS_RSN_CD).
CstrAscErsnCd *CustomerAssocEndReasonCode `xml:"CSTR_ASC_ERSN_CD,omitempty"` // <xsd:element minOccurs="0" name="CSTR_ASC_ERSN_CD" type="tns:CSTR_ASC_ERSN_CD"/>
PidsRecord *PidsRecord `xml:"PIDSRecord,omitempty"` // <xsd:element minOccurs="0" name="PIDSRecord" type="tns:PIDSRecord"/>
TidsRecord *TidsRecord `xml:"TIDSRecord,omitempty"` // <xsd:element minOccurs="0" name="TIDSRecord" type="tns:TIDSRecord"/>
ExtsRecord *ExtsRecord `xml:"EXTSRecord,omitempty"` // <xsd:element minOccurs="0" name="EXTSRecord" type="tns:EXTSRecord"/>
OldEdipis []uint64 `xml:"identifierHistory>OLD_DOD_EDI_PN_ID,omitempty"`
WorkEmail *WkEmaRecord `xml:"WKEMARecord,omitempty"`
Person *Person `xml:"person,omitempty"`
Personnel []Personnel `xml:"personnel,omitempty"`
}
// CustomerAssocEndReasonCode represents the reason that the customer's association with ADR/ADW ended or is expected to end (see PN_LOSS_RSN_CD).
type CustomerAssocEndReasonCode string
const (
// CustomerAssocEndReasonCodeNotInPopulation means that the provided ID is associated with a person who is not in the population supported by this integration
CustomerAssocEndReasonCodeNotInPopulation CustomerAssocEndReasonCode = "N"
// CustomerAssocEndReasonCodeSeparated means that the provided ID is associated with a person who has separated
CustomerAssocEndReasonCodeSeparated CustomerAssocEndReasonCode = "S"
// CustomerAssocEndReasonCodeIneligible means that the provided ID is associated with a person who is not eligible
CustomerAssocEndReasonCodeIneligible CustomerAssocEndReasonCode = "W"
// CustomerAssocEndReasonCodeNoLongerMatches means that the search no longer matches customer criteria.
CustomerAssocEndReasonCodeNoLongerMatches CustomerAssocEndReasonCode = "Y"
) | pkg/iws/adr_record.go | 0.611614 | 0.430686 | adr_record.go | starcoder |
package schema
import (
"k8s.io/gengo/types"
)
// Collection returns the type schema for a collection with given name and
// element type.
func Collection(name types.Name, elemType *types.Type) *types.Type {
	// Build the pointer-to-struct collection type, then attach the full
	// method set (All, Any, Append, ...) referencing it and elemType.
	coll := collectionType(name)
	coll.Methods = collectionMethods(coll, sliceType(elemType), elemType)
	return coll
}
// collectionType models *Name: a pointer whose element is the named struct.
func collectionType(name types.Name) *types.Type {
	elem := &types.Type{
		Kind: types.Struct,
		Name: name,
	}
	return &types.Type{
		Kind: types.Pointer,
		Elem: elem,
		Name: types.Name{Name: "*" + name.String()},
	}
}
// sliceType models []Elem for the given element type.
func sliceType(elemType *types.Type) *types.Type {
	return &types.Type{
		Kind: types.Slice,
		Name: types.Name{Name: "[]" + elemType.Name.String()},
		Elem: elemType,
	}
}
func collectionMethods(collectionType, sliceType, elemType *types.Type) map[string]*types.Type {
return map[string]*types.Type{
"All": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
types.Bool,
},
},
},
"Any": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
types.Bool,
},
},
},
"Append": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Variadic: true,
Parameters: []*types.Type{
sliceType,
},
Results: []*types.Type{
collectionType,
},
},
},
"Cap": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
types.Int,
},
},
},
"Collect": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
collectionType,
},
},
},
"Contains": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
"Copy": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
collectionType,
},
},
},
"Cut": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
types.Int,
types.Int,
},
Results: []*types.Type{
sliceType,
},
},
},
"Each": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
},
},
},
},
},
"EachIndex": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
types.Int,
},
},
},
},
},
},
"Filter": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
collectionType,
},
},
},
"Find": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
elemType,
},
},
},
"FindOk": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
elemType,
types.Bool,
},
},
},
"First": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
elemType,
},
},
},
"FirstN": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
types.Int,
},
Results: []*types.Type{
sliceType,
},
},
},
"Get": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
types.Int,
},
Results: []*types.Type{
elemType,
},
},
},
"IndexOf": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Int,
},
},
},
"InsertItem": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
types.Int,
},
Results: []*types.Type{
collectionType,
},
},
},
"Interface": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
{
Kind: types.Interface,
Name: types.Name{Name: "interface{}"},
},
},
},
},
"IsSorted": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
types.Bool,
},
},
},
"Items": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
sliceType,
},
},
},
"Last": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
elemType,
},
},
},
"LastN": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
types.Int,
},
Results: []*types.Type{
sliceType,
},
},
},
"Len": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
types.Int,
},
},
},
"Map": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
elemType,
},
},
},
},
Results: []*types.Type{
collectionType,
},
},
},
"MapIndex": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
types.Int,
},
Results: []*types.Type{
elemType,
},
},
},
},
Results: []*types.Type{
collectionType,
},
},
},
"Nth": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
types.Int,
},
Results: []*types.Type{
elemType,
},
},
},
"Partition": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
collectionType,
collectionType,
},
},
},
"Prepend": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Variadic: true,
Parameters: []*types.Type{
sliceType,
},
Results: []*types.Type{
collectionType,
},
},
},
"Reduce": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
elemType,
},
Results: []*types.Type{
elemType,
},
},
},
},
Results: []*types.Type{
elemType,
},
},
},
"Reject": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
collectionType,
},
},
},
"Remove": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
types.Int,
},
Results: []*types.Type{
collectionType,
},
},
},
"RemoveItem": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
},
Results: []*types.Type{
collectionType,
},
},
},
"Reverse": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Results: []*types.Type{
collectionType,
},
},
},
"Slice": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
types.Int,
types.Int,
},
Results: []*types.Type{
sliceType,
},
},
},
"Sort": &types.Type{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
{
Kind: types.Func,
Signature: &types.Signature{
Parameters: []*types.Type{
elemType,
elemType,
},
Results: []*types.Type{
types.Bool,
},
},
},
},
Results: []*types.Type{
collectionType,
},
},
},
}
} | internal/schema/schema.go | 0.555556 | 0.401688 | schema.go | starcoder |
package gorange
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
)
// Range is a struct for representing infinite, open-ended, and finite ranges of values.
// Open ends are encoded as math.Inf(-1) / math.Inf(1); both bounds are inclusive.
type Range struct {
	Start float64 `json:"start"` // inclusive lower bound (-Inf when open-ended below)
	End float64 `json:"end"` // inclusive upper bound (+Inf when open-ended above)
}
// Equal tests if two ranges are identical (same Start and same End).
func (r Range) Equal(other Range) bool {
	return r.Start == other.Start && r.End == other.End
}
// Overlap reports whether the values of one range overlap the values of
// another. Two ranges overlap exactly when each one starts at or before
// the other ends.
//
// BUG FIX: the previous implementation evaluated
// `r.End >= other.Start || other.Start <= r.End`, which is the same
// condition written twice and never consulted other.End, so disjoint
// ranges such as [5,9] and [1,3] were incorrectly reported as overlapping.
func (r Range) Overlap(other Range) bool {
	return r.Start <= other.End && other.Start <= r.End
}
// Infinite tests if a range is infinite in both directions
// (Start == -Inf and End == +Inf).
func (r Range) Infinite() bool {
	return r.Start == math.Inf(-1) && r.End == math.Inf(1)
}
// Contains tests if a range contains a given value; both bounds are inclusive.
func (r Range) Contains(float float64) bool {
	return float >= r.Start && float <= r.End
}
// Merge merges one range with another, returning the smallest range that
// covers both. It returns an error (and the receiver unchanged) if the
// ranges do not overlap.
func (r Range) Merge(other Range) (Range, error) {
	if !r.Overlap(other) {
		// fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) anti-pattern
		// flagged by go vet (S1028); the message is unchanged.
		return r, fmt.Errorf("Range %v does not overlap range %v", r, other)
	}
	newRange, err := NewRange(math.Min(r.Start, other.Start), math.Max(r.End, other.End))
	if err != nil {
		return r, err
	}
	return newRange, nil
}
// values applies fn over the range's contents. For a range open-ended on
// either side only the two (transformed) endpoints are returned, one of
// which is infinite; for a finite range fn is applied to Start, Start+1, ...
// up to and including End, stepping by 1.
func (r Range) values(fn func(float64) float64) []float64 {
	valueList := []float64{}
	if r.Start == math.Inf(-1) {
		// Open-ended below: the contents are not enumerable, so report endpoints.
		valueList = append(valueList, fn(math.Inf(-1)))
		valueList = append(valueList, fn(r.End))
		return valueList
	} else if r.End == math.Inf(1) {
		// Open-ended above: likewise only the endpoints.
		valueList = append(valueList, fn(r.Start))
		valueList = append(valueList, fn(math.Inf(1)))
		return valueList
	} else {
		// Finite range: enumerate with a step of 1 starting from Start.
		for i := r.Start; i <= r.End; i++ {
			valueList = append(valueList, fn(i))
		}
	}
	return valueList
}
// Values returns the values in a range. If one end of the
// range is open-ended, this function will return a list of the
// range's start and end instead of enumerating the contents.
func (r Range) Values() []float64 {
	return r.values(func(v float64) float64 { return v })
}
// EachValue accepts a function to be applied to each value of a range.
// The results are discarded; fn is invoked purely for its side effects.
func (r Range) EachValue(fn func(float64) float64) {
	r.values(fn)
}
// ValueMap accepts a function to be applied to each value of a range; the modified
// values will be returned in place of the original values.
func (r Range) ValueMap(fn func(float64) float64) []float64 {
	return r.values(fn)
}
// valuesInRange applies fn to the values of r that also fall inside other
// (the intersection of the two ranges), treating open-ended endpoints the
// same way values does. Returns an empty slice when the ranges do not
// overlap.
func (r Range) valuesInRange(other Range, fn func(float64) float64) []float64 {
	if !r.Overlap(other) {
		return []float64{}
	}
	if other.Infinite() && r.Infinite() {
		// Both fully infinite: only the two infinite endpoints are reported.
		return []float64{fn(math.Inf(-1)), fn(math.Inf(1))}
	} else if other.Infinite() {
		// other covers everything, so the intersection is all of r.
		return r.values(fn)
	} else if other.Start == math.Inf(-1) {
		// other is open-ended below: the intersection is capped at min(End, End).
		if r.Start == math.Inf(-1) {
			// r is also open-ended below: endpoints only.
			return []float64{fn(math.Inf(-1)), fn(math.Min(other.End, r.End))}
		} else {
			values := []float64{}
			for i := r.Start; i <= math.Min(other.End, r.End); i++ {
				values = append(values, fn(i))
			}
			return values
		}
	} else if other.End == math.Inf(1) {
		// other is open-ended above: the intersection starts at max(Start, Start).
		if r.End == math.Inf(1) {
			// r is also open-ended above: endpoints only.
			return []float64{fn(math.Max(other.Start, r.Start)), fn(math.Inf(1))}
		} else {
			values := []float64{}
			for i := math.Max(other.Start, r.Start); i <= r.End; i++ {
				values = append(values, fn(i))
			}
			return values
		}
	} else {
		// Both ranges finite on both ends: enumerate the overlap.
		values := []float64{}
		for i := math.Max(other.Start, r.Start); i <= math.Min(other.End, r.End); i++ {
			values = append(values, fn(i))
		}
		return values
	}
}
// ValuesInRange returns the values in this range that overlap with the values in the supplied range.
func (r Range) ValuesInRange(other Range) []float64 {
	return r.valuesInRange(other, func(v float64) float64 { return v })
}
// EachValueInRange executes function fn on each value in this range that overlaps with
// the supplied range. Results are discarded; fn is invoked for side effects.
func (r Range) EachValueInRange(other Range, fn func(float64) float64) {
	r.valuesInRange(other, fn)
}
// MapValueInRange executes function fn on each value in this range that overlaps with
// the supplied range, and returns the values modified by fn.
func (r Range) MapValueInRange(other Range, fn func(float64) float64) []float64 {
	return r.valuesInRange(other, fn)
}
// NewRange creates a new range covering [start, end]. It returns an error
// if end is less than start.
func NewRange(start float64, end float64) (Range, error) {
	if start > end {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)); message unchanged.
		return Range{}, fmt.Errorf("Start: %f is after End: %f", start, end)
	}
	return Range{Start: start, End: end}, nil
}
// ParseRange parses a range from a string. Accepted forms (assuming the
// delimiter to be ":") are ":" (infinite), "Num:" (open-ended above),
// ":Num" (open-ended below), "Num:Num" (finite) and "Num" (the degenerate
// range [Num, Num]); any other input yields an error. Multi-character
// delimiters are supported.
func ParseRange(srange string, delimiter string) (Range, error) {
	var prange Range
	if !strings.Contains(srange, delimiter) {
		// No delimiter at all: "Num" denotes the degenerate range [Num, Num].
		float, err := strconv.ParseFloat(srange, 64)
		if err != nil {
			return prange, parsingError(srange, delimiter, err)
		}
		// BUG FIX: the error from NewRange was previously discarded and a
		// hard-coded nil error returned; propagate it instead.
		return NewRange(float, float)
	}
	if srange == delimiter {
		// The bare delimiter is the fully infinite range.
		return Range{Start: math.Inf(-1), End: math.Inf(1)}, nil
	}
	switch index := strings.Index(srange, delimiter); index {
	case 0:
		// ":Num" — open-ended below. Skip the full delimiter, not just one
		// byte, so multi-character delimiters work too.
		float, err := strconv.ParseFloat(srange[len(delimiter):], 64)
		if err != nil {
			return prange, parsingError(srange, delimiter, err)
		}
		return NewRange(math.Inf(-1), float)
	case len(srange) - len(delimiter):
		// "Num:" — open-ended above.
		float, err := strconv.ParseFloat(srange[:index], 64)
		if err != nil {
			return prange, parsingError(srange, delimiter, err)
		}
		return NewRange(float, math.Inf(1))
	default:
		// "Num:Num" — a finite range.
		ends := strings.Split(srange, delimiter)
		if len(ends) != 2 {
			return prange, parsingError(srange, delimiter, nil)
		}
		start, err := strconv.ParseFloat(ends[0], 64)
		if err != nil {
			// BUG FIX: a parse failure on the start value was previously
			// masked whenever the end value parsed successfully, because
			// err was overwritten before it was checked.
			return prange, parsingError(srange, delimiter, err)
		}
		end, err := strconv.ParseFloat(ends[1], 64)
		if err != nil {
			return prange, parsingError(srange, delimiter, err)
		}
		return NewRange(start, end)
	}
}
func parsingError(srange string, delimiter string, err error) error {
return errors.New(fmt.Sprintf("Error parsing range (%s) with delimiter (%s): %v", srange, delimiter, err))
} | range.go | 0.863046 | 0.667371 | range.go | starcoder |
package main
// Import packages
import ("image"; "image/color"; "image/png";
"os"
"log"
"math"; "math/cmplx";
"fmt")
// toComplex maps a pixel offset (x, y) on the canvas onto the complex
// plane, scaling both coordinates by zoom and translating by center.
func toComplex(x, y int, zoom float64, center complex128) complex128 {
	re := float64(x) / zoom
	im := float64(y) / zoom
	return center + complex(re, im)
}
// iterate runs the Mandelbrot recurrence z = z*z + c for up to iter-1
// steps, returning |z| afterwards, capped at 2000 as soon as the orbit
// escapes that radius.
func iterate(c complex128, iter int) float64 {
	const escape = 2000
	var z complex128
	for step := 1; step < iter; step++ {
		z = z*z + c
		if cmplx.Abs(z) > escape {
			return escape
		}
	}
	return cmplx.Abs(z)
}
// createColorizer loads a vertical gradient image and returns a closure
// that maps an iteration magnitude to a colour sampled from column 0 of
// that image.
func createColorizer(fileName string) func(float64) color.Color {
	gradient := CreateCanvas(fileName) // NOTE(review): CreateCanvas is defined elsewhere — presumably loads fileName as an image; confirm
	yLimit := gradient.Bounds().Size().Y - 1
	return func(magnitude float64) color.Color {
		// Scale the magnitude into a pixel row, clamped to [1, yLimit].
		m := int(math.Max(math.Min(300*magnitude, float64(yLimit)), 1))
		return gradient.At(0, m)
	}
}
// drawFractal fills the canvas by mapping every pixel (relative to the
// canvas centre) onto the complex plane, iterating the Mandelbrot
// recurrence there, and colouring the pixel from the resulting magnitude.
func drawFractal(canvas *Canvas, zoom float64, center complex128, colorizer func(float64) color.Color) {
	bounds := canvas.Bounds().Size()
	halfW, halfH := bounds.X/2, bounds.Y/2
	for px := 0; px < bounds.X; px++ {
		for py := 0; py < bounds.Y; py++ {
			point := toComplex(px-halfW, py-halfH, zoom, center)
			magnitude := iterate(point, 150)
			// Note: the local no longer shadows the image/color package.
			canvas.Set(px, py, colorizer(magnitude))
		}
	}
}
// createFractal renders a 2048x1024 Mandelbrot image centred on
// (real, imag) at the given zoom, coloured using the gradient image at
// gradFile, and writes it to "fractal_<zoom>_<real>_<imag>.png".
// Any file-system or encoding failure is fatal.
func createFractal(zoom float64, real float64, imag float64, gradFile string) {
	width, height := 2048, 1024
	canvas := NewCanvas(image.Rect(0, 0, width, height))
	center := complex(real, imag)
	colorizer := createColorizer(gradFile)
	drawFractal(canvas, zoom, center, colorizer)
	name := fmt.Sprintf("fractal_%f_%f_%f.png", zoom, real, imag)
	outFile, err := os.Create(name)
	if err != nil {
		log.Fatal(err)
	}
	defer outFile.Close()
	// BUG FIX: the error from png.Encode was previously discarded, which
	// could silently leave a truncated or empty image on disk.
	if err := png.Encode(outFile, canvas); err != nil {
		log.Fatal(err)
	}
}
// main renders a sequence of Mandelbrot images at increasing zoom levels
// and at two different centres on the complex plane, cycling through the
// gradient palettes under gradients/.
func main() {
	createFractal(100, 0, 0, "gradients/gradient1.png")
	createFractal(1000, 0, 0, "gradients/gradient2.png")
	createFractal(16000, 0, 0, "gradients/gradient2.png")
	createFractal(6000, 0.75, 0.25, "gradients/gradient3.png")
	createFractal(16000.0, -0.71, -0.25, "gradients/gradient3.png")
	createFractal(30000.0, -0.71, -0.25, "gradients/gradient3.png")
	createFractal(100000.0, -0.71, -0.25, "gradients/gradient1.png")
}
// Variable package supplies generalised model variables that allow watchers of models to make decisions on those
// models by reacting the changes in model decision variables. Decision variables for a model are considered part of
// that model's public interface.
package variable
import (
"github.com/LindsayBradford/crem/pkg/name"
)
// DecisionVariable describes an interface between a Model and any decision making logic observing the model via its
// decision variables. The Value of a decision Variable should be a fine-grained indicator of how well a model is
// doing against some objective we have for that model.
// There should be one decision Variable representing each objective being evaluated for a model.
type DecisionVariable interface {
	// Name returns the model-centric name of the DecisionVariable.
	// Decision variables are expected to have unique names within a model.
	name.Nameable
	// Value returns the variable's current value.
	Value() float64
	// SetValue replaces the variable's current value.
	SetValue(value float64)
	// UnitOfMeasure returns the unit the value is expressed in.
	UnitOfMeasure() UnitOfMeasure
	// Precision returns the reporting precision for the value.
	Precision() Precision
}
// UnitOfMeasure is the unit in which a decision variable's value is expressed.
type UnitOfMeasure string
// String implements fmt.Stringer for UnitOfMeasure.
func (uom UnitOfMeasure) String() string { return string(uom) }
// Units of measure used by decision variables.
const (
	NotApplicable UnitOfMeasure = "Not Applicable (NA)"
	TonnesPerYear UnitOfMeasure = "Tonnes per Year (t/y)"
	Dollars UnitOfMeasure = "Dollars ($)"
)
const defaultUnitOfMeasure = NotApplicable
// Precision is the reporting precision of a decision variable's value
// (presumably decimal places — confirm against consumers).
type Precision int
const defaultPrecision = 3
// Compile-time check that SimpleDecisionVariable satisfies DecisionVariable.
var _ DecisionVariable = NewSimpleDecisionVariable("test")
// NewSimpleDecisionVariable returns a SimpleDecisionVariable with the given
// name, a zero value, and the default unit of measure and precision.
func NewSimpleDecisionVariable(name string) *SimpleDecisionVariable {
	return &SimpleDecisionVariable{
		name: name,
		value: 0,
		unitOfMeasure: defaultUnitOfMeasure,
		precision: defaultPrecision,
	}
}
// SimpleDecisionVariable is a basic in-memory implementation of DecisionVariable.
type SimpleDecisionVariable struct {
	name string
	value float64
	unitOfMeasure UnitOfMeasure
	precision Precision
}
// Name returns the variable's name.
func (v *SimpleDecisionVariable) Name() string { return v.name }
// SetName replaces the variable's name.
func (v *SimpleDecisionVariable) SetName(name string) { v.name = name }
// Value returns the variable's current value.
func (v *SimpleDecisionVariable) Value() float64 { return v.value }
// SetValue replaces the variable's current value.
func (v *SimpleDecisionVariable) SetValue(value float64) { v.value = value }
// UnitOfMeasure returns the unit the value is expressed in.
func (v *SimpleDecisionVariable) UnitOfMeasure() UnitOfMeasure { return v.unitOfMeasure }
// SetUnitOfMeasure replaces the unit the value is expressed in.
func (v *SimpleDecisionVariable) SetUnitOfMeasure(measure UnitOfMeasure) { v.unitOfMeasure = measure }
// Precision returns the reporting precision for the value.
func (v *SimpleDecisionVariable) Precision() Precision { return v.precision }
// SetPrecision replaces the reporting precision for the value.
func (v *SimpleDecisionVariable) SetPrecision(precision Precision) { v.precision = precision }
package gravity
import (
"github.com/go-gl/mathgl/mgl32"
)
// Transformer combines a position, rotation and scale, and caches their
// composition as a 4x4 transform matrix (rebuilt by UpdateTransform).
type Transformer struct {
	matrix mgl32.Mat4 // cached composition of rot, pos and scale
	rot mgl32.Quat // orientation
	pos mgl32.Vec3 // translation
	scale mgl32.Vec3 // per-axis scale factors
}
// Unit basis vectors. NOTE(review): not referenced anywhere in this file —
// presumably kept for rotation helpers; confirm before removing.
var (
	xAxis = mgl32.Vec3{1, 0, 0}
	yAxis = mgl32.Vec3{0, 1, 0}
	zAxis = mgl32.Vec3{0, 0, 1}
)
// NewTransformer returns an identity Transformer: no translation, unit
// scale, identity rotation, identity matrix.
func NewTransformer() *Transformer {
	return &Transformer{
		matrix: mgl32.Ident4(),
		pos: mgl32.Vec3{0, 0, 0},
		scale: mgl32.Vec3{1, 1, 1},
		rot: mgl32.QuatIdent(),
	}
}
// UpdateTransform recomposes the cached matrix from the current rotation,
// position and scale. Elements 12-14 receive the translation, which
// assumes a column-major matrix layout — TODO confirm against mgl32.
func (t *Transformer) UpdateTransform() {
	t.matrix = t.rot.Mat4()
	t.matrix[12] = t.pos.X()
	t.matrix[13] = t.pos.Y()
	t.matrix[14] = t.pos.Z()
	scale(&t.matrix, t.scale)
}
// GetTransformMatrix returns a pointer to the cached transform matrix.
// Call UpdateTransform first if pos/rot/scale changed since the last build.
func (t *Transformer) GetTransformMatrix() *mgl32.Mat4 {
	return &t.matrix
}
// GetRotation returns the current orientation quaternion.
func (t *Transformer) GetRotation() mgl32.Quat {
	return t.rot
}
// GetPosition returns the current translation.
func (t *Transformer) GetPosition() mgl32.Vec3 {
	return t.pos
}
// SetPosition replaces the translation with (x, y, z).
func (t *Transformer) SetPosition(x, y, z float32) {
	t.pos[0], t.pos[1], t.pos[2] = x, y, z
}
// SetRotation replaces the orientation quaternion.
func (t *Transformer) SetRotation(q mgl32.Quat) {
	t.rot = q
}
// Translate moves the position by the vector v.
func (t *Transformer) Translate(v mgl32.Vec3) {
	t.pos = t.pos.Add(v)
}
// TranslateX moves the position by d along the x axis.
func (t *Transformer) TranslateX(d float32) {
	t.pos[0] += d
}
// TranslateY moves the position by d along the y axis.
func (t *Transformer) TranslateY(d float32) {
	t.pos[1] += d
}
// TranslateZ moves the position by d along the z axis.
func (t *Transformer) TranslateZ(d float32) {
	t.pos[2] += d
}
// // RotateX ...
// func (t *Transformer) RotateX(angle float32) {
// }
// // RotateY ...
// func (t *Transformer) RotateY(angle float32) {
// }
// // RotateZ ...
// func (t *Transformer) RotateZ(angle float32) {
// }
// Rotate ...
// func (t *Transformer) Rotate(q mgl32.Quat) {
// t.rot = q.Mul(t.rot)
// }
// GetScale returns the current per-axis scale factors.
func (t *Transformer) GetScale() mgl32.Vec3 {
	return t.scale
}
// Scale replaces the scale factors with v (non-uniform scaling).
func (t *Transformer) Scale(v mgl32.Vec3) {
	t.scale = v
}
// Scalef multiplies all three scale factors by f (uniform scaling).
func (t *Transformer) Scalef(f float32) {
	t.scale = t.scale.Mul(f)
}
// ScaleX multiplies the x scale factor by f.
func (t *Transformer) ScaleX(f float32) {
	t.scale[0] *= f
}
// ScaleY multiplies the y scale factor by f.
func (t *Transformer) ScaleY(f float32) {
	t.scale[1] *= f
}
// ScaleZ multiplies the z scale factor by f.
func (t *Transformer) ScaleZ(f float32) {
	t.scale[2] *= f
}
func scale(out *mgl32.Mat4, v mgl32.Vec3) {
out[0] *= v[0]
out[1] *= v[0]
out[2] *= v[0]
out[3] *= v[0]
out[4] *= v[1]
out[5] *= v[1]
out[6] *= v[1]
out[7] *= v[1]
out[8] *= v[2]
out[9] *= v[2]
out[10] *= v[2]
out[11] *= v[2]
} | components.go | 0.691081 | 0.566378 | components.go | starcoder |
package mongo
import (
"github.com/globalsign/mgo/bson"
"github.com/shopspring/decimal"
)
// M is a convenience alias for a generic BSON-style document.
type M map[string]interface{}
// Decimal wraps shopspring/decimal.Decimal so values can be stored in
// MongoDB as BSON Decimal128 (see GetBSON / SetBSON).
type Decimal decimal.Decimal
// NewDecimal returns a Decimal representing num * 10^exp.
func NewDecimal(num int64, exp int32) Decimal {
	return Decimal(decimal.New(num, exp))
}
// NewDecimalFromString parses s into a Decimal, returning an error when s
// is not a valid decimal number.
func NewDecimalFromString(s string) (d Decimal, err error) {
	var dec decimal.Decimal
	if dec, err = decimal.NewFromString(s); err != nil {
		return d, err
	}
	return Decimal(dec), err
}
// GetBSON implements bson.Getter, serialising the Decimal as a native BSON
// Decimal128 value (round-tripped through its string form).
func (d Decimal) GetBSON() (interface{}, error) {
	return bson.ParseDecimal128(decimal.Decimal(d).String())
}
// SetBSON implements bson.Setter, parsing a stored BSON Decimal128 back
// into a Decimal via its string representation.
func (d *Decimal) SetBSON(raw bson.Raw) (err error) {
	var dec128 bson.Decimal128
	if err = raw.Unmarshal(&dec128); err != nil {
		return err
	}
	var dec decimal.Decimal
	if dec, err = decimal.NewFromString(dec128.String()); err != nil {
		return err
	}
	*d = Decimal(dec)
	return nil
}
// The methods below are thin 1:1 wrappers delegating to the corresponding
// shopspring/decimal.Decimal methods, preserving the mongo.Decimal type.
// Abs returns the absolute value of d.
func (d Decimal) Abs() Decimal {
	return Decimal(decimal.Decimal(d).Abs())
}
// Add returns d + d2.
func (d Decimal) Add(d2 Decimal) Decimal {
	return Decimal(decimal.Decimal(d).Add(decimal.Decimal(d2)))
}
// Ceil returns the nearest integer value greater than or equal to d.
func (d Decimal) Ceil() Decimal {
	return Decimal(decimal.Decimal(d).Ceil())
}
// Div returns d / d2.
func (d Decimal) Div(d2 Decimal) Decimal {
	return Decimal(decimal.Decimal(d).Div(decimal.Decimal(d2)))
}
// DivRound returns d / d2 rounded to the given number of decimal places.
func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal {
	return Decimal(decimal.Decimal(d).DivRound(decimal.Decimal(d2), precision))
}
// Equal reports whether d and d2 represent the same value.
func (d Decimal) Equal(d2 Decimal) bool {
	return decimal.Decimal(d).Equal(decimal.Decimal(d2))
}
// Float64 returns the nearest float64 and whether it represents d exactly.
func (d Decimal) Float64() (f float64, exact bool) {
	return decimal.Decimal(d).Float64()
}
// Floor returns the nearest integer value less than or equal to d.
func (d Decimal) Floor() Decimal {
	return Decimal(decimal.Decimal(d).Floor())
}
// GreaterThan reports whether d > d2.
func (d Decimal) GreaterThan(d2 Decimal) bool {
	return decimal.Decimal(d).GreaterThan(decimal.Decimal(d2))
}
// GreaterThanOrEqual reports whether d >= d2.
func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool {
	return decimal.Decimal(d).GreaterThanOrEqual(decimal.Decimal(d2))
}
// IsNegative reports whether d < 0.
func (d Decimal) IsNegative() bool {
	return decimal.Decimal(d).IsNegative()
}
// IsPositive reports whether d > 0.
func (d Decimal) IsPositive() bool {
	return decimal.Decimal(d).IsPositive()
}
// IsZero reports whether d == 0.
func (d Decimal) IsZero() bool {
	return decimal.Decimal(d).IsZero()
}
// LessThan reports whether d < d2.
func (d Decimal) LessThan(d2 Decimal) bool {
	return decimal.Decimal(d).LessThan(decimal.Decimal(d2))
}
// LessThanOrEqual reports whether d <= d2.
func (d Decimal) LessThanOrEqual(d2 Decimal) bool {
	return decimal.Decimal(d).LessThanOrEqual(decimal.Decimal(d2))
}
// MarshalJSON implements json.Marshaler.
func (d Decimal) MarshalJSON() ([]byte, error) {
	return decimal.Decimal(d).MarshalJSON()
}
// Mul returns d * d2.
func (d Decimal) Mul(d2 Decimal) Decimal {
	return Decimal(decimal.Decimal(d).Mul(decimal.Decimal(d2)))
}
// Round rounds d to the given number of decimal places.
func (d Decimal) Round(places int32) Decimal {
	return Decimal(decimal.Decimal(d).Round(places))
}
// RoundBank rounds d using banker's rounding (see decimal.Decimal.RoundBank).
func (d Decimal) RoundBank(places int32) Decimal {
	return Decimal(decimal.Decimal(d).RoundBank(places))
}
// RoundCash rounds d to a cash amount for the given interval
// (see decimal.Decimal.RoundCash for the valid intervals).
func (d Decimal) RoundCash(interval uint8) Decimal {
	return Decimal(decimal.Decimal(d).RoundCash(interval))
}
// Sign returns -1, 0 or 1 depending on the sign of d.
func (d Decimal) Sign() int {
	return decimal.Decimal(d).Sign()
}
// String returns d formatted as a string.
func (d Decimal) String() string {
	return decimal.Decimal(d).String()
}
// Truncate drops digits after the given precision without rounding.
func (d Decimal) Truncate(precision int32) Decimal {
	return Decimal(decimal.Decimal(d).Truncate(precision))
}
// UnmarshalJSON implements json.Unmarshaler.
func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error {
	return (*decimal.Decimal)(d).UnmarshalJSON(decimalBytes)
}
package common
//------------------------------------------------------------------------------
// UndefinedState indicates a component state is undefined
const UndefinedState string = "undefined"
// FailureState indicates a component-related failure has occurred
const FailureState string = "failure"
// InitialState indicates a component is in the initial state
const InitialState string = "initial"
// InactiveState indicates a component is in the inactive state
const InactiveState string = "inactive"
// ActiveState indicates a component is in the active state
const ActiveState string = "active"
// CreatingState indicates a component is in the creating state
const CreatingState string = "creating"
// DestroyingState indicates a component is in the destroying state
const DestroyingState string = "destroying"
// StartingState indicates a component is in the starting state
const StartingState string = "starting"
// StoppingState indicates a component is in the stopping state
const StoppingState string = "stopping"
// ConfiguringState indicates a component is in the configuring state
const ConfiguringState string = "configuring"
// ResettingState indicates a component is in the resetting state
const ResettingState string = "resetting"
//------------------------------------------------------------------------------
// CreateAction requests the instantiation of a resource
const CreateAction string = "create"
// DestroyAction requests the instantiation of a resource
const DestroyAction string = "destroy"
// StartAction requests the activation of a resource
const StartAction string = "start"
// StopAction requests the deactivation of a resource
const StopAction string = "stop"
// ConfigureAction requests the configuration of an inactive resource
const ConfigureAction string = "configure"
// ReconfigureAction requests the reconfiguration of an active resource
const ReconfigureAction string = "reconfigure"
// ResetAction requests the removal of a resource
const ResetAction string = "reset"
// StatusAction requests the status of a resource
const StatusAction string = "status"
//------------------------------------------------------------------------------
// RelationshipState describes the current state of a relationship
type RelationshipState struct {
Relationship string `yaml:"Relationship"` // name of relationship
Dependency string `yaml:"Dependency"` // name of dependency
Configuration string `yaml:"Configuration"` // configuration information
Endpoint string `yaml:"Endpoint"` // endpoint information in yaml format
}
//------------------------------------------------------------------------------
// InstanceState describes the current state of an instance
type InstanceState struct {
Instance string `yaml:"Instance"` // id of an instance
State string `yaml:"State"` // state of an instance
Endpoint string `yaml:"Endpoint"` // endpoint information in yaml format
}
//------------------------------------------------------------------------------
// Request sent to controller.
type Request struct {
Request string `yaml:"Request"` // request ID
Domain string `yaml:"Domain"` // name of the domain
Solution string `yaml:"Solution"` // name of solution
Version string `yaml:"Version"` // version of solution
Element string `yaml:"Element"` // name of element
Cluster string `yaml:"Cluster"` // name of cluster
Instance string `yaml:"Instance"` // name of instance
Component string `yaml:"Component"` // component type of instance
State string `yaml:"State"` // state of instance
Min int `yaml:"Min"` // min. size of the solution element cluster
Max int `yaml:"Max"` // max. size of the solution element cluster
Size int `yaml:"Size"` // size of the solution element cluster
Configuration string `yaml:"Configuration"` // configuration of instance
Relationships []RelationshipState `yaml:"Relationships"` // current state of all relationships
Instances []InstanceState `yaml:"Instances"` // current state of all instances
}
//------------------------------------------------------------------------------
// Response received from controller.
type Response struct {
Request string `yaml:"Request"` // request ID
Action string `yaml:"Action"` // requested action
Code int `yaml:"Code"` // response code
Status string `yaml:"Status"` // status information
Domain string `yaml:"Domain"` // name of the domain
Solution string `yaml:"Solution"` // name of solution
Version string `yaml:"Version"` // version of solution
Element string `yaml:"Element"` // name of element
Cluster string `yaml:"Cluster"` // name of cluster
Instance string `yaml:"Instance"` // name of instance
Component string `yaml:"Component"` // component type of instance
State string `yaml:"State"` // state of instance
Configuration string `yaml:"Configuration"` // configuration of instance
Endpoint string `yaml:"Endpoint"` // endpoint of instance
}
//------------------------------------------------------------------------------ | src/tsai.eu/solar/controller/openstackController/common/controller.go | 0.63624 | 0.496948 | controller.go | starcoder |
package keyproof
import (
"github.com/privacybydesign/gabi/big"
"github.com/privacybydesign/gabi/internal/common"
)
type (
	// AlmostSafePrimeProductProof is the serialisable proof that a modulus
	// is the product of two "almost safe" primes: a nonce from which the
	// round bases are derived, plus one commitment and one response per
	// iteration.
	AlmostSafePrimeProductProof struct {
		Nonce *big.Int
		Commitments []*big.Int
		Responses []*big.Int
	}
	// almostSafePrimeProductCommit is the prover's private commitment state,
	// pairing each published commitment with its secret discrete log.
	almostSafePrimeProductCommit struct {
		nonce *big.Int
		commitments []*big.Int
		logs []*big.Int
	}
)
// almostSafePrimeProductBuildCommitments generates the proof commitments.
// For each of the almostSafePrimeProductIters rounds it derives a base from
// a freshly chosen nonce, draws a random secret exponent in [0, phi(N)) and
// appends base^exponent mod N to list. The secrets are retained in the
// returned commit structure for almostSafePrimeProductBuildProof.
func almostSafePrimeProductBuildCommitments(list []*big.Int, Pprime *big.Int, Qprime *big.Int) ([]*big.Int, almostSafePrimeProductCommit) {
	// Setup proof structure
	var commit almostSafePrimeProductCommit
	// Calculate N = (2*Pprime+1)*(2*Qprime+1) and phi(N) = 4*Pprime*Qprime
	N := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Lsh(Pprime, 1), big.NewInt(1)), new(big.Int).Add(new(big.Int).Lsh(Qprime, 1), big.NewInt(1)))
	phiN := new(big.Int).Lsh(new(big.Int).Mul(Pprime, Qprime), 2)
	// Generate nonce uniformly below 2^almostSafePrimeProductNonceSize
	nonceMax := new(big.Int).Lsh(big.NewInt(1), almostSafePrimeProductNonceSize)
	commit.nonce = common.FastRandomBigInt(nonceMax)
	for i := 0; i < almostSafePrimeProductIters; i++ {
		// Calculate base from nonce; it must be a unit of Z_N
		curc := common.GetHashNumber(commit.nonce, nil, i, uint(N.BitLen()))
		curc.Mod(curc, N)
		if new(big.Int).GCD(nil, nil, curc, N).Cmp(big.NewInt(1)) != 0 {
			panic("Generated number not in Z_N")
		}
		// Commit to a random exponent for this round
		log := common.FastRandomBigInt(phiN)
		com := new(big.Int).Exp(curc, log, N)
		list = append(list, com)
		commit.commitments = append(commit.commitments, com)
		commit.logs = append(commit.logs, log)
	}
	return list, commit
}
// almostSafePrimeProductBuildProof computes the responses matching the
// commitments from almostSafePrimeProductBuildCommitments, given the
// Fiat-Shamir challenge and list index. For each round the secret exponent
// is shifted by a challenge-derived offset, and the published response is a
// square root (modulo the odd part of phi(N)) of whichever of +-x or +-x/2
// is a quadratic residue; exactly one of those four is expected to be.
func almostSafePrimeProductBuildProof(Pprime *big.Int, Qprime *big.Int, challenge *big.Int, index *big.Int, commit almostSafePrimeProductCommit) AlmostSafePrimeProductProof {
	// Setup proof structure
	proof := AlmostSafePrimeProductProof{
		Nonce: commit.nonce,
		Commitments: commit.commitments,
	}
	// Calculate useful constants: N, phi(N) = 4*P'*Q' and its odd part P'*Q'
	N := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Lsh(Pprime, 1), big.NewInt(1)), new(big.Int).Add(new(big.Int).Lsh(Qprime, 1), big.NewInt(1)))
	phiN := new(big.Int).Lsh(new(big.Int).Mul(Pprime, Qprime), 2)
	oddPhiN := new(big.Int).Mul(Pprime, Qprime)
	factors := []*big.Int{
		Pprime,
		Qprime,
	}
	// Calculate responses
	for i := 0; i < almostSafePrimeProductIters; i++ {
		// Derive the round's challenge offset and shift the secret exponent
		curc := common.GetHashNumber(challenge, index, i, uint(2*N.BitLen()))
		log := new(big.Int).Mod(new(big.Int).Add(commit.logs[i], curc), phiN)
		// Calculate response candidates: x, -x, x/2, -x/2 (mod oddPhiN)
		x1 := new(big.Int).Mod(log, oddPhiN)
		x2 := new(big.Int).Sub(oddPhiN, x1)
		x3 := new(big.Int).Mod(new(big.Int).Mul(new(big.Int).ModInverse(big.NewInt(2), oddPhiN), x1), oddPhiN)
		x4 := new(big.Int).Sub(oddPhiN, x3)
		// Try to take a square root of each candidate using the factors
		r1, ok1 := common.ModSqrt(x1, factors)
		r2, ok2 := common.ModSqrt(x2, factors)
		r3, ok3 := common.ModSqrt(x3, factors)
		r4, ok4 := common.ModSqrt(x4, factors)
		// And add the useful one
		if ok1 {
			proof.Responses = append(proof.Responses, r1)
		} else if ok2 {
			proof.Responses = append(proof.Responses, r2)
		} else if ok3 {
			proof.Responses = append(proof.Responses, r3)
		} else if ok4 {
			proof.Responses = append(proof.Responses, r4)
		} else {
			panic("none of +-x, +-x/2 are square")
		}
	}
	return proof
}
// almostSafePrimeProductVerifyStructure checks that a proof is structurally
// complete: the nonce is present, both the commitment and response lists
// exist with exactly almostSafePrimeProductIters entries, and neither list
// contains a nil value. No cryptographic verification is performed here.
func almostSafePrimeProductVerifyStructure(proof AlmostSafePrimeProductProof) bool {
	if proof.Nonce == nil {
		return false
	}
	if proof.Commitments == nil || proof.Responses == nil {
		return false
	}
	if len(proof.Commitments) != almostSafePrimeProductIters {
		return false
	}
	if len(proof.Responses) != almostSafePrimeProductIters {
		return false
	}
	for _, commitment := range proof.Commitments {
		if commitment == nil {
			return false
		}
	}
	for _, response := range proof.Responses {
		if response == nil {
			return false
		}
	}
	return true
}
// almostSafePrimeProductExtractCommitments appends the proof's commitments
// to list, mirroring the order produced during proof building.
func almostSafePrimeProductExtractCommitments(list []*big.Int, proof AlmostSafePrimeProductProof) []*big.Int {
	return append(list, proof.Commitments...)
}
// almostSafePrimeProductVerifyProof verifies a proof against modulus N, the
// Fiat-Shamir challenge and the list index. Each round recomputes the base
// from the proof nonce and checks that the squared response accounts for
// the combined commitment/challenge value, allowing for the sign and
// factor-of-two ambiguity introduced on the prover side.
func almostSafePrimeProductVerifyProof(N *big.Int, challenge *big.Int, index *big.Int, proof AlmostSafePrimeProductProof) bool {
	// Verify N=1(mod 3), as this decreases the error prob from 9/10 to 4/5
	if new(big.Int).Mod(N, big.NewInt(3)).Cmp(big.NewInt(1)) != 0 {
		return false
	}
	// Prepare gamma = 2^BitLen(N)
	gamma := new(big.Int).Lsh(big.NewInt(1), uint(N.BitLen()))
	// Check responses
	for i := 0; i < almostSafePrimeProductIters; i++ {
		// Generate base (same derivation as on the prover side)
		base := common.GetHashNumber(proof.Nonce, nil, i, uint(N.BitLen()))
		base.Mod(base, N)
		// Generate challenge and fold it into the commitment
		x := common.GetHashNumber(challenge, index, i, uint(2*N.BitLen()))
		y := new(big.Int).Mod(
			new(big.Int).Mul(
				proof.Commitments[i],
				new(big.Int).Exp(base, x, N)),
			N)
		// Verify: accept when y^gamma matches ((base^gamma)^(r^2)) either
		// directly, inverted, squared, or squared-and-inverted — covering
		// all four response normalisations chosen during proof building.
		yg := new(big.Int).Exp(y, gamma, N)
		t1 := new(big.Int).Exp(base, gamma, N)
		t1.Exp(t1, proof.Responses[i], N)
		t1.Exp(t1, proof.Responses[i], N)
		t2 := new(big.Int).ModInverse(t1, N)
		t3 := new(big.Int).Exp(t1, big.NewInt(2), N)
		t4 := new(big.Int).ModInverse(t3, N)
		ok1 := t1.Cmp(yg) == 0
		ok2 := t2.Cmp(yg) == 0
		ok3 := t3.Cmp(yg) == 0
		ok4 := t4.Cmp(yg) == 0
		if !ok1 && !ok2 && !ok3 && !ok4 {
			return false
		}
	}
	return true
}
package containerruntimeconfig
// unionNode is a node in the union graph. In our case, it is a mirror host[:port].
type unionNode = string // An alias, so that the users don't have to explicitly cast data.
// internalUnionNode is the unionGraph representation of a node.
// There is exactly one instance of *internalUnionNode for each .public value.
type internalUnionNode struct {
	public unionNode // the externally visible value this node represents
	parent *internalUnionNode // A higher-ranking member of the same disjoint set, or nil for a set root
	rank int // upper bound on the height of the tree rooted here (union by rank)
}
// unionGraph is a set of unionNodes which supports identifying disjoint subsets.
// See e.g. https://en.wikipedia.org/wiki/Disjoint-set_data_structure .
// The graph is built implicitly, i.e. there is no explicit “add node” operation; it is perfectly valid
// to just call .Union or .Find using unionNode values that have never been mentioned before.
type unionGraph struct {
	// This is not all that efficient, the hash map lookups by unionNode == string are likely more costly than any other
	// use of the graph, but the graphs we need to handle are very small and readability is more important.
	nodes map[unionNode]*internalUnionNode // every node ever mentioned, keyed by its public value
}
// newUnionGraph returns an empty unionGraph.
func newUnionGraph() *unionGraph {
	return &unionGraph{
		nodes: map[unionNode]*internalUnionNode{},
	}
}
// getNode returns the internal representation of node, creating and
// registering a fresh singleton root (no parent, rank zero) on first
// reference.
func (g *unionGraph) getNode(node unionNode) *internalUnionNode {
	if existing, ok := g.nodes[node]; ok {
		return existing
	}
	created := &internalUnionNode{
		public: node,
		parent: nil,
		rank: 0,
	}
	g.nodes[node] = created
	return created
}
// find is Find(), except it returns the *internalUnionNode root of the
// disjoint set containing query. It also performs path compression: every
// node visited on the way up is re-parented directly onto the root.
func (g *unionGraph) find(query unionNode) *internalUnionNode {
	start := g.getNode(query)
	// Walk up to the set's root (the node without a parent).
	root := start
	for root.parent != nil {
		root = root.parent
	}
	// Compress the walked path.
	current := start
	for current != root {
		next := current.parent
		current.parent = root
		current = next
	}
	return root
}
// Find returns a representative member of the disjoint set containing query.
// All Find() calls on members of the same disjoint set return the same representative member until g.Union is called again.
// Querying a never-seen node implicitly adds it as a singleton set.
func (g *unionGraph) Find(query unionNode) unionNode {
	return g.find(query).public
}
// Union joins the sets of a, b
func (g *unionGraph) Union(a, b unionNode) {
inA := g.find(a)
inB := g.find(b)
if inA != inB {
if inA.rank > inB.rank {
inB.parent = inA
} else if inA.rank < inB.rank {
inA.parent = inB
} else { // inA.rank == inB.rank
inA.parent = inB
inB.rank++
}
}
} | pkg/controller/container-runtime-config/union.go | 0.803328 | 0.48499 | union.go | starcoder |
package cmd
import (
"fmt"
"strconv"
"strings"
"github.com/jaredbancroft/aoc2020/pkg/helpers"
"github.com/jaredbancroft/aoc2020/pkg/translator"
"github.com/spf13/cobra"
)
// day16Cmd represents the day16 command
var day16Cmd = &cobra.Command{
Use: "day16",
Short: "Advent of Code 2020 - Day16: Ticket Translation",
Long: `
Advent of Code 2020
--- Day 16: Ticket Translation ---
As you're walking to yet another connecting flight, you realize that one of the legs of your re-routed
trip coming up is on a high-speed train. However, the train ticket you were given is in a language you
don't understand. You should probably figure out what it says before you get to the train station after
the next flight.
Unfortunately, you can't actually read the words on the ticket. You can, however, read the numbers, and
so you figure out the fields these tickets must have and the valid ranges for values in those fields.
You collect the rules for ticket fields, the numbers on your ticket, and the numbers on other nearby
tickets for the same train service (via the airport security cameras) together into a single document
you can reference (your puzzle input).
The rules for ticket fields specify a list of fields that exist somewhere on the ticket and the valid
ranges of values for each field. For example, a rule like class: 1-3 or 5-7 means that one of the fields
in every ticket is named class and can be any value in the ranges 1-3 or 5-7 (inclusive, such that 3 and
5 are both valid in this field, but 4 is not).
Each ticket is represented by a single line of comma-separated values. The values are the numbers on the
ticket in the order they appear; every ticket has the same format. For example, consider this ticket:
.--------------------------------------------------------.
| ????: 101 ?????: 102 ??????????: 103 ???: 104 |
| |
| ??: 301 ??: 302 ???????: 303 ??????? |
| ??: 401 ??: 402 ???? ????: 403 ????????? |
'--------------------------------------------------------'
Here, ? represents text in a language you don't understand. This ticket might be represented as 101,102,
103,104,301,302,303,401,402,403; of course, the actual train tickets you're looking at are much more
complicated. In any case, you've extracted just the numbers in such a way that the first number is always
the same specific field, the second number is always a different specific field, and so on - you just don't
know what each position actually means!
Start by determining which tickets are completely invalid; these are tickets that contain values which
aren't valid for any field. Ignore your ticket for now.
For example, suppose you have the following notes:
class: 1-3 or 5-7
row: 6-11 or 33-44
seat: 13-40 or 45-50
your ticket:
7,1,14
nearby tickets:
7,3,47
40,4,50
55,2,20
38,6,12
It doesn't matter which position corresponds to which field; you can identify invalid nearby tickets by
considering only whether tickets contain values that are not valid for any field. In this example, the
values on the first nearby ticket are all valid for at least one field. This is not true of the other
three nearby tickets: the values 4, 55, and 12 are are not valid for any field. Adding together all of the
invalid values produces your ticket scanning error rate: 4 + 55 + 12 = 71.
Consider the validity of the nearby tickets you scanned. What is your ticket scanning error rate?`,
RunE: func(cmd *cobra.Command, args []string) error {
inputs, err := helpers.ReadGroupStringFile(input)
if err != nil {
return err
}
fields := inputs[0]
m := strings.Split(inputs[1][1], ",")
myTicket := make([]int, 0)
for _, i := range m {
j, _ := strconv.Atoi(i)
myTicket = append(myTicket, j)
}
otherTicketsRaw := inputs[2]
otherTickets := make([][]int, 0)
for _, ticket := range otherTicketsRaw[1:] {
t := strings.Split(ticket, ",")
otherTicket := make([]int, 0)
for _, i := range t {
j, _ := strconv.Atoi(i)
otherTicket = append(otherTicket, j)
}
otherTickets = append(otherTickets, otherTicket)
}
rules := make([]translator.Rule, 0)
for _, field := range fields {
fs := strings.Split(field, ":")
name := fs[0]
f := strings.TrimSpace(fs[1])
nums := strings.Split(f, "or")
range1 := strings.Split(strings.TrimSpace(nums[0]), "-")
range2 := strings.Split(strings.TrimSpace(nums[1]), "-")
range1Low, _ := strconv.Atoi(range1[0])
range1High, _ := strconv.Atoi(range1[1])
range2Low, _ := strconv.Atoi(range2[0])
range2High, _ := strconv.Atoi(range2[1])
rule := translator.NewRule(name, range1Low, range1High, range2Low, range2High)
rules = append(rules, rule)
}
ruler := translator.NewRuler(rules)
fmt.Println(ruler.CheckAll(myTicket, otherTickets))
return nil
},
}
// init registers the day16 subcommand with the root Cobra command at
// program startup.
func init() {
	rootCmd.AddCommand(day16Cmd)
} | cmd/day16.go | 0.602997 | 0.463869 | day16.go | starcoder |
package opt
import (
"github.com/cpmech/gosl/fun"
"github.com/cpmech/gosl/io"
"github.com/cpmech/gosl/la"
"github.com/cpmech/gosl/plt"
"github.com/cpmech/gosl/utl"
)
// History holds the history of an optimization run that uses search
// directions; e.g. for debugging.
type History struct {
	Ndim int // dimension of x-vector
	HistX []la.Vector // [it] history of x-values (position)
	HistN []la.Vector // [it] history of n-values (direction); entry 0 is nil (see NewHistory)
	HistF []float64 // [it] history of f-values
	HistI []float64 // [it] index of iteration
	ffcn fun.Sv // f({x}) function
}
// NewHistory creates a History seeded with the starting point x0 and its
// objective value f0. A copy of x0 is stored; the direction slot for the
// initial entry is nil. nMaxIt is accepted for interface compatibility but
// is not used for pre-allocation.
func NewHistory(nMaxIt int, f0 float64, x0 la.Vector, ffcn fun.Sv) (o *History) {
	return &History{
		Ndim:  len(x0),
		HistX: []la.Vector{x0.GetCopy()},
		HistN: []la.Vector{nil},
		HistF: []float64{f0},
		HistI: []float64{0},
		ffcn:  ffcn,
	}
}
// Append records one iteration: the objective value fx, the position x and
// the search direction n. Copies of x and n are stored; the iteration index
// is derived from the current history length.
func (o *History) Append(fx float64, x, n la.Vector) {
	o.HistF = append(o.HistF, fx)
	o.HistI = append(o.HistI, float64(len(o.HistI)))
	o.HistX = append(o.HistX, x.GetCopy())
	o.HistN = append(o.HistN, n.GetCopy())
}
// Plot renders the recorded history: a contour of the objective over the
// plotting plane with the optimization trajectory overlaid (top subplot),
// and the convergence of f versus iteration (bottom subplot).
// NOTE(review): the contour callback always evaluates ffcn on a 2-component
// vector, so this effectively assumes Ndim == 2; iDim and jDim are currently
// unused in the body — confirm intent.
func (o *History) Plot(iDim, jDim int, ximin, ximax, xjmin, xjmax float64, npts int) {
	// contour
	plt.Subplot(2, 1, 1)
	xvec := la.NewVector(2)
	xx, yy, zz := utl.MeshGrid2dF(ximin, ximax, xjmin, xjmax, npts, npts, func(r, s float64) float64 {
		xvec[0], xvec[1] = r, s
		return o.ffcn(xvec)
	})
	plt.ContourF(xx, yy, zz, nil)
	// trajectory: HistN[0] is nil (see NewHistory), so the direction taken
	// *from* position k is stored at index k+1.
	for k := 0; k < len(o.HistX)-1; k++ {
		x := o.HistX[k]
		n := o.HistN[1+k]
		if n.Norm() > 1e-10 {
			plt.PlotOne(x[0], x[1], &plt.A{C: "y", M: "o", Z: 10, NoClip: true})
			plt.DrawArrow2d(x, n, true, 1, nil)
		}
	}
	// final point
	l := len(o.HistX) - 1
	plt.PlotOne(o.HistX[l][0], o.HistX[l][1], &plt.A{C: "y", M: "*", Ms: 10, Z: 10, NoClip: true})
	// convergence: f(x) per iteration, with first/last values annotated
	plt.Subplot(2, 1, 2)
	plt.Plot(o.HistI, o.HistF, &plt.A{C: plt.C(2, 0), M: ".", Ls: "-", Lw: 2, NoClip: true})
	plt.Text(o.HistI[0], o.HistF[0], io.Sf("%.3f", o.HistF[0]), &plt.A{C: plt.C(0, 0), Fsz: 7, Ha: "left", Va: "top", NoClip: true})
	plt.Text(o.HistI[l], o.HistF[l], io.Sf("%.3f", o.HistF[l]), &plt.A{C: plt.C(0, 0), Fsz: 7, Ha: "right", Va: "bottom", NoClip: true})
	plt.Gll("$iteration$", "$f(x)$", nil)
	plt.HideTRborders()
} | opt/history.go | 0.570331 | 0.436742 | history.go | starcoder |
package util
import (
	"fmt"
	"math"
	"reflect"

	. "github.com/philc/gumshoedb/internal/github.com/cespare/a"
)
// DeepConvertibleEquals is a checker like DeepEqual which converts all numeric types to float64 before
// comparing. DeepConvertibleEquals only handles simple types, slices, and maps (but it is recursive).
// Expects exactly two arguments: the actual value first, the expected value second.
// NOTE(caleb): Add more types as needed.
func DeepConvertibleEquals(args ...interface{}) (ok bool, message string) {
	params, message, err := ExpectNArgs(2, args)
	if err != nil {
		return false, err.Error()
	}
	if !deepValueConvertibleEquals(reflect.ValueOf(params[0]), reflect.ValueOf(params[1])) {
		// Prefer any message supplied by ExpectNArgs over the default diff.
		if message != "" {
			return false, message
		}
		return false, fmt.Sprintf("deep equal: expected %+v; got %+v", params[1], params[0])
	}
	return true, ""
}
// DeepEqualsUnordered is a checker similar to DeepConvertibleEquals but expects two top-level slices, and
// does order-independent comparisons (for the top level only). Elements of nested
// slices/maps must still match in order.
func DeepEqualsUnordered(args ...interface{}) (ok bool, message string) {
	params, message, err := ExpectNArgs(2, args)
	if err != nil {
		return false, err.Error()
	}
	v1 := reflect.ValueOf(params[0])
	v2 := reflect.ValueOf(params[1])
	if v1.Kind() != reflect.Slice || v2.Kind() != reflect.Slice {
		return false, "expect a slice for both arguments"
	}
	if v1.Len() != v2.Len() {
		return false, "slices have different length"
	}
	// This does the naive n^2 comparison between the slices: each element of
	// v1 must be matched against a distinct, not-yet-claimed element of v2.
	found := make([]bool, v1.Len()) // index in v2
outer:
	for i := 0; i < v1.Len(); i++ {
		item1 := v1.Index(i)
		// Inner bound uses v1.Len(), which equals v2.Len() after the check above.
		for j := 0; j < v1.Len(); j++ {
			if found[j] {
				continue
			}
			item2 := v2.Index(j)
			if deepValueConvertibleEquals(item1, item2) {
				found[j] = true
				continue outer
			}
		}
		return false, fmt.Sprintf("element %+v found in first slice but not second", item1.Interface())
	}
	return true, ""
}
// deepValueConvertibleEquals recursively compares two reflect.Values,
// allowing values of mutually convertible types (notably mixed numeric
// types) to compare equal. Floats are compared with a small relative
// tolerance (0.1%). Panics on kinds it does not know about.
func deepValueConvertibleEquals(v1, v2 reflect.Value) bool {
	// Two zero Values (e.g. both sides were untyped nil) compare equal.
	if !v1.IsValid() && !v2.IsValid() {
		return true
	}
	if !v1.IsValid() || !v2.IsValid() {
		return false
	}
	if !v1.Type().ConvertibleTo(v2.Type()) {
		return false
	}
	switch v2.Kind() {
	// We want to convert to floats if either is a float, so if v2 is a float, swap before converting.
	case reflect.Float32, reflect.Float64:
		v1, v2 = v2, v1
	}
	v2 = v2.Convert(v1.Type())
	kind := v1.Kind()
	switch kind {
	case reflect.Slice:
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		if v1.Len() != v2.Len() {
			return false
		}
		for i := 0; i < v1.Len(); i++ {
			if !deepValueConvertibleEquals(v1.Index(i), v2.Index(i)) {
				return false
			}
		}
		return true
	case reflect.Map:
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		if v1.Len() != v2.Len() {
			return false
		}
		for _, k := range v1.MapKeys() {
			// A key missing from v2 yields a zero Value, rejected by the
			// IsValid checks at the top of the recursion.
			if !deepValueConvertibleEquals(v1.MapIndex(k), v2.MapIndex(k)) {
				return false
			}
		}
		return true
	case reflect.Struct:
		if v1.NumField() != v2.NumField() {
			return false
		}
		for i := 0; i < v1.NumField(); i++ {
			if !deepValueConvertibleEquals(v1.Field(i), v2.Field(i)) {
				return false
			}
		}
		return true
	case reflect.Interface:
		if v1.IsNil() || v2.IsNil() {
			return v1.IsNil() == v2.IsNil()
		}
		return deepValueConvertibleEquals(v1.Elem(), v2.Elem())
	case reflect.String:
		return v1.String() == v2.String()
	case reflect.Bool:
		return v1.Bool() == v2.Bool()
	case reflect.Float32, reflect.Float64:
		// Ignore small rounding errors by comparing the relative difference.
		f1, f2 := v1.Float(), v2.Float()
		if f1 > f2 {
			f1, f2 = f2, f1
		}
		if f2 == 0 {
			return f1 == 0
		}
		// BUG FIX: divide by the magnitude of f2. The previous signed
		// division made any two negative numbers compare equal, because the
		// ratio was always negative and therefore always < 0.001.
		return (f2-f1)/math.Abs(f2) < 0.001
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		return v1.Uint() == v2.Uint()
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return v1.Int() == v2.Int()
	}
	panic(fmt.Sprintf("deepEqual doesn't know about type %s", v1.Type()))
} | internal/util/util.go | 0.696681 | 0.433802 | util.go | starcoder |
package g3
import (
"fmt"
)
// Represents a 4x4 Matrix. Fields are named Mrc for row r, column c, and
// are laid out in row-major order.
type Matrix4x4 struct {
	M11, M12, M13, M14 float32
	M21, M22, M23, M24 float32
	M31, M32, M33, M34 float32
	M41, M42, M43, M44 float32
}
// MakeMatrixFromSlice builds a Matrix4x4 from the first 16 elements of m,
// interpreted in row-major order. Panics if m has fewer than 16 elements.
func MakeMatrixFromSlice(m []float32) Matrix4x4 {
	return Matrix4x4{
		m[0], m[1], m[2], m[3],
		m[4], m[5], m[6], m[7],
		m[8], m[9], m[10], m[11],
		m[12], m[13], m[14], m[15]}
}
// MakeIdentityMatrix returns the 4x4 identity matrix.
func MakeIdentityMatrix() Matrix4x4 {
	return Matrix4x4{
		1.0, 0.0, 0.0, 0.0,
		0.0, 1.0, 0.0, 0.0,
		0.0, 0.0, 1.0, 0.0,
		0.0, 0.0, 0.0, 1.0}
}
// MakeScaleMatrix returns a matrix that scales by x, y and z along the
// respective axes.
func MakeScaleMatrix(x, y, z float32) Matrix4x4 {
	return Matrix4x4{
		x, 0.0, 0.0, 0.0,
		0.0, y, 0.0, 0.0,
		0.0, 0.0, z, 0.0,
		0.0, 0.0, 0.0, 1.0}
}
// MakeTranslationMatrix returns a matrix translating by (x, y, z). The
// offsets live in the last column, i.e. the column-vector convention
// M * v is assumed (see Transform).
func MakeTranslationMatrix(x, y, z float32) Matrix4x4 {
	return Matrix4x4{
		1.0, 0.0, 0.0, x,
		0.0, 1.0, 0.0, y,
		0.0, 0.0, 1.0, z,
		0.0, 0.0, 0.0, 1.0}
}
// MakeXRotationMatrix returns the standard rotation matrix about the X axis
// by angle theta (angle units follow the package-local Cos/Sin helpers,
// presumably radians — confirm).
func MakeXRotationMatrix(theta float32) Matrix4x4 {
	cosTheta := Cos(theta)
	sinTheta := Sin(theta)
	return Matrix4x4{
		1.0, 0.0, 0.0, 0.0,
		0.0, cosTheta, -sinTheta, 0.0,
		0.0, sinTheta, cosTheta, 0.0,
		0.0, 0.0, 0.0, 1.0}
}
// MakeYRotationMatrix returns the standard rotation matrix about the Y axis
// by angle theta (angle units follow the package-local Cos/Sin helpers,
// presumably radians — confirm).
func MakeYRotationMatrix(theta float32) Matrix4x4 {
	cosTheta := Cos(theta)
	sinTheta := Sin(theta)
	return Matrix4x4{
		cosTheta, 0.0, sinTheta, 0.0,
		0.0, 1.0, 0.0, 0.0,
		-sinTheta, 0.0, cosTheta, 0.0,
		0.0, 0.0, 0.0, 1.0}
}
// MakeZRotationMatrix returns the standard rotation matrix about the Z axis
// by angle theta (angle units follow the package-local Cos/Sin helpers,
// presumably radians — confirm).
func MakeZRotationMatrix(theta float32) Matrix4x4 {
	cosTheta := Cos(theta)
	sinTheta := Sin(theta)
	return Matrix4x4{
		cosTheta, -sinTheta, 0.0, 0.0,
		sinTheta, cosTheta, 0.0, 0.0,
		0.0, 0.0, 1.0, 0.0,
		0.0, 0.0, 0.0, 1.0}
}
// MakePerspectiveMatrix returns a gluPerspective-style projection matrix.
// fovy is the full vertical field of view (angle units follow the
// package-local Tan helper, presumably radians — confirm), aspect is
// width/height, and zNear/zFar bound the view frustum.
func MakePerspectiveMatrix(fovy, aspect, zNear, zFar float32) Matrix4x4 {
	f := 1.0 / Tan(fovy/2.0)
	a := 1.0 / (zNear - zFar)
	return Matrix4x4{
		f / aspect, 0.0, 0.0, 0.0,
		0.0, f, 0.0, 0.0,
		0.0, 0.0, (zFar + zNear) * a, 2.0 * zFar * zNear * a,
		0.0, 0.0, -1.0, 0.0}
}
// MakeLookAtMatrix returns a gluLookAt-style view matrix looking from eye
// towards center with the given up direction: a rotation into the camera
// basis (s, u, -f) multiplied by a translation that moves eye to the origin.
func MakeLookAtMatrix(eye, center, up *Vec3) Matrix4x4 {
	f := center.Sub(*eye).Normalized() // forward
	u := up.Normalized()
	s := f.Cross(u)  // side (right)
	u = s.Cross(f)   // re-orthogonalized up
	t := MakeTranslationMatrix(-eye.X, -eye.Y, -eye.Z)
	return Matrix4x4{
		s.X, s.Y, s.Z, 0.0,
		u.X, u.Y, u.Z, 0.0,
		-f.X, -f.Y, -f.Z, 0.0,
		0.0, 0.0, 0.0, 1.0}.Multiply(&t)
}
// Transposed returns a copy of m with rows and columns swapped.
func (m Matrix4x4) Transposed() Matrix4x4 {
	return Matrix4x4{
		m.M11, m.M21, m.M31, m.M41,
		m.M12, m.M22, m.M32, m.M42,
		m.M13, m.M23, m.M33, m.M43,
		m.M14, m.M24, m.M34, m.M44}
}
// Multiply returns the matrix product m1 * m2 (with the column-vector
// convention of Transform, m2 is applied to a vector first).
func (m1 Matrix4x4) Multiply(m2 *Matrix4x4) Matrix4x4 {
	return Matrix4x4{
		m1.M11*m2.M11 + m1.M12*m2.M21 + m1.M13*m2.M31 + m1.M14*m2.M41,
		m1.M11*m2.M12 + m1.M12*m2.M22 + m1.M13*m2.M32 + m1.M14*m2.M42,
		m1.M11*m2.M13 + m1.M12*m2.M23 + m1.M13*m2.M33 + m1.M14*m2.M43,
		m1.M11*m2.M14 + m1.M12*m2.M24 + m1.M13*m2.M34 + m1.M14*m2.M44,
		m1.M21*m2.M11 + m1.M22*m2.M21 + m1.M23*m2.M31 + m1.M24*m2.M41,
		m1.M21*m2.M12 + m1.M22*m2.M22 + m1.M23*m2.M32 + m1.M24*m2.M42,
		m1.M21*m2.M13 + m1.M22*m2.M23 + m1.M23*m2.M33 + m1.M24*m2.M43,
		m1.M21*m2.M14 + m1.M22*m2.M24 + m1.M23*m2.M34 + m1.M24*m2.M44,
		m1.M31*m2.M11 + m1.M32*m2.M21 + m1.M33*m2.M31 + m1.M34*m2.M41,
		m1.M31*m2.M12 + m1.M32*m2.M22 + m1.M33*m2.M32 + m1.M34*m2.M42,
		m1.M31*m2.M13 + m1.M32*m2.M23 + m1.M33*m2.M33 + m1.M34*m2.M43,
		m1.M31*m2.M14 + m1.M32*m2.M24 + m1.M33*m2.M34 + m1.M34*m2.M44,
		m1.M41*m2.M11 + m1.M42*m2.M21 + m1.M43*m2.M31 + m1.M44*m2.M41,
		m1.M41*m2.M12 + m1.M42*m2.M22 + m1.M43*m2.M32 + m1.M44*m2.M42,
		m1.M41*m2.M13 + m1.M42*m2.M23 + m1.M43*m2.M33 + m1.M44*m2.M43,
		m1.M41*m2.M14 + m1.M42*m2.M24 + m1.M43*m2.M34 + m1.M44*m2.M44}
}
// Transform applies the affine part of m to the point v as m * v with an
// implicit homogeneous coordinate w = 1. The fourth row of m is ignored
// (no perspective divide).
func (m Matrix4x4) Transform(v Vec3) Vec3 {
	return Vec3 {
		m.M11*v.X+m.M12*v.Y+m.M13*v.Z+m.M14,
		m.M21*v.X+m.M22*v.Y+m.M23*v.Z+m.M24,
		m.M31*v.X+m.M32*v.Y+m.M33*v.Z+m.M34}
}
// String renders the matrix as four bracketed rows, one per line.
func (m *Matrix4x4) String() string {
	return fmt.Sprintf(
		"/%f %f %f %f\\\n|%f %f %f %f|\n|%f %f %f %f|\n\\%f %f %f %f/",
		m.M11, m.M12, m.M13, m.M14,
		m.M21, m.M22, m.M23, m.M24,
		m.M31, m.M32, m.M33, m.M34,
		m.M41, m.M42, m.M43, m.M44)
} | src/pkg/g3/matrix.go | 0.765462 | 0.523542 | matrix.go | starcoder |
package fractals
import (
"math"
"sync"
mgl "github.com/go-gl/mathgl/mgl32"
)
// UpdateKoch performs one Koch subdivision step over the packed vertex
// slice (5 floats per vertex), fanning one goroutine out per segment and
// concatenating the per-segment results in segment order.
func UpdateKoch(vertices []float32) []float32 {
	const stride = 5 // floats per vertex
	segments := make([]*[]float32, len(vertices)/stride)
	var wg sync.WaitGroup
	for idx := range segments {
		wg.Add(1)
		out := &[]float32{}
		segments[idx] = out
		go workerKoch(&wg, vertices, out, idx*stride)
	}
	wg.Wait()
	flattened := []float32{}
	for _, seg := range segments {
		flattened = append(flattened, *seg...)
	}
	return flattened
}
// workerKoch replaces the line segment starting at float index segID of the
// packed vertex slice with the four segments of one Koch subdivision step,
// writing the result into updatedVertices. Each vertex occupies 5 floats
// (x, y plus three extra components, presumably color — TODO confirm).
func workerKoch(wg *sync.WaitGroup, vertices []float32,
	updatedVertices *[]float32, segID int) {
	defer wg.Done()
	pointsPerVertex := 5
	from := vertices[segID : segID+pointsPerVertex]
	// Wraps around so the final segment closes the curve.
	startingIndex := (segID + pointsPerVertex) % len(vertices)
	to := vertices[startingIndex : startingIndex+pointsPerVertex]
	// Divide each segment into 3 equal parts keeping track of the froms
	// of each segment (tos is just froms[i + 1] and the to value).
	ratio := float32(1.0 / 3.0)
	froms := make([][]float32, 3)
	for i := range froms {
		froms[i] = make([]float32, 5)
	}
	froms[0] = from
	for j := range from {
		froms[1][j] = from[j]*ratio*2 + to[j]*ratio*1
		froms[2][j] = from[j]*ratio*1 + to[j]*ratio*2
	}
	// Get third triangle point using this method:
	// https://stackoverflow.com/questions/50547068/creating-an-equilateral-triangle-for-given-two-points-in-the-plane-python
	mid := mgl.Vec2{(froms[1][0] + froms[2][0]) / 2.0,
		(froms[1][1] + froms[2][1]) / 2.0}
	orig := mgl.Vec2{(froms[1][0] - mid[0]), (froms[1][1] - mid[1])}
	// BUG FIX: Vec2.Mul returns a new vector; the previous code discarded
	// the result, so the scale factor was never applied to the apex offset.
	// NOTE(review): a scale of sqrt(3) alone would give a true equilateral
	// Koch bump; the original constant 3*sqrt(3) is kept — confirm intent.
	orig = orig.Mul(3 * float32(math.Sqrt(3)))
	transform := mgl.Rotate2D(mgl.DegToRad(90))
	point := mid.Add(transform.Mul2x1(orig))
	// NOTE(review): the extra components below are summed, not averaged; if
	// they are colors this doubles their values at the apex — verify.
	fullPoint := []float32{point[0], point[1],
		froms[1][2] + froms[2][2], froms[1][3] + froms[2][3],
		froms[1][4] + froms[2][4]}
	// Add in first segment
	*updatedVertices = append(*updatedVertices, froms[0]...)
	*updatedVertices = append(*updatedVertices, froms[1]...)
	// Add in the triangle segments
	*updatedVertices = append(*updatedVertices, froms[1]...)
	*updatedVertices = append(*updatedVertices, fullPoint...)
	*updatedVertices = append(*updatedVertices, fullPoint...)
	*updatedVertices = append(*updatedVertices, froms[2]...)
	// Add in the final segment
	*updatedVertices = append(*updatedVertices, froms[2]...)
	*updatedVertices = append(*updatedVertices, to...)
} | fractals/fractals.go | 0.553747 | 0.428951 | fractals.go | starcoder |
package rbo
// Go implementation of https://github.com/dlukes/rbo/blob/master/rbo.py
import (
"fmt"
"math"
"github.com/thedahv/keyword-cluster-finder/pkg/rankings"
)
// RBO calculates the rank-biased overlap of 2 SERPs, returning the tight
// lower bound (min), the residual upper bound (res) and the extrapolated
// point estimate (ext). p is the probability of looking for overlap at rank
// k + 1 after having examined rank k, and must lie in [0, 1].
func RBO(a, b rankings.SERP, p float64) (min float64, res float64, ext float64, err error) {
	if p < 0 || p > 1 {
		return 0, 0, 0, fmt.Errorf("p must be between 0 and 1")
	}
	return rboMin(a, b, p, 0), rboRes(a, b, p), rboExt(a, b, p), nil
}
// rboMin calculates the tight lower bound on RBO: the score obtained if the
// two rankings never agree again beyond the evaluated depth.
// depth is the position in the SERP after which we don't consider rankings
// anymore. Set depth to 0 to have the function calculate it automatically
// (the length of the shorter list).
func rboMin(a, b rankings.SERP, p float64, depth int) float64 {
	if depth == 0 {
		depth = min(a.Length(), b.Length())
	}
	// xk: overlap at the final evaluated depth.
	xk := overlap(a, b, depth)
	logTerm := xk * math.Log(1-p)
	var sumTerm float64
	for d := 1.0; d < float64(depth)+1.0; d++ {
		o := overlap(a, b, int(d)) - xk
		val := math.Pow(p, d) / d * o
		sumTerm += val
	}
	return (1 - p) / p * (sumTerm - logTerm)
}
// rboRes calculates the upper bound on residual overlap beyond the
// evaluated depth, i.e. how much the RBO score could still grow if both
// rankings were extended indefinitely.
func rboRes(a, b rankings.SERP, p float64) float64 {
	S, L := orderByLength(a, b)
	s, l := S.Length(), L.Length()
	xl := overlap(a, b, l)
	// f: the depth at which the two rankings could first fully overlap.
	f := int(math.Ceil(float64(l) + float64(s) - xl))
	var term1, term2, term3 float64
	for d := s + 1; d < f+1; d++ {
		term1 += math.Pow(p, float64(d)) / float64(d)
	}
	term1 = float64(s) * term1
	for d := l + 1; d < f+1; d++ {
		term2 += math.Pow(p, float64(d)) / float64(d)
	}
	term2 = float64(l) * term2
	for d := 1; d < f+1; d++ {
		term3 += math.Pow(p, float64(d)) / float64(d)
	}
	term3 = xl*math.Log(1.0/(1.0-p)) - term3
	// BUG FIX: the three terms must be summed, not multiplied; see the
	// reference implementation (dlukes/rbo, rbo_res) and the RBO paper.
	return math.Pow(p, float64(s)) +
		math.Pow(p, float64(l)) -
		math.Pow(p, float64(f)) -
		(1.0-p)/p*(term1+term2+term3)
}
// rboExt returns the RBO point estimate based on extrapolating the observed
// overlap beyond the evaluated depth.
func rboExt(a, b rankings.SERP, p float64) float64 {
	S, L := orderByLength(a, b)
	s, l := S.Length(), L.Length()
	// Overlap at the end of the longer and shorter list, respectively.
	xl := overlap(a, b, l)
	xs := overlap(a, b, s)
	var sum1, sum2 float64
	// Observed agreement up to the longer list's depth.
	for d := 1; d < l+1; d++ {
		sum1 += math.Pow(p, float64(d)) * agreement(a, b, d)
	}
	// Extrapolation term for depths beyond the shorter list.
	for d := s + 1; d < l+1; d++ {
		sum2 += math.Pow(p, float64(d)) * xs * float64(d-s) / float64(s) / float64(d)
	}
	term1 := (1.0 - p) / p * (sum1 + sum2)
	term2 := math.Pow(p, float64(l)) * ((xl-xs)/float64(l) + xs/float64(s))
	return term1 + term2
}
// overlap returns the number of shared values between the two rankings at
// the given depth: the agreement scaled by the effective (clamped) depth.
func overlap(a, b rankings.SERP, depth int) float64 {
	effectiveDepth := min(depth, a.Length(), b.Length())
	return agreement(a, b, depth) * float64(effectiveDepth)
}
// agreement calculates the proportion of shared values between the two
// rankings at a given depth: 2*|intersection| / (|prefixA| + |prefixB|).
func agreement(a, b rankings.SERP, depth int) float64 {
	shared, lenA, lenB := rawOverlap(a, b, depth)
	return 2.0 * float64(shared) / float64(lenA+lenB)
}
// rawOverlap returns the size of the intersection of the two rankings'
// top-`depth` prefixes, along with the lengths of those prefixes. It
// assumes a.Members and b.Members are unique, satisfying the expectation
// that the lists behave as sets. The prefixes are only read, never
// mutated, so plain sub-slicing is safe here.
func rawOverlap(a, b rankings.SERP, depth int) (int, int, int) {
	topA := a.Members[:min(depth, len(a.Members))]
	topB := b.Members[:min(depth, len(b.Members))]
	shared := intersection(topA, topB)
	return len(shared), len(topA), len(topB)
}
// min returns the smallest of one or more ints. Panics if called with no
// arguments.
func min(nums ...int) int {
	smallest := nums[0]
	for _, n := range nums {
		if n < smallest {
			smallest = n
		}
	}
	return smallest
}
// intersection returns the domains present in both member lists. It assumes
// each list's domains are unique (set semantics), so a count of two or more
// means the domain occurs in both lists.
func intersection(a, b []rankings.SERPMember) []string {
	counts := make(map[string]int)
	for _, members := range [][]rankings.SERPMember{a, b} {
		for _, member := range members {
			counts[member.Domain]++
		}
	}
	var shared []string
	for domain, n := range counts {
		if n >= 2 {
			shared = append(shared, domain)
		}
	}
	return shared
}
// orderByLength returns the two SERPs ordered by length, shorter first.
// On a tie, a is returned as the shorter one.
func orderByLength(a, b rankings.SERP) (smaller rankings.SERP, larger rankings.SERP) {
	smaller, larger = a, b
	if b.Length() < a.Length() {
		smaller, larger = b, a
	}
	return
} | pkg/rbo/rbo.go | 0.862945 | 0.598899 | rbo.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.