content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import Type, TypeVar

# Bug fix: the original `from re import T` pulled in the deprecated
# re.TEMPLATE flag, not a type variable. Declare a real TypeVar so the
# `Type[T] -> T` signature is meaningful to type checkers.
T = TypeVar("T")


def load_instance(alfacase_content: DescriptionDocument, class_: Type[T]) -> T:
    """
    Create an instance of ``class_`` with the attributes found in ``alfacase_content``.

    :param alfacase_content: parsed ALFAcase document to read attributes from.
    :param class_: the case-description class to instantiate.
    :return: the populated instance, with multi-input flags updated.
    """
    # Map ALFAcase attribute names to loaders for this description class.
    alfacase_to_case_description = get_case_description_attribute_loader_dict(class_)
    case_values = to_case_values(alfacase_content, alfacase_to_case_description)
    item_description = class_(**case_values)
    # Post-process: reconcile multi-input flags against the source document.
    return update_multi_input_flags(alfacase_content, item_description)
import os


def build_pair_output_path(indices: list, save_dir: str) -> (str, str):
    """Create the output directory tree for one data pair.

    :param indices: indices of the pair; the last entry selects the label
    :param save_dir: root directory for the outputs
    :return: - pair_dir, str, directory for saving the moving/fixed image
             - label_dir, str, directory for saving the rest of the outputs
    """
    # Directory name is built from every index except the trailing label index.
    joined = "_".join(str(idx) for idx in indices[:-1])
    pair_dir = os.path.join(save_dir, "pair_" + joined)
    os.makedirs(pair_dir, exist_ok=True)
    label_idx = indices[-1]
    if label_idx < 0:
        # Negative label index: no dedicated label directory is created.
        return pair_dir, pair_dir
    label_dir = os.path.join(pair_dir, f"label_{label_idx}")
    os.makedirs(label_dir, exist_ok=True)
    return pair_dir, label_dir
def number_scenarios(scenes):
    """
    Attach a 'scenario_number' and 'scenario_name' variable to each scenario.
    The hash table for each scenario is altered in place!
    """
    for number, scene in enumerate(scenes):
        scene[1]['scenario_name'] = scene[0]
        scene[1]['scenario_number'] = number
    return check_scenarios(scenes)
def trimscan_corner_plot(det, element, ranges, chips=range(16)):
    """Build a corner plot of trim-scan fit parameters for one element.

    Collects, per fit parameter a0..a5, the trim-fit value of every pixel
    that passed the energy fit, sits on one of the requested chips, and has
    a good trim fit for ``element``; the data is handed to corner_plot.
    """
    pixels = det.get_pixels()
    labels = [r'$a_0$', r'$a_1$', r'$a_2$', r'$a_3$', r'$a_4$', r'$a_5$']
    n_rows, n_cols = pixels.shape[0], pixels.shape[1]
    data = []
    # One data series per fit parameter.
    for param_idx in range(len(labels)):
        values = []
        for row in range(n_rows):
            for col in range(n_cols):
                pixel = pixels[row, col]
                # Skip data which is not used - this excludes all gap pixels.
                if not (pixel.good_enfit and pixel.chip in chips):
                    continue
                if element not in pixel.elements:
                    continue
                elem_idx = pixel.elements.index(element)
                if pixel.good_trimfits[elem_idx]:
                    values.append(pixel.trimfit_params[elem_idx, param_idx])
        data.append(values)
    # Make the plot using the general corner plot function.
    return corner_plot(data, labels, ranges, figsize=(16, 12), plt_label=element)
def find_key_value_in_list(listing, key, value):
    """Return the first dict in ``listing`` whose ``key`` equals ``value``.

    Entries that do not contain ``key`` are skipped. (The previous filter
    indexed ``obj[key]`` directly, raising KeyError for such entries, which
    contradicted the commented-out original that checked membership first.)

    :param listing: iterable of dicts to search
    :param key: dictionary key to compare
    :param value: value to match against
    :return: the matching dict, or {} when no entry matches
    """
    dict_found = next(
        (obj for obj in listing if key in obj and obj[key] == value), None
    )
    return dict_found if dict_found is not None else {}
def request_test_suite_started(etos, activity_id):
    """Request test suite started from graphql.
    :param etos: Etos Library instance for communicating with ETOS.
    :type etos: :obj:`etos_lib.etos.ETOS`
    :param activity_id: ID of activity in which the test suites started
    :type activity_id: str
    :return: Iterator of test suite started graphql responses.
    :rtype: iterator
    """
    query = TEST_SUITE_STARTED % activity_id
    for response in request(etos, query):
        if not response:
            continue
        # Yield every node found in the first non-empty response, then stop.
        nodes = etos.graphql.search_for_nodes(response, "testSuiteStarted")
        for _, test_suite_started in nodes:
            yield test_suite_started
        return None  # StopIteration
    return None
def _ensure_str(value):
    """Coerce *value* to ``str``, decoding bytes as UTF-8.

    Stdlib replacement for ``six.ensure_str`` (six is unnecessary on
    Python 3); raises TypeError for non-str/bytes values, like six did.
    """
    if isinstance(value, bytes):
        return value.decode("utf-8")
    if isinstance(value, str):
        return value
    raise TypeError(f"not expecting type '{type(value)}'")


def cast_env(env):
    """Encode all the environment keys and values as ``str``.

    This assumes that all the data is or can be represented as UTF-8.
    """
    return {_ensure_str(key): _ensure_str(value) for key, value in env.items()}
def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):
    """Return the contingency table for all regions in matched segmentations.
    Parameters
    ----------
    seg : np.ndarray, int type, arbitrary shape
        A candidate segmentation.
    gt : np.ndarray, int type, same shape as `seg`
        The ground truth segmentation.
    ignore_seg : list of int, optional
        Values to ignore in `seg`. Voxels in `seg` having a value in this list
        will not contribute to the contingency table. (default: [0])
    ignore_gt : list of int, optional
        Values to ignore in `gt`. Voxels in `gt` having a value in this list
        will not contribute to the contingency table. (default: [0])
    norm : bool, optional
        Whether to normalize the table so that it sums to 1.
    Returns
    -------
    cont : scipy.sparse.csc_matrix
        A contingency table. `cont[i, j]` will equal the number of voxels
        labeled `i` in `seg` and `j` in `gt`. (Or the proportion of such voxels
        if `norm=True`.)
    """
    segr = seg.ravel()
    gtr = gt.ravel()
    # Fix: np.bool was removed from NumPy (1.24); the builtin bool is the
    # correct dtype argument.
    ignored = np.zeros(segr.shape, bool)
    data = np.ones(len(gtr))
    for i in ignore_seg:
        ignored[segr == i] = True
    for j in ignore_gt:
        ignored[gtr == j] = True
    # Ignored voxels contribute weight 0 to the (seg, gt) co-occurrence counts.
    data[ignored] = 0
    cont = sparse.coo_matrix((data, (segr, gtr))).tocsc()
    if norm:
        cont /= float(cont.sum())
    return cont
import logging


def create_object_detection_training(
    train_object_detection_model_request: TrainImageModel,
):
    """[Train a Object Detection Model in AutoML GCP]
    Args:
        train_object_detection_model_request (TrainImageModel): [Based on Input Schema]
    Raises:
        error: [Error]
    Returns:
        [type]: [description]
    """
    try:
        logging.info(
            f"Create Object Detection Model Router: {train_object_detection_model_request}"
        )
        # Delegate the actual training kickoff to the controller layer.
        controller = TrainModelController()
        return controller.train_object_detection_model_controller(
            request=train_object_detection_model_request
        )
    except Exception as error:
        # Log and re-raise so the API layer can translate the failure.
        logging.error(f"{error=}")
        raise error
def jwt_authentication(secret, exc=exceptions.PermissionDenied()):
    """
    Provide authentication for a view that must have a valid JWT token for the provided secret key.

    :param secret: The secret key that validates the jwt token provided in the request headers
    :param exc: The exception to throw if the token is not valid
    :return: The request result
    """
    def decorator(f):
        @wraps(f)
        @decorators.context(f)
        def wrapper(context, *args, **kwargs):
            # Get the auth header
            token = context.headers.get('Authorization', None)
            if not token:
                raise exc
            # Validate the token
            try:
                token = token.encode('utf-8')
                # Fix: `algorithms` must be a list; passing a bare string is
                # invalid in PyJWT. The `verify=True` kwarg was removed in
                # PyJWT 2.x (verification is always performed).
                jwt.decode(token, key=secret, algorithms=['HS256'])
            except jwt.InvalidTokenError:
                # InvalidTokenError is the base class, so expired tokens and
                # bad signatures are rejected too, not only malformed tokens.
                raise exc
            # Token was valid, call the wrapped function
            return f(*args, **kwargs)
        return wrapper
    return decorator
def strategy_regret(meta_games, subgame_index, ne=None, subgame_ne=None):
    """
    Calculate the strategy regret based on a complete payoff matrix for PSRO.
    strategy_regret of player equals to nash_payoff in meta_game - fix opponent nash strategy, player deviates to subgame_nash
    Assume all players have the same number of policies.
    :param meta_games: meta_games in PSRO
    :param subgame_index: subgame to evaluate, redundant if subgame_nash supplied
    :param: nash: equilibrium vector
    :param: subgame_ne: equilibrium vector
    :return: a list of regret, one for each player.
    """
    num_players = len(meta_games)
    # Number of policies added on top of the subgame.
    num_new_pol = np.shape(meta_games[0])[0] - subgame_index
    # Solve for the full-game equilibrium unless one was supplied.
    ne = nash_solver(meta_games, solver="gambit") if not ne else ne
    # Index sets restricting every player to the first `subgame_index` policies.
    index = [list(np.arange(subgame_index)) for _ in range(num_players)]
    submeta_games = [ele[np.ix_(*index)] for ele in meta_games]
    subgame_ne = nash_solver(submeta_games, solver="gambit") if not subgame_ne else subgame_ne
    # Joint strategy distribution implied by the full-game equilibrium marginals.
    nash_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(ne)
    regrets = []
    for i in range(num_players):
        # Player i's expected payoff under the full-game equilibrium.
        ne_payoff = np.sum(meta_games[i] * nash_prob_matrix)
        # Deviate player i to the subgame equilibrium, zero-padded to full size.
        dev_prob = ne.copy()
        dev_prob[i] = list(np.append(subgame_ne[i], [0 for _ in range(num_new_pol)]))
        dev_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
        subgame_payoff = np.sum(meta_games[i] * dev_prob_matrix)
        regrets.append(ne_payoff - subgame_payoff)
    return regrets
def convert_int_to_form(num: int, form_num: int) -> int:
    """Convert a decimal integer to its base-``form_num`` representation,
    returned as an integer whose decimal digits spell the converted number.

    Supports conversion to octal and binary forms, e.g.
    ``convert_int_to_form(8, 2) == 1000``.

    :param num: non-negative decimal integer to convert
    :param form_num: target base (e.g. 2 or 8)
    :return: integer whose decimal digits are the base-``form_num`` digits
    """
    digits = []
    while num > 0:
        num, remainder = divmod(num, form_num)
        digits.insert(0, remainder)
    # Fold the digit list into one decimal-looking integer. The original
    # returned str(output), contradicting the declared `-> int` return type;
    # this also handles num == 0 (empty digit list) cleanly.
    output = 0
    for digit in digits:
        output = output * 10 + digit
    return output
def less_equal(x, y, cond=None):
    """
    This OP returns the truth value of :math:`x <= y` elementwise,
    which is equivalent function to the overloaded operator `<=`.

    :param x: first MPC operand variable.
    :param y: second MPC operand variable.
    :param cond: optional output variable to hold the result; a new bool
        variable is created when None.
    :return: variable holding the elementwise comparison result.
    """
    helper = MpcLayerHelper("less_equal", **locals())
    if cond is None:
        # Create a bool output variable and detach it from the gradient graph.
        cond = helper.create_variable_for_type_inference(dtype='bool')
        cond.stop_gradient = True
    attrs = dict()
    # Dispatch to the MPC elementwise less-equal operator.
    helper.append_op(
        type='mpc_less_equal',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]},
        attrs=attrs)
    return cond
def _expiration(timeout, time_format=None):
    """
    Return an expiration time
    :param timeout: When ("now" or a number of minutes)
    :param time_format: The format of the returned value
    :return: A timeout date
    """
    if timeout != "now":
        # validity time should match lifetime of assertions
        return time_util.in_a_while(minutes=timeout, time_format=time_format)
    return time_util.instant(time_format)
import os
def get_default_opts(project_name, **aux_opts):
    """
    Creates default options using auxiliary options as keyword argument
    Use this function if you want to use PyScaffold from another application
    in order to generate an option dictionary that can than be passed to
    :obj:`create_project`.
    :param project_name: name of the project
    :param aux_opts: auxiliary options as keyword parameters
    :return: options with default values set as dictionary
    """
    # Merge the default options generated by argparse
    opts = parse_args([project_name])
    # Remove inadvertent double definition of project_name
    aux_opts.pop('project', None)
    opts.update(aux_opts)
    # Fill in every option the caller did not supply with a sensible default.
    opts.setdefault('package', utils.make_valid_identifier(opts['project']))
    opts.setdefault('author', info.username())
    opts.setdefault('email', info.email())
    opts.setdefault('release_date', date.today().strftime('%Y-%m-%d'))
    opts.setdefault('year', date.today().year)
    opts.setdefault('license', 'none')
    opts.setdefault('description', 'Add a short description here!')
    opts.setdefault('url', 'http://...')
    opts.setdefault('version', pyscaffold.__version__)
    # reStructuredText title: project name with '=' over- and underline.
    opts.setdefault('title',
                    '='*len(opts['project']) + '\n' + opts['project'] + '\n' +
                    '='*len(opts['project']))
    classifiers = ['Development Status :: 4 - Beta',
                   'Programming Language :: Python']
    opts.setdefault('classifiers', utils.list2str(
        classifiers, indent=4, brackets=False, quotes=False, sep=''))
    # NOTE(review): duplicate of the earlier setdefault('url') — has no effect.
    opts.setdefault('url', 'http://...')
    # Initialize empty list of all requirements
    opts.setdefault('requirements', list())
    # Derive the package layout from the (possibly empty) namespace option.
    opts['namespace'] = utils.prepare_namespace(opts['namespace'])
    if opts['namespace']:
        opts['root_pkg'] = opts['namespace'][0]
        opts['namespace_pkg'] = ".".join([opts['namespace'][-1],
                                          opts['package']])
    else:
        opts['root_pkg'] = opts['package']
        opts['namespace_pkg'] = opts['package']
    if opts['update']:
        # Updating requires an existing project directory on disk.
        if not os.path.exists(project_name):
            raise RuntimeError(
                "Project {project} does not exist and thus cannot be "
                "updated!".format(project=project_name))
        opts = info.project(opts)
        # Reset project name since the one from setup.cfg might be different
        opts['project'] = project_name
    if opts['django']:
        # Django scaffolds overwrite files, hence force mode.
        opts['force'] = True
        opts['package'] = opts['project']  # since this is required by Django
        opts['requirements'].append('django')
    if opts['cookiecutter_template']:
        opts['force'] = True
    return opts
def get_vgg_model(image_size, num_classes):
    """Get VGG16 model"""
    inputs = Input(shape=[*image_size, 3])
    backbone = VGG16(
        include_top=False,
        weights="imagenet",
        classes=num_classes,
        input_tensor=inputs,
        classifier_activation=None,
    )
    # Freeze the ImageNet-pretrained backbone.
    backbone.trainable = False
    # Route the backbone embeddings through average pooling into a new head.
    features = GlobalAveragePooling2D(name="avg_pooling")(backbone.output)
    features = BatchNormalization()(features)
    outputs = Dense(num_classes, activation="softmax", name="preds")(features)
    return Model(inputs, outputs, name="VGG16")
def random_weights(n_i: int, n_j: int, axis: int = 0, seed: int | None = None) -> ndarray:
"""Generate random weights for producer-injector gains.
Args
----
n_i : int
n_j : int
axis : int, default is 0
seed : int, default is None
Returns
-------
gains_guess: ndarray
"""
rng = np.random.default_rng(seed)
limit = 10 * (n_i if axis == 0 else n_j)
vec = rng.integers(0, limit, (n_i, n_j))
axis_sum = vec.sum(axis, keepdims=True)
return vec / axis_sum | 67d420db30a703d33174af9238d71fc8f68fd61f | 3,628,316 |
def ensure_session(session=None):
    """If session is None, create a default session and return it. Otherwise return the session passed in"""
    return boto3.session.Session() if session is None else session
def reset(label):
    """
    @api {delete} /:label Reset counter
    @apiName ResetCounter
    @apiGroup Counter
    @apiParam {String} label Counter label.
    @apiSuccess {Number} counter Zero value.
    """
    # Remove the stored counter so it restarts from zero.
    store.delete(label)
    response = jsonify(counter=0)
    return response
def remove_duplicates(df, by=("full_text",)):
    """Remove duplicate rows from ``df``, keeping the first occurrence.

    :param df: input DataFrame
    :param by: column labels used to identify duplicates
    :return: DataFrame without the duplicate rows
    """
    # drop_duplicates is the idiomatic, equivalent form of masking with
    # duplicated(); a tuple default avoids the mutable-default pitfall.
    return df.drop_duplicates(subset=list(by), keep="first")
import gzip
import os
import re


def read_ecmwf_corrections(base_dir, LMAX, months, MMAX=None):
    """
    Read atmospheric jump corrections from Fagiolini et al. (2015)

    Arguments
    ---------
    base_dir: Working data directory for GRACE/GRACE-FO data
    LMAX: Upper bound of Spherical Harmonic Degrees
    months: list of GRACE/GRACE-FO months

    Keyword arguments
    -----------------
    MMAX: Upper bound of Spherical Harmonic orders

    Returns
    -------
    clm: atmospheric correction cosine spherical harmonics
    slm: atmospheric correction sine spherical harmonics
    """
    #-- correction files (each covers a fixed span of GRACE months)
    corr_file = {}
    corr_file['GAE'] = 'TN-08_GAE-2_2006032-2010031_0000_EIGEN_G---_0005.gz'
    corr_file['GAF'] = 'TN-09_GAF-2_2010032-2015131_0000_EIGEN_G---_0005.gz'
    corr_file['GAG'] = 'TN-10_GAG-2_2015132-2099001_0000_EIGEN_G---_0005.gz'
    #-- atmospheric correction coefficients
    atm_corr_clm = {}
    atm_corr_slm = {}
    #-- number of months to consider in analysis
    n_cons = len(months)
    #-- set maximum order if not equal to maximum degree
    MMAX = LMAX if (MMAX is None) else MMAX
    #-- iterate through python dictionary keys (GAE, GAF, GAG)
    for key, val in corr_file.items():
        #-- allocate for clm and slm of atmospheric corrections
        atm_corr_clm[key] = np.zeros((LMAX+1, MMAX+1))
        atm_corr_slm[key] = np.zeros((LMAX+1, MMAX+1))
        #-- GRACE correction files are compressed gz files
        with gzip.open(os.path.join(base_dir, val), 'rb') as f:
            file_contents = f.read().decode('ISO-8859-1').splitlines()
        #-- for each line in the GRACE correction file
        for line in file_contents:
            #-- find if line starts with GRCOF2
            if bool(re.match(r'GRCOF2', line)):
                #-- split the line into individual components
                line_contents = line.split()
                #-- degree and order for the line
                #-- fix: np.int/np.float were removed from NumPy; the
                #-- builtin int/float are the correct replacements
                l1 = int(line_contents[1])
                m1 = int(line_contents[2])
                #-- if degree and order are below the truncation limits
                if ((l1 <= LMAX) and (m1 <= MMAX)):
                    atm_corr_clm[key][l1, m1] = float(line_contents[3])
                    atm_corr_slm[key][l1, m1] = float(line_contents[4])
    #-- create output atmospheric corrections to be removed/added to data
    atm_corr = {}
    atm_corr['clm'] = np.zeros((LMAX+1, LMAX+1, n_cons))
    atm_corr['slm'] = np.zeros((LMAX+1, LMAX+1, n_cons))
    #-- for each considered date
    for i, grace_month in enumerate(months):
        #-- select the correction covering this month's span
        #-- (months before 50 receive no correction and stay zero)
        if (grace_month >= 50) & (grace_month <= 97):
            atm_corr['clm'][:, :, i] = atm_corr_clm['GAE'][:, :]
            atm_corr['slm'][:, :, i] = atm_corr_slm['GAE'][:, :]
        elif (grace_month >= 98) & (grace_month <= 161):
            atm_corr['clm'][:, :, i] = atm_corr_clm['GAF'][:, :]
            atm_corr['slm'][:, :, i] = atm_corr_slm['GAF'][:, :]
        elif (grace_month > 161):
            atm_corr['clm'][:, :, i] = atm_corr_clm['GAG'][:, :]
            atm_corr['slm'][:, :, i] = atm_corr_slm['GAG'][:, :]
    #-- return the atmospheric corrections
    return atm_corr
def tex_parenthesis(obj):
    """Return obj with parenthesis if there is a plus or minus sign."""
    text = str(obj)
    needs_parens = "+" in text or "-" in text
    return f"({text})" if needs_parens else text
def swap_target_nonterminals(target):
    """
    Swap non-terminal tokens.
    :param target: List of target tokens
    :return: List of target tokens with 'X_0' and 'X_1' exchanged
    """
    swap = {'X_0': 'X_1', 'X_1': 'X_0'}
    return [swap.get(token, token) for token in target]
def validate_nd_array(x):
    """Casts x as a numpy array of the original input shape."""
    # Flatten to 1d (capturing the original shape), then restore that shape.
    flat, original_shape = _get_shape_and_return_1d_array(x)
    return flat.reshape(original_shape)
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' tracks the alias objects currently being expanded (used to
    detect cycles); 'cache' memoizes fully-expanded replacement trees by
    alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        # Guard against self- or mutually-recursive alias definitions.
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # Parameterized alias: expand each call-site argument, then
            # substitute the arguments into the expanded body.
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # Not an alias node: expand each child subtree in place.
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
import os
import stat


def is_executable_file(path):
    """Checks that path is an executable regular file (or a symlink to a file).

    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``, but
    on some platforms :func:`os.access` gives us the wrong answer, so this
    checks permission bits directly.

    Note
    ----
    This function is taken from the pexpect module, see module doc-string for
    license.
    """
    # follow symlinks,
    fpath = os.path.realpath(path)
    # return False for non-files (directories, fifo, etc.)
    if not os.path.isfile(fpath):
        return False
    # On Solaris, etc., "If the process has appropriate privileges, an
    # implementation may indicate success for X_OK even if none of the
    # execute file permission bits are set."
    #
    # For this reason, it is necessary to explicitly check st_mode.
    # Stat once and reuse the result — the original stat'ed the same file
    # three times (wasteful and race-prone) and recomputed user_gids twice.
    st = os.stat(fpath)
    mode = st.st_mode
    # check if `other', that is anybody, may read and execute.
    if mode & stat.S_IROTH and mode & stat.S_IXOTH:
        return True
    # get current user's group ids, and check if `group',
    # when matching ours, may read and execute.
    user_gids = os.getgroups() + [os.getgid()]
    if (st.st_gid in user_gids and
            mode & stat.S_IRGRP and mode & stat.S_IXGRP):
        return True
    # finally, if file owner matches our effective userid,
    # check if `user' may read and execute.
    if (st.st_uid == os.geteuid() and
            mode & stat.S_IRUSR and mode & stat.S_IXUSR):
        return True
    return False
def to_svd_numpy(numpy_array, compress_rate):
    """
    We transform an image to its SVD representation: U x D x V^T.
    We return as output for each channel X, 2 channels with values: U x D, and V^T.
    The initial size is n^2, the final size is: 2np

    :param numpy_array: the input image (channels x H x W, with H == W)
    :param compress_rate: the compression rate (percentage in [0, 100])
    :return: the image with 2 times more channels
    """
    H = numpy_array.shape[-2]
    W = numpy_array.shape[-1]
    assert H == W
    c = compress_rate / 100
    # Keep `index` singular values so that the compressed size 2*H*index
    # equals (1 - c) * H * W:
    # (1-c) = (2*H*index) / (H * W)  =>  index = (1-c) * W / 2
    index = int((1 - c) * W / 2)
    try:
        u, s, vh = svd(a=numpy_array, full_matrices=False)
        u_c = u[..., :index]
        s_c = s[..., :index]
        vh_c = vh[..., :index, :]
        # Fold the singular values into U so only two factors remain.
        u_s = u_c * s_c[..., None, :]
        v_s = vh_c.transpose((0, 2, 1))
        result = np.concatenate((u_s, v_s), axis=0)
    except RuntimeError as ex:
        # Bug fix: the original concatenated str + exception, which itself
        # raised TypeError and masked the real SVD error.
        msg = f"SVD compression problem: {ex}"
        print(msg)
        raise ex
    return result
def _get_login_player_name(html):
    """
    Extract the logged-in user name from the given HTML.

    :param html: HTML of the voting page
    :type html: str
    :return: logged-in user name, or None when no player-name box is present
    :rtype: str
    """
    # Parse the page and pull the stripped text out of the player_name_box div.
    soup = bs4.BeautifulSoup(html, 'html.parser')
    player_name_box = soup.find('div', {'class': 'player_name_box'})
    if player_name_box:
        return player_name_box.text.strip()
    else:
        return None
from datetime import datetime


def get_max_streak(submissions):
    """
    Get the maximum of all streaks.

    @param submissions (List of tuples): [(date object, count), ...] in
        chronological order
    @return (Tuple): (max_streak, total_submissions, current_streak,
        number of active days); all zeros when there are no submissions
    """
    streak = 0
    max_streak = 0
    prev = curr = None
    total_submissions = 0
    for i in submissions:
        total_submissions += i[1]
        if prev is None and streak == 0:
            prev = i[0]
            streak = 1
        else:
            curr = i[0]
            if (curr - prev).days == 1:
                # Consecutive day: streak continues.
                streak += 1
            elif curr != prev:
                # Gap in the dates: streak restarts.
                streak = 1
            prev = curr
        if streak > max_streak:
            max_streak = streak
    # Bug fix: with `from datetime import datetime` in scope, the original
    # `datetime.datetime.today()` raised AttributeError.
    today = datetime.today().date()
    # There are no submissions in the database for this user
    if prev is None:
        return (0,) * 4
    # Check if the last streak is continued till today
    if (today - prev).days > 1:
        streak = 0
    return max_streak, total_submissions, streak, len(submissions)
import requests


def check_node_reachable(context):
    """
    Returns whether the specified node IP is reachable and informs user.

    Sends a transition notification only when reachability flips, tracking
    the last known state in ``user_data['is_node_reachable']``.
    """
    chat_id = context.job.context['chat_id']
    user_data = context.job.context['user_data']
    # First check: assume reachable until proven otherwise.
    user_data.setdefault('is_node_reachable', True)
    response = requests.get(NODE_STATUS_ENDPOINT)
    is_node_currently_reachable = response.status_code == 200
    # Idiom fix: compare booleans directly instead of `== True` / `== False`.
    if user_data['is_node_reachable'] and not is_node_currently_reachable:
        # Transition reachable -> unreachable: notify once.
        user_data['is_node_reachable'] = False
        text = 'The specified Node cannot be reached! 💀' + '\n' + \
               'IP: ' + NODE_IP + '\n' + \
               'Node monitoring will be restricted to publicly available node attributes until it is reachable again.' + '\n\n' + \
               'Please check your Terra Node immediately!'
        try_message_to_all_platforms(context=context, chat_id=chat_id, text=text)
    elif not user_data['is_node_reachable'] and is_node_currently_reachable:
        # Transition unreachable -> reachable: notify once.
        user_data['is_node_reachable'] = True
        text = 'The specified Node is reachable again! 👌' + '\n' + \
               'Monitoring of node specific attributes resumes.'
        try_message_to_all_platforms(context=context, chat_id=chat_id, text=text)
    return is_node_currently_reachable
def padZeros(numberString, numZeros, insertSide):
    """Return a string padded with zeros on the left or right side.

    :param numberString: string to pad
    :param numZeros: number of '0' characters to add
    :param insertSide: 'left' or 'right'
    :raises ValueError: if insertSide is neither 'left' nor 'right'
        (the original silently returned None in that case)
    """
    padding = '0' * numZeros
    if insertSide == 'left':
        return padding + numberString
    if insertSide == 'right':
        return numberString + padding
    raise ValueError(f"insertSide must be 'left' or 'right', got {insertSide!r}")
def stations():
    """ Return a JSON list of stations from the dataset."""
    # Create our session (link) from Python to the DB
    session = Session(engine)
    # Stations joined with their measurement counts, busiest first.
    rows = session.query(Measurement.station, Station.name, func.count(Measurement.station)).\
        filter(Measurement.station == Station.station).\
        group_by(Measurement.station).\
        order_by(func.count(Measurement.station).desc()).all()
    session.close()
    # Serialize each row into a JSON-friendly dict.
    all_stations = [
        {"Station": station, "Name": name, "Count": count}
        for station, name, count in rows
    ]
    return jsonify(all_stations)
def eval_sot_accuracy_robustness(results,
                                 annotations,
                                 burnin=10,
                                 ignore_unknown=True,
                                 videos_wh=None):
    """Calculate accuracy and robustness over all tracking sequences.
    Args:
        results (list[list[ndarray]]): The first list contains the
            tracking results of each video. The second list contains the
            tracking results of each frame in one video. The ndarray have two
            cases:
                - bbox: denotes the normal tracking box in [x1, y1, w, h]
                    format.
                - special tracking state: [0] denotes the unknown state,
                    namely the skipping frame after failure, [1] denotes the
                    initialized state, and [2] denotes the failed state.
        annotations (list[ndarray]): The list contains the gt_bboxes of each
            video. The ndarray is gt_bboxes of one video. It's in (N, 4) shape.
            Each bbox is in (x1, y1, w, h) format.
        burnin: number of frames that have to be ignored after the
            re-initialization when calculating accuracy. Default is 10.
        ignore_unknown (bool): whether ignore the skipping frames after
            failures when calculating accuracy. Default is True.
        videos_wh (list[tuple(width, height), ...]): The list contains the
            width and height of each video. Default is None.
    Return:
        dict{str: float}: accuracy and robustness in EAO evaluation metric.
    """
    if vot is None:
        raise ImportError(
            'Please run'
            'pip install git+https://github.com/votchallenge/toolkit.git'
            'to manually install vot-toolkit')
    accuracy = 0
    num_fails = 0
    weight = 0
    # NOTE(review): videos_wh defaults to None but is indexed below as
    # videos_wh[i]; callers apparently must always pass it — confirm.
    for i, (gt_traj, pred_traj) in enumerate(zip(annotations, results)):
        assert len(gt_traj) == len(pred_traj)
        # The first frame must be the single-element init-state marker [1].
        assert len(pred_traj[0]) == 1 and pred_traj[0][0] == 1
        num_fails += count_failures(pred_traj)
        # Per-video accuracy is weighted by sequence length.
        accuracy += calc_accuracy(
            gt_traj,
            pred_traj,
            burnin=burnin,
            ignore_unknown=ignore_unknown,
            video_wh=videos_wh[i]) * len(pred_traj)
        weight += len(pred_traj)
    accuracy /= weight
    # Robustness: failures per frame, scaled to percent.
    robustness = num_fails / weight * 100
    return dict(accuracy=accuracy, robustness=robustness, num_fails=num_fails)
def spares_kdt_compute_mean_std_v2(res_dicts, spaces, arch_key='eval_arch', perf_key='eval_perf', use_hash=False, sort_best_model_fn=None):
    """ Just compute the average sKdT

    Parameters
    ----------
    res_dicts : list
        dict of running, multiple seed run contains inside
    spaces : dict
        mapping from search-space name to a benchmark space object that
        supports query_gt_perfs(model_ids)
    arch_key : str, optional
        [description], by default 'eval_arch'
    perf_key : str, optional
        [description], by default 'eval_perf'
    use_hash : bool, optional
        identify architectures via sort_best_model_fn instead of int ids
    sort_best_model_fn : callable, optional
        hashing/sorting function used when use_hash is True

    Returns
    -------
    tuple (mean, std) of the sparse Kendall tau across runs

    Raises
    ------
    ValueError
        if res_dicts is empty
    """
    if not len(res_dicts) > 0:
        raise ValueError("Dict passed in is wrong.", res_dicts)
    arch_perf = {}
    skdt = []
    for res_dict in res_dicts:
        space = res_dict['args'].search_space
        try:
            eval_perf = res_dict[perf_key]
            eval_arch = res_dict[arch_key]
        except KeyError:
            # Skip runs that never logged evaluation results.
            continue
        # process model_ids
        model_ids = []
        for a in eval_arch:
            if use_hash:
                # Architectures identified via the provided hashing function.
                mid = sort_best_model_fn([a])
            else:
                mid = int(a)
            model_ids.append(mid)
        eval_perf = _round_accs(np.asanyarray(eval_perf).astype(np.float32).tolist())
        # Ground-truth performances from the benchmark space for these models.
        gt_perfs = spaces[space].query_gt_perfs(model_ids)
        _skdt = compute_sparse_kendalltau(model_ids, eval_perf, gt_perfs, threshold=1e-2, verbose=False)
        # sspr = compute_sparse_spearmanr(model_ids, arch_perf_mean, gt_perfs, threshold=1e-2)
        # kdt = kendalltau(arch_perf_mean, gt_perfs)
        # spr = spearmanr(arch_perf_mean, gt_perfs)
        skdt.append(_skdt[0])
    # compute the statistics.
    return np.mean(skdt), np.std(skdt)
def roi_proposal(rpn_cls_prob_reshape, rpn_bbox_pred, H, W, ANCHOR_PER_GRID, ANCHOR_BOX, TOP_N_DETECTION, NMS_THRESH, IM_H, IM_W):
    """
    clip the predict results from rpn output,
    apply nms,
    propose topN results as final layer output; no backward operation needed here
    """
    # Foreground probability of every anchor (column 1 of the 2-way softmax).
    box_probs = np.reshape(rpn_cls_prob_reshape,[-1,2])[:,1]
    box_delta = np.reshape(rpn_bbox_pred,[H * W * ANCHOR_PER_GRID,4])
    anchor_box = ANCHOR_BOX
    # Decode predicted deltas relative to the anchors into x1,y1,x2,y2 boxes.
    pred_box_xyxy = utils.bbox_delta_convert_inv(anchor_box, box_delta)
    box_nms, probs_nms, pick = utils.non_max_suppression_fast(pred_box_xyxy, box_probs, TOP_N_DETECTION, overlap_thresh=NMS_THRESH)
    #box_nms = box_nms[probs_nms>0.90]
    box = box_nms
    #box = batch_generate.bbox2cxcy(box)
    #clip box
    # NOTE(review): `mc` is not defined in this function; it must be a module
    # global (likely a model-config object) — confirm before reuse.
    proposal_region = clip_box(mc, box, IM_H, IM_W)
    #print('the shape of proposaled region is ',proposal_region.shape)
    #print('the proposaled region value is ', proposal_region)
    # Prepend a batch-index column (always 0: single-image batches).
    batch_inds = np.zeros((proposal_region.shape[0], 1), dtype=np.float32)
    blob = np.hstack((batch_inds, proposal_region.astype(np.float32, copy=False)))
    return blob, probs_nms
def mi_gg(x, y, biascorrect=True, demeaned=False):
    """Mutual information (MI) between two Gaussian variables in bits

    I = mi_gg(x,y) returns the MI between two (possibly multidimensional)
    Gaussian variables, x and y, with bias correction.
    If x and/or y are multivariate, columns must correspond to samples, rows
    to dimensions/variables. (Samples last axis)

    biascorrect : true / false option (default true) which specifies whether
    bias correction should be applied to the estimated MI.
    demeaned : false / true option (default false) which specifies whether the
    input data already has zero mean (true if it has been copula-normalized)
    """
    x = np.atleast_2d(x)
    y = np.atleast_2d(y)
    if x.ndim > 2 or y.ndim > 2:
        raise ValueError("x and y must be at most 2d")
    Ntrl = x.shape[1]
    Nvarx = x.shape[0]
    Nvary = y.shape[0]
    Nvarxy = Nvarx+Nvary
    if y.shape[1] != Ntrl:
        raise ValueError("number of trials do not match")
    # joint variable
    xy = np.vstack((x,y))
    if not demeaned:
        xy = xy - xy.mean(axis=1)[:,np.newaxis]
    Cxy = np.dot(xy,xy.T) / float(Ntrl - 1)
    # submatrices of joint covariance
    Cx = Cxy[:Nvarx,:Nvarx]
    Cy = Cxy[Nvarx:,Nvarx:]
    chCxy = np.linalg.cholesky(Cxy)
    chCx = np.linalg.cholesky(Cx)
    chCy = np.linalg.cholesky(Cy)
    # entropies in nats
    # normalizations cancel for mutual information
    HX = np.sum(np.log(np.diagonal(chCx))) # + 0.5*Nvarx*(np.log(2*np.pi)+1.0)
    HY = np.sum(np.log(np.diagonal(chCy))) # + 0.5*Nvary*(np.log(2*np.pi)+1.0)
    HXY = np.sum(np.log(np.diagonal(chCxy))) # + 0.5*Nvarxy*(np.log(2*np.pi)+1.0)
    ln2 = np.log(2)
    if biascorrect:
        # Fix: np.float was removed from NumPy (>=1.24); the builtin float
        # is the correct replacement for the digamma-term dtype cast.
        psiterms = sp.special.psi((Ntrl - np.arange(1,Nvarxy+1)).astype(float)/2.0) / 2.0
        dterm = (ln2 - np.log(Ntrl-1.0)) / 2.0
        HX = HX - Nvarx*dterm - psiterms[:Nvarx].sum()
        HY = HY - Nvary*dterm - psiterms[:Nvary].sum()
        HXY = HXY - Nvarxy*dterm - psiterms[:Nvarxy].sum()
    # MI in bits
    I = (HX + HY - HXY) / ln2
    return I
def reverse_path(dict_, root, child_to_parents):
    """
    CommandLine:
        python -m utool.util_graph --exec-reverse_path --show
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> child_to_parents = {
        >>>     'chip': ['dummy_annot'],
        >>>     'chipmask': ['dummy_annot'],
        >>>     'descriptor': ['keypoint'],
        >>>     'fgweight': ['keypoint', 'probchip'],
        >>>     'keypoint': ['chip'],
        >>>     'notch': ['dummy_annot'],
        >>>     'probchip': ['dummy_annot'],
        >>>     'spam': ['fgweight', 'chip', 'keypoint']
        >>> }
        >>> to_root = {
        >>>     'fgweight': {
        >>>         'keypoint': {
        >>>             'chip': {
        >>>                 'dummy_annot': None,
        >>>             },
        >>>         },
        >>>         'probchip': {
        >>>             'dummy_annot': None,
        >>>         },
        >>>     },
        >>> }
        >>> reversed_ = reverse_path(to_root, 'dummy_annot', child_to_parents)
        >>> result = ut.repr3(reversed_)
        >>> print(result)
        {
            'dummy_annot': {
                'chip': {
                    'keypoint': {
                        'fgweight': None,
                    },
                },
                'probchip': {
                    'fgweight': None,
                },
            },
        }
    """
    # Hacky but illustrative
    # TODO; implement non-hacky version
    # Build an adjacency matrix over every key appearing anywhere in dict_,
    # with a 1 at (parent, child) for each known parent relation.
    allkeys = get_allkeys(dict_)
    mat = np.zeros((len(allkeys), len(allkeys)))
    for key in allkeys:
        if key != root:
            for parent in child_to_parents[key]:
                rx = allkeys.index(parent)
                cx = allkeys.index(key)
                mat[rx][cx] = 1
    end = None
    seen_ = set([])
    # Walk the adjacency matrix from `root` to rebuild the nested dict top-down.
    reversed_ = {root: traverse_path(root, end, seen_, allkeys, mat)}
    return reversed_
import math
def build_lifegame_model(n, **kwargs):
    """ build a MIP model for a stable game of life configuration
    chessboard is (n+1) x (n+1)
    :param n: interior board size; playable cells are (1..n, 1..n) while
        row/column 0 and n+1 form a permanently dead border
    :param kwargs: forwarded to the docplex ``Model`` constructor
    :return: the built ``Model``; the board size and variable matrix are
        attached as ``lm.size`` and ``lm.life`` for later inspection
    """
    assert n >= 2
    assert Model.supports_logical_constraints(), "This model requires logical constraints cplex.version must be 12.80 or higher"
    lm = Model(name='game_of_life_{0}'.format(n), **kwargs)
    border = range(0, n + 2)
    inside = range(1, n + 1)
    # one binary var per cell
    life = lm.binary_var_matrix(border, border, name=lambda rc: 'life_%d_%d' % rc)
    # store sum of alive neighbors for interior cells
    # NOTE: the ``n`` inside this comprehension is a neighbor offset from the
    # module-level ``neighbors`` list; it is scoped to the comprehension and
    # does not clobber the board size ``n``.
    sum_of_neighbors = {(i, j): lm.sum(life[i + n.dx, j + n.dy] for n in neighbors) for i in inside for j in inside}
    # all borderline cells are dead
    for j in border:
        life[0, j].ub = 0
        life[j, 0].ub = 0
        life[j, n + 1].ub = 0
        life[n + 1, j].ub = 0
    # ct1: the sum of alive neighbors for an alive cell is greater than 2
    for i in inside:
        for j in inside:
            lm.add(2 * life[i, j] <= sum_of_neighbors[i, j])
    # ct2: the sum of alive neighbors for an alive cell is less than 3
    for i in inside:
        for j in inside:
            lm.add(5 * life[i, j] + sum_of_neighbors[i, j] <= 8)
    # ct3: for a dead cell, the sum of alive neighbors cannot be 3
    for i in inside:
        for j in inside:
            ct3 = sum_of_neighbors[i, j] == 3
            lm.add(ct3 <= life[i, j])  # use logical cts here
    # satisfy the 'no 3 alive neighbors for extreme rows, columns
    # (i + 2 may reach n + 1, which lies inside the dead border)
    for i in border:
        if i < n:
            for d in [1, n]:
                lm.add(life[i, d] + life[i + 1, d] + life[i + 2, d] <= 2)
                lm.add(life[d, i] + life[d, i + 1] + life[d, i + 2] <= 2)
    # symmetry breaking
    n2 = int(math.ceil(n/2))
    half1 = range(1, n2 + 1)
    half2 = range(n2 + 1, n)
    # there are more alive cells in left side
    lm.add(lm.sum(life[i1, j1] for i1 in half1 for j1 in inside) >= lm.sum(
        life[i2, j2] for i2 in half2 for j2 in inside))
    # there are more alive cells in upper side
    lm.add(lm.sum(life[i1, j1] for i1 in inside for j1 in half1) >= lm.sum(
        life[i2, j2] for i2 in inside for j2 in half2))
    # find maximum number of alive cells
    lm.maximize(lm.sum(life))
    # add a dummy kpi: number of rows containing at least one alive cell
    nlines = lm.sum( (lm.sum(life[i, j] for j in inside) >= 1) for i in inside)
    lm.add_kpi(nlines, 'nlines')
    # parameters: branch up, use heuristics, emphasis on opt, threads free
    lm.parameters.mip.strategy.branch = 1
    lm.parameters.mip.strategy.heuristicfreq = 10
    lm.parameters.emphasis.mip = 2
    lm.parameters.threads = 0
    # store data items as fields
    lm.size = n
    lm.life = life
    # warm-start the solver with a known feasible configuration
    ini_s = lifegame_make_initial_solution(lm)
    if not ini_s.is_valid_solution():
        print('error in initial solution')
    else:
        lm.add_mip_start(ini_s)
    return lm | 74c615639d8a45bf6ff6ab055f0e7189ace1be07 | 3,628,337
def get_wofs_values(landsat_dataset):
    """classifies a landsat scene using the wofs algorithm
    :param landsat_dataset: xarray with dims 'latitude','longitude' containing data from a landsat scene
    :return: xarray dataset containing wofs classification values
    """
    # landsat dataset needs dim 'time' for wofs_classify to work, re-add it here since using isel took it away
    landsat_dataset = landsat_dataset.expand_dims('time')
    # When a cf_mask band is already present, no explicit mask is passed
    # (clean_mask=None) — presumably wofs_classify uses cf_mask itself; confirm.
    clean_mask = None if hasattr(landsat_dataset, 'cf_mask') else get_clean_mask(landsat_dataset)
    landsat_dataset_wofs = wofs_classify(landsat_dataset, clean_mask = clean_mask)
    # drop the temporary 'time' dimension again before returning
    landsat_dataset_wofs = landsat_dataset_wofs.isel(time=0)
    return landsat_dataset_wofs | e069da04de9800fe61f3416f981ec386cb8e1f2a | 3,628,338
def readdirs(DIR):
    """Implementation of perl readdir in list context

    ``DIR`` is a two-element list ``[entries, cursor]``.  Returns every
    entry from the cursor onward and advances the cursor to the end, so a
    subsequent call yields an empty list (mirroring perl's readdir).
    """
    result = (DIR[0])[DIR[1]:]
    DIR[1] = len(DIR[0])  # mark all entries as consumed
    return result | 98d9b588704ea2820b14ba2c5542ea0a619a02ce | 3,628,339
def expandChunk(layout, typesize, shape_json,
    chunk_min=CHUNK_MIN, layout_class='H5D_CHUNKED'):
    """ Extend the chunk shape until it is above the MIN target.

    :param layout: proposed chunk shape, one extent per dimension
    :param typesize: size in bytes of a single element
    :param shape_json: HDF5-style shape description with "class", "dims"
        and optionally "maxdims" (a maxdim of 0 meaning unlimited)
    :param chunk_min: minimum desired chunk size in bytes
    :param layout_class: unused in this function; kept for interface
        compatibility with the other chunk helpers
    :return: tuple chunk shape, or None for a null dataspace
    """
    if shape_json is None or shape_json["class"] == 'H5S_NULL':
        return None
    if shape_json["class"] == 'H5S_SCALAR':
        return (1,)  # just enough to store one item
    layout = list(layout)
    dims = shape_json["dims"]
    rank = len(dims)
    extendable_dims = 0  # number of dimensions that are extenable
    maxdims = None
    if "maxdims" in shape_json:
        maxdims = shape_json["maxdims"]
        for n in range(rank):
            if maxdims[n] == 0 or maxdims[n] > dims[n]:
                extendable_dims += 1
    dset_size = get_dset_size(shape_json, typesize)
    if dset_size <= chunk_min and extendable_dims == 0:
        # just use the entire dataspace shape as one big chunk
        return tuple(dims)
    chunk_size = getChunkSize(layout, typesize)
    if chunk_size >= chunk_min:
        return tuple(layout)  # good already
    # Repeatedly double chunk extents until the chunk byte size reaches
    # chunk_min; extendable dimensions are preferred over fixed ones.
    while chunk_size < chunk_min:
        # just adjust along extendable dimensions first
        old_chunk_size = chunk_size
        for n in range(rank):
            dim = rank - n - 1  # start from the fastest-varying (last) dimension
            if extendable_dims > 0:
                if maxdims[dim] == 0:
                    # infinately extendable dimensions
                    layout[dim] *= 2
                    chunk_size = getChunkSize(layout, typesize)
                    if chunk_size > chunk_min:
                        break
                elif maxdims[dim] > layout[dim]:
                    # can only be extended so much
                    layout[dim] *= 2
                    if layout[dim] >= dims[dim]:
                        layout[dim] = maxdims[dim]  # trim back
                        extendable_dims -= 1  # one less extenable dimension
                    chunk_size = getChunkSize(layout, typesize)
                    if chunk_size > chunk_min:
                        break
                else:
                    pass  # ignore non-extensible for now
            else:
                # no extendable dimensions
                if dims[dim] > layout[dim]:
                    # can expand chunk along this dimension
                    layout[dim] *= 2
                    if layout[dim] > dims[dim]:
                        layout[dim] = dims[dim]  # trim back
                    chunk_size = getChunkSize(layout, typesize)
                    if chunk_size > chunk_min:
                        break
                else:
                    pass  # can't extend chunk along this dimension
        if chunk_size <= old_chunk_size:
            # reality check to see if we'll ever break out of the while loop
            # NOTE(review): logging's warn() is deprecated in favor of warning()
            log.warn("Unexpected error in guess_chunk size")
            break
        elif chunk_size > chunk_min:
            break  # we're good
        else:
            pass  # do another round
    return tuple(layout) | 1d43c21629b77b5cdc0ab6ac35397729f8f96d89 | 3,628,340
def remove_domestic(images: pd.DataFrame, reset_index: bool = True) -> pd.DataFrame:
    """
    Removes images where the identification corresponds to a domestic
    species. See wiutils/_domestic for a list of the genera considered
    as domestic.
    Parameters
    ----------
    images : DataFrame
        DataFrame with the project's images.
    reset_index : bool
        Whether to reset the index of the resulting DataFrame. If True,
        the index will be numeric from 0 to the length of the result.
    Returns
    -------
    DataFrame
        Copy of images with removed domestic species.
    """
    images = images.copy()
    # keep only rows whose genus column is not listed in the domestic genera
    images = images[~images[_labels.images.genus].isin(_domestic.genera)]
    if reset_index:
        images = images.reset_index(drop=True)
    return images | a2038a36c6fbd6f0492c1abb8afc34919b6ecc4c | 3,628,341
def compute_cyclepoints(sig, fs, f_range, **find_extrema_kwargs):
    """Compute sample indices of cyclepoints.
    Parameters
    ----------
    sig : 1d array
        Time series.
    fs : float
        Sampling rate, in Hz.
    f_range : tuple of (float, float)
        Frequency range, in Hz, to narrowband filter the signal. Used to find zero-crossings.
    find_extrema_kwargs : dict, optional, default: None
        Keyword arguments for the function to find peaks and troughs (:func:`~.find_extrema`)
        that change filter parameters or boundary. By default, the boundary is set to zero.
    Returns
    -------
    df_samples : pandas.DataFrame
        Dataframe containing sample indices of cyclepoints.
        Columns (listed for peak-centered cycles):
        - ``peaks`` : signal indices of oscillatory peaks
        - ``troughs`` : signal indices of oscillatory troughs
        - ``rises`` : signal indices of oscillatory rising zero-crossings
        - ``decays`` : signal indices of oscillatory decaying zero-crossings
        - ``sample_peak`` : sample at which the peak occurs
        - ``sample_last_zerox_decay`` : sample of the decaying zero-crossing of the previous cycle
        - ``sample_zerox_decay`` : sample of the decaying zero-crossing
        - ``sample_zerox_rise`` : sample of the rising zero-crossing
        - ``sample_last_trough`` : sample of the last trough
        - ``sample_next_trough`` : sample of the next trough
    Examples
    --------
    Compute the signal indices of cyclepoints:
    >>> from neurodsp.sim import sim_bursty_oscillation
    >>> fs = 500
    >>> sig = sim_bursty_oscillation(10, fs, freq=10)
    >>> df_samples = compute_cyclepoints(sig, fs, f_range=(8, 12))
    """
    # Ensure arguments are within valid range
    check_param_range(fs, 'fs', (0, np.inf))
    # Find extrema and zero-crossings locations in the signal
    peaks, troughs = find_extrema(sig, fs, f_range, **find_extrema_kwargs)
    rises, decays = find_zerox(sig, peaks, troughs)
    # For each cycle, identify the sample of each extrema and zero-crossing.
    # Slicing [1:] / [:-1] aligns each cycle with its previous/next events.
    samples = {}
    samples['sample_peak'] = peaks[1:]
    samples['sample_last_zerox_decay'] = decays[:-1]
    samples['sample_zerox_decay'] = decays[1:]
    samples['sample_zerox_rise'] = rises
    samples['sample_last_trough'] = troughs[:-1]
    samples['sample_next_trough'] = troughs[1:]
    df_samples = pd.DataFrame.from_dict(samples)
    return df_samples | c748b4f52dac249cf1021a9902e533a3cf485c6f | 3,628,342
def behav_data_inverted(df):
    """
    Flips the dimensions that need inverting
    Faster than using is_inverted_dim

    A (subj, morph_dim) pair is flagged ``inverted`` when the first trial
    with ``morph_pos == 1`` was classified 'R'; ``greater_response`` is then
    ``response == 'R'`` XOR-ed with that flag.  Adds the ``inverted`` and
    ``greater_response`` columns and returns the (modified) frame.
    """
    # Apparently groupby with categorical dtype is broken
    # See https://github.com/pandas-dev/pandas/issues/22512#issuecomment-422422573
    df["class_"] = df["class_"].astype(str)
    # first class_ seen at morph_pos == 1, per (subj, morph_dim)
    inverted_map = (
        df[(df["morph_pos"] == 1)]
        .groupby(["subj", "morph_dim"], observed=True)
        .agg(lambda x: x.iloc[0])["class_"]
        == "R"
    )
    # broadcast the per-(subj, morph_dim) flag back onto every row
    df = df.join(
        inverted_map.to_frame(name="inverted"),
        on=("subj", "morph_dim"),
        how="left",
        sort=False,
    )
    df["greater_response"] = (df["response"] == "R") != (df["inverted"])
    return df | 69ad0d4cea1a12b2dd8dc256c77b13f1002ae6b8 | 3,628,343
def _resample_event_obs(obs, fx, obs_data):
    """
    Resample the event observation.

    No actual resampling is performed: when the interval lengths match the
    data is returned unchanged, otherwise an error is raised.
    Parameters
    ----------
    obs : datamodel.Observation
        The Observation being resampled.
    fx : datamodel.EventForecast
        The corresponding Forecast.
    obs_data : pd.Series
        Timeseries data of the event observation.
    Returns
    -------
    obs_resampled : pandas.Series
        Timeseries data of the Observation resampled to match the Forecast.
    Raises
    ------
    ValueError
        If the Forecast and Observation do not have the same interval length.
    """
    if fx.interval_length != obs.interval_length:
        raise ValueError("Event observation and forecast time-series "
                         "must have matching interval length.")
    else:
        obs_resampled = obs_data
    return obs_resampled | 1c66ae124aaa2e732c7d0ec3e733ae2b5caaa6cb | 3,628,344
def project_raw_gw(
    raw_waveforms,
    sample_params,
    waveform_generator,
    ifo,
    get_snr=False,
    noise_psd=None,
):
    """Project a raw gravitational wave onto an intterferometer
    Args:
        raw_waveforms: the plus and cross polarizations of a list of GWs
        sample_params: dictionary of GW parameters
        waveform_generator: the waveform generator that made the raw GWs
        ifo: interferometer
        get_snr: return the SNR of each sample
        noise_psd: background noise PSD used to calculate SNR the sample
    Returns:
        An (n_samples, waveform_size) array containing the GW signals as they
        would appear in the given interferometer with the given set of sample
        parameters. If get_snr=True, also returns a list of the SNR associated
        with each signal
    """
    # split the (n, 2, size) input into its plus/cross polarizations
    polarizations = {
        "plus": raw_waveforms[:, 0, :],
        "cross": raw_waveforms[:, 1, :],
    }
    # convert the dict-of-sequences into one parameter dict per sample
    sample_params = [
        dict(zip(sample_params, col)) for col in zip(*sample_params.values())
    ]
    n_sample = len(sample_params)
    sample_rate = waveform_generator.sampling_frequency
    waveform_duration = waveform_generator.duration
    waveform_size = int(sample_rate * waveform_duration)
    signals = np.zeros((n_sample, waveform_size))
    snr = np.zeros(n_sample)
    ifo = bilby.gw.detector.get_empty_interferometer(ifo)
    for i, p in enumerate(sample_params):
        # For less ugly function calls later on
        ra = p["ra"]
        dec = p["dec"]
        geocent_time = p["geocent_time"]
        psi = p["psi"]
        # Generate signal in IFO: sum of antenna-weighted polarizations
        signal = np.zeros(waveform_size)
        for mode, polarization in polarizations.items():
            # Get ifo response
            response = ifo.antenna_response(ra, dec, geocent_time, psi, mode)
            signal += response * polarization[i]
        # Total shift = shift to trigger time + geometric shift
        dt = waveform_duration / 2.0
        dt += ifo.time_delay_from_geocenter(ra, dec, geocent_time)
        signal = np.roll(signal, int(np.round(dt * sample_rate)))
        # Calculate SNR
        if noise_psd is not None:
            if get_snr:
                snr[i] = calc_snr(signal, noise_psd, sample_rate)
        signals[i] = signal
    if get_snr:
        return signals, snr
    return signals | d7f1d652baae37f402e0ea520600ffe7423fac75 | 3,628,345
def threshold(weights, delta_size):
    """ Sample for threshold minimizing pixel changes.

    Scans 40 candidate thresholds on a linear, descending grid from 1e-2
    down to 1e-7 and returns the candidate ``i`` for which
    ``added_pixel_count(weights, i) + delta_size`` is closest to zero.
    Tuple comparison breaks ties toward the smaller candidate value.
    """
    return min((abs(added_pixel_count(weights,i) + delta_size), i)
        for i in np.linspace(10**-2,10**-7,40))[1] | 01c00a55938df949c42299384b2574a07de8e3ff | 3,628,346
def davenport_matrix(B = None,
                     covariance_analysis = False,
                     **attitude_profile_kwargs):
    """Compute the Davenport matrix for a given attitude profile.
    Accepts either an attitude profile matrix or the arguments
    for the attitude_profile_matrix() function. Returns a 4x4
    Davenport matrix K.
    If the covariance_analysis argument is True, a slightly modified K
    is returned, which may be used as part of a least squares approach
    (such as the qEKF or the information matrix approach, SOAR). See
    Eqs. 10--11 in [2] for details.
    If you do not provide an attitude profile matrix to this method,
    you must pass it the appropriate arguments for calling
    attitude_profile_matrix().
    References:
      [0] Davenport, P. 1968. A vector approach to the algebra of
          rotations with applications. NASA Technical Report X-546-65-437.
      [1] De Ruiter, A.; Damaren, C.; Forbes, J. 2013. Spacecraft
          Dynamics and Control: An Introduction, 1st Ed. Wiley, West
          Sussex, U.K. pp. 468-471.
      [2] Ainscough, Zanetti, Christian, Spanos. 2015. Q-method extended
          Kalman filter. Journal of Guidance, Control, and Dynamics
          38(4): 752--760.
    Kwargs:
        B                        attitude profile matrix (if not provided, one
                                 will be computed using attitude_profile_kwargs)
        covariance_analysis      defaults to False for the standard Davenport
                                 matrix; if True, returns the Davenport matrix
                                 utilized for least squares
        attitude_profile_kwargs  additional arguments passed to the
                                 attitude_profile_matrix() method if B is not
                                 provided
    Returns:
        A 4x4 matrix.
    """
    if B is None:
        B = attitude_profile_matrix(**attitude_profile_kwargs)
    # symmetric part and trace of the attitude profile matrix
    S  = B + B.T
    mu = np.trace(B)
    # antisymmetric part; z collects its independent off-diagonal terms
    zx = B.T - B
    z  = np.array([[zx[2,1]], [zx[0,2]], [zx[1,0]]])
    if covariance_analysis:
        # least-squares variant of K (Eqs. 10--11 in [2])
        M  = S - np.identity(3) * (2*mu)
        upperleft = 0.0
    else:
        M  = S - np.identity(3) * mu
        upperleft = mu
    # assemble K = [[mu, z^T], [z, M]] (or the covariance-analysis variant)
    K  = np.vstack((np.hstack((np.array([[upperleft]]), z.T)),
                    np.hstack((z,                       M))))
    return K | 5cb1e7b2fe81c0a985f1189f4786fa7be42cf378 | 3,628,347
def _arg_raw(dvi, delta):
    """Return *delta* without reading anything more from the dvi file

    *dvi* is unused; it is accepted to match the signature shared by the
    other ``_arg_*`` parsers — presumably so they are interchangeable.
    """
    return delta | 041cfaaf23c6e229b60d5278e8cf27352e078a65 | 3,628,348
from typing import Sequence
import struct
async def write_mapInfoReply(maps: Sequence[BeatmapInfo]) -> bytearray:
    """ Write `maps` into bytes (osu! map info).

    Layout: a 4-byte little-endian map count, then for each map a packed
    '<hiiiBbbbb' record (ids, status and per-mode ranks) followed by the
    map's md5 written as a length-prefixed string via write_string().
    """
    ret = bytearray(len(maps).to_bytes(4, 'little'))
    # Write files
    for m in maps:
        ret.extend(struct.pack('<hiiiBbbbb',
            m.id, m.map_id, m.set_id, m.thread_id, m.status,
            m.osu_rank, m.fruits_rank, m.taiko_rank, m.mania_rank
        ))
        ret.extend(await write_string(m.map_md5))
    return ret | f601c7898acd7c94890fb26679d1814ecdd5cce2 | 3,628,349
def getPolarPoints2(x, y, center):
    """Convert list of cartesian points to polar points
    The returned points are not rounded to the nearest point. User must do that by hand if desired.
    Parameters
    ----------
    x : (N,) :class:`numpy.ndarray`
        List of cartesian x points to convert to polar domain
    y : (N,) :class:`numpy.ndarray`
        List of cartesian y points to convert to polar domain
    center : (2,) :class:`numpy.ndarray`
        Center to use for conversion to polar domain of cartesian points
        Format of center is (x, y)
    Returns
    -------
    r : (N,) :class:`numpy.ndarray`
        Corresponding radii points from cartesian :obj:`x` and :obj:`y`
    theta : (N,) :class:`numpy.ndarray`
        Corresponding theta points from cartesian :obj:`x` and :obj:`y`
        (in radians, in the range [0, 2*pi))
    See Also
    --------
    :meth:`getPolarPoints`
    """
    # translate so that ``center`` becomes the origin
    cX, cY = x - center[0], y - center[1]
    r = np.sqrt(cX ** 2 + cY ** 2)
    theta = np.arctan2(cY, cX)
    # Make range of theta 0 -> 2pi instead of -pi -> pi
    # According to StackOverflow, this is the fastest method:
    # https://stackoverflow.com/questions/37358016/numpy-converting-range-of-angles-from-pi-pi-to-0-2pi
    theta = np.where(theta < 0, theta + 2 * np.pi, theta)
    return r, theta | f6f3ebed82eac397c26f2c20ce23a4e8cf19e3b1 | 3,628,350
def to_bytes(binary_string: str) -> bytes:
    """Change a string, like "00000011" to a bytestring

    The string is right-padded with "0" up to a multiple of 8 bits, so
    "1" becomes a single byte with value 0b10000000.  An empty string
    yields an empty bytestring.

    :param str binary_string: The string (only "0" and "1" characters)
    :returns: The bytestring
    :rtype: bytes
    :raises ValueError: if the string contains characters other than 0/1
    """
    if not binary_string:
        # previously int("", 2) raised here; an empty input has a natural
        # empty-bytes result
        return b""
    if len(binary_string) % 8 != 0:
        binary_string += "0" * (
            8 - len(binary_string) % 8
        )  # fill out to a multiple of 8
    # Validate explicitly rather than with ``assert`` so the check still
    # runs when Python is started with -O (asserts are stripped).
    if not set(binary_string) <= {"0", "1"}:
        raise ValueError("binary_string must contain only '0' and '1'")
    # padding above guarantees the length is a whole number of 8-bit bytes
    size = len(binary_string) // 8
    binary_int = int(binary_string, 2)
    return binary_int.to_bytes(size, byteorder="big")
return binary_int.to_bytes(size, byteorder="big") | 83dda243e27d7f7988d520c0455e43d1937d5447 | 3,628,351 |
def choose_best_assembly_name(assembly_names):
    """
    Given a list of reference genome names returns the best according to the
    following criteria:
    1) Prefer Ensembl reference names to UCSC
    2) Prefer reference names with higher numbers in them.
    Parameters
    ----------
    assembly_names : list of str
    Returns
    -------
    str
    """
    assembly_names = set(assembly_names)
    if len(assembly_names) == 1:
        return list(assembly_names)[0]
    # partition names into UCSC-style vs everything else (treated as Ensembl)
    assembly_names_ucsc = {
        name for name in assembly_names if is_ucsc_reference_name(name)}
    assembly_names_ensembl = assembly_names.difference(assembly_names_ucsc)
    if assembly_names_ensembl:
        # drop the UCSC reference names and pick only between the Ensembl
        # compatible names
        return most_recent_assembly_name(assembly_names_ensembl)
    else:
        # only UCSC names available; pick the most recent among them
        return most_recent_assembly_name(assembly_names_ucsc) | fc111677f5dfc0e3b10e14e74ac096fd0f22dbfb | 3,628,352
def user_follow(request):
    """
    This one is VERY similar to the <image_like> func in app-image/views.
    There are only two options after all (well, true for some cases) :D

    Expects POST params ``id`` (target user pk) and ``action``
    ('follow' or anything else to unfollow); replies with a JSON
    ``{'status': 'ok'|'ko'}``.
    """
    user_id = request.POST.get('id')
    action = request.POST.get('action')
    if user_id and action:
        try:
            user = User.objects.get(id=user_id)
            if action == 'follow':
                Contact.objects.get_or_create(user_from=request.user,
                                              user_to=user)
                create_action(request.user, 'is following', user)
            else:
                # NOTE(review): unfollowing records no Action — confirm this
                # asymmetry with the follow branch is intended.
                Contact.objects.filter(user_from=request.user,
                                       user_to=user).delete()
            return JsonResponse({ 'status': 'ok' })
        except User.DoesNotExist:
            return JsonResponse({ 'status': 'ko' })
    # missing id/action in the POST data
    return JsonResponse({ 'status': 'ko' }) | 7b4956ec002aea512758ba34a260b24bfd5622d8 | 3,628,353
def module_patch_twin(connectionId, twin):  # noqa: E501
    """Updates the device twin
    # noqa: E501
    :param connectionId: Id for the connection
    :type connectionId: str
    :param twin:
    :type twin: dict | bytes
    :rtype: None
    """
    # JSON bodies arrive as raw dicts; convert to the generated Twin model
    if connexion.request.is_json:
        twin = Twin.from_dict(connexion.request.get_json())  # noqa: E501
    # changed from return 'do some magic!'
    return module_glue.send_twin_patch_sync(connectionId, twin) | 1b712264c5723a44a2fa0328ac7621cf090585b7 | 3,628,354
def rgbToHsv(r, g, b):
    """
    Converts an RGB color value to HSV. Conversion formula
    adapted from http://en.wikipedia.org/wiki/HSV_color_space.
    Args:
        r, g, b (int): red, green, and blue values between 0 and 255 inclusive
    Returns:
        Array [hue, saturation, value]
        hue between 0 and 359 inclusive, s and v between 0 and 1 inclusive
    """
    r /= 255.0
    g /= 255.0
    b /= 255.0
    mx = max(r, g, b)
    mn = min(r, g, b)
    v = mx
    d = mx - mn
    s = 0 if mx == 0 else d / mx
    if mx == mn:
        # achromatic (grey): hue is undefined, 0 by convention
        return [0, s, v]
    # Exactly one branch applies; the original fell through every ``if`` and
    # recomputed the hue when two channels tied for the maximum (same value,
    # wasted work), and carried a dead ``h = None`` initializer.
    if r == mx:
        h = (60 * ((g - b) / d) + 360) % 360
    elif g == mx:
        h = (60 * ((b - r) / d) + 120) % 360
    else:  # b == mx
        h = (60 * ((r - g) / d) + 240) % 360
    return [round(h) % 360, s, v]
return [round(h) % 360, s, v] | 5d7e40dc8f5deba686bcca29da3b5ea2d7d9f37c | 3,628,355 |
def assign_from_checkpoint(model_path, var_list):
  """Creates an operation to assign specific variables from a checkpoint.
  Args:
    model_path: The full path to the model checkpoint. To get latest checkpoint
        use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
    var_list: A list of (possibly partitioned) `Variable` objects
        or a dictionary mapping names in the checkpoint to the
        corresponding variables or list of variables to initialize
        from that checkpoint value. For partitioned Variables, the
        name in the checkpoint must be the full variable, not the
        name of the partitioned variable, eg. "my_var" rather than
        "my_var/part_4". If empty, returns no_op(), {}.
  Returns:
    the restore_op and the feed_dict that need to be run to restore var_list.
  Raises:
    ValueError: If the checkpoint specified at `model_path` is missing one of
      the variables in `var_list`.
  """
  # Normalize var_list into a dictionary mapping names in the
  # checkpoint to the list of variables to initialize from that
  # checkpoint variable. Sliced (including partitioned) variables will
  # end up under the same key.
  grouped_vars = {}
  if isinstance(var_list, (tuple, list)):
    for var in var_list:
      ckpt_name = get_variable_full_name(var)
      if ckpt_name not in grouped_vars:
        grouped_vars[ckpt_name] = []
      grouped_vars[ckpt_name].append(var)
  else:
    # FIX: dict.iteritems() is Python 2 only and raised AttributeError on
    # Python 3; items() behaves identically on both.
    for ckpt_name, value in var_list.items():
      if isinstance(value, (tuple, list)):
        grouped_vars[ckpt_name] = value
      else:
        grouped_vars[ckpt_name] = [value]
  # Read each checkpoint entry. Create a placeholder variable and
  # add the (possibly sliced) data from the checkpoint to the feed_dict.
  reader = pywrap_tensorflow.NewCheckpointReader(model_path)
  feed_dict = {}
  assign_ops = []
  for ckpt_name in grouped_vars:
    if not reader.has_tensor(ckpt_name):
      raise ValueError(
          'Checkpoint is missing variable [%s]' % ckpt_name)
    ckpt_value = reader.get_tensor(ckpt_name)
    for var in grouped_vars[ckpt_name]:
      placeholder_tensor = array_ops.placeholder(
          dtype=var.dtype.base_dtype,
          shape=var.get_shape(),
          name='placeholder/' + var.op.name)
      assign_ops.append(var.assign(placeholder_tensor))
      if not var._save_slice_info:
        if var.get_shape() != ckpt_value.shape:
          raise ValueError(
              'Total size of new array must be unchanged for %s '
              'lh_shape: [%s], rh_shape: [%s]'
              % (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
        feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
      else:
        # partitioned variable: feed only this variable's slice of the
        # full checkpoint tensor
        slice_dims = zip(var._save_slice_info.var_offset,
                         var._save_slice_info.var_shape)
        slice_dims = [(start, start + size) for (start, size) in slice_dims]
        slice_dims = [slice(*x) for x in slice_dims]
        slice_value = ckpt_value[slice_dims]
        slice_value = slice_value.reshape(var._save_slice_info.var_shape)
        feed_dict[placeholder_tensor] = slice_value
  assign_op = control_flow_ops.group(*assign_ops)
  return assign_op, feed_dict | f318ee5cc923ef147e5c937b74d44c3af101a405 | 3,628,356
def _read_tmpfd(fil):
    """Read from a temporary file object
    Call this method only when nothing more will be written to the temporary
    file - i.e., all the writing has already been done.

    Returns everything written to ``fil`` so far.
    """
    fil.seek(0)  # rewind before reading the whole buffer back
    return fil.read() | 08648325e7e0e9bcd543d3238cb4630ac284f6ed | 3,628,357
def eigenvalues_and_eigenvectors(a: Matrix, epsilon=eps, max_iterations=1000) \
    -> (TransposedVectorView, Matrix):
    """Eigenvalues|vectors with QR factorization

    Runs (unshifted) QR iteration on the symmetric matrix ``a``:
    A_{k+1} = R_k Q_k, stopping once A_k is (almost) upper triangular or
    after ``max_iterations`` rounds.  Returns the diagonal of the iterate
    (the eigenvalues) and the accumulated product of the Q factors
    (the eigenvector matrix).
    """
    assert is_symmetric(a)
    a_k = a.copy()
    v_k = Matrix.identity(a.size()[0])
    for i in range(max_iterations):
        # converged when the off-diagonal part is within epsilon
        if almost_upper_triangular(a_k, epsilon):
            break
        q_i, r_i = factorization(a_k)
        a_k = r_i * q_i
        v_k = v_k * q_i
    return a_k.diagonal(), v_k | 987d65f65bc95dd6acad1dc86061e2b036437a4c | 3,628,358
def evens(input):
    """
    Returns a list with only the even elements of data
    Example: evens([0, 1, 2, 3, 4]) returns [0,2,4]
    Parameter input: The data to process
    Precondition: input an iterable, each element an int
    """
    # NOTE: the parameter shadows the builtin ``input``; the name is kept
    # to preserve the public signature for keyword callers.
    # A comprehension replaces the manual append loop (identical behavior).
    result = [x for x in input if x % 2 == 0]
    return result
return result | 8a219f8815d95a18bea148eaae117f3356a77d4b | 3,628,359 |
def _setup_output_skip_keys(args):
"""reduce tensors pulled from data files to save time/space
"""
if (args.subcommand_name == "dmim") or (args.subcommand_name == "synergy"):
skip_keys = []
elif (args.subcommand_name == "mutatemotifs"):
skip_keys = [
DataKeys.ORIG_SEQ_PWM_HITS,
DataKeys.MUT_MOTIF_ORIG_SEQ,
DataKeys.MUT_MOTIF_POS,
DataKeys.MUT_MOTIF_MASK]
for key in skip_keys:
if "motif_mut" in key:
skip_keys.append(key.replace("motif_mut", "null_mut"))
elif args.subcommand_name == "scanmotifs":
skip_keys = [
DataKeys.ORIG_SEQ_SHUF,
DataKeys.ORIG_SEQ_ACTIVE_SHUF,
DataKeys.ORIG_SEQ_PWM_SCORES,
DataKeys.WEIGHTED_SEQ_SHUF,
DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF,
DataKeys.WEIGHTED_SEQ_PWM_SCORES,
DataKeys.WEIGHTED_SEQ_PWM_HITS,
DataKeys.FEATURES,
DataKeys.LOGITS_SHUF]
if args.lite:
# add other tensors to skip
lite_keys = [
DataKeys.IMPORTANCE_GRADIENTS,
DataKeys.WEIGHTED_SEQ,
DataKeys.ORIG_SEQ_PWM_SCORES_THRESH,
DataKeys.ORIG_SEQ_PWM_HITS,
DataKeys.ORIG_SEQ_PWM_DENSITIES,
DataKeys.ORIG_SEQ_PWM_MAX_DENSITIES,
DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH,
DataKeys.WEIGHTED_SEQ_PWM_HITS]
skip_keys += lite_keys
elif args.subcommand_name == "simulategrammar":
skip_keys = [
DataKeys.ORIG_SEQ_SHUF,
DataKeys.ORIG_SEQ_ACTIVE_SHUF,
DataKeys.ORIG_SEQ_PWM_SCORES,
DataKeys.ORIG_SEQ_PWM_SCORES_THRESH,
DataKeys.ORIG_SEQ_PWM_HITS,
DataKeys.WEIGHTED_SEQ_SHUF,
DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF,
DataKeys.WEIGHTED_SEQ_PWM_SCORES,
DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH,
DataKeys.WEIGHTED_SEQ_PWM_HITS,
DataKeys.FEATURES,
DataKeys.LOGITS_SHUF,
DataKeys.ORIG_SEQ_PWM_DENSITIES,
DataKeys.ORIG_SEQ_PWM_MAX_DENSITIES]
elif args.subcommand_name == "analyzevariants":
skip_keys = [
DataKeys.ORIG_SEQ_SHUF,
DataKeys.ORIG_SEQ_ACTIVE_SHUF,
DataKeys.ORIG_SEQ_PWM_SCORES,
DataKeys.ORIG_SEQ_PWM_SCORES_THRESH,
DataKeys.WEIGHTED_SEQ_SHUF,
DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF,
DataKeys.WEIGHTED_SEQ_PWM_SCORES,
DataKeys.WEIGHTED_SEQ_PWM_SCORES_THRESH,
DataKeys.WEIGHTED_SEQ_PWM_HITS,
DataKeys.FEATURES,
DataKeys.LOGITS_SHUF,
DataKeys.ORIG_SEQ_PWM_DENSITIES,
DataKeys.ORIG_SEQ_PWM_MAX_DENSITIES]
elif args.subcommand_name == "buildtracks":
skip_keys = [
DataKeys.ORIG_SEQ,
DataKeys.ORIG_SEQ_ACTIVE,
DataKeys.PROBABILITIES,
DataKeys.LOGITS_CI,
DataKeys.LOGITS_CI_THRESH,
DataKeys.LOGITS_SHUF,
DataKeys.LOGITS_MULTIMODEL,
DataKeys.LOGITS_MULTIMODEL_NORM,
DataKeys.IMPORTANCE_GRADIENTS,
#DataKeys.WEIGHTED_SEQ,
DataKeys.WEIGHTED_SEQ_ACTIVE_CI,
DataKeys.WEIGHTED_SEQ_ACTIVE_CI_THRESH,
DataKeys.WEIGHTED_SEQ_ACTIVE_SHUF,
DataKeys.WEIGHTED_SEQ_THRESHOLDS,
DataKeys.ORIG_SEQ_ACTIVE_SHUF,
DataKeys.FEATURES]
else:
skip_keys = []
return skip_keys | 20a5d95f8ee811aeba4dea47eb58339264241a5e | 3,628,360 |
from typing import List
def create_example(speakers: List[List[nparr]]) -> nparr:
    """
    :param speakers: a list of speakers where each item is a list of microphones where each contains
    a sound file padded to a consistent length
    :return: matrix of size [features(freq),file_len(time),(mic_num-1)*2] which contains the cosine and a sine of the
    normalized stft phase of microphones relative to the reference mic
    """
    mics = [sum(mic) for mic in zip(*speakers)]  # sum the speakers for each microphone.
    z = our_stft(mics)  # calculate STFT for all microphones
    angles = np.angle(z)  # get STFT phase
    # phase of every non-reference microphone relative to microphone 0
    angles = angles[:, :, 1:] - angles[:, :, 0][:, :, np.newaxis]  # normalize by reference microphone
    layers = np.concatenate((np.sin(angles), np.cos(angles)), axis=2)  # return the sin and cos as the input features
    return layers | 21c81d8f0f60e24ace3eca4f3a205604da98b3df | 3,628,361
def todo_detail(request, pk):
    """API endpoint to get single todo or update its last exec date
    GET: Displays single ToDo
    PUT: Updates last exec date
    """
    todo = get_object_or_404(ToDo, pk=pk)
    if request.method == 'GET':
        serializer = ToDoSerializer(todo)
        return Response(data=serializer.data)
    if request.method == 'PUT':
        # DoToDoSerializer validates/applies the "done" update, but the
        # response echoes the full ToDo representation
        serializer = DoToDoSerializer(todo, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(ToDoSerializer(todo).data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | 74d89ed578220304efc2148024922b5baeb3b566 | 3,628,362
def SaveNumSummary(doc:NexDoc, filename):
    """Saves the summary of numerical results to a text file with the specified name."""
    # ``locals()`` forwards exactly {doc, filename} as the NexRun parameters;
    # do not introduce local variables here or they would be forwarded too.
    return NexRun("SaveNumSummary", locals()) | b51924a149d96043d4c8e9397063b8ff419db11f | 3,628,363
def get_binary_balanced_purity_ranges(preds, class_labels, bin_size, total_class_counts):
    """
    Get balanced purity for each class, for each range of probabilities, for binary classifier.
    Return dict. of class names to balanced purity at each prob threshold (10 thresholds)
    :param preds: List of Numpy rows of probabilites (last col is label) ; can be derived from model using preds = np.concatenate(model.results)
    :param class_labels: class names in order to be treated
    :param bin_size: width of probability bin, usually 0.1
    :param total_class_counts: per-class totals used when computing the
        range balanced purities — presumably keyed by class name; confirm
        against bp_binary_get_range_bps
    """
    assignments = bp_binary_get_assignments(preds, class_labels, bin_size)
    agg_assignments = bp_binary_aggregate_assignments(
        assignments, class_labels, num_bins=10)
    range_class_bps = bp_binary_get_range_bps(
        agg_assignments, class_labels, total_class_counts)
    # Reformat, so that instead of list of dicts, it is a dict from class name to list.
    class_to_bps = {}
    for class_name in class_labels:
        cur_class_bps = []
        for cur_range_bp in range_class_bps:
            cur_class_bps.append(cur_range_bp[class_name])
        class_to_bps[class_name] = cur_class_bps
    return class_to_bps | 5389e44f3605ca344572483c6cbbc1f40a8b3ef7 | 3,628,364
def bond_symmetry_numbers(xgr, frm_bnd_key, brk_bnd_key):
    """ symmetry numbers, by bond
    the (approximate) symmetry number of the torsional potential for this bond,
    based on the hydrogen counts for each atom
    It is reduced to 1 if one of the H atoms in the torsional bond is a neighbor to the
    special bonding atom (the atom that is being transferred)

    :param xgr: molecular graph
    :param frm_bnd_key: forming-bond key (pair of atom keys), may be falsy
    :param brk_bnd_key: breaking-bond key (pair of atom keys), may be falsy
    :return: dict mapping every bond key of ``xgr`` to its symmetry number
    """
    imp_xgr = implicit(xgr)
    atm_imp_hyd_vlc_dct = atom_implicit_hydrogen_valences(imp_xgr)
    bnd_keys = bond_keys(imp_xgr)
    # bnd_max_hyd_vlcs = [max(map(atm_imp_hyd_vlc_dct.__getitem__, bnd_key))
    #                    for bnd_key in bnd_keys]
    # bnd_sym_nums = [3 if vlc == 3 else 1 for vlc in bnd_max_hyd_vlcs]
    # bnd_sym_num_dct = dict(zip(bnd_keys, bnd_sym_nums))
    # The transferred atom is the one shared between the forming and
    # breaking bonds (if both are given).
    tfr_atm = None
    if frm_bnd_key and brk_bnd_key:
        for atm_f in list(frm_bnd_key):
            for atm_b in list(brk_bnd_key):
                if atm_f == atm_b:
                    tfr_atm = atm_f
        #tfr_atm = list(frm_bnd_key.intersection(list(brk_bnd_key)))
        #tfr_atm = 1
    if tfr_atm:
        neighbor_dct = atom_neighbor_keys(xgr)
        nei_tfr = neighbor_dct[tfr_atm]
        gra = xgr[0]
        all_hyds = []
        for atm in gra:
            if gra[atm][0] == 'H':
                all_hyds.append(atm)
    else:
        # empty sentinel: the hydrogen-neighbor check loop below is skipped
        nei_tfr = {}
    bnd_sym_num_dct = {}
    bnd_sym_nums = []
    for bnd_key in bnd_keys:
        bnd_sym = 1
        vlc = max(map(atm_imp_hyd_vlc_dct.__getitem__, bnd_key))
        if vlc == 3:
            bnd_sym = 3
            # reduce back to 1 when a neighbor of the transferred atom
            # carries three hydrogens
            if tfr_atm:
                for atm in nei_tfr:
                    nei_s = neighbor_dct[atm]
                    h_nei = 0
                    for nei in nei_s:
                        if nei in all_hyds:
                            h_nei += 1
                    if h_nei == 3:
                        bnd_sym = 1
        bnd_sym_nums.append(bnd_sym)
    bnd_sym_num_dct = dict(zip(bnd_keys, bnd_sym_nums))
    # fill in the rest of the bonds for completeness
    bnd_sym_num_dct = dict_.by_key(bnd_sym_num_dct, bond_keys(xgr), fill_val=1)
    return bnd_sym_num_dct | 931a147e690ea5d2aa7564a353f9a18aa2be5a43 | 3,628,365
import subprocess
import os
def compile_catalog(locale_dir, domain, locale):
"""
Compile `*.po` files into `*.mo` files and saved them next to the
original po files found.
Parameters
----------
output_dir: str
FIXME:
domain: str
FIXME:
locale: str, optional
FIXME:
"""
# Check if locale exists!
cmd = [
"pybabel",
"compile",
"--domain={domain}".format(domain=domain),
"--dir={locale_dir}".format(locale_dir=locale_dir),
"--locale={locale}".format(locale=locale),
]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
return os.path.join(
locale_dir, locale, LC_MESSAGES, "{domain}.po".format(domain=domain)
) | 57eacdf96810d0243c03d8d5ac7568ce5771d3ff | 3,628,366 |
import logging
import json
def get_last_end_time(project_id, bucket_name):
    """ Get the end_time as a string value from a JSON object in GCS.
    This file is used to remember the last end_time in case one isn't provided

    Reads ``/<bucket_name>/<project_id>.<LAST_END_TIME_FILENAME>`` and
    returns its "end_time" value, or None when the file is missing or any
    read/parse error occurs.
    """
    # default; every path below overwrites this (value on success, None on error)
    last_end_time_str = ""
    file_name = '{}.{}'.format(project_id, config.LAST_END_TIME_FILENAME)
    logging.debug("get_last_end_time - file_name: {}".format(file_name))
    try:
        gcs_file = gcs.open('/{}/{}'.format(
            bucket_name, file_name))
        contents = gcs_file.read()
        logging.debug("GCS FILE CONTENTS: {}".format(contents))
        json_contents = json.loads(contents)
        last_end_time_str = json_contents["end_time"]
        gcs_file.close()
    except NotFoundError as nfe:
        logging.error("Missing file when reading {} from GCS: {}".format(file_name, nfe))
        last_end_time_str = None
    except Exception as e:
        # broad catch is deliberate: a bad/corrupt state file degrades to None
        logging.error("Received error when reading {} from GCS: {}".format(file_name,e))
        last_end_time_str = None
    return last_end_time_str | 0c3c78fb10613fd2a9910b120bef0422764a29d8 | 3,628,367
import html
def make_header_layout():
    """Build the html layout for the dashboard's header view."""
    title = html.Div(
        id="title",
        className="header--project",
        children="rubicon-ml",
    )
    docs_link = html.A(
        className="header--link",
        href="https://capitalone.github.io/rubicon-ml",
        children="Docs",
    )
    github_link = html.A(
        className="header--link",
        href="https://github.com/capitalone/rubicon-ml",
        children="Github",
    )
    links = html.Div(
        id="links",
        className="header--links",
        children=[docs_link, github_link],
    )
    return html.Div(id="header", className="header", children=[title, links])
def quimbify(data, qtype=None, normalized=False, chopped=False,
             sparse=None, stype=None, dtype=complex):
    """Converts data to 'quantum' i.e. complex matrices, kets being columns.

    Parameters
    ----------
    data : dense or sparse array_like
        Array describing vector or operator.
    qtype : {``'ket'``, ``'bra'`` or ``'dop'``}, optional
        Quantum object type output type. Note that if an operator is given
        as ``data`` and ``'ket'`` or ``'bra'`` as ``qtype``, the operator
        will be unravelled into a column or row vector.
    sparse : bool, optional
        Whether to convert output to sparse a format.
    normalized : bool, optional
        Whether to normalise the output.
    chopped : bool, optional
        Whether to trim almost zero entries of the output.
    stype : {``'csr'``, ``'csc'``, ``'bsr'``, ``'coo'``}, optional
        Format of output matrix if sparse, defaults to ``'csr'``.

    Returns
    -------
    dense or sparse vector or operator

    Notes
    -----
    1. Will unravel an array if ``'ket'`` or ``'bra'`` given.
    2. Will conjugate if ``'bra'`` given.
    3. Will leave operators as is if ``'dop'`` given, but construct one if
       vector given with the assumption that it was a ket.

    Examples
    --------
    Create a ket (column vector):

    >>> qu([1, 2j, 3])
    qarray([[1.+0.j],
            [0.+2.j],
            [3.+0.j]])

    Create a single precision bra (row vector):

    >>> qu([1, 2j, 3], qtype='bra', dtype='complex64')
    qarray([[1.-0.j, 0.-2.j, 3.-0.j]], dtype=complex64)

    Create a density operator from a vector:

    >>> qu([1, 2j, 3], qtype='dop')
    qarray([[1.+0.j, 0.-2.j, 3.+0.j],
            [0.+2.j, 4.+0.j, 0.+6.j],
            [3.+0.j, 0.-6.j, 9.+0.j]])

    Create a sparse density operator:

    >>> qu([1, 0, 0], sparse=True, qtype='dop')
    <3x3 sparse matrix of type '<class 'numpy.complex128'>'
        with 1 stored elements in Compressed Sparse Row format>
    """
    sparse_input = issparse(data)
    # Output is sparse when explicitly requested, when the input is already
    # sparse and no preference was given, or when a sparse format was named.
    sparse_output = ((sparse) or
                     (sparse_input and sparse is None) or
                     (sparse is None and stype))
    # Infer output sparse format from input if necessary
    if sparse_input and sparse_output and stype is None:
        stype = data.format
    if (qtype is None) and (np.ndim(data) == 1):
        # assume quimbify simple list -> ket
        qtype = 'ket'
    if qtype is not None:
        # Must be dense to reshape
        # NOTE(review): ``.A`` densifies a scipy-style sparse matrix.
        data = qarray(data.A if sparse_input else data)
        if qtype in ("k", "ket"):
            data = data.reshape((prod(data.shape), 1))
        elif qtype in ("b", "bra"):
            data = data.reshape((1, prod(data.shape))).conj()
        elif qtype in ("d", "r", "rho", "op", "dop") and isvec(data):
            # Build a density operator |psi><psi| from the (assumed ket) vector.
            data = dot(quimbify(data, "ket"), quimbify(data, "bra"))
        data = data.astype(dtype)
    # Just cast as qarray
    elif not sparse_output:
        data = qarray(data.A if sparse_input else data, dtype=dtype)
    # Check if already sparse matrix, or wanted to be one
    if sparse_output:
        data = sparse_matrix(data, dtype=dtype,
                             stype=(stype if stype is not None else "csr"))
    # Optionally normalize and chop small components
    if normalized:
        normalize_(data)
    if chopped:
        chop_(data)
    return data
def _check_insert_data(obj, datatype, name):
""" Checks validity of an object """
if obj is None:
return False
if not isinstance(obj, datatype):
raise TypeError("{} must be {}; got {}".format(
name, datatype.__name__, type(obj).__name__))
return True | 057d0124db3f304e7efd4093510c663f5383af63 | 3,628,370 |
def create_importance_sampling(
    baddr: GBAPOMDPThroughAugmentedState, num_samples: int, minimal_sample_size: float
) -> belief_types.BeliefUpdate:
    """Creates importance sampling

    Returns a sequential importance sampling belief update that tracks
    ``num_samples`` particles in the ``baddr``. Basically glue between
    ``general_bayes_adaptive_pomdps`` and ``pomdp_belief_tracking``.

    Uses ``baddr`` to simulate steps and to weight particles by their
    observations.

    :param baddr: the GBA-POMDP to track belief for
    :param num_samples: number of particles to maintain
    :param minimal_sample_size: effective-sample-size threshold before resampling
    """

    def transition_func(s, a):
        # Propagate a particle through the (augmented) simulator.
        return baddr.simulation_step(s, a, optimize=True).state

    def obs_model(s, a, ss: GridverseAugmentedGodState, o) -> float:
        # XXX: we know the observation function is deterministic. We also know
        # exactly how it is called under the hood. So here we call it, and see
        # if it produces the observation perceived by the agent
        return float(np.array_equal(o, ss.observation))

    resample_condition = partial(IS.ineffective_sample_size, minimal_sample_size)
    return IS.create_sequential_importance_sampling(
        resample_condition, transition_func, obs_model, num_samples  # type: ignore
    )
import csv
def maps():
    """Render a leaflet map of commercial activity records.

    Reads the configured CSV database and builds a ``Commercial`` record
    for every row that has coordinates and an access code.
    """
    name_to_open_file = "act_comercial.csv"
    full_path_to_open_file = "static/databases/" + name_to_open_file
    all_commercial_list = []
    with open(full_path_to_open_file) as csv_file:
        for row in csv.DictReader(csv_file):
            # Skip rows missing coordinates or an access code.
            if not (row["LAT"] and row["LNG"] and row["CODI_ACCES"]):
                continue
            record = Commercial(row['CODI_ACCES'], row['ADRECA'],
                                row['EPIGRAF_IAE'], row['EPIGRAF_DESC'],
                                row['LAT'], row['LNG'], row['WKT'])
            all_commercial_list.append(record)
            print(record)
    return render_template('leaflet_map.html', records=all_commercial_list)
from typing import Optional
def improved_land_choice(context: Context) -> Optional[int]:
    """
    Play untapped land if needed, then ramp, then draw, then randomly choose a playable card
    """
    hand = context.zones["hand"]
    available = context.mana + context.gold
    playable = context.playable_cards()
    # If some card is one mana short of being castable, an untapped land
    # that nets mana this turn is worth playing first.
    untapped_lands = [k for k in playable if hand[k].land and hand[k].netgain() != 0]
    near_castable = [k for k in playable if hand[k].cost == available + 1]
    if near_castable and untapped_lands:
        return choice(untapped_lands)
    # Prefer ramp spells next.
    ramp_cards = [k for k in playable if hand[k].is_ramp()]
    if ramp_cards:
        return choice(ramp_cards)
    # Then card draw.
    draw_cards = [k for k in playable if hand[k].is_draw()]
    if draw_cards:
        return choice(draw_cards)
    # Otherwise pick any playable card (including tapped lands) at random.
    return choice(playable)
def _ParsePath(path):
    """Parses a path into a bucket name and an object name."""
    if not path:
        return '', ''
    head, sep, tail = path.partition(_PATH_DELIMITER)
    # An empty bucket segment (path starts with the delimiter) maps to None;
    # a missing delimiter means there is no object name at all.
    bucket = head if head else None
    object_name = tail if sep else None
    return bucket, object_name
def instance():
    """
    Get the single cache instance.

    If the cache has not been set up already, it is set up with default
    parameters via ``setup()``.

    :return: The module-level cache singleton.
    """
    global _CacheInstance
    # PEP 8: compare against None with identity (``is not``), not equality.
    if _CacheInstance is not None:
        return _CacheInstance
    return setup()
def bboxes_iou(boxes1, boxes2):
    """Compute element-wise intersection-over-union of two box arrays.

    Argument:
        bboxes: dim = (num_box, 4), 4 : [x_min, y_min, x_max, y_max]
    Return:
        a np.array of IoU values, clamped below by float32 epsilon
    """
    boxes1 = np.array(boxes1)
    boxes2 = np.array(boxes2)
    area1 = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
    area2 = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
    # Intersection rectangle: max of mins, min of maxes; clamp negative
    # extents (disjoint boxes) to zero.
    overlap_min = np.maximum(boxes1[..., :2], boxes2[..., :2])
    overlap_max = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
    overlap_wh = np.maximum(overlap_max - overlap_min, 0.)
    inter_area = overlap_wh[..., 0] * overlap_wh[..., 1]
    union_area = area1 + area2 - inter_area
    return np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
import os
def dir_exists(foldername):
    """Tell whether *foldername* refers to an existing directory.

    Returns True when it does, False otherwise.
    """
    is_directory = os.path.isdir(foldername)
    return is_directory
def normalizeListVec(v):
    """Normalize a 3-component vector given as a list.

    A zero-length vector is returned unchanged (the squared norm is
    replaced by 1 to avoid dividing by zero).
    """
    squared_norm = v[0] ** 2 + v[1] ** 2 + v[2] ** 2
    if squared_norm <= 0:
        squared_norm = 1
    return [component / np.sqrt(squared_norm) for component in v]
import itertools
def sim_1x5bits(K, kappa, num_sim):
    """
    Compute simulation scalar products and
    return a list of ranks and intersection values for several simulations.

    Parameters:
    K -- string
    kappa -- string
    num_sim -- integer

    Return:
    rank_wr_list -- list of tuples
    """
    # Length of signal part
    n = 5
    # Message mu
    mu = list(itertools.product([bool(0), bool(1)], repeat=n))
    norm = Decimal(2**n)
    # Signal reference vectors
    S_ref = gen_S_ref(mu, n)
    # Save the results of simulations
    rank_wr_list = []
    for j in range(num_sim):
        # NOTE(review): the inner ``for j in range(5)`` loops below shadow
        # this simulation counter; harmless as written (the outer ``j`` is
        # never read afterwards) but worth renaming.
        # Noise power consumption for m bits
        R = noise(mu)
        # Initial values of scalar product
        scalar_init = gen_scalar_init(n, R, S_ref)
        init_scalar = [sublist.tolist() for sublist in scalar_init]
        # List of indexes
        list_kappa_idx = kappa_idx()
        # Retrieve idx of kappa's from solution kappa
        solution_idx = [sublist for sublist in list_kappa_idx if sublist[5] == kappa]
        solution_idx = list(itertools.chain.from_iterable(solution_idx)) # Un-nest the previous list
        common4b_idx = []
        for j in range(5): # Five possibilities with four consecutive kappa bits in common
            common4b_idx += [find_idx_common4b(j, kappa, list_kappa_idx, solution_idx)]
        common3b_idx = []
        for j in range(5): # Five possibilities with three consecutive kappa bits in common
            common3b_idx += [find_idx_common3b(j, list_kappa_idx, solution_idx)]
        common2b_idx = []
        for j in range(5): # Ten possibilities with four consecutive kappa bits in common
            idx0, idx1 = find_idx_common2b(j, list_kappa_idx, solution_idx)
            common2b_idx.append(idx0)
            common2b_idx.append(idx1)
        nonsol_idx = find_idx_nonsol(list_kappa_idx, solution_idx)
        # ------------------------------------------------------------------------------------------- #
        # Retrieve corresponding scalar products
        solution_init = [init_scalar[i][solution_idx[i]] for i in range(n)]
        common4b_init = find_init(n, init_scalar, common4b_idx)
        common3b_init = find_init(n, init_scalar, common3b_idx)
        common2b_init = find_init(n, init_scalar, common2b_idx)
        nonsol_init = find_init(n, init_scalar, nonsol_idx)
        # Determine the sign of initial solution value depending on the activity of the register at bit i
        xor_solution(n, K, kappa, solution_init)
        xor_common(3, n, K, kappa, common4b_init)
        xor_common(2, n, K, kappa, common3b_init)
        xor_common(1, n, K, kappa, common2b_init)
        # Find w0 for each bit of the register, tup = (w0, a)
        # Sort result by increasing order on w0
        # list_tup_w0 = find_w0(solution_init, len(mu))
        # list_tup_w0 = sorted(list_tup_w0, key=lambda x: x[0])
        # list_w0 = [tup[0] for tup in list_tup_w0]
        # ------------------------------------------------------------------------------------------- #
        # List of all intersections
        wr = []
        interval = [0, 1]
        # Intersections between solution function and other scalar product functions
        for j in range(len(common4b_init)):
            wr1_common4b, wr2_common4b = find_wr_common4(j, n, norm, solution_init, common4b_init[j])
            append_wr(1, wr1_common4b, interval, wr)
            append_wr(2, wr2_common4b, interval, wr)
        for j in range(len(common3b_init)):
            wr1_common3b, wr2_common3b = find_wr_common3(j, n, norm, solution_init, common3b_init[j])
            append_wr(1, wr1_common3b, interval, wr)
            append_wr(2, wr2_common3b, interval, wr)
        for j in range(len(common2b_init) // 2):
            # Each pair (idx0, idx1) from find_idx_common2b occupies two
            # consecutive slots, hence the 2*j + l addressing.
            for l in range(2):
                wr1_common2b, wr2_common2b = find_wr_common2(j, n, norm, solution_init, common2b_init[2*j + l])
                append_wr(1, wr1_common2b, interval, wr)
                append_wr(2, wr2_common2b, interval, wr)
        for j in range(len(nonsol_init)):
            wr1_nonsol, wr2_nonsol = find_wr_nonsol(norm, solution_init, nonsol_init[j])
            append_wr(1, wr1_nonsol, interval, wr)
            append_wr(2, wr2_nonsol, interval, wr)
        # Determine the intervals for the ranks per noise vector R
        rank_wr = compute_rank_wr(wr)
        rank_wr_list += rank_wr
    return rank_wr_list
import http
from typing import Optional
def athenaupload_start(
    request: http.HttpRequest,
    pk: int,
    workflow: Optional[models.Workflow] = None,
) -> http.HttpResponse:
    """Load a data frame using an Athena connection.

    The parameters are obtained and if valid, an operation is scheduled for
    execution.

    :param request: Web request
    :param pk: primary key of the Athena conn used
    :param workflow: Workflow being processed.
    :return: A page showing the low go view for status.
    """
    # Only enabled connections may be used.
    conn = models.AthenaConnection.objects.filter(
        pk=pk).filter(enabled=True).first()
    if not conn:
        return redirect(
            'connection:athenaconns_index')
    form = ontask.connection.forms.AthenaRequestConnectionParam(
        request.POST or None,
        workflow=workflow,
        instance=conn)
    if request.method == 'POST' and form.is_valid():
        # NOTE(review): ``run_params`` is only consumed by the commented-out
        # batch task below; ``workflow`` is assumed non-None here, presumably
        # guaranteed by a view decorator — confirm.
        run_params = form.get_field_dict()
        log_item = workflow.log(
            request.user,
            models.Log.WORKFLOW_DATA_ATHENA_UPLOAD,
            connection=conn.name,
            status='Preparing to execute')
        # Batch execution
        # athena_dataupload_task.delay(
        #     request.user.id,
        #     workflow.id,
        #     conn.id,
        #     run_params,
        #     log_item.id)
        # Show log execution
        return render(
            request,
            'dataops/operation_done.html',
            {'log_id': log_item.id, 'back_url': reverse('table:display')})
    return render(
        request,
        'dataops/athenaupload_start.html',
        {
            'form': form,
            'wid': workflow.id,
            'dtype': 'Athena',
            'dtype_select': _('Athena connection'),
            'connection': conn,
            'valuerange': range(5) if workflow.has_table() else range(3),
            'prev_step': reverse('connection:athenaconns_index')})
def dasum(x):
    """
    Return the sum of the absolute values (1-norm) of the entries in {x}
    """
    result = gsl.blas_dasum(x.data)
    return result
def _make_serverproxy_handler(name, command, environment, timeout, absolute_url, port, ready_check_path, mappath):
    """
    Create a SuperviseAndProxyHandler subclass with given parameters

    ``command`` and ``environment`` may contain ``{placeholder}`` template
    fields that are filled from ``process_args`` at launch time.
    """
    # FIXME: Set 'name' properly
    class _Proxy(SuperviseAndProxyHandler):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Captured from the enclosing factory call.
            self.name = name
            self.proxy_base = name
            self.absolute_url = absolute_url
            self.requested_port = port
            self.mappath = mappath
            self.ready_check_path = ready_check_path

        @property
        def process_args(self):
            # Template variables available to the command/environment strings.
            # The literal '-'/'--' entries let templates emit those tokens.
            return {
                'port': self.port,
                'base_url': self.base_url,
                'presentation_path': self.presentation_path,
                'presentation_basename': self.presentation_basename,
                'presentation_dirname': self.presentation_dirname,
                'origin_host': self.origin_host,
                '-': '-',
                '--': '--'
            }

        @property
        def base_url(self):
            return self.settings.get('base_url', '/')

        @property
        def presentation_path(self):
            return self.settings.get('presentation_path', '.')

        @property
        def presentation_basename(self):
            return self.settings.get('presentation_basename', '')

        @property
        def presentation_dirname(self):
            return self.settings.get('presentation_dirname', '.')

        @property
        def hub_users(self):
            return {self.settings['user']}

        @property
        def hub_groups(self):
            if self.settings['group']:
                return {self.settings['group']}
            return set()

        @property
        def allow_all(self):
            # '1' in the 'anyone' setting opens access to everyone;
            # otherwise defer to the parent class policy.
            if 'anyone' in self.settings:
                return self.settings['anyone'] == '1'
            return super().allow_all

        def _render_template(self, value):
            # Recursively format str/list/dict values with process_args.
            args = self.process_args
            if type(value) is str:
                return value.format(**args)
            elif type(value) is list:
                return [self._render_template(v) for v in value]
            elif type(value) is dict:
                return {
                    self._render_template(k): self._render_template(v)
                    for k, v in value.items()
                }
            else:
                raise ValueError('Value of unrecognized type {}'.format(type(value)))

        def get_cmd(self):
            # Callable commands are not supported yet.
            if callable(command):
                raise Exception("Not implemented: self._render_template(call_with_asked_args(command, self.process_args))")
            else:
                return self._render_template(command)

        def get_env(self):
            # Callable environments are not supported yet.
            if callable(environment):
                raise Exception("return self._render_template(call_with_asked_args(environment, self.process_args))")
            else:
                return self._render_template(environment)

        def get_timeout(self):
            return timeout

    return _Proxy
def get_datasets(filename):
    """
    Get the names of datasets in an HDF5 file.

    Parameters
    ----------
    filename : str
        Name of an HDF5 file visible to the arkouda server

    Returns
    -------
    list of str
        Names of the datasets in the file

    See Also
    --------
    ls_hdf
    """
    listing = ls_hdf(filename)
    dataset_names = []
    for line in listing.splitlines():
        # The dataset name is the first whitespace-separated token per line.
        dataset_names.append(line.split()[0])
    return dataset_names
def stats_particle(difference):
    """Summarize the distribution of the relative energy difference.

    Computes the mean, (population) standard deviation, standard error of
    the mean, and a plot-ready label string.

    :parameter difference: array containing the difference between true energy and the predicted energy for a particle.
    :type difference: numpy.ndarray
    :return: mean, std, error and label
    :rtype: float, float, float, str.
    """
    n_events = len(difference)
    mean = np.mean(difference)
    std = np.std(difference)
    error = std / np.sqrt(n_events)
    label_part = 'Mean: %.3f $\pm$ %.3f \nStd. dev.: %.2f' % (mean, error, std)
    return mean, std, error, label_part
import pprint
def oxe_set_flex(host, token, flex_ip_address, flex_port):
    """Enable Flex licensing on an OXE node.

    Args:
        host: OXE host name or IP address.
        token: Authentication token used to build the request headers.
        flex_ip_address: Flex license server address.
        flex_port: Flex license server port.

    Returns:
        HTTP status code of the PUT request, or ``None`` when the request
        could not be sent.
    """
    packages.urllib3.disable_warnings(packages.urllib3.exceptions.InsecureRequestWarning)
    payload = {
        "Flex_Licensing_Enable": "Yes",
        "Flex_Server_Address": flex_ip_address,
        "Flex_Server_Port": flex_port,
        "Flex_ProductId_Discovery": "Yes"
    }
    try:
        response = put('https://' + host + '/api/mgt/1.0/Node/1/System_Parameters/1/Flex_Server/1',
                       headers=oxe_set_headers(token, 'PUT'),
                       json=payload,
                       verify=False)
    except exceptions.RequestException as e:
        # Bug fix: the original called ``pprint(e)`` on the *module* object
        # (TypeError) and then fell through to read an unbound ``response``
        # (UnboundLocalError). Report the error and return None instead.
        pprint.pprint(e)
        return None
    return response.status_code
import requests
from bs4 import BeautifulSoup
import copy
import re
def process_advisory(url):
    """Process an advisory URL.

    Fetches the advisory page, extracts affected System.*/Microsoft.*
    package versions from its table(s), and builds a detection rule.
    Returns the rule dict, or False when no affected package was found.
    Mutates the module-level ``rule_number`` to assign sequential rule ids.
    """
    global rule_number, RULE_TEMPLATE
    logger.debug('process_advisory({0})'.format(url))
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'html5lib')
    rule = copy.deepcopy(RULE_TEMPLATE)
    found = False
    rule['description'] = soup.find_all('h2')[0].get_text()
    rule['rule_info'] = url
    rule['id'] = 'DS{0}'.format(rule_number)
    rule_number += 1
    for table in soup.find_all('table'):
        if not is_correct_table(table):
            continue
        # Skip the two header rows of the advisory table.
        for row in table.find_all('tr')[2:]:
            cells = row.find_all('td')
            package_name = cells[0].get_text().strip()
            affected_version = ','.join(cells[1].strings)
            # Only first-party .NET packages are of interest.
            if not ('System.' in package_name or 'Microsoft.' in package_name):
                continue
            version_regex = []
            for version in re.split(r'[, ;]+', affected_version):
                # Ignore if version is blank / empty
                if version.strip() == '':
                    continue
                version_regex.append(re.escape(version.strip()))
                found = True
            # Alternation over every escaped affected version.
            version_regex = '({0})'.format('|'.join(version_regex))
            logger.info('Added {0} {1}'.format(package_name, version_regex))
            rule['patterns'].append({
                'pattern': '<package id="{0}" version="{1}"'.format(package_name, version_regex),
                'type': 'regex'
            })
    return rule if found else False
async def rest_handler(request):
    """Defines a GET handler for the '/rest' endpoint.

    Users make requests of this handler with a query string containing the following arguments:
        cmd: The command c to execute | c E {find, stats}
        params: A list of key-value parameters corresponding to the commands attributes.
    This handler will return an error if the querystring is in an incorrect format.

    Args:
        request (aiohttp.web.Request): The web request that initiated the handler.
    """
    # verify the request
    valid, reason = await verify_rest_request(request)
    if not valid:
        return generate_error(reason, 400)
    json = await request.json()
    # get the parameters
    cmd = json['cmd']
    params = json['params']
    # pass off to the correct target handler
    if cmd == 'find':
        response = await _find_handler(request, params)
    elif cmd == 'stats':
        response = await _stats_handler(request, params)
    elif cmd == 'download':
        response = await _download_handler(request, params)
    elif cmd == 'upload':
        response = await _upload_handler(request, params)
    elif cmd == 'provision':
        response = await _provision_handler(request, params)
    else:
        # Bug fix: previously an unrecognized cmd fell through to an
        # unbound ``response`` (UnboundLocalError). Fail cleanly instead.
        return generate_error('unsupported command: {0}'.format(cmd), 400)
    # return the response we get back from the handler
    return response
def if_true(value, replace_with=None):
    """Return a substitution of *value* when it is truthy.

    Falsy values yield the empty string. A truthy value is returned as-is
    unless *replace_with* is given, in which case ``$value`` placeholders
    in it are filled with the value.
    """
    if value:
        if replace_with is None:
            return value
        return Template(replace_with).safe_substitute(value=value)
    return ''
import os
def scenepath(scene):
    """Generate the directory path for *scene* under ``basedir``."""
    scene_dirname = name(scene)
    return os.path.join(basedir, scene_dirname)
async def quineables(ctx) -> None:
    """
    Displays a list of files printable with
    the quine command
    """
    listing = PrettyTable()
    listing.align = "c"
    listing.field_names = ["File Name", "File Path"]
    for file_name, file_path in _generate_valid_files().items():
        listing.add_row([file_name, file_path])
    return await ctx.send(f"```\n{listing.get_string()}\n```")
def piece_not(piece: str) -> str:
    """
    Return the opposing game piece to the given one.

    Preconditions:
        - piece in {'x', 'o'}

    >>> piece_not('x')
    'o'
    >>> piece_not('o')
    'x'
    """
    if piece == 'o':
        return 'x'
    return 'o'
def util(cpu='all', state='user', mode='avg1', host_os=detect_host_os()):
    """
    Returns average percentage of time spent by cpu in a state over a period
    of time

    :raises: WrongArgumentError if unknown cpu is supplied
    :raises: WrongArgumentError if unknown state is supplied
    :raises: WrongArgumentError if unknown mode is supplied
    :depends on: [host_os.cpu_count, host_os.cpu_times_shifted,
                  host_os.cpu_times]
    """
    # NOTE(review): ``host_os=detect_host_os()`` is evaluated once at import
    # time, so every call shares the same host_os object — confirm intended.
    validate_mode(state, CPU_TIMES)
    available_cpus = list(range(host_os.cpu_count()))
    if cpu == 'all':
        cpus = available_cpus
    else:
        cpu = int(cpu)
        validate_mode(cpu, available_cpus)
        cpus = [cpu]
    validate_mode(mode, AVERAGE_MODE.keys())
    # Accumulate per-cpu deltas between the shifted (historic) and current
    # cpu-time snapshots; cpus without history are skipped.
    time_in_state = 0
    time_total = 0
    for cpu in cpus:
        shifted_cpu_times = host_os.cpu_times_shifted(cpu, AVERAGE_MODE[mode])
        if shifted_cpu_times is not None:
            current_cpu_times = host_os.cpu_times(cpu)
            cpu_time_in_state = (current_cpu_times._asdict()[state] -
                                 shifted_cpu_times._asdict()[state])
            cpu_time_total = (sum(current_cpu_times) - sum(shifted_cpu_times))
            time_in_state += cpu_time_in_state
            time_total += cpu_time_total
    # Guard against division by zero when no time elapsed / no history.
    return (((time_in_state * 100) / time_total)
            if time_total != 0
            else 0.0)
import scipy
def fitgaussian(data):
    """Fit a 2D gaussian to *data* by least squares.

    Returns (height, x, y, width_x, width_y, angle) — the gaussian
    parameters found by the fit — together with the
    ``scipy.optimize.leastsq`` success flag.
    """
    initial_guess = moments(data)

    def residuals(p):
        return np.ravel(gaussian(*p)(*np.indices(data.shape)) - data)

    fitted_params, success = scipy.optimize.leastsq(residuals, initial_guess)
    return fitted_params, success
def retina_debug():
    """
    Return a dictionary with parameters for the retina suitable for debugging.
    """
    params = retina_default()
    # Shrink to a tiny retina and tag it so debug runs are recognizable.
    params['description'] = 'debug retina'
    params['N'] = 8
    return params
def argsum(*args):
"""sum of all arguments"""
return sum(args) | 2445ef4f3fc321b3eae1997a8c44c628cd72d70a | 3,628,395 |
import multiprocessing
def fit_data_multi_files(dir_path, file_prefix, param, start_i, end_i, interpath="entry/instrument/detector/data"):
    """
    Fitting for multiple files with Multiprocessing.

    Parameters
    ----------
    dir_path : str
    file_prefix : str
    param : dict
    start_i : int
        start id of given file
    end_i: int
        end id of given file
    interpath : str
        path inside hdf5 file to fetch the data

    Returns
    -------
    result : list
        fitting result as list of dict
    """
    num_processors_to_use = multiprocessing.cpu_count()
    logger.info("cpu count: {}".format(num_processors_to_use))
    pool = multiprocessing.Pool(num_processors_to_use)
    # One async task per file index; range is inclusive of end_i.
    result_pool = [
        pool.apply_async(fit_pixel_per_file_no_multi, (dir_path, file_prefix, m, param, interpath))
        for m in range(start_i, end_i + 1)
    ]
    # Collect results in submission order (each get() blocks until done).
    results = []
    for r in result_pool:
        results.append(r.get())
    # All results are already fetched, so terminating the pool is safe here.
    pool.terminate()
    pool.join()
    return results
def mark_up_series(
    issues: pd.DataFrame, series: str, area_of_testing: str, patterns: str
) -> pd.DataFrame:
    """ Appends binarized series to df.

    Parameters:
    ----------
    issues:
        Bug reports.
    series:
        df series name.
    area_of_testing:
        area of testing.
    patterns:
        searching elements.

    Returns:
    --------
    The whole df with binarized series.
    """
    # Bug fix: the original called ``pool.apply_async(...).get()`` inside a
    # comprehension, blocking on every task in turn and thereby running
    # sequentially; ``starmap`` dispatches all tasks before collecting and
    # preserves input order.
    with Pool() as pool:
        issues[area_of_testing] = pool.starmap(
            binarize_value, [(el, patterns) for el in issues[series]]
        )
    return issues
def get_exp_parameters():
    """
    Build an argument parser pre-populated with the default values for the
    hyper-parameters of the experiment.

    :return: The ``ArgumentParser`` itself (note: not a parsed dictionary;
        callers must call ``parse_args`` on it).
    :rtype: argparse.ArgumentParser
    """
    parser = ArgumentParser(add_help=False)
    # Experiment / training loop settings.
    parser.add_argument('--use_optuna', type=bool, default=False)
    parser.add_argument('--log_artificat', type=bool, default=False)
    parser.add_argument('--max_nb_epochs', type=int, default=20)
    parser.add_argument('--n_trials', type=int, default=3)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--clip_value', type=int, default=10)
    parser.add_argument('--learning_rate', default=2e-5, type=float)
    parser.add_argument('--eps', default=1e-8, type=float)
    # Model architecture.
    parser.add_argument('--dropout', default=0.25, type=float)
    parser.add_argument('--pool_filter', default=16, type=int)
    parser.add_argument('--kernel_size', default=5, type=int)
    parser.add_argument('--stride', default=2, type=int)
    parser.add_argument('--features_start', default=16, type=int)
    parser.add_argument('--latent_size', default=64, type=int)
    parser.add_argument('--num_gauss', default=5, type=int)
    parser.add_argument('--min_std', default=0.1, type=float)
    parser.add_argument('--n_layers', default=4, type=int)
    parser.add_argument('--out_size', default=1, type=int)
    parser.add_argument('--in_size', default=99, type=int)
    parser.add_argument('--multi_appliance', default=False, type=bool)
    parser.add_argument('--custom_preprocess', default=None, type=int)
    parser.add_argument('--custom_postprocess', default=None, type=int)
    parser.add_argument('--patience_optim', default=5, type=int)
    parser.add_argument('--patience_check', default=5, type=int)
    parser.add_argument('--num_layer', default=3, type=int)
    parser.add_argument('--experiment_label', default='', type=str)
    # Optimizer / scheduler.
    parser.add_argument('--optimizer', type=str, default='adam', choices=['sgd', 'adam', 'adamw'])
    parser.add_argument('--weight_decay', type=float, default=0.)
    parser.add_argument('--momentum', type=float, default=None)
    parser.add_argument('--decay_step', type=int, default=100)
    parser.add_argument('--gamma', type=float, default=0.1)
    # Data pre/post-processing.
    parser.add_argument('--feature_type', default="combined", type=str)
    parser.add_argument('--alpha', default=0.1, type=float)
    parser.add_argument('--seed', default=3407, type=float)
    parser.add_argument('--main_mu', default=150.0, type=float)
    parser.add_argument('--main_std', default=350.0, type=float)
    parser.add_argument('--input_norm', default="z-norm", type=str)
    parser.add_argument('--q_filter', default=None, type=dict)
    parser.add_argument('--sample_second', default=6, type=int)
    parser.add_argument('--seq_type', default="seq2point", type=str)
    parser.add_argument('--point_position', default="last_position", type=str)
    parser.add_argument('--target_norm', default="lognorm", type=str)
    parser.add_argument('--threshold_method', default="at", type=str)
    parser.add_argument('--train', default=1, type=int)
    parser.add_argument('--kfolds', default=1, type=int)
    parser.add_argument('--gap', default=0, type=int)
    parser.add_argument('--test_size', default=None, type=int)
    parser.add_argument('--z_dim', default=10, type=int)
    parser.add_argument('--hidden_dim', default=128, type=int)
    parser.add_argument('--model_name', default=None, type=str)
    parser.add_argument('--mdn_dist_type', default="normal", type=str)
    parser.add_argument('--data', default="UKDALE", type=str)
    parser.add_argument('--quantiles', default=[0.1, 0.25, 0.5, 0.75, 0.9], type=list)
    # Output locations.
    parser.add_argument('--logs_path', default="logs", type=str)
    # parser.add_argument('--results_path', default="results\\", type=str)
    parser.add_argument('--figure_path', default="figures", type=str)
    parser.add_argument('--checkpoints_path', default="checkpoints", type=str)
    parser.add_argument('--num_workers', default=0, type=int)
    parser.add_argument('--version', default=0, type=int)
    parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
    return parser
from typing import List
from typing import Dict
from typing import Tuple
from typing import Callable
def enum_options(values: List[Dict]) -> Tuple[List[str], int, Callable]:
    """Enumerate options of a enum parameter for display in a select box.

    Returns a 3-tuple containing the list of option values, the list index
    of the default option, and a function mapping an option value to its
    display name (identifier).

    Parameters
    ----------
    values: list of dict
        List of enumeration values from the parameter declaration.

    Returns
    -------
    (list, int, callable)
    """
    options = [obj['value'] for obj in values]
    name_by_value = {obj['value']: obj['name'] for obj in values}
    default_index = 0
    for position, obj in enumerate(values):
        if obj.get('isDefault', False):
            default_index = position

    def mapfunc(value: str) -> str:
        """Mapping for option values to their identifier."""
        return name_by_value[value]

    return options, default_index, mapfunc
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.