content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import plotly.graph_objects as go

def get_key_metrics_fig(confirmed_ser, recovered_ser, deaths_ser, metric_type):
"""
Return key metrics graph object figure
Parameters
----------
confirmed_ser: pandas.Series
Confirmed pandas series objects with index=dates,
values=number of cases
recovered_ser: pandas.Series
Recovered pandas series objects with index=dates,
values=number of cases
deaths_ser: pandas.Series
Deaths pandas series objects with index=dates,
values=number of cases
metric_type: str
One of ['cumulative', 'new']
"""
fig = go.Figure()
if metric_type == 'cumulative':
mode = 'number+delta'
delta_confirmed = {
'reference': confirmed_ser.values[-2],
'relative': False,
'position': "bottom",
'valueformat': ">,d",
'increasing.color': 'blue',
'increasing.symbol': '+'
}
delta_recovered = {
'reference': recovered_ser.values[-2],
'relative': False,
'position': "bottom",
'valueformat': ">,d",
'increasing.color': 'green',
'increasing.symbol': '+'
}
delta_deaths = {
'reference': deaths_ser.values[-2],
'relative': False,
'position': "bottom",
'valueformat': ">,d",
'increasing.color': 'red',
'increasing.symbol': '+'
}
elif metric_type == 'new':
mode = 'number'
delta_confirmed = None
delta_recovered = None
delta_deaths = None
fig.add_trace(go.Indicator(
mode=mode,
value=confirmed_ser.values[-1],
number={
"valueformat": ">,d",
'font': {
'size': 60,
'color': 'blue',
}
},
domain={'row': 0, 'column': 0},
title={
'text': 'Confirmed',
'font': {
'size': 24,
'color': 'blue',
}
},
delta=delta_confirmed))
fig.add_trace(go.Indicator(
mode=mode,
value=recovered_ser.values[-1],
number={
"valueformat": ">,d",
'font': {
'size': 60,
'color': 'green',
}
},
domain={'row': 0, 'column': 1},
title={
'text': 'Recovered',
'font': {
'size': 24,
'color': 'green',
}
},
delta=delta_recovered))
fig.add_trace(go.Indicator(
mode=mode,
value=deaths_ser.values[-1],
number={
"valueformat": ">,d",
'font': {
'size': 60,
'color': 'red',
}
},
domain={'row': 0, 'column': 2},
title={
'text': 'Deaths',
'font': {
'size': 24,
'color': 'red',
}
},
delta=delta_deaths))
fig.update_layout(
grid={'rows': 1, 'columns': 3},
autosize=True,
# width=500,
height=300,
# margin={'t': 100, 'b': 100, 'l': 0, 'r': 0}
)
return fig | a11130a21d124c2bf44a02d9c3c58bcde5a326cb | 3,636,900 |
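# A minimal usage sketch for get_key_metrics_fig (hypothetical data; requires pandas and plotly).
import pandas as pd

dates = pd.date_range("2020-03-01", periods=3)
confirmed = pd.Series([100, 150, 210], index=dates)
recovered = pd.Series([10, 30, 60], index=dates)
deaths = pd.Series([1, 2, 4], index=dates)

fig = get_key_metrics_fig(confirmed, recovered, deaths, metric_type="cumulative")
# fig.show()  # three indicator tiles showing the latest values, with deltas vs. the previous day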
from gluon import current
from gluon.html import A, DIV, I, IMG, LI, SPAN, TAG, UL, URL

T = current.T  # web2py translator, provided by the framework at request time
def cms_post_popup(r):
"""
Customized Map popup for cms_post resource
- style like the cards
- currently unused
"""
record = r.record
pkey = "cms_post.id"
# Construct the item ID
map_id = "default_map" # @ToDo: provide the map_id as a var in order to be able to support multiple maps
record_id = record[pkey]
item_id = "%s-%s" % (map_id, record_id)
item_class = "thumbnail"
db = current.db
table = db.cms_post
series = table.series_id.represent(record.series_id)
date = table.date.represent(record.date)
body = record.body
location_id = record.location_id
location = table.location_id.represent(location_id)
location_url = URL(c="gis", f="location", args=[location_id])
author_id = record.created_by
author = table.created_by.represent(author_id)
s3db = current.s3db
ltable = s3db.pr_person_user
ptable = db.pr_person
query = (ltable.user_id == author_id) & \
(ltable.pe_id == ptable.pe_id)
row = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if row:
person_url = URL(c="hrm", f="person", args=[row.id])
else:
person_url = "#"
author = A(author,
_href=person_url,
)
utable = db.auth_user
otable = db.org_organisation
query = (utable.id == author_id) & \
(otable.id == utable.organisation_id)
row = db(query).select(otable.id,
otable.name,
otable.logo,
limitby=(0, 1)
).first()
if row:
organisation_id = row.id
organisation = row.name
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
logo = URL(c="default", f="download", args=[row.logo])
else:
organisation_id = 0
organisation = ""
org_url = ""
logo = ""
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="cms", f="post",
args=[record_id, "update.popup"],
#vars={"refresh": listid,
# "record": record_id}
),
_class="s3_modal",
_title=T("Edit %(type)s") % dict(type=T(series)),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-remove-sign"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Dropdown of available documents
dtable = db.doc_document
query = (table.doc_id == dtable.doc_id) & \
(dtable.deleted == False)
documents = db(query).select(dtable.file)
if documents:
doc_list = UL(_class="dropdown-menu",
_role="menu",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
filename = doc.file
try:
doc_name = retrieve(filename)[0]
except IOError:
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[filename])
doc_item = LI(A(I(_class="icon-file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
doc_list.append(doc_item)
docs = DIV(A(I(_class="icon-paper-clip"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
docs = ""
icon = series.lower().replace(" ", "_")
card_label = TAG[""](I(_class="icon icon-%s" % icon),
SPAN(" %s" % T(series),
_class="card-title"))
# Type cards
if series == "Incident":
# Apply additional highlighting for Incidents
item_class = "%s disaster" % item_class
# Render the item
item = DIV(DIV(card_label,
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
#edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
docs,
_class=item_class,
_id=item_id,
)
return item | c0ee3430d1fbee769c708df93f03748ab3c0554b | 3,636,901 |
from typing import List
def pr_curve(results: List[TrecEvalResults]) -> plt:
"""
Create a precision-recall graph from trec_eval results.
:param results: A list of TrecEvalResults files.
:return: a matplotlib plt object
"""
names = [r.run_id for r in results]
iprec = [[r.results['iprec_at_recall_0.00'],
r.results['iprec_at_recall_0.10'],
r.results['iprec_at_recall_0.20'],
r.results['iprec_at_recall_0.30'],
r.results['iprec_at_recall_0.40'],
r.results['iprec_at_recall_0.50'],
r.results['iprec_at_recall_0.60'],
r.results['iprec_at_recall_0.70'],
r.results['iprec_at_recall_0.80'],
r.results['iprec_at_recall_0.90'],
r.results['iprec_at_recall_1.00']] for r in results]
recall = np.arange(0, 1.1, 0.1)
mpl.rc('xtick', labelsize=35)
mpl.rc('ytick', labelsize=35)
plt.xlabel('Recall', fontsize=35)
plt.ylabel('Interpolated Precision', fontsize=35)
for p in iprec:
plt.plot(recall, p, linewidth=10)
plt.legend(names, fontsize=35)
return plt | 90f1e3234304fa7966b93ebdc76235e5356002e6 | 3,636,902 |
def plotData(datalist, part = "real", progressive = True, color = None, clip = False, tcutoff = None):
"""Plot real or imaginary parts of a given list of functions.
arguments:
datalist (list of tuples, each tuple of form (xlist,ylist)): data to plot;
xlist should be real numbers, ylist can be complex
part (str): "real" or "imag"; determines which part of ylist we use
progressive (bool): if True, alphalevel starts at 0 and increases linearly
as we go through datalist
color (str): a color supported by axes.plot; if None then gets set to "blue" or "red"
for real/imag parts respectively; if "rainbow" then color varies from blue to green
as we go through datalist
clip (bool): remove first and last items before plotting
tcutoff (float): if given, only points with abs(t) <= tcutoff are plotted"""
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
alphalevel = 0.0
if color is None:
if part == "real":
color = "blue"
if part == "imag":
color = "red"
if color == "rainbow":
red = 0.5
green = 0.0
blue = 1.0
# iterate over list of functions
n = len(datalist)
for k,data in enumerate(datalist):
if tcutoff is not None:
tlist,zlist = zip(*[[t,z] for t,z in zip(*data) if abs(t) <= tcutoff])
else:
tlist,zlist = data
if clip:
tlist = tlist[1:-1]
zlist = zlist[1:-1]
if part == "real":
ylist = [z.real for z in zlist]
if part == "imag":
ylist = [z.imag for z in zlist]
# if "progressive" is set then vary alphalevel as we go through datalist
if progressive:
alphalevel += 1.0 / n
alphalevel = min(alphalevel, 1.0)
else:
alphalevel = 1.0
if color == "rainbow":
hue = float(k) / n
currentcolor = colors.hsv_to_rgb([hue, 0.8, 0.8])
else:
currentcolor = color
# now plot the points
axes.plot(tlist, ylist, color=currentcolor, alpha=alphalevel)
return fig | 5e328c19fe63389d7f2d55e5fd5d75d2dd5d7c24 | 3,636,903 |
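# A minimal usage sketch for plotData on synthetic data. The function body references
# `plt` (matplotlib.pyplot) and `colors` (matplotlib.colors) as module-level names, so
# those imports are assumed here.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors

t = np.linspace(0.0, 2.0 * np.pi, 200)
datalist = [(t, np.exp(1j * k * t)) for k in range(1, 6)]

fig = plotData(datalist, part="imag", color="rainbow", clip=True)
# fig.savefig("partial_waves.png")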
def train_and_test(model, dataset, robustness_tests=None, base_config_dict=None, save_model=True):
"""
Train a recommendation model and run robustness tests.
Args:
model (str): Name of model to be trained.
dataset (str): Dataset name; must match the dataset's folder name located in 'data_path' path.
robustness_tests (dict): Configuration dictionary for robustness tests.
base_config_dict: Configuration dictionary. If no config is passed, default values are used.
save_model (bool): Whether to externally save the model after training.
Returns:
dict: Test results keyed by 'test_result', 'distributional_test_result',
'transformation_test_result', 'sparsity_test_result' and 'slice_test_result'.
"""
config_dict = get_config_dict(robustness_tests, base_config_dict)
config = Config(model=model, dataset=dataset, config_dict=config_dict)
init_seed(config['seed'], config['reproducibility'])
logger = getLogger()
if len(logger.handlers) != 0:
logger.removeHandler(logger.handlers[1])
init_logger(config)
logger.info(config)
# dataset filtering
dataset = create_dataset(config)
logger.info(dataset)
# dataset splitting
train_data, valid_data, test_data, robustness_testing_data = data_preparation(config, dataset, save=True)
for robustness_test in robustness_testing_data:
if robustness_testing_data[robustness_test] is not None:
logger.info(set_color('Robustness Test', 'yellow') + f': {robustness_test}')
# model loading and initialization
model = get_model(config['model'])(config, train_data).to(config['device'])
logger.info(model)
# trainer loading and initialization
trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, model)
# model training
best_valid_score, best_valid_result = trainer.fit(
train_data, valid_data, saved=save_model, show_progress=config['show_progress']
)
# model evaluation
test_result = trainer.evaluate(test_data, load_best_model=save_model,
show_progress=config['show_progress'])
logger.info(set_color('best valid ', 'yellow') + f': {best_valid_result}')
logger.info(set_color('test result', 'yellow') + f': {test_result}')
test_result_transformation, test_result_sparsity, \
test_result_slice, test_result_distributional_slice = None, None, None, None
if robustness_testing_data['slice'] is not None:
test_result_slice = trainer.evaluate(robustness_testing_data['slice'], load_best_model=save_model,
show_progress=config['show_progress'])
logger.info(set_color('test result for slice', 'yellow') + f': {test_result_slice}')
if robustness_testing_data['distributional_slice'] is not None:
test_result_distributional_slice = trainer.evaluate(robustness_testing_data['distributional_slice'],
load_best_model=save_model,
show_progress=config['show_progress'])
logger.info(set_color('test result for distributional slice', 'yellow') + f': '
f'{test_result_distributional_slice}')
if robustness_testing_data['transformation_test'] is not None:
test_result_transformation = trainer.evaluate(robustness_testing_data['transformation_test'],
load_best_model=save_model,
show_progress=config['show_progress'])
logger.info(set_color('test result for transformation on test', 'yellow') + f': {test_result_transformation}')
if robustness_testing_data['transformation_train'] is not None:
transformation_model = get_model(config['model'])(config, robustness_testing_data['transformation_train']).to(
config['device'])
logger.info(transformation_model)
transformation_trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, transformation_model)
best_valid_score_transformation, best_valid_result_transformation = transformation_trainer.fit(
robustness_testing_data['transformation_train'], valid_data, saved=save_model,
show_progress=config['show_progress'])
test_result_transformation = transformation_trainer.evaluate(test_data, load_best_model=save_model,
show_progress=config['show_progress'])
logger.info(
set_color('best valid for transformed training set', 'yellow') + f': {best_valid_result_transformation}')
logger.info(set_color('test result for transformed training set', 'yellow') + f': {test_result_transformation}')
if robustness_testing_data['sparsity'] is not None:
sparsity_model = get_model(config['model'])(config, robustness_testing_data['sparsity']).to(config['device'])
logger.info(sparsity_model)
sparsity_trainer = get_trainer(config['MODEL_TYPE'], config['model'])(config, sparsity_model)
best_valid_score_sparsity, best_valid_result_sparsity = sparsity_trainer.fit(
robustness_testing_data['sparsity'], valid_data, saved=save_model,
show_progress=config['show_progress'])
test_result_sparsity = sparsity_trainer.evaluate(test_data, load_best_model=save_model,
show_progress=config['show_progress'])
logger.info(set_color('best valid for sparsified training set', 'yellow') + f': {best_valid_result_sparsity}')
logger.info(set_color('test result for sparsified training set', 'yellow') + f': {test_result_sparsity}')
logger.handlers.clear()
shutdown()
del logger
return {
'test_result': test_result,
'distributional_test_result': test_result_distributional_slice,
'transformation_test_result': test_result_transformation,
'sparsity_test_result': test_result_sparsity,
'slice_test_result': test_result_slice
} | 3681bc732b2b39837c871267f4659825105d6e2b | 3,636,904 |
def total_cost(content_cost, style_cost, alpha, beta):
"""Return a tensor representing the total cost."""
return alpha * content_cost + beta * style_cost | 98d42bd8d62dc8cd7110b2f5eb9a9a4e4eb6bc65 | 3,636,905 |
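# A small numeric check of total_cost (hypothetical weights): with alpha=10 and beta=40,
# a content cost of 0.5 and a style cost of 2.0 give 10*0.5 + 40*2.0 = 85.0.
print(total_cost(0.5, 2.0, alpha=10, beta=40))  # 85.0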
def create_command_using_pip_action(
num_bash_entries=10, uninstall_use_creation_time=False, skip=0):
"""Create commands using latest pip action."""
valid_pip_commands = get_valid_pip_history(num_bash_entries)[skip:]
assert valid_pip_commands, 'No undoable pip commands.'
last_valid_pip_command = valid_pip_commands[0]
last_valid_pip_action = last_valid_pip_command.split()[1]
command = ''
if uninstall_use_creation_time:
command = 'pip uninstall -y {}'.format(get_uninstall_candidates())
elif last_valid_pip_action == 'install':
command = create_command_using_packages(get_pip_command_packages(
last_valid_pip_command))
elif last_valid_pip_action == 'uninstall':
command = 'pip install {}'.format(get_reinstall_candidates())
elif last_valid_pip_action == 'download':
command = 'rm {}'.format(get_file_candidates())
assert command, 'No undoable pip commands.'
return command | 5ba22f63d33c1ae60ec3a93590965453885e5e29 | 3,636,906 |
def extract_word_pos_sequences(form, unknown_category, morpheme_splitter=None, extract_morphemes=False):
"""Return the unique word-based pos sequences, as well as (possibly) the morphemes, implicit in the form.
:param form: a form model object
:param morpheme_splitter: callable that splits a strings into its morphemes and delimiters
:param str unknown_category: the string used in syntactic category strings when a morpheme-gloss pair is unknown
:param extract_morphemes: if True, also extract and return the morphemes as (pos, (morpheme, gloss)) tuples.
:returns: 2-tuple: (set of pos/delimiter sequences, list of morphemes as (pos, (mb, mg)) tuples).
"""
if not form.syntactic_category_string:
return None, None
morpheme_splitter = morpheme_splitter or get_morpheme_splitter()
pos_sequences = set()
morphemes = []
sc_words = form.syntactic_category_string.split()
mb_words = form.morpheme_break.split()
mg_words = form.morpheme_gloss.split()
for sc_word, mb_word, mg_word in zip(sc_words, mb_words, mg_words):
pos_sequence = tuple(morpheme_splitter(sc_word))
if unknown_category not in pos_sequence:
pos_sequences.add(pos_sequence)
if extract_morphemes:
morpheme_sequence = morpheme_splitter(mb_word)[::2]
gloss_sequence = morpheme_splitter(mg_word)[::2]
for pos, morpheme, gloss in zip(pos_sequence[::2], morpheme_sequence, gloss_sequence):
morphemes.append((pos, (morpheme, gloss)))
return pos_sequences, morphemes | e08c285910c4da2f827f81ac65abc2ee3d62b1dc | 3,636,907 |
def model_test(Py, Px_y, testDataArr, testLabelArr):
"""
Test the model.
@Args:
Py: prior probability distribution
Px_y: conditional probability distribution
testDataArr: test-set data
testLabelArr: test-set labels
@Returns:
accuracy on the test set
@Raise:
"""
# error counter
errorCnt = 0
# iterate over every sample in the test set
for i in range(len(testDataArr)):
# get the predicted label
predict = NaiveBayes(Py, Px_y, testDataArr[i])
if predict != testLabelArr[i]:
errorCnt += 1
# return the accuracy
return 1 - (errorCnt / len(testDataArr)) | f1e06725511750c79e34dc528728aab8cebc7c34 | 3,636,908 |
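# A minimal sketch exercising model_test with a hypothetical stand-in for the external
# NaiveBayes(Py, Px_y, x) predictor (here it simply thresholds the input at 0.5).
def NaiveBayes(Py, Px_y, x):
    return int(x >= 0.5)

testData = [0.1, 0.4, 0.6, 0.9]
testLabels = [0, 0, 1, 0]
print(model_test(None, None, testData, testLabels))  # 0.75 (one of four predictions is wrong)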
from typing import Optional
import torch
def int2c2e(shortname: str, wrapper: LibcintWrapper,
other: Optional[LibcintWrapper] = None) -> torch.Tensor:
"""
2-centre 2-electron integrals where `wrapper` corresponds to the first
electron and `other` corresponds to the second electron.
The returned indices are sorted based on `wrapper` and `other`.
The available shortname: "ar12"
"""
# don't really care, it will be ignored
rinv_pos = torch.zeros(1, dtype=wrapper.dtype, device=wrapper.device)
# check and set the others
otherw = _check_and_set(wrapper, other)
return _Int2cFunction.apply(
*wrapper.params,
rinv_pos,
[wrapper, otherw],
IntorNameManager("int2c2e", shortname)) | 1f5b6c70c8373c885103d6deb7cba77ea8d0aa73 | 3,636,909 |
def send_group_membership_request(request, group_id, template='group_send_request.html'):
"""
Send membership request to the administrator
of a private group.
"""
if request.method == 'POST':
form = GroupMembershipRequestForm(request.POST)
if form.is_valid():
group = Group.objects.get(pk=group_id)
form.save(user=request.user, group=group)
return redirect('group:group_list')
form = GroupMembershipRequestForm()
return render(request, template, {'form': form}) | 22324765a915e1677fb6abab41dfa214fcc05d40 | 3,636,910 |
import sys
import os
import argparse
import logging
def arg_parse():
"""Base all default folders from script location
scripts: ./pymetric/tools/cimis
tools: ./pymetric/tools
output: ./pymetric/cimis
"""
script_folder = sys.path[0]
code_folder = os.path.dirname(script_folder)
project_folder = os.path.dirname(code_folder)
cimis_folder = os.path.join(project_folder, 'cimis')
asc_folder = os.path.join(cimis_folder, 'input_asc')
img_folder = os.path.join(cimis_folder, 'input_img')
parser = argparse.ArgumentParser(
description='CIMIS extract/convert',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--start', required=True, type=_utils.valid_date, metavar='YYYY-MM-DD',
help='Start date')
parser.add_argument(
'--end', required=True, type=_utils.valid_date, metavar='YYYY-MM-DD',
help='End date')
parser.add_argument(
'--ascii', default=asc_folder, metavar='PATH',
help='Input ascii root folder path')
parser.add_argument(
'--img', default=img_folder, metavar='PATH',
help='Output IMG raster folder path')
parser.add_argument(
'--stats', default=False, action="store_true",
help='Compute raster statistics')
parser.add_argument(
'-o', '--overwrite', default=False, action="store_true",
help='Force overwrite of existing files')
parser.add_argument(
'-d', '--debug', default=logging.INFO, const=logging.DEBUG,
help='Debug level logging', action="store_const", dest="loglevel")
args = parser.parse_args()
# Convert relative paths to absolute paths
if args.ascii and os.path.isdir(os.path.abspath(args.ascii)):
args.ascii = os.path.abspath(args.ascii)
if args.img and os.path.isdir(os.path.abspath(args.img)):
args.img = os.path.abspath(args.img)
return args | 05e5d5d33a475e25a76267c3aa8812f11084d9a1 | 3,636,911 |
def _dataset_type_dir(signer):
"""Returns the directory name of the corresponding dataset type.
There is a `TFRecord` file written for each of the 25 signers. The `TFRecord` files of the first 17 signers are
assigned to the train dataset, the `TFRecord` files of the next 4 signers are assigned to the validation dataset,
and the `TFRecord` files of the last 4 signers are assigned to the test dataset.
Arguments:
signer: The index of the signer.
Returns:
The directory name of the corresponding dataset type.
"""
if signer > 20:
return DatasetType.TEST.value
elif signer > 16:
return DatasetType.VALIDATION.value
else:
return DatasetType.TRAIN.value | 515fd2e0871cf9549f3f724da0b45bb07f09e24b | 3,636,912 |
def _merge_blanks(src, targ, verbose=False):
"""Read parallel corpus 2 lines at a time.
Merge both sentences if only either source or target has blank 2nd line.
If both have blank 2nd lines, then ignore.
Returns tuple (src_lines, targ_lines), arrays of strings sentences.
"""
merges_done = [] # array of indices of rows merged
sub = None # replace sentence after merge
with open(src, 'rb') as src_file, open(targ, 'rb') as targ_file:
src_lines = src_file.readlines()
targ_lines = targ_file.readlines()
print("src: %d, targ: %d" % (len(src_lines), len(targ_lines)))
print("=" * 30)
for i in range(0, len(src_lines) - 1):
s = src_lines[i].decode().rstrip()
s_next = src_lines[i + 1].decode().rstrip()
t = targ_lines[i].decode().rstrip()
t_next = targ_lines[i + 1].decode().rstrip()
if t == '.':
t = ''
if t_next == '.':
t_next = ''
if (len(s_next) == 0) and (len(t_next) > 0):
targ_lines[i] = "%s %s" % (t, t_next) # assume it has punctuation
targ_lines[i + 1] = b''
src_lines[i] = s if len(s) > 0 else sub
merges_done.append(i)
if verbose:
print("t [%d] src: %s\n targ: %s" % (i, src_lines[i], targ_lines[i]))
print()
elif (len(s_next) > 0) and (len(t_next) == 0):
src_lines[i] = "%s %s" % (s, s_next) # assume it has punctuation
src_lines[i + 1] = b''
targ_lines[i] = t if len(t) > 0 else sub
merges_done.append(i)
if verbose:
print("s [%d] src: %s\n targ: %s" % (i, src_lines[i], targ_lines[i]))
print()
elif (len(s) == 0) and (len(t) == 0):
# both blank -- remove
merges_done.append(i)
else:
src_lines[i] = s if len(s) > 0 else sub
targ_lines[i] = t if len(t) > 0 else sub
# handle last line
s_last = src_lines[-1].decode().strip()
t_last = targ_lines[-1].decode().strip()
if (len(s_last) == 0) and (len(t_last) == 0):
merges_done.append(len(src_lines) - 1)
else:
src_lines[-1] = s_last
targ_lines[-1] = t_last
# remove empty sentences
for m in reversed(merges_done):
del src_lines[m]
del targ_lines[m]
print("merges done: %d" % len(merges_done))
return (src_lines, targ_lines) | fe5f765022b2b4de5320272701148cc9f8e691b8 | 3,636,913 |
import codecs
def get_line(file_path, line_rule):
"""
Search the given file and return its content from a start line to an end line
:param file_path: path of the file to read
:param line_rule: line-range rule of the form "start,end<suffix>" (the last character of the end field is stripped)
:return: list of the matching lines
"""
s_line = int(line_rule.split(',')[0])
e_line = int(line_rule.split(',')[1][:-1])
result = []
# with open(file_path) as file:
file = codecs.open(file_path, "r", encoding='utf-8', errors='ignore')
line_number = 0
for line in file:
line_number += 1
if s_line <= line_number <= e_line:
result.append(line)
return result | a6ccda48f8083e5ff6827306f4abd7f19e8d445c | 3,636,914 |
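# A hypothetical call to get_line: read lines 10 through 20 of "app.py". Note the rule
# format "start,end<char>": the parser strips the final character of the end field.
lines = get_line("app.py", "10,20:")
print("".join(lines))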
def _generate_odd_sequence(sequence_id: int, start_value: int,
k_factor: int, max_iterations: int):
"""
This method generates a Collatz sequence containing only odd numbers.
:param sequence_id: ID of the sequence.
:param start_value: The integer value to start with. The value must be a
natural number > 0. If an even number is handed over, the next odd number will be used
as start value.
:param k_factor: The factor by which odd numbers are multiplied in the sequence.
:param max_iterations: The maximum number of iterations performed
before the method exits.
:return: The Collatz sequence as a pandas data frame.
"""
odds = commons.odd_collatz_sequence(start_value, k_factor, max_iterations=max_iterations)
next_odds = odds[1:]
odds.pop()
collatz_frame = pd.DataFrame({"v_i": odds})
collatz_frame["sequence_id"] = sequence_id
collatz_frame["sequence_len"] = len(collatz_frame)
collatz_frame["n"] = collatz_frame.index + 1
collatz_frame["k_factor"] = k_factor
collatz_frame["v_1"] = start_value
collatz_frame["kv_i+1"] = collatz_frame["v_i"].apply(
commons.next_collatz_number, args=(k_factor,))
collatz_frame["v_i+"] = next_odds
collatz_frame["terminal"] = collatz_frame["v_i+"] == 1
collatz_frame["cycle"] = collatz_frame["v_i+"] == collatz_frame["v_1"]
# Logs
collatz_frame["v_i_log2"] = collatz_frame["v_i"].apply(log2)
collatz_frame["kv_i+1_log2"] = collatz_frame["kv_i+1"].apply(log2)
collatz_frame["v_i+_log2"] = collatz_frame["v_i+"].apply(log2)
# Binary strings
collatz_frame["v_1_bin"] = collatz_frame["v_1"].apply(commons.to_binary)
collatz_frame["v_i_bin"] = collatz_frame["v_i"].apply(commons.to_binary)
# Mods
collatz_frame["v_i_mod4"] = collatz_frame["v_i"] % 4
collatz_frame["kv_i+1_mod4"] = collatz_frame["kv_i+1"] % 4
collatz_frame["v_i+_mod4"] = collatz_frame["v_i+"] % 4
# Alpha
collatz_frame["alpha_i"] = collatz_frame["kv_i+1"].apply(commons.trailing_zeros)
collatz_frame["alpha_i"] = collatz_frame["alpha_i"].astype('int64')
collatz_frame["alpha_i_max"] = log2(k_factor) + collatz_frame["v_i"].apply(log2)
collatz_frame["alpha_i_max"] += (1 + 1 / (k_factor * collatz_frame["v_i"])).apply(log2)
# Round result here to avoid loss of precision errors
collatz_frame["alpha_i_max"] = collatz_frame["alpha_i_max"].round(9)
collatz_frame["alpha"] = collatz_frame["alpha_i"].cumsum()
collatz_frame["alpha_cycle"] = (log2(k_factor) * collatz_frame["n"]).astype('int64') + 1
collatz_frame["alpha_max"] = log2(start_value) + (collatz_frame["n"] * log2(k_factor))
collatz_frame["alpha_max"] = collatz_frame["alpha_max"].astype('int64') + 1
# Beta
collatz_frame["beta_i"] = 1 + 1 / (k_factor * collatz_frame["v_i"])
collatz_frame["beta"] = collatz_frame["beta_i"].cumprod()
# Lambda
collatz_frame["bin_len"] = collatz_frame["v_i_log2"].astype('int64') + 1
collatz_frame["next_bin_len"] = collatz_frame["kv_i+1_log2"].astype('int64') + 1
collatz_frame["bin_diff"] = collatz_frame["next_bin_len"] - collatz_frame["bin_len"]
collatz_frame["lambda_i"] = collatz_frame["bin_diff"]
collatz_frame.loc[collatz_frame["lambda_i"] < 0, "lambda_i"] = 0
collatz_frame["lambda"] = collatz_frame["lambda_i"].cumsum()
collatz_frame["lambda_i_min"] = int(log2(k_factor))
collatz_frame["lambda_i_max"] = int(log2(k_factor) + 1)
collatz_frame["lambda_hyp"] = (collatz_frame["n"] * log2(k_factor))
collatz_frame["lambda_min"] = collatz_frame["lambda_hyp"].astype('int64')
collatz_frame["lambda_max"] = collatz_frame["lambda_hyp"].astype('int64') + 2
# Omega
collatz_frame["omega_i"] = collatz_frame["lambda_i"] - collatz_frame["alpha_i"]
collatz_frame["omega"] = collatz_frame["lambda"] - collatz_frame["alpha"]
collatz_frame["omega_i_max"] = collatz_frame["lambda_i_max"] - 1
collatz_frame["omega_max"] = collatz_frame["lambda_max"] - collatz_frame["n"]
result_frame = collatz_frame[[
"sequence_id", "sequence_len", "n", "k_factor", "v_1",
"v_i", "kv_i+1", "v_i+", "v_i_log2", "v_i+_log2", "kv_i+1_log2",
"v_i_mod4", "kv_i+1_mod4", "v_i+_mod4",
"v_1_bin", "v_i_bin", "terminal", "cycle",
"alpha_i", "alpha_i_max", "alpha", "alpha_cycle", "alpha_max",
"beta_i", "beta", "bin_len", "next_bin_len",
"lambda_i", "lambda_i_min", "lambda_i_max",
"lambda", "lambda_min", "lambda_max",
"omega_i", "omega_i_max", "omega", "omega_max"]]
result_frame.columns = [
"sequence_id", "sequence_len", "n", "k", "v_1",
"v_i", "kv_i+1", "v_i+", "v_i_log2", "v_i+_log2", "kv_i+1_log2",
"v_i_mod4", "kv_i+1_mod4", "v_i+_mod4",
"v_1_bin", "v_i_bin", "terminal", "cycle",
"a_i", "a_i_max", "a", "a_cycle", "a_max",
"b_i", "b", "bin_len", "next_bin_len",
"l_i", "l_i_min", "l_i_max",
"l", "l_min", "l_max",
"o_i", "o_i_max", "o", "o_max"]
return result_frame | d886631d153531fafa2cd9b15be621df9746a909 | 3,636,915 |
import os
import cpuinfo
def find_initcpio_features(partitions, root_mount_point):
"""
Returns a tuple (hooks, modules, files) needed to support
the given @p partitions (filesystems types, encryption, etc)
in the target.
:param partitions: (from GS)
:param root_mount_point: (from GS)
:return 3-tuple of lists
"""
hooks = ["base", "udev", "autodetect", "modconf", "block", "keyboard", "keymap", "consolefont"]
modules = []
files = []
swap_uuid = ""
uses_btrfs = False
uses_lvm2 = False
encrypt_hook = False
openswap_hook = False
unencrypted_separate_boot = False
# It is important that the plymouth hook comes before any encrypt hook
if detect_plymouth():
hooks.append("plymouth")
for partition in partitions:
if partition["fs"] == "linuxswap" and not partition.get("claimed", None):
# Skip foreign swap
continue
if partition["fs"] == "linuxswap":
swap_uuid = partition["uuid"]
if "luksMapperName" in partition:
openswap_hook = True
if partition["fs"] == "btrfs":
uses_btrfs = True
if "lvm2" in partition["fs"]:
uses_lvm2 = True
if partition["mountPoint"] == "/" and "luksMapperName" in partition:
encrypt_hook = True
if (partition["mountPoint"] == "/boot" and "luksMapperName" not in partition):
unencrypted_separate_boot = True
if partition["mountPoint"] == "/usr":
hooks.append("usr")
if encrypt_hook:
if detect_plymouth() and unencrypted_separate_boot:
hooks.append("plymouth-encrypt")
else:
hooks.append("encrypt")
if not unencrypted_separate_boot and \
os.path.isfile(
os.path.join(root_mount_point, "crypto_keyfile.bin")
):
files.append("/crypto_keyfile.bin")
if uses_lvm2:
hooks.append("lvm2")
if swap_uuid != "":
if encrypt_hook and openswap_hook:
hooks.extend(["openswap"])
hooks.extend(["resume", "filesystems"])
else:
hooks.extend(["filesystems"])
if uses_btrfs:
modules.append("crc32c-intel" if cpuinfo().is_intel else "crc32c")
else:
hooks.append("fsck")
return (hooks, modules, files) | 9e35e597af3b3ac1d5bd749740491ffa728d2130 | 3,636,916 |
def _is_unique_rec_name(info_name):
"""
helper method to see if we should use the uniqueness recommendation on the
fact comparison
"""
UNIQUE_INFO_SUFFIXES = [".ipv4_addresses", ".ipv6_addresses", ".mac_address"]
UNIQUE_INFO_PREFIXES = ["fqdn"]
if info_name.startswith("network_interfaces.lo."):
return False
for prefix in UNIQUE_INFO_PREFIXES:
if info_name.startswith(prefix):
return True
for suffix in UNIQUE_INFO_SUFFIXES:
if info_name.endswith(suffix):
return True
return False | cba744e1e5b6a9612363d2ca12d4751e1894c8ad | 3,636,917 |
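# A few checks of _is_unique_rec_name, following directly from the prefix/suffix rules above.
assert _is_unique_rec_name("fqdn") is True
assert _is_unique_rec_name("network_interfaces.eth0.mac_address") is True
assert _is_unique_rec_name("network_interfaces.lo.ipv4_addresses") is False  # loopback excluded
assert _is_unique_rec_name("os.release") is False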
def initialized():
"""
Connection finished initializing?
"""
return __context__["netmiko_device"].get("initialized", False) | 6ca85744478bdb17ac99ce827825cde1db8bae3a | 3,636,918 |
def n_round(a, b):
"""safe round"""
element_round = np.vectorize(np.round)
return element_round(a, intify(b)) | 2c38e1585b71d5717ea3cc560521b8a006ceeee3 | 3,636,919 |
def _json_view_params(shape, affine, vmin, vmax, cut_slices, black_bg=False,
opacity=1, draw_cross=True, annotate=True, title=None,
colorbar=True, value=True):
""" Create a dictionary with all the brainsprite parameters.
Returns: params
"""
# Set color parameters
if black_bg:
cfont = '#FFFFFF'
cbg = '#000000'
else:
cfont = '#000000'
cbg = '#FFFFFF'
# Deal with limitations of json dump regarding types
if type(vmin).__module__ == 'numpy':
vmin = vmin.tolist() # json does not deal with numpy array
if type(vmax).__module__ == 'numpy':
vmax = vmax.tolist() # json does not deal with numpy array
params = {'canvas': '3Dviewer',
'sprite': 'spriteImg',
'nbSlice': {'X': shape[0],
'Y': shape[1],
'Z': shape[2]},
'overlay': {'sprite': 'overlayImg',
'nbSlice': {'X': shape[0],
'Y': shape[1],
'Z': shape[2]},
'opacity': opacity},
'colorBackground': cbg,
'colorFont': cfont,
'crosshair': draw_cross,
'affine': affine.tolist(),
'flagCoordinates': annotate,
'title': title,
'flagValue': value,
'numSlice': {'X': cut_slices[0] - 1,
'Y': cut_slices[1] - 1,
'Z': cut_slices[2] - 1}}
if colorbar:
params['colorMap'] = {'img': 'colorMap',
'min': vmin,
'max': vmax}
return params | 50ea71a5a99facf4c472f0c18984d84e23b8e301 | 3,636,920 |
from typing import List
import datetime
def get_timestamps_from_df_data(df) -> List[datetime.datetime]:
"""Get a list of timestamp from rows of a DataFrame containing
raw data.
"""
timestamps = []
for index, row in df.iterrows():
year = int(row["dteday"][:4])
month = int(row["dteday"][5:7])
day = int(row["dteday"][-2:])
hour = int(row["hr"])
timestamp = datetime.datetime(year, month, day, hour)
timestamps.append(timestamp)
return timestamps | 21f985ebf28d6f5819635a13294e8db0544a292b | 3,636,921 |
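# A minimal sketch of get_timestamps_from_df_data on a tiny frame in the expected layout
# ("dteday" as "YYYY-MM-DD" strings, "hr" as the hour of day).
import pandas as pd

df = pd.DataFrame({"dteday": ["2011-01-01", "2011-01-01"], "hr": [5, 6]})
print(get_timestamps_from_df_data(df))
# [datetime.datetime(2011, 1, 1, 5, 0), datetime.datetime(2011, 1, 1, 6, 0)]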
def debug_user(request):
"""
Allows superusers to log in as a regular user to troubleshoot problems.
"""
if not settings.DEBUG:
messages.error(request, "Can only use when in debug mode.")
redirect("/")
target = request.GET.get("uid", "")
profile = Profile.objects.filter(uid=target).first()
if not profile:
messages.error(request, "User does not exists.")
return redirect("/")
user = profile.user
login(request, user, backend="django.contrib.auth.backends.ModelBackend")
messages.success(request, "Login successful!")
logger.info(f"""uid={request.user.profile.uid} impersonated
uid={profile.uid}.""")
return redirect("/") | 73a1f3cfdaa45a5693c9589c012206f6d83d56d0 | 3,636,922 |
def select(var_name, attr_name=None):
"""
Return attribute(s) of a variable given the variable name and an optional field name, or list of attribute name(s)
:param var_name: Name of the variable we're interested in.
:param attr_name: A string representing the name of the attribute whose value we want to fetch. This can also be
a list of strings in case of multiple attributes. If None, all attributes of the variable are returned.
:return: A dictionary of attribute => value mappings if multiple attributes were requested (i.e. attr_name is a
list), or a string value if a single attribute name was requested (i.e. attr_name is a string)
"""
single = isinstance(attr_name, str)
if attr_name is not None:
if single:
params = {attr_name: attr_name}
else:
params = dict([(f, f) for f in attr_name])
else:
params = None
endpoint = 'variable/%s' % var_name
data = _get(endpoint, params)
return data[attr_name] if single else data | 22b65439ff4dc831c2fb334595b0f0cd2e764b67 | 3,636,923 |
import re
def _parseWinBuildTimings(logfile):
"""Variant of _parseBuildTimings for Windows builds."""
res = {'Compile': re.compile(r'\d+>Time Elapsed (\d+):(\d+):([0-9.]+)'),
'Test running': re.compile(r'.*?\.+.*?([0-9.]+) sec')}
times = dict([(k, 0.0) for k in res])
for line in logfile:
for key, regexp in res.items():
m = regexp.match(line)
if m:
multiplier = 1
for time_part in reversed(m.groups()):
times[key] += float(time_part) * multiplier
multiplier *= 60
break
times['Total'] = sum(times.values())
return times | 0473c426d29bb7fe44ff3384f81962f121c11afa | 3,636,924 |
import typing
import requests
def _try_to_extract_issuing_url_from_well_known_metadata(well_known_address: str) -> typing.Optional[str]:
"""
Try to extract token issuing url from well-known location
:param well_known_address: well-known URL
:type well_known_address: str
:return: str or None -- token issuing URL
"""
try:
LOGGER.debug('Trying to extract well-known information from address %r', well_known_address)
response = requests.get(url=well_known_address)
data = response.json()
except requests.HTTPError as http_error:
LOGGER.debug('Failed to extract well-known information from address %r - %s', well_known_address, http_error)
return None
except ValueError as value_error:
LOGGER.debug('Failed to parse well-known information from address %r - %s', well_known_address, value_error)
return None
token_endpoint = data.get('token_endpoint')
if not token_endpoint:
LOGGER.debug('well-known information does not contain token_endpoint (%s)', well_known_address)
return
return token_endpoint | 209a521fe29c8f47b7bc63207cdbc5736d053887 | 3,636,925 |
def get_malid(anime: AnimeThemeAnime) -> int:
"""
Returns anime theme of resource.
"""
for resource in anime['resources']:
if resource["site"] == "MyAnimeList":
return resource['external_id'] | a745f95e73e8e061d98100e314faf5a662d69693 | 3,636,926 |
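# A minimal sketch of get_malid on a hand-built resource list shaped like the input the
# function expects (only the fields it reads are included).
anime = {"resources": [
    {"site": "AniList", "external_id": 101},
    {"site": "MyAnimeList", "external_id": 5114},
]}
assert get_malid(anime) == 5114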
from glob import glob
from scipy.io import wavfile  # assumed source of wavfile.read below
def wav16khz2mfcc(dir_name):
"""
Loads all *.wav files from directory dir_name (must be 16kHz), converts them into MFCC
features (13 coefficients) and stores them into a dictionary. Keys are the file names
and values and 2D numpy arrays of MFCC features.
"""
features = {}
for f in glob(dir_name + '/*.wav'):
print('Processing file: ', f)
rate, s = wavfile.read(f)
assert(rate == 16000)
features[f] = mfcc(s, 400, 240, 512, 16000, 23, 13)
return features | 6eae15a7ac999cd42c1e3161221356cf720d54c0 | 3,636,927 |
def add_metadata(infile, outfile, sample_metadata):
"""Add sample-level metadata to a biom file. Sample-level metadata
should be in a format akin to
http://qiime.org/tutorials/tutorial.html#mapping-file-tab-delimited-txt
:param infile: String; name of the biom file to which metadata
shall be added
:param outfile: String; name of the resulting metadata-enriched biom file
:param sample_metadata: String; name of the sample-level metadata
tab-delimited text file. Sample attributes are
taken from this file. Note: the sample names in
the `sample_metadata` file must match the sample
names in the biom file.
External dependencies
- biom-format: http://biom-format.org/
"""
return {
"name": "biom_add_metadata: " + infile,
"actions": [("biom add-metadata"
" -i "+infile+
" -o "+outfile+
" -m "+sample_metadata)],
"file_dep": [infile],
"targets": [outfile]
} | e779f876159741de60e99002a90906b151dc7530 | 3,636,928 |
def multinomial(n):
"""Finds the multinomial coefficient for a given array of numbers.
Args:
n (list): the interegs to be used.
"""
binomials = [[np.sum(n),n[0]]]
for i in range(1,len(n)):
new_sum = binomials[i-1][0]-binomials[i-1][1]
binomials.append([new_sum,n[i]])
bins = []
for b in binomials:
bins.append(binomial_coefficient(b[0],b[1]))
return np.prod(bins) | 6f38656d295a4d5ecf32a01a238cdad701e6e530 | 3,636,929 |
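# A quick cross-check of multinomial against the closed form n!/prod(n_i!). The function
# assumes numpy is imported as np and uses an external binomial_coefficient; a hypothetical
# stand-in (the usual "n choose k") is provided here.
import numpy as np
from math import comb, factorial

def binomial_coefficient(n, k):  # hypothetical stand-in for the module's helper
    return comb(n, k)

print(multinomial([2, 1, 1]))                                          # 12
print(factorial(4) // (factorial(2) * factorial(1) * factorial(1)))    # 12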
def get_qc_data(sample_prj, p_con, s_con, fc_id=None):
"""Get qc data for a project, possibly subset by flowcell.
:param sample_prj: project identifier
:param p_con: object of type <ProjectSummaryConnection>
:param s_con: object of type <SampleRunMetricsConnection>
:returns: dictionary of qc results
"""
project = p_con.get_entry(sample_prj)
application = project.get("application", None) if project else None
samples = s_con.get_samples(fc_id=fc_id, sample_prj=sample_prj)
qcdata = {}
for s in samples:
qcdata[s["name"]]={"sample":s.get("barcode_name", None),
"project":s.get("sample_prj", None),
"lane":s.get("lane", None),
"flowcell":s.get("flowcell", None),
"date":s.get("date", None),
"application":application,
"TOTAL_READS":int(s.get("picard_metrics", {}).get("AL_PAIR", {}).get("TOTAL_READS", -1)),
"PERCENT_DUPLICATION":s.get("picard_metrics", {}).get("DUP_metrics", {}).get("PERCENT_DUPLICATION", "-1.0"),
"MEAN_INSERT_SIZE":float(s.get("picard_metrics", {}).get("INS_metrics", {}).get("MEAN_INSERT_SIZE", "-1.0").replace(",", ".")),
"GENOME_SIZE":int(s.get("picard_metrics", {}).get("HS_metrics", {}).get("GENOME_SIZE", -1)),
"FOLD_ENRICHMENT":float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("FOLD_ENRICHMENT", "-1.0").replace(",", ".")),
"PCT_USABLE_BASES_ON_TARGET":s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_USABLE_BASES_ON_TARGET", "-1.0"),
"PCT_TARGET_BASES_10X":s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_TARGET_BASES_10X", "-1.0"),
"PCT_PF_READS_ALIGNED":s.get("picard_metrics", {}).get("AL_PAIR", {}).get("PCT_PF_READS_ALIGNED", "-1.0"),
}
target_territory = float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("TARGET_TERRITORY", -1))
pct_labels = ["PERCENT_DUPLICATION", "PCT_USABLE_BASES_ON_TARGET", "PCT_TARGET_BASES_10X",
"PCT_PF_READS_ALIGNED"]
for l in pct_labels:
if qcdata[s["name"]][l]:
qcdata[s["name"]][l] = float(qcdata[s["name"]][l].replace(",", ".")) * 100
if qcdata[s["name"]]["FOLD_ENRICHMENT"] and qcdata[s["name"]]["GENOME_SIZE"] and target_territory:
qcdata[s["name"]]["PERCENT_ON_TARGET"] = float(qcdata[s["name"]]["FOLD_ENRICHMENT"]/ (float(qcdata[s["name"]]["GENOME_SIZE"]) / float(target_territory))) * 100
return qcdata | f267148f48f86151852e12fa3be8d5f8aefc6b11 | 3,636,930 |
def sql_sanitize(sql_name):
"""
Return a SQL name (table or column) cleaned of problematic characters.
ex. punctuation )(][; whitespace
Don't use with values, which can be properly escaped with parameterization.
Ideally retaining only alphanumeric char.
Credits: Donald Miner, Source: StackOverflow, DateAccessed: 2020-02-20
"""
sanitize_name = "".join(char for char in sql_name if char.isalnum())
return sanitize_name | 9ce9e0e8bed2348079fb23f2d27c53880fa1c795 | 3,636,931 |
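# A couple of sanity checks for sql_sanitize: everything except alphanumeric characters is
# dropped (note that underscores are removed as well).
assert sql_sanitize("user_accounts; DROP TABLE--") == "useraccountsDROPTABLE"
assert sql_sanitize("order items") == "orderitems"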
import os
def add_event_number(job_metrics, workdir):
"""
Extract event number from file and add to job metrics if it exists
:param job_metrics: job metrics (string).
:param workdir: work directory (string).
:return: updated job metrics (string).
"""
path = os.path.join(workdir, 'eventLoopHeartBeat.txt')
if os.path.exists(path):
last_line = find_last_line(path)
if last_line:
event_number = get_number_in_string(last_line)
if event_number:
job_metrics += get_job_metrics_entry("eventnumber", event_number)
else:
logger.debug('file %s does not exist (skip for now)', path)
return job_metrics | f4ac76a01ac9bb4ce5dec8baf754d54b6e1ac4e2 | 3,636,932 |
def exists(name):
"""
`True` if a category named `name` exists;
`False` otherwise.
"""
return db.cursor().execute('SELECT COUNT(*) FROM categories WHERE name = ?', (name,)).fetchone()[0] != 0 | e15f5d961a4420ef6bd00fa393ab9af440e5f983 | 3,636,933 |
def ESMP_MeshGetOwnedElementCount(mesh):
"""
Preconditions: An ESMP_Mesh has been created.\n
Postconditions: The owned elementCount for 'mesh' has been
returned.\n
Arguments:\n
:RETURN: integer :: elementCount\n
ESMP_Mesh :: mesh\n
"""
lec = ct.c_int(0)
rc = _ESMF.ESMC_MeshGetOwnedElementCount(mesh.struct.ptr, ct.byref(lec))
if rc != constants._ESMP_SUCCESS:
raise ValueError('ESMC_MeshGetOwnedElementCount() failed with rc = '+
str(rc)+'. '+constants._errmsg)
elementCount = lec.value
return elementCount | 067411ba3b2fbc4f862375e2a3699d617999b6ed | 3,636,934 |
def remove_control_chars_author(input):
"""
:param input: string to clean
:return: the input with control characters removed
"""
return CONTROL_CHAR_RE.sub('', input) | 632bb20de05f3461156fa7ed311b9a04459de60f | 3,636,935 |
def run():
"""Default Run Method"""
return problem51(8) | 3357bb4e6461f8142f93fc394f3b5aba0fba7ceb | 3,636,936 |
def calc_c(e, a, b, u=1): # Check units
"""
calculate the z components of 4 partial waves in medium
e: dielectric tensor
a,b: components of wavevector in direction of x and y direction
return a list containting 4 roots for the z components of the partial waves
"""
# assign names
x = e * u
x11, x12, x13 = x[0]
x21, x22, x23 = x[1]
x31, x32, x33 = x[2]
# calculate the coefficients based on the symbolic expression
coef4 = x33
coef3 = a * x13 + a * x31 + b * x23 + b * x32
coef2 = a**2*x11 + a**2*x33 + a*b*x12 + a*b*x21 + b**2*x22 + b**2*x33 - \
x11*x33 + x13*x31 - x22*x33 + x23*x32
coef1 = a**3*x13 + a**3*x31 + a**2*b*x23 + a**2*b*x32 + a*b**2*x13 + \
a*b**2*x31 + a*x12*x23 - a*x13*x22 + a*x21*x32 - a*x22*x31 + b**3*x23 \
+ b**3*x32 - b*x11*x23 - b*x11*x32 + b*x12*x31 + b*x13*x21
coef0 = a**4*x11 + a**3*b*x12 + a**3*b*x21 + a**2*b**2*x11 + a**2*b**2*x22 \
- a**2*x11*x22 - a**2*x11*x33 + a**2*x12*x21 + a**2*x13*x31 + a*b**3*x12 + \
a*b**3*x21 - a*b*x12*x33 + a*b*x13*x32 - a*b*x21*x33 + a*b*x23*x31 + \
b**4*x22 - b**2*x11*x22 + b**2*x12*x21 - b**2*x22*x33 + b**2*x23*x32 + \
x11*x22*x33 - x11*x23*x32 - x12*x21*x33 + x12*x23*x31 + x13*x21*x32 - \
x13*x22*x31
# calculate the roots of the quartic equation
c = np.roots([coef4, coef3, coef2, coef1, coef0])
if len(c) == 2:
return np.append(c, c)
return c | 46a1ae481c9525ecc7ae1e5e3b119b8d3983ca16 | 3,636,937 |
from typing import Sequence
from typing import Tuple
def _jax_decode(
compressed_message: ndarray,
tail_limit: int,
message_len: int,
message_shape: Sequence[int],
codec: CrayCodec,
cdf_state: Sequence[ndarray],
) -> Tuple[Tuple[ndarray, int], ndarray, Sequence[ndarray]]:
"""
JAX rANS decoding function.
At a high level, this function takes a stack of information
(``compressed_message``) and peeks at the top of the stack to see what the
current symbol is. After identifying the symbol, this function pops a
number of bits from the top of the stack approximately equal to the
information content of the symbol (i.e. ``-log(symbol probability)``). This
is done ``message_len`` times until the full message is retrieved.
Args:
compressed_message: The input stack containing the compressed message.
tail_limit: A pointer to the current end of the tail.
message_len: The size of the message to be decoded.
message_shape: The message shape containing the interleaved dimension
size.
codec: A named tuple object containing functions for push and pop
operations, as well as an initial state fo the CDF functions (for
context-adaptive coding) and a data type specification for the
message.
cdf_state: The initialization state of the inverse CDF function
(contains CDF array or can be used for conditional probabilites).
Returns:
A 3-tuple containing:
The decoded messages of size
``(message_len, *message_shape)``.
A byte array of compressed data after removing the target
message.
The final CDF state.
"""
message = jnp.zeros((message_len, *message_shape), dtype=codec.message_dtype)
def pop_one_symbol(msg_index, vals):
return codec.pop(msg_index, *vals)
result = lax.fori_loop(
0,
message_len,
pop_one_symbol,
(
array_to_craymessage(compressed_message, message_shape, tail_limit),
message,
cdf_state,
),
)
return craymessage_to_array(result[0]), result[1], result[2] | 648cb4af4ddaaec01e5d5997e8698aad6acd4c01 | 3,636,938 |
def subtask1_eval(_answers, _ref):
"""
Scoring function for subtask 1.
:param _answers: submitted answers.
:param _ref: reference (gold) answers.
:return: statistics dictionary.
"""
_map = {
'11': 'TP',
'00': 'TN',
'10': 'FN',
'01': 'FP',
}
_st = {
'TP': 0,
'TN': 0,
'FN': 0,
'FP': 0,
}
for _k, _v in _ref.items():
_ga = int(_v)
_aa = int(_answers[_k]) if _k in _answers else 0
_st[_map[f"{_ga}{_aa}"]] += 1
_st['Accuracy'] = (_st['TP'] + _st['TN']) / (_st['TP'] + _st['FP'] + _st['FN'] + _st['TN'])
return _st | 7249992f70b67928a99e96c7877e5ef4be261429 | 3,636,939 |
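# A small worked example for subtask1_eval: missing answers default to "0", so the
# confusion counts below are TP=1, TN=1, FN=1, FP=0 and accuracy = 2/3.
ref = {"q1": "1", "q2": "0", "q3": "1"}
answers = {"q1": "1", "q3": "0"}
stats = subtask1_eval(answers, ref)
print(stats["TP"], stats["TN"], stats["FN"], stats["FP"])  # 1 1 1 0
print(round(stats["Accuracy"], 3))                         # 0.667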
def render_horizontal_fields(*fields_to_render, **kwargs):
"""Render given fields with optional labels"""
labels = kwargs.get('labels', True)
media = kwargs.get('media')
hidden_fields = []
visible_fields = []
for bound_field in fields_to_render:
if bound_field.field.widget.is_hidden:
hidden_fields.append(bound_field)
else:
visible_fields.append(bound_field)
return {
'fields_to_render': fields_to_render,
'hidden_fields': hidden_fields,
'visible_fields': visible_fields,
'labels': labels,
'media': media,
} | 22ac9c05b602c0f65ab2fc348ab9399855780bc3 | 3,636,940 |
def get_darwin_memory():
""" Use system-call to extract total memory on macOS """
system_output = sabnzbd.newsunpack.run_simple(["sysctl", "hw.memsize"])
return float(system_output.split()[1]) | 1458881c61cdb5b765c4c56fa494ff7c6f06c49b | 3,636,941 |
from datetime import datetime
def parseTextModeTimeStr(timeStr):
""" Parses the specified SMS text mode time string
The time stamp format is "yy/MM/dd,hh:mm:ss±zz"
(yy = year, MM = month, dd = day, hh = hour, mm = minute, ss = second, zz = time zone
[Note: the unit of time zone is a quarter of an hour])
@param timeStr: The time string to parse
@type timeStr: str
@return: datetime object representing the specified time string
@rtype: datetime.datetime
"""
msgTime = timeStr[:-3]
tzOffsetHours = int(int(timeStr[-3:]) * 0.25)
return datetime.strptime(msgTime, '%y/%m/%d,%H:%M:%S').replace(tzinfo=SimpleOffsetTzInfo(tzOffsetHours)) | 52a45116a2b0153595161f94fda38129ddd59b3a | 3,636,942 |
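# A minimal sketch of parseTextModeTimeStr with a hypothetical stand-in for the module's
# SimpleOffsetTzInfo (a fixed offset given in whole hours).
from datetime import timedelta, timezone

def SimpleOffsetTzInfo(hours):
    return timezone(timedelta(hours=hours))

print(parseTextModeTimeStr("21/04/13,15:30:00+08"))
# 2021-04-13 15:30:00+02:00  ("+08" is in quarter-hour units, i.e. +2 hours)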
import numpy as np
import torch
from torch import Tensor
def angle_to_rotation_matrix(angle) -> Tensor:
"""
Creates a rotation matrix out of angles in degrees
Args:
angle: (Tensor): tensor of angles in degrees, any shape.
Returns:
Tensor: tensor of *x2x2 rotation matrices.
Shape:
- Input: :math:`(*)`
- Output: :math:`(*, 2, 2)`
Examples:
>>> input = torch.rand(1, 3) # Nx3
>>> output = angle_to_rotation_matrix(input) # Nx3x2x2
"""
ang_rad = angle * np.pi / 180
cos_a = torch.cos(ang_rad)
sin_a = torch.sin(ang_rad)
return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2) | 9b88eaa0277d0c3ad672e94e4d41ec45ebe0b272 | 3,636,943 |
def extract_text():
"""Extracts text from an HTML document."""
html = request.form['html']
article = Article(html)
try:
return article.text
except AttributeError as e:
log.warn(e)
# NOTE: When a parsing error occurs, an AttributeError is raised.
# We'll deal with this exception later.
return '' | 8efc10539462ab51715b54b17a018e5f296496eb | 3,636,944 |
import json
import time
def get_new_account_id(event):
"""Return account id for new account events."""
create_account_status_id = (
event["detail"]
.get("responseElements", {})
.get("createAccountStatus", {})["id"] # fmt: no
)
log.info("createAccountStatus = %s", create_account_status_id)
org = boto3.client("organizations")
while True:
account_status = org.describe_create_account_status(
CreateAccountRequestId=create_account_status_id
)
state = account_status["CreateAccountStatus"]["State"].upper()
if state == "SUCCEEDED":
return account_status["CreateAccountStatus"]["AccountId"]
elif state == "FAILED":
log.error("Account creation failed:\n%s", json.dumps(account_status))
raise AccountCreationFailedException
else:
log.info(
"Account state: %s. Sleeping 5 seconds and will try again...", state
)
time.sleep(5) | 4433b080b24d1a7ad276541103e55acf7bbfa137 | 3,636,945 |
from typing import List
def lag_indexes(tf_stat)-> List[pd.Series]:
"""
Calculates indexes for 1 and 2 months backward lag over the date range covered by tf_stat
:param tf_stat: dict whose 'days' entry gives the first and last dates of the range
:return: List of 2 Series, one for each lag. For each Series, the index is a date in the range and the value is the
index of the target (lagged) date in the same Series. If the target date is outside the range, the value is -1
"""
date_range = pd.date_range(tf_stat['days'][0],tf_stat['days'][-1])
# key is date, value is day index
base_index = pd.Series(np.arange(0, len(date_range)),index=date_range)
def lag(offset):
dates = date_range - offset
return pd.Series(data=base_index[dates].fillna(-1).astype(np.int16).values, index=date_range)
return [lag(pd.DateOffset(months=m)) for m in (1, 2)] | de8d355d213146013eb4720860dd844d22ccab45 | 3,636,946 |
def weather_outfit(req):
"""Returns a string containing text with a response to the user
with a indication if the outfit provided is appropriate for the
current weather or a prompt for more information
Takes a city, outfit and (optional) dates
uses the template responses found in weather_responses.py as templates
and the outfits listed in weather_entities.py
"""
# validate request parameters, return an error if there are issues
error, forecast_params = validate_params(req['queryResult']['parameters'])
if error:
return error
# Validate that there are the required parameters to retrieve a forecast
if not forecast_params['outfit']:
return 'What are you planning on wearing?'
# create a forecast object which retrieves the forecast from a external API
try:
forecast = Forecast(forecast_params)
# return an error if there is an error getting the forecast
except (ValueError, IOError) as error:
return error
return forecast.get_outfit_response() | ee5b3cd3ed10062155bbce532343ef51f9a83177 | 3,636,947 |
from sentence_splitter import SentenceSplitter
def parse_paragraphs(record):
"""
parse paragraphs into sentences, returns list
"""
splitter = SentenceSplitter(language='en')
sentences=splitter.split(record['value'])
article_id = remove_prefix(record['key'],'paragraphs:')
pre = 'sentence:' + article_id
l = [{ 'key': f'{pre}','idx':f'{idx}','value': sentence } for idx,sentence in enumerate(sentences)]
return l | 9a8cce4692af5e61b9f01becd8dafa9234c08f17 | 3,636,948 |
def get_stage_environment() -> str:
"""
Indicates whether the source is running as PRD or DEV. Accounts for the
user preference via TEST_WORKING_STAGE.
:return: One of the STAGE_* constants.
"""
return TEST_WORKING_STAGE | 1c2e14132af1760a13aae268b5179e70c79f5df5 | 3,636,949 |
def get_all_table_acls(conn, schema=None):
"""Get privileges for all tables, views, materialized views, and foreign
tables.
Specify `schema` to limit the results to that schema.
Returns:
List of :class:`~.types.SchemaRelationInfo` objects.
"""
stmt = _table_stmt(schema=schema)
return [SchemaRelationInfo(**row) for row in conn.execute(stmt)] | 9067a614197d19c3256828b2a8dbb491bede0fe6 | 3,636,950 |
def add_atom_map(molecule, **kwargs):
"""
Add canonical ordered atom map to molecule
Parameters
----------
molecule :
`oechem.OEMOl` or `rdkit.Chem.Mol`
Returns
-------
molecule with map indices
"""
toolkit = _set_toolkit(molecule)
return toolkit.add_atom_map(molecule, **kwargs) | 584324aae018f211fc31c9f727687e9a6971822d | 3,636,951 |
from typing import Any
def build_put_dictionary_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
"""Put External Resource as a Dictionary.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. External Resource as a Dictionary to put.
:paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). External Resource as a Dictionary to put.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
json = {
"str": {
"id": "str", # Optional. Resource Id.
"location": "str", # Optional. Resource Location.
"name": "str", # Optional. Resource Name.
"properties": {
"p.name": "str", # Optional.
"provisioningState": "str", # Optional.
"provisioningStateValues": "str", # Optional. Possible values include: "Succeeded", "Failed", "canceled", "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
"type": "str" # Optional.
},
"tags": {
"str": "str" # Optional. A set of tags. Dictionary of :code:`<string>`.
},
"type": "str" # Optional. Resource Type.
}
}
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", "/model-flatten/dictionary")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, content=content, **kwargs) | 045c00835d592d777a155696bda76a5ecb12aa6f | 3,636,952 |
def midpoint(rooms):
"""
Helper function to find the midpoint between the two rooms.
Args:
rooms: list of rooms
Returns:
int: Midpoint
"""
return rooms[0] + (rooms[0] + rooms[2]) // 2, rooms[1] + (rooms[1] + rooms[3]) // 2 | 60b3ba53fb15154ff97ab9c6fa3cf1b726bc2df1 | 3,636,953 |
def secondSolution( fixed, c1, c2, c3 ):
"""
If given four tangent circles, calculate the other one that is tangent
to the last three.
@param fixed: The fixed circle touches the other three, but not
the one to be calculated.
@param c1, c2, c3: Three circles to which the other tangent circle
is to be calculated.
@type fixed: L{Circle}
@type c1: L{Circle}
@type c2: L{Circle}
@type c3: L{Circle}
@return: The circle.
@rtype: L{Circle}
"""
curf = fixed.curvature()
cur1 = c1.curvature()
cur2 = c2.curvature()
cur3 = c3.curvature()
curn = 2 * (cur1 + cur2 + cur3) - curf
mn = (2 * (cur1*c1.m + cur2*c2.m + cur3*c3.m) - curf*fixed.m ) / curn
return Circle( mn.real, mn.imag, 1/curn ) | a5f7545a3c4600e29bfdb9c516ede6ba244894c3 | 3,636,954 |
import random
def generate_concept_chain(concept_desc, sequential):
"""
Given a dict of available concepts, generate the ordered chain of concept ids
(one entry per appearance) and the total number of samples.
Parameters
----------
sequential: bool
If true, concept transitions are
determined by ID without randomness.
"""
concept_chain = []
num_samples = 0
more_appearences = True
appearence = 0
while more_appearences:
concepts_still_to_appear = []
for cID in concept_desc:
concept = concept_desc[cID]
if concept.appearences > appearence:
concepts_still_to_appear.append(concept)
more_appearences = len(concepts_still_to_appear) > 0
for concept in concepts_still_to_appear:
concept_chain.append(concept.id)
num_samples += concept.examples_per_appearence
appearence += 1
if not sequential:
random.shuffle(concept_chain)
return concept_chain, num_samples | fcfeb345d92d627684d04da4c1d445120554bf15 | 3,636,955 |
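A minimal usage sketch. The concept objects below are hypothetical stand-ins; the function only requires that each value in concept_desc exposes id, appearences, and examples_per_appearence:

from types import SimpleNamespace

concept_desc = {
    0: SimpleNamespace(id=0, appearences=2, examples_per_appearence=100),
    1: SimpleNamespace(id=1, appearences=1, examples_per_appearence=50),
}
chain, n_samples = generate_concept_chain(concept_desc, sequential=True)
print(chain, n_samples)  # [0, 1, 0] 250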
def get_inputs(input_queue,
num_classes,
merge_multiple_label_boxes=False,
use_multiclass_scores=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
use_multiclass_scores: Whether to use multiclass scores instead of
groundtruth_classes.
Returns:
images: a list of 3-D float tensor of images.
image_keys: a list of string keys for the images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot (or K-hot) float32 tensors containing
target classes.
masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints_list: a list of 3-D float tensors of shape [num_boxes,
num_keypoints, 2] containing keypoints for objects if present in the
input queue. Else returns None.
weights_lists: a list of 1-D float32 tensors of shape [num_boxes]
containing groundtruth weight for each box.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
image = read_data[fields.InputDataFields.image]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
tf.int32)
classes_gt -= label_id_offset
if merge_multiple_label_boxes and use_multiclass_scores:
raise ValueError(
          'Using both merge_multiple_label_boxes and use_multiclass_scores is '
'not supported'
)
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
classes_gt = tf.cast(classes_gt, tf.float32)
elif use_multiclass_scores:
classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
tf.float32)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
if (merge_multiple_label_boxes and (
masks_gt is not None or keypoints_gt is not None)):
raise NotImplementedError('Multi-label support is only for boxes.')
weights_gt = read_data.get(
fields.InputDataFields.groundtruth_weights)
return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
weights_gt)
return zip(*map(extract_images_and_targets, read_data_list)) | 96185efe5e1b6ee3064136e052387da0bfb1ddaa | 3,636,956 |
def doFilter(pTable, proxyService):
"""
filter candidates by column header candidates
- column headers are kept, if they support at least (minSupport * #rows) many cells
- only filter for columns that are part of the targets (if activated)
subsequently remove:
- CTA candidates with less support
- CEA candidates that do not support any of the remaining CTA candidates of their column
"""
# keep track, if this changed anything
changed = False
# table cols
cols = pTable.getCols(unsolved=False)
# process each column separately
for col in cols:
if not col['sel_cand']:
continue
# check, if we have to process this column at all
if not pTable.isTarget(col_id=col['col_id']):
continue
# grab all cells in this column
cells = pTable.getCells(col_id=col['col_id'])
beforeCount = len(cells)
# get the hierarchy over our candidates
hierarchy = proxyService.get_hierarchy_for_lst.send([col['sel_cand']['uri']])
typesSupported = [col['sel_cand']['uri']]
for parentList in hierarchy.values():
typesSupported.extend([item['parent'] for item in parentList])
typesSupported = list(set(typesSupported))
# purge the candidate lists
# for cell in cells:
# candSupport = {}
# for cand in cell['cand']:
# candSupport[cand['uri']] = 0
# try:
# foundTypes = [t for t in cand['types'] if t in typesSupported]
# candSupport[cand['uri']] += len(foundTypes)
# except KeyError as e:
# candSupport[cand['uri']] += 0
# # keep cands with highest support only
# maxFreq = max([candSupport[uri] for uri in candSupport.keys()])
# for cand in cell['cand']:
# if candSupport[cand['uri']] < maxFreq:
# cell['cand'].remove(cand)
# purged = []
# # remove all CEA candidates from the cells that are not associated with any remaining type
for cell in cells:
# add_purged = []
# check if the sel_cand is semantically correct
        for cand in list(cell['cand']):  # iterate over a copy; items may be removed below
try:
foundTypes = [t for t in cand['types'] if t in typesSupported]
if not foundTypes:
# add to purged cells
# add_purged.append(cand)
cell['cand'].remove(cand)
except KeyError as e:
# print(e)
# add_purged.append(cand)
cell['cand'].remove(cand)
# if add_purged:
# # update the cell
# cell['purged_cand'].extend(add_purged)
# collect purged candidates
# purged.extend(add_purged)
# purge the cell-pair list
# pTable.purgeCellPairs(purged)
# done
return changed | 8b28f945e94e37302b2086e23f695c40c08b8d7c | 3,636,957 |
def int_or_float(x):
"""Convert `x` to either `int` or `float`, preferring `int`.
Raises:
ValueError : If `x` is not convertible to either `int` or `float`
"""
try:
return int(x)
except ValueError:
return float(x) | d0a4def320f88655e494f89b7239e47e1ee70d0d | 3,636,958 |
def request_factory():
"""Pytest setup for factory."""
return RequestFactory() | d6b5710dd42da06f6bb10e23fe3826a6a754228a | 3,636,959 |
def is_onehotencoded(x):
"""If input is a one-hot encoded representation of some set of values.
Parameters
----------
x : array-like
Returns
-------
bool
Whether `x` is a one-hot encoded / categorical representation.
"""
if x.ndim != 2:
return False
fractional, integral = np.modf(x)
if fractional.sum() != 0:
return False
if not np.array_equal(integral, integral.astype(bool)):
return False
return np.all(integral.sum(axis=1) == 1) | 21a023afeec886512ef806c76ade5523817ef350 | 3,636,960 |
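A quick self-contained check of the predicate, assuming np refers to NumPy as in the function body:

import numpy as np

one_hot = np.array([[1, 0, 0], [0, 0, 1]])
scores = np.array([[0.2, 0.5, 0.3], [0.9, 0.1, 0.0]])
print(is_onehotencoded(one_hot))  # True
print(is_onehotencoded(scores))   # False -- fractional entries fail the first check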
def sequence_of_words(fname_doc, dictionary):
"""
Compute Sequence-of-Words from word list and dictionary
"""
txtdata = loadtxt(fname_doc)
words = extract_keyword(txtdata, "all")
SOW = []
for i,word in enumerate(words):
print(word)
if word in dictionary.keys():
SOW.append(dictionary[word]["id"])
SOW = sp.array(SOW)
return SOW | 92aeb61ce91b7149143bfb67905793caee83d3be | 3,636,961 |
def shd(B_est, B_true):
"""Compute various accuracy metrics for B_est.
true positive = predicted association exists in condition in correct direction
reverse = predicted association exists in condition in opposite direction
false positive = predicted association does not exist in condition
Args:
B_true (np.ndarray): [d, d] ground truth graph, {0, 1}
B_est (np.ndarray): [d, d] estimate, {0, 1, -1}, -1 is undirected edge in CPDAG
Returns:
fdr: (reverse + false positive) / prediction positive
tpr: (true positive) / condition positive
fpr: (reverse + false positive) / condition negative
shd: undirected extra + undirected missing + reverse
nnz: prediction positive
"""
if (B_est == -1).any(): # cpdag
if not ((B_est == 0) | (B_est == 1) | (B_est == -1)).all():
raise ValueError('B_est should take value in {0,1,-1}')
if ((B_est == -1) & (B_est.T == -1)).any():
raise ValueError('undirected edge should only appear once')
else: # dag
if not ((B_est == 0) | (B_est == 1)).all():
raise ValueError('B_est should take value in {0,1}')
#if not is_dag(B_est):
# raise ValueError('B_est should be a DAG')
d = B_true.shape[0]
# linear index of nonzeros
pred_und = np.flatnonzero(B_est == -1)
pred = np.flatnonzero(B_est == 1)
cond = np.flatnonzero(B_true)
cond_reversed = np.flatnonzero(B_true.T)
cond_skeleton = np.concatenate([cond, cond_reversed])
# true pos
true_pos = np.intersect1d(pred, cond, assume_unique=True)
# treat undirected edge favorably
true_pos_und = np.intersect1d(pred_und, cond_skeleton, assume_unique=True)
true_pos = np.concatenate([true_pos, true_pos_und])
# false pos
false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)
false_pos_und = np.setdiff1d(pred_und, cond_skeleton, assume_unique=True)
false_pos = np.concatenate([false_pos, false_pos_und])
# reverse
extra = np.setdiff1d(pred, cond, assume_unique=True)
reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)
# compute ratio
pred_size = len(pred) + len(pred_und)
cond_neg_size = 0.5 * d * (d - 1) - len(cond)
fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)
tpr = float(len(true_pos)) / max(len(cond), 1)
fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)
# structural hamming distance
pred_lower = np.flatnonzero(np.tril(B_est + B_est.T))
cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))
extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)
missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)
shd = len(extra_lower) + len(missing_lower) + len(reverse)
shd_wc = shd + len(pred_und)
prc = float(len(true_pos)) / max(float(len(true_pos)+len(reverse) + len(false_pos)), 1.)
rec = tpr
return {'fdr': fdr, 'tpr': tpr, 'fpr': fpr, 'prc': prc, 'rec' : rec, 'shd': shd, 'shd_wc': shd_wc, 'nnz': pred_size} | 04c1fb44025ae1a3cfd86bc877c68e93027b75fe | 3,636,962 |
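A small worked example on a 3-node graph: the single true edge 0->1 is predicted in reverse, plus one extra edge, so both count against the score:

import numpy as np

B_true = np.array([[0, 1, 0],
                   [0, 0, 0],
                   [0, 0, 0]])
B_est = np.array([[0, 0, 0],
                  [1, 0, 1],   # 1->0 is the reversed edge, 1->2 is a false positive
                  [0, 0, 0]])
print(shd(B_est, B_true))
# {'fdr': 1.0, 'tpr': 0.0, 'fpr': 1.0, 'prc': 0.0, 'rec': 0.0, 'shd': 2, 'shd_wc': 2, 'nnz': 2}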
from typing import Any
import time
import copy
import os
def test_separate_networks(
configs: dict[str, Any],
make_plots: bool = True,
**kwargs,
) -> TestOutputs:
"""Test training on separate networks."""
t0 = time.time()
logger.info(f'Testing separate networks')
configs_ = dict(copy.deepcopy(configs))
configs_['dynamics_config']['separate_networks'] = True
train_out = train(configs_, make_plots=make_plots,
verbose=False, num_chains=4, **kwargs)
x = train_out.x
dynamics = train_out.dynamics
logdir = train_out.logdir
runs_dir = os.path.join(logdir, 'inference')
run_out = None
if RANK == 0:
run_out = run(dynamics, configs_, x=x,
runs_dir=runs_dir, make_plots=make_plots)
logger.info(f'Passed! Took: {time.time() - t0:.4f} seconds')
return TestOutputs(train_out, run_out) | 246f608af98a41ed138ac5c80314661b29a88e07 | 3,636,963 |
def nohighlight(nick):
"""add a ZWNJ to nick to prevent highlight"""
return nick[0] + "\u200c" + nick[1:] | 1b8d0cafc5df4a442daafdece59af1675ab1de33 | 3,636,964 |
def _get_r_val(z, omega_m, omega_l):
"""Returns the comoving distance at for one z value.
Parameters
----------
z : float
Redshift.
omega_m : float
Present matter density.
omega_l : float
Present dark energy density.
"""
r, err = integrate.quad(_get_r_integrand, 0., z, args=(omega_m, omega_l))
r *= 3000.
return r | 4f33eccdf4485c640f5c71808485fbf96a5f7614 | 3,636,965 |
def user_response_controller(bank_request, user_response):
"""
    Processes the user's response to the bank's request.
    : bank_request --> what the user is currently being asked for
    : user_response --> the option the user selected from the choices offered in the
      above bank_request
"""
user_response = validate_user_input_to_int(user_response)
if user_response == "error":
        return ['resend_same_bank_request', 'No valid option chosen']
if user_response >= 1 and user_response <= bank_request.get('available_options'):
return user_response
else:
return ['resend_same_bank_request', 'Selected option not found'] | 3d959fac84a8460e7ab228127d9b6f0b9cc1a21c | 3,636,966 |
from datetime import datetime
def create_features(datestrs):
"""
Find the features associated with a set of dates.
    The returned feature columns are:
    Saturday / Sunday indicators (weekend days)
    season indicators (winter, spring, summer, autumn)
    (day-of-week and month-of-year vectors are computed internally but not returned)
Parameters
----------
datestrs: list of strings
Date strings of the format YYYY-MM-DD.
Returns
-------
features: DataFrame
        Each row corresponds to one date. The datestring is the index.
"""
feature_data = []
for datestr in datestrs:
        current_date = datetime.strptime(datestr, '%Y-%m-%d').date()
current_weekday = current_date.weekday()
day_of_week = np.zeros(7)
day_of_week[current_weekday] = 1
current_month = current_date.month
month_of_year = np.zeros(12)
# Adjust months to January = 0
month_of_year[current_month - 1] = 1
# Season 0 = winter, 1 = spring, 2 = summer, 3 = autumn
season = np.zeros(4)
if current_month <= 2:
season[0] = 1
elif current_month <= 5:
season[1] = 1
elif current_month <= 8:
season[2] = 1
elif current_month <= 11:
season[3] = 1
else:
season[0] = 1
feature_set = {
'Saturday': day_of_week[5],
'Sunday': day_of_week[6],
'winter': season[0],
'spring': season[1],
'summer': season[2],
'autumn': season[3],
}
feature_data.append(feature_set)
features_df = pd.DataFrame(data=feature_data, index=datestrs)
return features_df | 75a72a54396150ed73ea43b3390994b1a41d2cf5 | 3,636,967 |
import inspect
def obj_src(py_obj, escape_docstring=True):
"""Get the source for the python object that gets passed in
Parameters
----------
py_obj : callable
Any python object
    escape_docstring : bool
If true, prepend the escape character to the docstring triple quotes
Returns
-------
list
Source code lines
Raises
------
IOError
Raised if the source code cannot be retrieved
"""
src = inspect.getsource(py_obj)
    if escape_docstring:
        # str.replace returns a new string; assign the result back
        src = src.replace("'''", "\\'''")
        src = src.replace('"""', '\\"""')
return src
# return src.split('\n') | 8ce0c7cc7672de5005b5a1c60e6b6cf5fa9ee050 | 3,636,968 |
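Usage sketch: grabbing the source of a small function defined in a file (inspect.getsource cannot see objects typed directly into a REPL):

def greet(name):
    return f"hello {name}"

print(obj_src(greet, escape_docstring=False))
# def greet(name):
#     return f"hello {name}"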
def get_back_button_handler(current_panel: "GenericPanel") -> CallbackQueryHandler:
"""
returns a Handler for BACK_PATTERN that returns the user to current_panel
:param GenericPanel current_panel: the destination panel
:return: a CallbackQueryHandler for BACK_PATTERN that returns the user to current_panel
"""
return CallbackQueryHandler(current_panel.prompt, pattern=Globals.BACK_PATTERN) | 365e37b3d362afa31d231613180070be69ac7972 | 3,636,969 |
from typing import Optional
def openocd_prog_path(request: FixtureRequest) -> Optional[str]:
"""Enable parametrization for the same cli option"""
return _request_param_or_config_option_or_default(request, 'openocd_prog_path', None) | f3628427bde73d7e26e5ed30e103d4ba36df7c1b | 3,636,970 |
def reindexMatrix(iss, jss, A):
"""iss and jss are lists of indices of equal size, representing
    a permutation: iss[i] is replaced with jss[i]. All other indices, which are
    not in the lists, are left unchanged.
"""
n = len(A)
B = np.zeros_like(A)
tss = [i for i in range(n)]
for i in range(len(iss)):
tss[iss[i]] = jss[i]
print(tss)
for i in range(n):
for j in range(n):
B[i, j] = A[tss[i], tss[j]]
return B | 9c36802d7e5f35ca6789d49e47d8124bc4f74c57 | 3,636,971 |
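Worked example swapping indices 0 and 2 of a 3x3 matrix (the function also prints the full permutation tss), assuming NumPy:

import numpy as np

A = np.arange(9).reshape(3, 3)
B = reindexMatrix([0, 2], [2, 0], A)  # swap rows/columns 0 and 2
print(B)
# [[8 7 6]
#  [5 4 3]
#  [2 1 0]]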
def createInfoMatix(character_id):
"""初始化创建的角色的阵法
"""
petlist = getCharacterPetList(character_id)
sql = "INSERT INTO `tb_character_matrix`(`characterId`,`eyes_4`,`eyes_5`,`eyes_6`) \
VALUES(%d,%d,%d,%d);"%(character_id,petlist[0],petlist[1],petlist[2])
conn = dbpool.connection()
cursor = conn.cursor()
count = cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
if(count >= 1):
return True
return False | 7392f899ed8b46fd35ed360601edd8621aace7ac | 3,636,972 |
def help():
"""<b>Print available functions as json.<br>"""
func_list = {}
for rule in app.url_map.iter_rules():
if rule.endpoint != 'static':
func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__
return jsonify(func_list) | 2ef2193aaa9d882b238a7681cb3e868690a58398 | 3,636,973 |
import os
async def grid_train(params: Grid, background_tasks: BackgroundTasks, credentials: HTTPBasicCredentials = Depends(validate_access)):
"""
Choose an estimator, and hyper-parameters to optimize for a GridSearchCV. Results can be recorded in Neptune.ai.
"""
try:
X_train, X_test, y_train, y_test = load_data(params.data_path, params.comment_col, params.target_col)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
params.estimator = params.estimator.upper()
if params.estimator not in MODELS.keys():
raise HTTPException(status_code=400, detail=f"The model isn't registered in the API. You can choose between {','.join(list(MODELS.keys()))}")
if params.parameters is None:
params.parameters = MODELS[params.estimator]["default_hyperparams"]
else:
        # hyperparameter validation should be added here
params.parameters = {f"clf__{param}": liste for param, liste in params.parameters.items()}
if params.estimator == "SVC":
params.parameters["clf__probability"] = [True]
# start logging
run = None
if params.neptune_log:
try:
run = activate_monitoring(os.getenv('NEPTUNE_USER'), os.getenv('NEPTUNE_PROJECT'))
params.tags.extend([params.estimator, "grid"])
create_exp(params.parameters, params.tags, run)
except neptune.exceptions.NeptuneInvalidApiTokenException as e:
raise HTTPException(status_code=400, detail="Not currently connected to NEPTUNE.ai. Ask the developer to provide its user access.")
# run modeling in the background
background_tasks.add_task(grid_run_model, params, run, X_train, X_test, y_train, y_test)
return {'res' : "The model is running. You will receive a mail if you provided your email address."} | 30f68446ac1fb0d9bd76a54a11a99540fb9a32d6 | 3,636,974 |
def version():
"""
Returns the name, version and api_version of the application when
a HTTP GET request is made.
"""
return jsonify(
name='openshift-python-flask-sample',
version=VERSION
) | 70686195978cf9d26e2d4cd954c81fc216d7bd4d | 3,636,975 |
import json
def search_quotes(request, currency):
""" Consulta a API procurando por ações que contenham o campo 'currency' no nome """
    # check whether the search bar was filled in or is empty
if currency:
conn.request("GET", "/auto-complete?q="+currency+"®ion=BR", headers=headers)
res = conn.getresponse()
data = res.read()
api_quotes = json.loads(data.decode("utf-8"))['quotes']
        # check whether the user wants to search only on B3 or globally, using the selector on the page
if 'onlyB3' in request.GET:
if request.GET['onlyB3'] == '1':
SA_quotes = [ { key: quote[key] for key in quote } for quote in api_quotes if quote['exchange'] == 'SAO' or quote['symbol'].endswith('.SA') ]
return { 'quotes': SA_quotes }
return { 'quotes': api_quotes }
else:
        # if the search is empty, return an empty dictionary
return { 'quotes': {} } | 926f29d802a7bb6a9681b3b90fd46966894a0604 | 3,636,976 |
def destroy(N, dtype=tf.complex64):
"""Returns a destruction (lowering) operator in the Fock basis.
Args:
N (int): Dimension of Hilbert space
        dtype (tf.dtypes.DType, optional): Returned dtype. Defaults to tf.complex64.
    Returns:
        Tensor([N, N], dtype): NxN annihilation (lowering) operator
"""
a = diag(tf.sqrt(tf.range(1, N, dtype=tf.float64)), k=1)
return tf.cast(a, dtype=dtype) | a92ef2cc5aa9b7bbe2c0cf109282c5fde56d4603 | 3,636,977 |
from typing import Optional
from typing import Any
def get_nearest_operation(
db: Redis[bytes], address: hash_t, subdag: Optional[str] = None
) -> Optional[Operation]:
"""Return the operation at address or the operation generating address."""
root = "root"
art = None
try:
node = Operation.grab(db, address)
return node
except RuntimeError:
# one possibility is that address is an artefact...
try:
art = Artefact[Any].grab(db, address)
except RuntimeError:
raise RuntimeError(
f"address {address} neither a valid operation nor a valid artefact."
)
if art.parent == root:
# We have basically just a single artefact as the network...
return None
else:
node = Operation.grab(db, art.parent)
return node | a402ed795d60f321cd362517e9350994be836cdd | 3,636,978 |
def load_CIFAR_batch(file_path):
""" load single batch of cifar """
data_dict = load_pickle(file_path)
data = data_dict['data']
labels = data_dict['labels']
data = data.reshape(10000, 3, 32, 32).astype("float")
labels = np.array(labels)
return data, labels | 0164293fb2f31e7361da5a817c64899db96c6156 | 3,636,979 |
def _disposable_and_async_gen_from_obs(obs: Observable):
"""
Compatability layer for legacy Observable to async generator
This should be removed and subscription resolvers changed to
return async generators after removal of flask & gevent based dagit.
"""
queue: Queue = Queue()
disposable = obs.subscribe(on_next=queue.put_nowait)
async def async_gen():
while True:
i = await queue.get()
yield i
return disposable, async_gen() | ed0620b3615a36e82c20789f6a3b40aa6ae61410 | 3,636,980 |
def interesting_pattern(x: float, y: float) -> float:
"""This function is interesting in x and y in range -10..10, returning
a float value in range 0..1
"""
z = 0.5 + (np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)) / 2
return z | 432e13324b1834cbdd62259f0ac0b59751008f90 | 3,636,981 |
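Because the expression broadcasts, the function works element-wise over arrays as well as scalars; a minimal sketch evaluating it on a grid:

import numpy as np

xs, ys = np.meshgrid(np.linspace(-10, 10, 200), np.linspace(-10, 10, 200))
z = interesting_pattern(xs, ys)
print(z.shape)  # (200, 200) -- e.g. plot with matplotlib's imshow to see the pattern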
import logging
def cut(st, sec_before_split=None):
"""
Cut/trim the record.
This method minimally requires that the windows.signal_end method has been
run, in which case the record is trimmed to the end of the signal that
was estimated by that method.
To trim the beginning of the record, the sec_before_split must be
specified, which uses the noise/signal split time that was estiamted by the
windows.signal_split mehtod.
Args:
st (StationStream):
Stream of data.
sec_before_split (float):
Seconds to trim before split. If None, then the beginning of the
record will be unchanged.
Returns:
stream: cut streams.
"""
if not st.passed:
return st
for tr in st:
logging.debug('Before cut end time: %s ' % tr.stats.endtime)
etime = tr.getParameter('signal_end')['end_time']
tr.trim(endtime=etime)
logging.debug('After cut end time: %s ' % tr.stats.endtime)
if sec_before_split is not None:
split_time = tr.getParameter('signal_split')['split_time']
stime = split_time - sec_before_split
logging.debug('Before cut start time: %s ' % tr.stats.starttime)
if stime < etime:
tr.trim(starttime=stime)
else:
                tr.fail('The \'cut\' processing step resulted in '
'incompatible start and end times.')
logging.debug('After cut start time: %s ' % tr.stats.starttime)
tr.setProvenance(
'cut',
{
'new_start_time': tr.stats.starttime,
'new_end_time': tr.stats.endtime
}
)
return st | e71d8a7635aa35dc5746541f00f90d991b4c4e62 | 3,636,982 |
def interp_road(d,croad,roads,intersections,normD = False):
""" Get the position of a point along a road """
start_int = roads[croad]['start_int']
start_pos = intersections[start_int]['position']
end_int = roads[croad]['end_int']
end_pos = intersections[end_int]['position']
if not normD:
length = road_length(croad,roads,intersections)
if 'type' not in roads[croad] or roads[croad]['type'] == 'line':
if normD:
alpha = d
else:
alpha = d/length
return (1.0-alpha)*start_pos + alpha*end_pos
else:
C = roads[croad]['center'].reshape(2)
startR = np.sqrt(np.sum(np.power(C - start_pos,2.0)))
endR = np.sqrt(np.sum(np.power(C - end_pos,2.0)))
r = 0.5*(startR + endR)
startTheta = np.arctan2(start_pos[1] - C[1],start_pos[0] - C[0])
if startTheta < 0:
startTheta += 2.0*np.pi
endTheta = np.arctan2(end_pos[1] - C[1],end_pos[0] - C[0])
if endTheta < 0:
endTheta += 2.0*np.pi
if roads[croad]['turn_direction'] < 0 and endTheta > startTheta:
startTheta += 2.0*np.pi
elif roads[croad]['turn_direction'] > 0 and endTheta < startTheta:
endTheta += 2.0*np.pi
# startTheta = roads[croad]['start_theta']
# endTheta = roads[croad]['end_theta']
if normD:
curr_theta = (1.0-d)*startTheta + d*endTheta
else:
curr_theta = startTheta + (endTheta - startTheta)*(d/length)
return C + r*np.array([np.cos(curr_theta),np.sin(curr_theta)]).reshape(2) | f6406dbb586ba2870d95f627f18085ec12c3b64b | 3,636,983 |
def zero_pad2d(inputs, padding=0, output_dtype="float32", requires_grad=False):
"""
Zero padding for 2d tensor
Args:
-----------------------------
inputs : Tensor
shape [batch, channel, height, width]
padding: (optional:0) int or tuple
expected: (h_pad_up, h_pad_down, w_pad_up, w_pad_down)
output_dtype : str
requires_grad : bool
-----------------------------
Returns:
-----------------------------
Tensor
shape [batch, channel, padded_height, padded_width]
-----------------------------
"""
padding = (padding, padding, padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
assert isinstance(padding, tuple), "type(padding)={}".format(type(padding))
if len(padding) == 2:
padding = (padding[0], padding[0], padding[1], padding[1])
assert (len(padding) == 4)
if all([padding[i] == 0 for i in range(len(padding))]):
return inputs
batch_size, in_channel, height, width = inputs.shape
padded_shape = (batch_size, in_channel, height + padding[0] + padding[1], width + padding[2] + padding[3])
padding_value = tvm.tir.expr.const(0, output_dtype)
def _inner_zero_pad2d(inputs):
def _for_spatial(b, c, h, w):
def _for_reduce():
return tvm.te.if_then_else(
tvm.te.all(h >= padding[0], h < height + padding[0], w >= padding[2], w < width + padding[2]),
inputs[b, c, h - padding[0], w - padding[2]],
padding_value
)
return _for_reduce, [], "none"
return _for_spatial
return Compute(padded_shape, output_dtype , inputs, fhint=_inner_zero_pad2d, name="zero_pad2d", requires_grad=requires_grad) | 77ae8065f6e1c3b181a6bb49bd84ae4951848d7b | 3,636,984 |
def gtfs_admin(request):
"""admin page for adding new review categories (and potentially other features down the road)"""
return render(request, 'admin/gtfs_admin.html') | 14fccf4c1a8758fa223133f6e191860b6aee01a9 | 3,636,985 |
def get_file_path():
"""
Get current file's directory.
Return `None` if there is no file path available.
"""
try:
file_path = sublime.active_window().extract_variables()['file_path']
except KeyError:
return None
else:
return file_path | 0f991da4edf82435260aad443a4b506d1e2a5453 | 3,636,986 |
import random
def mutate_word(word):
"""Introduce a random change into the word: delete, swap, repeat, and add
stray character. This may raise a ValueError. """
word = list(word)
choice = random.randrange(4)
if choice == 0: # Delete a character
word.pop(random.randrange(len(word)))
elif choice == 1: # Swap two characters
index = random.randrange(0, len(word) - 1)
word[index], word[index + 1] = word[index + 1], word[index]
elif choice == 2: # Repeat a character
index = random.randrange(0, len(word))
word.insert(index, word[index])
elif choice == 3: # Insert a stray character
char = chr(random.randint(ord('a'), ord('z')))
word.insert(random.randint(0, len(word)), char)
return ''.join(word) | f3b45f36893a7541131710ada5f1343387f06797 | 3,636,987 |
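Usage sketch; seeding the RNG makes a run reproducible, though the exact mutation depends on the interpreter's random stream:

import random

random.seed(0)
print(mutate_word("kitten"))  # e.g. a deletion, swap, repeated or stray character applied to 'kitten'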
def data_layer_property_from_dict(data_layer_property_dictionary: dict,
client: cl.Client = None):
"""
The method converts a dictionary of DataLayerProperty to a DataLayerProperty object.
    :param data_layer_property_dictionary: A dictionary that contains the keys of a DataLayerProperty.
    :type data_layer_property_dictionary: dict
:param client: An IBM PAIRS client.
:type client: ibmpairs.client.Client
:rtype: ibmpairs.catalog.DataLayerProperty
:raises Exception: If not a dict.
"""
data_layer_property = DataLayerProperty.from_dict(data_layer_property_dictionary)
cli = common.set_client(input_client = client,
global_client = cl.GLOBAL_PAIRS_CLIENT)
data_layer_property.client = cli
return data_layer_property | ab579c1d6527abb176cd05c81d89fb1a74af50b0 | 3,636,988 |
def pcc_vector(v1, v2):
"""Pearson Correlation Coefficient for 2 vectors
"""
len1 = len(v1)
len2 = len(v2)
if len1 != len2:
return None
else:
length = len1
avg1 = 1.0 * sum(v1) / len(v1)
avg2 = 1.0 * sum(v2) / len(v2)
dxy = [(v1[i] - avg1) * (v2[i] - avg2) for i in range(length)]
dx2 = [(v1[i] - avg1) ** 2 for i in range(length)]
dy2 = [(v2[i] - avg2) ** 2 for i in range(length)]
return sum(dxy) / (sum(dx2) * sum(dy2)) ** 0.5 | 98e5f3cc304a5d844be479d65ab7eeb760a34ba3 | 3,636,989 |
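Quick cross-check against NumPy's built-in correlation (the two should agree to floating-point precision):

import numpy as np

v1 = [1.0, 2.0, 3.0, 4.0]
v2 = [2.0, 4.0, 6.0, 8.1]
print(pcc_vector(v1, v2))             # close to 1.0
print(np.corrcoef(v1, v2)[0, 1])      # same value from NumPy
print(pcc_vector([1, 2], [1, 2, 3]))  # None -- mismatched lengths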
from io import StringIO
def cypher_repr(obj):
""" Generate the Cypher representation of an object.
"""
string = StringIO()
writer = CypherWriter(string)
writer.write(obj)
return string.getvalue() | eae9e848076a4626a001e70b9cd925734864b3ae | 3,636,990 |
def firstlastmile_pipeline(**kwargs):
"""The first and last mile pipeline attaches any unattached elements to ensure a fully-connected graph"""
tags = ['flmile']
firstmile_nodes = [
node(
firstmile_edge,
['sjoin_oilfields_data','sjoin_edges_pipelines_oilfields','sjoin_ports_data','sjoin_cities_data','sjoin_pipelines_data'],
'flmile_edges_oilfields',
tags=tags+['firstmile','firstmile_oilfields']
), # assets, existing_edges, closest port, city, [pipeline/railway]
node(
firstmile_edge,
['sjoin_oilwells_data','sjoin_edges_pipelines_oilwells','sjoin_ports_data','sjoin_cities_data','sjoin_pipelines_data'],
'flmile_edges_oilwells',
tags=tags+['firstmile','firstmile_oilwells']
), # assets, existing_edges, closest port, city, [pipeline/railway]
node(
firstmile_edge,
['sjoin_coalmines_data','sjoin_edges_railways_coalmines','sjoin_ports_data','sjoin_cities_data','sjoin_railways_data'],
'flmile_edges_coalmines',
tags=tags+['firstmile','firstmile_coalmines']
), # assets, existing_edges, closest port, city, [pipeline/railway]
]
lastmile_nodes = [
node(
powerstations_lastmile,
['sjoin_powerstations_data','sjoin_edges_pipelines_powerstations','sjoin_edges_railways_powerstations','sjoin_railways_data','sjoin_pipelines_data','sjoin_ports_data','sjoin_cities_data'], # powerstations, ps_edges_pipelines, ps_edges_railways, railways, pipelines, ports, cities
'flmile_edges_powerstations',
tags=tags+['lastmile','lastmile_powerstations']
)
]
lastmile_nodes += [
node(
cities_delauney,
['sjoin_cities_data','ne'],
'flmile_edges_cities',
tags = tags+['lastmile','lastmile_cities']
),
node(
shippingroutes_lastmile,
['sjoin_edges_shippingroutes_ports','sjoin_shippingroutes_data','sjoin_ports_data'],
'flmile_edges_shippingroutes_ports',
tags= tags+['lastmile','lastmile_shippingroutes','lastmile_shippingroutes_ports']
),
node(
shippingroutes_lastmile,
['sjoin_edges_shippingroutes_lngterminals','sjoin_shippingroutes_data','sjoin_lngterminals_data'],
'flmile_edges_shippingroutes_lngterminals',
tags= tags+['lastmile','lastmile_shippingroutes','lastmile_shippingroutes_lng']
),
]
IDL_nodes = [
node(
connect_IDL,
'sjoin_shippingroutes_data',
'flmile_idl_edges',
tags=tags+['flmile_idl']
)
]
null_nodes = [node(null_forward, f'sjoin_{sector}_data', f'flmile_{sector}_data',tags = tags+['flm_null',f'flm_null_{sector}'])
for sector in ALL_SECTORS]
null_nodes += [node(null_forward, f'sjoin_edges_{sector1}_{sector2}', f'flmile_edges_{sector1}_{sector2}', tags=tags+['flm_null',f'flm_null_{sector1}_{sector2}'])
for sector1, sector2 in SJOIN_PAIRS if sector1!='shippingroutes']
return Pipeline(firstmile_nodes + lastmile_nodes + IDL_nodes + null_nodes) | 545fda88458fb0266b0f4f98791de83759ba96f5 | 3,636,991 |
def photo_new(request, cast: Cast):
"""
Add a new Photo to a cast
"""
if request.method == 'POST':
form = CastPhotoForm(request.POST, request.FILES)
if form.is_valid():
photo = form.save(commit=False)
photo.cast = cast
photo.save()
messages.success(request, f'Photo has been added')
return redirect('cast_photo_detail', slug=cast.slug, pk=photo.pk)
else:
form = CastPhotoForm()
return render(request, 'castadmin/photo_edit.html', {
'cast': cast,
'form': form,
}) | cf9aac5f0ea49e48e571d89227c69f8ff382162a | 3,636,992 |
def decode(argument: str) -> tuple[list[int], ...]:
"""Decode argument string from command line
:param argument: argument string
:return: pair of list of digits
"""
char_lists = map(list, argument.split('-'))
range_ = tuple(list(map(int, clist)) for clist in char_lists)
return range_ | d3805396cab52fc09896ca9553f1ac3450f27e99 | 3,636,993 |
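Example: a range argument such as '135-468' decodes into per-digit lists:

print(decode("135-468"))  # ([1, 3, 5], [4, 6, 8])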
def generate_data(p=11, n=400):
"""
Generates non-linear multivariate data of dimension 'p'.
The data is linear in parameters of the type:
y = b0 + b1 * x + b2*x^2 + ... + bp * x^p
Args:
:param p: int
dimensions
:param n: int
number of samples
Returns: np.ndarray
"""
true_params = np.random.uniform(low=0.0, high=1.0, size=p+1)
x = np.sort(np.random.uniform(low=-1.0, high=1.0, size=n))
X = np.zeros(shape=(n, p+1), dtype=float)
X[:, 0] = 1.
for i in range(p):
X[:, i+1] = x ** (i+1)
# logger.debug("X:\n%s" % str(list(X)))
e = np.random.normal(loc=0.0, scale=0.2, size=n)
y_true = X.dot(true_params)
y = y_true + e
# logger.debug("y:\n%s" % str(list(y)))
return X, y, true_params, x, y_true, e | 98f4e1f661d8f21307115c1d1873bcf1c111ff85 | 3,636,994 |
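Since the model is linear in its parameters, an ordinary least-squares fit recovers them; a minimal sketch (seeded for reproducibility):

import numpy as np

np.random.seed(0)
X, y, true_params, x, y_true, e = generate_data(p=5, n=200)
est_params, *_ = np.linalg.lstsq(X, y, rcond=None)
print(np.round(true_params, 2))
print(np.round(est_params, 2))  # approximately recovers true_params (raw polynomial bases are collinear, so expect some error)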
def get_search_apps():
"""Gets all registered search apps."""
return tuple(_load_search_apps().values()) | 5287abce0a31e9eb2165aafb8a6cfbaabda85e48 | 3,636,995 |
def volume_tetrahedron(
point_a: array_like, point_b: array_like, point_c: array_like, point_d: array_like
) -> np.float64:
"""
Return the volume of a tetrahedron defined by four points.
The points are the vertices of the tetrahedron. They must be 3D or less.
Parameters
----------
point_a, point_b, point_c, point_d : array_like
The four vertices of the tetrahedron.
Returns
-------
np.float64
The volume of the tetrahedron.
References
----------
http://mathworld.wolfram.com/Tetrahedron.html
Examples
--------
>>> from skspatial.measurement import volume_tetrahedron
>>> volume_tetrahedron([0, 0], [3, 2], [-3, 5], [1, 8])
0.0
>>> volume_tetrahedron([0, 0, 0], [2, 0, 0], [1, 1, 0], [0, 0, 1]).round(3)
0.333
>>> volume_tetrahedron([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]).round(3)
0.167
"""
vector_ab = Vector.from_points(point_a, point_b)
vector_ac = Vector.from_points(point_a, point_c)
vector_ad = Vector.from_points(point_a, point_d)
vector_cross = vector_ac.cross(vector_ad)
# Set the dimension to 3 so it matches the cross product.
vector_ab = vector_ab.set_dimension(3)
return 1 / 6 * abs(vector_ab.dot(vector_cross)) | 3369044cfe53762c9bbbf8363da5d385b14b51ba | 3,636,996 |
def lemmatizer(word):
"""Returns: lemmatized word if word >= length 5
"""
if len(word)<4:
return word
return wnl.lemmatize(wnl.lemmatize(word, "n"), "v") | f8e5020b85638464b261e1ec066a141ba4a202a0 | 3,636,997 |
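The module-level wnl is not shown in this snippet; a minimal sketch assuming it is NLTK's WordNetLemmatizer (requires the WordNet corpus, e.g. nltk.download('wordnet')):

from nltk.stem import WordNetLemmatizer

wnl = WordNetLemmatizer()  # assumed definition of the module-level lemmatizer
print(lemmatizer("cat"))      # 'cat' -- shorter than 4 characters, returned unchanged
print(lemmatizer("running"))  # lemmatized as noun then verb -> 'run'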
def kolmogn(n, x, cdf=True):
"""Computes the CDF for the two-sided Kolmogorov-Smirnov distribution.
The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x),
for a sample of size n drawn from a distribution with CDF F(t), where
    D_n = sup_t |F_n(t) - F(t)|, and
F_n(t) is the Empirical Cumulative Distribution Function of the sample.
Parameters
----------
n : integer, array_like
the number of samples
x : float, array_like
The K-S statistic, float between 0 and 1
cdf : bool, optional
whether to compute the CDF(default=true) or the SF.
Returns
-------
cdf : ndarray
CDF (or SF it cdf is False) at the specified locations.
The return value has shape the result of numpy broadcasting n and x.
"""
    it = np.nditer([n, x, cdf, None], op_dtypes=[None, float, bool, float])
for _n, _x, _cdf, z in it:
if np.isnan(_n):
z[...] = _n
continue
if int(_n) != _n:
raise ValueError(f'n is not integral: {_n}')
z[...] = _kolmogn(int(_n), _x, cdf=_cdf)
result = it.operands[-1]
return result | 132672a1bf45bb0b675c3ce503d47ed4f740184b | 3,636,998 |
import os
def join_legacy_read_path(sample_path: str, suffix: int) -> str:
"""
Create a path string for a sample read file using the old file name convention (eg. reads_1.fastq).
:param sample_path: the path to the sample directory
:param suffix: the read file suffix
:return: the read path
"""
return os.path.join(sample_path, f"reads_{suffix}.fastq") | b6e12de4edfec05fb8a5fa2363dce284dcfdd5f0 | 3,636,999 |
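Example (POSIX-style path separator assumed):

print(join_legacy_read_path("/data/samples/sample_1", 2))
# /data/samples/sample_1/reads_2.fastq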