content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def make_wc2018_dataset(
    matches_features: pd.DataFrame,
    team_features: pd.DataFrame,
    wc2018_qualified: pd.DataFrame):
    """Build the dataset of all pairwise FIFA 2018 World Cup matches to predict.

    Simulating the Tournament:
    With a trained model at our disposal, we can now run tournament simulations
    on it. For example, let's take the qualified teams for the FIFA 2018 World
    Cup. Every unordered pair of qualified teams becomes one row, joined with
    each team's most recent historical stats plus tournament metadata, and laid
    out with the same columns (and order) as ``matches_features``.

    `team_features` data structure.
    ['date', 'draw', 'finaltourn', 'friendly', 'ga', 'gd', 'gs', 'homeVenue',
    'loss', 'match_id', 'name', 'neutralVenue', 'opponentName',
    'qualifier', 'win', 'confederation', 'name_y', 'fifa_code', 'ioc_code',
    'opponentCC', 'last10win_per', 'last10loss_per', 'last10draw_per',
    'last10gd_per', 'last10_oppCC_per', 'last30win_per', 'last30loss_per',
    'last30draw_per', 'last30gd_per', 'last30_oppCC_per', 'last50win_per',
    'last50loss_per', 'last50draw_per', 'last50gd_per', 'last50_oppCC_per']
    """
    print('# Do `make_wc2018_dataset`.')
    def expand_grid(dictionary):
        # Cartesian product of all value lists; one column per dict key.
        return pd.DataFrame([row for row in product(*dictionary.values())],
                            columns=dictionary.keys())
    # Make all possible match pairs.
    to_predict = expand_grid(
        {'team1': wc2018_qualified.name.values, "team2": wc2018_qualified.name.values})
    # Lexicographic filter keeps each unordered pair once and drops self-matches.
    to_predict = to_predict[to_predict.team1 < to_predict.team2]
    # Select the latest game stats.
    team_features = team_features.sort_values('date').groupby(by='name').last().reset_index()
    # Preprocess team_features.
    team_features = team_features[
        ['name', 'gd', 'last10win_per', 'last10loss_per', 'last10draw_per',
         'last10gd_per', 'last10_oppCC_per', 'last30win_per', 'last30loss_per',
         'last30draw_per', 'last30gd_per', 'last30_oppCC_per', 'last50win_per',
         'last50loss_per', 'last50draw_per', 'last50gd_per', 'last50_oppCC_per']]
    # Add historical info.
    to_predict = to_predict.merge(
        team_features, left_on=['team1'], right_on=['name'], how='left')
    to_predict = to_predict.merge(
        team_features, left_on=['team2'], right_on=['name'], how='left', suffixes=('.t1', '.t2'))
    # 'outcome' is a placeholder column (copied from gd.t1) so the frame matches
    # the training schema; its value is not a real label here.
    to_predict['outcome'] = to_predict['gd.t1']
    to_predict = to_predict.drop(columns=['name.t1', 'name.t2', 'gd.t1', 'gd.t2'])
    length = len(to_predict)
    # Russia (RUS) is the host, so it is the only non-neutral venue team.
    extra_features = pd.DataFrame(
        {'date': ['2018-06-14'] * length, 'team1Home': to_predict.team1 == 'RUS',
         'team2Home': to_predict.team2 == 'RUS',
         'neutralVenue': (to_predict.team1 != 'RUS') & (to_predict.team2 != 'RUS'),
         'friendly': [False] * length, 'qualifier': [False] * length, 'finaltourn': [True] * length})
    to_predict = pd.concat([extra_features, to_predict], axis=1)
    # Keep columns the same order as matches_features.
    to_predict = to_predict[matches_features.columns]
    return to_predict | a9c0647bd3a87025cd8413170e3e7208b238822d | 3,629,100 |
from typing import OrderedDict
def calcular_indices(arr, clases_posibles):
    """
    Compute aggregate positive/negative indices over an array of predictions.

    Each element of *arr* is expected to hold the true class in its first
    position and the predicted class in its second position. Per-class counts
    (vp, vn, fp, fn) are summed across all classes, then sensitivity ('sens')
    and specificity ('espc') are derived (None when the denominator is zero).

    Returns the aggregated OrderedDict plus the per-class index dicts.
    """
    per_class = [calcular_indices_clase(arr, clase) for clase in clases_posibles]
    counters = ("vp", "vn", "fp", "fn")
    result = OrderedDict((key, 0) for key in counters)
    for indices in per_class:
        for key in counters:
            result[key] += indices[key]
    # Sensitivity = vp / (vp + fn); undefined when there are no positives.
    positives = result["vp"] + result["fn"]
    result["sens"] = None if positives == 0 else round(result["vp"] / positives, 3)
    # Specificity = vn / (vn + fp); undefined when there are no negatives.
    negatives = result["vn"] + result["fp"]
    result["espc"] = None if negatives == 0 else round(result["vn"] / negatives, 3)
    return result, per_class
def create_app():
    """Creating and configuring an instance of the Flask application.

    Application-factory pattern: returns a fresh Flask app with a single
    root route registered.
    """
    app = Flask(__name__)
    @app.route('/')
    def root():
        """Base view."""
        return 'TODO - part 2 and beyond!'
    return app | 8d4f5a047a1118760b409f9828bce3e9fb4bfeff | 3,629,102 |
def _box_faces(image):
    """Draw a border around every detected face and return the image."""
    for detected_face in image.faces:
        _box_face(image, detected_face)
    return image
def get_logger(name, is_task_logger=True):
    """Return a logger with the given name.

    By default a Celery task logger is built, which annotates each log line
    with the current task name and task ID when running inside a task. Outside
    of a task, Celery substitutes ``???`` for both. Only disable task logging
    when the code is guaranteed never to run inside a task.

    Version Added:
        3.0

    Args:
        name (unicode):
            The name shown in the log line. This is expected to be a module
            name.

        is_task_logger (bool, optional):
            Whether to construct a task logger.

    Returns:
        logging.Logger:
        The new (or existing) logger for the given name.
    """
    factory = _get_task_logger if is_task_logger else _get_logger
    return factory(name)
import warnings
def get_stratified_gene_usage_frequency(ts = None, replace = True):
    """
    MODIFIES A TCRsampler instance with estimates of vj_occur_freq stratified
    by subject, so each subject contributes equally regardless of its
    sequencing depth.

    Parameters
    ----------
    ts : tcrsampler.sampler.TCRsampler
        Sampler to modify; a default Britanova human beta background is
        loaded when None.
    replace : bool
        if True, ts.v_occur_freq is set to ts.v_occur_freq_stratified
        so other functions will work as before (same for vj and j).

    Returns
    -------
    ts : tcrsampler.sampler.TCRsampler
        The same instance, mutated in place (also returned for convenience).
    """
    if ts is None:
        ts = TCRsampler(default_background = 'britanova_human_beta_t_cb.tsv.sampler.tsv')
    # (1/uniqueTCR_sample_depth) / nsubject
    nsubjects = len(ts.ref_df.subject.value_counts())
    inverse_tcrs_per_subject = (1/ts.ref_df.subject.value_counts()) / nsubjects
    # <weights df>
    ws_df = pd.DataFrame({'subject': inverse_tcrs_per_subject.index, 'sweight': inverse_tcrs_per_subject}).reset_index(drop = True)
    # left join <ws_df> to provide a subject specific weight
    df = ts.ref_df.merge(ws_df, how = 'left', on = 'subject').copy()
    # All sweights should sum to 1.0, up to rounding error
    assert np.isclose(df.sweight.sum() ,1.0)
    # SUBJECT STRATIFIED V,J FREQUENCIES
    # For each V,J combo take the weighted sum across all samples
    df_vj_occur_freq = df[['sweight','v_reps','j_reps']].groupby(['v_reps','j_reps']).sum().reset_index().rename(columns = {'sweight': 'pVJ'})
    assert np.isclose(df_vj_occur_freq.pVJ.sum() ,1.0)
    df_vj_occur_freq  # no-op notebook-style echo, kept as-is
    # Covert to a dictionary keyed on (V,J)
    ts.vj_occur_freq_stratified = { (x[0],x[1]): x[2] for x in df_vj_occur_freq.to_dict('split')['data']}
    # SUBJECT STRATIFIED VFREQUENCIES
    df_v_occur_freq = df[['sweight','v_reps']].groupby(['v_reps']).sum().reset_index().rename(columns = {'sweight': 'pV'})
    assert np.isclose(df_v_occur_freq.pV.sum() ,1.0)
    df_v_occur_freq  # no-op notebook-style echo, kept as-is
    # Covert to a dictionary keyed on V
    ts.v_occur_freq_stratified = { x[0]:x[1] for x in df_v_occur_freq.to_dict('split')['data']}
    # SUBJECT STRATIFIED JFREQUENCIES
    df_j_occur_freq = df[['sweight','j_reps']].groupby(['j_reps']).sum().reset_index().rename(columns = {'sweight': 'pJ'})
    assert np.isclose(df_j_occur_freq.pJ.sum() ,1.0)
    df_j_occur_freq  # no-op notebook-style echo, kept as-is
    # Covert to a dictionary keyed on J
    ts.j_occur_freq_stratified = { x[0]:x[1] for x in df_j_occur_freq.to_dict('split')['data']}
    if replace:
        warnings.warn("REPLACING ts.vj_occur_freq WITH ts.vj_occur_freq_stratified", stacklevel=2)
        warnings.warn("REPLACING ts.v_occur_freq WITH ts.v_occur_freq_stratified", stacklevel=2)
        warnings.warn("REPLACING ts.j_occur_freq WITH ts.j_occur_freq_stratified", stacklevel=2)
        ts.vj_occur_freq = ts.vj_occur_freq_stratified
        ts.v_occur_freq = ts.v_occur_freq_stratified
        ts.j_occur_freq = ts.j_occur_freq_stratified
    return ts | 002367ea1e97ddbdef8692022cd312de5f27f857 | 3,629,105 |
def _calculateSvalues(xarr, yarr, sigma2=1.):
"""Calculates the intermediate S values required for basic linear regression.
See, e.g., Numerical Recipes (Press et al 1992) Section 15.2.
"""
if len(xarr) != len(yarr):
raise ValueError("Input xarr and yarr differ in length!")
if len(xarr) <= 1:
raise ValueError("Input arrays must have 2 or more values elements.")
S = len(xarr) / sigma2
Sx = np.sum(xarr / sigma2)
Sy = np.sum(yarr / sigma2)
Sxx = np.sum(xarr * xarr / sigma2)
Sxy = np.sum(xarr * yarr / sigma2)
return (S, Sx, Sy, Sxx, Sxy) | 53a7a1427c232e8cf226b6d80bcd59565803a3e0 | 3,629,106 |
from typing import Tuple
from typing import Optional
def existing_deployment_openshift(
    runner: Runner, deployment_arg: str, expose: PortMapping,
    add_custom_nameserver: bool
) -> Tuple[str, Optional[str]]:
    """
    Handle an existing deploymentconfig by doing nothing.

    Args:
        runner: Runner used to talk to the cluster.
        deployment_arg: Name of the existing proxy DeploymentConfig.
        expose: Port mapping (unused here; kept for interface parity with the
            sibling deployment handlers).
        add_custom_nameserver: Unused here; kept for interface parity.

    Returns:
        Tuple of (deployment name, run id). The run id is always None because
        no new deployment is created by this path.
    """
    runner.show(
        "Starting network proxy to cluster using the existing proxy "
        "DeploymentConfig {}".format(deployment_arg)
    )
    try:
        # FIXME: This call is redundant, as we already check for an existing dc
        # just to get to this code path in the first place.
        runner.check_call(
            runner.kubectl("get", "deploymentconfig", deployment_arg)
        )
    except CalledProcessError as exc:
        raise runner.fail(
            "Failed to find deploymentconfig {}:\n{}".format(
                deployment_arg, exc.stderr
            )
        )
    run_id = None
    return deployment_arg, run_id | 79f6cebd6854c20516315c34f4810d4b0c4e0204 | 3,629,107 |
import six
def remove_nulls_from_dict(d):
    """
    remove_nulls_from_dict function recursively remove empty or null values
    from dictionary and embedded lists of dictionaries.

    Filtering is by truthiness, so None, '', {}, [] -- and also 0 and False --
    are dropped from dicts and lists alike; any other value is returned as-is.
    """
    # The file is Python 3 elsewhere, so use native dict.items() rather
    # than six.iteritems().
    if isinstance(d, dict):
        return {k: remove_nulls_from_dict(v) for k, v in d.items() if v}
    if isinstance(d, list):
        return [remove_nulls_from_dict(entry) for entry in d if entry]
    return d
def frame_processors(configuration, call_types, return_types):
    """Build the call-frame and return-frame processor chains used for tracing.

    :type configuration: ducktest.config_reader.Configuration
    :param call_types: store that collects observed call-argument types
    :param return_types: store that collects observed return/yield types
    :return: tuple ``(call_frame_processor, return_frame_processor)``
    """
    # The typer itself heads a chain of type-specific processors so that
    # mapping, container and plain values are each handled appropriately.
    typer = IdleProcessor()
    chain(
        typer,
        MappingTypeProcessor(typer),
        ContainerTypeProcessor(typer),
        PlainTypeProcessor(),
    )
    # Both chains first filter frames to the configured directories.
    call_frame_processor = chain(
        DirectoriesValidater(configuration.write_docstrings_in_directories),
        CallVariableSplitter(),
        CallTypeStorer(call_types, typer)
    )
    return_frame_processor = chain(
        DirectoriesValidater(configuration.write_docstrings_in_directories),
        GeneratorTypeProcessor(return_types),
        ReturnTypeStorer(return_types, typer),
    )
    return call_frame_processor, return_frame_processor | ae56574ca20d91c1e50aaeb46dece0536a650940 | 3,629,109 |
import argparse
def get_args():
    """Parse and return the command-line arguments (one or more integers,
    optionally sorted)."""
    arg_parser = argparse.ArgumentParser(
        description='Sum Numbers',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Positional: one or more integers to add together.
    arg_parser.add_argument('item',
                            metavar='int',
                            type=int,
                            nargs='+',
                            help='Numbers to add')
    # Optional flag: emit the items in sorted order.
    arg_parser.add_argument('-s',
                            '--sorted',
                            action='store_true',
                            help='Sort the items')
    return arg_parser.parse_args()
import os
def GetLocalPackageArchiveDir(tar_dir, archive_name):
    """Returns directory where local package archive files live.

    Args:
      tar_dir: The tar root directory for where package archives would be found.
      archive_name: The name of the archive contained within the package.

    Returns:
      The standard location where local package archive files are found
      (tar_dir/ARCHIVE_DIR/archive_name).
    """
    return os.path.join(tar_dir, ARCHIVE_DIR, archive_name)
def StartAndRunFlow(flow_cls,
                    client_mock=None,
                    client_id=None,
                    check_flow_errors=True,
                    **kwargs):
  """Builds a test harness (client and worker), starts the flow and runs it.

  Args:
    flow_cls: Flow class that will be created and run.
    client_mock: Client mock object.
    client_id: Client id of an emulated client.
    check_flow_errors: If True, raise on errors during flow execution.
    **kwargs: Arbitrary args that will be passed to flow.StartFlow().

  Raises:
    RuntimeError: check_flow_errors was true and the flow raised an error in
      Start().

  Returns:
    The session id of the flow that was run.
  """
  with TestWorker() as worker:
    flow_id = flow.StartFlow(flow_cls=flow_cls, client_id=client_id, **kwargs)
    if check_flow_errors:
      # Catch errors raised synchronously in Start() before running the rest
      # of the flow.
      rdf_flow = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
      if rdf_flow.flow_state == rdf_flow.FlowState.ERROR:
        raise RuntimeError(
            "Flow %s on %s raised an error in state %s. \nError message: %s\n%s"
            % (flow_id, client_id, rdf_flow.flow_state, rdf_flow.error_message,
               rdf_flow.backtrace))
    # Drive the flow to completion with the mocked client and test worker.
    RunFlow(
        client_id,
        flow_id,
        client_mock=client_mock,
        check_flow_errors=check_flow_errors,
        worker=worker)
    return flow_id | fb8aee3f2ede547c7eeea293d950f0383060d925 | 3,629,112 |
from typing import Optional
def check_dk(base: str, add: Optional[str] = None) -> str:
    """Check country specific VAT-Id (Denmark).

    Applies the Danish modulus-11 weighted checksum over the 8 digits of
    *base*. Returns '' when the check passes and 'f' otherwise. *add* is
    unused, kept for interface parity with the other country checkers.
    """
    weights = (2, 7, 6, 5, 4, 3, 2, 1)
    total = sum(int(digit) * weight for digit, weight in zip(base, weights))
    return '' if total % 11 == 0 else 'f'
import html
def generate_sidepanel(spin_system, index):
    """Generate scrollable side panel listing for spin systems.

    Renders one list item showing the system's name, abundance (rounded to
    three decimals) and site count.
    NOTE(review): ``html`` here is used as Dash's html component module
    (html.B/Div/A/Li/H6), not the stdlib ``html`` -- confirm the import.
    """
    # title
    title = html.B(f"Spin system {index}", className="")
    # spin system name
    name = "" if "name" not in spin_system else spin_system["name"]
    name = html.Div(f"Name: {name}", className="")
    # spin system abundance; stored as e.g. "100 %", so split off the unit.
    abundance = (
        ""
        if "abundance" not in spin_system
        else np.around(float(spin_system["abundance"].split(" ")[0]), decimals=3)
    )
    abundance = html.Div(f"Abundance: {abundance} %", className="")
    # number of sites
    n_sites = len(spin_system["sites"])
    n_sites = html.Div(f"Sites: {n_sites}")
    a_tag = html.A([title, name, abundance, n_sites])
    # The H6(index) only shows for smaller screen sizes.
    return html.Li(
        [html.H6(index), html.Div(a_tag)],
        # draggable="true",
        className="list-group-item",
    ) | c820638c4e9baf2983b6f20ec7fb71a5e18f417b | 3,629,114 |
import subprocess
def ensure_installed(tool):
    """
    Checks if a given tool is installed and in PATH

    The PATH is first extended with the macOS install locations for
    STM32CubeMX and arm-none-eabi-llvm before running ``which``.

    :param tool: Tool to check if installed and in PATH
    :return: Full path of the tool. NOTE(review): Popen is used without text
        mode, so this is a *bytes* value (empty bytes when not found) --
        confirm callers expect bytes.
    """
    proc = subprocess.Popen('export PATH=$PATH:/Applications/STMicroelectronics/STM32CubeMX.app/Contents/MacOs/:/usr/local/opt/arm-none-eabi-llvm/bin/ && which ' + tool, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = proc.communicate()
    exitcode = proc.returncode
    # `which` exits 0 only when the tool was resolved on the PATH.
    if exitcode == 0:
        print('Found {t} install in "{p}"'.format(t=tool, p=out.strip()))
        return out.strip()
    else:
        print(tool + ' is not installed (or is not in the PATH).')
        return out.strip() | 6d77c686f679b693d2c29fa9750094573675a6d5 | 3,629,115 |
def clc_points(points):
    """
    Args:
        points (np.array|list): OpenCV cv2.boxPoints returns coordinates, order is
            [right_bottom, left_bottom, left_top, right_top]

    Returns:
        np.float32 array: the coordinates reordered as
            [left_top, right_top, right_bottom, left_bottom]
    """
    corners = [[int(coord) for coord in pt] for pt in list(points)]
    # Split into the two leftmost and two rightmost points...
    corners.sort(key=lambda pt: pt[0])
    left_pair = sorted(corners[:2], key=lambda pt: pt[1])
    right_pair = sorted(corners[2:], key=lambda pt: pt[1])
    # ...then order each pair top-to-bottom and assemble clockwise from top-left.
    ordered = [left_pair[0], right_pair[0], right_pair[1], left_pair[1]]
    return np.float32(ordered)
def choose_til(*args):
    """
    choose_til() -> bool

    Choose a type library ( 'ui_choose' , 'chtype_idatil' ).
    """
    # Auto-generated SWIG wrapper: delegates directly to the native
    # IDA kernwin binding.
    return _ida_kernwin.choose_til(*args) | 48ffd4ff9b66caf40602e4cf778dfff8240db5cc | 3,629,117 |
def _has_child_providers(context, rp_id):
    """Returns True if the supplied resource provider has any child providers,
    False otherwise
    """
    # A single matching row is enough, so cap the query at one result.
    child_sel = sa.select([_RP_TBL.c.id]).where(
        _RP_TBL.c.parent_provider_id == rp_id)
    first_child = context.session.execute(child_sel.limit(1)).fetchone()
    return bool(first_child)
import random
def is_zero(n, p=.5):
    """Return the mean of n random draws from (-1, 1).

    n: number of numbers to sum
    p: probability of 1 (probablity of -1 is 1-p)

    For p=.5 the result hovers around zero, concentrating there as n grows.
    """
    draws = random.choices((-1, 1), weights=(1 - p, p), k=n)
    return sum(draws) / n
from typing import Union
from typing import Sequence
from typing import Tuple
from typing import List
def reduce_loss(
    sim_time: Union[float, int],
    n_steps: int,
    scene: JaxScene,
    coordinate_init: Sequence,
    velocity_init: Sequence,
    target_coordinate: Sequence,
    attractor: Sequence,
    constants: dict,
) -> Tuple[float, List[Sequence]]:
    """
    Wrapper function that reduces loss across batch in order to produce scalar from the vmapped function

    Args:
        sim_time: Time in seconds per simulation
        n_steps: Number of steps per simulation
        scene: JaxScene object with scene geometry
        coordinate_init: jax array with initial agents coordinate
        velocity_init: jax array with initial agents velocity
        target_coordinate: jax array with the target for the agent
        attractor: jax array with attractor coordinates
        constants: Physical constants

    Returns:
        Single float loss for gradient and tuple with auxilary values
    """
    loss_out = vmapped_loss(
        sim_time,
        n_steps,
        scene,
        coordinate_init,
        velocity_init,
        target_coordinate,
        attractor,
        constants,
    )
    # Sum scalarizes the per-batch losses so grad() can differentiate;
    # the remaining outputs pass through untouched as auxiliaries.
    return np.sum(loss_out[0]), loss_out[1:] | 9499b0b2b379ba27c77a8d3c2364aed4f387e10b | 3,629,120 |
def lstm_ortho_initializer(scale=1.0):
  """LSTM orthogonal initializer.

  Returns an initializer that fills a [size_x, 4 * size_h] LSTM kernel with
  four independent orthogonal blocks (one per gate), each scaled by `scale`.
  """
  def _initializer(shape, dtype=tf.float32,
                   partition_info=None):  # pylint: disable=unused-argument
    size_x = shape[0]
    size_h = shape[1] // 4  # assumes lstm.
    t = np.zeros(shape)
    # One orthogonal block per gate slice of the concatenated kernel.
    t[:, :size_h] = orthogonal([size_x, size_h]) * scale
    t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale
    t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale
    t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale
    return tf.constant(t, dtype)
  return _initializer | 3eb9e743c7002878ade487d2d6a9cb0803bfea44 | 3,629,121 |
def extract_data(loss_logs, fields, max_x=-1):
    """Extract numerical logs from loss logs.

    Arguments:
        loss_logs: list of text files containing numerical log data generated by
            autoencoders
        fields: types of values to plot (each gets its own subplot, e.g.
            nonzero_mae, loss, prior_mean, etc.)
        max_x: limit plots to <max_x> iterations from start

    Returns:
        dict containing mappings from each field (e.g. nonzero_mae, loss,
        prior_mean, etc.) to a dict of directories (denoting different
        autoencoders), which in turn map to numpy arrays containing the values
        associated with that field and autoencoder. A second dict maps
        experiments to extra config info (e.g. adversarial_variance).

    Raises:
        RuntimeException if a field specified is not found.
    """
    data = defaultdict(dict)
    extra_info = defaultdict(dict)
    for loss_log in loss_logs:
        # Use the last five path components as the experiment identifier.
        exp = '/'.join(str(loss_log).split('/')[-5:])
        config = loss_log.parent / 'config'
        skip = False
        with open(config, 'r') as f:
            for line in f.readlines():
                param = line.split()[0]
                value = line.split()[1]
                if param == 'adversarial_variance':
                    extra_info[exp][param] = float(value)
                # densefs runs are excluded from the extraction entirely.
                if param == 'densefs':
                    skip = True
                    break
        if skip:
            continue
        print(str(loss_log.parent.resolve()).replace('/home/' + USER, '~'))
        df = pd.read_csv(loss_log, sep=' ')
        for field in fields:
            try:
                if max_x > 0:
                    data[field][exp] = df[field].to_numpy()[:max_x]
                else:
                    data[field][exp] = df[field].to_numpy()
            except KeyError:
                raise RuntimeError('{} not found.'.format(field))
    return data, extra_info | ce63d683569bf49447aff32c1bb3aa23f6f164eb | 3,629,122 |
def roc_pr_curves(xaxis, tpr_list, precision_list, model_names, model_colors=None, prc_chance=None,
                  prc_upper_ylim=None, figname=None, legend=True, figax=None, **kwargs):
    """Make a ROC and PR curve for each model, optionally with a SD. Compute an AUC score for each curve.

    Parameters
    ----------
    xaxis : list-like
        The FPR and Recall, i.e. the x-axis for both plots. All TPR and Precision lists should be
        interpolated/computed to reflect the values at each point on xaxis.
    tpr_list : list of lists, shape = [n_models, len(xaxis)]
        tpr_list[i] corresponds to the TPR values for model i along xaxis. If tpr_list[i] is a list, then do not plot a
        standard deviation of the TPR. If tpr_list[i] is a list of lists, then it represents the TPR of each fold
        from cross-validation, in which case it is used to compute the mean and std of the TPR.
    precision_list : list of lists, shape = [n_models, len(xaxis)]
        precision_list[i] corresponds to the precision values for model i along xaxis. If precision_list[i] is a list,
        then do not plot a standard deviation of the precision. If precision_list[i] is a list of lists,
        then it represents the precision of each fold from cross-validation, in which case it is used to compute the
        mean and std of the precision.
    model_names : list-like
        The name of each model.
    model_colors : list-like or None
        If not none, the color to use for each model.
    prc_chance : float or None
        If not none, plot a chance line for the PR curve at this value.
    prc_upper_ylim : float or None
        If specified, the upper ylim for the PR curve. Otherwise, use the uper ylim of the ROC curve.
    figname : str or None
        If specified, save the figure with prefix figname.
    legend : bool
        If specified, display a legend.
    figax : ([figure, figure], [axes, axes]) or None
        If specified, make the plot in the two provided axes. Otherwise, generate a new axes.
    kwargs : dict
        Additional parameters for saving a figure.

    Returns
    -------
    fig_list : The handle to both figures (one for the ROC and one for the PR).
    auroc_list : AUROC scores for each model
    auroc_std_list : 1SD of AUROC scores for each model, or None if not computed.
    aupr_list : AUPR scores for each model
    aupr_std_list : 1SD of AUPR scores for each model, or None if not computed.
    """
    if figax:
        fig_list, ax_list = figax
    else:
        fig_roc, ax_roc = plt.subplots()
        fig_pr, ax_pr = plt.subplots()
        fig_list = [fig_roc, fig_pr]
        ax_list = [ax_roc, ax_pr]
    # If no colors specified, evenly sample the colormap to color each model
    if model_colors is None:
        model_colors = np.linspace(0, 0.99, len(model_names))
        model_colors = set_color(model_colors)
    # ROC curves
    ax = ax_list[0]
    auroc_list, auroc_std_list = _plot_each_model(ax, xaxis, tpr_list, model_colors, model_names)
    # Chance line (the diagonal y = x for a ROC plot)
    ax.plot(xaxis, xaxis, color="black", linestyle="--", zorder=1)
    ax.set_xlabel("False Positive Rate")
    ax.set_ylabel("True Positive Rate")
    ax.set_aspect("equal")
    if legend:
        ax.legend(loc="lower right", frameon=False)
    # ylim of ROC curve will help format PR curve
    lower_ylim, upper_ylim = ax.get_ylim()
    # PR curves
    ax = ax_list[1]
    aupr_list, aupr_std_list = _plot_each_model(ax, xaxis, precision_list, model_colors, model_names)
    # Optional chance line and formatting
    if prc_chance:
        ax.axhline(prc_chance, color="black", linestyle="--", zorder=1)
    if not prc_upper_ylim:
        prc_upper_ylim = upper_ylim
    ax.set_ylim(bottom=lower_ylim, top=prc_upper_ylim)
    ax.set_xlabel("Recall")
    ax.set_ylabel("Precision")
    ax.set_aspect("equal")
    if legend:
        ax.legend(frameon=False)
    if figname:
        save_fig(fig_list[0], figname + "Roc", **kwargs)
        save_fig(fig_list[1], figname + "Pr", **kwargs)
    return fig_list, auroc_list, auroc_std_list, aupr_list, aupr_std_list | 3fa02960e628ab8ed43b8c9c53761e84efea182a | 3,629,123 |
def safe_divide(num, denom):
    """Divide the two numbers, mapping division-by-zero to 0.

    Args:
        num: numerator
        denom: denominator

    Returns:
        the quotient, or 0 if the denominator is 0
    """
    try:
        quotient = num / denom
    except ZeroDivisionError:
        return 0
    return quotient
def npSigma11(fitResult, nuisFilter="alpha_"):
    """
    Returns the block of the covariance matrix that corresponds to the main term.

    Every parameter whose name contains *nuisFilter* is treated as a nuisance
    parameter and its row/column is removed from the covariance matrix.
    """
    cov, pars = npCov(fitResult)
    # Indices of the parameters of interest (everything that is NOT filtered).
    keep = [i for i, par in enumerate(pars) if nuisFilter not in par]
    # Select the corresponding sub-matrix in one shot instead of deleting
    # rows/columns one by one.
    reduced_cov = cov[np.ix_(keep, keep)]
    kept_pars = [pars[i] for i in keep]
    return reduced_cov, kept_pars
import socket
def webpack_dev_url(request):
    """
    If webpack dev server is running, add HMR context processor so template can
    switch script import to HMR URL.

    Probes the configured dev host/port with a raw TCP connect; returns an
    empty context when WEBPACK_DEV_URL is unset or the server is unreachable.
    """
    if not getattr(settings, "WEBPACK_DEV_URL", None):
        return {}
    # The request host (sans port) is substituted into the configured
    # host/URL templates.
    data = {"host": split_domain_port(request._get_raw_host())[0]}
    hmr_socket = socket.socket()
    try:
        hmr_socket.connect(
            (settings.WEBPACK_DEV_HOST.format(**data), settings.WEBPACK_DEV_PORT)
        )
    except socket.error:
        # No HMR server
        logger.warning("Webpack dev server not found\n")
        return {}
    finally:
        hmr_socket.close()
    # HMR server found
    logger.info("Webpack dev server found, HMR enabled\n")
    context = {"WEBPACK_DEV_URL": settings.WEBPACK_DEV_URL.format(**data)}
    return context | 9e5146f87d25067b43debc0a75d2f02763502c1e | 3,629,126 |
from typing import Any
from typing import AbstractSet
import sympy
def parameter_symbols(val: Any) -> AbstractSet[sympy.Symbol]:
    """Returns parameter symbols for this object.

    Args:
        val: Object for which to find the parameter symbols.

    Returns:
        A set of parameter symbols if the object is parameterized. It the object
        does not implement the _parameter_symbols_ magic method or that method
        returns NotImplemented, returns an empty set.
    """
    symbol_names = parameter_names(val)
    return set(map(sympy.Symbol, symbol_names))
import re
def datetime(el, default_date=None):
    """Process dt-* properties

    Parses a microformats2 datetime out of *el*, handling the value-class
    pattern, 12-hour times, and date/time recombination with *default_date*.

    Args:
      el (bs4.element.Tag): Tag containing the dt-value
      default_date (string, optional): date used to complete a time-only value
    Returns:
      a tuple (string string): a tuple of two strings, (datetime, date)
    """
    def try_normalize(dtstr, match=None):
        """Try to normalize a datetime string.
        1. Use 'T' as the date/time separator.
        2. Convert 12-hour time to 24-hour time

        pass match in if we have already calculated it to avoid rework
        """
        match = match or (dtstr and re.match(DATETIME_RE + '$', dtstr))
        if match:
            datestr = match.group('date')
            hourstr = match.group('hour')
            minutestr = match.group('minute') or '00'
            secondstr = match.group('second') or '00'
            ampmstr = match.group('ampm')
            if ampmstr:
                hourstr = match.group('hour')
                if ampmstr.startswith('p'):
                    hourstr = str(int(hourstr) + 12)
            dtstr = '%sT%s:%s:%s' % (datestr, hourstr, minutestr, secondstr)
            tzstr = match.group('tz')
            if tzstr:
                dtstr += tzstr
        return dtstr
    # handle value-class-pattern
    value_els = el.find_all(class_=is_vcp_class)
    if value_els:
        date_parts = []
        # Collect each value element's textual date/time fragment, respecting
        # the per-tag precedence rules of the value-class pattern.
        for value_el in value_els:
            if 'value-title' in value_el.get('class', []):
                title = el.get('title')
                if title:
                    date_parts.append(title.strip())
            elif value_el.name in ('img', 'area'):
                alt = value_el.get('alt') or value_el.get_text()
                if alt:
                    date_parts.append(alt.strip())
            elif value_el.name == 'data':
                val = value_el.get('value') or value_el.get_text()
                if val:
                    date_parts.append(val.strip())
            elif value_el.name == 'abbr':
                title = value_el.get('title') or value_el.get_text()
                if title:
                    date_parts.append(title.strip())
            elif value_el.name in ('del', 'ins', 'time'):
                dt = value_el.get('datetime') or value_el.get_text()
                if dt:
                    date_parts.append(dt.strip())
            else:
                val = value_el.get_text()
                if val:
                    date_parts.append(val.strip())
        date_part = default_date
        time_part = None
        # Combine separate date and time fragments into one value.
        for part in date_parts:
            match = re.match(DATETIME_RE + '$', part)
            if match:
                # if it's a full datetime, then we're done
                date_part = match.group('date')
                time_part = match.group('time')
                return try_normalize(part, match=match), date_part
            if re.match(TIME_RE + '$', part):
                time_part = part
            elif re.match(DATE_RE + '$', part):
                date_part = part
        if date_part and time_part:
            date_time_value = '%sT%s' % (date_part,
                                         time_part)
        else:
            date_time_value = date_part or time_part
        return try_normalize(date_time_value), date_part
    # Non-VCP path: use the tag's own attribute (per tag type) or its text.
    prop_value = get_attr(el, "datetime", check_name=("time", "ins", "del"))\
        or get_attr(el, "title", check_name="abbr")\
        or get_attr(el, "value", check_name=("data", "input"))\
        or el.get_text()  # strip here?
    # if this is just a time, augment with default date
    match = re.match(TIME_RE + '$', prop_value)
    if match and default_date:
        prop_value = '%sT%s' % (default_date, prop_value)
        return try_normalize(prop_value), default_date
    # otherwise, treat it as a full date
    match = re.match(DATETIME_RE + '$', prop_value)
    return try_normalize(prop_value, match=match), match and match.group('date') | 38858690a4d77f522eb8d9a6967f75b5d5d7f9cd | 3,629,128 |
def lexicallyRelated(word1, word2):
    """
    Determine whether two words might be lexically related to one another.

    True when any configured stemmer maps both words to the same stem, or
    when either word is a prefix of the other.
    """
    share_stem = any(stem(word1) == stem(word2) for stem in stemmers)
    return share_stem or word1.startswith(word2) or word2.startswith(word1)
async def playing_check(ctx: commands.Context):
    """
    Checks whether we are playing audio in VC in this guild.

    This doubles up as a connection check (connected_check runs first and
    raises on its own when there is no voice client).

    Raises:
        commands.CheckFailure: connected but not currently playing.
    """
    if await connected_check(ctx) and not ctx.guild.voice_client.is_playing():
        raise commands.CheckFailure("The voice client in this guild is not playing anything.")
    return True | 480bd5ef0e6b6eef63af3f07307f99805adf73fb | 3,629,130 |
def create_calibrated_rtl(feature_columns, config, quantiles_dir):
  """Creates a calibrated RTL (random tiny lattices) estimator.

  Builds TF Lattice hyperparameters (with per-feature lattice-size
  overrides), applies any command-line --hparams overrides, and returns a
  calibrated RTL classifier using pre-computed feature quantiles.
  """
  feature_names = [fc.name for fc in feature_columns]
  hparams = tfl.CalibratedRtlHParams(
      feature_names=feature_names,
      num_keypoints=200,
      learning_rate=0.02,
      lattice_l2_laplacian_reg=5.0e-4,
      lattice_l2_torsion_reg=1.0e-4,
      lattice_size=3,
      lattice_rank=4,
      num_lattices=100)
  # Specific feature parameters.
  hparams.set_feature_param("capital_gain", "lattice_size", 8)
  hparams.set_feature_param("native_country", "lattice_size", 8)
  hparams.set_feature_param("marital_status", "lattice_size", 4)
  hparams.set_feature_param("age", "lattice_size", 8)
  # Command-line overrides take precedence over the defaults above.
  hparams.parse(FLAGS.hparams)
  _pprint_hparams(hparams)
  return tfl.calibrated_rtl_classifier(
      feature_columns=feature_columns,
      model_dir=config.model_dir,
      config=config,
      hparams=hparams,
      quantiles_dir=quantiles_dir) | e6a73e66049e47fd8f18272647b5b948b015a004 | 3,629,131 |
def route_distance(df, con):
    """
    Given a route's dataframe determine total distance (m) using gid.

    Sums the `length_m` of every way referenced by `df.edge`, excluding the
    final edge in the route.

    Args:
        df: dataframe with an `edge` column of way gids (numeric).
        con: open DB-API connection to a database with a `ways` table.

    Returns:
        Total distance in metres.
    """
    dist = 0
    cur = con.cursor()
    for edge in df.edge[0:-1]:
        # int() guards the interpolation: only a numeric literal can reach
        # the SQL text, closing the injection/malformed-literal hole of
        # formatting the raw value directly.
        query = 'SELECT length_m FROM ways WHERE gid={0}'.format(int(edge))
        cur.execute(query)
        out = cur.fetchone()
        dist += out[0]
    return dist
def convert_units(P, In='cm', Out='m'):
    """
    Quickly convert distance units between meters, centimeters and millimeters.

    The conversion factor is looked up directly (rather than derived by
    division) so the exact float literals are preserved.
    """
    factors = {
        'm': {'mm': 1000., 'cm': 100., 'm': 1.},
        'cm': {'mm': 10., 'cm': 1., 'm': 0.01},
        'mm': {'mm': 1., 'cm': 0.1, 'm': 0.001},
    }
    return factors[In][Out] * P
def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
                                   gamma=1e-4, tau_min=0.1, tau_max=0.5,
                                   nu=0.85):
    """
    Nonmonotone line search from [1]

    Parameters
    ----------
    f : callable
        Function returning a tuple ``(f, F)`` where ``f`` is the value
        of a merit function and ``F`` the residual.
    x_k : ndarray
        Initial position.
    d : ndarray
        Search direction.
    f_k : float
        Initial merit function value.
    C, Q : float
        Control parameters. On the first iteration, give values
        Q=1.0, C=f_k
    eta : float
        Allowed merit function increase, see [1]_
    nu, gamma, tau_min, tau_max : float, optional
        Search parameters, see [1]_

    Returns
    -------
    alpha : float
        Step length
    xp : ndarray
        Next position
    fp : float
        Merit function value at next position
    Fp : ndarray
        Residual at next position
    C : float
        New value for the control parameter C
    Q : float
        New value for the control parameter Q

    References
    ----------
    .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
           search and its application to the spectral residual
           method'', IMA J. Numer. Anal. 29, 814 (2009).
    """
    alpha_p = 1
    alpha_m = 1
    alpha = 1
    while True:
        # Try a step in the +d direction.
        xp = x_k + alpha_p * d
        fp, Fp = f(xp)
        if fp <= C + eta - gamma * alpha_p**2 * f_k:
            alpha = alpha_p
            break
        # Quadratic-interpolation trial step for the forward direction.
        alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
        # Try a step in the -d direction.
        xp = x_k - alpha_m * d
        fp, Fp = f(xp)
        if fp <= C + eta - gamma * alpha_m**2 * f_k:
            alpha = -alpha_m
            break
        alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
        # Safeguard the trial steps into [tau_min, tau_max] * previous step.
        alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
        alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
    # Update C and Q
    Q_next = nu * Q + 1
    C = (nu * Q * (C + eta) + fp) / Q_next
    Q = Q_next
    return alpha, xp, fp, Fp, C, Q | a8ccaf00d39848495da9d2cc66014012d57bb146 | 3,629,134 |
def course_state_editor(func):
    """
    Decorator for any method that will be used to alter a Course's 'state'. It does a few useful things:
    1. Clears any lingering dashboard data for a given course run to ensure that it will be in the right state
       after the command.
    2. Allows the user to specify a Course instead of CourseRun and automatically choose the most recent CourseRun
       related to that Course.
    3. Change some edX data for our fake CourseRuns to match any update CourseRun dates (eg: edx_course_keys that
       reference the CourseRun.start_date)
    """
    @wraps(func)
    def wrapper(*args, **kwargs):  # pylint: disable=missing-docstring
        user = kwargs['user']
        course_run = kwargs.get('course_run')
        # If a Course was specified instead of a CourseRun, use the latest CourseRun in the set
        if not course_run:
            course = kwargs['course']
            course_run = course.courserun_set.order_by('fuzzy_start_date', '-start_date').first()
            # Rewrite kwargs so the wrapped function always receives course_run.
            kwargs['course_run'] = course_run
            del kwargs['course']
        # Clear existing dashboard data for the CourseRun (cached edX data, final grades, etc.)
        clear_dashboard_data(user, course_run=course_run)
        # Call the wrapped function
        ret_val = func(*args, **kwargs)
        # If this is a fake course run, update associated edX data to match any new dates set
        # on the CourseRun (eg: edx_course_key and course id's in raw edX data)
        if is_fake_program_course_run(course_run) and course_run.start_date:
            update_fake_course_run_edx_key(user, course_run)
        return ret_val
    return wrapper | 891cfccd9d4a0da4c80010cd2f7827d040c82e8e | 3,629,135 |
def importDataFromCSV(dataType, filename):
"""
Import from a `.csv` file into a dataframe or python time/distance matrix dictionary.
Parameters
----------
dataType: string, Required
The type of data to be imported. Valid options are 'nodes', 'arcs', 'assignments', or 'matrix'.
filename: string, Required
The path and the name of the file to be imported.
Return
------
pandas.dataframe or dictionary
The resulting object depends on the data that are imported. If the data are 'nodes', 'arcs' or 'assignments', return pandas.dataframe; otherwise, if the data are 'matrix', return dictionary.
Examples
--------
The following examples will be the same as examples in :meth:`~veroviz.utilities.exportDataToCSV`
Import veroviz and check if it is the latest version:
>>> import veroviz as vrv
>>> vrv.checkVersion()
Create a nodes dataframe:
>>> nodesDF = vrv.createNodesFromLocs(
... locs = [[42.1538, -78.4253],
... [42.3465, -78.6234],
... [42.6343, -78.1146]])
>>> nodesDF
Save the nodesDF dataframe as a .csv file in a subdirectory named "test":
>>> vrv.exportDataToCSV(data = nodesDF, filename = 'test/nodes.csv')
Import the dataframe we just saved:
>>> importedNodes = vrv.importDataFromCSV(
... dataType = 'nodes',
... filename = 'test/nodes.csv')
>>> importedNodes
If the data type is inconsistent with the data, an error message will be thrown and nothing will be imported.
>>> importedArcs = vrv.importDataFromCSV(
... dataType = 'arcs',
... filename = 'test/nodes.csv')
Error: test/nodes.csv was not successfully imported. Check the data type.
Similarly we can import and export the 'arcs' and 'assignments' dataframe
For time/distance matrices, they are saved as dictionaries in VeRoViz, here is an example of how to import/export them.
Get travel time/distance matrices using the nodes we just created:
>>> [timeDict, distDict] = vrv.getTimeDist2D(
... nodes = nodesDF,
... routeType = 'fastest',
... dataProvider = 'OSRM-online')
>>> timeDict
{(1, 1): 0.0,
(1, 2): 2869.9,
(1, 3): 4033.9,
(2, 1): 2853.3,
(2, 2): 0.0,
(2, 3): 4138.2,
(3, 1): 4037.8,
(3, 2): 4055.4,
(3, 3): 0.0}
Export the time dictionary to a .csv file in a subdirectory named "test":
>>> vrv.exportDataToCSV(data = timeDict, filename = 'test/timeMatrix.csv')
Import the saved dictionary
>>> importedTime = vrv.importDataFromCSV(
... dataType = 'matrix',
... filename = 'test/timeMatrix.csv')
>>> importedTime
{(1, 1): 0.0,
(1, 2): 2869.9,
(1, 3): 4033.9,
(2, 1): 2853.3,
(2, 2): 0.0,
(2, 3): 4138.2,
(3, 1): 4037.8,
(3, 2): 4055.4,
(3, 3): 0.0}
"""
# Replace backslash
filename = replaceBackslashToSlash(filename)
if (type(filename) is not str):
print("Error: filename should be a string, please check the inputs.")
return
# validation - The validation of this script is different from others
try:
if (dataType.lower() in {'nodes', 'arcs', 'assignments'}):
data = pd.read_csv(filename, index_col=0)
if (dataType.lower() == 'nodes'):
[valFlag, errorMsg, warningMsg] = valNodes(data)
if (valFlag and warningMsg == ""):
# print("Message: %s was successfully imported as Nodes dataframe" % filename)
pass
else:
print("%s %s was not successfully imported." % (errorMsg, filename))
return
elif (dataType.lower() == 'arcs'):
[valFlag, errorMsg, warningMsg] = valArcs(data)
if (valFlag and warningMsg == ""):
# print("Message: %s was successfully imported as Arcs dataframe" % filename)
pass
else:
print("%s %s was not successfully imported." % (errorMsg, filename))
return
elif (dataType.lower() == 'assignments'):
[valFlag, errorMsg, warningMsg] = valAssignments(data)
if (valFlag and warningMsg == ""):
# print("Message: %s was successfully imported as Assignments dataframe" % filename)
pass
else:
print("%s %s was not successfully imported." % (errorMsg, filename))
return
else:
return
elif (dataType.lower() == 'matrix'):
dataframe = pd.read_csv(filename, index_col=0)
dataframe.columns = dataframe.columns.astype(int)
data = convertMatricesDataframeToDictionary(dataframe)
else:
print("Error: data type not supported. Expected 'nodes', 'arcs', 'assignments' or 'matrix' (for time matrix or distance matrix)")
except (TypeError, ValueError):
print("Error: Cannot import file: %s, check if `dataType` is correct for inputs." % (filename))
except IOError:
print("Error: Cannot import file: %s" % (filename))
return data | cc185dd59fbc0a820e6f3fe2262154325e61d915 | 3,629,136 |
def main():
    """Run the exploit against the target host and drop into an interactive shell.

    Returns 0 on exit; connections are always closed in the ``finally`` block.
    """
    args = get_parsed_args()
    sock, session = None, None
    try:
        sock = exploit(args.host, int(args.port))
        session = Telnet()
        session.sock = sock
        print_info('Exploit sent, going interactive!')
        session.mt_interact()
    except ConnectionResetError:
        print_info('Remote host closed connection')
    except KeyboardInterrupt:
        print_info('Ctrl-C received! Quitting!')
    finally:
        if sock is not None:
            sock.close()
        if session is not None:
            session.close()
    return 0
from typing import Optional
def xyz_to_str(xyz_dict: dict,
               isotope_format: Optional[str] = None,
               ) -> str:
    """
    Convert an ARC xyz dictionary (with 'symbols', 'isotopes', and 'coords'
    entries of equal length) into a plain-text xyz block, one atom per line.

    Args:
        xyz_dict (dict): The ARC xyz format to be converted.
        isotope_format (str, optional): Format used to mark non-most-abundant
            isotopes. By default isotopes are not printed. Currently the only
            supported option is 'gaussian' (e.g. ``C(Iso=13)``).

    Raises:
        ConverterError: If input is not a dict or does not have all attributes.

    Returns: str
        The string xyz format (or None if ``check_xyz_dict`` returned None).
    """
    xyz_dict = check_xyz_dict(xyz_dict)
    if xyz_dict is None:
        logger.warning('Got None for xyz_dict')
        return None
    recognized_isotope_formats = ['gaussian']
    if any(key not in list(xyz_dict.keys()) for key in ['symbols', 'isotopes', 'coords']):
        raise ConverterError(f'Missing keys in the xyz dictionary. Expected to find "symbols", "isotopes", and '
                             f'"coords", but got {list(xyz_dict.keys())} in\n{xyz_dict}')
    symbols = xyz_dict['symbols']
    isotopes = xyz_dict['isotopes']
    coords = xyz_dict['coords']
    if len(isotopes) != len(symbols) or len(coords) != len(symbols):
        raise ConverterError(f'Got different lengths for "symbols", "isotopes", and "coords": '
                             f'{len(xyz_dict["symbols"])}, {len(xyz_dict["isotopes"])}, and {len(xyz_dict["coords"])}, '
                             f'respectively, in xyz:\n{xyz_dict}')
    if any(len(point) != 3 for point in coords):
        raise ConverterError(f'Expected 3 coordinates for each atom (x, y, and z), got:\n{xyz_dict}')
    lines = list()
    for symbol, isotope, point in zip(symbols, isotopes, coords):
        common_isotope = get_most_common_isotope_for_element(symbol)
        if isotope_format is not None and common_isotope != isotope:
            # Non-default isotope: print it in the requested format.
            if isotope_format == 'gaussian':
                labeled = f'{symbol}(Iso={isotope})'
                line = f'{labeled:14}'
            else:
                raise ConverterError('Recognized isotope formats for printing are {0}, got: {1}'.format(
                    recognized_isotope_formats, isotope_format))
        else:
            # Most abundant isotope (or isotopes not requested): plain symbol.
            line = f'{symbol:4}'
        line += f'{point[0]:14.8f}{point[1]:14.8f}{point[2]:14.8f}'
        lines.append(line)
    return '\n'.join(lines)
from typing import Tuple
async def _provision_nic_with_public_ip(
    network_client: NetworkManagementClient,
    location: str,
    vm_name: str,
) -> Tuple[str, str]:
    """
    Creates a public IP address and corresponding NIC. Returns (NIC id, public address
    name).

    Both resources are created in the meadowrun resource group for ``location``,
    on the shared virtual network/subnet ensured by
    ``_ensure_virtual_network_and_subnet``.
    """
    subnet_id = await _ensure_virtual_network_and_subnet(network_client, location)
    # provision an IP address
    # TODO there should probably be a way to re-use IP addresses rather than
    # create/destroy them for every VM
    poller = await network_client.public_ip_addresses.begin_create_or_update(
        await ensure_meadowrun_resource_group(location),
        f"{vm_name}-ip",
        {
            "location": location,
            # critical that we use Basic here instead of Standard, otherwise this will
            # not be available via instance metadata:
            # https://github.com/MicrosoftDocs/azure-docs/issues/44314
            "sku": {"name": "Basic"},
            "public_ip_allocation_method": "Dynamic",
            "public_ip_address_version": "IPV4",
        },
    )
    ip_address_result = await poller.result()
    print(f"Provisioned public IP address {ip_address_result.name}")
    # provision a NIC
    # NOTE(review): the NIC resource is created under the name f"{vm_name}-ip",
    # i.e. the same name as the public IP above — this looks like it was meant
    # to be f"{vm_name}-nic". Confirm before changing, since existing
    # deployments/cleanup code may rely on the current name.
    poller = await network_client.network_interfaces.begin_create_or_update(
        await ensure_meadowrun_resource_group(location),
        f"{vm_name}-ip",
        {
            "location": location,
            "ip_configurations": [
                {
                    "name": f"{vm_name}-ip-config",
                    "subnet": {"id": subnet_id},
                    "public_ip_address": {
                        "id": ip_address_result.id,
                        # ask Azure to delete the public IP when the NIC is deleted
                        "properties": {"deleteOption": "Delete"},
                    },
                }
            ],
        },
    )
    nic_result = await poller.result()
    print(f"Provisioned network interface client {nic_result.name}")
    return nic_result.id, ip_address_result.name
from typing import Dict
from typing import Any
import json
import time
def predict(event: Dict[str, Any],
            context: Any) -> Dict[str, Any]:
    """
    AWS lambda function, to handle incoming GET requests asking our model for predictions.

    :param event: standard AWS lambda event param - it contains the query parameters for the GET call
    :param context: standard AWS lambda context param - not use in this application
    :return: wrapped HTTP 200 response with the prediction, elapsed time, and endpoint name
    """
    # static "feature store": one-hot encoding per known session action
    action_map = {
        'start': [1, 0, 0, 0, 0, 0, 0],
        'end': [0, 1, 0, 0, 0, 0, 0],
        'add': [0, 0, 1, 0, 0, 0, 0],
        'remove': [0, 0, 0, 1, 0, 0, 0],
        'purchase': [0, 0, 0, 0, 1, 0, 0],
        'detail': [0, 0, 0, 0, 0, 1, 0],
        'view': [0, 0, 0, 0, 0, 0, 1],
        'empty': [0, 0, 0, 0, 0, 0, 0]
    }
    print("Received event: " + json.dumps(event))
    # API Gateway sends an explicit null (not a missing key) when there is no
    # query string, so `event.get(..., {})` could still yield None — guard it.
    params = event.get('queryStringParameters') or {}
    response = dict()
    start = time.time()
    session_str = params.get('session', '')
    session = session_str.split(',') if session_str != '' else []
    print(session)
    # Bracket the session with the start/end sentinel actions.
    session = ['start'] + session + ['end']
    # Unknown actions fall back to the all-zero 'empty' encoding.
    empty_encoding = action_map['empty']
    session_onehot = [action_map.get(action, empty_encoding) for action in session]
    # input is array of array, even if we just ask for 1 prediction here
    input_payload = {'instances': [session_onehot]}
    result = get_response_from_sagemaker(model_input=json.dumps(input_payload),
                                         endpoint_name=SAGEMAKER_ENDPOINT_NAME,
                                         content_type='application/json')
    if result:
        # print for debugging in AWS Cloudwatch
        print(result)
        # get the first item in the prediction array, as it is a 1-1 prediction
        response = result['predictions'][0][0]
    return wrap_response(200, {
        "prediction": response,
        "time": time.time() - start,
        "endpoint": SAGEMAKER_ENDPOINT_NAME
    })
def get_simclr_transform(size, s=1):
    """Return a set of data augmentation transformations as described in the SimCLR paper.

    Note: color jitter and random grayscale from the original recipe are
    currently disabled (the jitter transform is still built but not applied).
    """
    color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    return transforms.Compose([
        transforms.Resize(size=size),
        transforms.RandomHorizontalFlip(),
        GaussianBlur(kernel_size=int(0.1 * size)),
        transforms.ToTensor(),
        normalize,
    ])
import json
def dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True,
         allow_nan=True, indent=None, separators=None, sort_keys=False,
         core: Core = None, **kw):
    """
    Serialize ``obj`` as JSON into the writer ``fp`` using HomeControl's
    type-aware ``JSONEncoder`` (bound to ``core``); all other keyword
    arguments are forwarded to :func:`json.dump`.
    """
    encoder_cls = partial(JSONEncoder, core=core)
    return json.dump(
        obj, fp,
        cls=encoder_cls,
        skipkeys=skipkeys,
        ensure_ascii=ensure_ascii,
        check_circular=check_circular,
        allow_nan=allow_nan,
        indent=indent,
        separators=separators,
        sort_keys=sort_keys,
        **kw)
import os
def moving_slope_filter(dataframe,time,data,cutoff,time_unit=None,
                        log_file=os.path.join(os.getcwd(),'filter_log.txt')):
    """
    Filters out datapoints based on the difference between the slope in one point
    and the next (sudden changes like noise get filtered out), based on a given
    cut off; Moving Slope Filtering.

    Parameters
    ----------
    dataframe : pd.DataFrame
        the dataframe containing the data that needs to be smoothened.
    time : str
        name of the time column in the dataframe
    data : str
        name of the column containing the data that needs to be filtered
    cutoff: int
        the cutoff value to compare the slopes with to apply the filtering.
    time_unit : str, optional
        forwarded to ``calc_slopes``.
    log_file : str, optional
        path of the filter log file. NOTE: the default is evaluated at import
        time, so it uses the working directory at import, not at call time.

    Returns
    -------
    pd.DataFrame
        the adjusted dataframe with the filtered values
    """
    original = len(dataframe)
    # calculate initial slopes; calc_slopes appends the slope column last
    new_dataframe = calc_slopes(dataframe, time, data, time_unit=time_unit)
    # Bug fix: read the slope column name from the frame calc_slopes returned,
    # not from the input frame (wrong column if calc_slopes returns a copy).
    slope_col = new_dataframe.columns[-1]
    # As long as the slope column contains values higher than cutoff, remove
    # those rows from the dataframe and recalculate slopes
    while abs(new_dataframe[slope_col]).max() > cutoff:
        new_dataframe = new_dataframe.drop(new_dataframe[abs(new_dataframe[slope_col]) > cutoff].index)
        new_dataframe = calc_slopes(new_dataframe, time, data, time_unit=time_unit)
    new_dataframe = new_dataframe.drop(slope_col, axis='columns')
    new_dataframe.reset_index(drop=True, inplace=True)
    # Context manager guarantees the log file is closed even on write errors.
    with open(log_file, 'a') as log:
        log.write('Original dataset: ' + str(original) + ' datapoints; new dataset: ' +
                  str(len(new_dataframe)) + ' datapoints; ' + str(original - len(new_dataframe)) +
                  ' datapoints filtered\n')
    return new_dataframe
async def get_sender_data(user_id: int) -> dict:
    """
    Sender profile.

    Thin async wrapper that delegates to ``get_sender_data_request``.

    :param user_id: User ID
    :type user_id: int
    :return: Sender profile
    :rtype: dict
    """
    return await get_sender_data_request(user_id)
def conditional_input_prediction_2d(x, y_pred, var_1_index, var_2_index, var_1_bins, var_2_bins, dependence_function=np.mean):
    """
    Summarize model predictions over a 2-D grid of binned input values.

    For every (variable 2 bin, variable 1 bin) cell, the summary statistic of
    the predictions for all examples falling in that cell is computed, showing
    how the model predictions vary on average with the two inputs.

    Args:
        x: Array of input data values
        y_pred: ML model predictions
        var_1_index: Index of first input column
        var_2_index: Index of second input column
        var_1_bins: Bins used to segment variable 1
        var_2_bins: Bins used to segment variable 2
        dependence_function: Summary statistic function
    Returns:
        dependence_matrix: Array with one row per variable 2 bin and one
            column per variable 1 bin; NaN where a cell holds no examples.
        dependence_counts: Array with the number of examples in each cell.
    """
    n_rows = var_2_bins.size - 1
    n_cols = var_1_bins.size - 1
    summary = np.full((n_rows, n_cols), np.nan)
    counts = np.zeros((n_rows, n_cols), dtype=int)
    col_values = x[:, var_1_index]
    row_values = x[:, var_2_index]
    for row in range(n_rows):
        # Row membership is shared across the inner loop, so compute it once.
        in_row = (row_values >= var_2_bins[row]) & (row_values < var_2_bins[row + 1])
        for col in range(n_cols):
            in_cell = in_row & (col_values >= var_1_bins[col]) & (col_values < var_1_bins[col + 1])
            members = np.flatnonzero(in_cell)
            counts[row, col] = members.size
            if members.size > 0:
                summary[row, col] = dependence_function(y_pred[members])
    return summary, counts
import re
import sys
import attr
def update_model_params(params, update):
    """Updates the default parameters using supplied user arguments.

    Each entry in ``update`` must look like ``name=value``; unparsable
    entries and unknown parameter names terminate the program via
    ``sys.exit(1)``.
    """
    overrides = {}
    for spec in update:
        parsed = re.match("(.*)=(.*)", spec)
        if parsed is None:
            LOGGER.error("Unable to parse param update '%s'", spec)
            sys.exit(1)
        key, value = parsed.groups()
        overrides[key] = value
    try:
        return attr.evolve(params, **overrides)
    except TypeError as e:
        LOGGER.error(e)
        LOGGER.error("Valid parameters: %s", list(attr.asdict(params).keys()))
        sys.exit(1)
def fgt_get_pressureUnit(pressure_index, get_error = _get_error):
    """Get current unit on selected pressure device.

    Args:
        pressure_index: Index of pressure channel or unique ID
        get_error: when truthy, also return the low-level error code

    Returns:
        ``(fgt_ERROR, unit)`` when ``get_error`` is set, otherwise the
        current unit as a string
    """
    index = int(pressure_index)
    getter = low_level.fgt_get_pressureUnit
    c_error, unit = getter(index)
    exceptions.manage_pressure_status(getter.__name__, index)
    error = fgt_ERROR(c_error)
    return (error, unit) if get_error else unit
def _get_rawhide_version():
    """
    Query Koji to find the rawhide version from the build target.

    :return: the rawhide version (e.g. "f32"), or None when the "rawhide"
        build target does not exist
    :rtype: str
    """
    session = get_session(conf, login=False)
    target = session.getBuildTarget("rawhide")
    if not target:
        return None
    # e.g. "f32-build" -> "f32"
    return target["build_tag_name"].partition("-build")[0]
def test_mixed():
    """Test positional arguments passed via keyword.
    """
    class TestView(simple.SimpleView):
        # 'id' is declared as a positional view argument, exposed as self.id.
        args = ['id']
        def __call__(self, foo):
            return self.request, self.id, foo
    testview = create_view(TestView)
    # generally is a possibility...
    assert testview('request', foo='bar', id=1) == ('request', 1, 'bar')
    # ...but only if the correct order is maintained: here 'id' would
    # have two values, 'bar' and 1.
    assert_raises(TypeError, testview, 'request', 'bar', id=1)
def fail_event_on_handler_exception(func):
    """
    Decorator which marks the models.Event associated with handler by
    BaseHandler.set_context() as FAILED in case the `func` raises an
    exception.
    The exception is re-raised by this decorator once it's finished.
    """
    @wraps(func)
    def decorator(handler, *args, **kwargs):
        try:
            return func(handler, *args, **kwargs)
        except Exception as e:
            # Skip the exception in case it has been already handled by
            # some decorator. This can happen in case multiple decorators
            # are nested.
            if handler._last_handled_exception == e:
                raise
            handler._last_handled_exception = e
            err = 'Could not process message handler. See the traceback.'
            log.exception(err)
            # In case the exception interrupted the database transaction,
            # rollback it.
            db.session.rollback()
            # Mark the event as failed.
            db_event_id = handler.current_db_event_id
            db_event = db.session.query(Event).filter_by(
                id=db_event_id).first()
            # The event may be absent from the DB (e.g. never persisted);
            # in that case there is nothing to transition.
            if db_event:
                msg = "Handling of event failed with traceback: %s" % (str(e))
                db_event.transition(EventState.FAILED, msg)
                db_event.builds_transition(ArtifactBuildState.FAILED.value, msg)
                db.session.commit()
            # Always re-raise so callers still see the original failure.
            raise
    return decorator
def intensityAdjustment(image, template):
    """Tune image intensity based on template.

    Parameters
    ----------
    image : <numpy.ndarray>
        (H, W, C) image needed to be adjusted
    template : <numpy.ndarray>
        Typically we use the middle image from image stack. Each channel of
        ``image`` is rescaled so its average intensity matches the
        corresponding channel average of ``template``.

    Returns
    -------
    numpy.ndarray
        The resulting float image after intensity adjustment, same shape as
        ``image``.
    """
    # Per-channel means over the spatial axes; keepdims lets the scaling
    # ratio broadcast across (H, W, C) — replaces the explicit channel loop.
    image_avg = image.mean(axis=(0, 1), keepdims=True)
    template_avg = template.mean(axis=(0, 1), keepdims=True)
    return image * (template_avg / image_avg)
def Define_core_memesa_model():
    """\nOriginal core model + inefficient branch\n"""
    model_name = 'core_model_1b'
    # (id, reversible, reagents, subsystem) rows, expanded below into the
    # dictionary-of-dictionaries structure expected by callers.
    reaction_rows = [
        ('R01', False, [(-1, 'X0'), (1, 'A')], ''),
        ('R02', True,  [(-1, 'A'), (1, 'B')], 'C1'),
        ('R03', True,  [(-1, 'A'), (1, 'C')], 'C1'),
        ('R04', True,  [(-1, 'C'), (1, 'B')], 'C1'),
        ('R05', False, [(-1, 'B'), (1, 'D')], ''),
        ('R06', False, [(-1, 'D'), (1, 'E1')], 'C2'),
        ('R07', False, [(-1, 'E1'), (1, 'E2')], 'C2'),
        ('R08', False, [(-1, 'E2'), (1, 'G')], 'C2'),
        ('R09', False, [(-1, 'D'), (1, 'F1')], 'C2'),
        ('R10', False, [(-1, 'F1'), (1, 'F2')], 'C2'),
        ('R11', False, [(-1, 'F2'), (1, 'G')], 'C2'),
        ('R12', False, [(-1, 'G'), (1, 'H')], ''),
        ('R13', False, [(-1, 'H'), (1, 'I1')], 'C3'),
        ('R14', True,  [(-1, 'I1'), (1, 'I2')], 'C3'),
        ('R15', False, [(-1, 'I2'), (1, 'L')], 'C3'),
        ('R16', False, [(-1, 'H'), (1, 'J1')], 'C3'),
        ('R17', False, [(-1, 'J1'), (1, 'J2')], 'C3'),
        ('R18', False, [(-1, 'J2'), (1, 'L')], 'C3'),
        ('R19', True,  [(-1, 'I1'), (1, 'K1')], 'C3'),
        ('R20', True,  [(-1, 'K1'), (1, 'K2')], 'C3'),
        ('R21', True,  [(-1, 'K2'), (1, 'I2')], 'C3'),
        ('R22', False, [(-1, 'L'), (1, 'M')], ''),
        ('R23', True,  [(-1, 'M'), (1, 'N')], 'C4'),
        ('R24', False, [(-1, 'M'), (1, 'N')], 'C4'),
        ('R25', False, [(-1, 'N'), (1, 'X1')], ''),
        ('R26', False, [(-1, 'A'), (0.5, 'N'), (0.5, 'X3')], ''),
    ]
    Reactions = {rid: {'id': rid, 'reversible': rev, 'reagents': reagents, 'SUBSYSTEM': sub}
                 for rid, rev, reagents, sub in reaction_rows}
    # (id, boundary, subsystem) rows for all species.
    species_rows = [
        ('X0', True, ''), ('A', False, 'C1'), ('B', False, 'C1'), ('C', False, 'C1'),
        ('D', False, 'C2'), ('E1', False, 'C2'), ('E2', False, 'C2'),
        ('F1', False, 'C2'), ('F2', False, 'C2'), ('G', False, 'C2'),
        ('H', False, 'C3'), ('I1', False, 'C3'), ('I2', False, 'C3'),
        ('J1', False, 'C3'), ('J2', False, 'C3'), ('K1', False, 'C3'),
        ('K2', False, 'C3'), ('L', False, 'C3'),
        ('M', False, 'C4'), ('N', False, 'C4'),
        ('X1', True, ''), ('X3', True, ''),
    ]
    Species = {sid: {'id': sid, 'boundary': boundary, 'SUBSYSTEM': sub}
               for sid, boundary, sub in species_rows}
    Bounds = {'R01': {'lower': 0, 'upper': 1}}
    Objective_function = {'objMaxJ25': {'id': 'objMaxJ25', 'flux': 'R25', 'coefficient': 1,
                                        'sense': 'Maximize', 'active': True}}
    return model_name, Reactions, Species, Bounds, Objective_function
def F16(x):
    """Rosenbrock-type function.

    NOTE: uses ``(x_i + 1)**2`` where the classic Rosenbrock has
    ``(1 - x_i)**2``, so the minimum of this variant is at x_i = -1.
    """
    return sum(100 * (x[i + 1] - x[i] ** 2) ** 2 + (x[i] + 1) ** 2
               for i in range(len(x) - 1))
def jaccard_simple(annotation, segmentation):
    """ Compute region similarity as the Jaccard Index.

    Arguments:
        annotation   (ndarray): binary annotation map.
        segmentation (ndarray): binary segmentation map.

    Return:
        jaccard (float): region similarity (defined as 1 when both maps
            are empty).
    """
    # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # supported spelling for a boolean dtype cast.
    annotation = annotation.astype(bool)
    segmentation = segmentation.astype(bool)
    if np.isclose(np.sum(annotation), 0) and np.isclose(np.sum(segmentation), 0):
        return 1
    # intersection over union
    return np.sum(annotation & segmentation) / \
        np.sum(annotation | segmentation, dtype=np.float32)
def shuffle_df(df):
    """
    Return a copy of the dataframe with its rows in random order
    (uses numpy's global random state).

    params:
        df: pandas.DataFrame
    return:
        pandas.DataFrame | shuffled dataframe
    """
    shuffled_index = np.random.permutation(df.index)
    return df.reindex(shuffled_index)
from typing import List
def transform_from_sklearn(
    idx: pd.Index,
    vars_: List[str],
    vals: np.array,
) -> pd.DataFrame:
    """
    Add index and column names to sklearn output.

    :param idx: data index
    :param vars_: names of feature columns
    :param vals: features data
    :return: dataframe with an index and column names
    """
    features = vals
    # Some SkLearn models like Lasso return a one-dimensional array for a
    # two-dimensional input. Add a dimension for such cases.
    if features.ndim == 1:
        features = np.expand_dims(features, axis=1)
    dbg.dassert_eq(
        features.shape,
        (len(idx), len(vars_)),
        "The shape of `vals` does not match the length of `idx` and `vars_`",
    )
    return pd.DataFrame(features, index=idx, columns=vars_)
def from_package_str(item):
    """Display name space info when it is different, then diagram's or parent's
    namespace."""
    subject = item.subject
    diagram = item.diagram
    if not subject or not diagram:
        return False
    namespace = subject.namespace
    parent = item.parent
    # A parent element (i.e. an interaction) in a different namespace
    # suppresses the label.
    if parent and parent.subject and parent.subject.namespace is not namespace:
        return False
    if namespace is item.diagram.owner:
        return ""
    return f"(from {namespace.name})"
import os
def search_by_wl(target_type: str, imaging_type: str, wl: float, base_path: str) -> str:
    """Search a folder for an image of given wavelength.

    A path to the image is returned.

    :param target_type:
        String either 'leaf' or 'reference'. Use the ones listed in constants.py.
    :param imaging_type:
        String either 'refl' for reflectance or 'tran' for transmittance. Use the ones listed in constants.py.
    :param wl:
        Wavelength.
    :param base_path:
        Path to the image folder. Usually the one returned by get_image_file_path() is correct and other paths
        should only be used for testing and debugging.
    :returns:
        Returns absolute path to the image.
    :raises FileNotFoundError if not found
    """
    def _matches(candidate_wl: float, epsilon=0.01) -> bool:
        """Float equality at the desired 2-decimal accuracy."""
        return abs(candidate_wl - wl) <= epsilon

    folder = P.path_directory_render(target_type, imaging_type, base_path)
    for filename in os.listdir(folder):
        if _matches(FN.parse_wl_from_filename(filename)):
            return P.path_file_rendered_image(target_type, imaging_type, wl, base_path)
    raise FileNotFoundError(f"Could not find {wl} nm image from {folder}.")
def calculateAverageValue(faceBlob, binaryLabelVolume):
    """Deprecated.

    Mean of the label volume sampled at every point of the face blob.
    """
    total = sum(float(at(binaryLabelVolume, p.loc)) for p in faceBlob.points())
    return float(total) / float(len(faceBlob.points()))
import numpy
def calc_m_inv_m_norm_by_unit_cell_parameters(
        unit_cell_parameters, flag_unit_cell_parameters: bool = False):
    """Compute the nine components of the nM matrix from unit-cell parameters.

    Args:
        unit_cell_parameters: sequence (a, b, c, alpha, beta, gamma); only the
            angles enter the matrix (assumed radians — the trig below uses
            them directly).
        flag_unit_cell_parameters: when True, also populate the derivative
            entry of the returned ``dder`` dictionary.

    Returns:
        (m_inv_m_norm_ij, dder): the nine matrix components stacked along the
        first axis, and a dict of derivatives.
    """
    alpha, beta = unit_cell_parameters[3], unit_cell_parameters[4]
    gamma = unit_cell_parameters[5]
    # cell edge lengths a, b, c and sin(beta)/sin(gamma) do not appear in the
    # matrix, so they are not computed (they were unused locals previously).
    c_a, c_b, c_g = numpy.cos(alpha), numpy.cos(beta), numpy.cos(gamma)
    s_a = numpy.sin(alpha)
    phi, dder_phi = calc_phi_by_unit_cell_parameters(
        unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
    zeros = numpy.zeros_like(unit_cell_parameters[0])
    ones = numpy.ones_like(unit_cell_parameters[0])
    m_inv_m_norm_ij = numpy.stack([
        s_a/phi, zeros, zeros,
        (c_a*c_b-c_g)/(phi*s_a), 1./s_a, zeros,
        (c_a*c_g-c_b)/(phi*s_a), -c_a/s_a, ones], axis=0)
    dder = {}
    if flag_unit_cell_parameters:
        # NOTE(review): all nine derivative components are zero placeholders;
        # dder_phi from calc_phi_by_unit_cell_parameters is currently unused —
        # confirm this is intentional before relying on these derivatives.
        zero_row = numpy.stack([zeros, zeros, zeros, zeros, zeros, zeros], axis=0)
        dder["unit_cell_parameters"] = numpy.stack([zero_row] * 9, axis=0)
    return m_inv_m_norm_ij, dder
import subprocess
def get_m():
    """Fetch the encryption parameter ``m`` by running the helper Node.js script."""
    output = subprocess.check_output(
        ['node', 'scrapy_ddiy/scripts/js/yuanrenxue/002.js'])
    return output.decode().strip()
import os
def fetch_mirchi2018(data_dir=None, resume=True, verbose=1):
    """
    Downloads (and creates) dataset for replicating Mirchi et al., 2018, SCAN
    Parameters
    ----------
    data_dir : str, optional
        Directory to check for existing data files (if they exist) or to save
        generated data files. Files are cached as myconnectome_fc.npy and
        myconnectome_panas.csv for the functional connectivity and behavioral
        data, respectively.
    resume : bool, optional
        Forwarded to the underlying fetchers (_get_fc / _get_panas).
    verbose : int, optional
        Forwarded to the underlying fetchers (_get_fc / _get_panas).
    Returns
    -------
    X : (73, 198135) numpy.ndarray
        Functional connections from MyConnectome rsfMRI time series data
    Y : (73, 13) numpy.ndarray
        PANAS subscales from MyConnectome behavioral data
    """
    data_dir = os.path.join(_get_data_dir(data_dir=data_dir), 'ds-mirchi2018')
    os.makedirs(data_dir, exist_ok=True)
    X_fname = os.path.join(data_dir, 'myconnectome_fc.npy')
    Y_fname = os.path.join(data_dir, 'myconnectome_panas.csv')
    # Functional connectivity: generated once, then served from the cached .npy.
    if not os.path.exists(X_fname):
        X = _get_fc(data_dir=data_dir, resume=resume, verbose=verbose)
        np.save(X_fname, X, allow_pickle=False)
    else:
        X = np.load(X_fname, allow_pickle=False)
    # PANAS subscales: cached as CSV with subscale names in the header row.
    if not os.path.exists(Y_fname):
        Y = _get_panas(data_dir=data_dir, resume=resume, verbose=verbose)
        np.savetxt(Y_fname, np.column_stack(list(Y.values())),
                   header=','.join(Y.keys()), delimiter=',', fmt='%i')
        # convert dictionary to structured array before returning
        Y = np.array([tuple(row) for row in np.column_stack(list(Y.values()))],
                     dtype=dict(names=list(Y.keys()), formats=['i8'] * len(Y)))
    else:
        # genfromtxt with names=True rebuilds the same structured array layout.
        Y = np.genfromtxt(Y_fname, delimiter=',', names=True, dtype=int)
    return X, Y
def round(dt, rounding=None):
    """Round *dt* with the requested rounding method.

    Args:
        dt: `datetime` value to round.
        rounding: a `DatetimeRounding` member selecting the method; any
            other value (including the default None) leaves *dt* untouched.

    Returns:
        The rounded `datetime`, or *dt* itself when no rounding applies.
    """
    if rounding is DatetimeRounding.NEAREST_HOUR:
        return round_to_hour(dt)
    if rounding is DatetimeRounding.NEAREST_MINUTE:
        return round_to_minute(dt)
    return dt
def testid(prefix='',c=_default_conc,
           squishy=False,squishz=False,decimate=False,substr=False,
           subm=_default_subm,subr=_default_subr,subc=_default_subc,
           subrho=_default_subrho,version=-1):
    """Build a standardized string that uniquely identifies a test.

    Args:
        prefix (Optional[str]): string to start the ID with (default = '')
        c (Optional[float]): concentration of run (default = `_default_conc`)
        squishy (Optional[float]): factor y coords are squished by (default = False)
        squishz (Optional[float]): factor z coords are squished by (default = False)
        decimate (Optional[int]): particle-number decimation factor (default = False)
        substr (Optional[bool]): if True, the test includes substructure
            (default = False)
        subm (Optional[float]): subhalo mass relative to the parent's Mvir;
            used only when `substr` is True (default = `_default_subm`)
        subr (Optional[float]): subhalo radius relative to the parent's Rvir;
            used only when `substr` is True (default = `_default_subr`)
        subc (Optional[float]): substructure concentration; used only when
            `substr` is True (default = `_default_subc`)
        subrho (Optional[float]): subhalo central density relative to the
            parent's; used only when `substr` is True (default = `_default_subrho`)
        version (Optional[int]): test realization; -1 means the fiducial
            version (default = -1)

    Returns:
        str: A unique string identifying the test.

    Raises:
        ValueError: If the generated ID string is empty.
    """
    parts = [prefix]
    # Concentration tag
    if c is not None:
        parts.append('_c{}'.format(util.num2str(c)))
    # General tags (only appended for meaningful values)
    if decimate is not None and decimate != False and decimate > 1:
        parts.append('_dec{}'.format(util.num2str(decimate)))
    if squishy is not None and squishy != False and squishy > 0 and squishy != 1:
        parts.append('_sqY{}'.format(util.num2str(float(squishy))))
    if squishz is not None and squishz != False and squishz > 0 and squishz != 1:
        parts.append('_sqZ{}'.format(util.num2str(float(squishz))))
    # Substructure tags
    if substr:
        parts.append(subid(subm=subm,subr=subr,subc=subc,subrho=subrho))
    # Version number (fiducial runs carry no version tag)
    if version >= 0:
        parts.append('_ver{:03d}'.format(version))
    idstr = ''.join(parts)
    if len(idstr) == 0:
        raise ValueError('Empty ID string.')
    # A leading underscore only appears when prefix is empty; drop it.
    return idstr[1:] if idstr.startswith('_') else idstr
def calc_bearing(lat1, lon1, lat2, lon2):
    """Initial great-circle bearing, in degrees, from (lat1, lon1) to (lat2, lon2).

    All inputs are in degrees; the result is normalised to [0, 360).
    """
    phi1 = np.radians(lat1)
    phi2 = np.radians(lat2)
    dlam = np.radians(lon2) - np.radians(lon1)
    y = np.sin(dlam) * np.cos(phi2)
    x = np.cos(phi1) * np.sin(phi2) - np.sin(phi1) * np.cos(phi2) * np.cos(dlam)
    return (np.degrees(np.arctan2(y, x)) + 360.0) % 360.0
def Block(data, num_filter, kernel, stride=(1,1), pad=(0,0), eps=1e-3, name=None):
    """Convolution -> BatchNorm -> ReLU building block.

    Sub-operators are named conv_<name>, bn_<name> and relu_<name>; the
    ReLU-activated symbol is returned.
    """
    x = mx.sym.Convolution(data=data, num_filter=num_filter, stride=stride,
                           kernel=kernel, pad=pad, name='conv_%s' % name)
    x = mx.sym.BatchNorm(data=x, fix_gamma=False, eps=eps, momentum=0.9,
                         name='bn_%s' % name)
    return mx.sym.Activation(data=x, act_type='relu', name='relu_%s' % name)
def flat(x, output_name="flatten"):
    """Collapse every non-batch dimension of *x* into one, yielding shape [?, n]."""
    dims = x.get_shape().as_list()
    n = np.prod(dims[1:])
    flattened = tf.reshape(x, [-1, n])
    return tf.identity(flattened, name=output_name)
from operator import sub
def preprocess_value(value): # TODO add preprocessing for noise and other stuff
    """
    Preprocess value with regex to clear noise from the data.

    Underscores are treated as word separators, the text is lower-cased,
    HTML <img> tags become ' image_token ', other HTML tags are dropped,
    ``[img_assist...]`` markers are dropped, and URLs become ' url_token '.

    Parameters
    ----------
    value: str
        string to be preprocessed
    Returns
    -------
    preprocessed: str
        lower-cased value with noise tokens filtered or replaced
    """
    # BUG FIX: the regex substitution below needs re.sub; the module-level
    # `from operator import sub` is arithmetic subtraction and made every
    # call raise TypeError.
    from re import sub

    words = str(' '.join(value.split('_'))).lower().split(' ')
    preprocessed = []
    for v in words:
        v = sub(r'<img[^<>]+(>|$)', ' image_token ', v)
        v = sub(r'<[^<>]+(>|$)', ' ', v)
        # noinspection RegExpDuplicateCharacterInClass
        v = sub(r'[img_assist[^]]*?]', ' ', v)
        v = sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|%[0-9a-fA-F][0-9a-fA-F])+', ' url_token ', v)
        preprocessed.append(v)
    return ' '.join(preprocessed).lower()
import os
def get_molecule(molecule):
    """Load the named molecule's geometry from the data folder.

    Parameters
    ----------
    molecule : string
        prefix of the .xyz file

    Returns
    -------
    molecule : `dict`
        parsed molecule, as described in :func:`~molpy.util.read_xyz`
    """
    xyz_path = os.path.join(dirname, molecule + ".xyz")
    return read_xyz(xyz_path)
def connect_to_database():
    """Connect to the local sqlite database and return the connection object.

    Exits the process with status 1 if the connection cannot be opened,
    preserving the original fail-fast behaviour.
    """
    try:
        connection = sqlite3.connect('blackjack_terminal')
        print("DB Connected!")
    except Exception as e:  # fixed Python 2 `except Exception, e` syntax
        print(e)
        sys.exit(1)
    return connection
def div_up(a, b):
    """Upper bound of a divided by b (ceiling division for positive operands)."""
    rounded_up = (a + b - 1) // b
    return rounded_up
def handle_info():
    """Battlesnake registration metadata.

    Called when the snake is registered on play.battlesnake.com; controls
    the snake's appearance and author attribution.  See
    https://docs.battlesnake.com/guides/getting-started#step-4-register-your-battlesnake
    and https://docs.battlesnake.com/references/personalization
    TIP: Opening the Battlesnake URL in a browser shows this data.
    """
    print("INFO")
    info = {
        "apiversion": "1",
        "author": "",  # TODO: Your Battlesnake Username
        "color": "#66ff33",  # TODO: Personalize
        "head": "evil",  # TODO: Personalize
        "tail": "weight",  # TODO: Personalize
    }
    return info
from datetime import datetime
def story_root(story):
    """story_root()
    Serves the root page of a story.
    Accessed at '/story/<story>' via a GET request.
    Looks up the story's root page in Firestore, substitutes the current
    user's attributes into the page body, records the visit in the user's
    activity and history (unless previewing or anonymous), and renders the
    story_page.html template.
    """
    # Gets the DocumentReference to the story document in Firestore
    story_ref = db.collection('stories').document(story)
    # Gets the DocumentSnapshot of the story document in Firestore
    story_doc = story_ref.get()
    # Checks whether or not the story exists
    if not story_doc.exists:
        abort(404)
    # Gets the root page's page ID
    page_id = story_doc.get('root_id')
    # Gets the page data for the specified page ID
    page = story_doc.get('page_nodes.`' + page_id + '`')
    # Gets whether or not the page is viewed as a preview (from history page)
    preview = request.args.get('preview')
    if preview == None:
        preview = False
    # Gets whether or not the user is logged in
    # NOTE(review): assumes the auth layer sets current_user to None for
    # anonymous visitors — confirm against the login manager configuration.
    guest = current_user == None
    # Replaces user attributes in the page content with the current user's values
    for user_attr in allowed_user_attr:
        page['page_body_text'] = page['page_body_text'].replace('$$' + user_attr + '$$', 'Guest' if guest else getattr(current_user, user_attr))
    history_id = None
    if not preview and not guest:
        # Records the page visit to story activity
        user_activity = UserActivity.get_user_activity(current_user.email)
        user_activity.story_activity.append({
            'timestamp': datetime.now(),
            'story': story,
            'page_id': page_id
        })
        user_activity.save()
        # Checks for a matching history, to not add a duplicate history
        history_found = False
        for index, history in enumerate(current_user.history):
            if history['story'] == story and history['pages'][0] == page_id and len(history['pages']) == 1:
                # Updates timestamp of matching history
                history['last_updated'] = datetime.now()
                history_found = True
                history_id = index
        # If a matching history does not already exists, adds the root page to a new history
        if not history_found:
            new_history = {}
            new_history['last_updated'] = datetime.now()
            new_history['story'] = story
            new_history['pages'] = [page_id]
            current_user.history.append(new_history)
            history_id = len(current_user.history) - 1
        # Saves the changes to the user
        current_user.save()
    # Gets whether or not the page is favorited
    favorited = False
    if not guest:
        for favorite in current_user.favorites:
            if favorite['story'] == story and favorite['page_id'] == page_id:
                favorited = True
    # Returns the story_page.html template with the specified page
return render_response(render_template('story_page.html', guest=guest, favorited=favorited, story=story, page=page, preview=preview, history=history_id)) | e9cc07349368324d6a8859390dc5bcfb7c52d71c | 3,629,173 |
from numscons.core.utils import pkg_to_path
def get_scons_pkg_build_dir(pkg):
    """Build directory for package *pkg* (dotted, e.g. foo.bar).

    The returned path is relative to the top setup.py.
    """
    build_root = get_scons_build_dir()
    return pjoin(build_root, pkg_to_path(pkg))
def oklab_to_linear_srgb(lab):
    """Convert an Oklab triplet to linear-light sRGB."""
    lms = [component ** 3 for component in util.dot(OKLAB_TO_LMS3, lab)]
    return util.dot(LMS_TO_SRGBL, lms)
def rjust(text: str, length: int) -> str:
    """Right-justify *text* to *length*, measuring width without ANSI control codes."""
    padding = length - len(strip_ansi(text))
    return " " * padding + text
def solar_elevation_angle(solar_zenith_angle):
    """Solar elevation angle in degrees — the complement of the zenith angle."""
    return 90 - solar_zenith_angle
def vector3d_to_quaternion(x):
    """Promote a tensor of 3D vectors to pure quaternions.

    A zero scalar part is prepended along the last axis, i.e.
    [[1, 2, 3]] -> [[0, 1, 2, 3]].

    Args:
        x: A tensor of rank R whose last dimension is 3.

    Returns:
        A tensor of rank R whose last dimension is 4.

    Raises:
        ValueError, if the last dimension of x is not 3.
    """
    if x.shape[-1] != 3:
        raise ValueError("The last dimension of x must be 3.")
    # Pad the last dimension with one zero on the left, none on the right.
    return F.pad(x, (1, 0))
def record_read_permission_factory(record=None):
    """Create a pre-configured 'read' permission for *record*."""
    policy_cls = get_record_permission_policy()
    return policy_cls(action='read', record=record)
import requests
def fetch_66_cookie():
    """
    Fetch the anti-crawl cookie for www.66ip.cn.

    The site answers with an obfuscated JavaScript challenge.  This
    function rewrites the challenge into a callable, evaluates it with
    execjs, and combines the computed __jsl_clearance value with the
    session cookie from the response headers.
    :return: cookie string of the form '<jsluid-cookie>; __jsl_clearance=<value>'
    """
    cookie_url = 'http://www.66ip.cn/'
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36",
    }
    # Retry until the challenge script actually produces a document.cookie
    # assignment (the server occasionally serves a different payload).
    while True:
        response = requests.get(cookie_url, headers=headers)
        js_code1 = response.text
        # Strip the surrounding <script> tags and any trailing noise so only
        # the challenge body up to its last closing brace remains.
        js_code1 = js_code1.rstrip('\n')
        js_code1 = js_code1.replace('</script>', '')
        js_code1 = js_code1.replace('<script>', '')
        index = js_code1.rfind('}')
        js_code1 = js_code1[0:index + 1]
        # Wrap the body in a function and turn its eval into a return so that
        # execjs hands back the generated (second-stage) code instead of running it.
        js_code1 = 'function getCookie() {' + js_code1 + '}'
        js_code1 = js_code1.replace('eval', 'return')
        js_code2 = execjs.compile(js_code1)
        code = js_code2.call('getCookie')
        if 'document.cookie' in code:
            break
    # Rewrite the document.cookie assignment into a plain variable so the
    # clearance string can be returned from a second execjs evaluation.
    code = 'var a' + code.split('document.cookie')[1].split("Path=/;'")[0] + "Path=/;';return a;"
    code = 'window = {}; \n' + code
    js_final = "function getClearance(){" + code + "};"
    js_final = js_final.replace("return return", "return eval")
    ctx = execjs.compile(js_final)
    jsl_clearance = ctx.call('getClearance')
    # Combine the server-set session cookie with the computed clearance value.
    jsl_uid = response.headers["Set-Cookie"].split(";")[0]
    jsl_cle = jsl_clearance.split(';')[0].split('=')[1]
    cookie = f"{jsl_uid}; __jsl_clearance={jsl_cle}"
return cookie | 55225b4b132b16a7b97615c78f46034fac8cb0bb | 3,629,180 |
def query_address(address):
    """Look up and return the score for *address*.

    Example: curl localhost:8080/queries/1234
    """
    score = get_score(address)
    return score
def marshal(resp, schema):
    """Marshal *resp* through *schema*.

    Lists are dumped item by item; dicts are loaded with unknown fields
    excluded; anything else yields None.

    :param resp: the falcon response object
    :param schema: the schema class used to validate the response
    :return: the marshalled payload (list, loaded object, or None)
    """
    result = None
    if isinstance(resp, list):
        result = [schema().dump(item) for item in resp]
    if isinstance(resp, dict):
        result = schema().load(data=resp, unknown=EXCLUDE)
    return result
def ethereum_tester(
    patch_genesis_gas_limit,
):
    """Return an Ethereum tester backed by PyEVM with Byzantium active from block 0."""
    backend = PyEVMBackend()
    tester = EthereumTester(backend)
    tester.set_fork_block('FORK_BYZANTIUM', 0)
    return tester
import requests
def fetch_form(formid):
    """Fetch an Ona form definition.

    Returns the parsed JSON body, or None when the request does not
    succeed with HTTP 200.
    """
    url = urljoin(ONA_URI, "/api/v1/forms/{}.json".format(formid))
    headers = {"Authorization": "Token {}".format(ONA_TOKEN)}
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        return response.json()
    return None
def get_token_from_http_header(request):
    """Extract the auth token from the request's HTTP authorization header.

    Returns None when the header is missing, malformed (not exactly two
    whitespace-separated parts), or carries an unexpected prefix.
    """
    try:
        header = request.META.get(HTTP_AUTHORIZATION_HEADER, "")
    except AttributeError:
        header = ""
    try:
        prefix, payload = header.split()
    except ValueError:
        # Malformed header: use a prefix that can never match.
        return None
    if prefix.lower() == AUTHORIZATION_HEADER_PREFIX.lower():
        return payload
    return None
def choose_line(path_with_lines):
    """ Choose one line for each stations in path_with_lines list
    Args:
        path_with_lines(list): A list of dictionaries of stations and lines
    Returns:
        final_path(list): A list of dictionaries of station, line, and token
    """
    final_path = []
    i = 0
    end = len(path_with_lines) - 1
    # token flags a line change at a station; it is rewritten in the final loop.
    token = 0
    while i < end:
        if len(path_with_lines[i]['Line']) == 1:
            # Only one line serves this station — no choice to make.
            path_with_lines[i]['token'] = token
            final_path.append(path_with_lines[i])
            i += 1
        else:
            # Pick a line shared with the next station.
            for line in path_with_lines[i]['Line']:
                for next_line in path_with_lines[i + 1]['Line']:
                    if line == next_line:
                        new_dict = {'Station':path_with_lines[i]['Station'], 'Line':[line], 'token': token}
                        final_path.append(new_dict)
                        break
                # NOTE(review): this break runs unconditionally after the first
                # candidate line, so only path_with_lines[i]['Line'][0] is ever
                # compared against the next station — likely meant to break only
                # when a match was found. Confirm intent before changing.
                break
            i += 1
    end_fin = len(final_path)
    # NOTE(review): len() of the station dict counts its keys ('Station',
    # 'Line', ...), which is never 1, so the else branch below always runs;
    # this probably intended len(path_with_lines[end]['Line']) == 1.
    if len(path_with_lines[end]) == 1:
        final_path.append(path_with_lines[end])
    else:
        # Terminal station inherits the line chosen for the previous stop.
        new_dict = {'Station': path_with_lines[end]['Station'], 'Line': final_path[end_fin - 1]['Line'], 'token': token}
        final_path.append(new_dict)
    i = 0
    # Mark transfer stations: token=1 wherever the chosen line changes.
    while i < end_fin:
        if final_path[i]['Line'] != final_path[i + 1]['Line']:
            final_path[i]['token'] = 1
        i += 1
return final_path | 7548d49a652f1cc2d4c75cee06ed63099713e733 | 3,629,186 |
def get_multiclass_predictions_and_correctness(probabilities, labels, top_k=1):
    """Return top-k prediction probabilities and a correctness boolean vector.

    Note: despite the function name, the first return value holds the
    probabilities assigned to the predicted class(es), not the class
    indices themselves.
    """
    _validate_probabilities(probabilities, multiclass=True)
    _check_rank_nonempty(rank=1, labels=labels)
    _check_rank_nonempty(rank=2, probabilities=probabilities)
    if top_k == 1:
        # Probability of the argmax class for each example, and whether that
        # argmax matches the true label.
        class_predictions = np.argmax(probabilities, -1)
        top_k_probs = probabilities[np.arange(len(labels)), class_predictions]
        is_correct = np.equal(class_predictions, labels)
    else:
        # _filter_top_k presumably returns the top-k probabilities and whether
        # the true label appears among the top k — confirm against its definition.
        top_k_probs, is_correct = _filter_top_k(probabilities, labels, top_k)
return top_k_probs, is_correct | 25937b5f4aa0dc77dbb4f0c40b5d6120fe3624b7 | 3,629,187 |
from subprocess import getoutput
import tarfile
import click
import os
import shutil
def download_frappe_assets(verbose=True):
    """Downloads and sets up Frappe assets if they exist based on the current
    commit HEAD.
    Returns True if correctly setup else returns False.
    """
    assets_setup = False
    frappe_head = getoutput("cd ../apps/frappe && git rev-parse HEAD")
    if frappe_head:
        try:
            url = get_assets_link(frappe_head)
            click.secho("Retrieving assets...", fg="yellow")
            prefix = mkdtemp(prefix="frappe-assets-", suffix=frappe_head)
            assets_archive = download_file(url, prefix)
            print("\n{0} Downloaded Frappe assets from {1}".format(green('✔'), url))
            if assets_archive:
                directories_created = set()
                click.secho("\nExtracting assets...\n", fg="yellow")
                with tarfile.open(assets_archive) as tar:
                    for file in tar:
                        if not file.isdir():
                            # Remap archive paths into the local sites tree.
                            dest = "." + file.name.replace("./frappe-bench/sites", "")
                            asset_directory = os.path.dirname(dest)
                            show = dest.replace("./assets/", "")
                            if asset_directory not in directories_created:
                                if not os.path.exists(asset_directory):
                                    os.makedirs(asset_directory, exist_ok=True)
                                directories_created.add(asset_directory)
                            tar.makefile(file, dest)
                            print("{0} Restored {1}".format(green('✔'), show))
                build_missing_files()
                return True
            else:
                # NOTE(review): bare `raise` outside an except clause raises
                # RuntimeError("No active exception ..."), which is then caught
                # by the handler below — an intentional, if unusual, way to
                # funnel the empty-archive case into the error path.
                raise
        except Exception:
            # TODO: log traceback in bench.log
            click.secho("An Error occurred while downloading assets...", fg="red")
            assets_setup = False
        finally:
            # Best-effort cleanup of the temporary download directory. If the
            # failure happened before assets_archive was assigned, the NameError
            # raised here is swallowed by the inner except.
            try:
                shutil.rmtree(os.path.dirname(assets_archive))
            except Exception:
                pass
return assets_setup | 16f4146e41f2914cbcaa8a7fcaed995183f96331 | 3,629,188 |
def compute_gradient_penalty(discriminator, interpolated):
    """Computes the WGAN-GP gradient penalty for a discriminator.

    According to [https://arxiv.org/abs/1704.00028]

    Args:
        discriminator: The discriminator to compute the gradient
            penalty for.
        interpolated: The interpolation between real and fake data.

    Returns:
        The mean squared deviation of the per-sample gradient norm from one.
    """
    with tf.GradientTape() as tape:
        tape.watch(interpolated)
        scores = discriminator(interpolated)
    grads = tape.gradient(scores, [interpolated])[0]
    # Reduce over every non-batch axis to get one gradient norm per sample.
    reduce_axes = list(range(1, len(interpolated.shape)))
    grad_norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=reduce_axes))
    return tf.reduce_mean((grad_norms - 1.0) ** 2.0)
import torch
def build_grid(resolution, device):
    """Build a positional grid for linear embedding.

    The four channels along the last axis are (y, x, 1-y, 1-x), each
    linearly spaced over [0, 1].

    :param resolution: tuple of integers (height, width)
    :param device: device the resulting tensor is moved to
    :return: float32 tensor of shape [1, height, width, 4]
    """
    axes = [np.linspace(0., 1., num=size) for size in resolution]
    mesh = np.stack(np.meshgrid(*axes, sparse=False, indexing="ij"), axis=-1)
    mesh = np.reshape(mesh, [resolution[0], resolution[1], -1])
    mesh = np.expand_dims(mesh, axis=0).astype(np.float32)
    mesh = np.concatenate([mesh, 1.0 - mesh], axis=-1)
    return torch.from_numpy(mesh).to(device)
from typing import Optional
from typing import Dict
from typing import Union
from datetime import datetime
def get_fit_point_data(frame: fitdecode.records.FitDataMessage) -> Optional[Dict[str, Union[float, int, str, datetime]]]:
    """Extract track-point data from a FIT data-message frame as a dict.

    Latitude/longitude are converted from FIT semicircles to degrees when
    both are present; every other column in POINTS_COLUMN_NAMES[3:] is
    copied verbatim when the frame carries it.  Frames without a position
    are still returned with whatever other fields they hold.
    """
    data: Dict[str, Union[float, int, str, datetime]] = {}
    semicircles_per_degree = (2**32) / 360  # FIT stores coordinates in semicircles
    has_position = (frame.has_field('position_lat')
                    and frame.has_field('position_long')
                    and frame.get_value('position_lat') is not None
                    and frame.get_value('position_long') is not None)
    if has_position:
        data['latitude'] = frame.get_value('position_lat') / semicircles_per_degree
        data['longitude'] = frame.get_value('position_long') / semicircles_per_degree
    for column in POINTS_COLUMN_NAMES[3:]:
        if frame.has_field(column):
            data[column] = frame.get_value(column)
    return data
from typing import Mapping
import six
import os
def read(config_values):
    """Deep-merge an ordered collection of configuration sources.

    Each entry may be a mapping, a path to a config file, or a raw
    yaml/json stream; values are merged in order, later entries winning.

    Raises PolyaxonConfigurationError for empty input, unsupported entry
    types, invalid yaml, or sources that do not yield a mapping.
    """
    if not config_values:
        raise PolyaxonConfigurationError('Cannot read config_value: `{}`'.format(config_values))
    merged = {}
    for config_value in to_list(config_values):
        if not isinstance(config_value, (Mapping, six.string_types)):
            raise PolyaxonConfigurationError(
                "Expects Mapping, string, or list of Mapping/string instances, "
                "received {} instead".format(type(config_value)))
        if isinstance(config_value, Mapping):
            partial = config_value
        elif os.path.isfile(config_value):
            partial = _read_from_file(config_value)
        else:
            # Not a mapping or an existing file: try it as a yaml/json stream.
            try:
                partial = _read_from_stream(config_value)
            except ScannerError:
                raise PolyaxonConfigurationError(
                    'Received non valid yaml stream: `{}`'.format(config_value))
        if not (partial and isinstance(partial, Mapping)):
            raise PolyaxonConfigurationError('Cannot read config_value: `{}`'.format(config_value))
        merged = deep_update(merged, partial)
    return merged
def from_time(year=None, month=None, day=None, hours=None, minutes=None,
              seconds=None, microseconds=None, timezone=None):
    """Convenience wrapper to take a series of date/time elements and return a WMI time
    of the form `yyyymmddHHMMSS.mmmmmm+UUU`. All elements may be int, string or
    omitted altogether. If omitted, they will be replaced in the output string
    by a series of stars of the appropriate length.
    :param year: The year element of the date/time
    :param month: The month element of the date/time
    :param day: The day element of the date/time
    :param hours: The hours element of the date/time
    :param minutes: The minutes element of the date/time
    :param seconds: The seconds element of the date/time
    :param microseconds: The microseconds element of the date/time
    :param timezone: The timezone offset (minutes from UTC); omitted gives `+***`
    :returns: A WMI datetime string of the form: `yyyymmddHHMMSS.mmmmmm+UUU`
    """
    def str_or_stars(i, length):
        """Zero-pad *i* to *length* digits, or all stars when *i* is None."""
        if i is None:
            return "*" * length
        return str(i).rjust(length, "0")

    wmi_time = ""
    wmi_time += str_or_stars(year, 4)
    wmi_time += str_or_stars(month, 2)
    wmi_time += str_or_stars(day, 2)
    wmi_time += str_or_stars(hours, 2)
    wmi_time += str_or_stars(minutes, 2)
    wmi_time += str_or_stars(seconds, 2)
    wmi_time += "."
    wmi_time += str_or_stars(microseconds, 6)
    # BUG FIX: the original compared `timezone >= 0` unconditionally, which
    # raised TypeError when timezone was omitted (None) — despite the
    # docstring promising that every element may be omitted.
    if timezone is None:
        sign = "+"
    elif timezone >= 0:
        sign = "+"
    else:
        sign = "-"
        timezone = abs(timezone)
    wmi_time += sign
    wmi_time += str_or_stars(timezone, 3)
    return wmi_time
def makeit_ssa(exprs):
    """
    Convert an iterable of Eqs into Static Single Assignment (SSA) form.

    Every LHS that is written more than once is renamed to a fresh `ssa<N>`
    Symbol, and subsequent reads are rewritten to reference the renamed
    version; non-scalar LHSs keep their original name on their final write.
    """
    # Identify recurring LHSs
    seen = {}
    for i, e in enumerate(exprs):
        seen.setdefault(e.lhs, []).append(i)
    # Optimization: don't waste time reconstructing stuff if already in SSA form
    if all(len(i) == 1 for i in seen.values()):
        return exprs
    # SSA conversion
    c = 0
    mapper = {}
    processed = []
    for i, e in enumerate(exprs):
        where = seen[e.lhs]
        rhs = uxreplace(e.rhs, mapper)
        if len(where) > 1:
            # Scalars always get a fresh name; non-scalar writes keep the
            # original LHS on the last assignment to that LHS.
            needssa = e.is_Scalar or where[-1] != i
            lhs = Symbol(name='ssa%d' % c, dtype=e.dtype) if needssa else e.lhs
            if e.is_Increment:
                # Turn AugmentedAssignment into Assignment
                processed.append(e.func(lhs, mapper[e.lhs] + rhs, operation=None))
            else:
                processed.append(e.func(lhs, rhs))
            mapper[e.lhs] = lhs
            c += 1
        else:
            processed.append(e.func(e.lhs, rhs))
return processed | 627d21c6f941e859686e41c3aafc1a81c4b4213e | 3,629,194 |
import sys
def get_vocabulary(fobj, threshold):
    """Build a phrase dictionary from a '|||'-separated phrase-table stream.

    Each line holds 'src ||| trg ||| count'.  A pair is kept only when the
    source and target plausibly align (equal length with a matching first
    or last word, or — for unequal lengths — matching first AND last
    words) and its count reaches *threshold*.  Every suffix of each kept
    target phrase is also registered as a (possibly empty) key.
    """
    vocab = dict()
    kept = 0
    for line in fobj:
        fields = line.strip('\r\n ').split(' ||| ')
        src_tokens = fields[0].split(' ')
        trg_tokens = fields[1].split(' ')
        if len(src_tokens) == 1 or len(trg_tokens) == 1:
            # Skip single-word phrases outright.
            continue
        elif (len(src_tokens) == len(trg_tokens) and len(trg_tokens) > 1 and
                (src_tokens[0] == trg_tokens[0] or src_tokens[-1] == trg_tokens[-1])):
            # Equal lengths: accept when the first or last word matches.
            pass
        elif not (src_tokens[0] == trg_tokens[0] and src_tokens[-1] == trg_tokens[-1]):
            # Unequal lengths: require both the first and last words to match.
            continue
        src_phrase = fields[0].strip('\r\n ')
        trg_phrase = fields[1].strip('\r\n ')
        count = int(fields[-1])
        if trg_phrase not in vocab:
            vocab[trg_phrase] = []
        if not (count < threshold):
            vocab[trg_phrase].append((src_phrase, count))
            kept += 1
        # Register each suffix of the target phrase as a key.
        suffix = ""
        for word in trg_tokens[::-1]:
            suffix = word + " " + suffix if suffix != "" else word
            if suffix not in vocab:
                vocab[suffix] = []
    sys.stderr.write('vocab Done len={} add_c={}\n'.format(len(vocab), kept))
    return vocab
def parse_ucsc_file_index(stream, base_url):
    """Turn a UCSC DCC files.txt index into a dictionary of name-value pairs.

    Each line is '<filename>\t<name>=<value>; <name>=<value>; ...'; the
    filename is joined onto *base_url* to form the key.

    :param stream: iterable of index lines
    :param base_url: URL prefix prepended to each filename
    :return: dict mapping the full URL to its attribute dict
    """
    file_index = {}
    for line in stream:
        # maxsplit=1 tolerates tab characters inside the attribute text.
        filename, attribute_line = line.split('\t', 1)
        filename = base_url + filename
        attributes = {}
        for assignment in attribute_line.split(';'):
            # Robustness fix: skip empty segments from trailing semicolons
            # (previously a ValueError), and allow '=' inside values.
            if not assignment.strip():
                continue
            name, value = assignment.split('=', 1)
            attributes[name.strip()] = value.strip()
        file_index[filename] = attributes
    return file_index
def AUC_PR(true_vessel_img, pred_vessel_img, save_fname):
    """Area under the precision-recall curve for a vessel prediction.

    The raw precision/recall arrays are also persisted to *save_fname*.
    """
    flat_truth = true_vessel_img.flatten()
    flat_pred = pred_vessel_img.flatten()
    precision, recall, _ = precision_recall_curve(flat_truth, flat_pred, pos_label=1)
    save_obj({"precision":precision, "recall":recall}, save_fname)
    return auc(recall, precision)
def download_foot_bones():
    """Download and return the foot bones example dataset."""
    dataset_path = 'fsu/footbones.ply'
    return _download_and_read(dataset_path)
def _select_list_subset_schema():
    """Load and return the schema for the select_list_subset type."""
    schema_key = _SELECT_LIST_SUBSET_KEY
    return schemas.load(schema_key)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.