| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def main(
path_experiment,
path_table,
path_dataset,
path_output,
path_reference=None,
path_comp_bm=None,
min_landmarks=1.,
details=True,
allow_inverse=False,
):
""" main entry point
:param str path_experiment: path to experiment folder
:param str path_table: path to assignment file (requested registration pairs)
:param str path_dataset: path to provided landmarks
:param str path_output: path to generated results
:param str|None path_reference: path to the complete landmark collection,
if None use dataset folder
:param str|None path_comp_bm: path to reference comp. benchmark
:param float min_landmarks: required number of submitted landmarks in range (0, 1),
match values in COL_PAIRED_LANDMARKS
:param bool details: exporting case details
:param bool allow_inverse: allow evaluate also inverse transformation,
warped landmarks from ref to move image
"""
path_results = os.path.join(path_experiment, ImRegBenchmark.NAME_CSV_REGISTRATION_PAIRS)
if not os.path.isfile(path_results):
raise AttributeError('Missing experiments results: %s' % path_results)
path_reference = path_dataset if not path_reference else path_reference
# drop time column from Cover which should be empty
df_overview = pd.read_csv(path_table).drop([ImRegBenchmark.COL_TIME], axis=1, errors='ignore')
df_overview = _df_drop_unnamed(df_overview)
# drop Warp* column from Cover which should be empty
df_overview = df_overview.drop(
[col for col in df_overview.columns if 'warped' in col.lower()],
axis=1,
errors='ignore',
)
df_results = pd.read_csv(path_results)
df_results = _df_drop_unnamed(df_results)
# df_results.drop(filter(lambda c: 'Unnamed' in c, df_results.columns), axis=1, inplace=True)
cols_ = list(ImRegBenchmark.COVER_COLUMNS_WRAP) + [ImRegBenchmark.COL_TIME]
df_results = df_results[[col for col in cols_ if col in df_results.columns]]
df_experiments = pd.merge(df_overview, df_results, how='left', on=ImRegBenchmark.COVER_COLUMNS)
df_experiments = swap_inverse_experiment(df_experiments, allow_inverse)
# df_experiments.drop([ImRegBenchmark.COL_IMAGE_REF_WARP, ImRegBenchmark.COL_POINTS_REF_WARP],
# axis=1, errors='ignore', inplace=True)
    df_experiments.drop([col for col in df_experiments.columns if 'Unnamed' in col], axis=1, inplace=True)
df_experiments = replicate_missing_warped_landmarks(df_experiments, path_dataset, path_experiment)
normalize_exec_time(df_experiments, path_experiment, path_comp_bm)
# logging.info('Filter used landmarks.')
# path_filtered = os.path.join(path_output, FOLDER_FILTER_DATASET)
# create_folder(path_filtered, ok_existing=True)
# _filter_lnds = partial(filter_export_landmarks, path_output=path_filtered,
# path_dataset=path_dataset, path_reference=path_reference)
# for idx, ratio in iterate_mproc_map(_filter_lnds, df_experiments.iterrows(),
# desc='Filtering', nb_workers=nb_workers):
# df_experiments.loc[idx, COL_PAIRED_LANDMARKS] = np.round(ratio, 2)
logging.info('Compute landmarks statistic.')
_compute_lnds_stat = partial(
ImRegBenchmark.compute_registration_statistic,
df_experiments=df_experiments,
path_dataset=path_dataset,
path_experiment=path_experiment,
path_reference=path_reference,
)
# NOTE: this has to run in SINGLE thread so there is SINGLE table instance
list(iterate_mproc_map(_compute_lnds_stat, df_experiments.iterrows(), desc='Statistic', nb_workers=1))
name_results, _ = os.path.splitext(os.path.basename(path_results))
path_results = os.path.join(path_output, name_results + '_NEW.csv')
logging.debug('exporting CSV results: %s', path_results)
df_experiments.to_csv(path_results)
path_json = export_summary_json(df_experiments, path_experiment, path_output, min_landmarks, details)
return path_json
| 5,336,100
|
def download_url(url: str, filename: Union[Path, str]) -> None:
"""Downloads data from url to file.
Args:
url: url to the data to download.
filename: path to the download location.
"""
with TqdmUpTo(unit="B", unit_scale=True, unit_divisor=1024, miniters=1) as t:
urlretrieve(url, filename, reporthook=t.update_to)
| 5,336,101
|
def fake_login(request):
"""Contrived version of a login form."""
if getattr(request, 'limited', False):
raise RateLimitError
if request.method == 'POST':
password = request.POST.get('password', 'fail')
        if password != 'correct':
return False
return True
| 5,336,102
|
def register_scrapqd(app,
template=None,
register_sample_url=True,
redirect_root=True):
"""System add ScrapQD url to the Flask App and registers system defined crawlers."""
name = config.APP_NAME
if register_sample_url:
register_sample_page(name, app)
load_graphql_url(name, app,
template=template,
redirect_root=redirect_root)
| 5,336,103
|
def copytree(src, dst, symlinks=False, ignore=None):
"""like shutil.copytree() but ignores existing files
https://stackoverflow.com/a/22331852/1239986
"""
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except OSError:
pass # lchmod not available
elif os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
| 5,336,104
|
def split_to_sentences(data):
"""
Split data by linebreak "\n"
Args:
data: str
Returns:
A list of sentences
"""
sentences = data.split('\n')
    # Additional cleaning (This part is already implemented)
# - Remove leading and trailing spaces from each sentence
# - Drop sentences if they are empty strings.
sentences = [s.strip() for s in sentences]
sentences = [s for s in sentences if len(s) > 0]
return sentences
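# Minimal usage sketch for split_to_sentences; the sample text is illustrative only.
sample_text = "First sentence.\n\n  Second sentence.  \n"
assert split_to_sentences(sample_text) == ['First sentence.', 'Second sentence.']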
| 5,336,105
|
def test_rollouts(do_print=False, time_for_test=3):
"""Do rollouts and see if the environment crashes."""
time_start = time()
while True:
if time() - time_start > time_for_test:
break
# obtaining random params
width = np.random.choice(np.arange(1, 20))
height = np.random.choice(np.arange(1, 20))
n_keys = np.random.choice(np.arange(1, 20))
n_chests = np.random.choice(np.arange(1, 20))
n_food = np.random.choice(np.arange(1, 20))
initial_health = np.random.choice(np.arange(1, 20))
food_efficiency = np.random.choice(np.arange(1, 20))
wh = width * height
n_objects = 3 + n_keys + n_chests + n_food
params = dict(width=width, height=height, n_keys=n_keys, n_chests=n_chests, n_food=n_food,
initial_health=initial_health, food_efficiency=food_efficiency)
if do_print:
print("Obtained params", params)
if n_objects > wh:
with pytest.raises(AssertionError) as excinfo:
# creating environment
KeyChestGymEnv(engine_constructor=KeyChestEnvironmentRandom,
**params)
assert str(excinfo.value).startswith('Too small width*height')
continue
else:
env = KeyChestGymEnv(engine_constructor=KeyChestEnvironmentRandom,
**params)
assert isinstance(env, KeyChestGymEnv)
# doing episodes
for episode in range(20):
obs = env.reset()
img = env.render(mode='rgb_array')
assert img.shape[2] == 3
done = False
steps = 0
while not done:
act = env.action_space.sample()
obs, rew, done, info = env.step(act)
img = env.render(mode='rgb_array')
assert img.shape[2] == 3
steps += 1
| 5,336,106
|
def velocity_N(
adata,
group=None,
recalculate_pca=True,
recalculate_umap=True,
del_2nd_moments=None,
):
"""use new RNA based pca, umap, for velocity calculation and projection for kinetics or one-shot experiment.
Note that currently velocity_N function only considers labeling data and removes splicing data if they exist.
Parameters
----------
adata: :class:`~anndata.AnnData`
        AnnData object that stores data for the kinetics or one-shot experiment, must include `X_new, X_total`
layers.
group: `str` or None (default: None)
The cell group that will be used to calculate velocity in each separate group. This is useful if your data
comes from different labeling condition, etc.
    recalculate_pca: `bool` (default: True)
        Whether to recalculate pca with the new RNA data. If set to False, you need to make sure the pca is
        already generated via new RNA.
    recalculate_umap: `bool` (default: True)
        Whether to recalculate umap with the new RNA data. If set to False, you need to make sure the umap is
        already generated via new RNA.
    del_2nd_moments: `None` or `bool`
        Whether to remove second moments or covariances. If `None`, the default from `DynamoAdataConfig` is used.
        This argument is passed on to the `dynamics` function.
Returns
-------
Nothing but the adata object is updated with the low dimensional (umap or pca) velocity projections with the
new RNA or pca based RNA velocities.
"""
del_2nd_moments = DynamoAdataConfig.use_default_var_if_none(
del_2nd_moments, DynamoAdataConfig.RECIPE_DEL_2ND_MOMENTS_KEY
)
var_columns = adata.var.columns
layer_keys = adata.layers.keys()
# check velocity_N, velocity_T, X_new, X_total
if not np.all([i in layer_keys for i in ["X_new", "X_total"]]):
raise Exception(f"The `X_new`, `X_total` has to exist in your data before running velocity_N function.")
# delete the moments and velocities that generated via total RNA
for i in ["M_t", "M_tt", "M_n", "M_tn", "M_nn", "velocity_N", "velocity_T"]:
if i in layer_keys:
del adata.layers[i]
    # delete the kinetic parameters that were generated via total RNA
for i in [
"alpha",
"beta",
"gamma",
"half_life",
"alpha_b",
"alpha_r2",
"gamma_b",
"gamma_r2",
"gamma_logLL",
"delta_b",
"delta_r2",
"bs",
"bf",
"uu0",
"ul0",
"su0",
"sl0",
"U0",
"S0",
"total0",
"beta_k",
"gamma_k",
]:
if i in var_columns:
del adata.var[i]
if group is not None:
group_prefixes = [group + "_" + str(i) + "_" for i in adata.obs[group].unique()]
for i in group_prefixes:
for j in [
"alpha",
"beta",
"gamma",
"half_life",
"alpha_b",
"alpha_r2",
"gamma_b",
"gamma_r2",
"gamma_logLL",
"delta_b",
"delta_r2",
"bs",
"bf",
"uu0",
"ul0",
"su0",
"sl0",
"U0",
"S0",
"total0",
"beta_k",
"gamma_k",
]:
if i + j in var_columns:
del adata.var[i + j]
# now let us first run pca with new RNA
if recalculate_pca:
pca_monocle(adata, np.log1p(adata[:, adata.var.use_for_pca].layers["X_new"]), pca_key="X_pca")
# if there are unspliced / spliced data, delete them for now:
for i in ["spliced", "unspliced", "X_spliced", "X_unspliced"]:
if i in layer_keys:
del adata.layers[i]
# now redo the RNA velocity analysis with moments generated with pca space of new RNA
# let us also check whether it is a one-shot or kinetics experiment
if adata.uns["pp"]["experiment_type"] == "one-shot":
dynamics(
adata,
one_shot_method="sci_fate",
model="deterministic",
group=group,
del_2nd_moments=del_2nd_moments,
)
elif adata.uns["pp"]["experiment_type"] == "kin":
dynamics(
adata,
model="deterministic",
est_method="twostep",
group=group,
del_2nd_moments=del_2nd_moments,
)
else:
raise Exception(
f"velocity_N function only supports either the one-shot or kinetics (kin) metabolic labeling "
f"experiment."
)
# umap based on new RNA
if recalculate_umap:
reduceDimension(adata, enforce=True)
# project new RNA velocity to new RNA pca
cell_velocities(
adata,
basis="pca",
X=adata.layers["M_n"],
V=adata.layers["velocity_N"],
enforce=True,
)
# project new RNA velocity to new RNA umap
cell_velocities(
adata,
basis="umap",
X=adata.layers["M_n"],
V=adata.layers["velocity_N"],
enforce=True,
)
| 5,336,107
|
def wordcount(corpus: List[TokenisedCorpus]) -> None:
"""Calculate wordcounts for a corpus.
Calculates the average, standard deviation and variance of
a LyricsCorpus.
Args:
corpus: A list of TokenisedCorpus objects
"""
click.echo("Analysing Wordcount of song: ")
words = []
for song in corpus:
words.append(len(song.tokens))
average = sum(words) / len(words)
stdev = statistics.stdev(words)
var = statistics.variance(words)
click.echo(f"Average: {average}")
click.echo(f"Std dev: {stdev}")
click.echo(f"Variance: {var}")
| 5,336,108
|
def test_type_args_propagation() -> None:
"""
It propagates type arguments to the generic's bases
"""
T = TypeVar("T", bound=float)
F = TypeVar("F", str, bytes)
S = TypeVar("S")
class Tuple(tuple[T, ...], Generic[F, T]):
pass
class TupleSubclass(Tuple[str, T], Generic[T, S]):
pass
assert resolve_type_args(Tuple[str, int], tuple) == (int, ...)
assert resolve_type_args(TupleSubclass[int, bytes], Tuple) == (str, int)
assert resolve_type_args(TupleSubclass[bool, None], tuple) == (bool, ...)
| 5,336,109
|
def read_config_file(fp: str, mode='r', encoding='utf8', prefix='#') -> dict:
"""
    Read a text file, skip empty lines and lines starting with ``prefix``, and return a dict.
    :param fp: path to the config file
    :param mode: file open mode
    :param encoding: file encoding
    :param prefix: comment prefix; lines starting with it are ignored
    :return: dict of key/value pairs parsed from ``key = value`` lines
"""
with open(fp, mode, encoding=encoding) as f:
ll = f.readlines()
        ll = [i for i in ll if i.strip() and not i.startswith(prefix)]
        params = {i.split('=', 1)[0].strip(): i.split('=', 1)[1].strip() for i in ll}
print(params)
return params
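# Round-trip sketch for read_config_file using a temporary file; the file contents
# below are illustrative only.
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False, encoding='utf8') as _tmp:
    _tmp.write("# comment line\nhost = 127.0.0.1\nport = 8080\n")
assert read_config_file(_tmp.name) == {'host': '127.0.0.1', 'port': '8080'}
os.remove(_tmp.name)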
| 5,336,110
|
def PrepareForMakeGridData(
allowed_results, starred_iid_set, x_attr,
grid_col_values, y_attr, grid_row_values, users_by_id, all_label_values,
config, related_issues, hotlist_context_dict=None):
"""Return all data needed for EZT to render the body of the grid view."""
def IssueViewFactory(issue):
return template_helpers.EZTItem(
summary=issue.summary, local_id=issue.local_id, issue_id=issue.issue_id,
status=issue.status or issue.derived_status, starred=None, data_idx=0,
project_name=issue.project_name)
grid_data = MakeGridData(
allowed_results, x_attr, grid_col_values, y_attr, grid_row_values,
users_by_id, IssueViewFactory, all_label_values, config, related_issues,
hotlist_context_dict=hotlist_context_dict)
issue_dict = {issue.issue_id: issue for issue in allowed_results}
for grid_row in grid_data:
for grid_cell in grid_row.cells_in_row:
for tile in grid_cell.tiles:
if tile.issue_id in starred_iid_set:
tile.starred = ezt.boolean(True)
issue = issue_dict[tile.issue_id]
tile.issue_url = tracker_helpers.FormatRelativeIssueURL(
issue.project_name, urls.ISSUE_DETAIL, id=tile.local_id)
tile.issue_ref = issue.project_name + ':' + str(tile.local_id)
return grid_data
| 5,336,111
|
def custom_address_validator(value, context):
"""
Address not required at all for this example,
skip default (required) validation.
"""
return value
| 5,336,112
|
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, backoff=0, debug=False):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn using the new single line for left and right lane line method.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
masked_lines = np.zeros(img.shape, dtype=np.uint8)
lane_info = draw_lines(masked_lines, lines, backoff=backoff, debug=debug)
return masked_lines, lane_info
| 5,336,113
|
def get_model(args) -> Tuple:
"""Choose the type of VQC to train. The normal vqc takes the latent space
data produced by a chosen auto-encoder. The hybrid vqc takes the same
data that an auto-encoder would take, since it has an encoder or a full
auto-encoder attached to it.
Args:
args: Dictionary of hyperparameters for the vqc.
Returns:
An instance of the vqc object with the given specifications (hyperparams).
"""
qdevice = get_qdevice(
args["run_type"],
wires=args["nqubits"],
backend_name=args["backend_name"],
config=args["config"],
)
if args["hybrid"]:
vqc_hybrid = VQCHybrid(qdevice, device="cpu", hpars=args)
return vqc_hybrid
vqc = VQC(qdevice, args)
return vqc
| 5,336,114
|
def get_launch_template_constraint_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLaunchTemplateConstraintResult]:
"""
Resource Type definition for AWS::ServiceCatalog::LaunchTemplateConstraint
"""
...
| 5,336,115
|
def mag_var_scatter(model_dict, gradient_var_list, no_of_dims, rd = None, rev = None):
"""
Create a scatter plot of gradient of model vs. variance for each dimension
of the data
"""
f, axarr = plt.subplots(no_of_dims, 1, sharex=True, figsize=(12,20))
for i in range(no_of_dims):
grad_v_curr = gradient_var_list[i]
        grad_xs, grad_ys = zip(*grad_v_curr)
        axarr[i].plot(grad_xs, grad_ys, 'bo')
ax = f.add_subplot(111, frameon=False)
# Turn off axis lines and ticks of the big subplot
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='None', top='off', bottom='off', left='off',
right='off')
plt.xlabel('Standard deviation of component')
plt.ylabel(r'Magnitude of coefficient of $w$')
plt.title('Magnitude-variance scatter plot')
abs_path = resolve_path()
rel_path_p = 'plots/'
abs_path_p = os.path.join(abs_path, rel_path_p)
fname = get_svm_model_name(model_dict, rd, rev) + '_scatter'
plt.savefig(abs_path_p + fname, bbox_inches = 'tight')
| 5,336,116
|
def tail_ratio(returns):
"""
Determines the ratio between the right (95%) and left tail (5%).
For example, a ratio of 0.25 means that losses are four times
as bad as profits.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
Returns
-------
float
tail ratio
"""
return ep.tail_ratio(returns)
| 5,336,117
|
def get_trajectory_for_weight(simulation_object, weight):
"""
    :param simulation_object: simulation environment exposing ``find_optimal_path``
    :param weight: weight vector for which to compute an optimal trajectory
    :return: dict with the weight ("w"), the feature values ("phi"), and the controls
"""
print(simulation_object.name+" - get trajectory for w=", weight)
controls, features, _ = simulation_object.find_optimal_path(weight)
weight = list(weight)
features = list(features)
return {"w": weight, "phi": features, "controls": controls}
| 5,336,118
|
def UnNT(X, Z, N, T, sampling_type):
"""Computes reshuffled block-wise complete U-statistic."""
return np.mean([UnN(X, Z, N, sampling_type=sampling_type)
for _ in range(T)])
| 5,336,119
|
def boolean_matrix_of_image(image_mat, cutoff=0.5):
"""
Make a bool matrix from the input image_mat
:param image_mat: a 2d or 3d matrix of ints or floats
:param cutoff: The threshold to use to make the image pure black and white. Is applied to the max-normalized matrix.
:return:
"""
if not isinstance(image_mat, np.ndarray):
image_mat = np.array(image_mat)
if image_mat.ndim == 3:
image_mat = image_mat.sum(axis=2)
elif image_mat.ndim > 3 or image_mat.ndim == 1:
raise ValueError("The image_mat needs to have 2 or 3 dimensions")
if image_mat.dtype != np.dtype('bool'):
image_mat = image_mat.astype('float')
image_mat = image_mat / image_mat.max()
image_mat = image_mat > cutoff
return image_mat
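# Small usage sketch for boolean_matrix_of_image with made-up pixel values.
import numpy as np

_img = np.array([[0.1, 0.9],
                 [0.4, 0.6]])
# After max-normalization the values are roughly [[0.11, 1.0], [0.44, 0.67]],
# so with the default cutoff of 0.5 only the right column is True.
assert boolean_matrix_of_image(_img).tolist() == [[False, True], [False, True]]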
| 5,336,120
|
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
"""Pool elements across the last dimension.
Useful to convert a list of vectors into a single vector so as
to get a representation of a set.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: the pooling type to use, MAX or AVR
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
Returns:
A tensor of shape [batch_size, input_dims] containing the sequences of
transformed vectors.
"""
    if mask is not None:
        # mask: [batch_size, sequence_length] -> [batch_size, sequence_length, 1]
        mask = mask.unsqueeze(2).to(inputs.dtype)
        # zero out padded positions element-wise
        inputs = inputs * mask
    if pooling_type == "MAX":
        output, _ = torch.max(inputs, 1, keepdim=False)
    elif pooling_type == "AVR":
        if mask is not None:
            output = torch.sum(inputs, 1, keepdim=False)
            # count of valid elements per sequence, clamped to avoid division by zero
            num_elems = torch.sum(mask, 1, keepdim=False)
            output = output / torch.clamp(num_elems, min=1)
        else:
            output = torch.mean(inputs, dim=1)
    return output
| 5,336,121
|
def select_object(object):
""" Select specific object.
Parameters:
object (obj): Object to select.
Returns:
None
"""
object.select = True
| 5,336,122
|
def _monte_carlo_trajectory_sampler(
time_horizon: int = None,
env: DynamicalSystem = None,
policy: BasePolicy = None,
state: np.ndarray = None,
):
"""Monte-Carlo trajectory sampler.
Args:
        time_horizon: Number of time steps to simulate.
        env: The system to sample from.
        policy: The policy applied to the system during sampling.
        state: The initial state used to seed the trajectory.
    Returns:
        A generator function that yields the sequence of visited states.
"""
@sample_generator
def _sample_generator():
state_sequence = []
state_sequence.append(state)
env.state = state
time = 0
for t in range(time_horizon):
action = policy(time=time, state=env.state)
next_state, cost, done, _ = env.step(time=t, action=action)
state_sequence.append(next_state)
time += 1
yield state_sequence
return _sample_generator
| 5,336,123
|
def pi_mult(diff: float) -> int:
"""
    Compute the integer multiplier by which 2*pi must be scaled to compensate a phase discontinuity.
    :param diff: phase difference between two cells of the matrix
    :return: an integer
"""
return int(0.5 * (diff / pi + 1)) if diff > 0 else int(0.5 * (diff / pi - 1))
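# Illustrative values only: the returned integer is the number of 2*pi wraps implied
# by the phase jump (its sign follows the sign of the jump).
from math import pi

assert pi_mult(1.5 * pi) == 1
assert pi_mult(-2.5 * pi) == -1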
| 5,336,124
|
def get_integer(val=None, name="value", min_value=0, default_value=0):
"""Returns integer value from input, with basic validation
Parameters
----------
val : `float` or None, default None
Value to convert to integer.
name : `str`, default "value"
What the value represents.
min_value : `float`, default 0
Minimum allowed value.
default_value : `float` , default 0
Value to be used if ``val`` is None.
Returns
-------
val : `int`
Value parsed as an integer.
"""
if val is None:
val = default_value
try:
orig = val
val = int(val)
except ValueError:
raise ValueError(f"{name} must be an integer")
else:
if val != orig:
warnings.warn(f"{name} converted to integer {val} from {orig}")
if not val >= min_value:
raise ValueError(f"{name} must be >= {min_value}")
return val
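# Usage sketch for get_integer; the values below are arbitrary examples.
assert get_integer(5, name="n_bins") == 5
assert get_integer(None, default_value=3) == 3
# A non-integer value is truncated and a conversion warning is emitted.
assert get_integer(2.5, name="n_bins") == 2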
| 5,336,125
|
def bb_moments_raincloud(region_idx=None, parcellation='aparc', title=''):
"""Stratify regional data according to BigBrain statistical moments (authors: @caseypaquola, @saratheriver)
Parameters
----------
region_idx : ndarray, shape = (n_val,)
Indices of regions to be included in analysis.
parcellation : string, optional
Name of parcellation. Options are 'aparc', 'schaefer_100', 'schaefer_200', 'schaefer_300',
'schaefer_400', 'glasser_360'. Default is 'aparc'.
title : string, optional
Title of raincloud plot. Default is empty.
Returns
-------
figure
Raincloud plot.
"""
def prctile(x, p):
"""Matlab-like percentile function (author: someone from the internet)"""
p = np.asarray(p, dtype=float)
n = len(x)
p = (p - 50) * n / (n - 1) + 50
p = np.clip(p, 0, 100)
return np.percentile(x, p)
# Load BigBrain statistical moments (mean, skewness)
bb_pth = os.path.dirname(os.path.dirname(__file__)) + '/histology/bb_moments_' + parcellation + '.csv'
bb_moments_aparc = np.loadtxt(bb_pth, delimiter=',', dtype=float)
# Initiate figure and axes
fig, axs = plt.subplots(1, 1, figsize=(15, 5))
axs2 = [axs.twinx(), axs.twinx()]
# Plot first moment at the top
inv = [(ii + 1) * 2 for ii in reversed(range(bb_moments_aparc.shape[0]))]
# Moments colors
spec = ['#9e0142', '#66c2a5']
# Loop over BigBrain moments
for ii in range(bb_moments_aparc.shape[0]):
# for ii in range(1):
jj = inv[ii]
# Random numbers to scatter points
rando = [(random.random() * .3) + (jj - 0.15) for rr in range(bb_moments_aparc[ii, region_idx].shape[1])]
# Scatter plot
axs2[ii].scatter(bb_moments_aparc[ii, region_idx], rando, c=spec[ii], alpha=0.88,
linewidth=0.88, edgecolors='w', s=122)
# Density distribution
data = sns.distplot(bb_moments_aparc[ii, region_idx], hist=False, kde=True, ax=axs2[ii]).get_lines()[0].get_data()
axs2[ii].fill_between(data[0], (jj + 0.3), data[1] + (jj + 0.3), facecolor=spec[ii])
# In-house box plot
qr = prctile(bb_moments_aparc[ii, region_idx].flatten(), [25, 75])
rect = pat.FancyBboxPatch((qr[0] + 0.01, jj - 0.1), qr[1] - qr[0] - 0.02, 0.2, fc=spec[ii], alpha=0.41,
ec=None, boxstyle="round,pad=0.01")
rectout = pat.FancyBboxPatch((qr[0] + 0.01, jj - 0.1), qr[1] - qr[0] - 0.02, 0.2, alpha=.88,
ec='k', boxstyle="round,pad=0.01", fill=False, lw=1.5)
axs2[ii].add_patch(rect)
axs2[ii].add_patch(rectout)
# Median line
axs2[ii].plot([np.median(bb_moments_aparc[ii, region_idx]), np.median(bb_moments_aparc[ii, region_idx])],
[jj - 0.1, jj + 0.1], lw=3, color='k')
        # Detect outliers, and if any, exclude them from the whiskers
mad = 3 * median_absolute_deviation(bb_moments_aparc[ii, region_idx], axis=1)
if np.argwhere(np.abs(bb_moments_aparc[ii, region_idx]) > mad).shape[0] == 0:
mini = np.nanmin(bb_moments_aparc[ii, region_idx])
maxi = np.nanmax(bb_moments_aparc[ii, region_idx])
else:
            mat = np.abs(bb_moments_aparc[ii, region_idx])
            mat = np.where(mat > mad, np.nan, mat)
            mini = np.nanmin(mat)
            maxi = np.nanmax(mat)
axs2[ii].plot([mini, qr[0]], [jj, jj], lw=1.5, color='k')
axs2[ii].plot([qr[1], maxi], [jj, jj], lw=1.5, color='k')
# Figure axes and other things to prettify
axs2[ii].set_ylim([1.5, 5.5])
axs2[ii].set_xlim([-1.6, 1.6])
fig.tight_layout()
sns.despine(fig=fig, ax=axs2[ii])
axs2[ii].axes.get_yaxis().set_ticks([])
axs2[ii].set_ylabel('')
axs.set_ylim([1.5, 5.5])
axs.tick_params(axis='y', length=0, rotation=90, labelsize=16)
axs.tick_params(axis='x', length=0, labelsize=16)
axs.set_yticks((2.75, 4.75))
axs.set_yticklabels(('Skewness', 'Mean'))
# Add title
if title:
plt.title(title)
return fig, axs, axs2
| 5,336,126
|
def release(cohesin, occupied, args):
"""
    The opposite of capture: release cohesins from CTCF
"""
if not cohesin.any("CTCF"):
return cohesin # no CTCF: no release necessary
# attempting to release either side
for side in [-1, 1]:
if (np.random.random() < args["ctcfRelease"][side].get(cohesin[side].pos, 0)) and (cohesin[side].attrs["CTCF"]):
cohesin[side].attrs["CTCF"] = False
return cohesin
| 5,336,127
|
def validate_build_dependency(key: str, uri: str) -> None:
"""
Raise an exception if the key in dependencies is not a valid package name,
or if the value is not a valid IPFS URI.
"""
validate_package_name(key)
# validate is supported content-addressed uri
if not is_ipfs_uri(uri):
raise EthPMValidationError(f"URI: {uri} is not a valid IPFS URI.")
| 5,336,128
|
def assert_allclose(actual: float, desired: numpy.float64, err_msg: str):
"""
usage.scipy: 1
usage.sklearn: 1
"""
...
| 5,336,129
|
def set_uuids_from_yaml(args):
"""Set uuids from a yaml mapping. Useful for migration to uuids.
:param args: Argparse namespace object with filename
:type args: namespace
"""
with open(args.filename, 'r') as f:
mapping = yaml.safe_load(f)
for uuid, envdict in mapping.items():
print("{} {} ==> {}".format(
envdict['account_number'], envdict['name'], uuid
))
if not args.list:
set_uuid(str(envdict['account_number']), envdict['name'], uuid)
print("")
| 5,336,130
|
def cosine(u, v):
"""
d = cosine(u, v)
Computes the Cosine distance between two n-vectors u and v,
(1-uv^T)/(||u||_2 * ||v||_2).
"""
u = np.asarray(u)
v = np.asarray(v)
return (1.0 - (np.dot(u, v.T) / \
(np.sqrt(np.dot(u, u.T)) * np.sqrt(np.dot(v, v.T)))))
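# Quick sanity checks for cosine; the vectors are arbitrary.
assert cosine([1, 0, 0], [0, 1, 0]) == 1.0           # orthogonal vectors
assert abs(cosine([1, 2, 3], [1, 2, 3])) < 1e-12     # identical vectors, up to rounding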
| 5,336,131
|
def get_data_from_redis_key(
label=None,
client=None,
host=None,
port=None,
password=None,
db=None,
key=None,
expire=None,
decompress_df=False,
serializer='json',
encoding='utf-8'):
"""get_data_from_redis_key
:param label: log tracking label
:param client: initialized redis client
    :param host: redis host, used to build a client when ``client`` is not set
    :param port: redis port, used to build a client when ``client`` is not set
    :param password: redis password, used to build a client when ``client`` is not set
    :param db: redis db, used to build a client when ``client`` is not set
    :param key: redis key to fetch
    :param expire: not used yet - redis expire
:param decompress_df: used for decompressing
``pandas.DataFrame`` automatically
:param serializer: not used yet - support for future
pickle objects in redis
:param encoding: format of the encoded key in redis
"""
decoded_data = None
data = None
rec = {
'data': data
}
res = build_result.build_result(
status=ae_consts.NOT_RUN,
err=None,
rec=rec)
log_id = label if label else 'get-data'
try:
use_client = client
if not use_client:
log.debug(
f'{log_id} - get key={key} new '
f'client={host}:{port}@{db}')
use_client = redis.Redis(
host=host,
port=port,
password=password,
db=db)
else:
log.debug(f'{log_id} - get key={key} client')
# create Redis client if not set
# https://redis-py.readthedocs.io/en/latest/index.html#redis.StrictRedis.get # noqa
raw_data = use_client.get(
name=key)
if raw_data:
if decompress_df:
try:
data = zlib.decompress(
raw_data).decode(
encoding)
rec['data'] = json.loads(data)
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
except Exception as f:
if (
'while decompressing data: '
'incorrect header check') in str(f):
data = None
log.critical(
f'unable to decompress_df in redis_key={key} '
f'ex={f}')
else:
log.error(
f'failed decompress_df in redis_key={key} '
f'ex={f}')
raise f
# allow decompression failure to fallback to previous method
if not data:
log.debug(f'{log_id} - decoding key={key} encoding={encoding}')
decoded_data = raw_data.decode(encoding)
log.debug(
f'{log_id} - deserial key={key} serializer={serializer}')
if serializer == 'json':
data = json.loads(decoded_data)
elif serializer == 'df':
data = decoded_data
else:
data = decoded_data
if data:
if ae_consts.ev('DEBUG_REDIS', '0') == '1':
log.info(
f'{log_id} - found key={key} '
f'data={ae_consts.ppj(data)}')
else:
log.debug(f'{log_id} - found key={key}')
# log snippet - if data
rec['data'] = data
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
else:
log.debug(f'{log_id} - no data key={key}')
return build_result.build_result(
status=ae_consts.SUCCESS,
err=None,
rec=rec)
except Exception as e:
err = (
f'{log_id} failed - redis get from decoded={decoded_data} '
f'data={data} key={key} ex={e}')
log.error(err)
res = build_result.build_result(
status=ae_consts.ERR,
err=err,
rec=rec)
# end of try/ex for getting redis data
return res
| 5,336,132
|
def main():
"""
    TODO: create a "boggle" solver that matches every existing word in the dictionary against the 4x4 character input
"""
word_lst = []
    # Lay out a 4 x 4 grid of letters; if the input does not match the expected format, print "Illegal input"
for i in range(4):
word = input(str(i + 1) + " row of letters: ")
row_lst = []
if len(word) != 7:
print("Illegal input")
break
for j in range(4):
row_lst += word[(2 * j)].lower()
word_lst.append(row_lst)
start = time.time()
####################
boggle(word_lst)
####################
end = time.time()
print('----------------------------------')
print(f'The speed of your boggle algorithm: {end - start} seconds.')
| 5,336,133
|
def _train_model(
train_iter: Iterator[DataBatch],
test_iter: Iterator[DataBatch],
model_type: str,
num_train_iterations: int = 10000,
learning_rate: float = 1e-5
) -> Tuple[Tuple[Any, Any], Tuple[onp.ndarray, onp.ndarray]]:
"""Train a model and return weights and train/test loss."""
batch = next(train_iter)
key = jax.random.PRNGKey(0)
loss_fns = _loss_fns_for_model_type(model_type)
p, s = loss_fns.init(key, batch["feats"], batch["time"])
opt = opt_base.Adam(learning_rate=learning_rate)
opt_state = opt.init(p, s)
@jax.jit
def update(opt_state, key, feats, times):
key, key1 = jax.random.split(key)
p, s = opt.get_params_state(opt_state)
value_and_grad_fn = jax.value_and_grad(loss_fns.apply, has_aux=True)
(loss, s), g = value_and_grad_fn(p, s, key1, feats, times)
next_opt_state = opt.update(opt_state, g, loss=loss, model_state=s, key=key)
return next_opt_state, key, loss
train_loss = []
test_loss = []
for i in range(num_train_iterations):
batch = next(train_iter)
opt_state, key, unused_loss = update(opt_state, key, batch["feats"],
batch["time"])
if (i < 100 and i % 10 == 0) or i % 100 == 0:
p, s = opt.get_params_state(opt_state)
train_loss.append(
onp.asarray(eval_many(p, s, key, train_iter, model_type=model_type)))
test_loss.append(
onp.asarray(eval_many(p, s, key, test_iter, model_type=model_type)))
print(i, train_loss[-1], test_loss[-1])
return (p, s), (onp.asarray(train_loss), onp.asarray(test_loss))
| 5,336,134
|
def main(_args: Sequence[str]) -> int:
"""Main program."""
config = create_configuration()
generator = create_generator(config)
while True:
if os.path.exists(config.trigger_stop_file):
warning("Stopping due to existence of stop trigger file.")
return 0
debug('Generating new discovery map.')
res = generator.update_discovery_map()
if res != 0:
warning("Envoy configuration generator returned {code}", code=res)
if config.exit_on_generation_failure:
warning("Stopping due to exit-on-failure.")
return res
time.sleep(config.failure_sleep)
else:
time.sleep(config.refresh_time)
| 5,336,135
|
def test_ordered_node_next_child(db, version_relation, version_pids,
build_pid, recids):
"""Test the PIDNodeOrdered next_child method."""
parent_pid = build_pid(version_pids[0]['parent'])
ordered_parent_node = PIDNodeOrdered(parent_pid, version_relation)
assert ordered_parent_node.next_child(version_pids[0]['children'][0]) == \
version_pids[0]['children'][1]
# Check that the next child can be retrieved if there is a "hole" in the
# sequence of indices.
ordered_parent_node.remove_child(version_pids[0]['children'][1],
reorder=False)
del version_pids[0]['children'][1]
assert ordered_parent_node.next_child(version_pids[0]['children'][0]) == \
version_pids[0]['children'][1]
# Check that next_child returns None if there is no next child.
assert ordered_parent_node.next_child(version_pids[0]['children'][-1]) \
is None
| 5,336,136
|
def main(inargs):
"""Run the program."""
cube, history = gio.combine_files(inargs.infiles, inargs.var)
if inargs.annual:
cube = timeseries.convert_to_annual(cube, aggregation='mean', days_in_month=True)
if inargs.flux_to_mag:
cube = uconv.flux_to_magnitude(cube)
dim_coord_names = [coord.name() for coord in cube.dim_coords]
assert dim_coord_names[0] in ['time', 'year']
cube.data = numpy.cumsum(cube.data, axis=0)
cube.attributes['history'] = cmdprov.new_log(git_repo=repo_dir, infile_history={inargs.infiles[0]: history[0]})
iris.save(cube, inargs.outfile)
| 5,336,137
|
def swig_base_TRGBPixel_getMin():
"""swig_base_TRGBPixel_getMin() -> CRGBPixel"""
return _Core.swig_base_TRGBPixel_getMin()
| 5,336,138
|
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
    for model_class in models.__dict__.values():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
| 5,336,139
|
def load_wavefunction(file: TextIO) -> Wavefunction:
"""Load a qubit wavefunction from a file.
Args:
file (str or file-like object): the name of the file, or a file-like object.
Returns:
wavefunction (pyquil.wavefunction.Wavefunction): the wavefunction object
"""
if isinstance(file, str):
with open(file, 'r') as f:
data = json.load(f)
else:
data = json.load(file)
wavefunction = Wavefunction(convert_dict_to_array(data['amplitudes']))
return wavefunction
| 5,336,140
|
def delete_alias(request, DOMAIN, ID):
"""
Delete Alias based on ID
ENDPOINT : /api/v1/alias/:domain/:id
"""
FORWARD_EMAIL_ENDPOINT = f"https://api.forwardemail.net/v1/domains/{DOMAIN}/aliases/{ID}"
res = requests.delete(FORWARD_EMAIL_ENDPOINT, auth=(USERNAME, ''))
if res.status_code == 200:
print("Deleted")
return JsonResponse(res.json())
| 5,336,141
|
def clear_old_changes_sources():
""" Delete the "-changes.tar.gz" lines from the "sources" file. """
with open('sources', 'r') as f:
lines = f.readlines()
with open('sources', 'w') as f:
for line in lines:
if '-changes.tar.gz' not in line:
f.write(line)
| 5,336,142
|
def test_equals():
"""Test basic equality. Complex relationships are tested in test_material_run.test_deep_equals()."""
from citrine.resources.measurement_spec import MeasurementSpec as CitrineMeasurementSpec
from gemd.entity.object import MeasurementSpec as GEMDMeasurementSpec
gemd_obj = GEMDMeasurementSpec(
name="My Name",
notes="I have notes",
tags=["tag!"]
)
citrine_obj = CitrineMeasurementSpec(
name="My Name",
notes="I have notes",
tags=["tag!"]
)
assert gemd_obj == citrine_obj, "GEMD/Citrine equivalence"
citrine_obj.notes = "Something else"
assert gemd_obj != citrine_obj, "GEMD/Citrine detects difference"
| 5,336,143
|
def _generate_input_weights(
N,
dim_input,
dist="custom_bernoulli",
connectivity=1.0,
dtype=global_dtype,
sparsity_type="csr",
seed=None,
input_bias=False,
**kwargs,
):
"""Generate input or feedback weights for a reservoir.
Weights are drawn by default from a discrete Bernoulli random variable,
i.e. are always equal to 1 or -1. Then, they can be rescaled to a specific constant
using the `input_scaling` parameter.
Warning
-------
This function is deprecated since version v0.3.1 and will be removed in future
versions. Please consider using :py:func:`bernoulli` or :py:func:`random_sparse`
instead.
Parameters
----------
N: int
Number of units in the connected reservoir.
dim_input: int
Dimension of the inputs connected to the reservoir.
    dist: str, default to "custom_bernoulli"
A distribution name from :py:mod:`scipy.stats` module, such as "norm" or
"uniform". Parameters like `loc` and `scale` can be passed to the distribution
functions as keyword arguments to this function. Usual distributions for
internal weights are :py:class:`scipy.stats.norm` with parameters `loc` and
`scale` to obtain weights following the standard normal distribution,
or :py:class:`scipy.stats.uniform` with parameters `loc=-1` and `scale=2`
to obtain weights uniformly distributed between -1 and 1.
Can also have the value "custom_bernoulli". In that case, weights will be drawn
from a Bernoulli discrete random variable alternating between -1 and 1 and
drawing 1 with a probability `p` (default `p` parameter to 0.5).
    connectivity: float, default to 1.0
Also called density of the sparse matrix.
input_scaling: float or array, optional
If defined, then will rescale the matrix using this coefficient or array
of coefficients.
input_bias: bool, optional
'input_bias' parameter is deprecated. Bias should be initialized
separately from the input matrix.
If True, will add a row to the matrix to take into
account a constant bias added to the input.
dtype : numpy.dtype, default to numpy.float64
A Numpy numerical type.
sparsity_type : {"csr", "csc", "dense"}, default to "csr"
If connectivity is inferior to 1 and shape is only 2-dimensional, then the
function will try to use one of the Scipy sparse matrix format ("csr" or "csc").
Else, a Numpy array ("dense") will be used.
seed : optional
Random generator seed. Default to the global value set with
:py:func:`reservoirpy.set_seed`.
**kwargs : optional
Arguments for the scipy.stats distribution.
Returns
-------
Numpy array or callable
If a shape is given to the initializer, then returns a matrix.
Else, returns a function partialy initialized with the given keyword parameters,
which can be called with a shape and returns a matrix.
"""
warnings.warn(
"'generate_input_weights' is deprecated since v0.3.1 and will be removed in "
"future versions. Consider using 'normal', 'uniform' or 'random_sparse'.",
DeprecationWarning,
)
if input_bias:
warnings.warn(
"'input_bias' parameter is deprecated. Bias should be initialized "
"separately from the input matrix.",
DeprecationWarning,
)
dim_input += 1
return _random_sparse(
N,
dim_input,
connectivity=connectivity,
dtype=dtype,
dist=dist,
sparsity_type=sparsity_type,
seed=seed,
**kwargs,
)
| 5,336,144
|
def _get_variable_name(param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
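# Example behaviour with TensorFlow-style tensor names; the names are illustrative.
assert _get_variable_name("layer_0/kernel:0") == "layer_0/kernel"
assert _get_variable_name("global_step") == "global_step"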
| 5,336,145
|
def addGlider(i, j, grid):
"""adds a glider with top left cell at (i, j)"""
glider = np.array([[0, 0, 255],
[255, 0, 255],
[0, 255, 255]])
grid[i:i+3, j:j+3] = glider
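# Usage sketch: stamp a glider into an 8x8 Game-of-Life grid (the grid size is arbitrary).
import numpy as np

_grid = np.zeros((8, 8), dtype=int)
addGlider(1, 1, _grid)
assert _grid[1:4, 1:4].sum() == 5 * 255   # the glider pattern has five live (255) cells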
| 5,336,146
|
def np_gather(params, indices, axis=0, batch_dims=0):
"""numpy gather"""
if batch_dims == 0:
return gather(params, indices)
result = []
    if batch_dims == 1:
        # adjust the gather axis once, outside the loop, to account for the batch dim
        axis = axis - batch_dims if axis - batch_dims > 0 else 0
        for p, i in zip(params, indices):
            r = gather(p, i, axis=axis)
            result.append(r)
        return np.stack(result)
for p, i in zip(params[0], indices[0]):
r = gather(p, i, axis=axis)
result.append(r)
res = np.stack(result)
return res.reshape((1,) + res.shape)
| 5,336,147
|
def texture(data):
"""Compute the texture of data.
Compute the texture of the data by comparing values with a 3x3 neighborhood
(based on :cite:`Gourley2007`). NaN values in the original array have
NaN textures.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
multi-dimensional array with shape (..., number of beams, number
of range bins)
Returns
------
texture : :class:`numpy:numpy.ndarray`
array of textures with the same shape as data
"""
# one-element wrap-around padding
x = np.pad(data, 1, mode='wrap')
# set first and last range elements to NaN
x[:, 0] = np.nan
x[:, -1] = np.nan
# get neighbours using views into padded array
x1 = x[..., :-2, 1:-1] # center:2
x2 = x[..., 1:-1, :-2] # 4
x3 = x[..., 2:, 1:-1] # 8
x4 = x[..., 1:-1, 2:] # 6
x5 = x[..., :-2, :-2] # 1
x6 = x[..., :-2, 2:] # 3
x7 = x[..., 2:, 2:] # 9
x8 = x[..., 2:, :-2] # 7
# stack arrays
xa = np.array([x1, x2, x3, x4, x5, x6, x7, x8])
# get count of valid neighbouring pixels
xa_valid_count = np.count_nonzero(~np.isnan(xa), axis=0)
# root mean of squared differences
rmsd = np.sqrt(np.nansum((xa - data) ** 2, axis=0) / xa_valid_count)
# reinforce that NaN values should have NaN textures
rmsd[np.isnan(data)] = np.nan
return rmsd
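# Shape-preservation sketch for texture on a synthetic (beams, range bins) field;
# the array size is arbitrary.
import numpy as np

_field = np.random.default_rng(0).random((36, 10))
assert texture(_field).shape == _field.shape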
| 5,336,148
|
def joos_2013_monte_carlo(
runs: int = 100, t_horizon: int = 1001, **kwargs
) -> Tuple[pd.DataFrame, np.ndarray]:
"""Runs a monte carlo simulation for the Joos_2013 baseline IRF curve.
This function uses uncertainty parameters for the Joos_2013 curve calculated by
Olivie and Peters (2013): https://esd.copernicus.org/articles/4/267/2013/
Parameters
----------
runs : int
Number of runs for Monte Carlo simulation. Must be >1.
t_horizon : int
Length of the time horizon over which baseline curve is
calculated (years)
Returns
-------
summary : pd.DataFrame
        Dataframe with 'mean', '+2sigma', '-2sigma', '5th', and '95th' columns
        summarizing results of the Monte Carlo simulation.
results : np.ndarray
Results from all Monte Carlo runs.
"""
if runs <= 1:
raise ValueError('number of runs must be >1')
results = np.zeros((t_horizon, runs))
# Monte Carlo simulations
# sigma and x are from Olivie and Peters (2013) Table 5 (J13 values)
# They are the covariance and mean arrays for CO2 IRF uncertainty
sigma = np.array(
[
[0.129, -0.058, 0.017, -0.042, -0.004, -0.009],
[-0.058, 0.167, -0.109, 0.072, -0.015, 0.003],
[0.017, -0.109, 0.148, -0.043, 0.013, -0.013],
[-0.042, 0.072, -0.043, 0.090, 0.009, 0.006],
[-0.004, -0.015, 0.013, 0.009, 0.082, 0.013],
[-0.009, 0.003, -0.013, 0.006, 0.013, 0.046],
]
)
x = np.array([5.479, 2.913, 0.496, 0.181, 0.401, -0.472])
p_samples = multivariate_normal.rvs(x, sigma, runs)
p_df = pd.DataFrame(p_samples, columns=['t1', 't2', 't3', 'b1', 'b2', 'b3'])
p_exp = np.exp(p_df)
a1 = p_exp['b1'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
a2 = p_exp['b2'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
a3 = p_exp['b3'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
tau1 = p_exp['t1']
tau2 = p_exp['t2']
tau3 = p_exp['t3']
for count in np.arange(runs):
co2_kwargs = {
'a1': a1[count],
'a2': a2[count],
'a3': a3[count],
'tau1': tau1[count],
'tau2': tau2[count],
'tau3': tau3[count],
}
irf = joos_2013(t_horizon, **co2_kwargs)
results[:, count] = irf
summary = pd.DataFrame(columns=['mean', '-2sigma', '+2sigma', '5th', '95th'])
summary['mean'] = np.mean(results, axis=1)
summary['+2sigma'] = summary['mean'] + (1.96 * np.std(results, axis=1))
summary['-2sigma'] = summary['mean'] - (1.96 * np.std(results, axis=1))
summary['5th'] = np.percentile(results, 5, axis=1)
summary['95th'] = np.percentile(results, 95, axis=1)
return summary, results
| 5,336,149
|
def pairwise_l1_loss(outputs, targets):
"""
"""
batch_size = outputs.size()[0]
if batch_size < 3:
pair_idx = np.arange(batch_size, dtype=np.int64)[::-1].copy()
pair_idx = torch.from_numpy(pair_idx).cuda()
else:
pair_idx = torch.randperm(batch_size).cuda()
#diff_outputs = torch.sigmoid(outputs) - torch.sigmoid(outputs[pair_idx])
diff_outputs = outputs - outputs[pair_idx]
diff_targets = targets - targets[pair_idx]
loss = nn.L1Loss()(diff_outputs, diff_targets)
return loss
| 5,336,150
|
def get_mwis(input_tree):
"""Get minimum weight independent set
"""
num_nodes = input_tree['num_nodes']
nodes = input_tree['nodes']
if num_nodes <= 0:
return []
weights = [0, nodes[0][0]]
for idx, node_pair in enumerate(nodes[1:], start=1):
node_weight, node_idx = node_pair
wis_prime = weights[idx]
prime2_index = max(1, idx) - 1
wis_prime2 = weights[prime2_index] + node_weight
weights.append(max(wis_prime, wis_prime2))
return weights
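# Usage sketch; the input layout ({'num_nodes': ..., 'nodes': [(weight, index), ...]})
# is inferred from the function body, and the weights are arbitrary.
_path_graph = {"num_nodes": 4, "nodes": [(1, 1), (4, 2), (5, 3), (4, 4)]}
# The returned DP table holds the best achievable weight after each node; the last
# entry (8) corresponds to picking the two nodes of weight 4.
assert get_mwis(_path_graph) == [0, 1, 4, 6, 8]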
| 5,336,151
|
def info(tid, alternate_token=False):
"""
Returns transaction information for the transaction
associated with the passed transaction ID
    :param tid: String with transaction ID.
:return: Dictionary with information about transaction.
"""
if not tid:
raise Exception('info() requires id parameter')
return r._get('/transactions/' + tid,
{
'oauth_token': alternate_token if alternate_token else c.access_token,
'client_id': c.client_id,
'client_secret': c.client_secret
})
| 5,336,152
|
def find_extrema(array, condition):
"""
Advanced wrapper of numpy.argrelextrema
Args:
array (np.ndarray): data array
condition (np.ufunc): e.g. np.less (<), np.great_equal (>=) and etc.
Returns:
np.ndarray: indexes of extrema
np.ndarray: values of extrema
"""
# get indexes of extrema
indexes = argrelextrema(array, condition)[0]
    # in case where the data line is horizontal and doesn't have any extrema -- return None
if len(indexes) == 0:
return None, None
# get values based on found indexes
values = array[indexes]
# calc the difference between nearby extrema values
diff_nearby_extrema = np.abs(np.diff(values, n=1))
    # form indexes where no twin extrema (the case when the data line is horizontal and has two extrema on the borders)
indexes = np.array([index for index, diff in zip(indexes, diff_nearby_extrema) if diff > 0] + [indexes[-1]])
# get values based on filtered indexes
values = array[indexes]
return indexes, values
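# Usage sketch for find_extrema with a small made-up signal.
import numpy as np

_signal = np.array([1.0, 3.0, 2.0, 5.0, 4.0])
_idx, _vals = find_extrema(_signal, np.greater)   # local maxima
assert _idx.tolist() == [1, 3]
assert _vals.tolist() == [3.0, 5.0]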
| 5,336,153
|
def compute_all_metrics_statistics(all_results):
"""Computes statistics of metrics across multiple decodings."""
statistics = {}
for key in all_results[0].keys():
values = [result[key] for result in all_results]
values = np.vstack(values)
statistics[key + "_MEAN"] = np.mean(values, axis=0)
statistics[key + "_STD"] = np.std(values, axis=0)
statistics[key + "_MIN"] = np.min(values, axis=0)
statistics[key + "_MAX"] = np.max(values, axis=0)
return statistics
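# Usage sketch with two fake decoding results sharing a single metric key.
import numpy as np

_fake_runs = [{"score": np.array([0.30])}, {"score": np.array([0.34])}]
_stats = compute_all_metrics_statistics(_fake_runs)
assert np.isclose(_stats["score_MEAN"][0], 0.32)
assert np.isclose(_stats["score_MAX"][0], 0.34)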
| 5,336,154
|
def parse_pubkey(expr: str) -> Tuple['PubkeyProvider', str]:
"""
Parses an individual pubkey expression from a string that may contain more than one pubkey expression.
:param expr: The expression to parse a pubkey expression from
:return: The :class:`PubkeyProvider` that is parsed as the first item of a tuple, and the remainder of the expression as the second item.
"""
end = len(expr)
comma_idx = expr.find(",")
next_expr = ""
if comma_idx != -1:
end = comma_idx
next_expr = expr[end + 1:]
return PubkeyProvider.parse(expr[:end]), next_expr
| 5,336,155
|
def XOR(*conditions):
"""
Creates an XOR clause between all conditions, e.g.
::
x <> 1 XOR y <> 2
*conditions* should be a list of column names.
"""
assert conditions
return _querybuilder.logical_xor(conditions)
| 5,336,156
|
def Interpolator(name=None, logic=None):
"""Returns an interpolator
:param name: Specify the name of the solver
:param logic: Specify the logic that is going to be used.
:returns: An interpolator
:rtype: Interpolator
"""
return get_env().factory.Interpolator(name=name, logic=logic)
| 5,336,157
|
def trim_to_min_length(bits):
"""Ensures 'bits' have min number of leading zeroes.
Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
"""
bits = bits[:] # copy
# make sure we can be split into 5 bit blocks
while bits.len % 5 != 0:
bits.prepend('0b0')
# Get minimal length by trimming leading 5 bits at a time.
while bits.startswith('0b00000'):
if len(bits) == 5:
break # v == 0
bits = bits[5:]
return bits
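# Usage sketch, assuming `bits` is a bitstring.BitArray (the .len/.prepend/.startswith
# API used above matches that package).
from bitstring import BitArray

assert trim_to_min_length(BitArray('0b0000000001')).bin == '00001'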
| 5,336,158
|
def log_softmax(x, dim):
"""logsoftmax operation, requires |dim| to be provided.
Have to do some weird gymnastics to get vectorization and stability.
"""
if isinstance(x, torch.Tensor):
return F.log_softmax(x, dim=dim)
elif isinstance(x, IntervalBoundedTensor):
out = F.log_softmax(x.val, dim)
# Upper-bound on z_i is u_i - log(sum_j(exp(l_j)) + (exp(u_i) - exp(l_i)))
ub_lb_logsumexp = torch.logsumexp(x.lb, dim, keepdim=True)
ub_relu = F.relu(x.ub - x.lb) # ReLU just to prevent cases where lb > ub due to rounding
# Compute log(exp(u_i) - exp(l_i)) = u_i + log(1 - exp(l_i - u_i)) in 2 different ways
# See https://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf for further discussion
# (1) When u_i - l_i <= log(2), use expm1
ub_log_diff_expm1 = torch.log(-torch.expm1(-ub_relu))
# (2) When u_i - l_i > log(2), use log1p
use_log1p = (ub_relu > 0.693)
ub_relu_log1p = torch.masked_select(ub_relu, use_log1p)
ub_log_diff_log1p = torch.log1p(-torch.exp(-ub_relu_log1p))
# NOTE: doing the log1p and then masked_select creates NaN's
# I think this is likely to be a subtle pytorch bug that unnecessarily
# propagates NaN gradients.
ub_log_diff_expm1.masked_scatter_(use_log1p, ub_log_diff_log1p)
ub_log_diff = x.ub + ub_log_diff_expm1
ub_scale = torch.max(ub_lb_logsumexp, ub_log_diff)
ub_log_partition = ub_scale + torch.log(
torch.exp(ub_lb_logsumexp - ub_scale)
+ torch.exp(ub_log_diff - ub_scale))
ub_out = x.ub - ub_log_partition
# Lower-bound on z_i is l_i - log(sum_{j != i}(exp(u_j)) + exp(l_i))
# Normalizing scores by max_j u_j works except when i = argmax_j u_j, u_i >> argmax_{j != i} u_j, and u_i >> l_i.
# In this case we normalize by the second value
lb_ub_max, lb_ub_argmax = torch.max(x.ub, dim, keepdim=True)
# Make `dim` the last dim for easy argmaxing along it later
dims = np.append(np.delete(np.arange(len(x.shape)), dim), dim).tolist()
# Get indices to place `dim` back where it was originally
rev_dims = np.insert(np.arange(len(x.shape) - 1), dim, len(x.shape) - 1).tolist()
# Flatten x.ub except for `dim`
ub_max_masked = x.ub.clone().permute(dims).contiguous().view(-1, x.shape[dim])
# Get argmax along `dim` and set max indices to -inf
ub_max_masked[np.arange(np.prod(x.shape) / x.shape[dim]), ub_max_masked.argmax(1)] = -float('inf')
# Reshape to make it look like x.ub again
ub_max_masked = ub_max_masked.view(np.array(x.shape).take(dims).tolist()).permute(rev_dims)
lb_logsumexp_without_argmax = ub_max_masked.logsumexp(dim, keepdim=True)
lb_ub_exp = torch.exp(x.ub - lb_ub_max)
lb_cumsum_fwd = torch.cumsum(lb_ub_exp, dim)
lb_cumsum_bwd = torch.flip(torch.cumsum(torch.flip(lb_ub_exp, [dim]), dim), [dim])
# Shift the cumulative sums so that i-th element is sum of things before i (after i for bwd)
pad_fwd = [0] * (2 * len(x.shape))
pad_fwd[-2*dim - 2] = 1
pad_bwd = [0] * (2 * len(x.shape))
pad_bwd[-2*dim - 1] = 1
lb_cumsum_fwd = torch.narrow(F.pad(lb_cumsum_fwd, pad_fwd), dim, 0, x.shape[dim])
lb_cumsum_bwd = torch.narrow(F.pad(lb_cumsum_bwd, pad_bwd), dim, 1, x.shape[dim])
lb_logsumexp_without_i = lb_ub_max + torch.log(lb_cumsum_fwd + lb_cumsum_bwd) # logsumexp over everything except i
lb_logsumexp_without_i.scatter_(dim, lb_ub_argmax, lb_logsumexp_without_argmax)
lb_scale = torch.max(lb_logsumexp_without_i, x.lb)
lb_log_partition = lb_scale + torch.log(
torch.exp(lb_logsumexp_without_i - lb_scale)
+ torch.exp(x.lb - lb_scale))
lb_out = x.lb - lb_log_partition
return IntervalBoundedTensor(out, lb_out, ub_out)
else:
raise TypeError(x)
| 5,336,159
|
def get_attributes_callback(get_offers_resp):
"""Callback fn for when get_attributes is called asynchronously"""
return AttributesProvider(get_offers_resp)
| 5,336,160
|
def display(choices, slug):
"""
Get the display name for a form choice based on its slug. We need this function
because we want to be able to store ACS data using the human-readable display
name for each field, but in the code we want to reference the fields using their
slugs, which are easier to change.
:param choices: A list of tuples representing Django-style form choices.
:param slug: The slug of the choice to select.
:return: The display name for the given slug.
"""
for choice_slug, display_name in choices:
if choice_slug == slug:
return display_name
    raise NameError('No choice for slug {} in {}'.format(slug, str(choices)))
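# Usage sketch with a hypothetical Django-style choices list.
_ROLE_CHOICES = [("admin", "Administrator"), ("viewer", "Read-only viewer")]
assert display(_ROLE_CHOICES, "viewer") == "Read-only viewer"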
| 5,336,161
|
def test_records_list_command(response, expected_result, mocker):
"""
Given:
- The records list command.
When:
- Mocking the response from the http request once to a response containing records, and once to a response with
no records.
Then:
- Validate that in the first case when the response contains records, the context has both `Class` and `Records`
keys. In the second case, when no records are in the response, validate the context has only the `Class` key.
"""
client = Client({})
mocker.patch.object(ServiceNowClient, 'http_request', return_value=response)
result = records_list_command(client, args={'class': 'test_class'})
assert expected_result == result[1]
| 5,336,162
|
def autocorrelation(data):
"""Autocorrelation routine.
Compute the autocorrelation of a given signal 'data'.
Parameters
----------
    data : ndarray
1D signal to compute the autocorrelation.
Returns
-------
ndarray
        the autocorrelation of the input signal.
"""
n_points = len(data)
variance = data.var()
data = data - data.mean()
corr = correlate(data, data, mode='full')[-n_points:]
result = corr / (variance * arange(n_points, 0, -1))
return result
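# Sanity-check sketch (assumes `correlate` and `arange` are the NumPy functions
# imported at module level, as the function body implies).
import numpy as np

_signal = np.sin(np.linspace(0, 8 * np.pi, 200))
assert np.isclose(autocorrelation(_signal)[0], 1.0)   # zero-lag autocorrelation is 1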
| 5,336,163
|
def yes_or_no(question, default="no"):
"""
    Returns True if the question is answered with yes, else False.
    default: the answer assumed when there is no input ("no" unless overridden).
"""
answers = "yes|[no]" if default == "no" else "[yes]|no"
prompt = "{} {}: ".format(question, answers)
while True:
answer = input(prompt).lower()
if answer == '':
answer = default
if answer in ['no', 'n']:
return False
elif answer in ['yes', 'y']:
return True
| 5,336,164
|
def delete_old_participant_details():
"""
    Submits a query request to Google BigQuery that runs a window function selecting only the most recently updated row per recipient id.
The resulting view is then materialized, the old recipient table deleted, and the new table renamed to replace the old one.
"""
query = """
CREATE TABLE IF NOT EXISTS gdliveproject.tests.updated AS (
WITH added_row AS (
SELECT
recipient_id,
name,
age,
occupation,
completed,
country,
campaign,
last_updated,
ROW_NUMBER() OVER(PARTITION BY recipient_id ORDER BY last_updated DESC) AS row_number
FROM gdliveproject.tests.recipients
)
SELECT
*
FROM added_row
WHERE row_number = 1
ORDER BY recipient_id
);
DROP TABLE gdliveproject.tests.recipients;
ALTER TABLE gdliveproject.tests.updated RENAME TO recipients
"""
job = client.query(query)
job.result()
logging.info("Old rids deleted")
| 5,336,165
|
def query_airnow(param, data_period, bbox, key=None):
"""Construct an AirNow API query request and parse response.
Args:
param (str):
The evaluation parameter for which to query data.
data_period (list):
List with two elements, the first is the start date and time for
the query and the second is the end date and time for the query.
The API is sequentially queried in monthly intervals, so the start
date will usually be something like '2021-01-01T00' and the end
date will follow as '2021-01-31T23'.
bbox (dict):
            Bounding box of latitude and longitude values for AirNow API
queries.
key (str):
User key for API authentication.
Returns:
data (pandas DataFrame):
Data returned by the API for the specified query parameter and
time period.
"""
if type(param) is str:
param_list = [param]
elif type(param) is list:
param_list = param
else:
raise TypeError('Invalid type specified for "param". Must be either '
'str or list.')
begin = data_period[0][:-3]
end = data_period[1][:-3]
print('..Query start:', begin)
print('..Query end:', end)
# API Items
urlbase = "http://www.airnowapi.org/aq/data/?"
dataType = "C"
dataformat = "text/csv"
verbose = "1" # bool
nowcastonly = "0" # bool
rawconc = "1" # bool
# Construct query URL
url = urlbase + 'startdate=' + str(data_period[0])
url += '&enddate=' + str(data_period[1])
url += '¶meters=' + ','.join(param_list)
url += '&bbox=' + str(bbox["minLong"]) + ','
url += str(bbox["minLat"]) + ','
url += str(bbox["maxLong"]) + ','
url += str(bbox["maxLat"])
url += '&datatype=' + str(dataType)
url += '&format=' + str(dataformat)
url += '&verbose=' + str(verbose)
url += '&nowcastonly=' + str(nowcastonly)
url += '&includerawconcentrations=' + str(rawconc)
url += '&api_key=' + str(key)
# Get query response
data = requests.get(url)
fmt_query_data = StringIO(data.text)
data = pd.read_csv(fmt_query_data, sep=',',
names=['Site_Lat', 'Site_Lon', 'DateTime',
'Param_Name', 'Param_NowCast_Value',
'Param_Unit', 'Param_Value', 'Site_Name',
'Agency', 'Site_AQS', 'Site_Full_AQS'])
if data.empty:
status = 'Failed'
else:
status = 'Success'
data['Site_AQS'] = data['Site_AQS'].astype(str)
state_id = data['Site_AQS'].str.slice(0, 2)
county_id = data['Site_AQS'].str.slice(2, 5)
site_id = data['Site_AQS'].str.slice(5, 9)
data['Site_AQS'] = (state_id + '-' + county_id + '-' + site_id)
site_name = list(i for i in data.Site_Name.unique())
site_aqs = list(i for i in data.Site_AQS.astype(str).unique())
site_lat = list(i for i in data.Site_Lat.astype(str).unique())
site_lon = list(i for i in data.Site_Lon.astype(str).unique())
print('..Query site(s):')
for name, aqs, lat, lon in zip(site_name, site_aqs,
site_lat, site_lon):
print('....Site name:', name)
print('......AQS ID:', aqs)
print('......Latitude:', "{0:7.4f}".format(float(lat)))
print('......Longitude:', "{0:7.4f}".format(float(lon)))
# Print warning if data from multiple sites are returned
if any(len(lst) > 1 for lst in [site_name, site_aqs,
site_lat, site_lon]):
print('..Warning: Query returned data from multiple sites.',
'\n..Site selection can be narrowed by reducing the '
'bounding box size.')
print('..Query Status:', status)
return data
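# Usage sketch: the bounding-box keys match those read above (minLong, minLat,
# maxLong, maxLat); the parameter name, coordinates, dates and API key are placeholders.
bbox = {"minLong": -75.30, "minLat": 39.80, "maxLong": -74.90, "maxLat": 40.10}
data_period = ["2021-01-01T00", "2021-01-31T23"]
pm_data = query_airnow("PM25", data_period, bbox, key="YOUR_AIRNOW_KEY")
print(pm_data.head())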
| 5,336,166
|
def test_topic_name_case_change(volttron_instance, database_client):
"""
    When the case of a topic name changes, check whether it is stored as a second topic.
    Expected result: topic lookup and query should be case insensitive.
"""
clean_db(database_client)
agent_uuid = install_historian_agent(volttron_instance,
mongo_agent_config())
try:
publisher = volttron_instance.build_agent()
oat_reading = round(random.uniform(30, 100), 14)
message = [{'FluffyWidgets': oat_reading}, {
'FluffyWidgets': {'units': 'F', 'tz': 'UTC', 'type': 'float'}}]
publisheddt = publish_data(publisher, BASE_ANALYSIS_TOPIC, message)
gevent.sleep(0.1)
lister = volttron_instance.build_agent()
topic_list = lister.vip.rpc.call('platform.historian',
'get_topic_list').get(timeout=5)
assert topic_list is not None
assert len(topic_list) == 1
assert 'FluffyWidgets' in topic_list[0]
result = lister.vip.rpc.call('platform.historian', 'query',
topic=BASE_ANALYSIS_TOPIC[
9:] + '/FluffyWidgets').get(
timeout=5)
assert result is not None
assert len(result['values']) == 1
assert isinstance(result['values'], list)
mongoizetimestamp = publisheddt[:-3] + '000+00:00'
assert result['values'][0] == [mongoizetimestamp, oat_reading]
message = [{'Fluffywidgets': oat_reading}, {
'Fluffywidgets': {'units': 'F', 'tz': 'UTC', 'type': 'float'}}]
publisheddt = publish_data(publisher, BASE_ANALYSIS_TOPIC, message)
gevent.sleep(0.1)
topic_list = lister.vip.rpc.call('platform.historian',
'get_topic_list').get(timeout=5)
assert topic_list is not None
assert len(topic_list) == 1
assert 'Fluffywidgets' in topic_list[0]
result = lister.vip.rpc.call(
'platform.historian', 'query',
topic=BASE_ANALYSIS_TOPIC[9:] + '/Fluffywidgets',
order="LAST_TO_FIRST").get(timeout=5)
assert result is not None
assert len(result['values']) == 2
assert isinstance(result['values'], list)
mongoizetimestamp = publisheddt[:-3] + '000+00:00'
assert result['values'][0] == [mongoizetimestamp, oat_reading]
finally:
volttron_instance.stop_agent(agent_uuid)
volttron_instance.remove_agent(agent_uuid)
| 5,336,167
|
def upload_to_py_pi():
"""
Upload the Transiter Python package inside the CI container to PyPI.
If this is not a build on master or a release tag, this is a no-op.
"""
if "pypi" not in get_artifacts_to_push():
return
print("Uploading to PyPI")
subprocess.run(
[
"docker",
"run",
"--env",
"TWINE_USERNAME=" + os.environ.get("TWINE_USERNAME"),
"--env",
"TWINE_PASSWORD=" + os.environ.get("TWINE_PASSWORD"),
"jamespfennell/transiter-ci:latest",
"distribute",
]
)
| 5,336,168
|
def compile(string):
"""
Compile a string to a template function for the path.
"""
return tokens_to_function(parse(string))
| 5,336,169
|
def flajolet_martin(data, k):
"""Estimates the number of unique elements in the input set values.
Inputs:
data: The data for which the cardinality has to be estimated.
k: The number of bits of hash to use as a bucket number. The number of buckets is 2^k
Output:
Returns the estimated number of unique items in the dataset
"""
total_buckets = 2 ** k
total_zeroes = []
for i in range(total_buckets):
total_zeroes.append(0)
for i in data:
        h = hash(str(i))  # convert the value to a string because Python hashes small integers to themselves
        bucket = h & (total_buckets - 1)  # the low k bits of the hash select the bucket
        bucket_hash = h >> k  # drop the bucket bits so they do not bias the trailing-zero count
        total_zeroes[bucket] = max(total_zeroes[bucket], zero_counter(bucket_hash))
return math.ceil(2 ** (float(sum(total_zeroes)) / total_buckets) * total_buckets * 0.79402)
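# The zero_counter helper is defined elsewhere in this module; the sketch below
# assumes it counts trailing zero bits of an integer, and uses made-up data.
import math

def zero_counter(h):
    """Assumed helper: number of trailing zero bits in h (0 for h == 0)."""
    count = 0
    while h != 0 and h & 1 == 0:
        h >>= 1
        count += 1
    return count

data = ["user-{}".format(i % 5000) for i in range(20000)]  # ~5000 distinct values
print(flajolet_martin(data, k=6))  # rough cardinality estimate; varies with the hash seed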
| 5,336,170
|
def makelist(filename, todo_default=['TODO', 'DONE']):
"""
Read an org-mode file and return a list of Orgnode objects
created from this file.
"""
ctr = 0
if isinstance(filename, str):
f = codecs.open(filename, 'r', 'utf8')
else:
f = filename
todos = set(todo_default) # populated from #+SEQ_TODO line
level = ''
heading = ""
bodytext = ""
tag1 = "" # The first tag enclosed in ::
alltags = set([]) # set of all tags in headline
sched_date = ''
deadline_date = ''
closed_date = ''
clocklist = []
datelist = []
rangelist = []
nodelist = []
propdict = dict()
for line in f:
ctr += 1
hdng = _RE_HEADING.search(line)
if hdng:
if heading: # we are processing a heading line
this_node = Orgnode(level, heading, bodytext, tag1, alltags)
if sched_date:
this_node.set_scheduled(sched_date)
sched_date = ""
if deadline_date:
this_node.set_deadline(deadline_date)
deadline_date = ''
if closed_date:
this_node.set_closed(closed_date)
closed_date = ''
if clocklist:
this_node.set_clock(clocklist)
clocklist = []
if datelist:
this_node.set_datelist(datelist)
datelist = []
if rangelist:
this_node.set_rangelist(rangelist)
rangelist = []
this_node.set_properties(propdict)
nodelist.append(this_node)
propdict = dict()
level = hdng.group(1)
heading = hdng.group(2)
bodytext = ""
(tag1, alltags, heading) = find_tags_and_heading(heading)
else: # we are processing a non-heading line
if line.startswith('#+SEQ_TODO'):
todos |= set(_RE_TODO_KWDS.findall(line))
continue
if line.find(':PROPERTIES:') >= 0:
continue
if line.find(':END:') >= 0:
continue
(prop_key, prop_val) = find_property(line)
if prop_key:
propdict[prop_key] = prop_val
continue
_sched_date = find_scheduled(line)
_deadline_date = find_deadline(line)
_closed_date = find_closed(line)
sched_date = _sched_date or sched_date
deadline_date = _deadline_date or deadline_date
closed_date = closed_date or _closed_date
if not _sched_date and not _deadline_date:
(dl, rl) = find_daterangelist(line)
datelist += dl
rangelist += rl
clock = find_clock(line)
if clock:
clocklist.append(clock)
if not (line.startswith('#') or _sched_date or _deadline_date
or clock or _closed_date):
bodytext = bodytext + line
# write out last node
this_node = Orgnode(level, heading, bodytext, tag1, alltags)
this_node.set_properties(propdict)
if sched_date:
this_node.set_scheduled(sched_date)
if deadline_date:
this_node.set_deadline(deadline_date)
if closed_date:
this_node.set_closed(closed_date)
closed_date = ''
if clocklist:
this_node.set_clock(clocklist)
clocklist = []
if datelist:
this_node.set_datelist(datelist)
datelist = []
if rangelist:
this_node.set_rangelist(rangelist)
rangelist = []
nodelist.append(this_node)
# using the list of TODO keywords found in the file
# process the headings searching for TODO keywords
for n in nodelist:
h = n.headline
todo_search = _RE_TODO_SRCH.search(h)
if todo_search:
if todo_search.group(1) in todos:
n.set_heading(todo_search.group(2))
n.set_todo(todo_search.group(1))
priority_search = _RE_PRTY_SRCH.search(n.headline)
if priority_search:
n.set_priority(priority_search.group(1))
n.set_heading(priority_search.group(2))
# set parent of nodes
ancestors = [None]
n1 = nodelist[0]
l1 = n1.level
for n2 in nodelist:
# n1, l1: previous node and its level
# n2, l2: this node and its level
l2 = n2.level
if l1 < l2:
ancestors.append(n1)
else:
while len(ancestors) > l2:
ancestors.pop()
if ancestors:
n2.set_parent(ancestors[-1])
n1 = n2
l1 = l2
return nodelist
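# Usage sketch (the file path is a placeholder); level and headline are
# attributes of the Orgnode objects built above.
nodes = makelist("notes.org")
for node in nodes:
    print(node.level, node.headline)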
| 5,336,171
|
def decode(encoded: list):
"""Problem 12: Decode a run-length encoded list.
Parameters
----------
encoded : list
The encoded input list
Returns
-------
list
The decoded list
Raises
------
TypeError
If the given argument is not of `list` type
"""
if not isinstance(encoded, list):
raise TypeError('The argument given is not of `list` type.')
decoded = []
for x in encoded:
if isinstance(x, list):
decoded.extend(x[0] * [x[1]])
else:
decoded.append(x)
return decoded
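# Quick illustration with a made-up run-length encoded list.
encoded = [[4, 'a'], 'b', [2, 'c'], 'd']
print(decode(encoded))  # ['a', 'a', 'a', 'a', 'b', 'c', 'c', 'd']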
| 5,336,172
|
def create_offset(set_point_value):
"""Docstring here (what does the function do)"""
offset_value = random.randint(-128, 128)
offset_value_incrementation = float(offset_value / 100)
return set_point_value - offset_value_incrementation
| 5,336,173
|
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
print(example)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
| 5,336,174
|
def startpeerusersync(
server, user_id, resync_interval=OPTIONS["Deployment"]["SYNC_INTERVAL"]
):
"""
Initiate a SYNC (PULL + PUSH) of a specific user from another device.
"""
user = FacilityUser.objects.get(pk=user_id)
facility_id = user.facility.id
device_info = get_device_info()
command = "sync"
common_job_args = dict(
keep_alive=True,
resync_interval=resync_interval,
job_id=hashlib.md5("{}::{}".format(server, user).encode()).hexdigest(),
extra_metadata=prepare_sync_task(
facility_id,
user_id,
user.username,
user.facility.name,
device_info["device_name"],
device_info["instance_id"],
server,
type="SYNCPEER/SINGLE",
),
)
job_data = None
# attempt to resume an existing session
sync_session = find_soud_sync_session_for_resume(user, server)
if sync_session is not None:
command = "resumesync"
# if resuming encounters an error, it should close the session to avoid a loop
job_data = prepare_soud_resume_sync_job(
server, sync_session.id, user_id, **common_job_args
)
# if not resuming, prepare normal job
if job_data is None:
job_data = prepare_soud_sync_job(
server, facility_id, user_id, **common_job_args
)
job_id = queue.enqueue(peer_sync, command, **job_data)
return job_id
| 5,336,175
|
def clean_url(str_text_raw):
"""This function eliminate a string URL in a given text"""
str_text = re.sub("url_\S+", "", str_text_raw)
str_text = re.sub("email_\S+", "", str_text)
str_text = re.sub("phone_\S+", "", str_text)
return(re.sub("http[s]?://\S+", "", str_text))
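# Small illustration with made-up text: the url_/email_/phone_ tokens and the
# http link are stripped, leaving the surrounding words.
sample = "reach us at email_info or phone_555 details at https://example.com/info url_ref"
print(clean_url(sample))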
| 5,336,176
|
def compare_strategies(strategy, baseline=always_roll(5)):
""" Вернуть среднее отношение побед STRATEGY против BASELINE """
as_first = 1 - make_average(play)(strategy, baseline)
as_second = make_average(play)(baseline, strategy)
return (as_first + as_second) / 2
| 5,336,177
|
def process_geo(
path_geo_file: Path,
*,
add_pop: bool = True,
add_neighbors: bool = True,
add_centroids: bool = False,
save_geojson: bool = False,
path_pop_file: Path = PATH_PA_POP,
path_output_geojson: Path = PATH_OUTPUT_GEOJSON,
) -> geopandas.GeoDataFrame:
"""
    Reads a given geographic file (e.g. geojson), converts it to a geopandas GeoDataFrame,
    and optionally adds population, neighboring-region, and centroid columns for each polygon.
    Args:
        path_geo_file (Path): Path to geographic file (e.g. geojson) that will be read.
        add_pop (bool): Adds a new field with the population for each county. Defaults to True.
        add_neighbors (bool): Adds a new column called NEIGHBORS listing all geographic regions that
            border each region. Defaults to True.
        add_centroids (bool, optional): Adds the centroid of each polygon. Defaults to False.
        save_geojson (bool): Whether to save the result as geojson. Defaults to False.
        path_pop_file (Path, optional): Path to CSV with population data to merge into the geo data. Defaults to PATH_PA_POP.
        path_output_geojson (Path, optional): Path to the output geojson file. Defaults to PATH_OUTPUT_GEOJSON.
"""
gdf = geopandas.read_file(path_geo_file)
# add population data
if add_pop:
df_pop = pd.read_csv(path_pop_file)
gdf = gdf.merge(df_pop, left_on="NAME", right_on="name", how="left")
gdf["population"] = gdf["population"].astype(int)
# add NEIGHBORS column
if add_neighbors:
gdf["NEIGHBORS"] = None
for index, country in gdf.iterrows():
            # get regions that are not disjoint from (i.e. touch) this one
neighbors = gdf[~gdf.geometry.disjoint(country.geometry)].NAME.tolist()
# remove own name from the list
neighbors = [name for name in neighbors if country.NAME != name]
# add names of neighbors as NEIGHBORS value
gdf.at[index, "NEIGHBORS"] = ", ".join(neighbors)
if add_centroids:
gdf["CENTROID"] = gdf["geometry"].centroid
if save_geojson:
gdf.to_file(path_output_geojson, driver="GeoJSON")
return gdf
| 5,336,178
|
def pytest_itemcollected(item):
"""Attach markers to each test which uses a fixture of one of the resources."""
if not hasattr(item, "fixturenames"):
return
fixturenames = set(item.fixturenames)
for resource_kind in _resource_kinds:
resource_fixture = "_{}_container".format(resource_kind)
if resource_fixture in fixturenames:
item.add_marker(resource_kind)
| 5,336,179
|
def nml_poisson(X, sum_x, sum_xxT, lmd_max=100):
"""
    Calculate the NML code length of the Poisson distribution. See the paper below:
    Yamanishi, Kenji, and Kohei Miyaguchi. "Detecting gradual changes from data stream using MDL-change statistics."
2016 IEEE International Conference on Big Data (Big Data). IEEE, 2016.
parameters:
X: data sequence
    sum_x: sum of the data sequence
    sum_xxT: sum of squared data (not used by this function)
lmd_max: the maximum value of lambda
returns:
NML code length
"""
n = len(X)
lmd_hat = sum_x / n
if lmd_hat == 0:
neg_log = np.sum(special.gammaln(X + 1))
else:
neg_log = -n * lmd_hat * np.log(lmd_hat) + \
n * lmd_hat + np.sum(special.gammaln(X + 1))
cpl = complexity_poisson(n, lmd_max)
return neg_log + cpl
| 5,336,180
|
def setup_metrics(app):
"""
Setup Flask app with prometheus metrics
"""
app.before_request(before_request)
app.after_request(after_request)
@app.route('/metrics')
def metrics():
# update k8s metrics each time this url is called.
global PROMETHEUS_METRICS
PROMETHEUS_METRICS = get_k8s_metrics()
return Response(prometheus_client.generate_latest(), mimetype='text/plain; version=0.0.4; charset=utf-8')
| 5,336,181
|
def _getFormat(fileformat):
"""Get the file format constant from OpenSSL.
:param str fileformat: One of ``'PEM'`` or ``'ASN1'``.
:raises OpenSSLInvalidFormat: If **fileformat** wasn't found.
:returns: ``OpenSSL.crypto.PEM`` or ``OpenSSL.crypto.ASN1`` respectively.
"""
fileformat = 'FILETYPE_' + fileformat
fmt = getattr(OpenSSL.crypto, fileformat, None)
if fmt is not None:
return fmt
else:
        raise OpenSSLInvalidFormat("Filetype format %r not found." % fileformat)
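# Behaviour sketch (requires pyOpenSSL; OpenSSLInvalidFormat is defined elsewhere
# in this module).
import OpenSSL.crypto

assert _getFormat('PEM') == OpenSSL.crypto.FILETYPE_PEM
assert _getFormat('ASN1') == OpenSSL.crypto.FILETYPE_ASN1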
| 5,336,182
|
def rewrite_tex_file(texpath, replacements, backup=False):
"""Rewrite a tex file, replacing ADS keys with INSPIRE keys.
Parameters
----------
texpath: PathLike
Path to tex file to rewrite
replacements: array of dict
Each dict has keys "ads_key", "insp_key", and "bib_str".
backup: bool, optional [default: False]
If True, first back up the tex file (using suffix ".bak.tex")
"""
texpath = Path(texpath)
with texpath.open("r") as texfile:
tex = texfile.read()
if backup:
with texpath.with_suffix(".bak.tex").open("w") as backupfile:
backupfile.write(tex)
for rep in replacements:
tex = tex.replace(rep["ads_key"], rep["insp_key"])
with texpath.open("w") as texfile:
texfile.write(tex)
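# Usage sketch: the file name and citation keys below are placeholders.
replacements = [
    {"ads_key": "2020arXiv200100001A", "insp_key": "Author:2020abc", "bib_str": "..."},
]
rewrite_tex_file("paper.tex", replacements, backup=True)  # also writes paper.bak.tex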
| 5,336,183
|
def upload_file(_file, directory):
""" Upload yang model into session storage """
f = None
filename = None
try:
if not os.path.exists(directory):
logging.debug('Creating session storage ..')
os.makedirs(directory)
if not os.path.exists(directory):
logging.error('Failed to create session storage ..')
return None
logging.debug('Copying file content ..')
f = tempfile.NamedTemporaryFile('w+', suffix='.py', dir=directory, delete=False)
fname = f.name
for chunk in _file.chunks():
f.write(chunk)
f.close()
parser = Parser(fname)
target_file = os.path.join(directory, parser.get_filename())
os.rename(fname, target_file)
filename = parser.get_filename()
    except Exception:
logging.exception('Failed to upload file: ')
finally:
logging.debug('Cleaning up ..')
if f is not None and os.path.exists(f.name):
logging.debug('Deleting ' + f.name)
os.remove(f.name)
return filename
| 5,336,184
|
def clip(
arg: ir.NumericValue,
lower: ir.NumericValue | None = None,
upper: ir.NumericValue | None = None,
) -> ir.NumericValue:
"""
Trim values at input threshold(s).
Parameters
----------
arg
Numeric expression
lower
Lower bound
upper
Upper bound
Returns
-------
NumericValue
Clipped input
"""
if lower is None and upper is None:
raise ValueError("at least one of lower and upper must be provided")
op = ops.Clip(arg, lower, upper)
return op.to_expr()
| 5,336,185
|
def human_time_duration(seconds: int) -> str:
"""For a passed-in integer (seconds), return a human-readable duration string.
"""
if seconds <= 1:
return '<1 second'
parts = []
for unit, div in TIME_DURATION_UNITS:
amount, seconds = divmod(int(seconds), div)
if amount > 0:
parts.append('{} {}{}'.format(amount, unit, "" if amount == 1 else "s"))
return ', '.join(parts)
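# TIME_DURATION_UNITS is defined elsewhere in the module; the tuple below is an
# assumed layout of (unit name, unit size in seconds) used only for this sketch.
TIME_DURATION_UNITS = (
    ('week', 60 * 60 * 24 * 7),
    ('day', 60 * 60 * 24),
    ('hour', 60 * 60),
    ('minute', 60),
    ('second', 1),
)

print(human_time_duration(3662))  # '1 hour, 1 minute, 2 seconds'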
| 5,336,186
|
def reindex_network_nodes(network):
"""Reindex the nodes of a channel network."""
node_reindexer = SegmentNodeReindexer()
network.for_each(node_reindexer)
return network
| 5,336,187
|
def check_header(argv=None):
"""Run aspell and report line number in which misspelled words are."""
argv = sys.argv[1:] if argv is None else argv
# Apparently the personal dictionary cannot be a relative path
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--exclude", nargs=1, type=_valid_file, required=False)
parser.add_argument("files", nargs="*", type=_valid_file)
cli_args = parser.parse_args(argv)
###
fnames = cli_args.files
if cli_args.exclude:
patterns = [_make_abspath(item) for item in _read_file(cli_args.exclude[0])]
exclude_filter = lambda x: not any(fnmatch(x, pattern) for pattern in patterns)
fnames = filter(exclude_filter, fnames)
###
fdict = {".py": "#", ".rst": "..", ".ini": "#", ".sh": "#", ".cfg": "#"}
retval = 0
for fname in fnames:
_, ext = os.path.splitext(fname)
if (ext in fdict) and _check_header(fname, StreamFile, fdict[ext]):
retval = 1
print(" " + fname.strip())
return retval
| 5,336,188
|
def center(win):
"""
centers a tkinter window
:param win: the root or Toplevel window to center
"""
win.update_idletasks()
width = win.winfo_width()
fm_width = win.winfo_rootx() - win.winfo_x()
win_width = width + 2 * fm_width
height = win.winfo_height()
title_bar_height = win.winfo_rooty() - win.winfo_y()
win_height = height + title_bar_height + fm_width
x = win.winfo_screenwidth() // 2 - win_width // 2
y = win.winfo_screenheight() // 2 - win_height // 2
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
win.deiconify()
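# Typical use with a Tk root window (sketch).
import tkinter as tk

root = tk.Tk()
root.geometry("400x300")
center(root)
root.mainloop()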
| 5,336,189
|
def test_d4_3_15v23_d4_3_15v23i(mode, save_output, output_format):
"""
naive xpathDefaultNamespace (exact uri of targetNamespace) test case
in complexType
"""
assert_bindings(
schema="ibmData/valid/D4_3_15/d4_3_15v23.xsd",
instance="ibmData/valid/D4_3_15/d4_3_15v23.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,336,190
|
def avg_arrays_1d(data, axis=None, weights=None, **kws):
"""Average list of 1D arrays or curves by interpolation on a reference axis
Parameters
----------
data : lists of lists
data_fmt : str
define data format
- "curves" -> :func:`curves_to_matrix`
- "lists" -> :func:`curves_to_matrix`
weights : None or array
weights for the average
Returns
-------
    ax, zavg : 1D arrays
        the common axis and the (weighted) average of the interpolated arrays
"""
data_fmt = kws.pop("data_fmt", "curves")
if data_fmt == "curves":
ax, mat = curves_to_matrix(data, axis=axis, **kws)
elif data_fmt == "lists":
ax, mat = lists_to_matrix(data, axis=axis, **kws)
else:
raise NameError("'data_fmt' not understood")
return ax, np.average(mat, axis=0, weights=weights)
| 5,336,191
|
def _is_double(arr):
"""
Return true if the array is doubles, false if singles, and raise an error if it's neither.
:param arr:
:type arr: np.ndarray, scipy.sparse.spmatrix
:return:
:rtype: bool
"""
# Figure out which dtype for data
if arr.dtype == np.float32:
return False
elif arr.dtype == np.float64:
return True
else:
raise ValueError("Only float32 or float64 dtypes are supported")
| 5,336,192
|
def every(delay, task, name):
"""
Executes a task every `delay` seconds
:param delay: the delay in seconds
:param task: the method to run. The method should return False if you want the loop to stop.
:return: None
"""
next_time = time.time() + delay
while True:
time.sleep(max(0, next_time - time.time()))
try:
if task() is False:
break
except Exception:
logger.debug("Problem while executing repetitive task: %s", name, exc_info=True)
# skip tasks if we are behind schedule:
next_time += (time.time() - next_time) // delay * delay + delay
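# Usage sketch: running the loop in a daemon thread so it does not block the
# caller (the thread and task below are illustrative, not part of the original API).
import threading
import time

def heartbeat():
    print("tick", time.strftime("%H:%M:%S"))
    return True  # return False to stop the loop

threading.Thread(target=every, args=(5.0, heartbeat, "heartbeat"), daemon=True).start()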
| 5,336,193
|
def simulate_from_network_attr(edgelist_filename, param_func_list, labels,
theta,
binattr_filename=None,
contattr_filename=None,
catattr_filename=None,
sampler_func = basicALAAMsampler,
numSamples = 100,
iterationInStep = None,
burnIn = None):
"""Simulate ALAAM from on specified network with binary and/or continuous
and categorical attributes.
Parameters:
edgelist_filename - filename of Pajek format edgelist
param_func_list - list of change statistic functions corresponding
to parameters to estimate
labels - list of strings corresponding to param_func_list
to label output (header line)
       theta - corresponding vector of theta values
binattr_filename - filename of binary attributes (node per line)
Default None, in which case no binary attr.
contattr_filename - filename of continuous attributes (node per line)
Default None, in which case no continuous attr.
catattr_filename - filename of categorical attributes (node per line)
Default None, in which case no categorical attr.
sampler_func - ALAAM sampler function with signature
(G, A, changestats_func_list, theta, performMove,
sampler_m); see basicALAAMsampler.py
default basicALAAMsampler
iterationInStep - number of sampler iterations
i.e. the number of iterations between samples
(or 10*numNodes if None)
numSamples - Number of samples (default 100)
       burnIn - Number of samples to discard at start
(or 10*iterationInStep if None)
"""
assert(len(param_func_list) == len(labels))
G = Graph(edgelist_filename, binattr_filename, contattr_filename,
catattr_filename)
#G.printSummary()
sys.stdout.write(' '.join(['t'] + labels + ['acceptance_rate']) + '\n')
for (simvec,stats,acceptance_rate,t) in simulateALAAM(G, param_func_list,
theta,
numSamples,
iterationInStep,
burnIn,
sampler_func = sampler_func):
sys.stdout.write(' '.join([str(t)] + [str(x) for x in list(stats)] +
[str(acceptance_rate)]) + '\n')
| 5,336,194
|
def HfcVd(M, far='default'):
"""
    Computes the virtual dimensionality (VD) measure for an HSI
    image for specified false alarm rates. When no false alarm rate(s) is
    specified, the following vector is used: 1e-3, 1e-4, 1e-5.
This metric is used to estimate the number of materials in an HSI scene.
Parameters:
M: `numpy array`
HSI data as a 2D matrix (N x p).
        far: `list [default: 'default']`
False alarm rate(s).
Returns: python list
VD measure, number of materials estimate.
References:
C.-I. Chang and Q. Du, "Estimation of number of spectrally distinct
signal sources in hyperspectral imagery," IEEE Transactions on
Geoscience and Remote Sensing, vol. 43, no. 3, mar 2004.
J. Wang and C.-I. Chang, "Applications of independent component
analysis in endmember extraction and abundance quantification for
hyperspectral imagery," IEEE Transactions on Geoscience and Remote
Sensing, vol. 44, no. 9, pp. 2601-1616, sep 2006.
"""
N, numBands = M.shape
# calculate eigenvalues of covariance and correlation between bands
lambda_cov = np.linalg.eig(np.cov(M.T))[0] # octave: cov(M')
lambda_corr = np.linalg.eig(np.corrcoef(M.T))[0] # octave: corrcoef(M')
    # not really needed:
lambda_cov = np.sort(lambda_cov)[::-1]
lambda_corr = np.sort(lambda_corr)[::-1]
if far == 'default':
far = [10**-3, 10**-4, 10**-5]
else:
far = [far]
numEndmembers_list = []
for y in range(len(far)):
numEndmembers = 0
pf = far[y]
for x in range(numBands):
sigmaSquared = (2.*lambda_cov[x]/N) + (2.*lambda_corr[x]/N) + (2./N)*lambda_cov[x]*lambda_corr[x]
            sigma = np.sqrt(sigmaSquared)
tau = -ss.norm.ppf(pf, 0, abs(sigma))
if (lambda_corr[x]-lambda_cov[x]) > tau:
numEndmembers += 1
numEndmembers_list.append(numEndmembers)
return numEndmembers_list
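# Sanity-check sketch on synthetic mixed pixels (entirely made-up data); the
# returned list holds one endmember-count estimate per default false-alarm rate.
import numpy as np

rng = np.random.default_rng(0)
endmembers = rng.random((4, 60))              # 4 synthetic spectra, 60 bands
abundances = rng.dirichlet(np.ones(4), 5000)  # 5000 mixed pixels
M = abundances @ endmembers + 0.001 * rng.standard_normal((5000, 60))
print(HfcVd(M))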
| 5,336,195
|
def cycle(sheduled_jobs):
"""
    Start the scheduled job worker. The worker pushes deferred tasks to the
    redis queue.
"""
queue = []
now = datetime.utcnow()
for when, job in sheduled_jobs:
if not hasattr(job, 'defer'):
raise RuntimeError('Job should have defer method')
queue.append(Task(at=when.schedule(now, now), when=when, job=job))
while True:
now = datetime.utcnow()
for task in queue:
if task.at > now:
continue
task.job.defer()
task.at = task.when.schedule(prev=task.at, now=now)
sleep(0.001)
| 5,336,196
|
def load_multicenter_aids_cohort_study(**kwargs):
"""
Originally in [1]::
        Size: (78, 4)
AIDSY: date of AIDS diagnosis
W: years from AIDS diagnosis to study entry
T: years from AIDS diagnosis to minimum of death or censoring
D: indicator of death during follow up
i AIDSY W T D
1 1990.425 4.575 7.575 0
2 1991.250 3.750 6.750 0
3 1992.014 2.986 5.986 0
4 1992.030 2.970 5.970 0
5 1992.072 2.928 5.928 0
6 1992.220 2.780 4.688 1
References
----------
[1] Cole SR, Hudgens MG. Survival analysis in infectious disease research: describing events in time. AIDS. 2010;24(16):2423-31.
"""
return _load_dataset("multicenter_aids_cohort.tsv", sep="\t", index_col=0, **kwargs)
| 5,336,197
|
def bomb():
"""Bomb context appropriate for testing all simple wires cases."""
bomb = Bomb()
bomb.serial = 'abc123'
bomb.batteries = True
bomb.labels = ['FRK']
return bomb
| 5,336,198
|
def process_replot_argument(replot_dir, results_dir):
"""Reads the args.json file in a results directory, copies it to an
appropriate location in the current results directory and returns the link
speed range and a list of RemyCC files."""
argsfilename = os.path.join(replot_dir, "args.json")
argsfile = open(argsfilename)
jsondict = json.load(argsfile)
argsfile.close()
args = jsondict["args"]
remyccs = args["remycc"]
link_ppt_range = np.logspace(np.log10(args["link_ppt"][0]), np.log10(args["link_ppt"][1]), args["num_points"])
console_dir = os.path.join(replot_dir, "outputs")
replots_dirname = os.path.join(results_dir, "replots", os.path.basename(replot_dir))
os.makedirs(replots_dirname, exist_ok=True)
target_filename = os.path.join(replots_dirname, "args.json")
shutil.copy(argsfilename, target_filename)
return remyccs, link_ppt_range, console_dir
| 5,336,199
|