| content (string, length 22–815k) | id (int64, 0–4.91M) |
|---|---|
def warp_containing_points(img, pts, H, border=4, shape_only=False):
"""
display = img.copy()
for pt in pts.reshape((-1,2)).astype(int):
cv2.circle(display, tuple(pt), 4, (255, 0, 0),
-1, cv2.LINE_AA)
debug_show('warp', display)
"""
pts2 = cv2.perspectiveTransform(pts, H)
x0, y0, w, h = cv2.boundingRect(pts2)
print("got bounding rect", x0, y0, w, h)
T = translation(-x0 + border, -y0 + border)
TH = np.dot(T, H)
if shape_only:
return (h + 2 * border, w + 2 * border), TH
else:
dst = cv2.warpPerspective(
img, TH, (w + 2 * border, h + 2 * border), borderMode=cv2.BORDER_REPLICATE
)
return dst, TH
| 5,342,400
|
def generate_parallelogrammatic_board(width=5, height=5):
"""
Creates a board with a shape of a parallelogram.
Width and height specify the size (in fields) of the board.
"""
return [[1] * height for _ in range(width)]
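# Usage sketch (added for illustration, not part of the original snippet):
# a 3x2 board is a list of 3 columns, each holding 2 fields set to 1.
example_board = generate_parallelogrammatic_board(width=3, height=2)
assert example_board == [[1, 1], [1, 1], [1, 1]]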
| 5,342,401
|
def function_is_even(latex_dict: dict) -> str:
"""
colloquially,
sympy.cos(x)==sympy.cos(-x)
sympy.cos(x) - sympy.cos(-x) == 0
>>> latex_dict = {}
>>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
>>> latex_dict['feed'] = [parse_latex('')]
>>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
>>> function_is_even(latex_dict)
'no check performed'
"""
logger.info("[trace]")
return "no check performed"
| 5,342,402
|
def process_json(args, json_raw):
"""
Process the parsed JSON and search for the metric.
"""
if args.debug:
print(f"DEBUG: [process_json] count = {len(json_raw)}, metric = {args.metric} ")
for metric in json_raw:
if metric['name'] == args.metric:
if args.debug:
print(f"DEBUG: [process_json] metric found, raw = {metric}")
if len(metric['metrics']) < 1:
exit_unknown(f"Metric contains no metrics -- {metric}")
elif len(metric['metrics']) == 1:
process_metric(args, metric, metric['metrics'][0])
else:
if args.debug:
print(f"DEBUG: [process_json] metric contain multiple values, label_name = {args.label_name}, label_value = {args.label_value}")
for metric_value in metric['metrics']:
if args.label_name == 'Not set' or args.label_value == 'Not set':
exit_unknown(f"-n/--label-name and -v/--label-value are required for multi-value metrics -- {metric}")
if metric_value['labels'][args.label_name] == args.label_value:
process_metric(args, metric, metric_value)
exit_unknown(f"No metric found with label_name = {args.label_name}, label_value = {args.label_value} -- {metric}")
exit_unknown(f"Metric with name = {args.metric} not found")
| 5,342,403
|
def summarize_curriculum(
curriculum: AbstractCurriculum,
) -> str:
"""
Generate a detailed string summarizing the contents of the curriculum.
:return: A string that would print as a formatted outline of this curriculum's contents.
"""
def maybe_plural(num: int, label: str):
return f"{num} {label}" + ("" if num == 1 else "s")
block_summaries = []
for i_block, block in enumerate(curriculum.learn_blocks_and_eval_blocks()):
task_summaries = []
for i_task, task_block in enumerate(block.task_blocks()):
variant_summaries = []
for i_variant, task_variant in enumerate(task_block.task_variants()):
variant_summary = (
f"\n\t\t\tTask variant {i_variant+1}, "
f"{task_variant.task_label} - {task_variant.variant_label}: "
+ (
f"{maybe_plural(task_variant.num_episodes, 'episode')}."
if task_variant.num_episodes is not None
else f"{maybe_plural(task_variant.num_steps, 'step')}."
)
)
variant_summaries.append(variant_summary)
task_summary = (
f"\n\t\tTask {i_task+1}, {task_block.task_label}: "
f"{maybe_plural(len(variant_summaries), 'variant')}"
)
task_summaries.append(task_summary + "".join(variant_summaries))
block_summary = (
f"\n\n\tBlock {i_block+1}, "
f"{'learning' if block.is_learning_allowed else 'evaluation'}: "
f"{maybe_plural(len(task_summaries), 'task')}"
)
block_summaries.append(block_summary + "".join(task_summaries))
curriculum_summary = (
f"This curriculum has {maybe_plural(len(block_summaries), 'block')}"
+ "".join(block_summaries)
)
return curriculum_summary
| 5,342,404
|
def get_solarsample():
"""
NAME:
get_solarsample
PURPOSE:
get the RC sample at solar abundances
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
"""
# Get the full sample first
data= get_rcsample()
# Now cut it
lowfeh= _solar_lowfeh(0.)
highfeh= _solar_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _solar_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _solar_highafe(data[_FEHTAG]))
return data[indx]
| 5,342,405
|
def _fem_xref_methods_check(fem1: BDF):
"""
testing that these methods work with xref
"""
log = fem1.log
log.debug('_fem_xref_methods_check(fem1)')
fem1._get_rigid()
common_node_ids = list(fem1.nodes.keys())
fem1.get_rigid_elements_with_node_ids(common_node_ids)
for spc_id in set(list(fem1.spcadds.keys()) + list(fem1.spcs.keys())):
fem1.get_reduced_spcs(spc_id, consider_spcadd=True)
for mpc_id in set(list(fem1.mpcadds.keys()) + list(fem1.mpcs.keys())):
fem1.get_reduced_mpcs(mpc_id, consider_mpcadd=True)
get_dependent_nid_to_components(fem1)
fem1._get_maps(eids=None, map_names=None,
consider_0d=True, consider_0d_rigid=True,
consider_1d=True, consider_2d=True, consider_3d=True)
get_dependent_nid_to_components(fem1)
fem1.get_pid_to_node_ids_and_elements_array(pids=None, etypes=None, idtype='int32',
msg=' which is required by test_bdf')
fem1.get_property_id_to_element_ids_map(msg=' which is required by test_bdf')
fem1.get_material_id_to_property_ids_map(msg=' which is required by test_bdf')
fem1.get_element_ids_list_with_pids(pids=None)
fem1.get_element_ids_dict_with_pids(pids=None, stop_if_no_eids=False,
msg=' which is required by test_bdf')
fem1.get_node_id_to_element_ids_map()
fem1.get_node_id_to_elements_map()
export_mcids(fem1, csv_filename=None, eids=None,
export_xaxis=True, export_yaxis=False, iply=0, log=None, debug=False)
export_mcids_all(fem1)
| 5,342,406
|
def register_filters(app):
"""Jinja filters."""
app.jinja_env.globals["DEBUG"] = app.config["DEBUG"]
app.jinja_env.globals["EXTENSIONS"] = get_valid_extensions()
app.jinja_env.globals["SITE_TITLE"] = app.config["LNBITS_SITE_TITLE"]
| 5,342,407
|
def main(level):
"""Damona is an environment manager for singularity containers.
It is to singularity containers what conda is to packaging.
The default environment is called 'base'. You can create and activate
a new environment as follows:
\b
damona env --create TEST
damona activate TEST
Once an environment is activated, you can install a Damona-registered image
(and its registered binaries):
damona install fastqc:0.11.9
\b
More information on https://damona.readthedocs.io.
Please report issues on https://github.com/cokelaer/damona
Contact: Thomas Cokelaer at pasteur dot fr
"Make everything as simple as possible, but not simpler." -- Albert Einstein
"""
######################## !!!!!!!!!!!! ####################
# this function cannot print anything because the damona
# activate command prints bash commands read by damona.sh
######################## !!!!!!!!!!!! ####################
logger.setLevel(level)
| 5,342,408
|
def perform_context_selection(
estimation_tasks: List[EstimationTask],
) -> List[EstimationTask]:
"""Changes the circuits in estimation tasks to involve context selection.
Args:
estimation_tasks: list of estimation tasks
"""
output_estimation_tasks = []
for estimation_task in estimation_tasks:
(
context_selection_circuit,
frame_operator,
) = get_context_selection_circuit_for_group(estimation_task.operator)
frame_circuit = estimation_task.circuit + context_selection_circuit
new_estimation_task = EstimationTask(
frame_operator, frame_circuit, estimation_task.number_of_shots
)
output_estimation_tasks.append(new_estimation_task)
return output_estimation_tasks
| 5,342,409
|
def where(condition, x, y):
"""Wrapper of `torch.where`.
Parameters
----------
condition : DTensor of bool
Where True, yield x, otherwise yield y.
x : DTensor
The first tensor.
y : DTensor
The second tensor.
"""
return torch.where(condition, x, y)
| 5,342,410
|
def test_cdf_accuracy():
"""Compare accuracy of the cumulative distribution function.
Compare the results with the ones obtained with the R poibin package
[Rpoibin]_.
"""
p = [0.1, 0.1]
pb = PoiBin(p)
assert np.all(np.abs(pb.cdf([0, 2]) - np.array([0.81, 1.])) < 1e-10)
p = [0.5, 1.0]
pb = PoiBin(p)
assert np.all(np.abs(pb.cdf([1, 2]) - np.array([0.5, 1.])) < 1e-10)
p = [0.1, 0.5]
pb = PoiBin(p)
assert np.all(np.abs(pb.cdf([0, 1, 2]) - np.array([0.45, 0.95, 1.])) < 1e-10)
p = [0.1, 0.5, 0.7]
pb = PoiBin(p)
assert np.all(np.abs(pb.cdf([0, 1, 2]) - np.array([0.135, 0.6, 0.965])) < 1e-10)
| 5,342,411
|
def ukhls_wave_prefix(columns, year):
""" Determine wave prefix for ukhls wave data.
Parameters
----------
columns : list
A list of column names to add wave prefixes to.
year : int
Which wave year is being processed.
Returns
-------
columns : list
Column names with wave prefixes added.
"""
#wave_letter = alphabet[year - 2008]
wave_letter = get_wave_letter(year)
exclude = ["pidp"]
for i, item in enumerate(columns):
if item not in exclude:
columns[i] = wave_letter + "_" + item # Looks stupid but needed to update the list.
return columns
| 5,342,412
|
def sort_converters(converters: Iterable[Optional[GenericConverter]]) -> List[GenericConverter]:
"""
Sort a list of converters according to their priority.
"""
converters = cast(Iterable[GenericConverter], filter(bool, converters))
return sorted(converters, key=lambda c: c.priority, reverse=True)
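# Usage sketch (added for illustration; _FakeConverter is a hypothetical
# stand-in for GenericConverter -- any object with a `priority` attribute works,
# and None entries are dropped by filter(bool, ...) before sorting):
from collections import namedtuple
_FakeConverter = namedtuple("_FakeConverter", "name priority")
_ordered = sort_converters([_FakeConverter("low", 1), None, _FakeConverter("high", 5)])
assert [c.name for c in _ordered] == ["high", "low"]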
| 5,342,413
|
def test_news_removal():
"""tests whether the news removed are returned in a list"""
data = news_removal()
assert isinstance(data, list)
| 5,342,414
|
def _make_path_relative(origin, dest):
"""
Return the relative path between origin and dest.
If it's not possible return dest.
If they are identical return ``os.curdir``
Adapted from `path.py <http://www.jorendorff.com/articles/python/path/>`_ by Jason Orendorff.
"""
origin = os.path.abspath(origin).replace('\\', '/')
dest = os.path.abspath(dest).replace('\\', '/')
#
orig_list = splitall(os.path.normcase(origin))
# Don't normcase dest! We want to preserve the case.
dest_list = splitall(dest)
#
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
#
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
#
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
return os.curdir
else:
# return os.path.join(*segments).replace('\\', '/')
return os.path.join(*segments)
| 5,342,415
|
def print_mf_weight_statistics():
""" Prints debug info about size of weights. """
def callback(i_epoch, model, loss_train, loss_val, subset=None, trainer=None, last_batch=None):
models, labels = [], []
try:
models.append(model.outer_transform)
labels.append("outer transform weights:")
except:
pass
try:
models.append(model.inner_transform)
labels.append("inner transform weights:")
except:
pass
try:
models.append(model.transform)
labels.append("transform weights:")
except:
pass
try:
models.append(model.encoder)
labels.append("encoder weights:")
except:
pass
try:
models.append(model.decoder)
labels.append("decoder weights:")
except:
pass
subset_str = " " if subset is None or trainer is None else " {:>2d} / {:>2d}:".format(subset, trainer)
for model_, label_ in zip(models, labels):
weights = np.hstack([param.detach().cpu().numpy().flatten() for param in model_.parameters()])
logger.debug(
"{} {:26.26s} mean {:>8.5f}, std {:>8.5f}, range {:>8.5f} ... {:>8.5f}".format(
subset_str, label_, np.mean(weights), np.std(weights), np.min(weights), np.max(weights)
)
)
return callback
| 5,342,416
|
def createPolygon(fire):
"""
create a Polygon object from list of points
"""
points = []
for coordinate in fire["geometry"]["coordinates"][0]:
points.append(tuple(coordinate))
polygon = Polygon(points)
return polygon
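# Usage sketch (added for illustration; assumes Polygon is shapely.geometry.Polygon,
# as the code above implies): a GeoJSON-style unit square has area 1.
example_fire = {"geometry": {"coordinates": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]]}}
assert createPolygon(example_fire).area == 1.0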
| 5,342,417
|
def change_anim_nodes(node_object="", in_tangent='linear', out_tangent='linear'):
"""
Changes the setting on all anim nodes.
:param node_object:
:param in_tangent:
:param out_tangent:
:return: <bool> True for success. <bool> False for failure.
"""
anim_nodes = object_utils.get_connected_anim(node_object)
cmds.keyTangent(anim_nodes, itt=in_tangent, ott=out_tangent)
return True
| 5,342,418
|
def is_x_degenerated(x, codon, table):
"""Determine if codon is x-fold degenerated.
@param codon the codon
@param table code table id
@return true if x <= the degeneration of the codon
"""
return (x <= len(altcodons(codon, table)))
| 5,342,419
|
def get_long_tensor(tokens_list, batch_size, pad_id=constant.PAD_ID):
""" Convert (list of )+ tokens to a padded LongTensor. """
sizes = []
x = tokens_list
while isinstance(x[0], list):
sizes.append(max(len(y) for y in x))
x = [z for y in x for z in y]
tokens = torch.LongTensor(batch_size, *sizes).fill_(pad_id)
for i, s in enumerate(tokens_list):
tokens[i, :len(s)] = torch.LongTensor(s)
return tokens
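# Usage sketch (added for illustration; torch is assumed available as in the
# snippet above, and pad_id is given explicitly instead of constant.PAD_ID):
example_batch = [[1, 2, 3], [4, 5]]
padded = get_long_tensor(example_batch, batch_size=2, pad_id=0)
assert padded.tolist() == [[1, 2, 3], [4, 5, 0]]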
| 5,342,420
|
def test_subdomain_against_pattern_asterix_prefix(create_user):
"""Test user email on subdomain against pattern """
emails = ["harold@help.bar.com"]
patterns = ["*bar.com"]
assert create_user.preprocess_pattern(emails, patterns) == True
| 5,342,421
|
def business():
"""
show business posts
"""
business = Post.query.filter_by(category="Business").all()
return render_template('business.html', post=business)
| 5,342,422
|
def tile_image(x_gen, tiles=()):
"""Tiled image representations.
Args:
x_gen: 4D array of images (n x w x h x 3)
tiles (int pair, optional): number of rows and columns
Returns:
Array of tiled images (1 x W x H x 3)
"""
n_images = x_gen.shape[0]
if not tiles:
for i in range(int(np.sqrt(n_images)), 0, -1):
if n_images % i == 0:
break
n_rows = i
n_cols = n_images // i
else:
n_rows, n_cols = tiles
full = [np.hstack(x_gen[c * n_rows:(c + 1) * n_rows]) for c in range(n_cols)]
return np.expand_dims(np.vstack(full), 0)
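# Usage sketch (added for illustration): six 4x4 RGB images tile into a
# 2-row by 3-column grid with a leading batch axis.
import numpy as np
example_images = np.zeros((6, 4, 4, 3))
assert tile_image(example_images).shape == (1, 12, 8, 3)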
| 5,342,423
|
def note_updated_data(note, role):
"""Return the data for updated date
:param note: the note that holds the data
:type note: :class:`jukeboxcore.djadapter.models.Note`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the updated date
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
dt = note.date_updated
return dt_to_qdatetime(dt)
| 5,342,424
|
def _xyz_atom_coords(atom_group):
"""Use this method if you need to identify if CB is present in atom_group and if not return CA"""
tmp_dict = {}
for atom in atom_group.atoms():
if atom.name.strip() in {"CA", "CB"}:
tmp_dict[atom.name.strip()] = atom.xyz
if 'CB' in tmp_dict:
return tmp_dict['CB']
elif 'CA' in tmp_dict:
return tmp_dict['CA']
else:
return float('inf'), float('inf'), float('inf')
| 5,342,425
|
def InitBareRepository(path):
"""Returns the Repo object"""
assert isinstance(path, str)
pathlib.Path(path).parent.mkdir(parents=True,exist_ok=True)
return git.Repo.init(path,bare=True)
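# Usage sketch (added for illustration; assumes GitPython is installed, as the
# snippet's use of git.Repo implies):
import os
import tempfile
example_repo = InitBareRepository(os.path.join(tempfile.mkdtemp(), "example.git"))
assert example_repo.bare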
| 5,342,426
|
def test_line_insert_before_after(tempfile_name, get_body):
"""
Test for file.line for insertion before specific line, using pattern and no patterns.
See issue #38670
:return:
"""
file_content = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/pepper",
" - /srv/sugar",
]
)
file_modified = os.linesep.join(
[
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/pepper",
" - /srv/coriander",
" - /srv/sugar",
]
)
cfg_content = "- /srv/coriander"
isfile_mock = MagicMock(
side_effect=lambda x: True if x == tempfile_name else DEFAULT
)
for b_line, a_line in [("/srv/sugar", "/srv/salt")]:
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
tempfile_name,
content=cfg_content,
before=b_line,
after=a_line,
mode="insert",
)
handles = atomic_open_mock.filehandles[tempfile_name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = get_body(file_modified)
assert writelines_content[0] == expected
| 5,342,427
|
def test_syncFromSynapse():
"""This function tests recursive download as defined in syncFromSynapse
most of the functionality of this function are already tested in the
tests/integration/test_command_line_client::test_command_get_recursive_and_query
which means that the only test if for path=None
"""
# Create a Project
project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
# Create and upload two files in Folder
uploaded_paths = []
for i in range(2):
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
schedule_for_cleanup(f)
file_entity = syn.store(File(f, parent=folder_entity))
#Add a file in the project level as well
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
schedule_for_cleanup(f)
file_entity = syn.store(File(f, parent=project_entity))
#syncFromSynapse() uses chunkedQuery() which will return results that are eventually consistent but not always right after the entity is created.
start_time = time.time()
while len(list(syn.getChildren(project_entity))) != 2:
assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC)
time.sleep(2)
### Test recursive get
output = synapseutils.syncFromSynapse(syn, project_entity)
assert len(output) == len(uploaded_paths)
for f in output:
assert f.path in uploaded_paths
| 5,342,428
|
def clone_variants(sender, request, original_release, release, **kwargs):
"""
Clone all variants and arches from `original_release` to new instance. All
newly created objects are logged into a changeset. Since nor Variant nor
VariantArch has an export method, their string representation is used
instead.
"""
filter_include_trees = 'include_trees' in request.data
variant_mapping = {}
for tree in as_list(request.data.get('include_trees', []), 'include_trees'):
try:
variant, arch = tree.split('.')
except ValueError:
raise ValidationError('%s is not a well-formed tree specifier.' % tree)
if tree not in original_release.trees:
raise ValidationError('%s is not in release %s.' % (tree, original_release))
variant_mapping.setdefault(variant, set()).add(arch)
for variant in original_release.variant_set.all():
if filter_include_trees and variant.variant_uid not in variant_mapping:
continue
archs = variant.variantarch_set.all()
variant.pk = None
variant.release = release
variant.save()
request.changeset.add('Variant', variant.pk,
'null', json.dumps(str(variant)))
for variant_arch in archs:
if filter_include_trees and variant_arch.arch.name not in variant_mapping[variant.variant_uid]:
continue
variant_arch.pk = None
variant_arch.variant = variant
variant_arch.save()
request.changeset.add('VariantArch', variant_arch.pk,
'null', json.dumps(str(variant_arch)))
| 5,342,429
|
def get_http_exception(code):
"""Return an exception class based on its code"""
try:
return http_exceptions[int(code)]
except:
return None
| 5,342,430
|
def _zoom(restricted_func_and_grad, wolfe_one, wolfe_two, a_lo, phi_lo,
dphi_lo, a_hi, phi_hi, dphi_hi, g_0, pass_through):
"""
Implementation of zoom. Algorithm 3.6 from Wright and Nocedal, 'Numerical
Optimization', 1999, pg. 59-61. Tries cubic, quadratic, and bisection methods
of zooming.
"""
state = _ZoomState(
done=False,
failed=False,
j=0,
a_lo=a_lo,
phi_lo=phi_lo,
dphi_lo=dphi_lo,
a_hi=a_hi,
phi_hi=phi_hi,
dphi_hi=dphi_hi,
a_rec=(a_lo + a_hi) / 2.,
phi_rec=(phi_lo + phi_hi) / 2.,
a_star=1.,
phi_star=phi_lo,
dphi_star=dphi_lo,
g_star=g_0,
nfev=0,
ngev=0,
)
delta1 = 0.2
delta2 = 0.1
def body(state):
# Body of zoom algorithm. We use boolean arithmetic to avoid using jax.cond
# so that it works on GPU/TPU.
dalpha = (state.a_hi - state.a_lo)
a = jnp.minimum(state.a_hi, state.a_lo)
b = jnp.maximum(state.a_hi, state.a_lo)
cchk = delta1 * dalpha
qchk = delta2 * dalpha
# This will cause the line search to stop, and since the Wolfe conditions
# are not satisfied the minimization should stop too.
threshold = jnp.where((jnp.finfo(dalpha).bits < 64), 1e-5, 1e-10)
state = state._replace(failed=state.failed | (dalpha <= threshold))
# Cubmin is sometimes nan, though in this case the bounds check will fail.
a_j_cubic = _cubicmin(state.a_lo, state.phi_lo, state.dphi_lo, state.a_hi,
state.phi_hi, state.a_rec, state.phi_rec)
use_cubic = (state.j > 0) & (a_j_cubic > a + cchk) & (a_j_cubic < b - cchk)
a_j_quad = _quadmin(state.a_lo, state.phi_lo, state.dphi_lo, state.a_hi, state.phi_hi)
use_quad = (~use_cubic) & (a_j_quad > a + qchk) & (a_j_quad < b - qchk)
a_j_bisection = (state.a_lo + state.a_hi) / 2.
use_bisection = (~use_cubic) & (~use_quad)
a_j = jnp.where(use_cubic, a_j_cubic, state.a_rec)
a_j = jnp.where(use_quad, a_j_quad, a_j)
a_j = jnp.where(use_bisection, a_j_bisection, a_j)
# TODO(jakevdp): should we use some sort of fixed-point approach here instead?
phi_j, dphi_j, g_j = restricted_func_and_grad(a_j)
phi_j = phi_j.astype(state.phi_lo.dtype)
dphi_j = dphi_j.astype(state.dphi_lo.dtype)
g_j = g_j.astype(state.g_star.dtype)
state = state._replace(nfev=state.nfev + 1,
ngev=state.ngev + 1)
hi_to_j = wolfe_one(a_j, phi_j) | (phi_j >= state.phi_lo)
star_to_j = wolfe_two(dphi_j) & (~hi_to_j)
hi_to_lo = (dphi_j * (state.a_hi - state.a_lo) >= 0.) & (~hi_to_j) & (~star_to_j)
lo_to_j = (~hi_to_j) & (~star_to_j)
state = state._replace(
**_binary_replace(
hi_to_j,
state._asdict(),
dict(
a_hi=a_j,
phi_hi=phi_j,
dphi_hi=dphi_j,
a_rec=state.a_hi,
phi_rec=state.phi_hi,
),
),
)
# for termination
state = state._replace(
done=star_to_j | state.done,
**_binary_replace(
star_to_j,
state._asdict(),
dict(
a_star=a_j,
phi_star=phi_j,
dphi_star=dphi_j,
g_star=g_j,
)
),
)
state = state._replace(
**_binary_replace(
hi_to_lo,
state._asdict(),
dict(
a_hi=state.a_lo,
phi_hi=state.phi_lo,
dphi_hi=state.dphi_lo,
a_rec=state.a_hi,
phi_rec=state.phi_hi,
),
),
)
state = state._replace(
**_binary_replace(
lo_to_j,
state._asdict(),
dict(
a_lo=a_j,
phi_lo=phi_j,
dphi_lo=dphi_j,
a_rec=state.a_lo,
phi_rec=state.phi_lo,
),
),
)
state = state._replace(j=state.j + 1)
# Choose higher cutoff for maxiter than Scipy as Jax takes longer to find
# the same value - possibly floating point issues?
state = state._replace(failed= state.failed | state.j >= 30)
return state
state = lax.while_loop(lambda state: (~state.done) & (~pass_through) & (~state.failed),
body,
state)
return state
| 5,342,431
|
def _gradients_input(model: Union[tf.keras.models.Model, 'keras.models.Model'],
x: tf.Tensor,
target: Union[None, tf.Tensor]) -> tf.Tensor:
"""
Calculates the gradients of the target class output (or the output if the output dimension is equal to 1)
with respect to each input feature.
Parameters
----------
model
Tensorflow or keras model.
x
Input data point.
target
Target for which the gradients are calculated if the output dimension is higher than 1.
Returns
-------
Gradients for each input feature.
"""
with tf.GradientTape() as tape:
tape.watch(x)
preds = _run_forward(model, x, target)
grads = tape.gradient(preds, x)
return grads
| 5,342,432
|
def fixBadSets(sets):
"""Splits bad sets into a series of valid, incomplete sets."""
from Totoro.dbclasses import Set as Set
toRemove = []
toAdd = []
for ss in sets:
if ss.getStatus(silent=True)[0] != 'Bad':
continue
toRemove.append(ss)
if len(ss.totoroExposures) == 1:
raise exceptions.TotoroError(
'found bad set with one exposure. This is probably a bug.')
elif len(ss.totoroExposures) == 2:
# If the bad set has two exposures, splits it.
toAdd += [Set.fromExposures(exp) for exp in ss.totoroExposures]
else:
# Tests all possible combinations of two exposures to check if one
# of them is a valid set.
validSets = []
for ii, jj in [[0, 1], [0, 2], [1, 2]]:
testSet = Set.fromExposures([ss.totoroExposures[ii], ss.totoroExposures[jj]])
if testSet.getStatus(silent=True)[0] != 'Bad':
validSets.append(testSet)
if len(validSets) == 0:
# If no valid combinations, each exposures goes to a set.
toAdd += [Set.fromExposures(exp) for exp in ss.totoroExposures]
else:
# Otherwise, selects the combination that produces an
# incomplete set with maximum SN2.
signalToNoise = [np.nansum(xx.getSN2Array()) for xx in validSets]
maxSet = validSets[np.argmax(signalToNoise)]
toAdd.append(maxSet)
missingExposure = [
exp for exp in ss.totoroExposures if exp not in maxSet.totoroExposures
]
toAdd.append(Set.fromExposures(missingExposure))
for ss in toRemove:
sets.remove(ss)
for ss in toAdd:
sets.append(ss)
return sets
| 5,342,433
|
def get_databases():
"""Return an ordered dict of (dbname: database). The order is
according to search preference, the first DB to contain a document
should be assumed to be the authoritative one."""
sql_dbs = [
_SQLDb(
XFormInstanceSQL._meta.db_table,
lambda id_: XFormInstanceSQL.get_obj_by_id(id_),
"XFormInstance",
lambda doc: XFormInstanceSQLRawDocSerializer(doc).data,
),
_SQLDb(
CommCareCaseSQL._meta.db_table,
lambda id_: CommCareCaseSQL.get_obj_by_id(id_),
"CommCareCase",
lambda doc: CommCareCaseSQLRawDocSerializer(doc).data,
),
_SQLDb(
SQLLocation._meta.db_table,
lambda id_: SQLLocation.objects.get(location_id=id_),
'Location',
lambda doc: doc.to_json()
),
]
all_dbs = OrderedDict()
for db in sql_dbs:
all_dbs[db.dbname] = db
couchdbs_by_name = couch_config.all_dbs_by_db_name
for dbname in sorted(couchdbs_by_name):
all_dbs[dbname] = _CouchDb(couchdbs_by_name[dbname])
return all_dbs
| 5,342,434
|
def save_qa_result(resource_id, qa_result, log):
"""
Saves the results of the QA check to the qa table.
"""
import ckan.model as model
from ckanext.qa.model import QA
now = datetime.datetime.now()
qa = QA.get_for_resource(resource_id)
if not qa:
qa = QA.create(resource_id)
model.Session.add(qa)
else:
log.info('QA from before: %r', qa)
for key in ('openness_score', 'openness_score_reason', 'format'):
setattr(qa, key, qa_result[key])
qa.archival_timestamp = qa_result['archival_timestamp']
qa.updated = now
model.Session.commit()
log.info('QA results updated ok')
| 5,342,435
|
def get_app_data_path(app_name):
"""Returns the OS-specific path to Application Data for the given App.
Creates the path if it doesn't already exist.
NOTE: Darwin: https://developer.apple.com/reference/foundation/1414224-nssearchpathfordirectoriesindoma?language=objc
"""
assert type(app_name) == str
if sys.platform.startswith('darwin'):
from AppKit import \
NSSearchPathForDirectoriesInDomains, \
NSApplicationSupportDirectory, \
NSUserDomainMask # pip install pyobjc
app_data_path = os.path.join(
NSSearchPathForDirectoriesInDomains(
NSApplicationSupportDirectory,
NSUserDomainMask,
True)[0], app_name)
elif sys.platform.startswith('win32'):
app_data_path = os.path.join(os.environ['APPDATA'], app_name)
elif sys.platform.startswith('linux') \
or sys.platform.startswith('freebsd'): # freebsd is untested
app_data_path = os.path.expanduser(os.path.join("~", "." + app_name))
else:
raise NotImplementedError("The platform, {}, is not supported."
.format(sys.platform))
if not os.path.exists(app_data_path):
os.mkdir(app_data_path)
return app_data_path
| 5,342,436
|
def list_examples():
"""List all examples"""
examples = ExampleModel.query()
form = ExampleForm()
if form.validate_on_submit():
example = ExampleModel(
example_name=form.example_name.data,
example_description=form.example_description.data,
added_by=users.get_current_user()
)
try:
example.put()
example_id = example.key.id()
flash(u'Example %s successfully saved.' % example_id, 'success')
return redirect(url_for('list_examples'))
except CapabilityDisabledError:
flash(u'App Engine Datastore is currently in read-only mode.', 'info')
return redirect(url_for('list_examples'))
return render_template('list_examples.html', examples=examples, form=form)
| 5,342,437
|
def protocol(ctx: Context, protocol_id: PublicId) -> None:
"""Push a protocol to the registry or save it in local registry."""
if ctx.config.get("local"):
_save_item_locally(ctx, PROTOCOL, protocol_id)
else:
push_item(ctx, PROTOCOL, protocol_id)
| 5,342,438
|
def parse_direct_mention(message_text):
"""
Finds a direct mention (a mention that is at the beginning) in message text
and returns the user ID which was mentioned. If there is no direct mention, returns None
"""
matches = re.search(_MENTION_REGEX, message_text)
# the first group contains the username, the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
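# Usage sketch (added for illustration): _MENTION_REGEX is not shown in the
# snippet, so a common Slack-bot pattern is assumed here purely to keep the
# example self-contained; the real constant may differ.
_MENTION_REGEX = r"^<@(|[WU].+?)>(.*)"
user_id, rest = parse_direct_mention("<@U012ABC> deploy please")
assert (user_id, rest) == ("U012ABC", "deploy please")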
| 5,342,439
|
def test_load_db_1():
"""
Properly load the dataset
"""
transaction, bit_map, b2i_dict, i2b_dict = load_db(
"./data/contextIGB.txt", 1.0, 0.0, False,
)
t = np.array(
[
[1, 2, 4, 5],
[2, 3, 5],
[1, 2, 4, 5],
[1, 2, 3, 5],
[1, 2, 3, 4, 5],
[2, 3, 4],
]
)
bmap = np.array(
[
[1, 1, 0, 1, 1],
[0, 1, 1, 0, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
]
)
b2i = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}
i2b = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4}
assert (
(transaction == t).all()
and (bit_map == bmap).all()
and (b2i_dict == b2i)
and (i2b_dict == i2b)
)
| 5,342,440
|
def prepare_deep(schema: types.Schema, schemas: types.Schemas):
"""
Resolve $ref and merge allOf including for object properties and items.
Assume the schema is a valid JSONSchema.
Args:
schema: The schema to prepare.
schemas: The schemas from which to resolve any $ref.
Returns:
The prepared schema.
"""
schema = prepare(schema=schema, schemas=schemas)
# Resolve $ref in any properties
properties = schema.get(types.OpenApiProperties.PROPERTIES, None)
if properties is not None:
for name, prop_schema in properties.items():
properties[name] = prepare_deep(schema=prop_schema, schemas=schemas)
# Resolve $ref of any items
items_schema = peek.items(schema=schema, schemas={})
if items_schema is not None:
schema[types.OpenApiProperties.ITEMS] = prepare_deep(
schema=items_schema, schemas=schemas
)
return schema
| 5,342,441
|
def test_assert_response_bad_status_code_with_json_errors():
"""Different status code than expected, with the server including errors."""
errors = [{"foo": "bar"}]
test_content = {"errors": errors}
response = create_response(status_code=404, json_content=test_content)
with pytest.raises(CommandError) as cm:
assert_response_ok(response)
assert str(cm.value) == (
"Wrong status code from server (expected=200, got=404) errors={} "
"headers={{'Content-Type': 'application/json'}}".format(errors)
)
| 5,342,442
|
def _resize_and_center_fundus(image, diameter):
"""
Helper function for scale normalizing image.
"""
copy = image.copy()
# Find largest contour in image.
contours = _find_contours(image)
# Return unless we have gotten some result contours.
if contours is None:
return None
center, radius = contours
# Calculate the min and max-boundaries for cropping the image.
x_min = max(0, int(center[0] - radius))
y_min = max(0, int(center[1] - radius))
z = int(radius*2)
x_max = x_min + z
y_max = y_min + z
# Crop the image.
copy = copy[y_min:y_max, x_min:x_max]
# Scale the image.
fx = fy = (diameter / 2) / radius
copy = cv2.resize(copy, (0, 0), fx=fx, fy=fy)
# Add padding to image.
shape = copy.shape
# Get the border shape size.
top = bottom = int((diameter - shape[0])/2)
left = right = int((diameter - shape[1])/2)
# Add 1 pixel if necessary.
if shape[0] + top + bottom == diameter - 1:
top += 1
if shape[1] + left + right == diameter - 1:
left += 1
# Define border of the image.
border = [top, bottom, left, right]
# Add border.
copy = cv2.copyMakeBorder(copy, *border,
borderType=cv2.BORDER_CONSTANT,
value=[0, 0, 0])
# Return the image.
return copy
| 5,342,443
|
def _create_qApp():
"""
Create QApplication if one does not exist. Return QApplication.instance().
Vendored from matplotlib.backends.backend_qt5 with changes:
- Assume Qt5, removing tolerance for Qt4.
- Application has been changed (matplotlib -> bluesky).
"""
global qApp
if qApp is None:
app = QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
try:
from PyQt5 import QtX11Extras # noqa
is_x11_build = True
except ImportError:
is_x11_build = False
else:
is_x11_build = hasattr(QtGui, "QX11Info")
if is_x11_build:
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
try:
QApplication.setAttribute(
QtCore.Qt.AA_EnableHighDpiScaling)
except AttributeError: # Attribute only exists for Qt>=5.6.
pass
qApp = QApplication(["bluesky"])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
try:
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
except AttributeError:
pass
| 5,342,444
|
def _pad_X_delta(X, delta, indices, padded_group_size):
"""Currently Unused."""
X_group = onp.take(X, indices, axis=0)
X_group = onp.pad(X_group, [(0, padded_group_size - X_group.shape[0]),
(0, 0)])
delta_group = onp.take(delta, indices, axis=0)
delta_group = onp.pad(delta_group, (
0,
padded_group_size - delta_group.shape[0],
))
return X_group, delta_group
| 5,342,445
|
def test_ratelimit_bg_on(ctx):
"""
Test resolving a ratelimited domain with a background worker.
"""
ctx.set_option("ratelimit:", "1")
ctx.set_option("ratelimit-factor:", "0")
total_runs = 6
success_threshold = 4 # 2/3*total_runs
successes = 0
for i in range(total_runs):
cb_data = dict(done=False)
cb_data2 = dict(done=False)
retval, async_id = ctx.resolve_async(qname, cb_data, callback, qtype, qclass)
retval, async_id = ctx.resolve_async(qname2, cb_data2, callback, qtype, qclass)
while retval == 0 and not (cb_data['done'] and cb_data2['done']):
time.sleep(0.1)
retval = ctx.process()
if bool(cb_data.get('was_ratelimited')) ^ bool(cb_data2.get('was_ratelimited')):
successes += 1
if successes >= success_threshold:
break
time.sleep(1)
if successes >= success_threshold:
print("Ratelimit-bg-on: pass")
else:
print("Failed ratelimit-bg-on")
| 5,342,446
|
def union(x, y=None):
"""Get sorted list of elements combined for two iterables."""
x, y = de_list_pair(x, y)
return sorted(list(set(x) | set(y)))
| 5,342,447
|
def is_tuple(typ) -> bool:
"""
Test if the type is `typing.Tuple`.
"""
try:
return issubclass(get_origin(typ), tuple)
except TypeError:
return typ in (Tuple, tuple)
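# Usage sketch (added for illustration; Tuple and get_origin are assumed to be
# imported at module level, as the snippet implies):
from typing import List, Tuple
assert is_tuple(Tuple[int, str])
assert is_tuple(tuple)
assert not is_tuple(List[int])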
| 5,342,448
|
def _skip_if(cond, reason):
"""Skip test if cond(self) is True"""
def decorator(impl):
@functools.wraps(impl)
def wrapper(self, *args, **kwargs):
if cond(self):
raise unittest.SkipTest(reason)
else:
impl(self, *args, **kwargs)
return wrapper
return decorator
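# Usage sketch (added for illustration, not part of the original snippet):
# the decorated test is skipped whenever the predicate on the instance is true.
import unittest

class _SkipIfExample(unittest.TestCase):
    slow_hardware = True

    @_skip_if(lambda self: self.slow_hardware, "hardware too slow for this test")
    def test_expensive(self):
        self.fail("should have been skipped")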
| 5,342,449
|
def run_loop(agents, env, max_frames=0):
"""A run loop to have agents and an environment interact."""
total_frames = 0
start_time = time.time()
observation_spec = env.observation_spec()
action_spec = env.action_spec()
for agent, obs_spec, act_spec in zip(agents, observation_spec, action_spec):
agent.setup(obs_spec, act_spec)
try:
while True:
timesteps = env.reset()
for a in agents:
a.reset()
while True:
total_frames += 1
# RGP {
standard_agents = [agent for agent in agents if not isinstance(agent, multistep_agent.MultistepAgent)]
multiagent_agents = [agent for agent in agents if isinstance(agent, multistep_agent.MultistepAgent)]
# Standard actions
actions = [agent.step(timestep)
for agent, timestep in zip(standard_agents, timesteps)]
# Multistep actions
multiagent_actions_list = [agent.multistep(timestep) for agent, timestep in zip(multiagent_agents, timesteps)] # list of lists
multiagent_actions = [action for sublist in multiagent_actions_list for action in sublist] # flatten the list to get a list of actions
# } RGP
if max_frames and total_frames >= max_frames:
return
if timesteps[0].last():
break
timesteps = env.step(actions + multiagent_actions) # RGP: (+ multiagent_actions)
except KeyboardInterrupt:
pass
finally:
elapsed_time = time.time() - start_time
print("Took %.3f seconds for %s steps: %.3f fps" % (
elapsed_time, total_frames, total_frames / elapsed_time))
| 5,342,450
|
def main():
""" This is a prefix calculator """
print("This is a prefix calculator. The first 3 goes are free!\nType \"q\" to quit.")
goes = 0
while goes < 3:
initial_input = get_input()
#initial_input = "(+ 2 (^ 3 3) (* 3 2))"
check_input(initial_input)
answer = chew_through_nests(initial_input)
print("= " + str(answer))
goes += 1
| 5,342,451
|
def _smash_all(job_context: Dict) -> Dict:
"""Perform smashing on all species/experiments in the dataset.
"""
start_smash = log_state("start smash", job_context["job"].id)
# We have already failed - return now so we can send our fail email.
if job_context['job'].success is False:
return job_context
try:
job_context['unsmashable_files'] = []
job_context['num_samples'] = 0
# Smash all of the sample sets
logger.debug("About to smash!",
dataset_count=len(job_context['dataset'].data),
job_id=job_context['job'].id)
# Once again, `key` is either a species name or an experiment accession
for key, input_files in job_context.pop('input_files').items():
job_context = _smash_key(job_context, key, input_files)
smashing_utils.write_non_data_files(job_context)
# Finally, compress all files into a zip
final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
job_context["output_file"] = final_zip_base + ".zip"
except Exception as e:
logger.exception("Could not smash dataset.",
dataset_id=job_context['dataset'].id,
processor_job_id=job_context['job_id'],
num_input_files=job_context['num_input_files'])
job_context['dataset'].success = False
job_context['job'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].save()
# Delay failing this pipeline until the failure notify has been sent
job_context['job'].success = False
job_context['failure_reason'] = str(e)
return job_context
job_context['dataset'].success = True
job_context['dataset'].save()
logger.debug("Created smash output!",
archive_location=job_context["output_file"])
log_state("end smash", job_context["job"].id, start_smash);
return job_context
| 5,342,452
|
def permute_columns(df,
column_to_order: str,
ind_permute: bool = False,
columns_to_permute: list = []):
"""
Author: Allison Wu
Description: This function permutes the columns specified in columns_to_permute
:param df:
:param column_to_order:
:param ind_permute:
:param columns_to_permute:
:return: permuted_df
"""
window = Window.partitionBy().orderBy(col(column_to_order))
window_rand = Window.partitionBy().orderBy(rand())
df = df. \
withColumn('id', func.row_number().over(window)). \
withColumn('rand_id', func.row_number().over(window_rand))
rand_df = df. \
select(['rand_id'] + columns_to_permute).\
withColumnRenamed('rand_id', 'id')
for c in columns_to_permute:
rand_df = rand_df.\
withColumnRenamed(c, f'rand_{c}')
permuted_df = df.join(rand_df, ['id'], how = 'inner').cache()
return permuted_df
| 5,342,453
|
def reset_dismissed(institute_id, case_name):
"""Reset all dismissed variants for a case"""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
controllers.reset_all_dimissed(store, institute_obj, case_obj)
return redirect(request.referrer)
| 5,342,454
|
def create_xst_script(config):
"""
given the configuration file create a script that will
build the verilog files declared within the configuration file
Args:
config (dictionary): configuration dictionary
Return:
(string) script file name
Raises:
Nothing
"""
xst_abs_dir = create_xst_dir(config)
flags = get_xst_flags(config)
#print "Flags: %s" % str(flags)
xst_dir = os.path.join(config["build_dir"], XST_DIR)
temp_dir = create_temp_dir(config)
project_dir = os.path.join(xst_dir, PROJECT_FILENAME)
top_module = config["top_module"]
output_file = os.path.join(xst_dir, top_module)
xst_script_fn = os.path.join(xst_abs_dir, XST_SCRIPT_FILENAME)
fp = open(xst_script_fn, "w")
fp.write("set -tmpdir \"%s\"%s" % (temp_dir, os.linesep))
fp.write("set -xsthdpdir \"%s\"%s" % (xst_dir, os.linesep))
#fp.write("set -xsthdpini \"%s\"%s" % (xst_dir, os.linesep))
fp.write("run%s" % os.linesep)
fp.write("-ifn %s%s" % (project_dir, os.linesep))
fp.write("-ofn %s%s" % (output_file, os.linesep))
fp.write("-ofmt NGC%s" % (os.linesep))
fp.write("-p %s%s" % (config["device"], os.linesep))
fp.write("-top %s%s" % (top_module, os.linesep))
coregen_files = coregen_utils.get_target_files(config)
if len(coregen_files) > 0:
fp.write("-sd %s%s" % (coregen_utils.get_coregen_dir(config, absolute = True), os.linesep))
#print "flags[lso] = %s" % str(flags["-lso"]["value"])
if ("-lso" not in flags.keys()) or (len(flags["-lso"]["value"]) == 0):
#print "creating custom lso file"
flags["-lso"]["value"] = create_lso_file(config)
for flag in flags:
if len(flags[flag]["value"]) == 0:
continue
#print "flag: %s: %s" % (flag, flags[flag]["value"])
fp.write("%s %s%s" % (flag, flags[flag]["value"], os.linesep))
fp.close()
return xst_script_fn
| 5,342,455
|
def fetch_quarters():
"""
This method fetches all sections in the roadmap project, then
using some regex magic, filters out all sections that are not
named like quarter names. For example:
Q1 2020
q1 2021
q3 2020
are all matches.
"""
sections = ASANA_CLIENT.sections.find_by_project(ASANA_ROADMAP_PROJECT_GID)
for section in sections:
if re.match("Q\d{1}\s\d{4}", section["name"], re.IGNORECASE):
yield {"name": section["name"], "gid": section["gid"]}
| 5,342,456
|
def texts_from_array(x_train, y_train, x_test=None, y_test=None,
class_names = [],
max_features=MAX_FEATURES, maxlen=MAXLEN,
val_pct=0.1, ngram_range=1, preprocess_mode='standard', verbose=1):
"""
Loads and preprocesses text data from arrays.
Args:
x_train(list): list of training texts
y_train(list): list of integers representing classes
x_test(list): list of validation texts
y_test(list): list of integers representing classes
class_names (list): list of strings representing class labels
shape should be (num_examples,1) or (num_examples,)
max_features(int): max num of words to consider in vocabulary
maxlen(int): each document can be of most <maxlen> words. 0 is used as padding ID.
ngram_range(int): size of multi-word phrases to consider
e.g., 2 will consider both 1-word phrases and 2-word phrases
limited by max_features
val_pct(float): Proportion of training to use for validation.
Has no effect if x_test and y_test are supplied.
preprocess_mode (str): Either 'standard' (normal tokenization) or 'bert'
tokenization and preprocessing for use with
BERT text classification model.
verbose (boolean): verbosity
"""
if not class_names:
classes = list(set(y_train))
classes.sort()
class_names = ["%s" % (c) for c in classes]
if x_test is None or y_test is None:
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=val_pct)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# return preprocessed the texts
preproc_type = tpp.TEXT_PREPROCESSORS.get(preprocess_mode, None)
if preproc_type is None: raise ValueError('unsupported preprocess_mode')
preproc = preproc_type(maxlen,
max_features,
classes = class_names,
ngram_range=ngram_range)
trn = preproc.preprocess_train(x_train, y_train, verbose=verbose)
val = preproc.preprocess_test(x_test, y_test, verbose=verbose)
return (trn, val, preproc)
| 5,342,457
|
def display2(depts, level=0):
"""
[[a, 1], [b, 2], [c, 3], [d, 3], [a, 1]]
:param depts:
:return:
"""
lists = []
for d in depts:
lists.append([d, level])
children = Department.objects.filter(parent_id=d.id)
if children:
lists.extend(display2(children, level + 1))
return lists
| 5,342,458
|
def _get_target_id_to_skill_opportunity_dict(suggestions):
"""Returns a dict of target_id to skill opportunity summary dict.
Args:
suggestions: list(BaseSuggestion). A list of suggestions to retrieve
opportunity dicts.
Returns:
dict. Dict mapping target_id to corresponding skill opportunity dict.
"""
target_ids = set(s.target_id for s in suggestions)
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in opportunity_services.get_skill_opportunities_by_ids(
list(target_ids)).items()
}
opportunity_id_to_skill = {
skill.id: skill
for skill in skill_fetchers.get_multi_skills([
opp['id']
for opp in opportunity_id_to_opportunity_dict.values()
if opp is not None])
}
for opp_id, skill in opportunity_id_to_skill.items():
if skill is not None:
opportunity_id_to_opportunity_dict[opp_id]['skill_rubrics'] = [
rubric.to_dict() for rubric in skill.rubrics]
return opportunity_id_to_opportunity_dict
| 5,342,459
|
def test_bah(S):
""" Fees for BAH should be equal to 1 * fee. """
FEE = 0.01
result = algos.BAH().run(S)
wealth_no_fees = result.total_wealth
result.fee = FEE
wealth_with_fees = result.total_wealth
assert abs(wealth_no_fees * (1 - FEE) - wealth_with_fees) < EPS
| 5,342,460
|
def test_check_tag_exist_only_key_true():
"""
GIVEN List of EC2 instance tags.
WHEN check_tag_exist() is called.
THEN The key is on the provided list of tags.
"""
dummy_tags = [
{'Key': 'Environment', 'Value': 'dev'},
{'Key': 'Project', 'Value': 'ansible-lab'},
{'Key': 'Name', 'Value': 'ansible-0'}
]
assert check_tag_exist(tags=dummy_tags, tag_key='Project', tag_value='')
assert check_tag_exist(tags=dummy_tags, tag_key='Project')
| 5,342,461
|
def default_loc_scale_fn(
is_singular=False,
loc_initializer=tf.random_normal_initializer(stddev=0.1),
untransformed_scale_initializer=tf.random_normal_initializer(
mean=-3., stddev=0.1),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None):
"""Makes closure which creates `loc`, `scale` params from `tf.get_variable`.
This function produces a closure which produces `loc`, `scale` using
`tf.get_variable`. The closure accepts the following arguments:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Args:
is_singular: Python `bool` indicating if `scale is None`. Default: `False`.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result has mean
approximately `0.05` and std. deviation approximately `0.005`.
loc_regularizer: Regularizer function for the `loc` parameters.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters. The default (`None`) is to use the `tf.get_variable` default.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training. The default
(`None`) is to use the `tf.get_variable` default.
Returns:
default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale`
parameters from args: `dtype, shape, name, trainable, add_variable_fn`.
"""
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates `loc`, `scale` parameters."""
loc = add_variable_fn(
name=name + '_loc',
shape=shape,
initializer=loc_initializer,
regularizer=loc_regularizer,
constraint=loc_constraint,
dtype=dtype,
trainable=trainable)
if is_singular:
return loc, None
untransformed_scale = add_variable_fn(
name=name + '_untransformed_scale',
shape=shape,
initializer=untransformed_scale_initializer,
regularizer=untransformed_scale_regularizer,
constraint=untransformed_scale_constraint,
dtype=dtype,
trainable=trainable)
scale = (np.finfo(dtype.as_numpy_dtype).eps +
tf.nn.softplus(untransformed_scale))
return loc, scale
return _fn
| 5,342,462
|
def db_eval_boundary(args):
"""
Compute mean, recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
Returns:
F (float): boundaries F-measure
P (float): boundaries precision
R (float): boundaries recall
"""
foreground_mask, gt_mask, ignore_mask, bound_th, class_id, pred_is_boundary = args
assert np.atleast_3d(foreground_mask).shape[2] == 1
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))
# print(bound_pix)
# print(gt.shape)
# print(np.unique(gt))
foreground_mask[ignore_mask] = 0
gt_mask[ignore_mask] = 0
# Get the pixel boundaries of both masks
if pred_is_boundary:
fg_boundary = foreground_mask
else:
fg_boundary = seg2bmap(foreground_mask)
gt_boundary = seg2bmap(gt_mask)
from skimage.morphology import disk
from cv2 import dilate
def binary_dilation(x, d): return dilate(
x.astype(np.uint8), d).astype(bool)
fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F, precision
| 5,342,463
|
def angle_trunc(a):
"""
helper function to map all angles onto [-pi, pi]
"""
while a < 0.0:
a += pi * 2
return ((a + pi) % (pi * 2)) - pi
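# Usage sketch (added for illustration): angles wrap into [-pi, pi].
from math import isclose, pi
assert isclose(angle_trunc(3 * pi / 2), -pi / 2)
assert isclose(angle_trunc(-pi / 2), -pi / 2)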
| 5,342,464
|
def get_tag_or_default(
alignment: pysam.AlignedSegment, tag_key: str, default: Optional[str] = None
) -> Optional[str]:
"""Extracts the value associated to `tag_key` from `alignment`, and returns a default value
if the tag is not present."""
try:
return alignment.get_tag(tag_key)
except KeyError:
return default
| 5,342,465
|
def vtln_warp_mel_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq,
vtln_warp_factor, mel_freq):
"""
Inputs:
vtln_low_cutoff (float): lower frequency cutoffs for VTLN
vtln_high_cutoff (float): upper frequency cutoffs for VTLN
low_freq (float): lower frequency cutoffs in mel computation
high_freq (float): upper frequency cutoffs in mel computation
vtln_warp_factor (float): Vtln warp factor
mel_freq (Tensor): given frequency in Mel
Outputs:
Tensor: mel_freq after vtln warp
"""
return mel_scale(vtln_warp_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq,
vtln_warp_factor, inverse_mel_scale(mel_freq)))
| 5,342,466
|
def _squared_loss_and_spatial_grad_derivative(X, y, w, mask, grad_weight):
"""
Computes the derivative of _squared_loss_and_spatial_grad.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Design matrix.
y : ndarray, shape (n_samples,)
Target / response vector.
w : ndarray shape (n_features,)
Unmasked, ravelized weights map.
grad_weight: float
l1_ratio * alpha
Returns
-------
ndarray, shape (n_features,)
Derivative of _squared_loss_and_spatial_grad function.
"""
data_section = np.dot(X, w) - y
image_buffer = np.zeros(mask.shape)
image_buffer[mask] = w
return (np.dot(X.T, data_section)
- grad_weight * _div(_gradient(image_buffer))[mask])
| 5,342,467
|
def destructor(cfunc):
"""
Make a C function a destructor.
Destructors accept pointers to void pointers as argument. They are also wrapped as a staticmethod for usage in
classes.
:param cfunc: The C function as imported by ctypes.
:return: The configured destructor.
"""
cfunc.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
cfunc.restype = None
return staticmethod(cfunc)
| 5,342,468
|
def invalid_auth_header(jwt):
"""Produce invalid JWT tokens for use in tests."""
return {'Authorization': 'Bearer ' + jwt.create_jwt(claims=TestJwtClaims.invalid, header=JWT_HEADER)}
| 5,342,469
|
def update_file_info_in_job(job, file_infos):
"""
Update the 'setup.package.fileInformations' data in the JSON to append new file information.
"""
for file_info in file_infos:
try:
job['setup']['package']['fileInformations'].append(file_info)
except (KeyError, TypeError, AttributeError):
# If we get here, 'setup.package.fileInformations' does not exist yet.
print('Job file input is missing required setup.package.fileInformations data.')
exit(1)
return job
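# Usage sketch (added for illustration): appends file info to a minimal,
# well-formed job dictionary.
example_job = {"setup": {"package": {"fileInformations": []}}}
example_job = update_file_info_in_job(example_job, [{"name": "a.bin", "size": 123}])
assert example_job["setup"]["package"]["fileInformations"] == [{"name": "a.bin", "size": 123}]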
| 5,342,470
|
def _FirstStatementsInScriptElements(contents):
"""Returns a list of first statements found in each <script> element."""
soup = parse_html.BeautifulSoup(contents)
script_elements = soup.find_all('script', src=None)
return [_FirstStatement(e.get_text()) for e in script_elements]
| 5,342,471
|
def load_image(input_file_path):
"""
Load the 'input_file_path' and return a 2D numpy array of the image it contains.
"""
image_array = np.array(pil_img.open(input_file_path).convert('L'))
return image_array
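# Usage sketch (added for illustration; pil_img is assumed to be the
# `from PIL import Image as pil_img` alias the snippet uses): round-trips a
# small generated image so the example needs no existing file.
import os
import tempfile
import numpy as np
from PIL import Image as pil_img
_tmp_png = os.path.join(tempfile.mkdtemp(), "example.png")
pil_img.fromarray(np.zeros((8, 8), dtype=np.uint8)).save(_tmp_png)
assert load_image(_tmp_png).shape == (8, 8)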
| 5,342,472
|
def pickle_compat_enforcer(obj):
"""i only need to make 1 distinction: container?"""
| 5,342,473
|
def ask_the_user(runner: Runner) -> Direction:
"""Ask the user what to do (in absolute UP, DOWN, etc.)"""
return runner.ask_absolute()
| 5,342,474
|
def patents_hgh(path):
"""Dynamic Relation Between Patents and R\\&D
a panel of 346 observations from 1975 to 1979
*number of observations* : 1730
*observation* : production units
*country* : United States
A dataframe containing :
obsno
firm index
year
year
cusip
Compustat's identifying number for the firm (Committee on Uniform
Security Identification Procedures number)
ardsic
a two-digit code for the applied R&D industrial classification
(roughly that in Bound, Cummins, Griliches, Hall, and Jaffe, in the
Griliches R&D, Patents, and Productivity volume)
scisect
is the firm in the scientific sector ?
logk
the logarithm of the book value of capital in 1972.
sumpat
the sum of patents applied for between 1972-1979.
logr
the logarithm of R&D spending during the year (in 1972 dollars)
logr1
the logarithm of R&D spending (one year lag)
logr2
the logarithm of R&D spending (two years lag)
logr3
the logarithm of R&D spending (three years lag)
logr4
the logarithm of R&D spending (four years lag)
logr5
the logarithm of R&D spending (five years lag)
pat
the number of patents applied for during the year that were
eventually granted
pat1
the number of patents (one year lag)
pat2
the number of patents (two years lag)
pat3
the number of patents (three years lag)
pat4
the number of patents (four years lag)
Hall, Bronwyn , Zvi Griliches and Jerry Hausman (1986) “Patents and R&D:
Is There a Lag?”, *International Economic Review*, **27**, 265-283.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `patents_hgh.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1730 rows and 18 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'patents_hgh.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/PatentsHGH.csv'
maybe_download_and_extract(path, url,
save_file_name='patents_hgh.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
| 5,342,475
|
def available_domain_names():
"""
This function takes godaddy credentials from the user and generates all the
available domain names in a text file.
"""
godaddy_credentials()
domain_name = input("\nEnter required DOMAIN Name: ")
url = get_url(domain_name)
print("\nSearching for the available domains")
res = requests.get(url, headers=req_headers)
if res.status_code == 200:
        # The response body is JSON, so parse it.
        response = res.json()
        # Save the available domain names to a text file.
        print(f"\nSaving all the available domain names in {domain_name}.txt file")
        with open(f"{domain_name}.txt", "a") as f:
            for i in response:
                f.write(i["domain"] + "\n")
        print(f"\nFile {domain_name}.txt saved successfully in your current directory")
    else:
        print(f"Error: request failed with status code {res.status_code}")
| 5,342,476
|
def explore_pca(x_train, y_train, n_components=TOTAL_PCA_COMPONENTS):
"""Create plots of Principal Component Analysis decomposition
Find the first TOTAL_PCA_COMPONENTS PCA components of the argument. Create
a plot of the explained variance ratio and a plot of the cumulative sum of
the explained variance ratio, over the number of PCA components used. Save
the explained variance ratio to a comma-separated file. Create a scatter
plot of the data points versus the first two principal components, coloured
    by classification target. Create a line plot of the first three principal
    components.
Args:
x_train: pandas.DataFrame
Training data to decompose using Principal Component Analysis
y_train: pandas.Series
Classification targets
n_components : int, Optional, default 60
Number of PCA components to use in plots and csv file
Returns:
None
"""
pca = PCA(n_components=n_components)
transformed = pca.fit_transform(x_train)
components = pca.components_
var_ratio = pd.Series(pca.explained_variance_ratio_)
sum_var_ratio = var_ratio.cumsum()
var_ratio.to_csv(os.path.join('outputs', 'var_ratio.csv'), header=True)
plot_explained_variance(var_ratio, os.path.join('outputs', 'var_ratio.png'))
plot_sum_explained_variance(sum_var_ratio,
os.path.join('outputs', 'var_ratio_sum.png'))
first_two_pca_scatter(transformed[:, :2], y_train,
os.path.join('outputs', 'two_pca_components.png'))
plot_components(components[:3, :], os.path.join('outputs',
'pca_components.png'))
| 5,342,477
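For reference, a self-contained sketch of the core PCA bookkeeping used above (fit, explained-variance ratio, cumulative sum) on random data; plotting and file output are omitted, and the shapes are illustrative.

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
x_train = pd.DataFrame(rng.normal(size=(200, 20)))  # toy stand-in for the real features

pca = PCA(n_components=10)
transformed = pca.fit_transform(x_train)
var_ratio = pd.Series(pca.explained_variance_ratio_)
sum_var_ratio = var_ratio.cumsum()
print(sum_var_ratio.iloc[-1])  # fraction of variance captured by the 10 components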
|
def assert_allclose(actual: List[int], desired: numpy.ndarray):
"""
usage.scipy: 4
"""
...
| 5,342,478
|
def test_pvt():
"""Test PVT backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
PyramidVisionTransformer(pretrained=123)
# test pretrained image size
with pytest.raises(AssertionError):
PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))
# Test absolute position embedding
temp = torch.randn((1, 3, 224, 224))
model = PyramidVisionTransformer(
pretrain_img_size=224, use_abs_pos_embed=True)
model.init_weights()
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 33, 33))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 28, 34)
assert outs[1].shape == (1, 128, 14, 17)
assert outs[2].shape == (1, 320, 7, 8)
assert outs[3].shape == (1, 512, 3, 4)
| 5,342,479
|
def set_config(variable,value):
"""
This function is used to reset global environment variables.
Following variables can be accessed:
- X: Transformed dataset (X)
- y: Transformed dataset (y)
- X_train: Transformed train dataset (X)
- X_test: Transformed test/holdout dataset (X)
- y_train: Transformed train dataset (y)
- y_test: Transformed test/holdout dataset (y)
- seed: random state set through session_id
- prep_pipe: Transformation pipeline configured through setup
- target_inverse_transformer: Target variable inverse transformer
- folds_shuffle_param: shuffle parameter used in Kfolds
- n_jobs_param: n_jobs parameter used in model training
- html_param: html_param configured through setup
- create_model_container: results grid storage container
- master_model_container: model storage container
- display_container: results display container
- exp_name_log: Name of experiment set through setup
- logging_param: log_experiment param set through setup
- log_plots_param: log_plots param set through setup
- USI: Unique session ID parameter set through setup
- data_before_preprocess: data before preprocessing
- target_param: name of target variable
- gpu_param: use_gpu param configured through setup
Example
--------
>>> set_config('seed', 123)
This will set the global seed to '123'.
"""
import logging
try:
hasattr(logger, 'name')
    except Exception:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing set_config()")
logger.info("""set_config(variable={}, value={})""".\
format(str(variable), str(value)))
if variable == 'X':
global X
X = value
if variable == 'y':
global y
y = value
if variable == 'X_train':
global X_train
X_train = value
if variable == 'X_test':
global X_test
X_test = value
if variable == 'y_train':
global y_train
y_train = value
if variable == 'y_test':
global y_test
y_test = value
if variable == 'seed':
global seed
seed = value
if variable == 'prep_pipe':
global prep_pipe
prep_pipe = value
if variable == 'target_inverse_transformer':
global target_inverse_transformer
target_inverse_transformer = value
if variable == 'folds_shuffle_param':
global folds_shuffle_param
folds_shuffle_param = value
if variable == 'n_jobs_param':
global n_jobs_param
n_jobs_param = value
if variable == 'html_param':
global html_param
html_param = value
if variable == 'create_model_container':
global create_model_container
create_model_container = value
if variable == 'master_model_container':
global master_model_container
master_model_container = value
if variable == 'display_container':
global display_container
display_container = value
if variable == 'exp_name_log':
global exp_name_log
exp_name_log = value
if variable == 'logging_param':
global logging_param
logging_param = value
if variable == 'log_plots_param':
global log_plots_param
log_plots_param = value
if variable == 'USI':
global USI
USI = value
if variable == 'data_before_preprocess':
global data_before_preprocess
data_before_preprocess = value
if variable == 'target_param':
global target_param
target_param = value
if variable == 'gpu_param':
global gpu_param
gpu_param = value
logger.info("Global variable: " + str(variable) + ' updated')
logger.info("set_config() succesfully completed......................................")
| 5,342,480
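The long if-chain above could be collapsed into a whitelist lookup over globals(); the sketch below illustrates that alternative and is not pycaret's actual implementation. The set_config_compact name is hypothetical.

_ALLOWED_CONFIG = {
    'X', 'y', 'X_train', 'X_test', 'y_train', 'y_test', 'seed', 'prep_pipe',
    'target_inverse_transformer', 'folds_shuffle_param', 'n_jobs_param',
    'html_param', 'create_model_container', 'master_model_container',
    'display_container', 'exp_name_log', 'logging_param', 'log_plots_param',
    'USI', 'data_before_preprocess', 'target_param', 'gpu_param',
}

def set_config_compact(variable, value):
    """Set a whitelisted module-level variable to the given value."""
    if variable not in _ALLOWED_CONFIG:
        raise ValueError(f"Unknown config variable: {variable}")
    globals()[variable] = value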
|
def optimize_spot_bid(ctx, instance_type, spot_bid):
"""
    Checks whether the bid is sane and makes an effort to place the instance in a sensible zone.
"""
spot_history = _get_spot_history(ctx, instance_type)
if spot_history:
_check_spot_bid(spot_bid, spot_history)
zones = ctx.ec2.get_all_zones()
most_stable_zone = choose_spot_zone(zones, spot_bid, spot_history)
logger.info("Placing spot instances in zone %s.", most_stable_zone)
return most_stable_zone
| 5,342,481
|
def test_search_with_type(context):
"""
Search with type filter
"""
# When create a query block
t = QuerySet("localhost", index="foo")
# And there are records
add_document("foo", {"bar": 1})
add_document("foo", {"bar": 2})
add_document("foo", {"bar": 3}, doc_type="bar")
# And I add a type filter
_type = Type("bar")
t.filter(_type)
results = t[0:10]
# Then my results only have that type
len(results).should.equal(1)
results[0]["_source"]["bar"].should.equal(3)
| 5,342,482
|
def _generator(batch_size, classes, X, y, augment):
"""Generate batches of training data forever."""
while 1:
batch_X, batch_y = [], []
for i in range(batch_size):
# random.seed(random.randint(0, 9001))
class_i = random.randint(0, NUM_CLASSES - 1)
# sample_index = random.randint(0, len(classes[class_i]) - 1)
sample_index = random.choice(classes[class_i])
command = y[sample_index]
            if args.multi:
                file_names = [img_dir_l + X[sample_index][0],
                              img_dir_r + X[sample_index][1]]
                shape = (shapeY, shapeX / 2)
            else:
                file_names = [img_dir + X[sample_index]]
                shape = (shapeY, shapeX / 2)
image, command = process_image(file_names, command, augment=augment, multi=args.multi, shape=shape)
batch_X.append(image)
batch_y.append(command)
yield np.array(batch_X), np.array(batch_y)
| 5,342,483
|
def new_hassle_participants():
"""Select participants for the room helpers."""
# Get a list of all current members.
members = helpers.get_all_members()
return flask.render_template('hassle_new_participants.html', members=members)
| 5,342,484
|
def main():
"""Main program."""
initialize_megatron(extra_args_provider=add_text_generate_args,
args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
# Set up model and load checkpoint.
model = get_model(model_provider)
args = get_args()
if args.load is not None:
_ = load_checkpoint(model, None, None)
# Generate samples.
if args.num_samples == 0:
args.micro_batch_size = 1
        if args.sample_input_file is not None:
generate_samples_input_from_file(model)
else:
generate_samples_interactive(model)
else:
generate_and_write_samples_unconditional(model)
| 5,342,485
|
def data_store_remove_folder(request):
"""
remove a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
    resource backend store. It is invoked by an AJAX call and returns a json object that includes a
    status of 'success' if it succeeds, and an HttpResponse with status code 401, 400, or 500 if it fails.
The AJAX request must be a POST request with input data passed in for res_id and folder_path
where folder_path is the relative path for the folder to be removed under
res_id collection/directory.
"""
res_id = request.POST.get('res_id', None)
if res_id is None:
return HttpResponse('Bad request - resource id is not included',
status=status.HTTP_400_BAD_REQUEST)
res_id = str(res_id).strip()
try:
resource, _, user = authorize(request, res_id,
needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
except NotFound:
return HttpResponse('Bad request - resource not found', status=status.HTTP_400_BAD_REQUEST)
except PermissionDenied:
return HttpResponse('Permission denied', status=status.HTTP_401_UNAUTHORIZED)
folder_path = request.POST.get('folder_path', None)
if folder_path is None:
return HttpResponse('Bad request - folder_path is not included',
status=status.HTTP_400_BAD_REQUEST)
folder_path = str(folder_path).strip()
if not folder_path:
return HttpResponse('Bad request - folder_path cannot be empty',
status=status.HTTP_400_BAD_REQUEST)
if not folder_path.startswith('data/contents/'):
return HttpResponse('Bad request - folder_path must start with data/contents/',
status=status.HTTP_400_BAD_REQUEST)
if folder_path.find('/../') >= 0 or folder_path.endswith('/..'):
return HttpResponse('Bad request - folder_path must not contain /../',
status=status.HTTP_400_BAD_REQUEST)
try:
remove_folder(user, res_id, folder_path)
except SessionException as ex:
return HttpResponse(ex.stderr, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except Exception as ex:
        return HttpResponse(str(ex), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return_object = {'status': 'success'}
return HttpResponse(
json.dumps(return_object),
content_type="application/json"
)
| 5,342,486
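An illustrative client-side call for the view above; the endpoint URL is hypothetical and authentication/CSRF handling is omitted.

import requests

resp = requests.post(
    "https://example.org/hsapi/_internal/data-store-remove-folder/",  # hypothetical URL
    data={"res_id": "abc123", "folder_path": "data/contents/old_folder"},
)
print(resp.status_code, resp.text)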
|
def distance(x1: float, y1: float, x2: float, y2: float) -> float:
"""
Finds distance between two given points
Parameters:
x1, y1 : The x and y coordinates of first point
x2, y2 : The x and y coordinates of second point
Returns:
        Distance rounded to two decimal places.
    """
    dist = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
    return round(dist, 2)
| 5,342,487
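A quick sanity check, assuming the distance function above is in scope; math.hypot computes the same quantity.

import math

assert distance(0.0, 0.0, 3.0, 4.0) == 5.0    # classic 3-4-5 triangle
assert round(math.hypot(3.0, 4.0), 2) == 5.0  # equivalent via math.hypot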
|
def load_target_class(input_dir):
"""Loads target classes."""
df = pd.read_csv(join(input_dir, "target_class.csv"), header=None, index_col=0, names=["Target"])
return df
| 5,342,488
|
def dict_to_tf_example(data,
dataset_directory,
label_map_path,
ignore_difficult_instances=False,
image_subdirectory='Images',
is_debug=False):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding PASCAL dataset
label_map_path: the prototxt file that contains a map from string label names to integers ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
    image_subdirectory: String specifying subdirectory within the
      PASCAL dataset directory holding the actual image data.
    is_debug: If True, display the image with its bounding boxes for visual
      inspection (default: False).
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])
full_path = os.path.join(dataset_directory, img_path)
with tf_gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if 'object' in data:
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(label_map_dict[obj['name']])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
if is_debug:
# Each box is ymin, xmin, ymax, xmax = box in [0, 1]
bboxes_array = np.array([ymin, xmin, ymax, xmax])
bboxes_array = np.transpose(bboxes_array)
classes_array = np.array(classes)
scores_array = None
        category_index = label_map_util.create_category_index_from_labelmap(
            label_map_path, use_display_name=True)
        display_bbox(np.array(image), bboxes_array, classes_array, scores_array, category_index)
return example
| 5,342,489
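A tiny worked example of the bounding-box normalization performed above: pixel coordinates are divided by the image width and height so they land in [0, 1]. The box values are illustrative.

width, height = 640, 480
bndbox = {'xmin': 64, 'ymin': 48, 'xmax': 320, 'ymax': 240}  # illustrative pixel box

xmin = bndbox['xmin'] / width    # 0.1
ymin = bndbox['ymin'] / height   # 0.1
xmax = bndbox['xmax'] / width    # 0.5
ymax = bndbox['ymax'] / height   # 0.5
print(xmin, ymin, xmax, ymax)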
|
def fix_labels(ply_gt, ply_seg):
"""
Remove extra vertices from the ground truth
"""
size = len(ply_gt.elements[0]["x"])
gt_x = np.array(ply_gt.elements[0]["x"])
seg_x = np.array(ply_seg.elements[0]["x"])
new_gt_label = np.zeros_like(seg_x)
gt_label = np.array(ply_gt.elements[0]["label"])
for i in range(size):
if seg_x.shape[0] > i:
if abs(gt_x[i] - seg_x[i]) < 1e-16:
new_gt_label[i] = gt_label[i]
    new_gt_label = clean_gt(new_gt_label).astype(int)
return new_gt_label
| 5,342,490
|
def visual(openPath, imgPath):
"""Visualize the use of homoglyph stegonography.
Args:
openPath (string): path to the text file to analyse
imgPath (string): image file path
"""
with open(openPath, encoding="utf-8") as openFile:
data = openFile.read()
visualAnsi = []
for char in data:
if ord(char) < 128:
visualAnsi.append(char)
else:
visualAnsi.append("\033[31m" + char + "\033[0m")
ansiToHTMLRaster("".join(visualAnsi), imgPath)
| 5,342,491
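A quick, self-contained demonstration of the ANSI wrapping used above: any character outside the ASCII range is wrapped in red escape codes when printed to a colour terminal. The sample string is illustrative.

text = "Hello, wоrld"  # the second 'о' is Cyrillic U+043E, a Latin 'o' homoglyph
highlighted = []
for char in text:
    if ord(char) < 128:
        highlighted.append(char)
    else:
        highlighted.append("\033[31m" + char + "\033[0m")
print("".join(highlighted))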
|
def test_set_and_get():
"""
    Check that new level values are stored under a key and can then be read back.
"""
Levels.set('kek', 10000)
assert Levels.get('kek') == 10000
Levels.set('kek', 5)
assert Levels.get('kek') == 5
| 5,342,492
|
def get_coin_price(api_url: str, currency: str) -> float:
"""
Get the USD price of a coin from Gemini
Args:
api_url: The API URL for Gemini
currency: The cryptocurrency the bot is monitoring
Returns:
coin_price: The price the coin currently holds in USD
"""
# Instantiate Gemini and query the price
    coin_price = -1
    price_feeds = None
    api_query = "/v1/pricefeed"
try:
price_feeds = requests.get(api_url + api_query).json()
for feed in price_feeds:
if feed.get('pair') == currency + "USD":
coin_price = float(feed.get('price'))
except Exception as err:
print("ERROR: Unable to get price due to %s" % err)
print("Price feed: %s" % price_feeds)
return coin_price
| 5,342,493
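Hypothetical usage of get_coin_price, assuming the function above is in scope; the base URL and currency below are illustrative values, not taken from the original module.

price = get_coin_price(api_url="https://api.gemini.com", currency="BTC")
if price > 0:
    print("Current BTC price: $%.2f" % price)
else:
    print("Price lookup failed")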
|
def GenerateAuthToken(key_name, user_id, action_id='', when=None):
"""Generates a URL-safe token based on XSRFToken but for generla purpose.
Args:
key_name (str): name of secret key to generate token.
user_id (str): the user ID of the authenticated user.
action_id (str): a string identifier of the action they requested
authorization for.
when (datetime): the time when the user was authorized for this action.
If not set the current utc time is used.
Returns:
A string token.
"""
key = SecretKey.GetSecretKey(key_name)
when = when or time_util.GetUTCNow()
when_timestamp = time_util.ConvertToTimestamp(when)
digester = hmac.new(key)
digester.update(str(user_id))
digester.update(_DELIMITER)
digester.update(action_id)
digester.update(_DELIMITER)
digester.update(str(when_timestamp))
digest = digester.digest()
return base64.urlsafe_b64encode('%s%s%d' % (digest, _DELIMITER,
when_timestamp))
| 5,342,494
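For comparison, a minimal Python 3 sketch of the same token scheme (HMAC over user id, action id and timestamp, joined by a delimiter, then urlsafe base64). The SecretKey and time_util helpers from the original module are replaced with plain stand-ins, and SHA-256 is an assumed choice of digest.

import base64
import hashlib
import hmac
import time
from typing import Optional

_DELIM = b':'

def generate_auth_token(key: bytes, user_id: str, action_id: str = '',
                        when_ts: Optional[int] = None) -> str:
    """Return a URL-safe token binding user_id, action_id and a timestamp."""
    when_ts = int(time.time()) if when_ts is None else when_ts
    msg = _DELIM.join([str(user_id).encode(), action_id.encode(), str(when_ts).encode()])
    digest = hmac.new(key, msg, hashlib.sha256).digest()
    return base64.urlsafe_b64encode(digest + _DELIM + str(when_ts).encode()).decode()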
|
def test_run_error(check_output_mock):
"""There was a bad command made, therefore no output"""
check_output_mock.side_effect = CalledProcessError(
returncode=2, cmd='Bad Command!')
# pytest.set_trace()
with catch_stdout() as caught_output:
with pytest.raises(SystemExit):
run('ls')
output = caught_output.getvalue().strip()
# since we're mocking CalledProcessError call, not sure we can simulate
# exception raised by actual check_output call, so e.output is None
assert output == 'None'
| 5,342,495
|
def run_optimization() -> None:
"""Run an optimization sequence based on preferential Bayesian optimization."""
num_dims = 5
strategy = pysls.CurrentBestSelectionStrategy.LastSelection
optimizer = pysls.PreferentialBayesianOptimizer(
num_dims=num_dims,
initial_query_generator=gen_initial_query,
current_best_selection_strategy=strategy)
optimizer.set_hyperparams(kernel_signal_var=0.50,
kernel_length_scale=0.10,
kernel_hyperparams_prior_var=0.10)
print("#iter,residual,value")
for i in range(30):
options = optimizer.get_current_options()
chosen_index = ask_human_for_feedback(options)
optimizer.submit_feedback_data(chosen_index)
optimizer.determine_next_query()
residual = np.linalg.norm(optimizer.get_maximizer() - 0.2)
value = calc_simulated_objective_func(optimizer.get_maximizer())
print("{},{},{}".format(i + 1, residual, value))
| 5,342,496
|
def download_dataset(file_url, file_name):
"""
Utility to download a dataset
"""
# %%
new_dir = up(up(up(up(os.path.abspath(__file__)))))
os.chdir(new_dir)
file_path = r'artificial_neural_networks/datasets/' + file_name
exists = os.path.isfile(file_path)
if exists:
print(file_name + ' already exists.')
print('You have to delete it first, if you want to re-download it.')
else:
urlretrieve(file_url, file_path)
        print(file_name + ' was downloaded successfully.')
# %%
return file_path
| 5,342,497
|
def get_dataset_psnr(device, model, dataset, source_img_idx_shift=64,
batch_size=10, max_num_scenes=None):
"""Returns PSNR for each scene in a dataset by comparing the view predicted
by a model and the ground truth view.
Args:
device (torch.device): Device to perform PSNR calculation on.
model (models.neural_renderer.NeuralRenderer): Model to evaluate.
dataset (misc.dataloaders.SceneRenderDataset): Dataset to evaluate model
performance on. Should be one of "chairs-test" or "cars-test".
source_img_idx_shift (int): Index of source image for each scene. For
example if 00064.png is the source view, then
source_img_idx_shift = 64.
        batch_size (int): Batch size to use when generating predictions. This
            should be a divisor of the number of images per scene minus one
            (the source view is excluded from comparisons).
max_num_scenes (None or int): Optionally limit the maximum number of
scenes to calculate PSNR for.
Notes:
This function should be used with the ShapeNet chairs and cars *test*
sets.
"""
num_imgs_per_scene = dataset.num_imgs_per_scene
# Set number of scenes to calculate
num_scenes = dataset.num_scenes
if max_num_scenes is not None:
num_scenes = min(max_num_scenes, num_scenes)
# Calculate number of batches per scene
    assert (num_imgs_per_scene - 1) % batch_size == 0, \
        "Batch size {} must divide number of images per scene minus one ({}).".format(
            batch_size, num_imgs_per_scene - 1)
# Comparison are made against all images except the source image (and
# therefore subtract 1 from total number of images)
batches_per_scene = (num_imgs_per_scene - 1) // batch_size
# Initialize psnr values
psnrs = []
for i in range(num_scenes):
# Extract source view
source_img_idx = i * num_imgs_per_scene + source_img_idx_shift
img_source = dataset[source_img_idx]["img"].unsqueeze(0).repeat(batch_size, 1, 1, 1).to(device)
render_params = dataset[source_img_idx]["render_params"]
azimuth_source = torch.Tensor([render_params["azimuth"]]).repeat(batch_size).to(device)
elevation_source = torch.Tensor([render_params["elevation"]]).repeat(batch_size).to(device)
# Infer source scene
scenes = model.inverse_render(img_source)
# Iterate over all other views of scene
num_points_in_batch = 0
data_list = []
scene_psnr = 0.
for j in range(num_imgs_per_scene):
if j == source_img_idx_shift:
continue # Do not compare against same image
# Add new image to list of images we want to compare to
data_list.append(dataset[i * num_imgs_per_scene + j])
num_points_in_batch += 1
# If we have filled up a batch, make psnr calculation
if num_points_in_batch == batch_size:
# Create batch for target data
img_target, azimuth_target, elevation_target = create_batch_from_data_list(data_list)
img_target = img_target.to(device)
azimuth_target = azimuth_target.to(device)
elevation_target = elevation_target.to(device)
# Rotate scene and render image
rotated = model.rotate_source_to_target(scenes, azimuth_source,
elevation_source, azimuth_target,
elevation_target)
img_predicted = model.render(rotated).detach()
scene_psnr += get_psnr(img_predicted, img_target)
data_list = []
num_points_in_batch = 0
psnrs.append(scene_psnr / batches_per_scene)
print("{}/{}: Current - {:.3f}, Mean - {:.4f}".format(i + 1,
num_scenes,
psnrs[-1],
torch.mean(torch.Tensor(psnrs))))
return psnrs
| 5,342,498
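For reference, a common PSNR definition for images scaled to [0, 1]; the get_psnr helper used above lives elsewhere in the codebase and may differ in details.

import torch

def psnr(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Peak signal-to-noise ratio for images with values in [0, 1]."""
    mse = torch.mean((pred - target) ** 2)
    return 10.0 * torch.log10(1.0 / mse)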
|
def smallest_continuous_multiple(max_multiple):
"""
Function takes an int, and returns the smallest natural number evenly divisible by all numbers
less than or equal to the input max_multiple.
REQ: max_multiple >= 0 and whole
:param max_multiple: {int}
    :return: smallest natural number evenly divisible by all numbers less than or equal to the input
Function reduces time complexity by iteratively removing redundant factors from the
check_list, ie. suppose 12 exists in the list, then 6, 4, 3, 2 and 1 will have been removed.
This check is done to remove the factors of every int, to reduce the lookup time later.
"""
# all numbers less than or equal to 2 are evenly divisible by themselves and below
if max_multiple <= 2:
return max_multiple
check_list = []
# make a list of all ints from 1 to input
for i in range(max_multiple):
check_list.append(i + 1)
# loop through check list backwards
for i in reversed(check_list):
# get factors of i without i included
temp_factors = get_factors(i)
temp_factors.remove(i)
# loop through the remaining factors, removing them from the check_list
for j in temp_factors:
try:
check_list.remove(j)
except ValueError:
pass
temp_num = max_multiple
    # loop indefinitely until we find the smallest int that satisfies the exit condition
while True:
# if all factors less than max_multiple divide evenly into curr, return
if all(temp_num % n == 0 for n in check_list):
return temp_num
else:
temp_num += max_multiple
| 5,342,499
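A compact cross-check of the same quantity via the LCM of 1..n, which is what the function computes; this sketch does not depend on the get_factors helper used above.

import math
from functools import reduce

def lcm_up_to(n: int) -> int:
    """LCM of all integers from 1 to n (returns 1 for n <= 1)."""
    return reduce(lambda a, b: a * b // math.gcd(a, b), range(1, n + 1), 1)

print(lcm_up_to(10))  # 2520, the smallest number divisible by every integer 1..10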
|