content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def convert_F_units(F, lbda, in_unit='cgs', out_unit='si'):
    """
    Convert flux density between [ergs s-1 cm-2 um-1], [W m-2 um-1],
    [ergs s-1 cm-2 AA-1] and [Jy].

    Parameters
    ----------
    F: float or 1d array
        Flux
    lbda: float or 1d array
        Wavelength of the flux (in um)
    in_unit: str, opt, {"si", "cgs", "jy", "cgsA"}
        Input flux units.
        'si': W/m^2/mu;
        'cgs': ergs/s/cm^2/mu
        'jy': janskys
        'cgsA': erg/s/cm^2/AA
    out_unit: str, opt {"si", "cgs", "jy"}
        Output flux units.

    Returns
    -------
    Flux in output units.

    Raises
    ------
    TypeError
        If ``in_unit`` or ``out_unit`` is not one of the supported values.
    """
    # Strategy: first convert the input to Jy, then from Jy to the output unit.
    # 1 Jy = 1e-26 W m^-2 Hz^-1; F_nu = F_lambda * lambda^2 / c.
    if in_unit == 'cgs':
        new_F = (F*1e23*np.power(lbda, 2))/(c.c.value*1e6)  # convert to jy
    elif in_unit == 'cgsA':
        # per-Angstrom to per-micron is a factor 1e4, hence 1e27 = 1e23 * 1e4
        new_F = (F*1e27*np.power(lbda, 2))/(c.c.value*1e6)  # convert to jy
    elif in_unit == 'si':
        new_F = (F*1e26*np.power(lbda, 2))/(c.c.value*1e6)  # convert to jy
    elif in_unit == "jy":
        new_F = F
    else:
        # BUGFIX: previous message omitted the supported 'cgsA' option
        msg = "in_unit not recognized, try either 'cgs', 'cgsA', 'si' or 'jy'."
        raise TypeError(msg)
    if out_unit == 'jy':
        return new_F
    elif out_unit == 'cgs':
        return new_F*1e-23*c.c.value*1e6/np.power(lbda, 2)
    elif out_unit == 'si':
        return new_F*1e-26*c.c.value*1e6/np.power(lbda, 2)
    else:
        msg = "out_unit not recognized, try either 'cgs', 'si' or 'jy'."
        raise TypeError(msg)
from game import Directions
def tiny_maze_search(problem):
    """Return the hard-coded move sequence that solves tiny_maze.

    The sequence is only valid for tiny_maze; on any other maze it will
    be incorrect.
    """
    south = Directions.SOUTH
    west = Directions.WEST
    return [south, south, west, south, west, west, south, west]
from ibmsecurity.appliance.ibmappliance import IBMError
import os.path
def export_metadata(isamAppliance, name, filename, check_mode=False, force=False):
    """
    Export a federation's metadata to a local file.
    """
    ret_obj = search(isamAppliance, name)
    fed_id = ret_obj['data']
    if fed_id == {}:
        raise IBMError("999", "Cannot export data from unknown federation: {0}".format(name))
    # Only download when forced, or when the target file does not exist yet.
    should_export = force is True or (fed_id != {} and os.path.exists(filename) is False)
    # No point downloading a file if in check_mode.
    if should_export and check_mode is False:
        return isamAppliance.invoke_get_file(
            "Export a federation",
            "{0}/{1}/metadata".format(uri, fed_id),
            filename,
            requires_modules=requires_modules,
            requires_version=requires_version)
    return isamAppliance.create_return_object()
def wkt_of_any(string):
    """Return a WKT spatial-reference string for arbitrary user input.

    First delegates to ``osr.GetUserInputAsWKT``; if that does not yield a
    string, tries to open the input as a raster dataset and then as a vector
    dataset, extracting the projection from whichever succeeds (the vector
    result takes precedence if both open).

    Raises ValueError when no projection can be extracted.
    """
    out = osr.GetUserInputAsWKT(string)
    if isinstance(out, str):
        return out
    else:
        prj = None
        # Disable osgeo exceptions so failed opens return None instead of raising.
        with Env(_osgeo_use_exceptions=False):
            # Try the input as a raster dataset first.
            gdal_ds = gdal.OpenEx(string, conv.of_of_str('raster'))
            if gdal_ds is not None:
                # NOTE(review): GetProjection() returns a WKT *string*; if the
                # vector branch below does not overwrite `prj`, the
                # `prj.ExportToWkt()` call further down would fail on a str —
                # confirm whether the raster path is actually exercised.
                prj = gdal_ds.GetProjection()
            # Then try it as a vector dataset (overrides any raster result).
            gdal_ds = gdal.OpenEx(string, conv.of_of_str('vector'))
            if gdal_ds is not None:
                lyr = gdal_ds.GetLayerByIndex(0)
                if lyr is not None:
                    prj = lyr.GetSpatialRef()
        if prj is not None:
            return prj.ExportToWkt()
        raise ValueError('Could not convert to wkt ({})'.format(str(gdal.GetLastErrorMsg()).strip('\n')))
import json
import traceback
def getDatial(request):
    """Return an article's metadata given its arxiv ID (GET param ``arxivID``).

    Also records the click in the session's ``last_read`` history.
    Returns an HttpResponse with the JSON-encoded metadata, or None when the
    article is unknown or an exception occurs (the traceback is printed).
    """
    try:
        arxiv_id = request.GET.get("arxivID")
        arxiv_doc = get_arxiv_document_by_id(arxiv_id=arxiv_id)
        if arxiv_doc != None:
            # Initialize the session history on first use.
            if 'last_read' not in request.session or request.session['last_read'] == None:
                request.session['last_read'] = []
            history = request.session['last_read']
            history.append(arxiv_id)
            # Re-assign so the session backend notices the modification.
            request.session['last_read'] = history
            # Field order matches the original hand-written dict construction.
            fields = (
                'arxiv_id', 'submitter', 'authors', 'title', 'comments',
                'doi', 'report_no', 'categories', 'journal_ref', 'license',
                'abstract', 'versions', 'update_date', 'authors_parsed',
            )
            ret_dict = {field: getattr(arxiv_doc, field) for field in fields}
            return HttpResponse(json.dumps(ret_dict))
    except Exception:
        traceback.print_exc()
def trajectory_overlap(gt_trajs, pred_traj, thresh_s=(0.5, 0.7, 0.9)):
    """
    Calculate the temporal IoU overlap between a predicted trajectory and a
    list of ground-truth trajectories, returning the best match.

    :param gt_trajs: list of ground-truth trajectories; each is a mapping
        from frame id to a box
    :param pred_traj: predicted trajectory, a mapping from frame id to a box
    :param thresh_s: three increasing spatial-IoU thresholds used to score
        per-frame matches (generalized from the previous hard-coded values)
    :return: (max_overlap, max_index) — the best tIoU score and the index of
        the matching ground-truth trajectory (0 if none overlap)
    """
    max_overlap = 0
    max_index = 0
    for t, gt_traj in enumerate(gt_trajs):
        total = len(set(gt_traj.keys()) | set(pred_traj.keys()))
        # BUGFIX: guard against division by zero when both trajectories
        # are empty.
        if total == 0:
            continue
        top1, top2, top3 = 0, 0, 0
        for fid in gt_traj:
            if fid not in pred_traj:
                continue
            sIoU = iou(gt_traj[fid], pred_traj[fid])
            # Each frame contributes once per threshold it clears.
            if sIoU >= thresh_s[0]:
                top1 += 1
            if sIoU >= thresh_s[1]:
                top2 += 1
            if sIoU >= thresh_s[2]:
                top3 += 1
        tIoU = (top1 + top2 + top3) * 1.0 / (3 * total)
        if tIoU > max_overlap:
            max_overlap = tIoU
            max_index = t
    return max_overlap, max_index
def adding_detail(request):
    """View that updates the details of a calculation and returns its
    refreshed state as JSON."""
    calc_id = request.POST.get("calc_id")
    crud_details_in_calc(request)
    payload = current_details_in_calc_and_main_calc_info(calc_id=calc_id)
    return JsonResponse(payload)
def create_futures_list(futures, executor):
    """Build a FuturesList wrapping *futures*, bound to *executor*'s config."""
    futures_list = FuturesList(futures)
    futures_list.config = executor.config
    futures_list.executor = executor
    return futures_list
def unittestResultsToXml(*, name='launch_test', test_results=None):
    """
    Serialize multiple unittest.TestResult objects into an XML document.

    A testsuites element will be the root element of the document.

    :param name: value of the root element's ``name`` attribute
    :param test_results: mapping of suite name to a TestResult-like object
        (must expose ``testsRun``, ``failures``, ``errors`` and ``testTimes``)
    :return: an ``xml.etree.ElementTree.ElementTree`` rooted at <testsuites>
    """
    # BUGFIX: a mutable default argument ({}) is shared across calls;
    # use None as the sentinel instead.
    if test_results is None:
        test_results = {}
    # The test_suites element is the top level of the XML result.
    # launch_test results contain two test suites - one from tests that ran while processes
    # were active, and one from tests that ran after processes were shut down
    test_suites = ET.Element('testsuites')
    test_suites.set('name', name)
    # To get tests, failures, and errors, we just want to iterate the results once
    tests = 0
    failures = 0
    errors = 0
    time = 0
    for result in test_results.values():
        tests += result.testsRun
        failures += len(result.failures)
        errors += len(result.errors)
        time += sum(result.testTimes.values())
    test_suites.set('tests', str(tests))
    test_suites.set('failures', str(failures))
    test_suites.set('errors', str(errors))
    test_suites.set('time', str(round(time, 3)))
    # BUGFIX: the loop variable previously shadowed the `name` parameter.
    for (suite_name, test_result) in test_results.items():
        test_suites.append(unittestResultToXml(str(suite_name), test_result))
    return ET.ElementTree(test_suites)
def time_sa_to_s(t, clock_freq):
    """Convert a duration expressed in samples into seconds."""
    seconds = t / clock_freq
    return float(seconds)
import subprocess
def get_raw_containers():
    """
    Run ``docker ps -a`` and return its raw output.

    :returns: The raw information from the `docker ps` command.
    :rtype: str
    :raises errors.BadResponseDockerEngine: when the daemon reports an error
        or is unreachable.
    """
    proc = subprocess.Popen(
        ["docker", "ps", "-a"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    raw_out, _ = proc.communicate()
    text = raw_out.decode("utf-8")
    if "Error" in text or "Cannot connect" in text:
        raise errors.BadResponseDockerEngine
    return text
def create_positions(ranges, numpts):
    """Create a sequence of np.prod(numpts) points over the ranges.

    Args:
        ranges: list of 2-tuples, each tuple being [min, max] of a numerical range.
        numpts: list of integers -- the number of equally spaced points
            between [min, max] including the end points.
    Returns:
        A sequence of all points on the multidimensional grid formed by
        numpts along each of ranges. Note that NOT a grid structure, but
        a linear sequence is returned, i.e. a list of N-tuples, each tuple
        being the coordinate of a point, N being the number of ranges.
        Numpy linspace is underlying the division of the ranges, end-points
        inclusive.
    """
    linsp = []
    for rng, num in zip(ranges, numpts):
        linsp.append(np.linspace(rng[0], rng[1], num=int(num), endpoint=True))
    grid = np.meshgrid(*linsp, sparse=False)  # list of arrays making up the grid
    # BUGFIX: np.vstack requires a real sequence; passing a map() generator
    # is rejected by modern NumPy. Materialize the raveled axes first.
    _positions = np.vstack([np.ravel(g) for g in grid])
    positions = list(zip(*_positions))  # now we have a sequence of tuples
    return positions
def mod(ctx, arg):
    """Return a copy of the context's last action; *arg* is unused."""
    return ctx.copy_last_action()
def kdcompare(r, p, depth):
    """
    Decide which branch of a k-d tree to search.

    Input
        r: root node (must expose a ``point`` sequence)
        p: query point
        depth: starting depth of search
    Output
        -1 to search the left branch, 1 for the right branch.
    """
    axis = depth % len(p)
    return -1 if p[axis] <= r.point[axis] else 1
import base64
def get_props(paths):
    """Return a hash of hashes of props for PATHS, using the svn client. Convert
    each embedded end-of-line to a single LF character.

    The result maps each path to a dict of {property name: property value}.
    Base64-encoded property values are decoded; all EOL variants in values
    are normalized to a single LF.
    """
    # It's not kosher to look inside .svn/ and try to read the internal
    # property storage format. Instead, we use 'svn proplist'. After
    # all, this is the only way the user can retrieve them, so we're
    # respecting the black-box paradigm.
    files = {}
    exit_code, output, errput = svntest.main.run_svn(1,
                                                     "proplist",
                                                     "--verbose",
                                                     "--xml",
                                                     *paths)
    # Strip debug lines before feeding the XML to the parser.
    output = (line for line in output if not line.startswith('DBG:'))
    dom = parseString(''.join(output))
    target_nodes = dom.getElementsByTagName('target')
    for target_node in target_nodes:
        filename = target_node.attributes['path'].nodeValue
        file_props = {}
        for property_node in target_node.getElementsByTagName('property'):
            name = property_node.attributes['name'].nodeValue
            if property_node.hasChildNodes():
                text_node = property_node.firstChild
                value = text_node.nodeValue
            else:
                # Property exists but has an empty value.
                value = ''
            try:
                encoding = property_node.attributes['encoding'].nodeValue
                if encoding == 'base64':
                    value = base64.b64decode(value)
                else:
                    raise Exception("Unknown encoding '%s' for file '%s' property '%s'"
                                    % (encoding, filename, name,))
            except KeyError:
                # No 'encoding' attribute: the value is plain text.
                pass
            # If the property value contained a CR, or if under Windows an
            # "svn:*" property contains a newline, then the XML output
            # contains a CR character XML-encoded as '&#13;'. The XML
            # parser converts it back into a CR character. So again convert
            # all end-of-line variants into a single LF:
            value = eol_re.sub('\n', value)
            file_props[name] = value
        files[filename] = file_props
    dom.unlink()
    return files
def _calc_centers(graph, X, labelling, method='nearest'):
"""Return the new centers
graph : sparse matrix
Indicates the graph constructed from X
X : ndarray
Original Data
labelling: 1d array
The labelling of the vertices
method : one of 'nearest', 'floyd_warshall', 'erosion'
Method to calculate the new centers
"""
size = graph.shape[0]
centers = np.zeros(size)
max_label = int(np.max(labelling))
for label in range(1, max_label+1):
index_vert = np.where(labelling == label)[0]
if method == 'floyd_warshall':
subgraph = ((graph[index_vert]).transpose())[index_vert]
FW = floyd_warshall(subgraph, directed=False)
ind_center = np.argmin(np.max(FW, axis=-1))
centers[index_vert[ind_center]] = label
elif method == 'nearest':
mean_subgraph = np.mean(X[index_vert,:], axis=0, keepdims=True)
dist_from_mean = np.sum((X[index_vert,:] - mean_subgraph)**2, axis = -1)
ind_center = np.argmin(dist_from_mean.flatten())
centers[index_vert[ind_center]] = label
else:
raise Exception("Only use floyd_warshall or nearest methods (for now)")
return centers | 5d77bb871b5f80e3e7d97586faa2618e9f6ae04f | 3,628,515 |
def greaco_latin_square(k, factor_1_labels=None, factor_2_labels=None, seed=None):
    """ Creates a k by k Greaco-Latin Square Design
    A greaco-latin square is a design comprised of two orthogonal latin
    squares. Note, there are no designs for k = 6.
    Arguments:
        k: the number of treatments.
        factor_1_labels: (optional) A list with k elements containing the
            labels applied to the levels of the first factor. The default are
            the first k uppercase Latin letters.
        factor_2_labels: (optional) A list with k elements containing the
            labels applied to the levels of the second factor. The default
            are the first k lowercase Greek letters.
        seed: (optional) The seed for the random number generation.
    Raises:
        ValueError: if k is not an integer greater than 2 or if one of the
            labels arguments does not have the correct number of labels.
    Returns:
        list of lists: the Greaco-Latin Square design
    Note:
        This is not compatible with Python 2 due to the use of ord('α').
    """
    if k < 2 or k == 6:
        raise ValueError('No Greaco-Latin Squares exist for k={}'.format(k))
    if factor_1_labels is None:
        factor_1_labels = [chr(ord('A') + i) for i in range(k)]
    elif not isinstance(factor_1_labels, list) or len(factor_1_labels) != k:
        # BUGFIX: .format was previously applied to the raise expression
        # (raising AttributeError) and the message had a doubled brace.
        raise ValueError('factor_1_labels must be a list of length {}'.format(k))
    if factor_2_labels is None:
        factor_2_labels = [chr(ord('α') + i) for i in range(k)]
    elif not isinstance(factor_2_labels, list) or len(factor_2_labels) != k:
        # BUGFIX: same malformed raise/format as above.
        raise ValueError('factor_2_labels must be a list of length {}'.format(k))
    if seed is None or seed == 0:
        seed = 7172
    n_iter = 0
    # Keep drawing random pairs of latin squares until an orthogonal pair
    # is found (or the iteration budget is exhausted).
    while True:
        n_iter += 1
        latin_square_1 = latin_square(k,
                                      treatment_names=factor_1_labels,
                                      randomize=True,
                                      seed=seed * n_iter)
        latin_square_2 = latin_square(k,
                                      treatment_names=factor_2_labels,
                                      randomize=True,
                                      seed=35 * seed * n_iter)
        if is_orthoganal(k, latin_square_1, latin_square_2):
            break
        if n_iter > MAX_ITERATIONS:
            raise Exception('Maximum number of iterations reached')
    # Superimpose the two orthogonal squares cell by cell.
    greaco_latin_square = []
    for i in range(k):
        row = []
        for j in range(k):
            row.append((str(latin_square_1[i][j]) +
                        str(latin_square_2[i][j])))
        greaco_latin_square.append(row)
    return greaco_latin_square
def get_poly_clock(params_array,section_str):
    """
    Get list with poly from params array and section string.
    Clock correction.
    Parameters
    ----------
    params_array : list
        information from delay model ini file (see lib_ini_files.py).
    section_str : str
        section of the ini file associated with a certain station, source and time (see const_ini_files.py)
    Returns
    -------
    poly_model : list of float
        list of [m-th coeff] with polynomial coefficients for the station clock model with m from zero to max_order-1, in seconds.
    """
    # Read the raw polynomial coefficients and coerce them to the delay dtype.
    poly_model = list(map(TYPE_COEFF_DELAY,get_val_vector(params_array,section_str,C_INI_ST_CLOCK_POLY)))
    # microseconds to seconds
    poly_model=np.multiply(poly_model,1e-6)
    return(poly_model)
def hash_to_G2(data: bytes) -> G2:
    """Hash an arbitrary byte string onto an element of the group G2."""
    point = _relic.hash_to_G2(data)
    return G2(point)
def build_filter_stack(stack, options):
    """Setup and return a filter stack.

    Filters are appended in three phases: token-level preprocessors,
    statement processors that require grouping, and output serializers.
    The append order below is significant — filters run in the order they
    were registered.

    Args:
      stack: :class:`~sqlparse.filters.FilterStack` instance
      options: Dictionary with options validated by validate_options.

    Returns:
      The same ``stack`` instance, with filters appended.
    """
    # Token filter
    if options.get('keyword_case'):
        stack.preprocess.append(
            filters.KeywordCaseFilter(options['keyword_case']))
    if options.get('identifier_case'):
        stack.preprocess.append(
            filters.IdentifierCaseFilter(options['identifier_case']))
    if options.get('truncate_strings'):
        stack.preprocess.append(filters.TruncateStringFilter(
            width=options['truncate_strings'], char=options['truncate_char']))
    if options.get('use_space_around_operators', False):
        stack.enable_grouping()
        stack.stmtprocess.append(filters.SpacesAroundOperatorsFilter())
    # After grouping
    if options.get('strip_comments'):
        stack.enable_grouping()
        stack.stmtprocess.append(filters.StripCommentsFilter())
    if options.get('strip_whitespace') or options.get('reindent'):
        stack.enable_grouping()
        stack.stmtprocess.append(filters.StripWhitespaceFilter())
    if options.get('reindent'):
        stack.enable_grouping()
        stack.stmtprocess.append(
            filters.ReindentFilter(
                char=options['indent_char'],
                width=options['indent_width'],
                indent_after_first=options['indent_after_first'],
                indent_columns=options['indent_columns'],
                wrap_after=options['wrap_after'],
                comma_first=options['comma_first']))
    if options.get('reindent_aligned', False):
        stack.enable_grouping()
        stack.stmtprocess.append(
            filters.AlignedIndentFilter(char=options['indent_char']))
    if options.get('right_margin'):
        stack.enable_grouping()
        stack.stmtprocess.append(
            filters.RightMarginFilter(width=options['right_margin']))
    # Serializer
    if options.get('output_format'):
        frmt = options['output_format']
        if frmt.lower() == 'php':
            fltr = filters.OutputPHPFilter()
        elif frmt.lower() == 'python':
            fltr = filters.OutputPythonFilter()
        else:
            # Unknown output format: silently skip serialization.
            fltr = None
        if fltr is not None:
            stack.postprocess.append(fltr)
    return stack
def get_nuclear_mgc(data, meta):
    """
    Determines the going marginal_cost for this technology
    @ In, data, dict, request for data (currently unused by this function)
    @ In, meta, dict, state information
    @ Out, data, dict, filled data
    @ Out, meta, dict, state information
    """
    # NOTE(review): `trunc` is not defined inside this function and the `data`
    # parameter is never used — presumably `trunc` is a module-level value,
    # but this looks like it may have been meant to be `data`. Confirm.
    return {'reference_price': get_trunc_mgc(trunc, meta, 'nuclear')}, meta
from typing import Any
def efficientnet_b1(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> EfficientNet:
    """
    Constructs a EfficientNet B1 architecture from
    `"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" <https://arxiv.org/abs/1905.11946>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # B1 scales width x1.0 and depth x1.1 relative to the B0 baseline.
    settings = _efficientnet_conf(width_mult=1.0, depth_mult=1.1, **kwargs)
    return _efficientnet_model("efficientnet_b1", settings, 0.2, pretrained, progress, **kwargs)
def StepToGeom_MakeParabola2d_Convert(*args):
    """
    Convert a STEP parabola entity into a Geom2d parabola.

    Thin SWIG wrapper around the native _StepToGeom binding; the output
    curve is written through the CC handle argument.

    :param SC:
    :type SC: Handle_StepGeom_Parabola &
    :param CC:
    :type CC: Handle_Geom2d_Parabola &
    :rtype: bool
    """
    return _StepToGeom.StepToGeom_MakeParabola2d_Convert(*args)
def read_corpus(file_path, source):
    """ Read file, where each sentence is dilineated by a `\n`.
    @param file_path (str): path to file containing corpus
    @param source (str): "tgt" or "src" indicating whether text
        is of the source language or target language
    @returns list[list[str]]: tokenized sentences; target sentences are
        wrapped in <s> ... </s> markers
    """
    data = []
    # BUGFIX: use a context manager so the file handle is always closed
    # (the previous bare open() leaked the handle).
    with open(file_path) as f:
        for line in f:
            sent = nltk.word_tokenize(line)
            # only append <s> and </s> to the target sentence
            if source == 'tgt':
                sent = ['<s>'] + sent + ['</s>']
            data.append(sent)
    return data
def del_find(data, ID, cur_id, intlist, num_start):
    """Delete selected log entries for one person.

    ID - int ID of the person whose log is modified
    cur_id - list of ids of the currently displayed log entries
    intlist - list of numbers entered by the user
    num_start - the number the displayed list starts counting from

    Returns True when at least one entry was selected for deletion,
    False otherwise (including when the person is unknown).
    """
    cur_index = data_index_for_key(data, ID)
    if cur_index == -1:
        return False
    # Map the user-entered display numbers back to entry ids.
    del_id = [cur_id[n - num_start] for n in intlist]
    log = data[cur_index]['log']
    # Iterate backwards so deletions do not shift upcoming indices.
    for i in range(len(log) - 1, -1, -1):
        if log[i]['id'] in del_id:
            del log[i]
    return len(del_id) > 0
def rechunk_to_single_chunk_if_more_than_one_chunk_along_dim(ds, dim):
    """Rechunk an xarray object more than one chunk along dim."""
    # Only act on dask-backed objects that report chunking for `dim`.
    # NOTE(review): for a DataArray, `ds.chunks` is a tuple of chunk-size
    # tuples, so `dim in ds.chunks` compares a dim *name* against those
    # tuples — confirm the DataArray branch below is actually reachable.
    if dask.is_dask_collection(ds) and dim in ds.chunks:
        if isinstance(ds, xr.Dataset):
            # Dataset.chunks maps dimension name -> per-chunk sizes.
            nchunks = len(ds.chunks[dim])
        elif isinstance(ds, xr.DataArray):
            # DataArray.chunks is positional; translate the dim name to an axis.
            nchunks = len(ds.chunks[ds.get_axis_num(dim)])
        if nchunks > 1:
            # -1 means "one chunk spanning the whole dimension".
            ds = ds.chunk({dim: -1})
    return ds
def update_payin(
    db, payin_id, remote_id, status, error,
    amount_settled=None, fee=None, intent_id=None, refunded_amount=None,
):
    """Update the status and other attributes of a charge.
    Args:
        payin_id (int): the ID of the charge in our database
        remote_id (str): the ID of the charge in the payment processor's database
        status (str): the new status of the charge
        error (str): if the charge failed, an error message to show to the payer
        amount_settled: the captured amount, if known (only set once — existing
            values are kept via coalesce)
        fee: the processor's fee, if known (only set once)
        intent_id (str): the payment processor's intent ID, if any (only set once)
        refunded_amount: the cumulative refunded amount; overrides the stored
            value when provided
    Returns:
        Record: the row updated in the `payins` table, or None if no row
        matched `payin_id`. The returned record carries an extra
        `old_status` column holding the pre-update status.
    """
    with db.get_cursor() as cursor:
        # The subselect in RETURNING evaluates against the statement's
        # snapshot, so `old_status` reflects the status before this UPDATE.
        payin = cursor.one("""
            UPDATE payins
               SET status = %(status)s
                 , error = %(error)s
                 , remote_id = coalesce(remote_id, %(remote_id)s)
                 , amount_settled = coalesce(amount_settled, %(amount_settled)s)
                 , fee = coalesce(fee, %(fee)s)
                 , intent_id = coalesce(intent_id, %(intent_id)s)
                 , refunded_amount = coalesce(%(refunded_amount)s, refunded_amount)
             WHERE id = %(payin_id)s
         RETURNING *
                 , (SELECT status FROM payins WHERE id = %(payin_id)s) AS old_status
        """, locals())
        if not payin:
            return
        if remote_id and payin.remote_id != remote_id:
            raise AssertionError(f"the remote IDs don't match: {payin.remote_id!r} != {remote_id!r}")
        # Record the status transition, but only when it actually changed.
        if status != payin.old_status:
            cursor.run("""
                INSERT INTO payin_events
                       (payin, status, error, timestamp)
                VALUES (%s, %s, %s, current_timestamp)
            """, (payin_id, status, error))
        if status in ('pending', 'succeeded'):
            # A one-off route has served its purpose once the payin goes through.
            cursor.run("""
                UPDATE exchange_routes
                   SET status = 'consumed'
                 WHERE id = %s
                   AND one_off IS TRUE
            """, (payin.route,))
        # Lock to avoid concurrent updates
        cursor.run("SELECT * FROM participants WHERE id = %s FOR UPDATE",
                   (payin.payer,))
        # Update scheduled payins, if appropriate
        if status in ('pending', 'succeeded'):
            sp = cursor.one("""
                SELECT *
                  FROM scheduled_payins
                 WHERE payer = %s
                   AND payin = %s
            """, (payin.payer, payin.id))
            if not sp:
                # Try to find a scheduled renewal that matches this payin.
                # It doesn't have to be an exact match.
                schedule = cursor.all("""
                    SELECT *
                      FROM scheduled_payins
                     WHERE payer = %s
                       AND payin IS NULL
                       AND mtime < %s
                """, (payin.payer, payin.ctime))
                today = utcnow().date()
                # Prefer the scheduled payin closest to today.
                schedule.sort(key=lambda sp: abs((sp.execution_date - today).days))
                payin_tippees = set(cursor.all("""
                    SELECT coalesce(team, recipient) AS tippee
                      FROM payin_transfers
                     WHERE payer = %s
                       AND payin = %s
                """, (payin.payer, payin.id)))
                for sp in schedule:
                    if any((tr['tippee_id'] in payin_tippees) for tr in sp.transfers):
                        cursor.run("""
                            UPDATE scheduled_payins
                               SET payin = %s
                                 , mtime = current_timestamp
                             WHERE id = %s
                        """, (payin.id, sp.id))
                        break
        return payin
def stack(x, num_block, size, stride, stack_index, block=basic):
    """Apply a stack of num_block blocks; only the first block uses `stride`,
    the remaining blocks use stride 1."""
    strides = [stride] + [1] * (num_block - 1)
    for block_index, s in enumerate(strides):
        x = block(x, size, s, stack_index, block_index)
    return x
def twist2msg(twist):
    """
    Converts a 6x1 twist vector into a geometry_msgs/Twist message.

    :type twist: numpy.array
    :param twist: 6x1 twist matrix, laid out as [angular(3); linear(3)]
    :rtype: geometry_msgs.msg.Twist
    :return: The ROS Twist message
    """
    flat = np.reshape(twist, (6,))
    linear = Vector3(flat[3], flat[4], flat[5])
    angular = Vector3(flat[0], flat[1], flat[2])
    return Twist(linear, angular)
def z1FromAngles(wavelength, stt, om, chi, phi):
    """
    Calculate the scattering vector z1 from diffractometer angles.

    @param wavelength
    @param stt two-theta angle in radians
    @param om angle in radians
    @param chi angle in radians
    @param phi angle in radians
    @return The z1 vector
    """
    th = stt/2.
    # Scattering vector expressed in the z4 frame first.
    z4 = np.array([
        (2. * sin(th) * cos(th)) / wavelength,
        (-2. * sin(th) * sin(th)) / wavelength,
        .0,
    ], dtype='float64')
    # Rotate back through the om/chi/phi circles to obtain z1.
    return z1fromz4(z4, om, chi, phi)
import requests
import json
def sparqling(sparql_query, libraries, is_basic = True,
              no_sequence = False, progress = True):
    """
    Run a paginated SPARQL query against https://synbiohub.org/ for each
    library and collect all result pages into one DataFrame.

    For every library, the placeholder ``library_variable`` in
    ``sparql_query`` is replaced by the library name, and the placeholder
    ``replacehere`` is replaced by an increasing OFFSET (pages of 10000
    rows) until a partial page signals the end of the results.

    Parameters
    ----------
    sparql_query : str
        Query text containing the ``library_variable`` and ``replacehere``
        placeholders.
    libraries : iterable of str
        Library names to substitute into the query.
    is_basic : bool
        Unused by this function (kept for interface compatibility).
    no_sequence : bool
        Unused by this function (kept for interface compatibility).
    progress : bool
        If True, print the current page index while fetching.

    Returns
    -------
    pandas.DataFrame
        Concatenation of the normalized JSON bindings of every page.
    """
    all_pages = []
    #loops over all pages and extracts query results
    for library in libraries:
        query_text = sparql_query.replace("library_variable",
                                          f"'{library}'")
        for i in range(0,2000):
            if progress: #print progress
                print(i)
            #replace placeholder in query_text with page number to get
            queryed = query_text.replace("replacehere", str(i*10000))
            #make request for data
            r = requests.post("https://synbiohub.org/sparql",
                              data = {"query":queryed},
                              headers = {"Accept":"application/json"})
            #reformat page data
            d = json.loads(r.text)
            one_page = json_normalize(d['results']['bindings'])
            #add page data to all pages data
            all_pages.append(one_page)
            #if the page was no longer a full page stop loop
            if len(one_page)<10000:
                break
    #create pandas data frame containing all page info
    #print(pd.DataFrame(all_pages))
    all_pages = pd.concat(all_pages)
    return(all_pages)
from typing import List
def convert_slide_binary_metadata_to_base64(slide: Slide) -> Slide:
    """
    Converts all binary data contained in the slide metadata to base64.

    Mutates ``slide.metadata`` in place and returns the same slide
    (the previous ``List[Slide]`` return annotation was incorrect).
    """
    if slide.metadata is not None:
        for metadata_key, metadata_value in slide.metadata.items():
            if is_byte_data(metadata_value):
                slide.metadata[metadata_key] = convert_binary_to_base64(metadata_value)
    return slide
def _has_class(domElement, className):
"""
Helper function to test if the provided element has the provided class
"""
return className in domElement.get_attribute('class').split(" ") | 9a20557cc8d3e3dc91ac33764a6d94139b70f6f2 | 3,628,532 |
def exponent(Cz, C):
    """Recover z such that C ** z == Cz (i.e. z = log Cz base C).
    For exponent(1, 1), arbitrarily choose to return 3."""
    if Cz == C == 1:
        return 3
    return int(round(log(Cz, C)))
def _remove_markers(sentence: str):
    """
    Strip the lemma start/end marker tokens from a sentence.
    :param sentence: a string
    :return: a string
    """
    without_start = sentence.replace(START_MARKER_TOKEN, '')
    return without_start.replace(END_MARKER_TOKEN, '')
def product():
"""
Import the test utils module to be able to:
- Create apigee test product
- Update custom scopes
- Update environments
- Update product paths
- Update custom attributes
- Update proxies to the product
- Update custom ratelimits
"""
return ApigeeApiProducts() | 883c74a19bc5e8e0f39fe314735b93b973f7642b | 3,628,535 |
def role_required(role, api=False):
    """flask view decorator implementing role based authorization; does not redirect to login for api views/routes

    Args:
        role: the role name the current user must have to access the view.
        api: when True, unauthenticated requests get a plain 401 response
            instead of the login redirect.
    """
    def _role_required(fnc):
        # functools.wraps preserves the wrapped view's name/docstring so
        # Flask's endpoint registration keeps working.
        @wraps(fnc)
        def decorated_view(*args, **kwargs):
            # Authentication is checked before authorization.
            if not current_user.is_authenticated:
                if api:
                    return 'Unauthorized', HTTPStatus.UNAUTHORIZED
                return login_manager.unauthorized()
            if not current_user.has_role(role):
                return 'Forbidden', HTTPStatus.FORBIDDEN
            return fnc(*args, **kwargs)
        return decorated_view
    return _role_required
from typing import Tuple
from typing import List
from typing import Optional
from typing import Dict
from pathlib import Path
import sys
def dock_ligand(ligand: Tuple[str, str], software: str, receptors: List[str],
                center: Tuple[float, float, float],
                size: Tuple[int, int, int] = (10, 10, 10), ncpu: int = 1,
                path: str = '.', extra: Optional[List[str]] = None,
                repeats: int = 1) -> List[List[Dict]]:
    """Dock the given ligand using the specified vina-type docking program and
    parameters into the ensemble of receptors repeatedly
    Parameters
    ----------
    software : str
        the docking program to run
    ligand : Ligand
        a tuple containing a ligand's SMILES string and associated docking
        input file
    receptors : List[str]
        the filesnames of PDBQT files corresponding to various receptor poses
    center : Tuple[float, float, float]
        the x-, y-, and z-coordinates, respectively, of the search box center
    size : Tuple[int, int, int] (Default = (10, 10, 10))
        the x, y, and z-radii, respectively, of the search box
    path : string (Default = '.')
        the path under which both the log and out files should be written to
    ncpu : int (Default = 1)
        the number of cores to allocate to the docking program
    extra : Optional[List[str]]
        extra command-line arguments forwarded to the docking program
    repeats : int (Default = 1)
        the number of times to repeat a docking run
    Raises
    ------
    ValueError
        if repeats is not a positive integer
    Return
    ------
    ensemble_rowss : List[List[Dict]]
        one inner list per receptor pose, each containing one dict per
        repeat with the following keys:
          smiles - the ligand's SMILES string
          name - the name of the ligand
          in - the filename of the input ligand file
          out - the filename of the output docked ligand file
          log - the filename of the output log file
          score - the ligand's docking score (always None here; filled in
                  later by the caller after parsing the log)
    """
    if repeats <= 0:
        raise ValueError(f'Repeats must be greater than 0! ({repeats})')
    smi, pdbqt = ligand
    p_pdbqt = Path(pdbqt)
    ligand_name = p_pdbqt.stem
    ensemble_rowss = []
    for receptor in receptors:
        repeat_rows = []
        for repeat in range(repeats):
            # Unique run name so output files from repeats don't collide.
            name = f'{Path(receptor).stem}_{ligand_name}_{repeat}'
            argv, p_out, p_log = build_argv(
                software=software, receptor=receptor, ligand=pdbqt, name=name,
                center=center, size=size, ncpu=ncpu, extra=extra, path=path
            )
            ret = sp.run(argv, stdout=sp.PIPE, stderr=sp.PIPE)
            try:
                ret.check_returncode()
            except sp.SubprocessError:
                # A failed docking run is logged but not fatal; the row is
                # still recorded with score=None.
                print(f'ERROR: docking failed. argv: {argv}', file=sys.stderr)
                print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
            repeat_rows.append({
                'smiles': smi,
                'name': ligand_name,
                'in': p_pdbqt,
                'out': p_out,
                'log': p_log,
                'score': None
            })
        ensemble_rowss.append(repeat_rows)
    return ensemble_rowss
def read_cof_file(cof_file, headerlength=12, as_shc_order=True):
    """Get coefficients from a cof-format file.

    Read a .cof file and output the n,m, mixed gh arrays (1D).
    gh can then be split into separate g,h arrays with convert_gh.

    Args:
        cof_file (str): full path to the file to read
        headerlength (int): number of lines of header (to skip)
        as_shc_order (bool): when True, return the mixed gh coefficient
            array; when False, return the raw DataFrame
    Returns:
        gh (array): mixed degree-order list of coefficients OR:
        cof (Dataframe): of n, m, g, h values
    """
    # BUGFIX: `delim_whitespace=True` is deprecated (removed in pandas 3);
    # `sep=r"\s+"` is the equivalent, supported spelling.
    cof = pd.read_csv(cof_file, skiprows=headerlength, sep=r"\s+")
    if as_shc_order:
        n, m, gh = combine_gh(*cof.values.T)
        return gh
    else:
        return cof
import torch
def get_mlp_models(level_params_list):
    """Get models based on level params and put them into level_algo_kwargs_list.

    For each level's parameter dict, builds an MLP model from its
    ``model_kwargs`` and injects it (plus a resolved goal-sampling strategy)
    into a deep copy of its ``algo_kwargs``. Returns the list of augmented
    algo kwargs, one per level.
    """
    level_algo_kwargs_list = []
    for level_params in level_params_list:
        model_kwargs = level_params["model_kwargs"]
        # Deep-copy so the caller's config dicts are never mutated.
        algo_kwargs = deepcopy(level_params["algo_kwargs"])
        # replace goal sampling strategy name with corresponding class if necessary
        if "goal_sampling_strategy" in algo_kwargs:
            algo_kwargs["goal_sampling_strategy"] = string_to_strategy(algo_kwargs["goal_sampling_strategy"])
        # specify model
        algo_kwargs["model"] = get_mlp_model(
            **model_kwargs,
            flat_algo = algo_kwargs["flat_algo_name"],
            activation_fn = torch.nn.ReLU(inplace = False), #TODO: Try tanh?
            # Critic-output squashing bounds only apply when squashing is on.
            squash_low = model_kwargs["squash_low"] if model_kwargs["squash_critics"] else None,
            squash_high = model_kwargs["squash_high"] if model_kwargs["squash_critics"] else None
        )
        level_algo_kwargs_list.append(algo_kwargs)
    return level_algo_kwargs_list
def format_datestamp(datestamp):
    """Format datestamp to an OAI-PMH compliant format.

    Parameters
    ----------
    datestamp: datetime.datetime
        A datestamp.

    Return
    ------
    str:
        Formatted datestamp, e.g. ``2020-01-02T03:04:05Z``.
    """
    return '{:%Y-%m-%dT%H:%M:%SZ}'.format(datestamp)
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """Solve A x = b with the conjugate-gradient method (Demmel p. 312).

    Parameters
    ----------
    f_Ax : callable
        Computes the matrix-vector product A @ p for a vector p.
    b : ndarray
        Right-hand-side vector.
    cg_iters : int
        Maximum number of CG iterations.
    callback : callable, optional
        Called with the current iterate x before each iteration and once
        after the loop ends.
    verbose : bool
        If True, print per-iteration residual and solution norms.
    residual_tol : float
        Stop once the squared residual norm drops below this value.

    Returns
    -------
    ndarray
        Approximate solution x.
    """
    p = b.copy()
    r = b.copy()
    x = np.zeros_like(b)
    rdotr = r.dot(r)

    fmtstr = "%10i %10.3g %10.3g"
    titlestr = "%10s %10s %10s"
    # Fix: the original used Python 2 `print` statements and `xrange`, which
    # are a SyntaxError / NameError under Python 3 (the rest of this file is
    # Python 3 — it uses f-strings elsewhere).
    if verbose:
        print(titlestr % ("iter", "residual norm", "soln norm"))
    for i in range(cg_iters):
        if callback is not None:
            callback(x)
        if verbose:
            print(fmtstr % (i, rdotr, np.linalg.norm(x)))
        z = f_Ax(p)
        # Small epsilon guards against division by zero.
        v = rdotr / (p.dot(z) + 1e-8)
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        mu = newrdotr / (rdotr + 1e-8)
        p = r + mu * p
        rdotr = newrdotr
        if rdotr < residual_tol:
            break

    if callback is not None:
        callback(x)
    if verbose:
        print(fmtstr % (i + 1, rdotr, np.linalg.norm(x)))  # pylint: disable=W0631
    return x
def output_handler(data, context):
    """Post-process a TensorFlow Serving response before returning it to the client.

    Args:
        data (obj): the TensorFlow Serving response
        context (Context): object carrying request and configuration details
    Returns:
        (bytes, string): response payload and its content type
    Raises:
        ValueError: if the serving response status is not 200.
    """
    log.info(" output_handler firing")
    if data.status_code == 200:
        return data.content, context.accept_header
    raise ValueError(data.content.decode("utf-8"))
def create_pdna_net(gdf_nodes, gdf_edges, predistance=500):
    """
    Build a pandana network used to compute accessibility to destinations.
    The network is comprised of a set of nodes and edges.

    Parameters
    ----------
    gdf_nodes: GeoDataFrame
    gdf_edges: GeoDataFrame
    predistance: int
        the distance of search (in meters), default is 500 meters

    Returns
    -------
    pandana network
    """
    # Node coordinates taken from the point geometries (x = longitude,
    # y = latitude).
    gdf_nodes["x"] = gdf_nodes["geometry"].apply(lambda geom: geom.x)
    gdf_nodes["y"] = gdf_nodes["geometry"].apply(lambda geom: geom.y)
    # Edge endpoints: "from"/"to" hold the OSM node ids of each segment.
    gdf_edges["from"] = gdf_edges["u"].astype(np.int64)
    gdf_edges["to"] = gdf_edges["v"].astype(np.int64)
    # Edge impedance is the OpenStreetMap segment length.
    gdf_edges["length"] = gdf_edges["length"].astype(float)
    gdf_nodes["id"] = gdf_nodes["osmid"].astype(np.int64)
    gdf_nodes.set_index("id", inplace=True, drop=False)
    # Assemble the transportation network; typical impedances would be
    # distance from OSM or travel time from GTFS transit data.
    net = pdna.Network(gdf_nodes["x"], gdf_nodes["y"],
                       gdf_edges["from"], gdf_edges["to"],
                       gdf_edges[["length"]])
    # Precompute range queries (reachable nodes within the maximum distance)
    # so later aggregations skip redundant network searches.
    net.precompute(predistance + 10)
    return net
from collections import Counter
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the dictionaries

    Args:
    p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
        Dict as read in with load_reference or load_reference_from_stream
    p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
    Returns:
        bool,str: Boolean whether allowed, message to be shown in case of a problem
    """
    # Fix: `Counter` was imported from `typing`, which is a deprecated
    # annotation-only alias; import the real class from `collections`.
    message = ''
    allowed = True

    # Create sets of the QIDs for the submitted and reference queries
    candidate_set = set(qids_to_ranked_candidate_passages.keys())
    ref_set = set(qids_to_relevant_passageids.keys())

    # Check whether the queries match the evaluation set
    if candidate_set != ref_set:
        if candidate_set >= ref_set:
            # This is to be expected, since we have split the evaluation set
            # in validation & test
            pass
        elif candidate_set < ref_set:
            message = "Not all queries seem to be ranked. Are you scoring the right set?"
        else:
            message = "The submitted queries do not fully match the queries in the evaluation set. Are you scoring the right set?"

    # Check that we do not have multiple passages per query
    for qid in qids_to_ranked_candidate_passages:
        # Zeros act as padding, so they are allowed to repeat
        duplicate_pids = set(
            [item for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items() if count > 1])
        if len(duplicate_pids - set([0])) > 0:
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=list(duplicate_pids)[0])
            allowed = False

    return allowed, message
def make_jinja_element_parser(name_parsers, content):
    """
    Build a parser for a complete Jinja element: an opening tag, optional
    middle parts, and (when there is more than one name) a closing tag.

    `name_parsers` must be a list of tag name parsers. For example,
    `name_parsers` can be defined as follow in order to parse `if` statements:
    name_parsers = [P.string(n) for n in ['if', 'elif', 'else', 'endif']]
    """
    if len(name_parsers) == 1:
        # Single-name element: just the tag itself, with no element body and
        # no closing tag (P.success(None) stands in for absent content).
        tag = make_jinja_tag_parser(name_parsers[0])
        part = locate(P.seq(tag, P.success(None))).combine(
            _combine_jinja_element_part
        )
        parts = [part]
        end_tag_parser = None
    else:
        # Multi-name element (e.g. if/elif/else/endif): the first name opens
        # the element and is required once, each intermediate name may occur
        # any number of times (.many()), and the last name closes it.
        part_names = name_parsers[:-1]
        first_part = make_jinja_element_part_parser(
            part_names[0], content=content
        )
        next_parts = [
            make_jinja_element_part_parser(name, content=content).many()
            for name in part_names[1:]
        ]
        parts = [first_part] + next_parts
        end_tag_parser = make_jinja_tag_parser(name_parsers[-1])
    # NOTE: `content` is rebound here; the original argument is not used
    # past this point.
    content = [P.seq(*parts)]
    if end_tag_parser:
        content.append(end_tag_parser)
    return locate(P.seq(*content)).combine(_combine_jinja_element)
import base64
def encode_base64(data: bytes) -> bytes:
    """Create a URL-safe base64 representation of *data*, stripping any
    newline bytes from the result."""
    encoded = base64.urlsafe_b64encode(data)
    return encoded.replace(b"\n", b"")
from typing import Iterable
from typing import Optional
def non_none(iterable: Iterable[Optional[_T]]) -> Iterable[_T]:
    """
    Lazily filter *iterable*, yielding only the elements that are not *None*.
    """
    return (element for element in iterable if element is not None)
def extract_encoding(response: str) -> str:
    """Extract text-encoding information from an HTTP response.

    Looks for a charset in the HTTP Content-Type header first, then in the
    HTML meta tags (http-equiv="Content-Type" with
    content="text/html; charset=XXX", or charset="XXX").

    response -- http response
    :return: the detected charset, or "UTF-8" when none is found.
    """
    fallback = "UTF-8"
    headers, html = split_headers_from_content(response)
    if headers is None and html is None:
        return fallback
    # Header charset wins; only consult meta tags when the header has none.
    charset = extract_encoding_from_headers(headers)
    if charset is None:
        charset = extract_encoding_from_meta_tags(html)
    return charset if charset is not None else fallback
from typing import Dict
def validate_403_response(integration_response: Dict, transaction: Transaction) -> Dict:
    """
    Ensures the response returned from `process_sep6_request()` matches the definitions
    described in SEP-6. This function can be used for both /deposit and /withdraw
    endpoints since the response schemas are identical.
    Note that this validation function is only for 403 responses. /deposit and /withdraw
    have distinct 200 Success response schemas so the validation for those are done in
    depost.py and withdraw.py.
    :param integration_response: the response dictionary returned from
        `process_sep6_request()`
    :param transaction: the transaction object that should not be saved to the DB
    :return: a new dictionary containing the valid key-value pairs from
        integration_response
    :raises ValueError: if the transaction was persisted or the response
        shape is invalid (details are logged before raising)
    """
    # A 403 means the request was rejected, so the transaction must not have
    # been saved by the integration.
    if Transaction.objects.filter(id=transaction.id).exists():
        logger.error(
            "transaction cannot be saved when returning 403 SEP-6 deposit/withdraw response"
        )
        raise ValueError()
    statuses = ["pending", "denied"]
    types = ["customer_info_status", "non_interactive_customer_info_needed"]
    # Only whitelisted keys are copied into the outgoing response.
    response = {"type": integration_response["type"]}
    if response["type"] not in types:
        logger.error("Invalid 'type' returned from process_sep6_request()")
        raise ValueError()
    elif response["type"] == types[0]:
        # "customer_info_status" responses carry a status and, optionally,
        # a more_info_url for the client to poll.
        if integration_response.get("status") not in statuses:
            logger.error("Invalid 'status' returned from process_sep6_request()")
            raise ValueError()
        response["status"] = integration_response["status"]
        more_info_url = rci.more_info_url(transaction.stellar_account)
        if more_info_url:
            response["more_info_url"] = more_info_url
    else:
        # "non_interactive_customer_info_needed" responses must list the
        # missing SEP-9 fields the client has to provide.
        if "fields" not in integration_response:
            logger.error(f"missing 'fields' for {types[1]}")
            raise ValueError()
        elif not isinstance(integration_response["fields"], list):
            logger.error(f"invalid 'fields' for {types[1]}")
            raise ValueError()
        elif not all(f in SEP_9_FIELDS for f in integration_response["fields"]):
            logger.error(f"invalid 'fields' for {types[1]}")
            raise ValueError()
        else:
            response["fields"] = integration_response["fields"]
    return response
def _badge_color_mode(owner, repo):
    """Return the badge (color, mode) pair for the repository owner/repo."""
    redis = utils.get_redis_for_cache()
    full_name = owner + "/" + repo
    if redis.sismember("badges", full_name):
        return "success", "enabled"
    return "critical", "disabled"
def fetch(uri, fetcher):
    """Download a web resource into the fetcher's fetch directory.

    Creates the destination directory (and, for file resources, the file
    itself), then streams the response body to disk in chunks. Part of the
    [Interface].
    """
    resource = download(uri, fetcher)
    resource.origin_dir = Fs(fetcher.fetch_dir)
    # Prepare the filesystem target: directories are created outright, files
    # get their parent directory plus an empty placeholder file.
    if resource.the_head_id.is_directory:
        Make(resource.path).dirs()
    else:
        Make(resource.local_dir).dirs()
        Make(resource.path).touch(exist_ok=True)
    # Stream the body to disk for file resources only.
    if not resource.the_head_id.is_directory and Fs(resource.path).is_file:
        with open(str(resource.path), 'wb') as sink:
            for chunk in resource.request.iter_content(
                    chunk_size=fetcher.chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    sink.write(chunk)
    return resource
def fillArray(data, mask=None, fill_value=None):
    """
    Fill a masked numpy array with *fill_value* without demasking it, and
    set its fill_value attribute accordingly.

    If *data* ends up unmasked, it is returned as a plain ndarray.
    """
    if mask is not None and mask is not False:
        data = np.ma.MaskedArray(data, mask=mask, copy=False)
    if np.ma.is_masked(data):
        if fill_value is not None:
            # Write through to the underlying buffer so the mask survives.
            data._data[data.mask] = fill_value
            np.ma.set_fill_value(data, fill_value)
    else:
        data = np.ma.filled(data)
    return data
def bio_hash_loss(weights: Array, x: Array, probs: Array) -> Array:
    """Calculate the bio-hash loss from "Bio-Inspired Hashing for Unsupervised
    Similarity Search" (arXiv:2001.04907).

    Args:
        weights: model weights of shape output_features x input_features
        x: input of shape batch x input_features
        probs: probabilities of each element in input; has shape
            batch x input_features, where each row should sum up to 1
    Returns:
        Array of energy/bio-hash loss for each input vector in batch
    """
    xp = cp.get_array_module(weights)
    # Winning (most strongly activated) hidden unit for each input vector.
    winners = xp.inner(weights, x).argmax(axis=0)
    winning_weights = weights[winners]
    numerator = xp.inner(winning_weights, (x / probs)).diagonal()
    norms = xp.sqrt(xp.inner(winning_weights, winning_weights).diagonal())
    energy = -numerator / norms
    return energy.sum()
import os
import subprocess
import time
def start_jupyter(instance, local_port=8889):
    """
    This function tries to SSH onto the instance, remotely start a Jupyter notebook server, and forward given
    local port to it.

    :param instance: dict describing the EC2 instance; "KeyName" and
        "PublicIpAddress" keys are read here.
    :param local_port: first local port to try for forwarding; up to two
        higher ports are tried if it is taken.
    :raises ValueError: if the matching .pem key file is not present locally.
    :return: the parsed `jupyter notebook list` output lines.
    """
    # Check onif key is available
    key_name = instance["KeyName"]
    key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
    if os.path.exists(key_path):
        # List notebook servers already running on the remote host.
        output = str(subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "jupyter", "notebook", "list"], stdout=subprocess.PIPE).stdout).split("\\n")[1:-1]
        if len(output) == 0:
            # No server found: start one in a detached screen session, wait
            # briefly, then list again to pick up its URL.
            print("Starting jupyter server remotely...")
            subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "screen", "-dm", "bash", "-c", "\"jupyter", "notebook", "--no-browser", "--port=8889\""])
            time.sleep(3)
            output = str(subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "jupyter", "notebook", "list"], stdout=subprocess.PIPE).stdout).split("\\n")[1:-1]
            print("\t ... done")
        else:
            print("Jupyter server found, did not start a new server.")
        # Try local_port, local_port+1, local_port+2 until one is free.
        # NOTE(review): if all three are taken, the function returns without
        # forwarding anything -- confirm that is acceptable to callers.
        one_up = 0
        while (one_up < 3):
            if check_port(local_port + one_up):
                server_prompt = {
                    'type': 'list',
                    'name': 'server',
                    'message': 'Port '+str(local_port + one_up)+' available. Connect?',
                    'choices': output
                }
                jupyter_instance = prompt.prompt(server_prompt)["server"]
                remote_hostport = jupyter_instance.split("/")[2]
                # Background ssh tunnel from the chosen local port to the
                # remote notebook host:port; detached from this process group.
                command = ["nohup", "ssh", "-i", key_path, "-N", "-L", str(local_port + one_up)+":"+remote_hostport, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]]
                process = subprocess.Popen(command, preexec_fn=os.setpgrp)
                print("Port forwarding PID: "+str(process.pid))
                print(jupyter_instance.replace(str(remote_hostport), str(local_port + one_up), 1))
                print("")
                break
            else:
                print("Local port "+str(local_port)+" is taken. Maybe you are already connected?")
                one_up += 1
    else:
        raise ValueError("Key"+key_name+".pem is not available in my keys folder")
    return output
from typing import Dict
def train_model(data: Dict[str, Dataset], parameters: dict) -> Booster:
    """Train a LightGBM model on the given datasets with the given parameters.

    Expects *data* to provide the training set under "train" and the
    validation set under "valid".
    """
    booster = lightgbm.train(
        params=parameters,
        train_set=data["train"],
        valid_sets=data["valid"],
        num_boost_round=500,
        early_stopping_rounds=20,
    )
    return booster
def _bblock_hack(bc, bblock):
    """
    The Tcl compiler has some annoying implementation details which must be
    recognised before any reduction.

    Returns the adjusted basic block and a list of change records describing
    the instructions that were removed.
    """
    # 'variable' does not push a result so the Tcl compiler inserts a push.
    variableis = []
    changes = []
    # Locate every 'variable' instruction; the compiler always follows it
    # with a push of the empty-string literal (asserted below).
    for i, inst in enumerate(bblock.insts):
        if not isinstance(inst, Inst): continue
        if not inst.name == 'variable': continue
        assert bblock.insts[i+1].name in ['push1', 'push4']
        assert bc.literal(bblock.insts[i+1].ops[0]) == ''
        variableis.append(i)
    # Remove the compensating pushes back-to-front so the earlier indices
    # collected above remain valid while we mutate the block.
    for i in reversed(variableis):
        bblock = bblock.replaceinst(i+1, [])
        changes.append((TAG_H_VARIABLE, (i+1, i+2), (i+1, i+1)))
    return bblock, changes
def yellow_bold(payload):
    """
    Wrap *payload* in the ANSI escape codes for bold yellow text.
    """
    return f'\x1b[33;1m{payload}\x1b[39;22m'
def add_trial_name_as_number(warm_up_data_df):
    """add column where trial_name is converted to number and before-/after-correction is added"""
    # NOTE(review): this column is initialised to 100.0 but never written to
    # below -- the loop writes "trial_name_number" instead. One of the two
    # names looks stale; confirm which column downstream code reads.
    warm_up_data_df.insert(
        warm_up_data_df.shape[1], "trial_name_corrected_by_before_and_after", float(100)
    )
    # NOTE(review): offset is never changed, so the before/after correction
    # mentioned in the docstring currently has no effect -- TODO confirm.
    offset = 0
    # Map trial names a/b/c to 0/1/2 (+ offset). The "trial_name_number"
    # column is created implicitly by the first .at[] assignment; rows with
    # other trial names are left untouched.
    for index, row in warm_up_data_df.iterrows():
        if row.trial_name == "a":
            warm_up_data_df.at[index, "trial_name_number"] = 0 + offset
        elif row.trial_name == "b":
            warm_up_data_df.at[index, "trial_name_number"] = 1 + offset
        elif row.trial_name == "c":
            warm_up_data_df.at[index, "trial_name_number"] = 2 + offset
    return warm_up_data_df
from typing import Optional
def get_language(request: Request) -> str:
    """Resolve the response language.

    Prefers the 'lang' query parameter, then the request's LANGUAGE_CODE,
    accepting each only when it is among the available languages; otherwise
    falls back to the current default language.
    """
    lang: Optional[str] = request.query_params.get("lang")
    code: Optional[str] = getattr(request, "LANGUAGE_CODE", None)
    for candidate in (lang, code):
        if candidate and candidate in available_languages:
            return candidate
    return current_language()
def is_installed(package_name):
    """Return True if *package_name* is among the device's installed packages."""
    listing = adb.run_shell_command(['pm', 'list', 'packages'])
    # Each line looks like "package:<name>"; keep only the name part.
    installed = {line.split(':')[-1] for line in listing.splitlines()}
    return package_name in installed
import numpy
def int_L1_keldysh(ngr, ngi, L1f, L1b, L1i, tir, tii, D1, gr, gi, Gr, Gi):
    """Return L1bar.

    Propagates the forward (L1f), backward (L1b) and imaginary-branch (L1i)
    amplitudes using exponential kernels built from D1 and quadrature
    weights gr/gi with kernels Gr/Gi.
    NOTE(review): branch semantics inferred from variable names
    (tir/tii = real/imaginary time grids) -- confirm against callers.
    """
    # Imaginary branch: weight L1i with exp((t_s - t_y) * D1) over y >= s
    # (dt is zero for y < s) and integrate against gi and Gi.
    L1i_out = numpy.zeros(L1i.shape, dtype=complex)
    for s in range(ngi):
        dt = numpy.zeros((ngi))
        for y in range(s, ngi):
            dt[y] = tii[s] - tii[y]
        gtemp = numpy.exp(dt[:, None, None]*D1[None, :, :])
        L1temp = einsum('yai,yia->yia', gtemp, L1i)
        L1i_out[s] = einsum('y,yia,y->ia', gi, L1temp, Gi[:, s])/gi[s]
    # Backward branch: tib is the real-time grid reversed in place.
    L1b_out = numpy.zeros(L1b.shape, dtype=complex)
    tib = tir.copy()
    for i in range(ngr):
        tib[ngr - i - 1] = tir[i]
    for s in range(ngr):
        dt = numpy.zeros((ngr))
        for y in range(s, ngr):
            dt[y] = tib[s] - tib[y]
        gtemp = numpy.exp(1.j*dt[:, None, None]*D1[None, :, :])
        L1temp = einsum('yai,yia->yia', gtemp, L1b)
        # Cross term coupling the imaginary branch into the backward branch.
        add = numpy.exp(D1[None, :, :]*(1.j*tib[s] - tii[:, None, None]))/gr[s]
        L1b_out[s] = -1.j*einsum('y,yia,y->ia', gr, L1temp, Gr[:, s])/gr[s]
        L1b_out[s] += numpy.einsum('y,yia,yai->ia', gi, L1i, add)*Gr[ngr - 1, s]
    # Forward branch: combines forward propagation with boundary terms from
    # both the backward and imaginary branches (weighted by Gr[ngr-1, s]).
    L1f_out = numpy.zeros(L1b.shape, dtype=complex)
    for s in range(ngr):
        dt = numpy.zeros((ngr))
        for y in range(s, ngr):
            dt[y] = tir[s] - tir[y]
        gtemp = numpy.exp(1.j*dt[:, None, None]*D1[None, :, :])
        L1temp = einsum('yai,yia->yia', gtemp, L1f)
        fac = numpy.exp(1.j*D1[None, :, :]*(tir[s] - tib[:, None, None]))/gr[s]
        add = numpy.exp(D1[None, :, :]*(1.j*tir[s] - tii[:, None, None]))/gr[s]
        L1f_out[s] = 1.j*einsum(
            'y,yia,y->ia', gr, L1temp, Gr[:, s])/gr[s]
        L1f_out[s] -= 1.j*numpy.einsum(
            'y,yia,yai->ia', gr, L1b, fac)*Gr[ngr - 1, s]
        L1f_out[s] += numpy.einsum(
            'y,yia,yai->ia', gi, L1i, add)*Gr[ngr - 1, s]
    return L1f_out, L1b_out, L1i_out
def cosine_similarity(v, u):
    """Return the cosine of the angle between vectors *v* and *u*."""
    norm_product = np.linalg.norm(v) * np.linalg.norm(u)
    return (v @ u) / norm_product
def normalize(rendered):
    """Return the input string without non-functional spaces or newlines."""
    stripped = (line.strip() for line in rendered.splitlines())
    collapsed = ''.join(line for line in stripped if line)
    return collapsed.replace(', ', ',')
import aiohttp
import asyncio
async def test_download_speed(session: aiohttp.ClientSession, url: str) -> int:
    """Count the amount of data successfully downloaded.

    Streams the response body in small chunks and returns the number of
    bytes received, even if the task is cancelled mid-download.
    """
    result = 0
    try:
        async with session.get(url) as resp:
            # Small chunk size so a cancellation can interrupt promptly.
            while True:
                chunk = await resp.content.read(56)
                if not chunk:
                    break
                else:
                    result += len(chunk)
    except asyncio.CancelledError:
        # Cancellation just ends the measurement; keep the bytes counted so far.
        pass
    finally:
        # NOTE(review): `return` inside `finally` suppresses ANY in-flight
        # exception (not only CancelledError), e.g. network errors.
        # Presumably intentional best-effort behaviour -- confirm before
        # changing.
        return result
def scale_design_mtx(X):
    """Scale each varying column of design matrix X to [0, 1] for display.

    Columns whose values are all (nearly) identical are left as 1s, so the
    variation in every other column is visible regardless of its scale.
    """
    col_min = X.min(axis=0)
    col_max = X.max(axis=0)
    # True for columns whose values are not all almost equal to each other.
    varying = (col_max - col_min) > 1.e-8
    scaled = np.ones_like(X)
    lo = col_min[varying]
    hi = col_max[varying]
    scaled[:, varying] = (X[:, varying] - lo) / (hi - lo)
    return scaled
from typing import Dict
def check_domain_filter(item: Dict, cfg: Config) -> bool:
    """
    Validate that a given post is one we can (or should) work on by checking
    its domain against the configured image/audio/video domain filters, or
    its subreddit against the domain-filter bypass list.

    :param item: a dict which has the post information in it.
    :param cfg: the config object.
    :return: True if we can work on it, False otherwise.
    """
    domain = item['domain']
    return (
        domain in cfg.image_domains
        or domain in cfg.audio_domains
        or domain in cfg.video_domains
        or item['subreddit'] in cfg.subreddits_domain_filter_bypass
    )
from typing import Optional
from typing import List
def get_epistatic_seqs_for_landscape(landscape: potts_model.PottsModel,
                                     distance: int,
                                     n: int,
                                     adaptive: bool = True,
                                     max_reuse: Optional[int] = None,
                                     top_k: Optional[int] = None,
                                     random_state: np.random.RandomState = np.random.RandomState(0)
                                     ) -> List[np.ndarray]:
    """Return `n` variants at `distance` that are enriched for epistasis on `landscape`.
    To construct epistatic sequences, the top epistatic pairs are taken directly from the landscape
    epistasis tensor, and used as building blocks for higher order mutants. If `max_reuse` is set, the
    top epistatic pairs are filtered greedily to only reuse the same positions `max_reuse` times.
    Args:
      landscape: The landscape.
      distance: The number of mutations from the landscape wildtype. Raises a ValueError if not an even number.
      n: The number of variants in the test set.
      adaptive: When True (False), return sequences enriched for adaptive (deleterious) epistasis
      max_reuse: An integer indicating the maximum number of times a position can be reused in the starting pool
        of epistatic pairs.
      top_k: The number of highest magnitude interactions to use for sampling. All epistatic pairs included in the
        resulting variants are guaranteed to be within the `top_k` highest magnitude.
      random_state: An instance of np.random.RandomState
        (note: a mutable default instance is shared across calls).
    Return:
      A List of sequences.
    """
    if distance % 2 != 0:
        raise ValueError('Odd distance not supported.')
    if not top_k:
        top_k = n
    # Highest-magnitude pairs for adaptive epistasis; lowest for deleterious.
    mutation_pairs = utils.get_top_n_mutation_pairs(landscape.epistasis_tensor, top_k, lowest=not adaptive)
    if max_reuse is not None:
        assert max_reuse > 0
        mutation_pairs = filter_mutation_set_by_position(mutation_pairs, limit=max_reuse)
        print(f'{len(mutation_pairs)} after filtering {top_k}')
    # Each combination round contributes one pair (2 mutations).
    num_rounds = distance // 2
    all_combined = combine_k_rounds(num_rounds, mutation_pairs)
    # Keep only combinations that touch exactly `distance` distinct positions.
    all_combined = [element for element in all_combined if len(element) == distance]
    if len(all_combined) < n:
        raise ValueError(f'Not enough ({len(all_combined)} < {n}) mutants at distance {distance}, try increasing `top_k`.')
    # TODO(nthomas) after switching to np.random.Generator, we can do rng.choice(all_combined)
    subset_idxs = random_state.choice(len(all_combined), n, replace=False)
    subset = [all_combined[i] for i in subset_idxs]
    seqs = [utils.apply_mutations(landscape.wildtype_sequence, m) for m in subset]
    return seqs
def compare_nodal_prices(df_dcopf, df_mppdc):
    """Find max absolute difference in nodal prices between DCOPF and MPPDC models

    In the DCOPF results the nodal price is the dual of the POWER_BALANCE
    constraint; in the MPPDC results it is the value of the lambda_var
    variable. Node and scenario IDs are recovered from the index strings.

    Parameters
    ----------
    df_dcopf : pandas DataFrame
        Results from DCOPF model

    df_mppdc : pandas DataFrame
        Results from MPPDC model

    Returns
    -------
    max_price_difference : float
        Maximum difference between nodal prices for DCOPF and MPPDC models
        over all nodes and scenarios
    """
    # DCOPF model
    # -----------
    df_tmp_1 = df_dcopf.reset_index().copy()
    # Filter price records
    df_tmp_1 = df_tmp_1[df_tmp_1['index'].str.contains(r'\.POWER_BALANCE\[')]
    # Extract values (the constraint dual is the nodal price)
    df_tmp_1['Value'] = df_tmp_1.apply(lambda x: x['Constraint']['Dual'], axis=1)
    # Extract node and scenario IDs
    df_tmp_1['NODE_ID'] = df_tmp_1['index'].str.extract(r'\.POWER_BALANCE\[(\d+)\]').astype(int)
    df_tmp_1['SCENARIO_ID'] = df_tmp_1['SCENARIO_ID'].astype(int)
    # Prices at each node for each scenario
    df_dcopf_prices = df_tmp_1.set_index(['SCENARIO_ID', 'NODE_ID'])['Value']
    # MPPDC model
    # -----------
    df_tmp_2 = df_mppdc.reset_index().copy()
    # Filter price records
    df_tmp_2 = df_tmp_2[df_tmp_2['index'].str.contains(r'\.lambda_var\[')]
    # Extract values (the variable value is the nodal price)
    df_tmp_2['Value'] = df_tmp_2.apply(lambda x: x['Variable']['Value'], axis=1)
    # Extract node and scenario IDs
    df_tmp_2['NODE_ID'] = df_tmp_2['index'].str.extract(r'\.lambda_var\[(\d+)\]').astype(int)
    df_tmp_2['SCENARIO_ID'] = df_tmp_2['index'].str.extract(r'LL_DUAL\[(\d+)\]').astype(int)
    # Prices at each node for each scenario
    df_mppdc_prices = df_tmp_2.set_index(['SCENARIO_ID', 'NODE_ID'])['Value']
    # Compute difference between models
    # ---------------------------------
    # Series are aligned on the (SCENARIO_ID, NODE_ID) index before subtracting.
    max_price_difference = df_dcopf_prices.subtract(df_mppdc_prices).abs().max()
    print('Maximum difference between nodal prices over all nodes and scenarios: {0}'.format(max_price_difference))
    return max_price_difference
def add(a, b):
    """A dummy function returning the sum of two values."""
    total = a + b
    return total
def get_interconnect_regs(interconnect: Interconnect):
    """function to loop through every interconnect object and dump the
    entire configuration space

    Returns a list of dicts, one per configurable mux, each with the mux
    name, packed config address, value range, bit slice (lo/hi) and the
    name of the backing config register.
    """
    result = []
    for x, y in interconnect.tile_circuits:
        tile = interconnect.tile_circuits[(x, y)]
        # cb first
        for cb_name, cb in tile.cbs.items():
            # get the index
            index = tile.features().index(cb)
            # need to get range
            # notice that we may already replace the mux with aoi + const
            # so we need to get the height from the actual mux
            mux = cb.mux
            mux_range = mux.height
            # Muxes with a single input have no selection bits to configure.
            if mux_range <= 1:
                continue
            reg_addr, lo, hi = cb.get_reg_info(get_mux_sel_name(cb.node))
            config_addr = interconnect.get_config_addr(reg_addr, index,
                                                       x, y)
            result.append({
                "name": cb_name,
                "addr": config_addr,
                "range": mux_range - 1,
                "lo": lo,
                "hi": hi,
                "reg_name": f"config_reg_{reg_addr}"
            })
        # Then the switchboxes: both the routing muxes and the register muxes.
        for switchbox in tile.sbs.values():
            index = tile.features().index(switchbox)
            for sb, sb_mux in switchbox.sb_muxs.values():
                if sb_mux.height > 1:
                    config_name = get_mux_sel_name(sb)
                    reg_addr, lo, hi = switchbox.get_reg_info(config_name)
                    mux_range = sb_mux.height
                    config_addr = interconnect.get_config_addr(reg_addr, index,
                                                               x, y)
                    result.append({
                        "name": str(sb),
                        "addr": config_addr,
                        "range": mux_range - 1,
                        "lo": lo,
                        "hi": hi,
                        "reg_name": f"config_reg_{reg_addr}"
                    })
            for node, reg_mux in switchbox.reg_muxs.values():
                if reg_mux.height > 1:
                    config_name = get_mux_sel_name(node)
                    reg_addr, lo, hi = switchbox.get_reg_info(config_name)
                    mux_range = reg_mux.height
                    config_addr = interconnect.get_config_addr(reg_addr, index,
                                                               x, y)
                    result.append({
                        "name": str(node),
                        "addr": config_addr,
                        "range": mux_range - 1,
                        "lo": lo,
                        "hi": hi,
                        "reg_name": f"config_reg_{reg_addr}"
                    })
    return result
def add_bg(sc):
    """Choose a background and add it to a scaper object (checking duration).

    Args:
        sc: scaper.Scaper, a scaper object to add a background to.
    Returns:
        scaper.Scaper object with the background added.
    """
    # First pass: let scaper pick a background file at random.
    sc.add_background(
        label=("choose", []), source_file=("choose", []), source_time=("const", 0)
    )
    chosen = sc._instantiate()["annotations"][0]["data"][0][2]
    chosen_file = chosen["source_file"]
    chosen_label = chosen["label"]
    total_duration = sf.info(chosen_file).duration
    # Second pass: re-add the same file, now with a random start time that
    # keeps the event within the file's duration.
    sc.reset_bg_event_spec()
    sc.add_background(
        label=("const", chosen_label),
        source_file=("const", chosen_file),
        source_time=("uniform", 0, total_duration - sc.duration),
    )
    return sc
from typing import Optional
from typing import Mapping
from typing import Any
from typing import Type
import torch
from typing import Sequence
import time
import collections
def pipeline(
    *,
    dataset: HintOrType[DatasetLoader],
    model: HintOrType[Model],
    model_kwargs: Optional[Mapping[str, Any]] = None,
    optimizer_cls: Type[Optimizer] = torch.optim.Adam,
    optimizer_kwargs: Optional[Mapping[str, Any]] = None,
    loss_cls: Type[_Loss] = torch.nn.BCELoss,
    loss_kwargs: Optional[Mapping[str, Any]] = None,
    batch_size: int = 5120,
    epochs: int,
    context_features: bool,
    drug_features: bool,
    drug_molecules: bool,
    train_size: Optional[float] = None,
    random_state: Optional[int] = None,
    metrics: Optional[Sequence[str]] = None,
) -> Result:
    """Run the training and evaluation pipeline.
    :param dataset:
        The dataset can be specified in one of three ways:
        1. The name of the dataset
        2. A subclass of :class:`chemicalx.DatasetLoader`
        3. An instance of a :class:`chemicalx.DatasetLoader`
    :param model:
        The model can be specified in one of three ways:
        1. The name of the model
        2. A subclass of :class:`chemicalx.Model`
        3. An instance of a :class:`chemicalx.Model`
    :param model_kwargs:
        Keyword arguments to pass through to the model constructor. Relevant if passing model by string or class.
    :param optimizer_cls:
        The class for the optimizer to use. Currently defaults to :class:`torch.optim.Adam`.
    :param optimizer_kwargs:
        Keyword arguments to pass through to the optimizer construction.
    :param loss_cls:
        The loss to use. If none given, uses :class:`torch.nn.BCELoss`.
    :param loss_kwargs:
        Keyword arguments to pass through to the loss construction.
    :param batch_size:
        The batch size
    :param epochs:
        The number of epochs to train
    :param context_features:
        Indicator whether the batch should include biological context features.
    :param drug_features:
        Indicator whether the batch should include drug features.
    :param drug_molecules:
        Indicator whether the batch should include drug molecules
    :param train_size:
        The ratio of training triples. Default is 0.8 if None is passed.
    :param random_state:
        The random seed for splitting the triples. Default is 42. Set to none for no fixed seed.
    :param metrics:
        The list of metrics to use.
    :returns:
        A result object with the trained model and evaluation results
    """
    # --- Data: resolve the dataset and build train/test batch generators. ---
    loader = dataset_resolver.make(dataset)
    train_generator, test_generator = loader.get_generators(
        batch_size=batch_size,
        context_features=context_features,
        drug_features=drug_features,
        drug_molecules=drug_molecules,
        train_size=train_size,
        random_state=random_state,
    )
    # --- Setup: resolve the model, then build optimizer and loss. ---
    model = model_resolver.make(model, model_kwargs)
    optimizer = optimizer_cls(model.parameters(), **(optimizer_kwargs or {}))
    model.train()
    loss = loss_cls(**(loss_kwargs or {}))
    losses = []
    # --- Training loop: records per-batch losses and total wall time. ---
    train_start_time = time.time()
    for _epoch in trange(epochs):
        for batch in train_generator:
            optimizer.zero_grad()
            prediction = model(*model.unpack(batch))
            loss_value = loss(prediction, batch.labels)
            losses.append(loss_value.item())
            loss_value.backward()
            optimizer.step()
    train_time = time.time() - train_start_time
    # --- Evaluation: collect predictions for every test batch. ---
    model.eval()
    evaluation_start_time = time.time()
    predictions = []
    for batch in test_generator:
        prediction = model(*model.unpack(batch))
        # Some models return a tuple of outputs; the first is the prediction.
        if isinstance(prediction, collections.abc.Sequence):
            prediction = prediction[0]
        prediction = prediction.detach().cpu().numpy()
        identifiers = batch.identifiers
        identifiers["prediction"] = prediction
        predictions.append(identifiers)
    evaluation_time = time.time() - evaluation_start_time
    predictions_df = pd.concat(predictions)
    # --- Metrics: default to ROC-AUC when no metric names are given. ---
    if metrics is None:
        metric_dict = {"roc_auc": roc_auc_score}
    else:
        metric_dict = {name: metric_resolver.lookup(name) for name in metrics}
    return Result(
        model=model,
        predictions=predictions_df,
        losses=losses,
        train_time=train_time,
        evaluation_time=evaluation_time,
        metrics={
            name: func(predictions_df["label"], predictions_df["prediction"]) for name, func in metric_dict.items()
        },
    )
import hashlib
def sha224(binary: bytes) -> str:
    """
    Overview:
        SHA224 hash.
    Arguments:
        - binary (:obj:`bytes`): Binary data to be hashed.
    Returns:
        - digest (:obj:`str`): SHA224 digest string.
    Examples::
        >>> from hbutils.encoding import sha224
        >>> sha224(b'')
        'd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f'
        >>> sha224(b'this is a word')
        '7b994bcffbc9a3689941e541a2e639c20e321b763b744c0d84f6899c'
    """
    # Delegates to the module-level helper, which applies the given hashlib
    # constructor to `binary` (presumably returning the hex digest, as the
    # doctest above suggests -- confirm in _hash_algorithm).
    return _hash_algorithm(hashlib.sha224, binary)
from typing import Dict
from typing import Any
def get_telephone_number(input: Dict[str, Any]) -> str:
    """Search the JSON sample for a response whose element reference is 'TelNo'
    and return that response's telephone value.

    Raises KeyError when no such response exists.
    """
    print(f"Getting telephone number, please wait...")
    for response_group in __valid_top_level_responses(input):
        for response in __valid_second_level_responses(response_group):
            if "TelNo" in __valid_element_dictionary(response).values():
                return __valid_value(response)
    raise KeyError("Failed to get telephone number")
async def async_setup(hass: HomeAssistant, config: dict):
    """No-op YAML setup hook: this integration cannot be configured via
    YAML, so always report success (config-entry setup handles the rest).

    :param hass: Home Assistant instance (unused here).
    :param config: YAML configuration dict (ignored).
    :return: True, signalling successful setup.
    """
    return True
def get_next_on_schedule():
    """Returns a list of current and 6 next assigned slots.

    Each entry holds the slot time ('HH:MM') and the editor's full name.
    No request params.
    """
    try:
        data = [{
            'time': slot.time.strftime('%H:%M'),
            'editor': slot.editor.first_name + ' ' + slot.editor.last_name
        } for slot in Slot.get_next_on_schedule()]
        return data_response(data)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate instead of being turned into an error response.
        return error_response('Nije moguće dohvatiti raspored emitiranja.')
def getErdosSectors(topm_dict,minimum_incidence=None):
    """Make Erdös sectorialization and return dict with main variables.

    Parameters
    ----------
    topm_dict : dict
        Topological measures; must provide the keys "nnodes", "nedges",
        "degrees" and "degrees_".
    minimum_incidence : int, optional
        Minimum incidence for sectorialization; defaults to
        max(2, ~1% of the number of nodes).

    Returns
    -------
    dict
        ``locals()`` of this function, i.e. every local variable
        (``sectorialized_degrees``, ``sectorialized_agents``,
        ``sectorialized_nagents``, ``prob``, ...).  NOTE(review): because
        the result is ``locals()``, the local variable names below are
        part of the public output -- renaming any of them changes the keys.
    """
    if not minimum_incidence:
        # default: at least 2, roughly 1% of the nodes
        minimum_incidence=max(2,int(topm_dict["nnodes"]*0.01))
    t=topm_dict
    max_degree_empirical=max(t["degrees_"])
    # edge probability of the comparable Erdös-Rényi graph
    prob=t["nedges"]/(t["nnodes"]*(t["nnodes"]-1)) # edge probability
    max_degree_possible=2*(t["nnodes"]-1) # max d given N
    # sorted distinct degree values observed in the network
    d_=list(set(t["degrees_"]))
    d_.sort()
    # sectorialize degrees by comparing the empirical degree distribution
    # to the binomial distribution expected for an Erdös-Rényi graph
    sectorialized_degrees= newerSectorializeDegrees(
            makeEmpiricalDistribution(
                t["degrees_"], d_, t["nnodes"] ),
            stats.binom(max_degree_possible,prob),
            d_,
            max_degree_empirical,
            minimum_incidence,t["nnodes"])
    sectorialized_agents= sectorializeAgents(
            sectorialized_degrees, t["degrees"])
    sectorialized_nagents=[len(i) for i in sectorialized_agents]
    # drop the alias so the returned locals() does not also expose the
    # input dict under the short key "t"
    del t
    return locals()
def truncate(s, eps):
    r"""Return the smallest k such that ``sum(s[:k]**2) >= 1 - eps``.

    Parameters
    ----------
    s : sequence of float
        Typically singular values; presumably normalized so the squared
        sum reaches 1 -- TODO confirm.  If the threshold is never reached
        an IndexError is raised (same failure mode as before).
    eps : float
        Allowed truncation error; ``eps >= 1`` yields ``k == 0``.

    Returns
    -------
    int
        Number of leading entries required.

    Notes
    -----
    The docstring is now a raw string: the previous ``\geq`` inside a
    normal string literal was an invalid escape sequence (a SyntaxWarning
    on modern Python).
    """
    threshold = 1 - eps
    total = 0.0
    count = 0
    while total < threshold:
        total += s[count] ** 2
        count += 1
    return count
def transform_pose_msg(msg, child_frame_current, child_frame_new):
    """
    Re-express the pose in ``msg`` for a different child frame.

    The world pose of ``child_frame_current`` is composed with the static
    transform from ``child_frame_current`` to ``child_frame_new`` to obtain
    the world pose of ``child_frame_new``.
    """
    def _msg_to_matrix(pose):
        # Build a 4x4 homogeneous transform from a geometry_msgs Pose.
        offset = [pose.position.x, pose.position.y, pose.position.z]
        quat = [pose.orientation.x, pose.orientation.y,
                pose.orientation.z, pose.orientation.w]
        rpy = tf.transformations.euler_from_quaternion(quat)
        return tf.transformations.compose_matrix(angles=rpy, translate=offset)
    def _matrix_to_msg(matrix):
        # Convert a 4x4 homogeneous transform back into a Pose message.
        pose = Pose()
        pose.position.x = matrix[0, 3]
        pose.position.y = matrix[1, 3]
        pose.position.z = matrix[2, 3]
        qx, qy, qz, qw = tf.transformations.quaternion_from_matrix(matrix)
        pose.orientation.x = qx
        pose.orientation.y = qy
        pose.orientation.z = qz
        pose.orientation.w = qw
        return pose
    current_to_new = transform_data.get_tf_matrix(child_frame_current, child_frame_new)
    world_to_current = _msg_to_matrix(msg.pose)
    world_to_new = np.dot(world_to_current, current_to_new)
    result = PoseStamped()
    result.header = msg.header
    result.pose = _matrix_to_msg(world_to_new)
    return result
import six
def validate_uuid4(uuid_string):
    """Validate that a UUID string is in fact a valid uuid4.

    Hyphens are ignored and case is normalized, so both the canonical
    dashed form and the bare 32-character hex form are accepted.

    It is vital that the 'version' kwarg be passed to the ``UUID()`` call,
    otherwise any 32-character hex string is considered valid.  Since
    ``UUID.__init__`` silently *coerces* the version/variant bits, we also
    compare the resulting hex back against the normalized input to reject
    strings that are valid hex but not a valid uuid4.

    Returns:
        bool: True iff ``uuid_string`` encodes a valid uuid4.
    """
    if not isinstance(uuid_string, str):
        return False
    # Normalize: strip hyphens and lower-case.  The previous code used the
    # Python-2-only ``str.translate(None, '-')`` API (TypeError on Python 3)
    # and compared the hyphen-free hex digest against the raw input, so
    # dashed canonical UUIDs could never validate.
    normalized = uuid_string.replace('-', '').lower()
    try:
        val = UUID(normalized, version=4)
    except ValueError:
        # Not a valid hex code for a UUID at all.
        return False
    return val.hex == normalized
def nmi(vanilla_result, fair_result, seed=0):
    """
    Normalized mutual information (NMI) between the clusterings obtained
    from a vanilla mining result and a debiased mining result.

    Each result matrix is clustered with k-means (k = number of columns,
    single restart, fixed random state) and the label vectors compared.

    :param vanilla_result: vanilla mining result
    :param fair_result: debiased mining result
    :param seed: random seed
    :return: NMI between vanilla mining result and debiased mining result
    """
    def _labels(result_matrix):
        # One cluster per embedding dimension, deterministic given `seed`.
        model = KMeans(n_clusters=result_matrix.shape[1], random_state=seed, n_init=1)
        return model.fit(result_matrix).labels_
    vanilla_labels = _labels(vanilla_result)
    fair_labels = _labels(fair_result)
    return normalized_mutual_info_score(
        vanilla_labels, fair_labels, average_method='arithmetic')
def list_to_str(items):
    """Concatenate an iterable of strings into a single string.

    :param items: iterable of strings to concatenate
    :return: the concatenation of all items; '' for an empty iterable
    """
    # str.join is O(n) overall; the previous ``+=`` loop copied the
    # accumulator on every iteration (O(n^2) worst case).
    return ''.join(items)
def prefixed_collapsible_map(m, prefix):
    """
    Return a dict of params corresponding to those in m with the added prefix.

    Nested dicts are flattened into dotted keys, e.g. ``{'a': {'b': 1}}``
    with prefix ``'P'`` becomes ``{'P.a.b': 1}``.  Unset or non-dict input
    yields an empty dict.
    """
    if m == values.unset:
        return {}
    if not isinstance(m, dict):
        return {}
    def _flatten(mapping, path):
        # Depth-first walk yielding ('a.b.c', leaf_value) pairs.
        for key, value in mapping.items():
            if isinstance(value, dict):
                yield from _flatten(value, path + [key])
            else:
                yield '.'.join(path + [key]), value
    return {
        '{}.{}'.format(prefix, flat_key): flat_value
        for flat_key, flat_value in _flatten(m, [])
    }
import torch
def to_minmax_form(boxes):
    """Convert boxes from center form to min/max corner form.

    :param boxes: (n, 4) tensor in (cx, cy, w, h) form.
    :return: (n, 4) tensor in (xmin, ymin, xmax, ymax) form.

    The +/-0.5 offsets presumably convert between a continuous center/size
    description and inclusive integer pixel corner coordinates -- TODO
    confirm against the dataset's box convention.
    """
    xmin = boxes[:, 0] - boxes[:, 2] / 2 + 0.5
    ymin = boxes[:, 1] - boxes[:, 3] / 2 + 0.5
    xmax = boxes[:, 0] + boxes[:, 2] / 2 - 0.5
    ymax = boxes[:, 1] + boxes[:, 3] / 2 - 0.5
    # Stack per-coordinate columns along dim=1 so the result is (n, 4) as
    # documented; ``torch.stack([...])`` without dim yields (4, n).
    return torch.stack([xmin, ymin, xmax, ymax], dim=1)
def apps(request):
    """Render the demo apps page.

    :param request: incoming request object (presumably a Django-style
        HttpRequest -- confirm against the framework in use).
    :return: response produced by rendering ``/demo/apps.html`` through
        the Mako template context helper.
    """
    return render_mako_context(request, '/demo/apps.html')
def compute_unfolded_dimension(xtypes):
    """
    Returns x dimension (int) taking into account unfolded categorical features.

    Continuous/integer features contribute 1 each; an (ENUM, n, ...) tuple
    contributes n (one slot per category after one-hot unfolding).
    """
    total = 0
    for xtyp in xtypes:
        if xtyp in (FLOAT, INT):
            total += 1
        elif isinstance(xtyp, tuple) and xtyp[0] == ENUM:
            # Unfolded categorical: width stored in the tuple's 2nd slot.
            total += xtyp[1]
        else:
            _raise_value_error(xtyp)
    return total
async def report_exc_info(
    exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw
):
    """Asynchronously report an exception to Rollbar.

    exc_info: optional result of ``sys.exc_info()``; if omitted,
        ``sys.exc_info()`` will be called downstream.
    request: optional Starlette, WebOb, Werkzeug-based or Sanic request object.
    extra_data: optional mapping included in the 'custom' section of the payload.
    payload_data: optional dict overriding values in the final payload
        (e.g. 'level' or 'fingerprint').
    kw: accepted for legacy compatibility; unused.

    Example usage::

        rollbar.init(access_token='YOUR_PROJECT_ACCESS_TOKEN')

        async def func():
            try:
                do_something()
            except:
                await report_exc_info(sys.exc_info(), request,
                                      {'foo': 'bar'}, {'level': 'warning'})
    """
    with AsyncHandler():
        try:
            reporting = _report_exc_info(
                exc_info, request, extra_data, payload_data, level, **kw
            )
            return await call_later(reporting)
        except Exception as exc:
            log.exception('Exception while reporting exc_info to Rollbar. %r', exc)
from typing import Dict
def validate_input_parameters(
    input_parameters: Dict, original_parameters: Dict
) -> Dict:
    """Validate additional workflow input parameters against the spec.

    :param input_parameters: dictionary which represents additional workflow input parameters.
    :param original_parameters: dictionary which represents original workflow input parameters.
    :raises REANAValidationError: Given there are additional input parameters which are not present in the REANA spec parameter list.
    :return: the validated ``input_parameters`` mapping, unchanged.
    """
    unknown = (name for name in input_parameters if name not in original_parameters)
    for parameter in unknown:
        raise REANAValidationError(
            f'Input parameter "{parameter}" is not present in reana.yaml'
        )
    return input_parameters
import datetime
def getHirlamSimulationEndTime(root, datetime_format):
    """Return the UTC end time (POSIX timestamp) of the latest HIRLAM simulation.

    Walks the HIRLAM XML response for the first ``resultTime`` element and
    its nested ``timePosition`` child and parses that element's text.

    :param root: parsed XML root element (anything supporting ``.iter()``).
    :param datetime_format: ``strptime`` format of the time string.
    :return: POSIX timestamp (float), or None if the response cannot be parsed.
    """
    try:
        # Tags carry namespace prefixes, so match by substring.
        result_time_elem = [elem for elem in root.iter() if "resultTime" in elem.tag][0]
        time_elem = [elem for elem in result_time_elem.iter() if "timePosition" in elem.tag][0]
        end_time = datetime.datetime.strptime(
            time_elem.text, datetime_format).replace(tzinfo=datetime.timezone.utc).timestamp()
    except Exception:
        # Missing elements or an unparsable time string => "unknown".
        # NOTE: the previous ``from datetime import datetime`` made
        # ``datetime.datetime.strptime`` an AttributeError that the old
        # bare ``except:`` silently swallowed, so the function always
        # returned None; the import is now the module itself and the
        # except clause is narrowed so SystemExit/KeyboardInterrupt pass.
        return None
    return end_time
import re
def GFFParse(ref_file):
    """Extract annotated features from a GFF file based on feature identifier mapping.

    :param ref_file: path to a GFF3 file.
    :return: tuple ``(genes, transcripts, exons, utr5, cds, utr3)``.
        ``genes`` maps ``(chromosome, gene_id)`` to an attribute dict;
        the other five map ``(chromosome, parent_id)`` to a *list* of
        attribute dicts, one per feature line.
    """
    def _parse_attributes(gln, link_key):
        """Parse the 9th GFF column; return (value of link_key, remaining
        attributes merged with the feature's location/source fields)."""
        link_id = None
        desc = dict(chr=gln[0], start=gln[3], stop=gln[4], orient=gln[6], src=gln[1])
        for atb in gln[-1].split(';'):
            if atb == '':
                continue
            atb = atb.split('=')
            if atb[0] == link_key:
                link_id = atb[1]
                continue
            desc[atb[0]] = atb[1]
        return link_id, desc
    genes = dict()
    transcripts, exons, utr5, utr3, cds = dict(), dict(), dict(), dict(), dict()
    # Third-level feature types mapped to the dict collecting each of them.
    feature_stores = {
        'exon': exons,
        'five_prime_UTR': utr5,
        'CDS': cds,
        'three_prime_UTR': utr3,
    }
    # Note: 'rU' mode was removed in Python 3.11; plain text mode already
    # uses universal newlines.  The with-statement also closes the handle
    # on the (previously leaking) assertion/exception paths.
    with open(ref_file) as ref_fh:
        for gln in ref_fh:
            gln = gln.strip('\n\r').split('\t')
            if not gln:
                continue  # not considering an empty line
            # not considering commented and FASTA header lines from GFF
            if re.match(r'#', gln[0]) or re.match(r'>', gln[0]):
                continue
            if len(gln) == 1:
                continue  # not considering if FASTA sequence along with GFF
            # a valid GFF line contains only 9 tab-delimited fields
            assert len(gln) == 9, '\t'.join(gln)
            if '' in gln:
                continue  # empty fields in any line ?
            feature = gln[2]
            # TODO: include all possible first/second level feature types.
            if feature == 'gene':
                gid, desc = _parse_attributes(gln, 'ID')
                genes[(gln[0], gid)] = desc
            elif feature == 'mRNA':
                # mRNA lines link to their gene via Parent (not comma-split).
                gid, desc = _parse_attributes(gln, 'Parent')
                transcripts.setdefault((gln[0], gid), []).append(desc)
            elif feature in feature_stores:
                tid, desc = _parse_attributes(gln, 'Parent')
                store = feature_stores[feature]
                # A third-level feature may belong to several transcripts.
                for fid in tid.split(','):
                    store.setdefault((gln[0], fid), []).append(desc)
    return genes, transcripts, exons, utr5, cds, utr3
def get_all_compiler_versions():
    """Returns a sorted list of strings, like "70" or "80" or "9.0"
    with most recent compiler version first.

    Scans the Windows registry, or the conventional /opt install
    directories on Linux and Mac, for installed Intel C++ compilers.

    NOTE(review): this is Python 2 code (``print`` statements and
    ``except Error, e`` syntax); it cannot run under Python 3 as-is.
    """
    versions=[]
    if is_windows:
        # 32-bit tool keys live under WoW6432Node on 64-bit Windows.
        if is_win64:
            keyname = 'Software\\WoW6432Node\\Intel\\Compilers\\C++'
        else:
            keyname = 'Software\\Intel\\Compilers\\C++'
        try:
            k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                        keyname)
        except WindowsError:
            # No Intel compiler key at all: nothing installed.
            return []
        i = 0
        versions = []
        try:
            # Enumerate version subkeys (arbitrary upper bound of 100).
            while i < 100:
                subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError
                # Check that this refers to an existing dir.
                # This is not 100% perfect but should catch common
                # installation issues like when the compiler was installed
                # and then the install directory deleted or moved (rather
                # than uninstalling properly), so the registry values
                # are still there.
                ok = False
                for try_abi in ('IA32', 'IA32e', 'IA64', 'EM64T'):
                    try:
                        d = get_intel_registry_value('ProductDir', subkey, try_abi)
                    except MissingRegistryError:
                        continue # not found in reg, keep going
                    if os.path.exists(d): ok = True
                if ok:
                    versions.append(subkey)
                else:
                    try:
                        # Registry points to nonexistent dir. Ignore this
                        # version.
                        value = get_intel_registry_value('ProductDir', subkey, 'IA32')
                    except MissingRegistryError, e:
                        # Registry key is left dangling (potentially
                        # after uninstalling).
                        print \
                            "scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
                            "scons: *** It seems that the compiler was uninstalled and that the registry\n" \
                            "scons: *** was not cleaned up properly.\n" % subkey
                    else:
                        print "scons: *** Ignoring "+str(value)
                i = i + 1
        except EnvironmentError:
            # no more subkeys
            pass
    elif is_linux:
        for d in glob.glob('/opt/intel_cc_*'):
            # Typical dir here is /opt/intel_cc_80.
            m = re.search(r'cc_(.*)$', d)
            if m:
                versions.append(m.group(1))
        for d in glob.glob('/opt/intel/cc*/*'):
            # Typical dir here is /opt/intel/cc/9.0 for IA32,
            # /opt/intel/cce/9.0 for EMT64 (AMD64)
            m = re.search(r'([0-9.]+)$', d)
            if m:
                versions.append(m.group(1))
    elif is_mac:
        for d in glob.glob('/opt/intel/cc*/*'):
            # Typical dir here is /opt/intel/cc/9.0 for IA32,
            # /opt/intel/cce/9.0 for EMT64 (AMD64)
            m = re.search(r'([0-9.]+)$', d)
            if m:
                versions.append(m.group(1))
    versions = uniquify(versions) # remove dups
    versions.sort(vercmp)
    return versions
import re
def detect_resolution(paths):
    """Attempt to detect the input resolution from a list of paths.

    Args:
        paths (list): List of file paths, each expected to contain exactly
            one ``<number>km`` token (e.g. ``"4km"``, ``"2.5km"``).

    Raises:
        ResolutionDetectionException: If a path contains zero or multiple
            candidate resolutions, if detected resolutions are
            inconsistent between paths, or if ``paths`` is empty.

    Returns:
        float: Resolution in km.
    """
    # Pattern matching a resolution token such as "4km" or "2.5km";
    # compiled once instead of per-path.
    pattern = re.compile(r'([0-9.]*km)')
    res = None
    for path in paths:
        matches = pattern.findall(path)
        # Exactly one candidate per path is required.
        if len(matches) != 1:
            raise ResolutionDetectionException(
                f'Unable to detect resolution from {path}, '
                f'there are too many possibilities.')
        if res is None:
            # First detection.
            res = matches[0]
        elif res != matches[0]:
            # Already detected, but this path disagrees.
            raise ResolutionDetectionException(
                'Detected resolutions are inconsistent between supplied paths.')
    if res is None:
        # Empty input previously crashed with AttributeError on
        # ``None.replace``; raise the domain error instead.
        raise ResolutionDetectionException(
            'No paths supplied, unable to detect resolution.')
    # Made it this far, we have a detectable resolution
    return float(res.replace('km', ''))
def configure_plotting_functions(
        db_conn, read_bundled_simulation_results_from_db, extract_simulation_result,
        n_simulation_results, simulation_result_name, extract_display_info,
        compile_plot_annotation_text, node_names, output_dirpath, is_single_process,
        pdf_page_limit, image_formats_and_dpis, no_attractor_plot_annotation_text):
    """
    Make partial application to functions plot_annotation_only(),
    plot_states(), and plot_pdf_page_details().
    :param db_conn: DB connection
    :param read_bundled_simulation_results_from_db: [function] to read wrapped
        simulation results from DB
    :param extract_simulation_result: [function] to unwrap simulation result
    :param n_simulation_results: number of simulation results in DB
    :param simulation_result_name: description of simulation result for user
    :param extract_display_info: [function] to extract display
        information from simulation result
    :param compile_plot_annotation_text: [function] to compile plot
        annotation for simulation result
    :param node_names: list of node names
    :param output_dirpath: output directory path
    :param is_single_process: [bool] whether BoolSi run has no workers
    :param pdf_page_limit: maximum number of simulation results to
        print to PDF
    :param image_formats_and_dpis: iterable of tuples (format, dpi)
        corresponding to output image formats
    :param no_attractor_plot_annotation_text: annotation text for the number
        of not found attractors
    :return: ([function] to plot annotation with no states, [function] to
        plot states, [function] to plot PDF-specific elements)
    """
    # Shared figure/renderer used for all text measuring below and captured
    # by the returned partials.
    fig, renderer = init_plotting()
    # Find legend dimensions (in inches) for each legend type.
    # A legend type is the bool pair (has fixed nodes, has perturbed nodes).
    legend_widths = dict()
    legend_heights = dict()
    for legend_type in product((False, True), repeat=2):
        legend_widths[legend_type], legend_heights[legend_type] = \
            get_states_legend_dimensions(fig, renderer, *legend_type)
    # Configure function to generate simulation results to be plotted.
    # Without image output, only as many results as fit the PDF are needed.
    n_simulation_results_to_plot = n_simulation_results if \
        image_formats_and_dpis else min(n_simulation_results, pdf_page_limit)
    def simulation_result_generator():
        for simulation_resut_index, wrapped_simulation_result in enumerate(
                read_bundled_simulation_results_from_db(db_conn)):
            if simulation_resut_index == n_simulation_results_to_plot:
                break
            yield extract_simulation_result(wrapped_simulation_result)
    # Compile annotation texts for the simulation results. Determine
    # whether they can be spread out horizontally with the legends
    # to fit within heatmap width, or they should be stacked instead.
    max_annotation_width = get_text_width(fig, renderer, no_attractor_plot_annotation_text) if \
        no_attractor_plot_annotation_text else 0
    max_horizontal_layout_width = max_annotation_width
    max_stacked_layout_width = max_annotation_width
    longest_states = []
    longest_time_labels = []
    longest_time_labels_for_pdf = []
    stage_label = 'Preprocessing {}s for graphic output...'.format(simulation_result_name)
    with create_progressbar(n_simulation_results_to_plot, output_dirpath, is_single_process,
                            iterable=simulation_result_generator(), show_pos=True, show_eta=False,
                            stage_label=stage_label) as progressbar:
        for simulation_result_index, simulation_result in enumerate(progressbar):
            states, fixed_nodes, perturbed_nodes_by_t, time_labels, _ = \
                extract_display_info(simulation_result)
            annotation_text = compile_plot_annotation_text(
                simulation_result, simulation_result_index)
            annotation_width = get_text_width(fig, renderer, annotation_text)
            if annotation_width > max_annotation_width:
                max_annotation_width = annotation_width
            # Track the longest state sequence seen: it determines the
            # x-axis labeling measurements performed after the loop.
            if len(states) > len(longest_states):
                longest_states = states
                longest_time_labels = time_labels
                if simulation_result_index < pdf_page_limit:
                    longest_time_labels_for_pdf = time_labels
            # Determine legend type for the simulation result.
            legend_type = bool(fixed_nodes), bool(perturbed_nodes_by_t)
            # Calculate width (in inches) of annotation and legend
            # when laid out horizontally.
            legend_width = legend_widths[legend_type]
            horizontal_layout_width = annotation_width + legend_h_pad + legend_width
            if horizontal_layout_width > max_horizontal_layout_width:
                max_horizontal_layout_width = horizontal_layout_width
            stacked_layout_width = max(annotation_width, legend_width)
            if stacked_layout_width > max_stacked_layout_width:
                max_stacked_layout_width = stacked_layout_width
    # Calculate heatmap width (in inches).
    plot_width = len(node_names) * cell_size
    # Determine whether the widest annotation and legend
    # horizontal layout fits heatmap width or they should
    # be stacked.
    if max_horizontal_layout_width > plot_width:
        layout_is_stacked = True
        page_width = max(plot_width, max_stacked_layout_width)
    else:
        layout_is_stacked = False
        page_width = plot_width
    # Compensate for tiny undocumented padding produced by
    # bbox_inches='tight' & pad_inches=0 around text and states' plot.
    page_width *= 1.01
    # Check if longest states can be plotted and measure x-axis
    # labeling height.
    try:
        _, plot = plot_states(fig, False, dict(), 0, longest_states, dict(), dict(), node_names,
                              longest_time_labels, '')
    except ValueError:
        # Image failed to plot, no plotting will occur.
        raise PlottingFailedException
    else:
        plt.draw()
        # Calculate height of space from x-axis to bottom edge of its
        # label (in inches).
        xaxis_labeling_height = labeling_size_without_ticklabels + max(
            label.get_window_extent().height / fig.dpi for label in plot.get_xticklabels())
        # Measure y-axis labeling width to ensure equal PDF page
        # width.
        _, plot = plot_states(fig, False, dict(), 0, longest_states, dict(), dict(),
                              node_names, longest_time_labels_for_pdf, '')
        plt.draw()
        # Calculate width of space from y-axis to leftmost edge of its
        # label (in inches).
        yaxis_labeling_width = labeling_size_without_ticklabels + max(
            label.get_window_extent().width / fig.dpi for label in plot.get_yticklabels())
        # Done measuring; wipe the throwaway plots from the shared figure.
        fig.clear()
    # Configure function for plotting annotation of simulation
    # problems with no attractor found.
    _plot_annotation_only = partial(plot_annotation_only, fig=fig, n_nodes=len(node_names),
                                    annotation_text=no_attractor_plot_annotation_text)
    # Configure function for plotting states.
    _plot_states = partial(
        plot_states, fig, layout_is_stacked, legend_heights, xaxis_labeling_height)
    # Configure plotting PDF page details.
    _plot_page_details = partial(
        plot_page_details, fig, page_width, xaxis_labeling_height, yaxis_labeling_width)
    return _plot_annotation_only, _plot_states, _plot_page_details
def compute_mean_nutrient_intake(nutrient_intake):
    """Compute the mean intake for each of the four tracked nutrients.

    Returns the means in fixed order: carbs, fiber, fat, protein.
    """
    totals = nutrient_intake.sum()
    count = nutrient_intake.count()
    # One mean per nutrient column, in the canonical order above.
    return [totals[index] / count for index in range(4)]
def __(string):
    """
    Emojize a text, wrapping ``use_aliases``.

    NOTE(review): ``use_aliases=True`` targets the pre-2.0 ``emoji``
    package API (newer releases use ``language='alias'``) -- confirm the
    pinned dependency version.

    Args:
        string (str): string to emojize (alias codes like ``:smile:``).

    Returns:
        An emojized string.
    """
    return emojize(string, use_aliases=True)
def preserve_channel_dim(func):
    """Decorator: restore a dropped trailing channel axis of size one.

    If the wrapped function collapses an ``(H, W, 1)`` image to ``(H, W)``,
    the singleton channel dimension is re-appended to the result.
    """
    @wraps(func)
    def wrapper(img, *args, **kwargs):
        original_shape = img.shape
        out = func(img, *args, **kwargs)
        lost_channel = (
            len(original_shape) == 3
            and original_shape[-1] == 1
            and len(out.shape) == 2
        )
        if lost_channel:
            out = np.expand_dims(out, axis=-1)
        return out
    return wrapper
import traceback
def redirect_auth_oidc(auth_code, fetchtoken=False, session=None):
    """
    Finds the Authentication URL in the Rucio DB oauth_requests table
    and redirects user's browser to this URL.
    :param auth_code: Rucio assigned code to redirect
        authorization securely to IdP via Rucio Auth server through a browser.
    :param fetchtoken: If True, valid token temporarily saved in the oauth_requests table
        will be returned. If False, redirection URL is returned.
    :param session: The database session in use.
    :returns: result of the query (authorization URL or a
        token if a user asks with the correct code) or None.
        Exception thrown in case of an unexpected crash.
    """
    try:
        redirect_result = session.query(models.OAuthRequest.redirect_msg).filter_by(access_msg=auth_code).first()
        if not isinstance(redirect_result, tuple):
            return None
        redirect_msg = redirect_result[0]
        if 'http' in redirect_msg:
            # Stored value is a redirection URL: only usable when the
            # caller asked for a redirect, not a token.
            return redirect_msg if not fetchtoken else None
        if fetchtoken:
            # Stored value should be a token: return it only if valid.
            vdict = validate_auth_token(redirect_msg, session=session)
            if vdict:
                return redirect_msg
        return None
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are not converted into a CannotAuthenticate error.
        raise CannotAuthenticate(traceback.format_exc())
def inception_crop_with_mask(
    image, mask, resize_size=None, area_min=5, area_max=100):
  """Applies one shared inception-style random crop to an image and its mask.

  Inception-style crop is a random image crop (its size and aspect ratio are
  random) that was used for training Inception models, see
  https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf.

  Args:
    image: [H, W, C] image tensor.
    mask: [H, W, None] mask tensor; H and W must match the image. Resized
      with nearest-neighbor interpolation.
    resize_size: Sequence of 2 ints; resize both outputs to [height, width]
      after the crop.
    area_min: minimal crop area (percent).
    area_max: maximal crop area (percent).

  Returns:
    Cropped (and optionally resized) image and mask tensors.
  """
  # Sample a single crop window, shared by the image and the mask.
  begin, size, _ = tf.image.sample_distorted_bounding_box(
      tf.shape(image), tf.zeros([0, 0, 4], tf.float32),
      area_range=(area_min / 100, area_max / 100),
      min_object_covered=0,  # Don't enforce a minimum area.
      use_image_if_no_bounding_boxes=True)

  def _crop_and_resize(tensor, channels, method):
    # Apply the shared window, restore the static channel rank, then
    # optionally resize with the requested interpolation method.
    cropped = tf.slice(tensor, begin, size)
    cropped.set_shape([None, None, channels])
    if resize_size:
      cropped = tf.image.resize(cropped, resize_size, method)
    return cropped

  image_cropped = _crop_and_resize(
      image, image.shape[-1], tf.image.ResizeMethod.BILINEAR)
  mask_cropped = _crop_and_resize(
      mask, mask.shape[-1], tf.image.ResizeMethod.NEAREST_NEIGHBOR)
  return image_cropped, mask_cropped
import os
def delete_outputs(config, outcfg):
    """
    Remove pipeline outputs to save memory
    after running the job
    Parameters
    ----------
    config : dict-like
        Input configuration of job. Uses
        config["management"]["delete"] (list of keys
        used to index outcfg) to determine
        which files should be removed
    outcfg : dict-like
        Output configuration of job
    Returns
    -------
    outcfg_cleaned : dict-like
        Output configuration with selected
        output keys removed (``outcfg`` is modified in place and returned).
    """
    # determine keys (corresponding to files) in
    # outcfg that should be deleted
    outkeys = config.get("management", {}).get("delete", None)
    # if no output keys are requested, nothing to do
    if outkeys is None:
        return outcfg
    # go through all flagged files and delete if existing
    for k in outkeys:
        # skip keys that are undefined or missing from outcfg
        if k is None or k not in outcfg:
            continue
        if k.endswith("files"):
            # delete a list of files
            for f in outcfg[k]:
                try:
                    os.remove(f)
                except OSError:
                    pass
        else:
            # delete an individual file
            try:
                os.remove(outcfg[k])
            except OSError:
                pass
        # Drop the key regardless of whether removal succeeded, matching
        # the list branch (previously a failed single-file removal left
        # the key dangling in outcfg).
        del outcfg[k]
    return outcfg
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.