| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def get_trainable_vars(name):
"""
returns the trainable variables
:param name: (str) the scope
:return: ([TensorFlow Variable])
"""
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
| 22,000
|
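A minimal usage sketch for the snippet above, assuming a TensorFlow 1.x graph-mode program; the scope name "policy" is made up for illustration:
import tensorflow as tf
w = tf.Variable([1.0], name="policy/w")  # trainable by default, so it lands in TRAINABLE_VARIABLES
print(get_trainable_vars("policy"))  # e.g. [<tf.Variable 'policy/w:0' shape=(1,) dtype=float32>]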
def batch_add_annotations(cursor, annotations, annot_type, batch_size):
""" Add gene/transcript/exon annotations to the appropriate annotation table
"""
# NOTE: batch_size is overridden here, so annotations are inserted one row at a time.
batch_size = 1
if annot_type not in ["gene", "transcript", "exon"]:
raise ValueError("When running batch annot update, must specify " + \
"annot_type as 'gene', 'exon', or 'transcript'.")
index = 0
while index < len(annotations):
# Slicing past the end of the list is safe here; the final batch is simply shorter.
batch = annotations[index:index + batch_size]
index += batch_size
try:
cols = " (" + ", ".join([str_wrap_double(x) for x in
["ID", "annot_name", "source", "attribute", "value"]]) + ") "
command = 'INSERT OR IGNORE INTO "' + annot_type + '_annotations" ' + cols + \
"VALUES " + '(?,?,?,?,?)'
cursor.executemany(command, batch)
except Exception as e:
print(e)
sys.exit(1)
return
| 22,001
|
def getLambdaFasta():
"""
Returns the filename of the FASTA of the lambda phage reference.
"""
return _getAbsPath('lambdaNEB.fa')
| 22,002
|
def find_gaps(num_iter):
"""Generate integers not present in an iterable of integers.
Caution: this is an infinite generator.
"""
next_num = -1
for n in num_iter:
next_num += 1
while next_num < n:
yield next_num
next_num += 1
while True:
next_num += 1
yield next_num
| 22,003
|
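Illustrative use of the generator above; since it never terminates, islice takes a finite prefix (a sketch, not from the original source):
from itertools import islice
present = [0, 1, 3, 4, 7]  # assumed sorted, as the generator requires
print(list(islice(find_gaps(present), 5)))  # -> [2, 5, 6, 8, 9]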
def register_field(name):
"""Register a custom accessor on Activity objects.
Based on :func:`pandas.api.extensions.register_dataframe_accessor`.
Args:
name (str): Name under which the accessor should be registered. A warning
is issued if this name conflicts with a preexisting attribute.
Returns:
callable: A class decorator.
See also:
:func:`pandas.api.extensions.register_dataframe_accessor`
Register a custom accessor on DataFrame objects.
`pandas.api.extensions._register_accessor() <https://github.com/pandas-dev/pandas/blob/v1.2.4/pandas/core/accessor.py#L189-L275>`_
Notes:
When accessed, your accessor will be initialized with the Activity object
the user is interacting with. So the signature must be
.. code-block:: python
def __init__(self, activity_obj): # noqa: E999
...
Examples:
In your library code::
import heartandsole as hns
@hns.api.extensions.register_field('running_smoothness')
class SmoothnessAccessor:
def __init__(self, activity_obj):
self._obj = activity_obj
@property
def avg(self):
# return the average of the records
return self._obj.records['running_smoothness'].mean()
Back in an interactive IPython session:
.. code-block:: ipython
In [1]: act = hns.Activity(pd.DataFrame({'running_smoothness': np.linspace(0, 10)}))
In [2]: act.running_smoothness.avg
Out[2]: 5.0
TODO:
* Consider making this a classmethod of Activity.
"""
from heartandsole import Activity
def decorator(field):
if hasattr(Activity, name):
warnings.warn(
f"registration of accessor {repr(field)} under name "
f"{repr(name)} for type {repr(Activity)} is overriding a preexisting "
f"attribute with the same name.",
UserWarning,
stacklevel=2,
)
setattr(Activity, name, CachedField(name, field))
Activity._fields.add(name)
return field
return decorator
| 22,004
|
def test_deprecation_warnings_set_power(cst_power_2freq):
"""
Test the deprecation warnings in set_power.
"""
power_beam = cst_power_2freq
power_beam2 = power_beam.copy()
with uvtest.check_warnings(DeprecationWarning, match="`set_power` is deprecated"):
power_beam2.set_power()
assert power_beam2 == power_beam
| 22,005
|
def Rotation_ECL_EQD(time):
"""Calculates a rotation matrix from ecliptic J2000 (ECL) to equatorial of-date (EQD).
This is one of the family of functions that returns a rotation matrix
for converting from one orientation to another.
Source: ECL = ecliptic system, using equator at J2000 epoch.
Target: EQD = equatorial system, using equator of date.
Parameters
----------
time : Time
The date and time of the desired equator.
Returns
-------
RotationMatrix
A rotation matrix that converts ECL to EQD.
"""
rot = Rotation_EQD_ECL(time)
return InverseRotation(rot)
| 22,006
|
def _make_players_away(team_size):
"""Construct away team of `team_size` players."""
away_players = []
for i in range(team_size):
away_players.append(
Player(Team.AWAY, _make_walker("away%d" % i, i, _RGBA_RED)))
return away_players
| 22,007
|
def store_barbican_secret_for_coriolis(
barbican, secret_info, name='Coriolis Secret'):
""" Stores secret connection info in Barbican for Coriolis.
:param barbican: barbican_client.Client instance
:param secret_info: secret info to store
:param name: name under which to store the secret
:return: the HREF (URL) of the newly-created Barbican secret
"""
payload = json.dumps(secret_info)
secret = barbican.secrets.create(
name=name, payload=payload,
payload_content_type='application/json')
secret_ref = secret.store()
return secret_ref
| 22,008
|
def test_Has_Disk_At__Effective_Disk(score, max_score):
"""Function has_disk_at: effective disk."""
max_score.value += 1
try:
set_up()
assert Board.has_disk_at(test_board_4, (1, 1))
score.value += 1
except:
pass
| 22,009
|
def wfr2_grad_single(image, sigma, kx, ky, kw, kstep, grad=None):
"""Optimized, single precision version of wfr2_grad.
Single precision might be faster on some hardware.
In addition to returning the used k-vector and lock-in signal, also return
the gradient of the lock-in signal, computed for each pixel from the values
of the surrounding pixels of the GPA of the best k-vector. This gives a
slightly more accurate determination of the gradient, as boundary effects
are mitigated.
"""
xx, yy = cp.ogrid[0:image.shape[0],
0:image.shape[1]]
c_image = cp.asarray(image, dtype=np.float32)
g = {'lockin': cp.zeros_like(c_image, dtype=np.complex64),
'grad': cp.zeros(image.shape + (2,), dtype=np.float32),
}
gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image, dtype=np.float32), sigma=sigma)
if grad == 'diff':
def grad_func(phase):
dbdx = cp.diff(phase, axis=0, append=np.nan)
dbdy = cp.diff(phase, axis=1, append=np.nan)
return dbdx, dbdy
elif grad is None:
def grad_func(phase):
return cp.gradient(phase)
else:
grad_func = grad
for wx in np.arange(kx-kw, kx+kw, kstep):
for wy in np.arange(ky-kw, ky+kw, kstep):
multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy))
X = cp.fft.fft2(c_image * multiplier)
X = X * gaussian
sf = cp.fft.ifft2(X)
t = cp.abs(sf) > cp.abs(g['lockin'])
angle = -cp.angle(sf)
grad = grad_func(angle)
grad = cp.stack(grad, axis=-1)
g['lockin'] = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g['lockin'])
# TODO: do outside forloop.
g['grad'] = cp.where(t[..., None], grad + 2*np.pi * cp.array([(wx-kx), (wy-ky)]), g['grad'])
for key in g.keys():
g[key] = g[key].get()
g['grad'] = wrapToPi(2 * g['grad']) / 2
return g
| 22,010
|
def padding():
"""Return 16-200 random bytes"""
return URANDOM(random.randrange(16, PAD_MAX))
| 22,011
|
def listener(phrase_limit: int, timeout: int = None, sound: bool = True) -> str:
"""Function to activate listener, this function will be called by most upcoming functions to listen to user input.
Args:
phrase_limit: Time in seconds for the listener to actively listen to a sound.
timeout: Time in seconds for the overall listener to be active.
sound: Whether to play the listener indicator sound. Defaults to True.
Returns:
str:
- On success, returns recognized statement from the microphone.
- On failure, returns ``SR_ERROR`` as a string which is conditioned to respond appropriately.
"""
try:
sys.stdout.write("\rListener activated..") and playsound('indicators/start.mp3') if sound else \
sys.stdout.write("\rListener activated..")
if timeout and phrase_limit:
listened = recognizer.listen(source, phrase_time_limit=phrase_limit, timeout=timeout)
else:
listened = recognizer.listen(source, phrase_time_limit=phrase_limit)
sys.stdout.write("\r") and playsound('indicators/end.mp3') if sound else sys.stdout.write("\r")
return_val = recognizer.recognize_google(listened)
sys.stdout.write(f'\r{return_val}')
except (UnknownValueError, RequestError, WaitTimeoutError):
return_val = 'SR_ERROR'
return return_val
| 22,012
|
def find():
"""Prints user message and returns the number of HP-49 connected.
"""
hps = com.find()
if len( hps ) == 0:
print "No HP49-compatible devices connected."
sys.stdout.flush()
else:
print "Number of HP49-compatible devices: %d" % len( hps )
sys.stdout.flush()
return len( hps )
| 22,013
|
def get_children_templates(pvc_enabled=False):
"""
Define a list of all resources that should be created.
"""
children_templates = {
"service": "service.yaml",
"ingress": "ingress.yaml",
"statefulset": "statefulset.yaml",
"configmap": "configmap.yaml",
"secret": "secret.yaml",
}
if pvc_enabled:
children_templates["pvc"] = "pvc.yaml"
return children_templates
| 22,014
|
def split_blocks(block_iter, expected_hdr=None):
"""Extract a sequence of MPEG audio frames from a stream of data blocks.
Args:
block_iter: An iterable object that yields a sequence of data
blocks.
expected_hdr: If given, only yield frames matching this MP3Header
template
Yields:
A (hdr, data_buffer) pair.
If 'hdr' is None, data buffer contains non-MPEG-audio junk that
was found inside the stream. Otherwise 'hdr' is an MP3Header object
and 'data_buffer' contains the MP3 frame.
"""
buffered = ''
current_hdr = None
at_end_of_stream = False
to_be_skipped = 0
while True:
# First we skip data if necessary.
while to_be_skipped > 0:
assert current_hdr is None
# If we don't have anything in our buffer, pull in the
# next block.
if not buffered:
try:
buffered = block_iter.next()
except StopIteration:
sys.stderr.write(
"Stream ended while skipping data "
"between frames (probably ID3 headers).\n")
at_end_of_stream = True
break
# If the buffer contains less than the amount of data to
# be skipped, yield it all and update to_be_skipped.
# Otherwise slice the amount to be skipped off of the
# front of the buffer.
if len(buffered) <= to_be_skipped:
yield None, buffered
to_be_skipped -= len(buffered)
buffered = ''
else:
yield None, buffered[:to_be_skipped]
buffered = buffered[to_be_skipped:]
to_be_skipped = 0
# We try to have at least _READ_SIZE bytes of data buffered.
if len(buffered) < _READ_SIZE:
# To avoid excess string copies, we collect data in a list
# until we have the desired amount, then concatenate it all
# at the end.
buffered_list = [ buffered ]
buffered_size = len(buffered)
while buffered_size < _READ_SIZE:
try:
next_block = block_iter.next()
except StopIteration:
at_end_of_stream = True
break
buffered_list.append(next_block)
buffered_size += len(next_block)
buffered = ''.join(buffered_list)
# Are we at the end of the file? If so, break out of the
# "while True:" loop
if not buffered:
break
# Do we have an MP3 header? If so, yield the frame and then
# slice it off of our buffer.
if current_hdr:
current_frame = buffered[:current_hdr.frame_size]
# If we found a full-length frame, yield it. Otherwise
# return the truncated frame as junk. (We can be sure not
# to throw away a valid frame since we buffer at least the
# next _READ_SIZE bytes, and _READ_SIZE is larger than any
# possible MP3 frame.)
if len(current_frame) != current_hdr.frame_size:
current_hdr = None
yield current_hdr, current_frame
current_hdr = None
buffered = buffered[len(current_frame):]
# Look for the next ID3 header.
id3_size, id3_offset = id3_header.find_size(buffered)
# Look for the next MP3 header.
next_hdr, offset = mp3_header.find(buffered, expected_hdr=expected_hdr)
# If we see an ID3 header before the next MP3 header, skip past the
# ID3. We do this out of paranoia, since an ID3 header might contain
# false synch.
if id3_size is not None and id3_offset < offset:
to_be_skipped = id3_offset + id3_size
continue
# We are starting on this header.
current_hdr = next_hdr
# If we cannot make any progress and are at the end of the
# stream, just return what we have buffered as junk and then
# break out of the loop
if (current_hdr, offset) == (None, 0) and at_end_of_stream:
if buffered:
yield None, buffered
break
# Did we find junk before the next frame? If so, yield it.
if offset > 0:
yield None, buffered[:offset]
buffered = buffered[offset:]
| 22,015
|
def sort_as_int(environment, value, reverse=False, attribute=None):
"""Sort collection after converting the attribute value to an int"""
def convert_to_int(x):
val = str(x)
# Test if this is a string representation of a float.
# This is what the copy rig does and it's annoying
if '.' in val:
val = float(val)
return int(val)
key_func = make_attrgetter(
environment, attribute,
postprocess=convert_to_int
)
return sorted(value, key=key_func, reverse=reverse)
| 22,016
|
def get_pretty_table_for_item(item, output_fields):
"""
"""
x = PrettyTable(["Attribute", "Value"])
attrs = _filter_attributes(item.get_attributes(), output_fields)
for attr in attrs:
row = []
row.append(attr)
row.append(getattr(item, attr))
x.add_row(row)
return x
| 22,017
|
def create_t1_based_unwarp(name='unwarp'):
"""
Unwarp an fMRI time series based on non-linear registration to T1.
NOTE: AS IT STANDS THIS METHOD DID NOT PRODUCE ACCEPTABLE RESULTS
IF BRAIN COVERAGE IS NOT COMPLETE ON THE EPI IMAGE.
ALSO: NEED TO ADD AUTOMATIC READING OF EPI RESOLUTION TO GET
"""
unwarpflow = pe.Workflow(name=name)
inputnode = pe.Node(interface=util.IdentityInterface(fields=['epi',
'T1W']),
name='inputspec')
outputnode = pe.Node(interface=util.IdentityInterface(fields=[
'unwarped_func',
'warp_files']),
name='outputspec')
tmedian = pe.Node(interface=ImageMaths(), name='tmedian')
tmedian.inputs.op_string = '-Tmedian'
epi_brain_ext = pe.Node(interface=util.Function(function=epi_brain_extract,
input_names=['in_file'],
output_names=['out_vol',
'out_mask']),
name='epi_brain_ext')
fast_debias = pe.Node(interface=FAST(), name='FAST_debias')
fast_debias.inputs.output_biascorrected = True
robex = pe.Node(interface=util.Function(function=my_robex,
input_names=['in_file'],
output_names=['out_file',
'out_mask']),
name='robex')
downsample_T1 = pe.Node(MRIConvert(), name='downsample_dti')
downsample_T1.inputs.vox_size = (3.438, 3.438, 3.000)
downsample_T1.inputs.out_type = 'niigz'
contrast_invert = pe.Node(interface=util.Function(function=invert_contrast,
input_names=['in_t1_brain',
'in_b0_brain'],
output_names=['out_fn']),
name='contrast_invert')
ants_syn = pe.Node(interface=util.Function(function=my_ants_registration_syn,
input_names=['in_T1W',
'in_epi'],
output_names=['out_transforms']),
name='ants_syn')
ants_warp = pe.Node(interface=WarpTimeSeriesImageMultiTransform(),
name='ants_warp')
'''connections'''
# unwarpflow.connect(inputnode, 'T1W', robex, 'in_file')
unwarpflow.connect(inputnode, 'T1W', fast_debias, 'in_files')
# unwarpflow.connect(robex, 'out_file', fast_debias, 'in_files')
unwarpflow.connect(fast_debias, 'restored_image', robex, 'in_file')
# unwarpflow.connect(fast_debias, 'restored_image', downsample_T1, 'in_file')
unwarpflow.connect(robex, 'out_file', downsample_T1, 'in_file')
unwarpflow.connect(downsample_T1, 'out_file', contrast_invert, 'in_t1_brain')
unwarpflow.connect(inputnode, 'epi', tmedian, 'in_file')
unwarpflow.connect(tmedian, 'out_file', epi_brain_ext, 'in_file')
unwarpflow.connect(epi_brain_ext, 'out_vol', contrast_invert, 'in_b0_brain')
unwarpflow.connect(contrast_invert, 'out_fn', ants_syn, 'in_T1W')
unwarpflow.connect(epi_brain_ext, 'out_vol', ants_syn, 'in_epi')
# NOTE: outputnode has no 'out_transforms' field; the transforms are exposed
# through the 'warp_files' connection below.
# unwarpflow.connect(ants_syn, 'out_transforms', outputnode, 'out_transforms')
unwarpflow.connect(inputnode, 'epi', ants_warp, 'input_image')
unwarpflow.connect(contrast_invert, 'out_fn', ants_warp, 'reference_image')
unwarpflow.connect(ants_syn, 'out_transforms', ants_warp, 'transformation_series')
unwarpflow.connect(ants_syn, 'out_transforms', outputnode, 'warp_files')
unwarpflow.connect(ants_warp, 'output_image', outputnode, 'unwarped_func')
return unwarpflow
| 22,018
|
def get_qnode(caching, diff_method="finite-diff", interface="autograd"):
"""Creates a simple QNode"""
dev = qml.device("default.qubit.autograd", wires=3)
@qnode(dev, caching=caching, diff_method=diff_method, interface=interface)
def qfunc(x, y):
qml.RX(x, wires=0)
qml.RX(y, wires=1)
qml.CNOT(wires=[0, 1])
return expval(qml.PauliZ(wires=1))
return qfunc
| 22,019
|
def segment_relative_timestamps(segment_start, segment_end, timestamps):
""" Converts timestamps for a global recording to timestamps in a segment given the segment boundaries
Args:
segment_start (float): segment start time in seconds
segment_end (float): segment end time in seconds
timestamps (list): List whose length is the number of labelled classes. Each element of the list is an
array of start and end times of the labelled portions of the recording.
Returns:
List of the timestamps of labelled portion in the segment , with respect to the segment.
Examples:
>>> timestamps = [np.array([0.0, 1.0, 2.0, 9.0]),
np.array([0.5, 1.5]),
np.array([3.0, 6.0]),
np.array([]),
np.array([7.0, 8.0])]
>>> segment_relative_timestamps(3.3, 6.6, timestamps)
>>> [array([[0. , 3.3]], dtype=float32),
array([], dtype=float32),
array([[0. , 2.7]], dtype=float32),
array([], dtype=float32),
array([], dtype=float32)]
"""
segment_timestamps = []
# loop over the classes
for c_timestamps in timestamps:
if c_timestamps.size > 0: # "if there are timestamps"
inside_timestamps = []
# For all timestamps, look if they fall in the segment. If they do, convert them to segment times.
for (start, end) in zip(c_timestamps[::2], c_timestamps[1::2]):
if start <= segment_end and end >= segment_start:
inside_timestamps.append(
(np.max([segment_start, start]) - segment_start, np.min([end, segment_end]) - segment_start))
segment_timestamps.append(np.asarray(inside_timestamps, dtype=np.float32))
else:
segment_timestamps.append(np.array([], dtype=np.float32))
return segment_timestamps
| 22,020
|
def compare_dataframes_mtmc(gts, ts):
"""Compute ID-based evaluation metrics for MTMCT
Return:
summary (pandas.DataFrame): Results of the evaluation in a DataFrame containing the
MOT challenge metrics plus 'num_frames', 'idfp', 'idfn' and 'idtp'.
"""
gtds = []
tsds = []
gtcams = gts['CameraId'].drop_duplicates().tolist()
tscams = ts['CameraId'].drop_duplicates().tolist()
maxFrameId = 0
for k in sorted(gtcams):
gtd = gts.query('CameraId == %d' % k)
gtd = gtd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
# max FrameId in gtd only
mfid = gtd['FrameId'].max()
gtd['FrameId'] += maxFrameId
gtd = gtd.set_index(['FrameId', 'Id'])
gtds.append(gtd)
if k in tscams:
tsd = ts.query('CameraId == %d' % k)
tsd = tsd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
# max FrameId among both gtd and tsd
mfid = max(mfid, tsd['FrameId'].max())
tsd['FrameId'] += maxFrameId
tsd = tsd.set_index(['FrameId', 'Id'])
tsds.append(tsd)
maxFrameId += mfid
# compute multi-camera tracking evaluation stats
multiCamAcc = mm.utils.compare_to_groundtruth(
pd.concat(gtds), pd.concat(tsds), 'iou')
metrics = list(mm.metrics.motchallenge_metrics)
metrics.extend(['num_frames', 'idfp', 'idfn', 'idtp'])
mh = mm.metrics.create()
summary = mh.compute(multiCamAcc, metrics=metrics, name='MultiCam')
return summary
| 22,021
|
def visit_simpleimage_node(self, node):
"""
Visits an image node.
Copies the image.
"""
if node['abspath'] is not None:
outdir = self.builder.outdir
relpath = os.path.join(outdir, node['relpath'])
dname = os.path.split(node['uri'])[0]
if dname:
relpath = os.path.join(relpath, dname)
if not os.path.exists(relpath):
os.makedirs(relpath)
if os.path.dirname(node['abspath']) != relpath:
shutil.copy(node['abspath'], relpath)
logger = getLogger("image")
logger.info("[image] copy '{0}' to '{1}'".format(
node['uri'], relpath))
| 22,022
|
def launch_corba(
exec_file=None,
run_location=None,
jobname=None,
nproc=None,
verbose=False,
additional_switches="",
start_timeout=60,
):
"""Start MAPDL in AAS mode
Notes
-----
The CORBA interface is likely to fail on computers with multiple
network adapters. The ANSYS RPC isn't smart enough to determine
the right adapter and will likely try to communicate on the wrong
IP.
"""
# Using stored parameters so launch command can be run from a
# cached state (when launching the GUI)
# can't run /BATCH in windows, so we trick it using "-b" and
# provide a dummy input file
if os.name == "nt":
# must be a random filename to avoid conflicts with other
# potential instances
tmp_file = "%s.inp" % random_string(10)
with open(os.path.join(run_location, tmp_file), "w") as f:
f.write("FINISH")
additional_switches += " -b -i %s -o out.txt" % tmp_file
# command must include "aas" flag to start MAPDL server
command = '"%s" -aas -j %s -np %d %s' % (
exec_file,
jobname,
nproc,
additional_switches,
)
# remove any broadcast files
broadcast_file = os.path.join(run_location, "mapdl_broadcasts.txt")
if os.path.isfile(broadcast_file):
os.remove(broadcast_file)
if verbose:
subprocess.Popen(command, shell=True, cwd=run_location)
else:
subprocess.Popen(
command,
shell=True,
cwd=run_location,
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# listen for broadcast file
telapsed = 0
tstart = time.time()
started_rpc = False
while telapsed < start_timeout and not started_rpc:
try:
if os.path.isfile(broadcast_file):
broadcast = open(broadcast_file).read()
# see if connection to RPC has been made
rpc_txt = "visited:collaborativecosolverunitior-set:"
started_rpc = rpc_txt in broadcast
time.sleep(0.1)
telapsed = time.time() - tstart
except KeyboardInterrupt:
raise KeyboardInterrupt
# exit if timed out
if not started_rpc:
err_str = "Unable to start ANSYS within %.1f seconds" % start_timeout
if os.path.isfile(broadcast_file):
broadcast = open(broadcast_file).read()
err_str += "\n\nLast broadcast:\n%s" % broadcast
raise TimeoutError(err_str)
# return CORBA key
keyfile = os.path.join(run_location, "aaS_MapdlId.txt")
return open(keyfile).read()
| 22,023
|
def run_import(data_file: str, mapping_file: str, batch_size: int = 1000,
validate: bool = True, dry_run: bool = False):
"""
Inserts the data from data_file into the database using the mapping_file.
"""
logging.info("Connecting to database")
connection, cursor = connect_db()
logging.info("Loading mapping file")
try:
mapping = json.load(open(mapping_file))
except json.decoder.JSONDecodeError as err:
filename = os.path.basename(mapping_file)
logging.error(f'There is an error in {filename}: {err}')
sys.exit(1)
logging.info("Loading data file")
data = read_data_file(data_file, list(mapping.keys()))
#
# Derive occurrence and asv 'sheets' from asv-table sheet.
#
# We do this already here, to include asv and occurrence fields in the
# subsequent validation (which expects 'unpivoted' rows). This means, however,
# that asv-table defaults (added in data-mapping.json) will have no effects
# on occurrences or asvs.
#
try:
# 'Unpivot' event columns into rows, keeping 'id_columns' as columns
id_columns = ['asv_id_alias', 'DNA_sequence', 'associatedSequences',
'kingdom', 'phylum', 'class', 'order', 'family', 'genus',
'specificEpithet', 'infraspecificEpithet', 'otu']
occurrences = data['asv-table'] \
.melt(id_columns,
# Store event column header and values as:
var_name='event_id_alias',
value_name='organism_quantity')
except KeyError:
logging.error('Input files seem to not have been read properly. '
'Please, check dimensions (#rows, #cols) below:')
for sheet in ['dataset', 'emof', 'mixs', 'asv-table', 'annotation']:
logging.error(f'Sheet {sheet} has dimensions {data[sheet].shape}')
logging.error('Excel files exported from R have caused this problem '
'before. Try opening and saving input in Excel, '
'or importing data as *.tar.gz instead.')
sys.exit(1)
# Remove rows with organism_quantity 0,
# and reset index so that removed rows are no longer referenced
# As we do this before validation, we need to catch potential TypeError
try:
occurrences = occurrences[occurrences.organism_quantity > 0]
except TypeError:
logging.error('Counts in asv-table include non-numeric values. '
'No data were imported.')
sys.exit(1)
else:
occurrences.reset_index(inplace=True)
# Store as 'sheet' in data object
data['occurrence'] = occurrences
# Also create asv 'sheet'
data['asv'] = occurrences[['asv_id_alias', 'DNA_sequence']]
# Make sure we have unique asv rows,
# to avoid ON CONFLICT - DO UPDATE errors in insert_asvs
data['asv'] = data['asv'].drop_duplicates()
data['asv'].reset_index(inplace=True)
# Check for field differences between data input and mapping
logging.info("Checking fields")
if not compare_fields(data, mapping):
logging.error('No data were imported.')
sys.exit(1)
# Deal with Excel timestamps
# Requires date fields to exist, so do not move ahead of field check!
data['event']['eventDate'] = handle_dates(data['event']['eventDate'])
data['annotation']['date_identified'] = \
handle_dates(data['annotation']['date_identified'])
if validate:
logging.info("Validating input data")
if not run_validation(data, mapping):
logging.error("No data were imported.")
sys.exit(1)
if not compare_aliases(data):
logging.error("No data were imported.")
sys.exit(1)
logging.info("Updating defaults")
update_defaults(data, mapping)
# Replace remaining missing values with None.
# These will be transformed by format_value, and inserted into db as [null]
for sheet in data.keys():
data[sheet] = data[sheet].where(pandas.notnull(data[sheet]), None)
#
# Insert DATASET
#
logging.info("Inserting data")
logging.info(" * dataset")
dataset = insert_dataset(data['dataset'], mapping, cursor)
#
# Insert EVENTS
#
# Get 'event_pid' from dataset and add as new column
data['event'] = data['event'].assign(dataset_pid=lambda _: dataset)
logging.info(" * event")
data['event'] = insert_events(data['event'], mapping, cursor, batch_size)
#
# Insert MIXS
#
# Join with 'event' to get 'event_pid' as 'pid'
events = data['event'].set_index('event_id_alias')
data['mixs'] = data['mixs'].join(events['pid'], on='event_id_alias')
logging.info(" * mixs")
insert_common(data['mixs'], mapping['mixs'], cursor, batch_size)
#
# Insert EMOF
#
# Join with 'event' to get 'event_pid'
data['emof'] = data['emof'] \
.join(events['pid'], on='event_id_alias')
data['emof'].rename(columns={'pid': 'event_pid'}, inplace=True)
logging.info(" * emof")
insert_common(data['emof'], mapping['emof'], cursor, batch_size)
#
# Insert ASV
#
# Generate 'asv_id' as ASV:<md5-checksum of 'DNA_sequence'>
data['asv']['asv_id'] = [f'ASV:{hashlib.md5(s.encode()).hexdigest()}'
for s in data['asv']['DNA_sequence']]
logging.info(" * asvs")
data['asv'], old_max_asv = insert_asvs(data['asv'], mapping,
cursor, batch_size)
# Drop asv_id column again, as it confuses pandas
del data['asv']['asv_id']
#
# Insert TAXON_ANNOTATION
#
# Join with asv to add 'asv_pid'
asvs = data['asv'].set_index('asv_id_alias')
# Use inner join so that annotation is only added for new asvs
data['annotation'] = data['annotation'] \
.join(asvs['pid'], on='asv_id_alias', how='inner')
data['annotation'].rename(columns={'pid': 'asv_pid'}, inplace=True)
annotation = data['annotation'][data['annotation'].asv_pid > old_max_asv]
annotation.reset_index(inplace=True)
logging.info(" * annotations")
insert_common(annotation, mapping['annotation'], cursor, batch_size)
#
# Insert OCCURRENCE
#
# Join with asvs to add 'asv_pid'
occurrences = data['occurrence'].join(asvs['pid'], on='asv_id_alias')
occurrences.rename(columns={'pid': 'asv_pid'}, inplace=True)
# Set contributor's taxon ranks to empty strings
# to allow for concatenation
tax_fields = ["kingdom", "phylum", "class", "order", "family", "genus",
"specificEpithet", "infraspecificEpithet", "otu"]
occurrences[tax_fields] = occurrences[tax_fields].fillna('')
# Join with events to add 'event_pid'
occurrences = occurrences.join(events, on='event_id_alias')
occurrences.rename(columns={'pid': 'event_pid'}, inplace=True)
# Concatenate contributor's taxon rank fields
occurrences['previous_identifications'] = \
["|".join(z) for z in zip(*[occurrences[f] for f in tax_fields])]
logging.info(" * occurrences")
insert_common(occurrences, mapping['occurrence'], cursor, batch_size)
#
# Commit or Roll back
#
if dry_run:
logging.info("Dry run, rolling back changes")
connection.rollback()
else:
logging.info("Committing changes")
connection.commit()
| 22,024
|
def test_objlist(client, query):
"""Initialize a DataSelection from a list of objects.
"""
objs = client.search(query)
invIds, dsIds, dfIds = get_obj_ids(objs)
selection = DataSelection(objs)
assert selection.invIds == invIds
assert selection.dsIds == dsIds
assert selection.dfIds == dfIds
| 22,025
|
def test_get_daily_statements(session, client, jwt, app):
"""Assert that the default statement setting is daily."""
# Create a payment account and statement details, then get all statements for the account
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request(business_identifier='CP0002000')),
headers=headers)
invoice: Invoice = Invoice.find_by_id(rv.json.get('id'))
pay_account: PaymentAccount = PaymentAccount.find_by_id(invoice.payment_account_id)
settings_model = factory_statement_settings(payment_account_id=pay_account.id,
frequency=StatementFrequency.DAILY.value)
statement_model = factory_statement(payment_account_id=pay_account.id,
frequency=StatementFrequency.DAILY.value,
statement_settings_id=settings_model.id)
factory_statement_invoices(statement_id=statement_model.id, invoice_id=invoice.id)
rv = client.get(f'/api/v1/accounts/{pay_account.auth_account_id}/statements',
headers=headers)
assert rv.status_code == 200
assert rv.json.get('total') == 1
assert rv.json.get('items')[0].get('frequency') == StatementFrequency.DAILY.value
| 22,026
|
def fetch_xml(url):
"""
Fetch a URL and parse it as XML using ElementTree
"""
resp = urllib2.urlopen(url)
tree = ET.parse(resp)
return tree
| 22,027
|
def update_hirsch_index(depth_node_dict, minimum_hirsch_value, maximum_hirsch_value):
"""
Calculates the Hirsch index for a radial tree.
Note that we have a slightly different definition of the Hirsch index to the one found in:
Gómez, V., Kaltenbrunner, A., & López, V. (2008, April).
Statistical analysis of the social network and discussion threads in slashdot.
In Proceedings of the 17th international conference on World Wide Web (pp. 645-654). ACM.
Inputs: - depth_node_dict: A map from node depth to node ids as a python dictionary.
- minimum_hirsch_value: This is the previous Hirsch value.
- maximum_hirsch_value: This is the depth of the latest node added to the tree.
Output: - hirsch: The Hirsch index.
"""
# This is the previous hirsch index value.
hirsch_index = minimum_hirsch_value
if maximum_hirsch_value > minimum_hirsch_value:
adopters = depth_node_dict[maximum_hirsch_value]
width = len(adopters)
if width >= maximum_hirsch_value:
hirsch_index = maximum_hirsch_value
return hirsch_index
| 22,028
|
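A small worked example of the function above; the depth map is made up for illustration:
depth_node_dict = {1: {"n1", "n2", "n3"}, 2: {"n4", "n5"}}
# A new node arrived at depth 2; the width at depth 2 is 2 >= 2, so the index grows.
print(update_hirsch_index(depth_node_dict, 1, 2))  # -> 2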
def migrate():
"""
Apply database migrations.
"""
app_name = orm_settings["config"]["app"]
os.environ["USER_ROOT"] = user_root
utils.set_pythonpath(user_root)
with Path(lib_root):
print()
subprocess.run(f"python manage.py migrate {app_name}")
print()
| 22,029
|
def read_trajectory(filename, matrix=True):
"""
Read a trajectory from a text file.
Input:
filename -- file to be read
matrix -- convert poses to 4x4 matrices
Output:
dictionary of stamped 3D poses
"""
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
list = [[float(v.strip()) for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
list_ok = []
for i,l in enumerate(list):
if l[4:8]==[0,0,0,0]:
continue
isnan = False
for v in l:
if numpy.isnan(v):
isnan = True
break
if isnan:
sys.stderr.write("Warning: line %d of file '%s' has NaNs, skipping line\n"%(i,filename))
continue
list_ok.append(l)
if matrix :
traj = dict([(l[0],transform44(l[0:])) for l in list_ok])
else:
traj = dict([(l[0],l[1:8]) for l in list_ok])
return traj
| 22,030
|
async def async_start(hass: HomeAssistantType, config_entry=None) -> bool:
"""Start Ampio discovery."""
topics = {}
@callback
async def version_info_received(msg):
"""Process the version info message."""
_LOGGER.debug("Version %s", msg.payload)
try:
data = json.loads(msg.payload)
except json.JSONDecodeError:
_LOGGER.error("Unable to decode Ampio MQTT Server version")
return
version = data.get(ATTR_VERSION, "N/A")
device_registry = await hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(CONNECTION_NETWORK_MAC, str("ampio-mqtt"))},
identifiers={(DOMAIN, str("ampio-mqtt"))},
name="Ampio MQTT Server",
manufacturer="Ampio",
model="MQTT Server",
sw_version=version,
)
topics[RESPONSE_AMPIO_VERSION] = {
"topic": RESPONSE_AMPIO_VERSION,
"msg_callback": version_info_received,
"qos": DEFAULT_QOS,
}
@callback
async def device_list_received(msg):
"""Process device list info message."""
try:
payload = json.loads(msg.payload)
except ValueError as err:
_LOGGER.error("Unable to parse JSON module list: %s", err)
return
modules: List[AmpioModuleInfo] = AmpioModuleInfo.from_topic_payload(payload)
for module in modules:
data_modules = hass.data[DATA_AMPIO_MODULES]
await async_setup_device_registry(hass, config_entry, module)
data_modules[module.user_mac] = module
ampio.async_publish(
hass, REQUEST_MODULE_NAMES.format(mac=module.user_mac), "1", 0, False
)
topics[RESPONSE_MODULE_DISCOVERY] = {
"topic": RESPONSE_MODULE_DISCOVERY,
"msg_callback": device_list_received,
"qos": DEFAULT_QOS,
}
async def module_names_received(msg):
"Handle names update." ""
matched = MAC_FROM_TOPIC_RE.match(msg.topic)
if matched:
mac = matched.group("mac").upper()
module = hass.data[DATA_AMPIO_MODULES].get(mac)
if module is None:
return
else:
return
try:
payload = json.loads(msg.payload)
except ValueError as err:
_LOGGER.error("Unable to parse JSON module names: %s", err)
return
module.names = ItemName.from_topic_payload(payload)
module.update_configs()
_LOGGER.info(
"Discovered: %s-%s (%s): %s",
module.code,
module.model,
module.software,
module.name,
)
for component, configs in module.configs.items():
for config in configs:
unique_id = config.get("unique_id")
if unique_id not in hass.data[DATA_AMPIO_UNIQUE_IDS]:
hass.data[DATA_AMPIO][component].append(config)
hass.data[DATA_AMPIO_UNIQUE_IDS].add(unique_id)
else:
_LOGGER.debug("Ignoring: %s", unique_id)
del hass.data[DATA_AMPIO_MODULES][mac]
if len(hass.data[DATA_AMPIO_MODULES]) == 0: # ALL MODULES discovered
_LOGGER.info("All modules discovered")
asyncio.create_task(async_load_entities(hass))
topics[RESPONSE_MODULE_NAMES] = {
"topic": RESPONSE_MODULE_NAMES,
"msg_callback": module_names_received,
"qos": DEFAULT_QOS,
}
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
hass.data[DATA_AMPIO_MODULES] = {}
hass.data[DATA_AMPIO_UNIQUE_IDS] = set()
hass.data[DISCOVERY_UNSUBSCRIBE] = await subscription.async_subscribe_topics(
hass, hass.data.get(DISCOVERY_UNSUBSCRIBE), topics
)
ampio.async_publish(hass, REQUEST_AMPIO_VERSION, "", 0, False)
ampio.async_publish(hass, REQUEST_MODULE_DISCOVERY, "1", 0, False)
return True
| 22,031
|
def get_E_E_fan_H_d_t(P_fan_rtd_H, V_hs_vent_d_t, V_hs_supply_d_t, V_hs_dsgn_H, q_hs_H_d_t):
"""(37)
Args:
P_fan_rtd_H: 定格暖房能力運転時の送風機の消費電力(W)
V_hs_vent_d_t: 日付dの時刻tにおける熱源機の風量のうちの全般換気分(m3/h)
V_hs_supply_d_t: param V_hs_dsgn_H:暖房時の設計風量(m3/h)
q_hs_H_d_t: 日付dの時刻tにおける1時間当たりの熱源機の平均暖房能力(-)
V_hs_dsgn_H: returns: 日付dの時刻tにおける1時間当たりの送風機の消費電力量のうちの暖房設備への付加分(kWh/h)
Returns:
日付dの時刻tにおける1時間当たりの送風機の消費電力量のうちの暖房設備への付加分(kWh/h)
"""
f_SFP = get_f_SFP()
E_E_fan_H_d_t = np.zeros(24 * 365)
a = (P_fan_rtd_H - f_SFP * V_hs_vent_d_t) \
* ((V_hs_supply_d_t - V_hs_vent_d_t) / (V_hs_dsgn_H - V_hs_vent_d_t)) * 10 ** (-3)
E_E_fan_H_d_t[q_hs_H_d_t > 0] = np.clip(a[q_hs_H_d_t > 0], 0, None)
return E_E_fan_H_d_t
| 22,032
|
def load_func(func_string):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if func_string is None:
return None
elif isinstance(func_string, str):
return import_from_string(func_string)
return func_string
| 22,033
|
def normalize(features):
"""
Scale data in provided series into [0,1] range.
:param features:
:return:
"""
return (features - features.min()) / (features.max() - features.min())
| 22,034
|
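Quick sketch of the min-max scaling above using a pandas Series (any object supporting .min()/.max() arithmetic works the same way):
import pandas as pd
print(normalize(pd.Series([2.0, 4.0, 6.0])).tolist())  # -> [0.0, 0.5, 1.0]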
def get_gitlab_template_version(response):
"""Return version number of gitlab template."""
return glom(response, 'ref', default=False).replace('refs/tags/', '')
| 22,035
|
def test_get_encoder_for_error_loss_params(D_hat):
"""Tests for invalid value of `loss_params`."""
with pytest.raises(AssertionError,
match="loss_params should be a valid dict or None."):
get_z_encoder_for(X=X,
D_hat=D_hat,
n_atoms=N_ATOMS,
atom_support=N_TIMES_ATOM,
loss_params=42,
n_jobs=2)
| 22,036
|
def getConfigXmlString(version, name, protocol, user, host, port, path):
"""! Arguments -> XML String. """
tag_root = ET.Element(TAG_ROOT)
tag_root.set(ATTR_VERSION, version)
tag_remote = ET.Element(TAG_REMOTE)
tag_remote.set(ATTR_NAME, name)
tag_root.append(tag_remote)
appendElement(tag_remote, TAG_PROTOCOL, protocol)
appendElement(tag_remote, TAG_USER, user)
appendElement(tag_remote, TAG_HOST, host)
appendElement(tag_remote, TAG_PORT, port)
appendElement(tag_remote, TAG_PATH, path)
return ET.tostring(tag_root)
| 22,037
|
def phase_angle(A: Entity,
B: Entity,
C: Entity) -> Optional[float]:
"""The orbital phase angle, between A-B-C, of the angle at B.
i.e. the angle between the ref-hab vector and the ref-targ vector."""
# Code from Newton Excel Bach blog, 2014, "the angle between two vectors"
if B.name == C.name:
return None
AB = A.pos - B.pos
CB = C.pos - B.pos
return np.degrees(
np.arctan2(AB[1], AB[0]) -
np.arctan2(CB[1], CB[0])
) % 360
| 22,038
|
def point2geojsongeometry(x, y, z=None):
"""
helper function to generate GeoJSON geometry of point
:param x: x coordinate
:param y: y coordinate
:param z: z coordinate (default=None)
:returns: `dict` of GeoJSON geometry
"""
if z is None or int(z) == 0:
LOGGER.debug('Point has no z property')
coordinates = [x, y]
else:
LOGGER.debug('Point has z property')
coordinates = [x, y, z]
if None in coordinates:
return None
geometry = {
'type': 'Point',
'coordinates': coordinates
}
return geometry
| 22,039
|
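Example calls for the helper above (assumes the module-level LOGGER is configured; the coordinate values are arbitrary):
print(point2geojsongeometry(10.5, 45.2))
# -> {'type': 'Point', 'coordinates': [10.5, 45.2]}
print(point2geojsongeometry(10.5, 45.2, 120.0))
# -> {'type': 'Point', 'coordinates': [10.5, 45.2, 120.0]}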
def main(args):
"""
Run processes in parallel if --all=True, otherwise run one process.
"""
if args.all:
# Get all json files
filespath = args.data_dir + 'Original/'
filenames = [os.path.splitext(f)[0] for f in os.listdir(filespath) if f.endswith('.json')]
# Instantiating process with arguments
process_list = [Process(target=filter_corpus, args=(f, args.data_dir,)) for f in filenames]
for i, p in enumerate(process_list):
print('Process {} is starting...'.format(i+1))
p.start()
time.sleep(1)
# Complete the processes
for p in process_list:
p.join()
else:
filename = os.path.splitext(args.infile)[0]
filter_corpus(filename, args.data_dir)
| 22,040
|
def upper(string): # pragma: no cover
"""Lower."""
new_string = []
for c in string:
o = ord(c)
new_string.append(chr(o - 32) if LC_A <= o <= LC_Z else c)
return ''.join(new_string)
| 22,041
|
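Assuming LC_A and LC_Z are the ordinals of 'a' and 'z', the function above only uppercases ASCII letters; a quick sketch:
print(upper("héllo world"))  # -> 'HéLLO WORLD' (the non-ASCII 'é' is left untouched)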
def test_main_setPref_invalid_field(capsys, reset_globals):
"""Test setPref() with a invalid field"""
class Field:
"""Simple class for testing."""
def __init__(self, name):
"""constructor"""
self.name = name
prefs = MagicMock()
prefs.DESCRIPTOR.fields_by_name.get.return_value = None
# Note: This is a subset of the real fields
ls_secs_field = Field('ls_secs')
is_router = Field('is_router')
fixed_position = Field('fixed_position')
fields = [ ls_secs_field, is_router, fixed_position ]
prefs.DESCRIPTOR.fields = fields
setPref(prefs, 'foo', '300')
out, err = capsys.readouterr()
assert re.search(r'does not have an attribute called foo', out, re.MULTILINE)
# ensure they are sorted
assert re.search(r'fixed_position\s+is_router\s+ls_secs', out, re.MULTILINE)
assert err == ''
| 22,042
|
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError: # nosec(cjschaef): 'obj' doesn't contain attribute
# 'uuid', return attribute 'id' or the 'obj'
pass
try:
return obj.id
except AttributeError:
return obj
| 22,043
|
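A sketch of both accepted argument kinds for getid; SimpleNamespace stands in for a real Resource object (hypothetical, not from the library):
from types import SimpleNamespace
res = SimpleNamespace(uuid="abc-123")
print(getid(res))        # -> 'abc-123'
print(getid("abc-123"))  # plain IDs pass through -> 'abc-123'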
def dateToUsecs(datestring):
"""Convert Date String to Unix Epoc Microseconds"""
dt = datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
return int(time.mktime(dt.timetuple())) * 1000000
| 22,044
|
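Note that time.mktime interprets the string in the local timezone, so the exact value is machine-dependent; a sketch:
usecs = dateToUsecs("2021-01-01 00:00:00")
print(usecs % 1000000)  # -> 0, since the result is whole seconds expressed in microseconds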
def _compute_applied_axial(R_od, t_wall, m_stack, section_mass):
"""Compute axial stress for spar from z-axis loading
INPUTS:
----------
R_od : float (scalar/vector), outer radius of the spar section
t_wall : float (scalar/vector), wall thickness of the spar section
m_stack : float (scalar/vector), mass supported on top of the spar
section_mass : float (scalar/vector), mass of each spar section as axial loading increases with spar depth
OUTPUTS:
-------
stress : float (scalar/vector), axial stress
"""
R = R_od - 0.5 * t_wall
# Add in weight of sections above it
axial_load = m_stack + np.r_[0.0, np.cumsum(section_mass[:-1])]
# Divide by shell cross sectional area to get stress
return gravity * axial_load / (2.0 * np.pi * R * t_wall)
| 22,045
|
def glob(loader, node):
"""Construct glob expressions."""
value = loader.construct_scalar(node)[len('~+/'):]
return os.path.join(
os.path.dirname(loader.name),
value
)
| 22,046
|
def categorical(p, rng=None, size=()):
"""Draws i with probability p[i]"""
if len(p) == 1 and isinstance(p[0], np.ndarray):
p = p[0]
p = np.asarray(p)
if size == ():
size = (1,)
elif isinstance(size, (int, np.number)):
size = (size,)
else:
size = tuple(size)
if size == (0,):
return np.asarray([])
assert len(size)
if p.ndim == 0:
raise NotImplementedError()
elif p.ndim == 1:
n_draws = int(np.prod(size))
sample = rng.multinomial(n=1, pvals=p, size=int(n_draws))
assert sample.shape == size + (len(p),)
rval = np.dot(sample, np.arange(len(p)))
rval.shape = size
return rval
elif p.ndim == 2:
n_draws_, n_choices = p.shape
(n_draws,) = size
assert n_draws == n_draws_
rval = [
np.where(rng.multinomial(pvals=p[ii], n=1))[0][0] for ii in range(n_draws)
]
rval = np.asarray(rval)
rval.shape = size
return rval
else:
raise NotImplementedError()
| 22,047
|
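Sketch of drawing from a discrete distribution with the helper above; a seeded RandomState makes it reproducible (the exact draws depend on NumPy's generator):
import numpy as np
rng = np.random.RandomState(0)
draws = categorical([0.2, 0.3, 0.5], rng=rng, size=10)
print(draws.shape, set(draws.tolist()) <= {0, 1, 2})  # -> (10,) True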
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description='DSNT human pose model info')
parser.add_argument(
'--model', type=str, metavar='PATH', required=True,
help='model state file')
parser.add_argument(
'--gpu', type=int, metavar='N', default=0,
help='index of the GPU to use')
args = parser.parse_args()
return args
| 22,048
|
def get_model(theme, corpus_all, dictionary_all, num_topics=15, passes=25, iterations=400,
eval_every=None, update_every=0, alpha='auto', eta='auto'):
"""
Get the LDA model
"""
# Check if a model with the same config already exists.
# If it does, load the model instead of generating a new one
tempfile = TEMP_PATH + '/%s/%s_LDA_model_' % (theme, theme) + '_'.join([str(num_topics), str(passes), str(iterations), str(alpha), str(eta)])
if os.path.exists(tempfile):
lda = gensim.models.LdaModel.load(tempfile)
else:
lda = generate_lda_model(theme, corpus_all, dictionary_all, num_topics, passes,
iterations, eval_every, update_every, alpha, eta)
return lda
| 22,049
|
def download_query_log(file_path, from_, to, hashids=None, **opts):
"""
Downloads and saves query search engine/s logs into file.
"""
with open(file_path, 'wb') as file:
log_iter = query_log_iter(from_, to, hashids, **opts)
file.writelines(log_iter)
| 22,050
|
def mono_culture(fungi_1, fungi_2, file_dir, reference):
"""
Main
"""
trial_name = fungi_1.name+'_'+fungi_2.name+'_'+reference
fig = plt.figure()
WIDTH = 100
HEIGHT = 100
TIME = 30
# cells = generate_board(WIDTH,HEIGHT)
cells = generate_temp_gradient(WIDTH,HEIGHT)
# cells = generate_moist_gradient(WIDTH, HEIGHT)
# get_moistmap_of_food(cells, debug=True)
# exit()
# Fungi one
# fungi_row = random.choice(range(HEIGHT))
# fungi_col = random.choice(range(WIDTH))
fungi_row = 50
fungi_col = 50
place_fungus(cells, fungi_row, fungi_col, fungi_1)
# Fungi Two
fungi_row = 70
fungi_col = 70
place_fungus(cells, fungi_row, fungi_col, fungi_2)
# fungi_row = random.choice(range(HEIGHT))
# fungi_col = random.choice(range(WIDTH))
# place_fungus(cells, fungi_row, fungi_col, Prufa)
# place food
# place_food(cells, fungi_row+(HEIGHT-fungi_row)-5, fungi_col+(WIDTH-fungi_col)-5, fungi_row//10, 1, 1)
RADIUS = WIDTH//10
print("RADIUS: ", RADIUS)
# place_food(cells, 10, 10, RADIUS, 1, 1)
# place_food(cells, 50, 30, RADIUS+2, 1, 1)
main_board = Board(cells)
# img = get_image_from_state(cells, '-1', debug=True)
# get_heatmap_of_temp(cells, debug=True)
heat_data = get_heatmap_of_temp(main_board.state)
heatmap = plt.imshow(heat_data, origin='lower', cmap='hot')
plt.colorbar(heatmap)
plt.title("heatmap_{0}".format(trial_name))
plt.savefig(file_dir+'/heatmap_{0}.png'.format(trial_name))
plt.clf()
heat_data = get_moistmap(main_board.state)
heatmap = plt.imshow(heat_data, origin='lower', cmap='Blues')
plt.colorbar(heatmap)
plt.title("moistmap_{0}".format(trial_name))
plt.savefig(file_dir+'/moistmap_{0}.png'.format(trial_name))
plt.clf()
heat_data = get_heatmap_of_food(main_board.state)
heatmap = plt.imshow(heat_data, origin='lower', cmap='hot')
plt.colorbar(heatmap)
plt.title("foodmap_{0}".format(trial_name))
plt.savefig(file_dir+'/foodmap_{0}.png'.format(trial_name))
plt.clf()
# Moisture data
# exit()
# exit()
# img = get_heatmap_of_food(cells)
# plt.imshow(img, origin='lower', cmap='hot', interpolation='nearest')
# plt.show()
# plt.clf()
time = 0
file_names = []
for i in tqdm(range(TIME)):
temp_board = Board(main_board.state)
# while tqdm(time) <= TIME:
# new_cells = copy.deepcopy(board)
for rix, row in enumerate(cells):
# new_row_state = []
for cix, col in enumerate(row):
# Get surrounding for main board
neighbors = get_surroundings(main_board.state, rix, cix)
# Update the new temp board state
apply_rules(temp_board.state, neighbors, time)
# get_image_from_state(temp_board.state, neighbors, time)
# new_cells.append(new_row_state)
time += 1
# Update main board to temp board
main_board = temp_board
img = get_image_from_state(main_board.state, time)
plt.imshow(img, origin='lower', vmin=0)
file_name = file_dir+'/images/{1}_test_{0}.png'.format(time,trial_name)
file_names.append(file_name)
plt.title("{1} time: {0}".format(time, trial_name))
plt.savefig(file_name)
plt.clf()
data = []
for fname in file_names:
data.append(imageio.imread(fname))
imageio.mimsave(file_dir+'/{0}_test_complete.gif'.format(trial_name), data, duration=1)
| 22,051
|
def covariance_distance(covariances: List[Covariance],
x: np.ndarray) -> np.ndarray:
"""Euclidean distance of all pairs gp_models.
:param covariances:
:param x:
:return:
"""
# For each pair of kernel matrices, compute Euclidean distance
n_kernels = len(covariances)
dists = np.zeros((n_kernels, n_kernels))
for i in range(n_kernels):
for j in range(i + 1, n_kernels):
dists[i, j] = kernel_l2_dist(covariances[i].raw_kernel, covariances[j].raw_kernel, x)
# Make symmetric
dists = (dists + dists.T) / 2.
return dists
| 22,052
|
def create_inputs(im, im_info, model_arch='YOLO'):
"""generate input for different model type
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
model_arch (str): model type
Returns:
inputs (dict): input of model
"""
inputs = {}
inputs['image'] = im
origin_shape = list(im_info['origin_shape'])
resize_shape = list(im_info['resize_shape'])
scale = im_info['scale']
if 'YOLO' in model_arch:
im_size = np.array([origin_shape]).astype('int32')
inputs['im_size'] = im_size
elif 'RetinaNet' in model_arch:
im_info = np.array([resize_shape + [scale]]).astype('float32')
inputs['im_info'] = im_info
elif 'RCNN' in model_arch:
im_info = np.array([resize_shape + [scale]]).astype('float32')
im_shape = np.array([origin_shape + [1.]]).astype('float32')
inputs['im_info'] = im_info
inputs['im_shape'] = im_shape
return inputs
| 22,053
|
def runDSSAT(dbname, options):
"""Driver function for performing a DSSAT nowcast simulation"""
log = logging.getLogger(__name__)
startyear, startmonth, startday = map(
int, options['nowcast']['startdate'].split('-'))
endyear, endmonth, endday = map(
int, options['nowcast']['enddate'].split('-'))
res = float(options['nowcast']['resolution'])
nens = int(options['dssat']['ensemble size'])
name = options['nowcast']['name'].lower()
if 'shapefile' in options['dssat']:
shapefile = options['dssat']['shapefile']
else:
shapefile = None
if 'assimilate' in options['dssat']:
assimilate = options['dssat']['assimilate']
else:
assimilate = "Y"
try:
crops = options['dssat']['crop'].split(",")
for crop in crops:
crop = crop.strip()
mod = __import__("dssat.{0}".format(crop), fromlist=[crop])
model = mod.Model(dbname, name, res, startyear, startmonth, startday,
endyear, endmonth, endday, nens, options['vic'], shapefile, assimilate)
model.run()
except Exception:
log.error("Error in crop selected.")
sys.exit()
| 22,054
|
def draw_masks(
points,
pil_image,
left_chin_nose_connection,
right_chin_nose_connection,
bottom_chin_nose_connection,
mask
):
"""
It receives 4 key points of the mask in the following format:
points: [top, right, bottom, left]
"""
top, right, bottom, left = points
# Vertical distance
height = int(((top[0] - bottom[0])**2 + (top[1] - bottom[1])**2)**0.5)
# Horizontal distance
width = int(0.8*((right[0] - left[0])**2 + (right[1] - left[1])**2)**0.5)
# Getting the current path
path = Path(__file__).parent
# Y - face tilt
left_chin_node_distance = get_distance(left_chin_nose_connection)
right_chin_node_distance = get_distance(right_chin_nose_connection)
distance_difference = abs(
left_chin_node_distance - right_chin_node_distance
)
# Z - face tilt validation
inverse = False
# Mask
if distance_difference < 20:
pil_mask = Image.open(f'{path}/../../images/masks/{mask}')
pil_mask_resized = pil_mask.resize((width, height))
mask_position = (left[0] + int(0.15*width), top[1])
elif left_chin_node_distance > right_chin_node_distance:
right = f'{mask.split(".")[0]}_right.{mask.split(".")[1]}'
pil_mask = Image.open(f'{path}/../../images/masks/{right}')
pil_mask_resized = pil_mask.resize((width, height))
mask_position = (left[0] + int(0.175*width), top[1])
inverse = True
else:
left_mask = f'{mask.split(".")[0]}_left.{mask.split(".")[1]}'
pil_mask = Image.open(f'{path}/../../images/masks/{left_mask}')
pil_mask_resized = pil_mask.resize((width, height))
mask_position = (left[0] + int(0.15*width), top[1])
# Z - face tilt
angle = get_rotation_angle_from_Y_axis(bottom_chin_nose_connection, inverse)
pil_mask_rotated = pil_mask_resized.rotate(angle)
pil_image.paste(pil_mask_rotated, mask_position, pil_mask_rotated)
| 22,055
|
def print_args(args):
"""Print out the input arguments."""
print ('Sending test email by file: %s' % args.html)
| 22,056
|
def kill_topology(heron_cli_path, cli_config_path, cluster, role, env, topology_name):
"""
Kill a topology using heron-cli
"""
cmd = "%s kill --config-path=%s %s %s" % \
(heron_cli_path, cli_config_path, cluster_token(cluster, role, env), topology_name)
logging.info("Killing topology: %s", cmd)
if os.system(cmd) != 0:
raise status.TestFailure("Failed to kill topology %s" % topology_name)
logging.info("Successfully killed topology %s", topology_name)
| 22,057
|
def multiplex(n, q, **kwargs):
""" Convert one queue into several equivalent Queues
>>> q1, q2, q3 = multiplex(3, in_q)
"""
out_queues = [Queue(**kwargs) for i in range(n)]
def f():
while True:
x = q.get()
for out_q in out_queues:
out_q.put(x)
t = Thread(target=f)
t.daemon = True
t.start()
return out_queues
| 22,058
|
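Usage sketch for the queue fan-out above (assumes Queue and Thread are imported in the snippet's module, as the code implies):
in_q = Queue()
q1, q2, q3 = multiplex(3, in_q)
in_q.put("hello")
print(q1.get(), q2.get(), q3.get())  # each consumer queue receives its own copy: hello hello hello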
def select_project(FILENAME):
"""
Reads the XML file FILENAME and lists its projects so that the user
can choose one of them
input
FILENAME: XML file with the expected structure, located where the
program scripts are
return:
the project selected by the user, as an XML tree
"""
import xml.etree.ElementTree as ET
tree = ET.parse(FILENAME)
root = tree.getroot()
print('Projects in ' + FILENAME)
projects = []
for i, project in enumerate(root.findall('project')):
projects.append(project)
print(i, end=' ')
print('. ' + project.get('name'))
print('Select project number:')
choice = input()
return projects[int(choice)]
| 22,059
|
def compute_average(arr):
"""Compute average value for given matrix
Args:
arr (numpy array): a numpy array
Return:
float: average value
"""
val_avg = np.average(arr)
return val_avg
| 22,060
|
def _read_config(rundate, pipeline, *args, **kwargs):
"""Read the configuration of a Where analysis from file
Todo: Add this as a classmethod on Configuration
Args:
rundate: Rundate of analysis.
pipeline: Pipeline used for analysis.
session: Session in analysis.
Returns:
Configuration of Where analysis.
"""
if not has_config(rundate, pipeline, *args, **kwargs):
raise FileNotFoundError(f"No configuration found for {pipeline.upper()} {rundate.strftime(config.FMT_date)}")
cfg = mg_config.Configuration.read_from_file(pipeline, _config_path(rundate, pipeline, *args, **kwargs))
cfg.master_section = pipeline
return cfg
| 22,061
|
def ta_series(func: Callable, *args, **kwargs) -> QFSeries:
"""
Function created to allow using TA-Lib functions with QFSeries.
Parameters
----------
func
talib function: for example talib.MA
args
time series arguments to the function. They are all passed as QFSeries.
for example: 'close' or 'high, low, close' where each argument is a QFSeries.
kwargs
additional arguments to the function. for example: 'timeperiod=10' or 'timeperiod=timeperiod, matype=i'.
All additional arguments have to be passed as keyword arguments.
Returns
-------
QFSeries
Output from the talib function encapsulated in a QFSeries
"""
series_list = list(map(lambda series: series.values, args))
result = func(*series_list, **kwargs)
result = QFSeries(index=args[0].index, data=result)
return result
| 22,062
|
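A hedged sketch of calling ta_series, assuming TA-Lib is installed and that QFSeries wraps a pandas Series of floats as the docstring suggests:
import pandas as pd
import talib
dates = pd.date_range("2021-01-01", periods=30)
close = QFSeries(data=[float(i) for i in range(1, 31)], index=dates)
sma10 = ta_series(talib.MA, close, timeperiod=10)  # simple moving average, aligned to the original index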
def find_trendline(
df_data: pd.DataFrame, y_key: str, high_low: str = "high"
) -> pd.DataFrame:
"""Attempts to find a trend line based on y_key column from a given stock ticker data frame.
Parameters
----------
df_data : DataFrame
The stock ticker data frame with at least date_id, y_key columns.
y_key : str
Column name to base the trend line on.
high_low: str, optional
Either "high" or "low". High is the default.
Returns
-------
DataFrame
If a trend is successfully found,
An updated Panda's data frame with a trend data {y_key}_trend column.
If no trend was found,
An original Panda's data frame
"""
for iteration in [3, 4, 5, 6, 7]:
df_temp = df_data.copy()
while len(df_temp) > iteration:
reg = stats.linregress(
x=df_temp["date_id"],
y=df_temp[y_key],
)
if high_low == "high":
df_temp = df_temp.loc[
df_temp[y_key] > reg[0] * df_temp["date_id"] + reg[1]
]
else:
df_temp = df_temp.loc[
df_temp[y_key] < reg[0] * df_temp["date_id"] + reg[1]
]
if len(df_temp) > 1:
break
if len(df_temp) == 1:
return df_data
reg = stats.linregress(
x=df_temp["date_id"],
y=df_temp[y_key],
)
df_data[f"{y_key}_trend"] = reg[0] * df_data["date_id"] + reg[1]
return df_data
| 22,063
|
def find_core(read, core, core_position_sum, core_position_count, start = -1):
"""
Find the core sequence, trying "average" position first for efficiency.
"""
if start < 0 and core_position_count > 0:
core_position = round(core_position_sum/core_position_count)
if len(read) > core_position+len(core):
if read[core_position:core_position+len(core)]==core:
return core_position
return read.find(core, start+1)
| 22,064
|
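Small sketch of the fallback path above: with no history (count 0) it reduces to str.find; the read and core strings are made up:
read = "ACGTTTCOREACGT"  # hypothetical read containing the core sequence
print(find_core(read, "CORE", 0, 0))  # -> 6 (plain str.find, since no average position exists yet)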
def clambda(n):
"""
clambda(n)
Returns Carmichael's lambda function for positive integer n.
Relies on factoring n
"""
smallvalues=[1,1,2,2,4,2,6,2,6,4,10,2,12,6,4,4,16,6,18,4,6,10,22,2,20,12,18,\
6,28,4,30,8,10,16,12,6,36,18,12,4,40,6,42,10,12,22,46,4,42,20,16,12,52,18,\
20,6,18,28,58,4,60,30,6,16,12,10,66,16,22,12,70,6,72,36,20,18,30,12,78,4,54,\
40,82,6,16,42,28,10,88,12,12,22,30,46,36,8,96,42,30,20]
if n<=100: return smallvalues[n-1]
factors=factor(n)
l1=[]
for p,e in factors:
if p==2 and e>2:
l1.append(2**(e-2))
else:
l1.append((p-1)*p**(e-1))
return reduce(lambda a,b : lcm(a,b), l1)
| 22,065
|
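Quick check of the small-value table above against known values of Carmichael's lambda:
print(clambda(8), clambda(15), clambda(16))  # -> 2 4 4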
def dashboard():
"""
Render the dashboard template on the /dashboard route
"""
return render_template('page/home/dashboard.html', title="Dashboard")
| 22,066
|
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port"""
try:
r = do_single_request("GET",
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
r = json.loads(r)
except NvpApiClient.ResourceNotFound as e:
LOG.error(_("Port not found, Error: %s"), str(e))
raise exception.PortNotFound(port_id=port_id, net_id=lswitch_id)
except NvpApiClient.NvpApiException as e:
raise exception.QuantumException()
if r['link_status_up'] is True:
return constants.PORT_STATUS_ACTIVE
else:
return constants.PORT_STATUS_DOWN
| 22,067
|
def ensure_stdout_handles_unicode():
""" Ensure stdout can handle unicode by wrapping it if necessary
Required e.g. if output of this script is piped or redirected in a linux
shell, since then sys.stdout.encoding is ascii and cannot handle
print(unicode). In that case we need to find some compatible encoding and
wrap sys.stdout into a encoder following (many thanks!)
https://stackoverflow.com/a/1819009 or https://stackoverflow.com/a/20447935
Can be undone by setting sys.stdout = sys.__stdout__
"""
import codecs
import locale
# do not re-wrap
if isinstance(sys.stdout, codecs.StreamWriter):
return
# try to find encoding for sys.stdout
encoding = None
try:
encoding = sys.stdout.encoding # variable encoding might not exist
except Exception:
pass
if encoding not in (None, '', 'ascii'):
return # no need to wrap
# try to find an encoding that can handle unicode
try:
encoding = locale.getpreferredencoding()
except Exception:
pass
# fallback if still no encoding available
if encoding in (None, '', 'ascii'):
encoding = 'utf8'
# logging is probably not initialized yet, but just in case
log.debug('wrapping sys.stdout with encoder using {0}'.format(encoding))
wrapper = codecs.getwriter(encoding)
sys.stdout = wrapper(sys.stdout)
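# A minimal usage sketch: wrap stdout once, print unicode safely, then restore.
def _example_unicode_stdout():
    ensure_stdout_handles_unicode()
    print(u'\u2713 safe to print even when stdout encoding is ascii')
    sys.stdout = sys.__stdout__  # undo the wrapping, as noted in the docstring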
| 22,068
|
def make_nearest_neighbors_graph(data, k, n=1000):
"""Build exact k-nearest neighbors graph from numpy data.
Args:
    data: Data to compute nearest neighbors of; each row is one point.
    k: number of nearest neighbors to compute
    n (optional): number of points to process per batch; must evenly divide
      the number of points
Returns:
A scipy sparse matrix in LIL format giving the symmetric nn graph.
"""
shape = data.shape
assert shape[0] % n == 0
nbr_graph = scipy.sparse.lil_matrix((shape[0], shape[0]))
norm = np.sum(data**2, axis=1)
cols = np.meshgrid(np.arange(n), np.ones(k+1))[0]
for i in tqdm(range(0, shape[0], n)):
dot = data @ data[i:i+n].T
dists = np.sqrt(np.abs(norm[:, None] - 2*dot + norm[i:i+n][None, :]))
idx = np.argpartition(dists, k, axis=0)[:k+1]
nbrs = idx[np.argsort(dists[idx, cols], axis=0), cols][1:]
for j in range(n):
nbr_graph[i+j, nbrs[:, j]] = 1
# Symmetrize graph
for i in tqdm(range(shape[0])):
for j in nbr_graph.rows[i]:
if nbr_graph[j, i] == 0:
nbr_graph[j, i] = nbr_graph[i, j]
logging.info('Symmetrized neighbor graph')
return nbr_graph
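# A minimal usage sketch, assuming the numpy/scipy/tqdm imports used above:
# 100 points in 5 dimensions, processed in chunks of 50 so shape[0] % n == 0.
def _example_nn_graph():
  data = np.random.rand(100, 5)
  graph = make_nearest_neighbors_graph(data, k=5, n=50)
  return graph.nnz  # number of stored edges in the symmetrized LIL matrix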
| 22,069
|
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
"""Creates readers and queues for reading example protos."""
files = []
for e in input_files:
for path in e.split(','):
files.extend(file_io.get_matching_files(path))
thread_count = multiprocessing.cpu_count()
# The minimum number of instances in a queue from which examples are drawn
# randomly. The larger this number, the more randomness at the expense of
# higher memory requirements.
min_after_dequeue = 1000
# When batching data, the queue's capacity will be larger than the batch_size
# by some factor. The recommended formula is (num_threads + a small safety
# margin). For now, we use a single thread for reading, so this can be small.
queue_size_multiplier = thread_count + 3
# Convert num_epochs == 0 -> num_epochs is None, if necessary
num_epochs = num_epochs or None
# Build a queue of the filenames to be read.
filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
example_id, encoded_example = tf.TFRecordReader(options=options).read_up_to(
filename_queue, batch_size)
if shuffle:
capacity = min_after_dequeue + queue_size_multiplier * batch_size
return tf.train.shuffle_batch(
[example_id, encoded_example],
batch_size,
capacity,
min_after_dequeue,
enqueue_many=True,
num_threads=thread_count)
else:
capacity = queue_size_multiplier * batch_size
return tf.train.batch(
[example_id, encoded_example],
batch_size,
capacity=capacity,
enqueue_many=True,
num_threads=thread_count)
| 22,070
|
def gen_context(n=10):
"""
    Returns a random n x n integer matrix (entries in [-3, 3]) that can be used to produce private prices over a set of items.
"""
return np.random.randint(-3,4,size=(n,n))
| 22,071
|
def test_default_lower_rules():
"""Check that the default lower section rules are set correctly."""
lower_rules = yh.DEFAULT_LOWER_RULES
expected_types = {
"Three of a Kind": rl.NofKindScoringRule,
"Four of a Kind": rl.NofKindScoringRule,
"Full House (Two of a Kind and Three of a Kind)": rl.FullHouseScoringRule,
"Small Straight (Four in a Row)": rl.SmallStraightScoringRule,
"Large Straight (Five in a Row)": rl.LargeStraightScoringRule,
"YAHTZEE (Five of a Kind)": rl.YahtzeeScoringRule,
"Chance (Any Five Dice)": rl.ChanceScoringRule,
}
expected_nkind = {
"Three of a Kind": 3,
"Four of a Kind": 4,
}
assert len(lower_rules) == 7
assert all([
        lower_rules[idx].name == name and isinstance(lower_rules[idx], rule_type)
        for idx, (name, rule_type) in enumerate(expected_types.items())
])
assert all([
lower_rules[idx].name == name and lower_rules[idx].n == n
for idx, (name, n) in enumerate(expected_nkind.items())
if isinstance(lower_rules[idx], rl.NofKindScoringRule)
])
| 22,072
|
def test_get_from_different_buckets(empty_hash_table):
"""Test get works from different buckets."""
empty_hash_table.set('abc', 'a')
empty_hash_table.set('xyz', 'b')
assert empty_hash_table.get('abc') == 'a'
assert empty_hash_table.get('xyz') == 'b'
| 22,073
|
def _parallel_binning_fit(split_feat, _self, X, y,
weights, support_sample_weight,
bins, loss):
"""Private function to find the best column splittings within a job."""
n_sample, n_feat = X.shape
feval = CRITERIA[_self.criterion]
split_t = None
split_col = None
left_node = (None, None, None, None)
right_node = (None, None, None, None)
largs_left = {'classes': None}
largs_right = {'classes': None}
if n_sample < _self._min_samples_split:
return loss, split_t, split_col, left_node, right_node
for col, _bin in zip(split_feat, bins):
for q in _bin:
# create 1D bool mask for right/left children
mask = (X[:, col] > q)
n_left, n_right = (~mask).sum(), mask.sum()
if n_left < _self._min_samples_leaf or n_right < _self._min_samples_leaf:
continue
# create 2D bool mask for right/left children
left_mesh = np.ix_(~mask, _self._linear_features)
right_mesh = np.ix_(mask, _self._linear_features)
model_left = deepcopy(_self.base_estimator)
model_right = deepcopy(_self.base_estimator)
if hasattr(_self, 'classes_'):
largs_left['classes'] = np.unique(y[~mask])
largs_right['classes'] = np.unique(y[mask])
if len(largs_left['classes']) == 1:
model_left = DummyClassifier(strategy="most_frequent")
if len(largs_right['classes']) == 1:
model_right = DummyClassifier(strategy="most_frequent")
if weights is None:
model_left.fit(X[left_mesh], y[~mask])
loss_left = feval(model_left, X[left_mesh], y[~mask],
**largs_left)
wloss_left = loss_left * (n_left / n_sample)
model_right.fit(X[right_mesh], y[mask])
loss_right = feval(model_right, X[right_mesh], y[mask],
**largs_right)
wloss_right = loss_right * (n_right / n_sample)
else:
if support_sample_weight:
model_left.fit(X[left_mesh], y[~mask],
sample_weight=weights[~mask])
model_right.fit(X[right_mesh], y[mask],
sample_weight=weights[mask])
else:
model_left.fit(X[left_mesh], y[~mask])
model_right.fit(X[right_mesh], y[mask])
loss_left = feval(model_left, X[left_mesh], y[~mask],
weights=weights[~mask], **largs_left)
wloss_left = loss_left * (weights[~mask].sum() / weights.sum())
loss_right = feval(model_right, X[right_mesh], y[mask],
weights=weights[mask], **largs_right)
wloss_right = loss_right * (weights[mask].sum() / weights.sum())
total_loss = wloss_left + wloss_right
# store if best
if total_loss < loss:
split_t = q
split_col = col
loss = total_loss
left_node = (model_left, loss_left, wloss_left,
n_left, largs_left['classes'])
right_node = (model_right, loss_right, wloss_right,
n_right, largs_right['classes'])
return loss, split_t, split_col, left_node, right_node
| 22,074
|
def _get_latest_template_version_w_git_ssh(template):
"""
Tries to obtain the latest template version using an SSH key
"""
cmd = 'git ls-remote {} | grep HEAD | cut -f1'.format(template)
ret = temple.utils.shell(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = ret.stderr.decode('utf-8').strip()
stdout = ret.stdout.decode('utf-8').strip()
if stderr and not stdout:
raise RuntimeError((
            'An unexpected error happened when running "{}" (stderr="{}")'
).format(cmd, stderr))
return stdout
| 22,075
|
def checkkeywords(keywordsarr, mdtype):
""" Check the keywords
Datasets: for Check 9
Services: for Check 9
    Logic: there must be at least one keyword to reach a score of 2. If any keyword contains a comma (","), then a maximum score of 1 is possible.
"""
score = 0
# keywordsarr is an array of objects, each containing a property "keywords" and info on a thesaurus
# here we join the keywords from all objects to one array
keywordsstr = ""
    if keywordsarr is not None:
keywords = []
for k in keywordsarr:
for i in k["keywords"]:
i = i.replace("\n", " ")
# exception for 1 keyword of INSPIRE
if i.find(",") > -1 and i != "Gebiedsbeheer, gebieden waar beperkingen gelden, gereguleerde gebieden en rapportage-eenheden":
score = 1
keywords.append(i)
# if the score is already 1, then we know the keywords are not
# correctly set
if len(keywords) > 0 and score != 1:
score = 2
keywordsstr = valuesep.join(keywords)
else:
keywordsstr = ""
# Now fetch the result
if mdtype == "dataset" or mdtype == "series":
# checkid = 9, so the index in the matrix is: 8
result = checksdatasets[8][2][score]
else:
result = checksservices[8][2][score]
return MkmScore(keywordsstr, score, result)
| 22,076
|
def update_subset(record, fields, *source_records, **kwds):
"""Given a destination record, a sequence of fields, and source
for each field, copy over the first value found in the source records.
The argument for fields must be an iterable where each item is either a
string or a pair of strings. If it is a pair of strings, they name
the destination and source field names. If keyword argument "required"
is True and any of the fields are missing from the source records,
then a KeyError is raised."""
required = kwds.pop('required', True)
assert not kwds, 'Only "required" keyword supported'
for field in fields:
if isinstance(field, str):
dst_name = src_name = field
else:
dst_name, src_name = field
assert isinstance(dst_name, str)
assert isinstance(src_name, str)
value = fetch(src_name, *source_records, required=required)
# TODO: assert value?
if value is not None:
setattr(record, dst_name, value)
| 22,077
|
def compute_presence_ratios(
sorting,
duration_in_frames,
sampling_frequency=None,
unit_ids=None,
**kwargs
):
"""
Computes and returns the presence ratios for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
duration_in_frames: int
Length of recording (in frames).
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
presence_ratios: np.ndarray
The presence ratios of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
apply_filter=False, freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
pr = PresenceRatio(metric_data=md)
presence_ratios = pr.compute_metric(**kwargs)
return presence_ratios
| 22,078
|
async def test_ws_long_lived_access_token(hass, hass_ws_client, hass_access_token):
"""Test generate long-lived access token."""
assert await async_setup_component(hass, "auth", {"http": {}})
ws_client = await hass_ws_client(hass, hass_access_token)
# verify create long-lived access token
await ws_client.send_json(
{
"id": 5,
"type": auth.WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
"client_name": "GPS Logger",
"lifespan": 365,
}
)
result = await ws_client.receive_json()
assert result["success"], result
long_lived_access_token = result["result"]
assert long_lived_access_token is not None
refresh_token = await hass.auth.async_validate_access_token(long_lived_access_token)
assert refresh_token.client_id is None
assert refresh_token.client_name == "GPS Logger"
assert refresh_token.client_icon is None
| 22,079
|
def img_box_match(bboxes_gt, bboxes_pre, iou_threshold):
"""
Goal:
Returns info for mAP calculation (Precision recall curve)
Precision = TP / (TP + FP)
Recall = TP / (TP + FN)
Returns:
list of [TP/FP, conf]
num_gt_bboxes : int
Notes:
For each prediction bbox, it finds what ground-truth bbox it belongs to in a descending order of confidence
If iou(pred_box, gt_box) > iou_threshold, this gt_box is assigned to this pred_box.
Then we check if the class is correct or not -> correct: TP
-> incorrect: FP
The rest of prediction bboxes cannot find gt bboxes -> FP
The rest of gt bboxes haven't been assigned to any prediction bboxes -> FN
"""
num_gt_bboxes = len(bboxes_gt)
gt_assign = [0] * num_gt_bboxes
pre_TF = []
for box_pre in bboxes_pre:
max_iou = 0
max_iou_index = -1
for i in range(num_gt_bboxes):
iou_temp = iou_compute(box_pre, bboxes_gt[i])
if gt_assign[i] == 0: # This gt bbox hasn't been assigned
# Find the box_gt with largest iou with this given box_pre
if iou_temp > iou_threshold and iou_temp > max_iou:
max_iou_index = i
max_iou = iou_temp
        if max_iou_index != -1:  # successfully found a matching box_gt
            gt_assign[max_iou_index] = 1  # mark that gt box as assigned
# TP
pre_TF.append([True, box_pre['conf']])
else:
# FP
pre_TF.append([False, box_pre['conf']])
return pre_TF, num_gt_bboxes
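# A hedged call sketch; the box schema is whatever `iou_compute` (not shown)
# expects, so the dicts below are illustrative assumptions only:
#   gt  = [{"x1": 0, "y1": 0, "x2": 10, "y2": 10}]
#   pre = [{"x1": 1, "y1": 1, "x2": 9, "y2": 9, "conf": 0.9}]
#   pre_TF, num_gt = img_box_match(gt, pre, iou_threshold=0.5)
#   # pre_TF is a list of [is_true_positive, confidence] pairs for a PR curve.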
| 22,080
|
def random_char():
"""Return a random character."""
return Char(choice(_possible_chars))
| 22,081
|
def test_valid_team():
"""Test the Team static class method is_valid()."""
team = Team("1", "", "Brussel Sprouts")
assert not Team.is_valid(team)
team = create_test_team("1", "brussel-sprouts", "Brussel Sprouts")
assert Team.is_valid(team)
| 22,082
|
def _add_subparsers(subparsers):
"""Adds argument subparsers"""
subparsers.required = True # Workaround: http://bugs.python.org/issue9253#msg186387
_add_debian_subparser(subparsers)
_add_macos_subparser(subparsers)
_add_linux_simple_subparser(subparsers)
| 22,083
|
def bond(self, atom:Atom, nBonds:int=1, main=False) -> Atom:
"""Like :meth:`__call__`, but returns the atom passed in instead, so you
can form the main loop quickly."""
    self(atom, nBonds, main)
    return atom
| 22,084
|
def train_list():
"""
    Return a sorted list of all training patients that have a (non-None) label
"""
patients = listdir_no_hidden(INPUT_PATH)
patients.sort()
l = []
for patient in patients:
        if labels[patient] is not None:
l.append(patient)
return l
| 22,085
|
def split_data(X, Y):
"""
    Splits the features and the target into training and test sets (80/20 split).
    Params:
    X- (df containing predictors)
    y- (series containing Target)
    Returns:
    X_train, X_test, Y_train, Y_test
"""
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.2, random_state=2)
return X_train, X_test, Y_train, Y_test
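# A minimal usage sketch, assuming sklearn's train_test_split is imported as in
# the snippet above and pandas is available as pd:
def _example_split_data():
    X = pd.DataFrame({"f1": range(10), "f2": range(10, 20)})
    Y = pd.Series([0, 1] * 5)
    X_train, X_test, Y_train, Y_test = split_data(X, Y)
    return X_train.shape, X_test.shape  # expected ((8, 2), (2, 2))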
| 22,086
|
def load_libsrc():
"""
loads directories in 'nest_py/lib_src' into the sys path.
TODO: needs to automatically pick up new directories,
    currently hardcoded
"""
import sys
ops_dir = os.path.dirname(os.path.realpath(__file__))
fst_package = ops_dir + '/../lib_src/fst_pipeline'
sys.path.append(fst_package)
return
| 22,087
|
def get_bucket(client=None, **kwargs):
"""
Get bucket object.
:param client: client object to use.
:type client: Google Cloud Storage client
:returns: Bucket object
:rtype: ``object``
"""
bucket = client.lookup_bucket(kwargs['Bucket'])
return bucket
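# A hedged usage sketch with google-cloud-storage; the bucket name is
# illustrative, and lookup_bucket returns None when the bucket does not exist:
#   from google.cloud import storage
#   client = storage.Client()
#   bucket = get_bucket(client=client, Bucket="my-example-bucket")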
| 22,088
|
def n2(data_source, outfile='n2.html', show_browser=True, embeddable=False,
title=None, use_declare_partial_info=False):
"""
Generate an HTML file containing a tree viewer.
Optionally opens a web browser to view the file.
Parameters
----------
data_source : <Problem> or str
The Problem or case recorder database containing the model or model data.
outfile : str, optional
The name of the final output file
show_browser : bool, optional
If True, pop up the system default web browser to view the generated html file.
Defaults to True.
embeddable : bool, optional
If True, gives a single HTML file that doesn't have the <html>, <DOCTYPE>, <body>
and <head> tags. If False, gives a single, standalone HTML file for viewing.
title : str, optional
The title for the diagram. Used in the HTML title.
use_declare_partial_info : bool, optional
        If True, component internal connectivity in the N2 matrix is computed from
        derivative declarations; otherwise derivative declarations are ignored and dense
        component connectivity is assumed.
"""
# if MPI is active only display one copy of the viewer
if MPI and MPI.COMM_WORLD.rank != 0:
return
# grab the model viewer data
model_data = _get_viewer_data(data_source)
options = {}
options['use_declare_partial_info'] = use_declare_partial_info
model_data['options'] = options
model_data = 'var modelData = %s' % json.dumps(model_data, default=make_serializable)
import openmdao
openmdao_dir = os.path.dirname(inspect.getfile(openmdao))
vis_dir = os.path.join(openmdao_dir, "visualization/n2_viewer")
libs_dir = os.path.join(vis_dir, "libs")
src_dir = os.path.join(vis_dir, "src")
style_dir = os.path.join(vis_dir, "style")
# grab the libraries, src and style
lib_dct = {'d3': 'd3.v4.min', 'awesomplete': 'awesomplete', 'vk_beautify': 'vkBeautify'}
libs = read_files(itervalues(lib_dct), libs_dir, 'js')
src_names = 'constants', 'draw', 'legend', 'modal', 'ptN2', 'search', 'svg'
srcs = read_files(src_names, src_dir, 'js')
styles = read_files(('awesomplete', 'partition_tree'), style_dir, 'css')
with open(os.path.join(style_dir, "fontello.woff"), "rb") as f:
encoded_font = str(base64.b64encode(f.read()).decode("ascii"))
if title:
title = "OpenMDAO Model Hierarchy and N2 diagram: %s" % title
else:
title = "OpenMDAO Model Hierarchy and N2 diagram"
h = DiagramWriter(filename=os.path.join(vis_dir, "index.html"),
title=title,
styles=styles, embeddable=embeddable)
# put all style and JS into index
h.insert('{{fontello}}', encoded_font)
for k, v in iteritems(lib_dct):
h.insert('{{{}_lib}}'.format(k), write_script(libs[v], indent=_IND))
for name, code in iteritems(srcs):
h.insert('{{{}_lib}}'.format(name.lower()), write_script(code, indent=_IND))
h.insert('{{model_data}}', write_script(model_data, indent=_IND))
# Toolbar
toolbar = h.toolbar
group1 = toolbar.add_button_group()
group1.add_button("Return To Root", uid="returnToRootButtonId", disabled="disabled",
content="icon-home")
group1.add_button("Back", uid="backButtonId", disabled="disabled", content="icon-left-big")
group1.add_button("Forward", uid="forwardButtonId", disabled="disabled",
content="icon-right-big")
group1.add_button("Up One Level", uid="upOneLevelButtonId", disabled="disabled",
content="icon-up-big")
group2 = toolbar.add_button_group()
group2.add_button("Uncollapse In View Only", uid="uncollapseInViewButtonId",
content="icon-resize-full")
group2.add_button("Uncollapse All", uid="uncollapseAllButtonId",
content="icon-resize-full bigger-font")
group2.add_button("Collapse Outputs In View Only", uid="collapseInViewButtonId",
content="icon-resize-small")
group2.add_button("Collapse All Outputs", uid="collapseAllButtonId",
content="icon-resize-small bigger-font")
group2.add_dropdown("Collapse Depth", button_content="icon-sort-number-up",
uid="idCollapseDepthDiv")
group3 = toolbar.add_button_group()
group3.add_button("Clear Arrows and Connections", uid="clearArrowsAndConnectsButtonId",
content="icon-eraser")
group3.add_button("Show Path", uid="showCurrentPathButtonId", content="icon-terminal")
group3.add_button("Show Legend", uid="showLegendButtonId", content="icon-map-signs")
group3.add_button("Toggle Solver Names", uid="toggleSolverNamesButtonId", content="icon-minus")
group3.add_dropdown("Font Size", id_naming="idFontSize", options=_FONT_SIZES,
option_formatter=lambda x: '{}px'.format(x),
button_content="icon-text-height")
group3.add_dropdown("Vertically Resize", id_naming="idVerticalResize",
options=_MODEL_HEIGHTS, option_formatter=lambda x: '{}px'.format(x),
button_content="icon-resize-vertical", header="Model Height")
group4 = toolbar.add_button_group()
group4.add_button("Save SVG", uid="saveSvgButtonId", content="icon-floppy")
group5 = toolbar.add_button_group()
group5.add_button("Help", uid="helpButtonId", content="icon-help")
# Help
help_txt = ('Left clicking on a node in the partition tree will navigate to that node. '
'Right clicking on a node in the model hierarchy will collapse/uncollapse it. '
'A click on any element in the N^2 diagram will allow those arrows to persist.')
h.add_help(help_txt, footer="OpenMDAO Model Hierarchy and N^2 diagram")
# Write output file
h.write(outfile)
# open it up in the browser
if show_browser:
from openmdao.utils.webview import webview
webview(outfile)
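# A hedged usage sketch: build and set up an OpenMDAO Problem, then write the
# diagram without opening a browser (the model below is illustrative only):
#   import openmdao.api as om
#   prob = om.Problem()
#   prob.model.add_subsystem('comp', om.ExecComp('y = 2 * x'))
#   prob.setup()
#   n2(prob, outfile='n2.html', show_browser=False)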
| 22,089
|
def drefrrefsC(dref:DRef, context:Context, S=None)->Iterable[RRef]:
""" Iterate over realizations of a derivation `dref` that match the specified
[context](#pylightnix.types.Context). Sorting order is unspecified. """
for rref in drefrrefs(dref,S):
context2=rrefctx(rref,S)
if context_eq(context,context2):
yield rref
| 22,090
|
def test_bullet_glyphs(font_fixture):
"""Test that rendering of unicode glyphs works."""
font_fixture.create_window()
font_fixture.load_font(
size=60
)
font_fixture.create_label(
text=u'\u2022'*5,
)
font_fixture.ask_question(
'You should see 5 bullet glyphs.'
)
| 22,091
|
def remove_macros(xml_tree: etree._ElementTree) -> etree._ElementTree:
"""Removes the macros section from the tool tree.
Args:
xml_tree (etree._ElementTree): The tool element tree.
Returns:
etree.ElementTree: The tool element tree without the macros section.
"""
to_remove = []
for macros_el in xml_tree.getroot().findall("macros"):
to_remove.append(macros_el)
for macros_el in to_remove:
xml_tree.getroot().remove(macros_el)
return xml_tree
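# A minimal usage sketch with lxml (etree is already required by the type hints):
def _example_remove_macros():
    tool = etree.ElementTree(etree.fromstring(
        b"<tool><macros><import>macros.xml</import></macros><command/></tool>"))
    cleaned = remove_macros(tool)
    return cleaned.getroot().findall("macros")  # expected []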
| 22,092
|
def change_wireframe_mode():
"""
Change the display mode for the unit cell box.
Equivalent to pressing the `i` key.
"""
gcv().change_wireframe_mode()
| 22,093
|
def flight_time_movies_2_binary_search(movie_lengths, flight_length):
"""
Solution: Sort the list of movies, then iterate it, conducting a binary
search on each item for different item, when added together, equals the
flight length.
Complexity:
        Time: O(n^2) overall - sorting is O(n * lg{n}), but each of the n iterations
            builds an O(n) list slice before its O(lg{n}) binary search.
        Space: O(n) for the per-iteration copy of the movie list.
"""
if len(movie_lengths) < 2:
raise ValueError('movie length list must be at least 2 items long')
# Sort the movies first: Time: O(n * lg{n})
movie_lengths.sort()
# For each movie length
for index, movie_length_first in enumerate(movie_lengths):
# Conduct a binary search on movie_lengths: O(lg{n}) time
target_length = flight_length - movie_length_first
        movie_lengths_sub = (movie_lengths[:index]
                             + movie_lengths[index + 1:])
if binary_search(target=target_length, nums=movie_lengths_sub):
return True
return False
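# A hedged call sketch; `binary_search(target, nums)` is assumed to be defined
# elsewhere in the module and to return True when `target` occurs in sorted `nums`:
#   flight_time_movies_2_binary_search([80, 95, 120, 110], 200)   # True (80 + 120)
#   flight_time_movies_2_binary_search([80, 95, 120, 110], 1000)  # False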
| 22,094
|
def section9():
"""
## Update Label Features
"""
| 22,095
|
def find_vertical_bounds(hp, T):
"""
Finds the upper and lower bounds of the characters' zone on the plate based on threshold value T
:param hp: horizontal projection (axis=1) of the plate image pixel intensities
:param T: Threshold value for bound detection
:return: upper and lower bounds
"""
N = len(hp)
# Find lower bound
i = 0
    while not ((hp[i] <= T) and (hp[i+1] > T)) and (i < int(N/2)):
        i += 1
    lower_bound = 0 if i == int(N/2) else i
    # Find upper bound
    i = N-1
    while not ((hp[i-1] > T) and (hp[i] <= T)) and (i > int(N/2)):
i -= 1
upper_bound = i
return [lower_bound, upper_bound]
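# A minimal usage sketch, assuming numpy is available as np; `hp` stands in for
# the horizontal projection of a binarized plate image:
def _example_vertical_bounds():
    hp = np.array([0, 1, 0, 12, 15, 14, 13, 0, 1, 0])
    return find_vertical_bounds(hp, T=2)  # expected [2, 7]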
| 22,096
|
def _rrd_update(self, timestamp, **kwargs):
"""
Updates a RRD file with the given samples. This implements the
rrdupdate_ command.
:param timestamp: Is either an integer or string containing the
number of seconds since the epoch or a
:py:class:`datetime` object.
:param kwargs: This is a dictionary were the key is the name of
a datasource (i.e. the name of the field of the
defined class) and the value, the value for the sample.
Not specified datasources will automatically assume
the value 'U' for unknown.
:raises: :py:class:`thrush.rrd.RRDError`
*Example*: Consider a class ``MyRRD`` that has two datasources
ds1 and ds2.
.. sourcecode:: python
class MyRRD(rrd.RRD):
ds1 = rrd.Gauge(heartbeat=600)
                ds2 = rrd.Counter(heartbeat=600)
rra1 = rrd.Max(xff=0.5, steps=1, rows=24)
myrrd = MyRRD("my.rrd")
myrrd.update(1234, ds1=5.4, ds2=3)
myrrd.update(5678, ds2=4)
These updates will be converted in the following ``rrdupdate``
executions.
.. sourcecode:: bash
rrdupdate my.rrd -t ds1:ds2 1234:5.4:3
rrdupdate my.rrd -t ds1:ds2 5678:U:4
.. _rrdupdate: http://oss.oetiker.ch/rrdtool/doc/rrdupdate.en.html
"""
options = ["--template", ":".join(self._meta['datasources_list']), "--"]
data = [_convert_to_timestamp(timestamp)]
data += [
"U" if not ds in kwargs else str(kwargs[ds])
for ds in self._meta['datasources_list']
]
options += [":".join(data)]
stdout = self._meta['implementation'](self.filename, "update", options)
| 22,097
|
def populate_agrument_parser(ap) -> None:
"""
Populates an argparse.ArgumentParser or an subcommand argument parser
"""
pass
| 22,098
|
def square_crop_and_resize(image, size):
"""Crops an image to be square by removing pixels evenly from both sides of
the longest side of the image. Then the image is resized to the desired
size"""
# Calculate how much longer the longest side is than the shortest.
extra = max(image.width, image.height) - min(image.width, image.height)
# Remove pixels evenly from the left or top.
rem_lt = extra // 2
# Remove pixels evenly from the right or bottom. We may need to take
# another single pixel from one side if there is an uneven number of pixels
# to split between the two sides.
rem_rb = rem_lt + extra % 2
# Crop the image centered so the image is square.
if image.width > image.height:
image.crop(rem_lt, 0, image.width - rem_rb - 1, image.height - 1)
else:
image.crop(0, rem_lt, image.width - 1, image.height - rem_rb - 1)
assert image.width == image.height
image.resize(size, size)
| 22,099
|