content (string, lengths 22–815k) | id (int64, 0–4.91M)
|---|---|
def get_point_callback(event, x, y, flags, param):
"""
    cv2 mouse callback function: record the clicked point on a middle-button press.
"""
global points
if event == cv2.EVENT_MBUTTONDOWN:
points.append([x, y])
| 24,700
|
def make_optimiser_form(optimiser):
"""Make a child form for the optimisation settings.
:param optimiser: the Optimiser instance
:returns: a subclass of FlaskForm; NB not an instance!
"""
# This sets up the initial form with the optimiser's parameters
OptimiserForm = make_component_form(optimiser)
# Now add options for specifying objectives
OptimiserForm.obj_min_A = BooleanField('Minimise A', default=True)
OptimiserForm.obj_min_sigma_varA = BooleanField('Minimise variance in A')
OptimiserForm.obj_min_B = BooleanField('Minimise B')
OptimiserForm.obj_max_C = BooleanField('Maximise C')
# Options saying which variables to optimise
OptimiserForm.var_bool_param = BooleanField(
'Optimise the choice of a binary option',
default=True)
OptimiserForm.var_int_param = BooleanField('Optimise the range of an integer',
default=True)
return OptimiserForm
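# Usage sketch (hedged): `optimiser` and make_component_form() come from the
# surrounding application and are not shown here. The factory returns a
# FlaskForm *subclass*, so it still has to be instantiated (inside a request
# context) before rendering or validating:
# OptimiserForm = make_optimiser_form(optimiser)
# form = OptimiserForm()
# if form.validate_on_submit():
#     minimise_A = form.obj_min_A.data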
| 24,701
|
def prepare_for_evaluate(test_images, test_label):
    """
    Preprocess and return the images and labels for testing.
    :param test_images: original images for testing
    :param test_label: original labels for testing
    :return: preprocessed images and preprocessed labels
    """
    test_d = np.stack([preprocessing_for_testing(test_images[i])
                       for i in range(test_images.shape[0])], axis=0)
    test_new_image, test_new_label = test_d, test_label
    # Shuffle 20 times, reusing the previously shuffled arrays each round
    for _ in range(20):
        test_new_image, test_new_label = shuffle(test_new_image, test_new_label,
                                                 random_state=randint(0, test_images.shape[0]))
    return test_new_image, test_new_label
| 24,702
|
def ab_group_to_dict(group):
"""Convert ABGroup to Python dict. Return None if group is empty."""
d = {'name': '', 'emails': [], 'is_group': True, 'is_company': False}
d['name'] = group.valueForProperty_(AB.kABGroupNameProperty)
for person in group.members():
identifier = group.distributionIdentifierForProperty_person_(
AB.kABEmailProperty, person)
if identifier:
emails = person.valueForProperty_(AB.kABEmailProperty)
email = emails.valueAtIndex_(
emails.indexForIdentifier_(identifier))
# log.debug('{} is in group {}'.format(email, d['name']))
d['emails'].append(email)
if not len(d['emails']):
return None
return d
| 24,703
|
async def async_setup(hass, config):
"""Set up the PEVC modbus component."""
hass.data[DOMAIN] = {}
return True
| 24,704
|
def process_attribute_fields(sender, instance, created, **kwargs):
"""This function is attached to each :class:`Entity` subclass's post_save signal. Any :class:`Attribute`\ s managed by :class:`AttributeProxyField`\ s which have been removed will be deleted, and any new attributes will be created."""
if ATTRIBUTE_REGISTRY in instance.__dict__:
registry = instance.__dict__[ATTRIBUTE_REGISTRY]
instance.attribute_set.filter(key__in=[field.attribute_key for field in registry['removed']]).delete()
for field in registry['added']:
# TODO: Should this perhaps just use instance.attributes[field.attribute_key] = getattr(instance, field.name, None)?
# (Would eliminate the need for field.value_class.)
try:
attribute = instance.attribute_set.get(key=field.attribute_key)
except Attribute.DoesNotExist:
attribute = Attribute()
attribute.entity = instance
attribute.key = field.attribute_key
attribute.set_value(value=getattr(instance, field.name, None), value_class=field.value_class)
del instance.__dict__[ATTRIBUTE_REGISTRY]
| 24,705
|
def write_line_test_results(result_dict_outflows,
comp_dict_outflows,
result_dict_no_outflows,
comp_dict_no_outflows,
fit_mask,
run_dir,
binnum=None,
spaxelx=None,
spaxely=None):
"""
    Writes the results of outflow testing. Creates FITS tables for
    the best-fit parameters and best-fit components for each of the
    outflow and no-outflow test results.
"""
#
#
# Write Outflow model FITS tables
# Extract elements from dictionaries
par_names = []
par_best = []
sig = []
for key in result_dict_outflows:
par_names.append(key)
par_best.append(result_dict_outflows[key]['med'])
sig.append(result_dict_outflows[key]['std'])
if 0:
for i in range(0,len(par_names),1):
print(par_names[i],par_best[i],sig[i])
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit' , format='E' , array=par_best)
col3 = fits.Column(name='sigma' , format='E' , array=sig)
cols = fits.ColDefs([col1,col2,col3])
hdu = fits.BinTableHDU.from_columns(cols)
hdr = fits.PrimaryHDU()
hdul = fits.HDUList([hdr, hdu])
if binnum is not None:
hdr.header.append(('BINNUM', binnum, 'bin index of the spaxel (Voronoi)'), end=True)
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdul.append(hdu2)
hdul.writeto(run_dir.joinpath('log/line_par_table.fits'),overwrite=True)
# Write best-fit components to FITS file
cols = []
# Construct a column for each parameter and chain
for key in comp_dict_outflows:
cols.append(fits.Column(name=key, format='E', array=comp_dict_outflows[key]))
# Add fit mask to cols
cols.append(fits.Column(name="MASK", format='E', array=fit_mask))
# Write to fits
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdr = fits.PrimaryHDU()
hdul = fits.HDUList([hdr, hdu])
if binnum is not None:
hdr.header.append(('BINNUM', binnum, 'bin index of the spaxel (Voronoi)'), end=True)
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdul.append(hdu2)
hdul.writeto(run_dir.joinpath('log/line_best_model_components.fits'),overwrite=True)
#
#
# Write No-outflow model FITS tables
par_names = []
par_best = []
sig = []
for key in result_dict_no_outflows:
par_names.append(key)
par_best.append(result_dict_no_outflows[key]['med'])
sig.append(result_dict_no_outflows[key]['std'])
if 0:
for i in range(0,len(par_names),1):
print(par_names[i],par_best[i],sig[i])
# Write best-fit parameters to FITS table
col1 = fits.Column(name='parameter', format='30A', array=par_names)
col2 = fits.Column(name='best_fit' , format='E' , array=par_best)
col3 = fits.Column(name='sigma' , format='E' , array=sig)
cols = fits.ColDefs([col1,col2,col3])
hdu = fits.BinTableHDU.from_columns(cols)
hdr = fits.PrimaryHDU()
hdul = fits.HDUList([hdr, hdu])
if binnum is not None:
hdr.header.append(('BINNUM', binnum, 'bin index of the spaxel (Voronoi)'), end=True)
if spaxelx is not None and spaxely is not None:
hdu2 = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array=spaxelx, format='E'),
fits.Column(name='spaxely', array=spaxely, format='E')
]))
hdul.append(hdu2)
hdul.writeto(run_dir.joinpath('log/no_line_par_table.fits'),overwrite=True)
# Write best-fit components to FITS file
cols = []
# Construct a column for each parameter and chain
for key in comp_dict_no_outflows:
cols.append(fits.Column(name=key, format='E', array=comp_dict_no_outflows[key]))
# Add fit mask to cols
cols.append(fits.Column(name="MASK", format='E', array=fit_mask))
# Write to fits
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.writeto(run_dir.joinpath('log', 'no_line_best_model_components.fits'),overwrite=True)
#
return
| 24,706
|
def deserialize_value(val: str) -> Any:
"""Deserialize a json encoded string in to its original value"""
return _unpack_value(
seven.json.loads(check.str_param(val, "val")),
whitelist_map=_WHITELIST_MAP,
descent_path="",
)
| 24,707
|
def gen_signature(priv_path, pub_path, sign_path, passphrase=None):
    """
    creates a signature for the given public key with
    the given private key and writes it to sign_path
    """
    if os.path.isfile(sign_path):
        log.trace(
            "Signature file %s already exists, please remove it first and try again",
            sign_path,
        )
        return False
    with salt.utils.files.fopen(pub_path) as fp_:
        mpub_64 = fp_.read()
    log.trace(
        "Calculating signature for %s with %s",
        os.path.basename(pub_path),
        os.path.basename(priv_path),
    )
    mpub_sig = sign_message(priv_path, mpub_64, passphrase)
    mpub_sig_64 = binascii.b2a_base64(mpub_sig)
    with salt.utils.files.fopen(sign_path, "wb+") as sig_f:
        sig_f.write(salt.utils.stringutils.to_bytes(mpub_sig_64))
    log.trace("Wrote signature to %s", sign_path)
    return True
| 24,708
|
def stringify(value):
"""
    PHPCS uses comma-separated strings in many places;
    because of how it handles options, we have to do unpleasant things
    with string concatenation.
"""
if isinstance(value, six.string_types):
return value
if isinstance(value, collections.Iterable):
return ','.join(value)
return str(value)
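# Quick illustration of stringify() above (results shown are what the current
# implementation returns; assumes `six` and `collections` are imported as the
# snippet expects):
# stringify("a,b")       -> 'a,b'   (strings pass through unchanged)
# stringify(["a", "b"])  -> 'a,b'   (other iterables are comma-joined)
# stringify(3)           -> '3'     (anything else is str()-ed)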
| 24,709
|
def _DISABLED_test_flash_obround():
"""Umaco example a simple obround flash with and without a hole"""
_test_render('resources/example_flash_obround.gbr', 'golden/example_flash_obround.png')
| 24,710
|
def read_requirements_file(path):
    """ reads requirements.txt file """
    requires = []
    with open(path) as f:
        for line in f.readlines():
            # skip blank lines (readlines keeps the trailing newline)
            if not line.strip():
                continue
            requires.append(line.strip())
    return requires
| 24,711
|
def vsa_get_all(context):
"""
Get all Virtual Storage Array records.
"""
session = get_session()
return session.query(models.VirtualStorageArray).\
options(joinedload('vsa_instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
| 24,712
|
def scamp(filename, config, accuracy=0.5, itermax=10,
band=None, useweight=False, CheckPlot=False,
verbose="NORMAL"):
"""Compute astrometric solution of astronomical image using scamp"""
path = os.path.dirname(filename)
imagelist = np.atleast_1d(filename)
for ima in imagelist:
print("Performing astrometric calibration on %s using SCAMP." % ima)
# print ('You required astrometric precision of %.3f arcsec.' % accuracy)
root = os.path.splitext(ima)[0]
_name = root.split("/")[-1]
if CheckPlot:
plot_fmt = "PNG"
plotnames = (
"%s_fgroups,%s_distort,%s_astr_interror2d,%s_astr_interror1d,%s_astr_referror2d,%s_astr_referror1d,%s_astr_chi2,%s_psphot_error"
% (root, root, root, root, root, root, root, root)
)
plottypes = "FGROUPS,DISTORTION,ASTR_INTERROR2D,ASTR_INTERROR1D,ASTR_REFERROR2D,ASTR_REFERROR1D,ASTR_CHI2,PHOT_ERROR"
else:
plot_fmt = "NULL"
plotnames = " "
plottypes = " "
if config["telescope"] == "PS1" and band is not None:
astref_band = band
else:
astref_band = "DEFAULT"
# Initialise the while loop
i = 0
# Dummy offset
mean_offset = 100
while (mean_offset >= accuracy) and (i <= itermax):
i += 1
# Create catalog using sextractor
# print ('Create FITS-LDAC file from SExtractor')
subprocess.call(
[
"sex",
"-c", config["scamp"]["sextractor"],
"-PARAMETERS_NAME", config["scamp"]["param"],
"-VERBOSE_TYPE", verbose,
"-FILTER_NAME", config['sextractor']['convFilter'],
ima,
]
)
# Run SCAMP
subprocess.call(
[
"scamp",
"prepscamp.cat",
"-c", config["scamp"]["conf"],
"-ASTREF_BAND", astref_band,
"-CHECKPLOT_DEV", plot_fmt,
"-CHECKPLOT_NAME", plotnames,
"-CHECKPLOT_TYPE", plottypes,
"-VERBOSE_TYPE", verbose,
]
)
# Check astrometry offset
with open("scamp.xml") as fd:
doc = xmltodict.parse(fd.read())
"""
offset = doc['VOTABLE']['RESOURCE']['RESOURCE']['TABLE'][0]['DATA']['TABLEDATA']['TR']['TD'][34]
offset = offset.split(' ')
offset_axis1 = float(offset[0])
offset_axis2 = float(offset[1])
"""
header = header_from_string("prepscamp.head")
offset_axis1 = float(header["ASTRRMS1"] * 3600)
offset_axis2 = float(header["ASTRRMS2"] * 3600)
mean_offset = np.mean([offset_axis1, offset_axis2])
print(
"Astrometric precision after run %d: %.2f arcseconds. Required: %.2f."
% (i, mean_offset, accuracy)
)
pixelscale = doc["VOTABLE"]["RESOURCE"]["RESOURCE"]["TABLE"][0]["DATA"][
"TABLEDATA"
]["TR"]["TD"][18].split(" ")
pixelscale = [float(pixelscale[0]) / 3600,
float(pixelscale[1]) / 3600]
# Update header of input fits file
update_headers_scamp(ima, "prepscamp.head", pixelscale)
# cp_p('prepscamp.cat', _name.split('.')[0]+'.cat')
# Delete temporary files
clean_tmp_files(ima, soft="scamp")
print("\n")
| 24,713
|
def find_files_match_names_across_dirs(list_path_pattern, drop_none=True):
""" walk over dir with images and segmentation and pair those with the same
name and if the folder with centers exists also add to each par a center
.. note:: returns just paths
:param list(str) list_path_pattern: list of paths with image name patterns
:param bool drop_none: drop if there are some none - missing values in rows
:return: DF<path_1, path_2, ...>
>>> def _mp(d, n):
... return os.path.join(update_path('data_images'),
... 'drosophila_ovary_slice', d, n)
>>> df = find_files_match_names_across_dirs([_mp('image', '*.jpg'),
... _mp('segm', '*.png'),
... _mp('center_levels', '*.csv')])
>>> len(df) > 0
True
>>> df.columns.tolist()
['path_1', 'path_2', 'path_3']
>>> df = find_files_match_names_across_dirs([_mp('image', '*.png'),
... _mp('segm', '*.jpg'),
... _mp('center_levels', '*.csv')])
>>> len(df)
0
"""
list_path_pattern = [pp for pp in list_path_pattern if pp is not None]
assert len(list_path_pattern) > 1, 'at least 2 paths required'
for p in list_path_pattern:
assert os.path.exists(os.path.dirname(p)), \
'missing "%s"' % os.path.dirname(p)
def _get_name(path, pattern='*'):
name = os.path.splitext(os.path.basename(path))[0]
for s in pattern.split('*'):
name = name.replace(s, '')
return name
def _get_paths_names(path_pattern):
paths_ = glob.glob(path_pattern)
if not paths_:
return [None], [None]
names_ = [_get_name(p, os.path.basename(path_pattern)) for p in paths_]
return paths_, names_
logging.info('find match files...')
paths_0, names_0 = _get_paths_names(list_path_pattern[0])
list_paths = [paths_0]
for path_pattern_n in list_path_pattern[1:]:
paths_n = [None] * len(paths_0)
name_pattern = os.path.basename(path_pattern_n)
list_files = glob.glob(path_pattern_n)
logging.debug('found %i files in %s', len(list_files), path_pattern_n)
for path_n in list_files:
name_n = _get_name(path_n, name_pattern)
if name_n in names_0:
idx = names_0.index(name_n)
paths_n[idx] = path_n
list_paths.append(paths_n)
col_names = ['path_%i' % (i + 1) for i in range(len(list_paths))]
df_paths = pd.DataFrame(list(zip(*list_paths)), columns=col_names)
# filter None
if drop_none:
df_paths.dropna(inplace=True)
return df_paths
| 24,714
|
def thread_it(obj, timeout=10):
    """ General function to handle threading for the physical components of the system. """
    # Pass the bound method itself; calling obj.run() here would run it in this thread
    thread = threading.Thread(target=obj.run)
    thread.start()
    # Wait for the component to signal readiness (or give up after `timeout` seconds)
    obj.ready.wait(timeout=timeout)
    # Clean up
    thread.join()
    obj.ready.clear()
    return None
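# Minimal sketch of the contract thread_it() expects from `obj` (names here are
# hypothetical): a run() method that does the work and a `ready` threading.Event
# that run() sets once the component is up.
import threading

class _DummyComponent:
    def __init__(self):
        self.ready = threading.Event()

    def run(self):
        # ... bring the physical component up, then signal readiness ...
        self.ready.set()

# thread_it(_DummyComponent(), timeout=5)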
| 24,715
|
def _subsize_sub_pixel_align_cy_ims(pixel_aligned_cy_ims, subsize, n_samples):
"""
The inner loop of _sub_pixel_align_cy_ims() that executes on a "subsize"
region of the larger image.
    If subsize is None then the entire image is used.
"""
n_max_failures = n_samples * 2
sub_pixel_offsets = np.zeros((n_samples, pixel_aligned_cy_ims.shape[0], 2))
pixel_aligned_cy0_im = pixel_aligned_cy_ims[0]
im_mea = pixel_aligned_cy_ims.shape[-1]
assert pixel_aligned_cy_ims.shape[-2] == im_mea
def _subregion(im, pos):
if subsize is None:
return im
else:
return imops.crop(im, off=pos, dim=WH(subsize, subsize), center=False)
sample_i = 0
n_failures = 0
while sample_i < n_samples and n_failures < n_max_failures:
try:
if subsize is None:
pos = XY(0, 0)
else:
pos = XY(
np.random.randint(0, im_mea - subsize - 16),
np.random.randint(0, im_mea - subsize - 16),
)
subregion_pixel_aligned_cy0_im = _subregion(pixel_aligned_cy0_im, pos)
for cy_i, pixel_aligned_cy_im in enumerate(pixel_aligned_cy_ims):
if cy_i == 0:
continue
# Use a small region to improve speed
subregion_pixel_aligned_cy_im = _subregion(pixel_aligned_cy_im, pos)
try:
_dy, _dx = _subpixel_align_one_im(
subregion_pixel_aligned_cy0_im, subregion_pixel_aligned_cy_im,
)
sub_pixel_offsets[sample_i, cy_i, :] = (_dy, _dx)
except Exception:
# This is a general exception handler because there
# are a number of ways that the _subpixel_align_one_im
                    # can fail, including linear algebra errors, etc. All
# of which end up with a skip and a retry.
n_failures += 1
raise AlignmentError
sample_i += 1
except AlignmentError:
# Try again with a new pos
if n_failures >= n_max_failures:
raise AlignmentError
return np.mean(sub_pixel_offsets, axis=0)
| 24,716
|
def obj_setclass(this, klass):
"""
set Class for `this`!!
"""
return this.setclass(klass)
| 24,717
|
def format(number, separator=' ', format=None, add_check_digit=False):
"""Reformat the number to the standard presentation format. The separator
used can be provided. If the format is specified (either 'hex' or 'dec')
the number is reformatted in that format, otherwise the current
representation is kept. If add_check_digit is True a check digit will be
added if it is not present yet."""
# first parse the number
number, cd = _parse(number)
# format conversions if needed
if format == 'dec' and len(number) == 14:
# convert to decimal
number = '%010d%08d' % (int(number[0:8], 16), int(number[8:14], 16))
if cd:
cd = calc_check_digit(number)
elif format == 'hex' and len(number) == 18:
# convert to hex
number = '%08X%06X' % (int(number[0:10]), int(number[10:18]))
if cd:
cd = calc_check_digit(number)
# see if we need to add a check digit
if add_check_digit and not cd:
cd = calc_check_digit(number)
# split number according to format
if len(number) == 14:
number = [number[i * 2:i * 2 + 2]
for i in range(7)] + [cd]
else:
number = (number[:5], number[5:10], number[10:14], number[14:], cd)
return separator.join(x for x in number if x)
| 24,718
|
def parse_equal_statement(line):
"""Parse super-sequence statements"""
seq_names = line.split()[1:]
return seq_names
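# Quick illustration (the 'equal' keyword below is a hypothetical example line;
# only the tokens after the first one are returned):
# parse_equal_statement("equal seqA seqB")  -> ['seqA', 'seqB']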
| 24,719
|
def rect(*args, **kwargs): # real signature unknown
""" Convert from polar coordinates to rectangular coordinates. """
pass
| 24,720
|
def B5(n):
"""Factor Variables B5."""
return np.maximum(0, c4(n) - 3 * np.sqrt(1 - c4(n) ** 2))
| 24,721
|
def test_mnist_dataset():
"""Test case for MNIST Dataset.
"""
mnist_filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_mnist",
"mnist.npz")
with np.load(mnist_filename) as f:
(x_test, y_test) = f['x_test'], f['y_test']
image_filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_mnist",
"t10k-images-idx3-ubyte.gz")
label_filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_mnist",
"t10k-labels-idx1-ubyte.gz")
image_dataset = mnist_io.MNISTImageDataset(image_filename)
label_dataset = mnist_io.MNISTLabelDataset(label_filename)
i = 0
for m_x in image_dataset:
v_x = x_test[i]
assert np.alltrue(v_x == m_x.numpy())
i += 1
assert i == len(y_test)
i = 0
for m_y in label_dataset:
v_y = y_test[i]
assert np.alltrue(v_y == m_y.numpy())
i += 1
assert i == len(y_test)
dataset = mnist_io.MNISTDataset(
image_filename, label_filename)
i = 0
for (m_x, m_y) in dataset:
v_x = x_test[i]
v_y = y_test[i]
assert np.alltrue(v_y == m_y.numpy())
assert np.alltrue(v_x == m_x.numpy())
i += 1
assert i == len(y_test)
| 24,722
|
def y_yhat_plots(y, yh, title="y and y_score", y_thresh=0.5):
"""Output plots showing how y and y_hat are related:
the "confusion dots" plot is analogous to the confusion table,
and the standard ROC plot with its AOC value.
The y=1 threshold can be changed with the y_thresh parameter.
"""
# The predicted y value with threshold = y_thresh
y_pred = 1.0 * (yh > y_thresh)
# Show table of actual and predicted counts
crosstab = pd.crosstab(y, y_pred, rownames=[
'Actual'], colnames=[' Predicted'])
print("\nConfusion matrix (y_thresh={:.3f}):\n\n".format(y_thresh),
crosstab)
# Calculate the various metrics and rates
tn = crosstab[0][0]
fp = crosstab[1][0]
fn = crosstab[0][1]
tp = crosstab[1][1]
##print(" tn =",tn)
##print(" fp =",fp)
##print(" fn =",fn)
##print(" tp =",tp)
this_fpr = fp / (fp + tn)
this_fnr = fn / (fn + tp)
this_recall = tp / (tp + fn)
this_precision = tp / (tp + fp)
this_accur = (tp + tn) / (tp + fn + fp + tn)
this_posfrac = (tp + fn) / (tp + fn + fp + tn)
print("\nResults:\n")
print(" False Pos = ", 100.0 * this_fpr, "%")
print(" False Neg = ", 100.0 * this_fnr, "%")
print(" Recall = ", 100.0 * this_recall, "%")
print(" Precision = ", 100.0 * this_precision, "%")
print("\n Accuracy = ", 100.0 * this_accur, "%")
print(" Pos. fract. = ", 100.0 * this_posfrac, "%")
# Put them in a dataframe
ysframe = pd.DataFrame([y, yh, y_pred], index=[
'y', 'y-hat', 'y-pred']).transpose()
# If the yh is discrete (0 and 1s only) then blur it a bit
# for a better visual dots plot
if min(abs(yh - 0.5)) > 0.49:
ysframe["y-hat"] = (0.51 * ysframe["y-hat"]
+ 0.49 * np.random.rand(len(yh)))
# Make a "confusion dots" plot
# Add a blurred y column
ysframe['y (blurred)'] = y + 0.1 * np.random.randn(len(y))
# Plot the real y (blurred) vs the predicted probability
# Note the flipped ylim values.
ysframe.plot.scatter('y-hat', 'y (blurred)', figsize=(12, 5),
s=2, xlim=(0.0, 1.0), ylim=(1.8, -0.8))
# show the "correct" locations on the plot
plt.plot([0.0, y_thresh], [0.0, 0.0], '-',
color='green', linewidth=5)
plt.plot([y_thresh, y_thresh], [0.0, 1.0], '-',
color='gray', linewidth=2)
plt.plot([y_thresh, 1.0], [1.0, 1.0], '-',
color='green', linewidth=5)
plt.title("Confusion-dots Plot: " + title, fontsize=16)
# some labels
ythr2 = y_thresh/2.0
plt.text(ythr2 - 0.03, 1.52, "FN", fontsize=16, color='red')
plt.text(ythr2 + 0.5 - 0.03, 1.52, "TP", fontsize=16, color='green')
plt.text(ythr2 - 0.03, -0.50, "TN", fontsize=16, color='green')
plt.text(ythr2 + 0.5 - 0.03, -0.50, "FP", fontsize=16, color='red')
plt.show()
# Make the ROC curve
# Set the y-hat as the index and sort on it
ysframe = ysframe.set_index('y-hat').sort_index()
# Put y-hat back as a column (but the sorting remains)
ysframe = ysframe.reset_index()
# Initialize the counts for threshold = 0
p_thresh = 0
FN = 0
TN = 0
TP = sum(ysframe['y'])
FP = len(ysframe) - TP
# Assemble the fpr and recall values
recall = []
fpr = []
# Go through each sample in y-hat order,
# advancing the threshold and adjusting the counts
for iprob in range(len(ysframe['y-hat'])):
p_thresh = ysframe.iloc[iprob]['y-hat']
if ysframe.iloc[iprob]['y'] == 0:
FP -= 1
TN += 1
else:
TP -= 1
FN += 1
# Recall and FPR:
recall.append(TP / (TP + FN))
fpr.append(FP / (FP + TN))
# Put recall and fpr in the dataframe
ysframe['Recall'] = recall
ysframe['FPR'] = fpr
# - - - ROC - - - could be separate routine
zoom_in = False
# Calculate the area under the ROC
roc_area = 0.0
for ifpr in range(1, len(fpr)):
# add on the bit of area (note sign change, going from high fpr to low)
roc_area += 0.5 * (recall[ifpr] + recall[ifpr - 1]
) * (fpr[ifpr - 1] - fpr[ifpr])
plt.figure(figsize=(8, 8))
plt.title("ROC: " + title, size=16)
plt.plot(fpr, recall, '-b')
# Set the scales
if zoom_in:
plt.xlim(0.0, 0.10)
plt.ylim(0.0, 0.50)
else:
# full range:
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
# The reference line
plt.plot([0., 1.], [0., 1.], '--', color='orange')
    # The point at the y_hat = y_thresh threshold
    if True:
        plt.plot([this_fpr], [this_recall], 'o', c='blue', markersize=15)
    plt.xlabel('False Positive Rate', size=16)
plt.ylabel('Recall', size=16)
plt.annotate('y_hat = {:.2f}'.format(y_thresh),
xy=(this_fpr + 0.015,
this_recall), size=14, color='blue')
plt.annotate(' Pos.Fraction = ' +
' {:.0f}%'.format(100 * this_posfrac),
xy=(this_fpr + 0.02, this_recall - 0.03),
size=14, color='blue')
# Show the ROC area (shows on zoomed-out plot)
plt.annotate('ROC Area = ' + str(roc_area)
[:5], xy=(0.4, 0.1), size=16, color='blue')
# Show the plot
plt.show()
return ysframe
| 24,723
|
def _derive_scores(model, txt_file, base_words):
"""
Takes a model, a text file, and a list of base words.
Returns a dict of {base_word: score}, where score is an integer between 0
and 100 which represents the average similarity of the text to the given
word.
"""
with open(txt_file, 'r') as f:
text = f.read()
words = sample_words(text)
# This is a list of dicts of the form {base_word: score}.
raw_scores = [_single_word_score(model, base_words, word) for word in words]
summed_scores = {}
for base_word in base_words:
summed_scores[base_word] = sum([item[base_word] for item in raw_scores])
summed_scores[base_word] = round(
100 * summed_scores[base_word] / len(words)
)
return summed_scores
| 24,724
|
def verifyRRD(fix_rrd=False):
"""
Go through all known monitoring rrds and verify that they
match existing schema (could be different if an upgrade happened)
If fix_rrd is true, then also attempt to add any missing attributes.
"""
global rrd_problems_found
global monitorAggregatorConfig
mon_dir = monitorAggregatorConfig.monitor_dir
status_dict = {}
completed_stats_dict = {}
completed_waste_dict = {}
counts_dict = {}
# initialize the RRD dictionaries to match the current schema for verification
for tp in list(status_attributes.keys()):
if tp in list(type_strings.keys()):
tp_str = type_strings[tp]
attributes_tp = status_attributes[tp]
for a in attributes_tp:
status_dict[f"{tp_str}{a}"] = None
for jobrange in glideFactoryMonitoring.getAllJobRanges():
completed_stats_dict[f"JobsNr_{jobrange}"] = None
for timerange in glideFactoryMonitoring.getAllTimeRanges():
completed_stats_dict[f"Lasted_{timerange}"] = None
completed_stats_dict[f"JobsLasted_{timerange}"] = None
for jobtype in glideFactoryMonitoring.getAllJobTypes():
for timerange in glideFactoryMonitoring.getAllMillRanges():
completed_waste_dict[f"{jobtype}_{timerange}"] = None
for jobtype in ("Entered", "Exited", "Status"):
for jobstatus in ("Wait", "Idle", "Running", "Held"):
counts_dict[f"{jobtype}{jobstatus}"] = None
for jobstatus in ("Completed", "Removed"):
counts_dict["{}{}".format("Entered", jobstatus)] = None
# FROM: lib2to3.fixes.fix_ws_comma
# completed_waste_dict["%s_%s"%(jobtype, timerange)]=None
#
# for jobtype in ('Entered', 'Exited', 'Status'):
# for jobstatus in ('Wait', 'Idle', 'Running', 'Held'):
# counts_dict["%s%s"%(jobtype, jobstatus)]=None
# for jobstatus in ('Completed', 'Removed'):
# counts_dict["%s%s"%('Entered', jobstatus)]=None
#
# verifyHelper(os.path.join(total_dir,
# "Status_Attributes.rrd"), status_dict, fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Completed.rrd"),
# glideFactoryMonitoring.getLogCompletedDefaults(), fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Completed_Stats.rrd"), completed_stats_dict, fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Completed_WasteTime.rrd"), completed_waste_dict, fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Counts.rrd"), counts_dict, fix_rrd)
# for filename in os.listdir(dir):
# if filename[:6]=="entry_":
# entrydir=os.path.join(dir, filename)
# for subfilename in os.listdir(entrydir):
# if subfilename[:9]=="frontend_":
# current_dir=os.path.join(entrydir, subfilename)
# verifyHelper(os.path.join(current_dir,
# "Status_Attributes.rrd"), status_dict, fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Completed.rrd"),
# glideFactoryMonitoring.getLogCompletedDefaults(), fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Completed_Stats.rrd"), completed_stats_dict, fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Completed_WasteTime.rrd"),
# completed_waste_dict, fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Counts.rrd"), counts_dict, fix_rrd)
# return not rrd_problems_found
completed_dict = glideFactoryMonitoring.getLogCompletedDefaults()
rrdict = {
"Status_Attributes.rrd": status_dict,
"Log_Completed.rrd": completed_dict,
"Log_Completed_Stats.rrd": completed_stats_dict,
"Log_Completed_WasteTime.rrd": completed_waste_dict,
"Log_Counts.rrd": counts_dict,
}
for dir_name, sdir_name, f_list in os.walk(mon_dir):
for file_name in f_list:
if file_name in list(rrdict.keys()):
verifyHelper(os.path.join(dir_name, file_name), rrdict[file_name], fix_rrd)
return not rrd_problems_found
| 24,725
|
def features_ids_argument_parser() -> ArgumentParser:
"""
Creates a parser suitable to parse the argument describing features ids in different subparsers
"""
parser = ArgumentParser(add_help=False, parents=[collection_option_parser()])
parser.add_argument(FEATURES_IDS_ARGNAME, nargs='+',
help='features identifiers or features UUIDs')
return parser
| 24,726
|
def isolate_blue_blocks(image, area_min=10, side_ratio=0.5):
    """Return a sequence of masks on the original image showing significant blocks of blue."""
    contours, _ = cv2.findContours(
        blue(image).astype(np.uint8) * 255, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
    )
    rects = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if min(w, h) / max(w, h) > side_ratio and cv2.contourArea(c) > area_min:
            rects.append((x, y, w, h))
    # One boolean mask per retained bounding box, on the original image grid
    masks = np.zeros((len(rects),) + image.shape[:2], dtype=bool)
    for i, (x, y, w, h) in enumerate(rects):
        masks[i, y:y + h, x:x + w] = True
    return masks
| 24,727
|
def read_gtf(
filepath_or_buffer: Union[str, StringIO, Path],
expand_attribute_column: bool = True,
infer_biotype_column: bool = False,
column_converters: Optional[Dict[str, Callable[..., str]]] = None,
usecols: Optional[List[str]] = None,
features: Optional[Tuple[str]] = None,
chunksize: int = 1024 * 1024,
) -> pd.DataFrame:
"""
    Parse a GTF file into a pandas DataFrame mapping column names to sequences of values.
Parameters
----------
filepath_or_buffer : str or buffer object
Path to GTF file (may be gzip compressed) or buffer object
such as StringIO
expand_attribute_column : bool
Replace strings of semi-colon separated key-value values in the
'attribute' column with one column per distinct key, with a list of
values for each row (using None for rows where key didn't occur).
infer_biotype_column : bool
Due to the annoying ambiguity of the second GTF column across multiple
Ensembl releases, figure out if an older GTF's source column is actually
the gene_biotype or transcript_biotype.
column_converters : dict, optional
Dictionary mapping column names to conversion functions. Will replace
empty strings with None and otherwise passes them to given conversion
function.
usecols : list of str or None
        Restrict which columns are loaded to the given set. If None, then
load all columns.
features : set of str or None
Drop rows which aren't one of the features in the supplied set
chunksize : int
"""
if isinstance(filepath_or_buffer, str):
filepath_or_buffer = Path(filepath_or_buffer)
if isinstance(filepath_or_buffer, Path) and not filepath_or_buffer.exists():
logger.exception(f"GTF file does not exist: {filepath_or_buffer}")
raise FileNotFoundError
if expand_attribute_column:
result_df = parse_gtf_and_expand_attributes(
filepath_or_buffer, chunksize=chunksize, restrict_attribute_columns=usecols
)
else:
result_df = parse_gtf(
filepath_or_buffer, chunksize=chunksize, features=features
)
if column_converters:
for column_name in column_converters:
result_df[column_name] = result_df[column_name].astype(
column_converters[column_name], errors="ignore"
)
# Hackishly infer whether the values in the 'source' column of this GTF
# are actually representing a biotype by checking for the most common
# gene_biotype and transcript_biotype value 'protein_coding'
if infer_biotype_column:
unique_source_values = result_df["source"].unique()
if "protein_coding" in unique_source_values:
column_names = result_df.columns.unique()
# Disambiguate between the two biotypes by checking if
# gene_biotype is already present in another column. If it is,
# the 2nd column is the transcript_biotype (otherwise, it's the
# gene_biotype)
if "gene_biotype" not in column_names:
logger.info("Using column 'source' to replace missing 'gene_biotype'")
result_df["gene_biotype"] = result_df["source"]
if "transcript_biotype" not in column_names:
logger.info(
"Using column 'source' to replace missing 'transcript_biotype'"
)
result_df["transcript_biotype"] = result_df["source"]
if usecols is not None:
column_names = result_df.columns.unique()
valid_columns = [c for c in usecols if c in column_names]
result_df = result_df[valid_columns]
return result_df
| 24,728
|
def version_in(dirname, indexname = None):
"""Returns a tuple of (release_version, format_version), where
release_version is the release version number of the Whoosh code that
created the index -- e.g. (0, 1, 24) -- and format_version is the
version number of the on-disk format used for the index -- e.g. -102.
The second number (format version) may be useful for figuring out if you
need to recreate an index because the format has changed. However, you
can just try to open the index and see if you get an IndexVersionError
exception.
Note that the release and format version are available as attributes
on the Index object in Index.release and Index.version.
:param dirname: the file path of a directory containing an index.
:param indexname: the name of the index. If None, the default index name is used.
:returns: ((major_ver, minor_ver, build_ver), format_ver)
"""
from whoosh.filedb.filestore import FileStorage
storage = FileStorage(dirname)
return version(storage, indexname=indexname)
| 24,729
|
def extract_pz(
ctable,
suffix="SAC_PZ",
outdir=".",
keep_sensitivity=False,
filter_by_chid=None,
filter_by_name=None,
filter_by_component=None,
):
"""Extract instrumental response in SAC PZ format from channel table.
.. warning::
Only works for instrumental responses of Hi-net network.
RESP files of F-net network can be downloaded from
`F-net website <http://www.fnet.bosai.go.jp/st_info/response.php?LANG=en>`_.
Parameters
----------
ctable: str
Channel table file.
suffix: str
Suffix of SAC PZ files. Defaults to ``SAC_PZ``.
outdir: str
Output directory. Defaults to current directory.
    keep_sensitivity: bool
        win2sac automatically removes sensitivity from waveform data
        during win32 format to SAC format conversion,
        so the generated polezero file should omit the sensitivity.
    filter_by_chid: list of str or wildcard
        Filter channels by ID.
filter_by_name: list of str or wildcard
Filter channels by name.
filter_by_component: list of str or wildcard
Filter channels by component.
Examples
--------
>>> extract_pz("0101_20100101.ch")
Extract all channel with specified suffix and output directory:
>>> extract_pz("0101_20100101.ch", suffix="", outdir="20100101000")
Extract only specified channels:
>>> extract_pz(
... "0101_20100101.ch", filter_by_name="N.NA*", filter_by_component="[NE]"
... )
"""
if not ctable:
logger.error("ctable is `None'. Data requests may fail. Skipped.")
return
channels = _get_channels(ctable)
if filter_by_chid or filter_by_name or filter_by_component:
channels = _filter_channels(
channels, filter_by_chid, filter_by_name, filter_by_component
)
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
for channel in channels:
_extract_sacpz(
channel, suffix=suffix, outdir=outdir, keep_sensitivity=keep_sensitivity
)
| 24,730
|
def apply_pairs(fn, convert, *args):
"""non-public"""
if len(args) == 2:
fn([convert(args[0])], [args[1]])
else:
a1, a2 = unzip(args[0])
fn(convert(a1), list(a2))
| 24,731
|
def scrub_old_style_ceph():
"""Purge any legacy ceph configuration from install"""
# NOTE: purge old override file - no longer needed
if os.path.exists('/etc/init/cinder-volume.override'):
os.remove('/etc/init/cinder-volume.override')
# NOTE: purge any CEPH_ARGS data from /etc/environment
env_file = '/etc/environment'
ceph_match = re.compile("^CEPH_ARGS.*").search
with open(env_file, 'rt') as input_file:
with NamedTemporaryFile(mode='wt',
delete=False,
dir=os.path.dirname(env_file)) as outfile:
for line in input_file:
if not ceph_match(line):
print(line, end='', file=outfile)
os.rename(outfile.name, input_file.name)
| 24,732
|
def trimAlphaNum(value):
"""
Trims alpha numeric characters from start and ending of a given value
>>> trimAlphaNum(u'AND 1>(2+3)-- foobar')
u' 1>(2+3)-- '
"""
while value and value[-1].isalnum():
value = value[:-1]
while value and value[0].isalnum():
value = value[1:]
return value
| 24,733
|
def hrm_job_title_represent(id, row=None):
""" FK representation """
if row:
return row.name
elif not id:
return current.messages.NONE
db = current.db
table = db.hrm_job_title
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
| 24,734
|
def test_update_exception_w_obj(client):
""" Verify the Client raises an exception if update is called with unsupporte type """
with pytest.raises(TypeError) as exc:
client.update(1)
assert "support updating objects of type" in str(exc)
| 24,735
|
def is_empty_array_expr(ir: irast.Base) -> bool:
"""Return True if the given *ir* expression is an empty array expression.
"""
return (
isinstance(ir, irast.Array)
and not ir.elements
)
| 24,736
|
def get_raw_entity_names_from_annotations(annotations):
"""
Args:
        annotations: annotations of the current utterance
Returns:
Wikidata entities we received from annotations
"""
raw_el_output = annotations.get("entity_linking", [{}])
entities = []
try:
if raw_el_output:
if isinstance(raw_el_output[0], dict):
entities = raw_el_output[0].get("entity_ids", [])
if isinstance(raw_el_output[0], list):
entities = raw_el_output[0][0]
except Exception as e:
error_message = f"Wrong entity linking output format {raw_el_output} : {e}"
sentry_sdk.capture_exception(e)
logger.exception(error_message)
return entities
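# Hedged example of the annotation layout this helper expects (shape inferred
# from the code above, not from the entity-linking service's documentation):
# annotations = {"entity_linking": [{"entity_ids": ["Q76", "Q30"]}]}
# get_raw_entity_names_from_annotations(annotations)  -> ["Q76", "Q30"]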
| 24,737
|
def CheckFileStoragePathVsEnabledDiskTemplates(
logging_warn_fn, file_storage_dir, enabled_disk_templates):
"""Checks whether the given file storage directory is acceptable.
@see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}
"""
CheckFileBasedStoragePathVsEnabledDiskTemplates(
logging_warn_fn, file_storage_dir, enabled_disk_templates,
constants.DT_FILE)
| 24,738
|
def nextPara(file, line):
"""Go forward one paragraph from the specified line and return the line
number of the first line of that paragraph.
Paragraphs are delimited by blank lines. It is assumed that the
current line is standalone (which is bogus).
- file is an array of strings
- line is the starting point (zero-based)"""
maxLine = len(file) - 1
# Skip over current paragraph
while (line != maxLine and not isempty(file[line])):
line = line + 1
# Skip over white space
while (line != maxLine and isempty(file[line])):
line = line + 1
return line
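# Small worked example (assumes an isempty() helper, not shown here, that treats
# blank lines as empty):
# lines = ["para one, line 1", "para one, line 2", "", "para two"]
# nextPara(lines, 0)  -> 3   (index of the first line of the next paragraph)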
| 24,739
|
def test_generate_all(pytester, file_creator):
"""Generated testfiles are saved and not collected, then collected and tested.
Note that tests/test_example.py is collected by pytest normally
since it is not a .md file.
When running pytest on generated test files, the pytest --doctest-modules
is needed to test the Python interactive sessions.
"""
file_creator.populate_all(pytester_object=pytester)
rr = pytester.runpytest(
"-v",
"--phmdoctest-generate",
".gendir",
".",
)
assert rr.ret == pytest.ExitCode.OK
rr.assert_outcomes(passed=1)
rr.stdout.fnmatch_lines(
[
"*tests/test_example.py::test_example*",
]
)
assert Path(".gendir").exists()
assert Path(".gendir/test_doc__directive2.py").exists()
assert Path(".gendir/test_doc__project.py").exists()
assert Path(".gendir/test_README.py").exists()
assert len(list(Path(".gendir").iterdir())) == 3
# Files with no Python fenced code blocks did not get collected.
assert not Path(".gendir/test_CONTRIBUTING.py").exists()
rr2 = pytester.runpytest("tests", ".gendir", "-v", "--doctest-modules")
assert rr2.ret == pytest.ExitCode.OK
rr2.assert_outcomes(passed=10)
rr2.stdout.fnmatch_lines(
[
"*tests/test_example.py::test_example*",
"*.gendir/test_README.py::test_README.session_00001_line_24*",
"*.gendir/test_README.py::test_code_10_output_17*",
"*.gendir/test_doc__directive2.py::test_code_25_output_32*",
"*.gendir/test_doc__directive2.py::test_code_42_output_47*",
"*.gendir/test_doc__directive2.py::test_code_52_output_56*",
"*.gendir/test_doc__project.py::test_doc__project.session_00001_line_31*",
"*.gendir/test_doc__project.py::test_doc__project.session_00002_line_46 PASSED*",
"*.gendir/test_doc__project.py::test_doc__project.session_00003_line_55 PASSED*",
"*.gendir/test_doc__project.py::test_code_12_output_19*",
],
consecutive=True,
)
| 24,740
|
def build_asignar_anexos_query(filters, request):
"""
Construye el query de búsqueda a partir de los filtros.
"""
return filters.buildQuery().filter(ambito__path__istartswith=request.get_perfil().ambito.path).order_by('nombre')
| 24,741
|
def WriteResult(state, num):
"""保存结果"""
fileObject = open('results/result%04d.txt'%num, 'w')
for i in range(5,-1,-1):
for j in range(5-i):
for m in range(i):
fileObject.write(" ")
for k in range(5-i):
fileObject.write(str(state[i,j,k])+" ")
fileObject.write('\n')
fileObject.write('\n')
fileObject.write('\n')
fileObject.close()
| 24,742
|
def tang_save_features(data, labels, groundtruthfilename='100p'):
"""temp kludge
"""
[height, width, nbands] = data.shape
x = tf.placeholder(tf.float32, shape=(19,19,nbands+18))
feat = tang_net(x)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
padded_data = np.pad(data, ((9,9),(9,9),(9,9)), 'reflect')
all_pixels = np.array(list(itertools.product(range(width),range(height))))
labelled_pixels = all_pixels[:10]
print('requesting %d MB memory' % (labelled_pixels.shape[0] * 271*nbands * 4 / 1000000.0))
labelled_pix_feat = np.zeros((labelled_pixels.shape[0], 271*nbands), dtype=np.float32)
for pixel_i, pixel in enumerate(tqdm(labelled_pixels)):
# this iterates through columns first
[pixel_x, pixel_y] = pixel
subimg = padded_data[pixel_y:(pixel_y+19), pixel_x:(pixel_x+19), :]
feed_dict = {x: subimg}
labelled_pix_feat[pixel_i,:] = sess.run(feat, feed_dict)
flat_labels = labels.transpose().reshape(height*width)
trainY = flat_labels[flat_labels!=0]
print('starting training')
start = time.time()
clf = SVC(kernel='linear')
clf.fit(labelled_pix_feat, trainY)
end = time.time()
print(end - start)
# now start predicting the full image, 1 column at a time
col_feat = np.zeros((height, 271*nbands), dtype=np.float32)
pred_image = np.zeros((height,width), dtype=int)
test_flags = '-q'
for pixel_x in tqdm(range(width)):
# get feat
for pixel_y in range(height):
subimg = padded_data[pixel_y:(pixel_y+19), pixel_x:(pixel_x+19), :]
feed_dict = {x: subimg}
col_feat[pixel_y,:] = sess.run(feat, feed_dict)
# get pred for feat
# dontcare = [0] * height
        p_label = clf.predict(col_feat)
pred_image[:,pixel_x] = np.array(p_label).astype(int)
imgmatfiledata = {}
imgmatfiledata[u'imgHat'] = pred_image
imgmatfiledata[u'groundtruthfilename'] = groundtruthfilename
hdf5storage.write(imgmatfiledata, filename=groundtruthfilename+'_100p_tang_fullimg.mat', matlab_compatible=True)
print('done making img, run hundredpercent_img_figures.m')
| 24,743
|
def validate_paths(paths_A, paths_B, strict=True, keys_ds=None):
""" Validate the constructed images path lists are consistent.
Can allow using B/HR and A/LR folders with different amount of images
Parameters:
paths_A (str): the path to domain A
paths_B (str): the path to domain B
keys_ds (list): the paired 'dataroot_' properties names expected in the Dataset.
strict (bool): If strict = True, will make sure both lists only contains images
if properly paired in the other dataset, otherwise will fill missing images
paths in LR/A with 'None' to be taken care of later (ie. with on-the-fly
generation)
Examples of OTF usage:
- If an LR image pair is not found, downscale HR on the fly, else, use the LR
- If all LR are provided and 'lr_downscale' is enabled, randomize use of provided
LR and OTF LR for augmentation
"""
if keys_ds is None: keys_ds = ['LR', 'HR']
if not strict:
assert len(paths_B) >= len(paths_A), \
'{} dataset contains less images than {} dataset - {}, {}.'.format(\
keys_ds[1], keys_ds[0], len(paths_B), len(paths_A))
if len(paths_A) < len(paths_B):
print('{} contains less images than {} dataset - {}, {}. Will generate missing images on the fly.'.format(
keys_ds[0], keys_ds[1], len(paths_A), len(paths_B)))
i=0
tmp_A = []
tmp_B = []
for idx in range(0, len(paths_B)):
B_head, B_tail = os.path.split(paths_B[idx])
if i < len(paths_A):
A_head, A_tail = os.path.split(paths_A[i])
if A_tail == B_tail:
A_img_path = os.path.join(A_head, A_tail)
tmp_A.append(A_img_path)
i+=1
if strict:
B_img_path = os.path.join(B_head, B_tail)
tmp_B.append(B_img_path)
else:
if not strict:
A_img_path = None
tmp_A.append(A_img_path)
else: #if the last image is missing
if not strict:
A_img_path = None
tmp_A.append(A_img_path)
paths_A = tmp_A
paths_B = tmp_B if strict else paths_B
assert len(paths_A) == len(paths_B)
return paths_A, paths_B
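# Usage sketch with hypothetical path lists (only basenames are compared, no
# files are touched); with strict=False the missing LR/A entry is filled with None:
# paths_lr = ['lr/0001.png', 'lr/0003.png']
# paths_hr = ['hr/0001.png', 'hr/0002.png', 'hr/0003.png']
# validate_paths(paths_lr, paths_hr, strict=False, keys_ds=['LR', 'HR'])
# -> (['lr/0001.png', None, 'lr/0003.png'], ['hr/0001.png', 'hr/0002.png', 'hr/0003.png'])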
| 24,744
|
def _median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
    >>> _median([1, 3, 5])
    3
    >>> _median([1, 3, 5, 7])
    4.0
"""
data = sorted(data)
n = len(data)
if n == 0:
raise ValueError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2
| 24,745
|
def num_translate(value: str) -> str:
"""переводит числительное с английского на русский """
str_out = NUM_DICT.get(value)
return str_out
| 24,746
|
def test_code_fixture(db_test_app):
"""
Test the localhost fixture
"""
code_echo = db_test_app.code_echo
assert code_echo.uuid is not None
code_echo.get_remote_exec_path()
| 24,747
|
def send_email(self, record_id):
# pylint: disable=no-member,too-many-arguments,too-many-statements
""" send out single email """
print(f"task:working on id - {record_id}")
print(f"attempt no: {self.request.retries}")
session = None
db_session = None
record = None
try:
session = create_session()
db_session = session()
record = db_session.query(HistoryModel).filter(HistoryModel.id == record_id).one()
data = record.request
message = Mail()
#One line settings """
message.from_email = From(data['from']['email'], data['from']['name'])
message.subject = Subject(data['subject'])
if 'asm' in data.keys() and data['asm'] is not None and data['asm']['group_id'] != '':
message.asm = Asm(GroupId(data['asm']['group_id']),
GroupsToDisplay(data['asm']['groups_to_display']))
func_switcher = {
"to": HelperService.get_emails,
"cc": HelperService.get_emails,
"bcc": HelperService.get_emails,
"content": HelperService.get_content,
"attachments": HelperService.get_attachments,
"custom_args": HelperService.get_custom_args
}
message.to = func_switcher.get("to")(data['to'], 'to')
data_keys = data.keys()
if 'cc' in data_keys:
message.cc = func_switcher.get("cc")(data['cc'], 'cc')
if 'bcc' in data_keys:
message.bcc = func_switcher.get("bcc")(data['bcc'], 'bcc')
if 'template' in data_keys and not 'content' in data_keys:
data['content'] = generate_template_content(data['template'])
data_keys = data.keys()
if 'content' in data_keys:
message.content = func_switcher.get("content")(data['content'])
if 'attachments' in data_keys:
message.attachment = func_switcher.get("attachments")(data['attachments'])
if 'custom_args' in data_keys:
message.custom_arg = func_switcher.get("custom_args")(data['custom_args'])
#logging.warning(message.get())
sendgrid_client = sendgrid.SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))
response = sendgrid_client.send(message)
print(f"response: {response.body}")
print(f"status: {response.status_code}")
record.email_content = [c.get() for c in message.contents]
record.result = response.body
record.processed_timestamp = datetime.datetime.now(LOCAL_TZ)
db_session.commit()
except HTTPError as err:
print(f"send_grid error: {err.to_dict}")
except Exception as err: # pylint: disable=broad-except
print(f"task Error: {err}")
print(traceback.format_exc())
if self.request.retries >= self.max_retries:
print(ERR_MSG_MAX_RETRIES)
rollback(db_session, record)
raise err
finally:
db_session.close()
print(f"task:finished with id - {record_id}")
| 24,748
|
def get_patch_shape(corpus_file):
"""Gets the patch shape (height, width) from the corpus file.
Args:
corpus_file: Path to a TFRecords file.
Returns:
A tuple (height, width), extracted from the first record.
Raises:
ValueError: if the corpus_file is empty.
"""
example = tf.train.Example()
try:
example.ParseFromString(next(tf.python_io.tf_record_iterator(corpus_file)))
except StopIteration as e:
raise ValueError('corpus_file cannot be empty: %s' % e)
return (example.features.feature['height'].int64_list.value[0],
example.features.feature['width'].int64_list.value[0])
| 24,749
|
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
        # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError as e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
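# Illustrative, self-contained sketch of the same idea (an assumption-labelled
# stand-in, not the gyp implementation above): order the keys so that every
# variable appears after the variables it references via ${...}.
import re

def _toposort_env_keys_sketch(env):
    pattern = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
    deps = {k: {v for v in pattern.findall(env[k]) if v in env} for k in env}
    order, done = [], set()
    def visit(key, stack=()):
        if key in stack:
            raise ValueError('cyclic environment variables: %r' % (stack + (key,),))
        if key in done:
            return
        for dep in deps[key]:
            visit(dep, stack + (key,))
        done.add(key)
        order.append(key)
    for key in env:
        visit(key)
    return order

# 'bar' has no dependencies and 'foo' refers to it, so 'bar' precedes 'foo':
# _toposort_env_keys_sketch({'foo': '${bar}/bin', 'bar': '/usr'}) == ['bar', 'foo']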
| 24,750
|
def get_children_as_dict(parent):
"""For a given parent object, return all children as a dictionary with the childs tag as key"""
child_list = getChildElementsListWithSpecificXpath(parent, "*")
child_dict = {}
for child in child_list:
value = get_children_as_dict(child)
if child.tag not in child_dict:
child_dict[child.tag] = [value] if value != {} else [child.text]
else:
child_dict[child.tag].append(value if value != {} else child.text)
return child_dict
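# Hedged illustration (assumes getChildElementsListWithSpecificXpath(parent, "*")
# returns the direct child elements, e.g. like ElementTree's findall("*")):
# for <person><name>Ann</name><pet><kind>cat</kind></pet></person>
# get_children_as_dict(person)  -> {'name': ['Ann'], 'pet': [{'kind': ['cat']}]}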
| 24,751
|
def markdown(build_reset, monkeypatch):
"""Create markdown and text widgets."""
app = App(__name__, sidebar=True)
app.add(mark)
app.add_sidebar(side)
app.add_sidebar(text)
app.subscribe(text.on_change)(write)
# pylint: disable=protected-access
app._build()
with server_check(app) as server:
yield server
| 24,752
|
def test_create_vmis_proper_dpg(vmi_service, database, vnc_api_client, vm_model, vmi_model, vn_model_1, vn_model_2):
""" A new VMI is being created with proper DPG. """
vmi_model.vcenter_port.portgroup_key = 'dvportgroup-1'
database.vmis_to_update.append(vmi_model)
database.save(vn_model_1)
database.save(vn_model_2)
vmi_service.update_vmis()
assert database.get_all_vmi_models() == [vmi_model]
assert vmi_model.vm_model == vm_model
assert vmi_model.vn_model == vn_model_1
assert vmi_model in database.ports_to_update
assert vmi_model in database.vlans_to_update
assert 'vnc-vn-uuid-1' in [ref['uuid'] for ref in vmi_model.vnc_vmi.get_virtual_network_refs()]
vnc_api_client.update_vmi.assert_called_once()
| 24,753
|
def load_ascii_font(font_name):
"""
Load ascii font from a txt file.
Parameter
---------
font_name: name of the font (str).
Return
------
font: font face from the file (dic).
Version
-------
Specification: Nicolas Van Bossuyt (v1. 27/02/17)
Notes
-----
Load font in figlet format (http://www.figlet.org).
"""
chars = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_'abcdefghijklmnopqrstuvwxyz{|}~ÄÖÜäöüβ"
font = {}
char_index = 0
current_char = ''
current_char_width = 0
font_path = 'art/%s' % (font_name)
if not path.isfile(font_path):
return None
f = open(font_path, 'r')
for line in f:
current_char_width = len(line.replace('@', '')) - 1
current_char += line.replace('@', '')
if line.endswith('@@\n'):
font[chars[char_index]] = {}
font[chars[char_index]]['text'] = current_char
font[chars[char_index]]['width'] = current_char_width
current_char = ''
char_index += 1
f.close()
return font
| 24,754
|
def get_multi_tower_fn(num_gpus, variable_strategy,
model_fn, device_setter_fn, lr_provider):
"""Returns a function that will build the resnet model.
Args:
num_gpus: number of GPUs to use (obviously)
variable_strategy: "GPU" or "CPU"
model_fn: The function providing the model as in
loss, gradvars, preds = model_fn(is_training,
features,
labels,
data_format, params)
lr_provider: a function that takes a tf.train.get_global_step() and returns
a learning rate value for that step
device_setter_fn: A device setter
"""
def _multi_tower_model_fn(features, labels, mode, params):
"""A model function that distributes models amongst towers.
Support single host, one or more GPU training. Parameter distribution can
be either one of the following scheme.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
labels: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
params: Hyperparameters suitable for tuning
Returns:
A EstimatorSpec object.
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
momentum = params.momentum
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
# channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
# on CPU. The exception is Intel MKL on CPU which is optimal with
# channels_last.
data_format = params.data_format
if not data_format:
if num_gpus == 0:
data_format = 'channels_last'
else:
data_format = 'channels_first'
if num_gpus == 0:
num_devices = 1
device_type = 'cpu'
else:
num_devices = num_gpus
device_type = 'gpu'
for i in range(num_devices):
worker_device = '/{}:{}'.format(device_type, i)
device_setter = device_setter_fn(
variable_strategy, worker_device, num_gpus)
with tf.variable_scope('neural_network', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
loss, gradvars, preds = \
model_fn(is_training,
tower_features[i],
tower_labels[i],
data_format, params)
tower_losses.append(loss)
tower_gradvars.append(gradvars)
tower_preds.append(preds)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
name_scope)
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope('gradient_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
# to which they apply.
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
# Device that runs the ops to apply global gradient updates.
consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
with tf.device(consolidation_device):
learning_rate = lr_provider(tf.train.get_global_step())
loss = tf.reduce_mean(tower_losses, name='loss')
examples_sec_hook = reporting_utils.ExamplesPerSecondHook(
params.train_batch_size, every_n_steps=10)
tensors_to_log = {'learning_rate': learning_rate, 'loss': loss}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
train_hooks = [logging_hook, examples_sec_hook]
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
if params.sync:
raise ValueError("We don't support parallel processing at the moment.")
# optimizer = tf.train.SyncReplicasOptimizer(
# optimizer, replicas_to_aggregate=num_workers)
# sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
# train_hooks.append(sync_replicas_hook)
train_op = [
optimizer.apply_gradients(
gradvars, global_step=tf.train.get_global_step())
]
# noinspection PyUnboundLocalVariable
train_op.extend(update_ops)
train_op = tf.group(*train_op)
predictions = {
'classes':
tf.concat([p['classes'] for p in tower_preds], axis=0),
'probabilities':
tf.concat([p['probabilities'] for p in tower_preds], axis=0)
}
stacked_labels = tf.concat(labels, axis=0)
metrics = {
'accuracy':
tf.metrics.accuracy(stacked_labels, predictions['classes'])
}
# noinspection PyUnboundLocalVariable
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=train_hooks,
eval_metric_ops=metrics)
return _multi_tower_model_fn
| 24,755
|
def _check_num_classes_binary(
num_classes: int, multiclass: tp.Optional[bool], implied_classes: tp.Optional[int]
) -> None:
"""This checks that the consistency of `num_classes` with the data and `multiclass` param for binary data."""
if implied_classes is not None and implied_classes != 2:
raise ValueError(
"If `preds` have one dimension more than `target`, then `num_classes` should be 2 for binary data."
)
if num_classes > 2:
raise ValueError("Your data is binary, but `num_classes` is larger than 2.")
if num_classes == 2 and not multiclass:
raise ValueError(
"Your data is binary and `num_classes=2`, but `multiclass` is not True."
" Set it to True if you want to transform binary data to multi-class format."
)
if num_classes == 1 and multiclass:
raise ValueError(
"You have binary data and have set `multiclass=True`, but `num_classes` is 1."
" Either set `multiclass=None`(default) or set `num_classes=2`"
" to transform binary data to multi-class format."
)
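# Hedged usage sketch (added, not part of the original record): exercises the consistency
# checks above; assumes the function is in scope.
_check_num_classes_binary(num_classes=2, multiclass=True, implied_classes=2)  # consistent, no error
try:
    _check_num_classes_binary(num_classes=3, multiclass=None, implied_classes=None)
except ValueError as err:
    print(err)  # "Your data is binary, but `num_classes` is larger than 2."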
| 24,756
|
def extract_by_css(
content: str, selector: str, *, first: bool = True
) -> Union[str, list]:
"""Extract values from HTML content using CSS selector.
:param content: HTML content
:param selector: CSS selector
:param first: (optional) return first found element or all of them
    :return: value of the first found element, or an empty string if not found; or a list of all found elements
"""
extracted = ScrapySelector(text=content).css(selector).extract()
if first:
result = extracted[0] if len(extracted) > 0 else ""
else:
result = extracted
return result
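# Hedged usage sketch (added): first-match vs. all-matches extraction. Assumes
# `ScrapySelector` is scrapy's `Selector`, as imported by the surrounding module.
_html = "<ul><li>alpha</li><li>beta</li></ul>"
print(extract_by_css(_html, "li::text"))               # 'alpha'
print(extract_by_css(_html, "li::text", first=False))  # ['alpha', 'beta']
print(extract_by_css(_html, "p::text"))                # '' (no match)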
| 24,757
|
def generate_styles():
""" Create custom style rules """
# Set navbar so it's always at the top
css_string = "#navbar-top{background-color: white; z-index: 100;}"
# Set glossdef tip
css_string += "a.tip{text-decoration:none; font-weight:bold; cursor:pointer; color:#2196F3;}"
css_string += "a.tip:hover{position: relative;border-bottom: 1px dashed #2196F3;}"
# Set glossdef span
css_string += "a.tip span{display: none;background-color: white;font-weight: normal;border:1px solid gray;width: 250px;}"
css_string += "a.tip:hover span{display: block;position: absolute;z-index: 100;padding: 5px 15px;}"
return css_string
| 24,758
|
def get_config_file() -> Path:
"""
Get default config file.
"""
return get_project_root()/'data/config/config.yaml'
| 24,759
|
def kilometers_to_miles(dist_km):
"""Converts km distance to miles
PARAMETERS
----------
dist_km : float
Scalar distance in kilometers
RETURNS
-------
dist_mi : float
        Scalar distance in miles
"""
return dist_km / 1.609344
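# Hedged example (added): sanity-checks the conversion factor used above.
print(kilometers_to_miles(1.609344))  # ~1.0 mile
print(kilometers_to_miles(42.195))    # marathon distance, ~26.2 miles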
| 24,760
|
def test_Image_tmax_fallback(tmax_source, xy, expected, tol=0.001):
"""Test getting Tmax median value when daily doesn't exist
To test this, move the test date into the future
"""
input_img = ee.Image.constant([300, 0.8]).rename(['lst', 'ndvi']) \
.set({'system:index': SCENE_ID,
'system:time_start': ee.Date(SCENE_DATE).update(2099).millis()})
output_img = model.Image(input_img, tmax_source=tmax_source)._tmax
output = utils.point_image_value(ee.Image(output_img), xy)
assert abs(output['tmax'] - expected) <= tol
| 24,761
|
def test_get_bitinformation_dtype(rasm, dtype):
"""Test xb.get_bitinformation returns correct number of bits depending on dtype."""
ds = rasm.astype(dtype)
v = list(ds.data_vars)[0]
dtype_bits = dtype.replace("float", "")
assert len(xb.get_bitinformation(ds, dim="x")[v].coords["bit" + dtype_bits]) == int(
dtype_bits
)
| 24,762
|
def test_atomic_any_uri_max_length_3_nistxml_sv_iv_atomic_any_uri_max_length_4_1(mode, save_output, output_format):
"""
Type atomic/anyURI is restricted by facet maxLength with value 31.
"""
assert_bindings(
schema="nistData/atomic/anyURI/Schema+Instance/NISTSchema-SV-IV-atomic-anyURI-maxLength-4.xsd",
instance="nistData/atomic/anyURI/Schema+Instance/NISTXML-SV-IV-atomic-anyURI-maxLength-4-1.xml",
class_name="NistschemaSvIvAtomicAnyUriMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 24,763
|
def _objc_provider_framework_name(path):
"""Returns the name of the framework from an `objc` provider path.
Args:
path: A path that came from an `objc` provider.
Returns:
A string containing the name of the framework (e.g., `Foo` for `Foo.framework`).
"""
return path.rpartition("/")[2].partition(".")[0]
| 24,764
|
def create(rosdistro_index_url, extend_path, dir, name, build_tool, verbose):
"""Creates a new workspace, saves it, and switches to it if it is the first
workspace.
:param rosdistro_index_url: The rosdistro to use
:param extend_path: Parent workspace to use.
:param dir: Where to create the workspace
:param name: Name of the new workspace.
    :param build_tool: The build tool to create the workspace with ('colcon' or 'catkin_tools').
:param verbose: Unused.
"""
# also allow files
if os.path.isfile(rosdistro_index_url):
rosdistro_index_url = 'file://%s' % os.path.realpath(rosdistro_index_url)
try:
urlopen(rosdistro_index_url)
except (ValueError, URLError) as e:
logger.error(e)
return 1
if not os.path.isdir(dir):
logger.error('target path is not a directory')
return 1
enclosing_workspace = find_enclosing_workspace(dir)
if enclosing_workspace:
logger.error("Its not allowed to create a worksapce inside another workspace, other workspace found here:\n%s",
enclosing_workspace)
return 1
# try to guess which build tool to use
if os.path.exists(os.path.join(extend_path, '.catkin')):
build_tool = 'catkin_tools'
else:
build_tool = 'colcon'
if build_tool == 'catkin_tools':
result = create_workspace_with_catkin_tools(extend_path, dir)
elif build_tool == 'colcon':
result = create_workspace_with_colcon(extend_path, dir)
if result:
return result
save(dir, name, verbose)
save_config(dir, rosdistro_index_url=rosdistro_index_url)
| 24,765
|
def sample_publisher(name='EA'):
"""Create and return a sample publisher"""
return Publisher.objects.create(name=name)
| 24,766
|
def AddGlobalFile(gfile):
"""
Add a global file to the cmd string.
@return string containing knob
"""
string = ''
if gfile:
string = ' --global_file ' + gfile
return string
| 24,767
|
def get_scale_sequence(scale_0, v_init, a, n_frames):
""" simulates an object's size change from an initial velocity and an acceleration type
"""
scale = scale_0
sequence = [scale]
# TODO
# friction, sinusoidal
for i in range(n_frames-1):
scale = max(0.05, scale + v_init)
if not isinstance(a, str):
v_init = v_init + a
sequence.append(scale)
return sequence
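# Hedged usage sketch (added): a shrinking object with a small positive acceleration; the
# 0.05 floor inside the function keeps the scale from collapsing to zero.
print(get_scale_sequence(scale_0=1.0, v_init=-0.1, a=0.02, n_frames=5))
# -> approximately [1.0, 0.9, 0.82, 0.76, 0.72]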
| 24,768
|
def read_file(filename):
"""Opens the file with the given filename and creates the puzzle in it.
Returns a pair consisting of the puzzle grid and the list of clues. Assumes
that the first line gives the size. Afterwards, the rows and clues are given.
The description of the rows and clues may interleave arbitrarily.
"""
size = 0
out_list = []
rows = []
clues = []
with open(filename, 'r') as file:
for line in file:
line = line.replace('\n', '')
line = split_type(line)
if line[0] == 'SIZE':
size = int(line[1])
elif line[0] == 'ROW':
rows.append(read_row(line[1]))
else:
clues.append(read_clue(line[1]))
return (rows, clues)
| 24,769
|
def login_user(request):
"""View to login a new user"""
user = authenticate(username=request.POST['EMail'][:30], password=request.POST['Password'])
if user is not None:
if user.is_active:
login(request, user)
send_email("ROCK ON!!!", "User login - " + user.first_name + " " + user.last_name)
# Redirect to a success page.
return HttpResponse('success')
else:
# Return a 'disabled account' error message
return HttpResponse('Account disabled')
else:
# Return an 'invalid login' error message.
return HttpResponse('Invalid username or password')
| 24,770
|
def update_signature_approved(signature, value):
"""Helper function to update the signature approval status and send emails if necessary."""
previous = signature.get_signature_approved()
signature.set_signature_approved(value)
email_approval = cla.conf['EMAIL_ON_SIGNATURE_APPROVED']
if email_approval and not previous and value: # Just got approved.
subject, body, recipients = get_signature_approved_email_content(signature)
get_email_service().send(subject, body, recipients)
| 24,771
|
def gdf_convex_hull(gdf):
"""
Creates a convex hull around the total extent of a GeoDataFrame.
Used to define a polygon for retrieving geometries within. When calculating
densities for urban blocks we need to retrieve the full extent of e.g.
buildings within the blocks, not crop them to an arbitrary bounding box.
Parameters
----------
gdf : geodataframe
currently accepts a projected gdf
Returns
-------
shapely polygon
"""
### INSERT CHECK FOR CRS HERE?
# project gdf back to geographic coordinates as footprints_from_polygon
# requires it
gdf_temp = ox.projection.project_gdf(gdf, to_latlong=True)
# determine the boundary polygon to fetch buildings within
# buffer originally 0.000225, buffer actually needs to go whole block away
    # to get complete highways, therefore trying 0.001
boundary=gdf_temp.cascaded_union.convex_hull.buffer(0.001)
# NOTE - maybe more efficient to generate boundary first then reproject second?
return boundary
| 24,772
|
def room_operating_mode(mode: str) -> Dict[str, Any]:
"""Payload to set operating mode for
:class:`~pymultimatic.model.component.Room`.
"""
return {"operationMode": mode}
| 24,773
|
def _validate_options(data: Dict[str, Any]) -> Dict[str, Any]:
"""
Looks up the exporter_type from the data, selects the correct export
options serializer based on the exporter_type and finally validates the data using
that serializer.
:param data: A dict of data to serialize using an exporter options serializer.
:return: validated export options data
"""
option_serializers = table_exporter_registry.get_option_serializer_map()
validated_exporter_type = validate_data(BaseExporterOptionsSerializer, data)
serializer = option_serializers[validated_exporter_type["exporter_type"]]
return validate_data(serializer, data)
| 24,774
|
def LodeCross(m=1.2, clr='black', ls='dashed', lw=1, zorder=0, **kwargs):
"""
Draw cross for Lode diagram
===========================
"""
xmin, xmax = gca().get_xlim()
ymin, ymax = gca().get_ylim()
if xmin > 0.0: xmin = 0.0
if xmax < 0.0: xmax = 0.0
if ymin > 0.0: ymin = 0.0
if ymax < 0.0: ymax = 0.0
dx = abs(xmax - xmin)
dy = abs(ymax - ymin)
d = m * max(dx, dy)
#print('dx=',dx,' dy=',dy,' d=',d)
#d = sqrt((xmax-xmin)*2.0 + (ymax-ymin)*2.0)
c30, s30 = sqrt(3.0)/2.0, 0.5
c60, s60 = 0.5, sqrt(3.0)/2.0
plot([0,0], [0,d*c30], color=clr, ls=ls, lw=lw, zorder=zorder, **kwargs)
plot([0,-d*s30],[0,d*c30], color=clr, ls=ls, lw=lw, zorder=zorder, **kwargs)
plot([0,-d*s60],[0,d*c60], color=clr, ls=ls, lw=lw, zorder=zorder, **kwargs)
| 24,775
|
def householder_name (name, rank):
"""Returns if the name conforms to Householder notation.
>>> householder_name('A_1', 2)
True
>>> householder_name('foobar', 1)
False
"""
base, _, _ = split_name(name)
if base in ['0', '1']:
return True
elif rank == 0:
if base in GREEK_ALPHA:
return True
elif rank == 1:
if len(base) == 1 and base.isalpha() and base.islower():
return True
elif rank == 2:
if len(base) == 1 and base.isupper() and base.isalpha():
return True
return False
| 24,776
|
def calculateDerivatives(x,t,id):
"""
dxdt, x0, id_, x_mean = calculateDerivatives(x,t,id)
Missing data is assumed to be encoded as np.nan
"""
nm = ~np.isnan(t) & ~np.isnan(x) # not missing
id_u = np.unique(id)
id_ = []
dxdt = []
x0 = []
x_mean = []
for k in range(0,len(id_u)):
rowz = id==id_u[k]
rowz = rowz & nm
t_k = t[rowz]
x_k = x[rowz]
if np.sum(rowz)>1:
# Gradient via linear regression
lm = np.polyfit(t_k,x_k,1)
id_.append(id_u[k])
dxdt.append(lm[0])
x0.append(lm[1])
x_mean.append(np.nanmean(x_k))
print('k = {0} \n * n = {1}\n * dx/dt = {2} | x0 = {3} | mean(x) = {4}'.format(k,sum(rowz),dxdt[-1],x0[-1],x_mean[-1]))
#plt.plot(t[rowz],x[rowz],'x')
#plt.plot([min(t[rowz]),max(t[rowz])],[min(t[rowz]),max(t[rowz])]*dxdt[-1] + x0[-1],'-')
#plt.show()
# Remove any nan
dxdt_isnan = np.isnan(dxdt)
x0_isnan = np.isnan(x0)
dxdt = np.delete(dxdt,np.where(dxdt_isnan | x0_isnan)[0])
x0 = np.delete(x0,np.where(dxdt_isnan | x0_isnan)[0])
id_u = np.delete(id_u,np.where(dxdt_isnan | x0_isnan)[0])
return dxdt, x0, id_, x_mean
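# Hedged usage sketch (added): two subjects with noiseless linear trajectories; the per-id
# regression should recover the true gradients (2.0 and -1.0). Assumes numpy is imported
# as in the surrounding module.
_t = np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])
_x = np.array([1.0, 3.0, 5.0, 4.0, 3.0, 2.0])  # id 1: slope +2, id 2: slope -1
_id = np.array([1, 1, 1, 2, 2, 2])
_dxdt, _x0, _ids, _xmean = calculateDerivatives(_x, _t, _id)
print(_dxdt)  # -> [ 2. -1.]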
| 24,777
|
def DeployConnectAgent(args,
service_account_key_data,
image_pull_secret_data,
membership_ref):
"""Deploys the GKE Connect agent to the cluster.
Args:
args: arguments of the command.
service_account_key_data: The contents of a Google IAM service account JSON
file
image_pull_secret_data: The contents of image pull secret to use for
private registries.
membership_ref: The membership should be associated with the connect agent
in the format of
`project/[PROJECT]/location/global/memberships/[MEMBERSHIP]`.
Raises:
exceptions.Error: If the agent cannot be deployed properly
calliope_exceptions.MinimumArgumentException: If the agent cannot be
deployed properly
"""
kube_client = kube_util.KubernetesClient(args)
project_id = properties.VALUES.core.project.GetOrFail()
log.status.Print('Generating connect agent manifest...')
full_manifest = _GenerateManifest(args,
service_account_key_data,
image_pull_secret_data,
False,
membership_ref)
# Generate a manifest file if necessary.
if args.manifest_output_file:
try:
files.WriteFileContents(
files.ExpandHomeDir(args.manifest_output_file),
full_manifest,
private=True)
except files.Error as e:
      raise exceptions.Error('could not create manifest file: {}'.format(e))
log.status.Print(MANIFEST_SAVED_MESSAGE.format(args.manifest_output_file))
return
log.status.Print('Deploying GKE Connect agent to cluster...')
namespace = _GKEConnectNamespace(kube_client, project_id)
# Delete the ns if necessary
kube_util.DeleteNamespaceForReinstall(kube_client, namespace)
# TODO(b/138816749): add check for cluster-admin permissions
_PurgeAlphaInstaller(kube_client, namespace, project_id)
  # Create or update the agent install deployment and related resources.
_, err = kube_client.Apply(full_manifest)
if err:
raise exceptions.Error(
'Failed to apply manifest to cluster: {}'.format(err))
# TODO(b/131925085): Check connect agent health status.
| 24,778
|
def SolveCaptcha(api_key, site_key, url):
"""
    Uses the 2Captcha service to solve Captchas for you.
    Captchas are held in iframes; to solve the captcha, you need part of the URL of the iframe. The iframe is usually
inside a div with id=gRecaptcha. The part of the url we need is the query parameter k, this is called the site_key:
www.google.com/recaptcha/api2/anchor?ar=1&k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9&co=aHR0cHM6Ly93d3cuZGljZS5jb206NDQz&hl=en&v=oqtdXEs9TE9ZUAIhXNz5JBt_&size=normal&cb=rpcg9w84syix
k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
Here the site_key is 6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
You also need to supply the url of the current page you're on.
This function will return a string with the response key from captcha validating the test. This needs to be inserted
into an input field with the id=g-recaptcha-response.
:param api_key: The 2Captcha API key.
:param site_key: The site_key extracted from the Captcha iframe url
:param url: url of the site you're on
:return: The response from captcha validating the test
"""
print("Solving Captcha...")
print("Sending Request...")
request_response = requests.get("https://2captcha.com/in.php?", params={
"googlekey": site_key,
"method": "userrecaptcha",
"pageurl": url,
"key": api_key,
"json": 1,
"invisible": 0,
})
request_response.raise_for_status()
print("Waiting for Response...")
time.sleep(30)
answer_response_json = {'status': 0, 'request': 'CAPCHA_NOT_READY'}
while answer_response_json['request'] == 'CAPCHA_NOT_READY':
answer_response = requests.get("https://2captcha.com/res.php", params={
"key": api_key,
"action": "get",
"id": request_response.json()['request'],
"json": 1
})
answer_response_json = answer_response.json()
print(answer_response_json)
time.sleep(5)
if answer_response_json['status'] == 1:
print("Solved!")
return answer_response_json['request']
elif answer_response_json['request'] == 'ERROR_CAPTCHA_UNSOLVABLE':
raise TimeoutError("ERROR_CAPTCHA_UNSOLVABLE")
else:
raise Exception(answer_response_json['request'])
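# Hedged usage sketch (added, not from the source): the API key and page URL below are
# placeholders; a real call blocks for roughly 30 seconds while 2Captcha workers solve the
# challenge, then returns the token expected in the page's g-recaptcha-response field.
if __name__ == "__main__":
    token = SolveCaptcha(
        api_key="YOUR_2CAPTCHA_API_KEY",
        site_key="6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9",
        url="https://www.example.com/login",
    )
    print(token[:20], "...")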
| 24,779
|
def send_asset(asset_file_name):
"""Return an asset.
Args:
asset_file_name: The path of the asset file relative to the assets folder.
Returns:
The asset specified in the URL.
"""
asset_path = f"assets/{asset_file_name}"
asset_size = os.path.getsize(asset_path)
with open(asset_path, "rb") as asset_file:
asset_etag = RangeRequest.make_etag(asset_file)
asset_response = RangeRequest(
open(asset_path, "rb"), # noqa: WPS515
etag=asset_etag,
last_modified=server_boot_time,
size=asset_size,
).make_response()
asset_response.mimetype = mimetypes.guess_type(asset_file_name)[0]
return asset_response
| 24,780
|
def clean_crn(crn, duplicates = True, trivial = True, inter = None):
"""Takes a crn and removes trivial / duplicate reactions. """
new = []
seen = set()
for [R, P] in crn:
lR = sorted(interpret(R, inter)) if inter else sorted(R)
lP = sorted(interpret(P, inter)) if inter else sorted(P)
tR = tuple(lR)
tP = tuple(lP)
if trivial and tR == tP:
continue
if duplicates and (tR, tP) in seen:
continue
new.append([lR, lP])
seen.add((tR, tP))
return new
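# Hedged example (added): the second reaction is a duplicate (same multisets of reactants
# and products) and the third is trivial (reactants == products), so only one survives.
_crn = [
    [['A', 'B'], ['C']],
    [['B', 'A'], ['C']],
    [['X'], ['X']],
]
print(clean_crn(_crn))  # [[['A', 'B'], ['C']]]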
| 24,781
|
def to_libsvm(data, target, save_to=None):
"""
    Transforms a dataset to libsvm format.
"""
le = LabelEncoder()
target_t = le.fit_transform(data.data[target].to_ndarray())
groups = [group for group in data.groups if group != target]
with open(save_to, 'w') as f:
for row in libsvm_row(target_t, data.data[groups].to_ndarray()):
f.write(" ".join(row))
f.write("\n")
| 24,782
|
def bwa_index(fasta):
""" Create a BWA index. """
shared.run_command(
[BIN['bwa'], 'index', fasta],
)
| 24,783
|
def get_db_mapping(mesh_id):
"""Return mapping to another name space for a MeSH ID, if it exists.
Parameters
----------
mesh_id : str
        The MeSH ID whose mapping is to be returned.
Returns
-------
tuple or None
A tuple consisting of a DB namespace and ID for the mapping or None
if not available.
"""
return mesh_to_db.get(mesh_id)
| 24,784
|
def MC_dBESQ_gateway(N = 10**6, t = 0, n0 = 0, test = 'laguerre', method = 'laguerre', args = [], num_decimal = 4):
"""
Monte Carlo estimator of expected dBESQ using birth-death simulation, exact BESQ solution, dLaguerre simulation
or PDE systems.
:param N: int, Number of simulations
    :param t: positive float, simulation horizon
    :param n0: initial value of the process
    :param method: simulation method; currently supports {'birth-death', 'exact-besq', 'laguerre', 'pde'}
    :param test: defines the test function
    :param args: arguments used to define the test function
"""
if method == 'birth-death':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
xt_array = bd_simulator(t, x0=n0, num_paths=N, method='bessel', num_threads=4)
return np.mean(f(xt_array)).round(num_decimal)
elif method == 'exact-besq':
if test == 'laguerre':
return np.mean(exp(-t+1)*jv(0, 2*np.sqrt(np.random.gamma(n0+1)))).round(num_decimal)
elif method == 'laguerre':
if test == 'laguerre':
f = lambda n : eval_laguerre(n, 1)
s = log(t / 2)
def poisson_x0():
return np.random.poisson(np.random.gamma(n0+1))
xt_array = bd_simulator(s, x0=poisson_x0, num_paths=N, method='laguerre', num_threads=4)
return np.mean(f(np.random.poisson(t/2 *np.random.gamma(xt_array+1)))).round(num_decimal)
| 24,785
|
def locate_app(app_id):
"""Attempts to locate the application."""
if app_id is None:
return find_app_in_cwd()
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
__import__(module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
| 24,786
|
def test_owe_transition_mode_multi_bss(dev, apdev):
"""Opportunistic Wireless Encryption transition mode (multi BSS)"""
try:
run_owe_transition_mode_multi_bss(dev, apdev)
finally:
dev[0].request("SCAN_INTERVAL 5")
| 24,787
|
def resolve_image(image):
""" Resolve an informal image tag into a full Docker image tag. Any tag
available on Docker Hub for Neo4j can be used, and if no 'neo4j:' prefix
exists, this will be added automatically. The default edition is
Community, unless a cluster is being created in which case Enterprise
edition is selected instead. Explicit selection of Enterprise edition can
be made by adding an '-enterprise' suffix to the image tag.
If a 'file:' URI is passed in here instead of an image tag, the Docker
image will be loaded from that file instead.
Examples of valid tags:
- 3.4.6
- neo4j:3.4.6
- latest
- file:/home/me/image.tar
"""
if image.startswith("file:"):
return load_image_from_file(image[5:])
elif ":" in image:
return image
else:
return "neo4j:" + image
| 24,788
|
def _cifar_meanstd_normalize(image):
"""Mean + stddev whitening for CIFAR-10 used in ResNets.
Args:
image: Numpy array or TF Tensor, with values in [0, 255]
Returns:
image: Numpy array or TF Tensor, shifted and scaled by mean/stdev on
CIFAR-10 dataset.
"""
# Channel-wise means and std devs calculated from the CIFAR-10 training set
cifar_means = [125.3, 123.0, 113.9]
cifar_devs = [63.0, 62.1, 66.7]
rescaled_means = [x / 255. for x in cifar_means]
rescaled_devs = [x / 255. for x in cifar_devs]
image = (image - rescaled_means) / rescaled_devs
return image
| 24,789
|
def get_value_counts_and_frequencies(elem: Variable, data: pd.DataFrame) -> Categories:
"""Call function to generate frequencies depending on the variable type
Input:
elem: dict
data: pandas DataFrame
Output:
statistics: OrderedDict
"""
statistics: Categories = Categories()
_scale = elem["scale"]
statistics.update(get_categorical_frequencies(elem, data))
return statistics
| 24,790
|
def determineLinearRegions(data, minLength=.1, minR2=.96, maxSlopeInterceptDiff=.75):
"""
Determine regions of a plot that are approximately linear by performing
linear least-squares on a rolling window.
Parameters
----------
data : array_like
Data within which linear regions are to be identified
minLength : int or float
The minimum length of a linear segment, either as an
integer number of indices, or as a float fraction of the
overall data length.
minR2 : float
The minimum r-squared value for a region to be
considered linear.
maxSlopeInterceptDiff : float
The float percentage difference allowed between slopes
and intercepts of adjacent slices for them to be
considered the same region.
Returns
-------
regionIndices : np.ndarray[N,2]
The start and end indices for the N detected regions.
slopes : np.ndarray[N]
The slope of each region.
intercepts : np.ndarray[N]
The intercept of each region.
"""
if minLength < 1:
minLinSteps = int(len(data)*minLength)
else:
minLinSteps = int(minLength)
inLinearRegion = False
linearRegions = []
slopes = []
intercepts = []
# Perform least squares on a rolling window
i = 0
while i < len(data) - minLinSteps:
xArr = np.arange(i, i+minLinSteps)
slope, intercept, r2, p_value, std_err = linregress(xArr, data[i:i+minLinSteps])
if np.abs(r2) > minR2:
if inLinearRegion:
# Calculate how different new slope is from old one
if np.abs((np.mean(slopes[-1]) - slope) / np.mean(slopes[-1])) < maxSlopeInterceptDiff and np.abs((np.mean(intercepts[-1]) - intercept) / np.mean(intercepts[-1])) < maxSlopeInterceptDiff:
# This is still the same linear region, so we extend the bounds
linearRegions[-1][1] = i+minLinSteps
# And average in the slopes and intercepts
slopes[-1] += [slope]
intercepts[-1] += [intercept]
else:
# Otherwise, we have a new linear region, which we start
# at the end of the other one
i = linearRegions[-1][1]
inLinearRegion = False
continue
else:
# New linear region
linearRegions.append([i, i+minLinSteps])
slopes.append([slope])
intercepts.append([intercept])
inLinearRegion = True
else:
inLinearRegion = False
i += 1
slopes = np.array([np.mean(s) for s in slopes])
intercepts = np.array([np.mean(inter) for inter in intercepts])
return np.array(linearRegions), slopes, intercepts
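# Hedged usage sketch (added): a signal made of two straight segments. Assumes the module's
# own imports (numpy, scipy.stats.linregress) are available; exact boundaries depend on the
# rolling-window heuristics, but the first detected slope is ~+1 and the last is ~-2.
_seg1 = 10.0 + np.arange(50)        # slope +1, intercept 10
_seg2 = 60.0 - 2.0 * np.arange(50)  # slope -2
_regions, _slopes, _intercepts = determineLinearRegions(np.concatenate([_seg1, _seg2]), minLength=20)
print(_regions, _slopes)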
| 24,791
|
def compute_dispersion(aperture, beam, dispersion_type, dispersion_start,
mean_dispersion_delta, num_pixels, redshift, aperture_low, aperture_high,
weight=1, offset=0, function_type=None, order=None, Pmin=None, Pmax=None,
*coefficients):
"""
Compute a dispersion mapping from a IRAF multi-spec description.
:param aperture:
The aperture number.
:param beam:
The beam number.
:param dispersion_type:
An integer representing the dispersion type:
0: linear dispersion
1: log-linear dispersion
2: non-linear dispersion
:param dispersion_start:
The value of the dispersion at the first physical pixel.
:param mean_dispersion_delta:
The mean difference between dispersion pixels.
:param num_pixels:
The number of pixels.
:param redshift:
The redshift of the object. This is accounted for by adjusting the
dispersion scale without rebinning:
>> dispersion_adjusted = dispersion / (1 + redshift)
:param aperture_low:
The lower limit of the spatial axis used to compute the dispersion.
:param aperture_high:
The upper limit of the spatial axis used to compute the dispersion.
:param weight: [optional]
A multiplier to apply to all dispersion values.
:param offset: [optional]
A zero-point offset to be applied to all the dispersion values.
:param function_type: [optional]
An integer representing the function type to use when a non-linear
dispersion mapping (i.e. `dispersion_type = 2`) has been specified:
1: Chebyshev polynomial
2: Legendre polynomial
3: Cubic spline
4: Linear spline
5: Pixel coordinate array
6: Sampled coordinate array
:param order: [optional]
The order of the Legendre or Chebyshev function supplied.
:param Pmin: [optional]
The minimum pixel value, or lower limit of the range of physical pixel
coordinates.
:param Pmax: [optional]
The maximum pixel value, or upper limit of the range of physical pixel
coordinates.
:param coefficients: [optional]
The `order` number of coefficients that define the Legendre or Chebyshev
polynomial functions.
:returns:
An array containing the computed dispersion values.
"""
if dispersion_type in (0, 1):
# Simple linear or logarithmic spacing
dispersion = \
dispersion_start + np.arange(num_pixels) * mean_dispersion_delta
        # Log-linear dispersion (type 1) stores log10 of the wavelength.
        if dispersion_type == 1:
dispersion = 10.**dispersion
elif dispersion_type == 2:
# Non-linear mapping.
if function_type is None:
raise ValueError("function type required for non-linear mapping")
elif function_type not in range(1, 7):
raise ValueError(
"function type {0} not recognised".format(function_type))
if function_type == 1:
order = int(order)
n = np.linspace(-1, 1, Pmax - Pmin + 1)
temp = np.zeros((Pmax - Pmin + 1, order), dtype=float)
temp[:, 0] = 1
temp[:, 1] = n
for i in range(2, order):
temp[:, i] = 2 * n * temp[:, i-1] - temp[:, i-2]
for i in range(0, order):
temp[:, i] *= coefficients[i]
dispersion = temp.sum(axis=1)
elif function_type == 2:
# Legendre polynomial.
if None in (order, Pmin, Pmax, coefficients):
raise TypeError("order, Pmin, Pmax and coefficients required "
"for a Chebyshev or Legendre polynomial")
Pmean = (Pmax + Pmin)/2
Pptp = Pmax - Pmin
x = (np.arange(num_pixels) + 1 - Pmean)/(Pptp/2)
p0 = np.ones(num_pixels)
            p1 = x
dispersion = coefficients[0] * p0 + coefficients[1] * p1
for i in range(2, int(order)):
if function_type == 1:
# Chebyshev
p2 = 2 * x * p1 - p0
else:
# Legendre
p2 = ((2*i - 1)*x*p1 - (i - 1)*p0) / i
dispersion += p2 * coefficients[i]
p0, p1 = (p1, p2)
elif function_type == 3:
# Cubic spline.
if None in (order, Pmin, Pmax, coefficients):
raise TypeError("order, Pmin, Pmax and coefficients required "
"for a cubic spline mapping")
s = (np.arange(num_pixels, dtype=float) + 1 - Pmin)/(Pmax - Pmin) \
* order
j = s.astype(int).clip(0, order - 1)
a, b = (j + 1 - s, s - j)
x = np.array([
a**3,
1 + 3*a*(1 + a*b),
1 + 3*b*(1 + a*b),
b**3])
dispersion = np.dot(np.array(coefficients), x.T)
else:
raise NotImplementedError("function type not implemented yet")
else:
raise ValueError(
"dispersion type {0} not recognised".format(dispersion_type))
# Apply redshift correction.
dispersion = weight * (dispersion + offset) / (1 + redshift)
return dispersion
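# Hedged example (added): a simple linear dispersion mapping (type 0) for a 5-pixel
# aperture starting at 4000 Angstroms with a 1.5 Angstrom/pixel step and zero redshift.
print(compute_dispersion(1, 1, 0, 4000.0, 1.5, 5, 0.0, 1, 5))
# -> [4000.  4001.5 4003.  4004.5 4006. ]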
| 24,792
|
async def websocket_remove_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Remove a node from the Z-Wave network."""
controller = client.driver.controller
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
unsubs = [
controller.on("exclusion started", forward_event),
controller.on("exclusion failed", forward_event),
controller.on("exclusion stopped", forward_event),
controller.on("node removed", node_removed),
]
result = await controller.async_begin_exclusion()
connection.send_result(
msg[ID],
result,
)
| 24,793
|
def _fix_google_module():
"""Reloads the google module to prefer our third_party copy.
  When Python is not invoked with the -S option, it may preload the google module via a .pth file.
  This leads to the "site-packages" version being preferred over the gsutil "third_party" version.
  To force the "third_party" version, insert the path at the start of sys.path and reload the google module.
  This is a hacky workaround. Reloading is required for the rare case that users have
google-auth already installed in their Python environment.
Note that this reload may cause an issue for Python 3.5.3 and lower
because of the weakref issue, fixed in Python 3.5.4:
https://github.com/python/cpython/commit/9cd7e17640a49635d1c1f8c2989578a8fc2c1de6.
"""
if 'google' not in sys.modules:
return
import importlib # pylint: disable=g-import-not-at-top
importlib.reload(sys.modules['google'])
| 24,794
|
def _remove_edgetpu_model(data_dir):
"""Removes edgetpu models recursively from given folder.
Filenames with suffix `_edgetpu.tflite` will be removed.
Args:
data_dir: string, path to folder.
"""
print("Removing edgetpu models from ", data_dir)
edgetpu_model_list = glob.glob(
os.path.join(data_dir, "**/*_edgetpu.tflite"), recursive=True)
models_to_skip = _get_models_to_skip()
for model_path in edgetpu_model_list:
if models_to_skip and os.path.basename(model_path).replace(
"_edgetpu.tflite", "") in models_to_skip:
print("Skipping %s" % (model_path))
continue
print("Removing model: %s" % (os.path.join(data_dir, model_path)))
os.remove(os.path.join(data_dir, model_path))
| 24,795
|
def test_shutil_ignore_function():
"""
>>> test_shutil_ignore_function()
"""
# Setup
path_test_dir = pathlib.Path(__file__).parent.resolve()
path_source_dir = path_test_dir / "example"
path_target_dir = path_test_dir / "target"
shutil.rmtree(path_target_dir, ignore_errors=True)
# Test
ignore_parser = igittigitt.IgnoreParser()
ignore_parser.parse_rule_files(base_dir=path_source_dir, filename=".test_gitignore")
shutil.copytree(
path_source_dir, path_target_dir, ignore=ignore_parser.shutil_ignore,
)
assert len(list(path_target_dir.glob("**/*"))) == 9
# Teardown
shutil.rmtree(path_target_dir, ignore_errors=True)
| 24,796
|
def normalize_pcp_area(pcp):
"""
    Normalizes a pcp (pitch-class profile) so that the sum of its contents is 1.
"""
pcp = np.divide(pcp, np.sum(pcp))
new_format = []
for item in pcp:
new_format.append(item)
return np.array(new_format)
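# Hedged example (added): the returned profile always sums to one. Assumes numpy is
# imported as in the surrounding module.
print(normalize_pcp_area(np.array([2.0, 1.0, 1.0, 0.0])))        # [0.5  0.25 0.25 0.  ]
print(normalize_pcp_area(np.array([2.0, 1.0, 1.0, 0.0])).sum())  # 1.0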
| 24,797
|
def calc_line_flux(spec, ws, ivar, w0, w1, u_flux):
""" calculate the flux and flux error of the line within the range w0 and w1 using trapz rule"""
u_spec = spec.unit
u_ws = ws.unit
ivar = ivar.to(1./(u_spec**2))
spec_uless = np.array(spec)
ws_uless = np.array(ws)
ivar_uless = np.array(ivar)
if ivar.unit != (1./(spec.unit**2)):
raise Exception("[spector] spec and ivar units inconsistent")
# select region to integrate
select_ws = (ws_uless > w0) & (ws_uless < w1)
ws_sel = ws_uless[select_ws]
spec_sel = spec_uless[select_ws]
ivar_sel = ivar_uless[select_ws]
var_sel = 1./ivar_sel
# integrate
f, fvar = trapz_var(x=ws_sel, y=spec_sel, yvar=var_sel)
f = (f*u_spec*u_ws).to(u_flux)
ferr = (np.sqrt(fvar)*u_spec*u_ws).to(u_flux)
return f, ferr
| 24,798
|
def main():
"""
    Main function.
"""
if len(sys.argv) >= 2 and sys.argv[1] == 'find':
if len(sys.argv) > 2:
name = sys.argv[2]
else:
name = None
data = find_gce(name=name)
if len(data) > 0:
data = data[0]
print("{}:{}".format(data[0], data[3]))
else:
print("Test GCE")
gce = find_first_gce()
print("Device:", gce)
# teleinfo
print("Teleinfo:")
for key, value in sorted(teleinfo(gce=gce).items()):
text = '?'
for i, j in TELEINFO_ERDF.items():
if key.endswith(i):
text = j
print("%20s : %-14s %s" % (key, value, text))
# compteurs
cpt = compteurs(gce=gce)
print("Compteurs:")
print("%20s : %-14s %s" % ("C1", cpt['Day_C1'], 'Compteur 1'))
print("%20s : %-14s %s" % ("C2", cpt['Day_C2'], 'Compteur 2'))
    # # data summary
# data = donnees(gce=gce)
# for key, value in sorted(data.items()):
# print("%20s : %-14s" % (key, value))
# # status
# data = status(gce=gce)
# for key, value in sorted(data.items()):
# print("%20s : %-14s" % (key, value))
| 24,799
|