| content (string, length 22–815k) | id (int64, 0–4.91M) |
|---|---|
def aggregate_hts(style="all_modes_combined"):
"""Use the 'processed' version of the HTS table to summarize the flows.
Using the 'style' parameter, you can:
- aggregate by mode using 'by_mode'
- aggregate by mode and o&d location
types using 'by_mode_and_location_type'
- aggregate without considering mode,
using the default 'all_modes_combined'
"""
def _use_the_right_query(style: str, query: str) -> str:
"""If the 'style' is 'by_mode':
- add 'mode_agg' into the query
If the 'style' is 'by_mode_and_location_type':
- add 'trip_type' and 'mode_agg' into the query
Otherwise, just return the query as it was originally.
"""
if style == "by_mode":
return query.replace("o_cpa, d_cpa", "o_cpa, d_cpa, mode_agg")
elif style == "by_mode_and_location_type":
return query.replace("o_cpa, d_cpa", "o_cpa, d_cpa, mode_agg, trip_type")
else:
return query
db = db_connection()
all_combos_query = """
select
o_cpa, d_cpa,
count(*) as numtrips_24hr,
sum(compositeweight) as sum_24hr
from hts_2013_processed
where trip_num < 97
group by o_cpa, d_cpa
order by sum(compositeweight) desc
"""
am_query = """
select
o_cpa, d_cpa,
count(*) as numtrips_am,
sum(compositeweight) as sum_am
from hts_2013_processed
where
trip_num < 97
and
time_window like '%%AM%%'
group by o_cpa, d_cpa
"""
pm_query = """
select
o_cpa, d_cpa,
count(*) as numtrips_pm,
sum(compositeweight) as sum_pm
from hts_2013_processed
where
trip_num < 97
and
time_window like '%%PM%%'
group by o_cpa, d_cpa
"""
    # Add the 'mode_agg' (and, for 'by_mode_and_location_type', also 'trip_type') columns as needed
all_combos_query = _use_the_right_query(style, all_combos_query)
am_query = _use_the_right_query(style, am_query)
pm_query = _use_the_right_query(style, pm_query)
    # Also join on those extra columns when we're analyzing by mode (and location type)
join_cols = ["o_cpa", "d_cpa"]
if style == "by_mode":
join_cols.append("mode_agg")
elif style == "by_mode_and_location_type":
join_cols.append("mode_agg")
join_cols.append("trip_type")
# Get the 24-hour totals
df = db.query_as_df(all_combos_query)
# Query and join the AM trips
df_am = db.query_as_df(am_query)
df = pd.merge(df, df_am, how="left", on=join_cols)
# Repeat for the PM trips
df_pm = db.query_as_df(pm_query)
df = pd.merge(df, df_pm, how="left", on=join_cols)
# Save the resulting dataframe back to SQL
new_table_name = f"hts_2013_aggregated_{style}"
db.import_dataframe(df, new_table_name, if_exists="replace")
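# A small standalone illustration (not from the original source) of the query-rewrite
# step used above: because "o_cpa, d_cpa" appears in both the SELECT list and the
# GROUP BY clause, a single str.replace() adds the extra grouping column to both at once.
demo_query = "select o_cpa, d_cpa, count(*) as n from hts_2013_processed group by o_cpa, d_cpa"
print(demo_query.replace("o_cpa, d_cpa", "o_cpa, d_cpa, mode_agg"))
# select o_cpa, d_cpa, mode_agg, count(*) as n from hts_2013_processed group by o_cpa, d_cpa, mode_agg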
| 21,500
|
def pix2sky(shape, wcs, pix, safe=True, corner=False):
"""Given an array of corner-based pixel coordinates [{y,x},...],
return sky coordinates in the same ordering."""
pix = np.asarray(pix).astype(float)
if corner: pix -= 0.5
pflat = pix.reshape(pix.shape[0], -1)
coords = np.asarray(wcsutils.nobcheck(wcs).wcs_pix2world(*(tuple(pflat)[::-1]+(0,)))[::-1])*get_unit(wcs)
coords = coords.reshape(pix.shape)
if safe and not wcsutils.is_plain(wcs):
coords = utils.unwind(coords)
return coords
| 21,501
|
def getRelativeSilenceVideo(videoPath):
"""Function to get relative silence videos before and after each video"""
silVid = ['', '']
vidData = getVideoDataFromPath(videoPath)
videoNameList = videoPath.split('/')
tempVidName = videoNameList[0] + '/' + videoNameList[1] + '/' + videoNameList[2] + '/Silence/sil_{}.mp4'
vidNumber = int((vidData.identifier.split('_')[1]).split('.')[0])
silVid[0] = tempVidName.format(vidNumber * 2)
silVid[1] = tempVidName.format((vidNumber * 2) + 1)
return silVid
| 21,502
|
def phase_space_plot(data_fname="kadowaki2019.tsv",
ned_fname="objsearch_cz2000-12000_500arcmin.txt",
plot_fname="phasespace.pdf",
plot_udgs=True,
local_env=True,
udg_only=True,
mfeat="Re"):
"""
    Creates a phase-space diagram (projected distance vs. recessional velocity) for the Coma field.
    ARGS:
        data_fname  (str):  Input catalog (TSV) providing "ra", "dec", "cz", the column named by
                            `mfeat`, and the environment columns.
        ned_fname   (str):  NED object-search output used for the background Coma galaxies.
        plot_fname  (str):  Output PDF file name.
        plot_udgs   (bool): Whether to overplot the UDG sample.
        local_env   (bool): Use the "LocalEnv" column if True, otherwise "GlobalEnv".
        udg_only    (bool): Restrict the sample (and the plot limits) to UDGs only.
        mfeat       (str):  Column used to sort the sample so the largest objects are plotted first.
    RETURNS:
        None. The figure is saved to `plot_fname`.
"""
coma_vel, coma_dist = load_NED_data(ned_fname)
efeat = 'LocalEnv' if local_env else 'GlobalEnv'
df = read_data(data_fname, udg_only=udg_only, field="Coma")
df = df[["ra", "dec", "cz", mfeat, efeat]].dropna()
df = df.sort_values(by=[mfeat], ascending=False)
# Sort UDGs to Plot Largest First & Smallest Last
separation = get_angular_size(df["ra"], df["dec"], coma_ra, coma_dec)/60 # arcmin
# Select Legend Labels, Marker Sizes & Colors & Shapes
marker_size = 40 # Size of Marker for R_e = 1.0 kpc
small_thres = 1.5 # kpc
large_thres = 3.5 # kpc
label_size = 30
label, color, marker, legend_title = get_label_color_marker(df, efeat)
# Plot Limits
kms_min = 2000 if udg_only else 0
kms_max = 12050 if udg_only else 13000
arcmin_min = 0
arcmin_max = 505 if udg_only else 650
mpc_min = 0
mpc_max = get_physical_size(60*arcmin_max, c*coma_z, H0=h0)
# Plot Phase Space Data
legend_loc = 'lower right' if udg_only else 'upper right'
# Create Figure
plt.clf()
plt.rcParams['savefig.facecolor'] = "1."
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size='28')
# Establish Axis Limits & Labels
fig, ax1 = plt.subplots(figsize=(10,10))
ax2 = ax1.twiny()
ax2.set_xlim(mpc_min, mpc_max)
ax2.set_xlabel("$r_\mathrm{proj} \, (\mathrm{Mpc})$",
size=label_size)
ax1.set_ylabel("$cz \, (\mathrm{km \, s^{-1}})$", size=label_size)
# Plot Splashback Radius
ax2.plot((r_splash,r_splash), [kms_min, kms_max], 'r--', linewidth=3)
plt.minorticks_on()
plt.tick_params(which='both', direction='in', pad=10, width=1.5)
ax2.tick_params(which='major', length=5)
ax2.tick_params(which='minor', length=2)
ax2.xaxis.set_ticks(np.arange(0,16,5))
# Plot Coma's Mean Recessional Velocity & Overlay with Coma Galaxies from NED
ax1.plot([arcmin_min, arcmin_max], (c*coma_z, c*coma_z), # Mean Velocity
'blue', linewidth=2)
ax1.scatter(coma_dist, coma_vel,
s=10, marker='o', c='cornflowerblue',
linewidths=0.3, alpha=0.4) # Coma Galaxies
# Plot UDGs
if plot_udgs:
for idx,sep in enumerate(separation):
ax1.scatter(sep, df["cz"].iloc[idx],
color=color[idx], marker=marker[idx], label=label[idx],
s=marker_size * df["Re"].iloc[idx]**2,
alpha=1,
linewidths=3 if df["Re"].iloc[idx] > large_thres else 0.2,
edgecolors='k')
ax1.set_xlim([arcmin_min, arcmin_max]) #arcmin
ax1.set_ylim([kms_min, kms_max]) #km/s
#ax1.set_ylabel("$cz \, (\mathrm{km \, s^{-1}})$", size=label_size)
ax1.set_xlabel("$r_\mathrm{proj} \, (\mathrm{arcmin})$", size=label_size)
ax1.xaxis.labelpad = 20
plt.minorticks_on()
plt.tick_params(which='both', direction='in', pad=10, width=1.5)
ax1.tick_params(which='major', length=5)
ax1.tick_params(which='minor', length=2)
ax1.xaxis.set_ticks(np.arange(0,505,100))
ax1.yaxis.set_ticks(np.arange(2000,12005,2000))
if plot_udgs:
# Unique Markers in Legend Only (Uses Markers w/o Bold Outline)
handles, labels = ax1.get_legend_handles_labels()
handles = handles[::-1]
labels = labels[::-1]
unique = [(h, l) for i, (h, l) in enumerate(zip(handles, labels)) \
if l not in labels[:i]]
legend = ax1.legend(*zip(*unique), loc=legend_loc,
fancybox=True,
shadow=True,
frameon=True,
prop={'size': 20},
title_fontsize=24,
title=legend_title)
# Set Marker Size in Legend to `small_thres` Size
for legend_handle in legend.legendHandles:
legend_handle._sizes = [marker_size * small_thres**2]
# Sets Axes Line Width
for axis in ['top','bottom','left','right']:
ax1.spines[axis].set_linewidth(1.5)
# Removes Border Whitespace & Save
plt.tight_layout()
plt.savefig(plot_fname, format='pdf')
plt.close()
| 21,503
|
def convert_to_xyxy_coordinates(boxes: tf.Tensor) -> tf.Tensor:
"""Convert boxes to their center coordinates
y_cent, x_cent, h, w -> y_min, x_min, y_max, x_max
Arguments:
- *boxes*: A Tensor of shape [N, ..., (y_cent, x_cent, h, w)]
Returns:
A tensor of shape [N, ..., num_boxes, (y_min, x_min, y_max, x_max)]
"""
y_cent, x_cent, h, w = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
y_min = y_cent - 0.5 * h
x_min = x_cent - 0.5 * w
y_max = y_cent + 0.5 * h
x_max = x_cent + 0.5 * w
return tf.concat([y_min, x_min, y_max, x_max], axis=-1)
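# A quick numeric check (assumes TensorFlow is imported as tf, as the function above requires):
# a box centred at (y=2, x=3) with height 4 and width 6 maps to corners (0, 0, 4, 6).
example_boxes = tf.constant([[2.0, 3.0, 4.0, 6.0]])
print(convert_to_xyxy_coordinates(example_boxes))  # [[0. 0. 4. 6.]]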
| 21,504
|
def debug(args, env, cwd, type_="debug"):
"""Command to debug the firmware with GDB"""
soc = get_soc_name(args.soc)
board = get_board_name(args.board)
if not os.path.exists(f"build/{soc}/{board}/zephyr/zephyr/zephyr.elf"):
raise SystemExit("No Zephyr elf found. Run \"./elements-fpga.py compile " \
f"{args.soc} {args.board} <app>\" before.")
openocd_cwd = os.path.join(cwd, "openocd")
yaml_path = f"../build/{soc}/{board}/zibal/VexRiscv.yaml"
command = ['./src/openocd', '-c', 'set HYDROGEN_CPU0_YAML {}'.format(yaml_path),
'-f', 'tcl/interface/jlink.cfg',
'-f', '../zibal/gdb/hydrogen.cfg']
logging.debug(command)
openocd_process = subprocess.Popen(command, env=env, cwd=openocd_cwd,
stdout=subprocess.DEVNULL)
toolchain = env['ZEPHYR_SDK_INSTALL_DIR']
command = ['{}/riscv64-zephyr-elf/bin/riscv64-zephyr-elf-gdb'.format(toolchain),
'-x', 'zibal/gdb/{}.cmd'.format(type_),
'build/{}/{}/zephyr/zephyr/zephyr.elf'.format(soc, board)]
logging.debug(command)
if type_ == "flash":
gdb_process = subprocess.Popen(command, env=env, cwd=cwd)
time.sleep(15)
gdb_process.terminate()
else:
subprocess.run(command, env=env, cwd=cwd, check=True)
openocd_process.terminate()
| 21,505
|
def ceilo2nc(full_path: str,
output_file: str,
site_meta: dict,
keep_uuid: Optional[bool] = False,
uuid: Optional[str] = None,
date: Optional[str] = None) -> str:
"""Converts Vaisala / Lufft ceilometer data into Cloudnet Level 1b netCDF file.
This function reads raw Vaisala (CT25k, CL31, CL51, CL61-D) and Lufft (CHM15k)
    ceilometer files and writes the data into a netCDF file. Three variants
of the attenuated backscatter are saved in the file:
1. Raw backscatter, `beta_raw`
2. Signal-to-noise screened backscatter, `beta`
3. SNR-screened backscatter with smoothed weak background, `beta_smooth`
    With the CL61-D, `beta_raw` is not saved due to its large file size. Instead, two additional
depolarisation parameters are saved:
1. Signal-to-noise screened depolarisation, `depolarisation`
2. SNR-screened depolarisation with smoothed weak background, `depolarisation_smooth`
Args:
full_path: Ceilometer file name. For Vaisala it is a text file, for CHM15k it is
a netCDF file.
output_file: Output file name, e.g. 'ceilo.nc'.
site_meta: Dictionary containing information about the site and instrument.
Required key value pairs are `name` and `altitude` (metres above mean sea level).
Also 'calibration_factor' is recommended because the default value is probably
incorrect.
        keep_uuid: If True, keeps the UUID of the old file, if that exists. Default is False,
            in which case a new UUID is generated.
uuid: Set specific UUID for the file.
date: Expected date as YYYY-MM-DD of all profiles in the file.
Returns:
UUID of the generated file.
Raises:
RuntimeError: Failed to read or process raw ceilometer data.
Examples:
>>> from cloudnetpy.instruments import ceilo2nc
>>> site_meta = {'name': 'Mace-Head', 'altitude': 5}
>>> ceilo2nc('vaisala_raw.txt', 'vaisala.nc', site_meta)
>>> site_meta = {'name': 'Juelich', 'altitude': 108, 'calibration_factor': 2.3e-12}
>>> ceilo2nc('chm15k_raw.nc', 'chm15k.nc', site_meta)
"""
ceilo_obj = _initialize_ceilo(full_path, date)
logging.debug('reading daily file')
ceilo_obj.read_ceilometer_file(site_meta.get('calibration_factor', None))
if 'cl61' in ceilo_obj.model.lower():
depol_variants = ceilo_obj.calc_depol()
else:
depol_variants = None
beta_variants = ceilo_obj.calc_beta()
_append_data(ceilo_obj, beta_variants, depol_variants)
_append_height(ceilo_obj, site_meta['altitude'])
attributes = output.add_time_attribute(ATTRIBUTES, ceilo_obj.date)
output.update_attributes(ceilo_obj.data, attributes)
return _save_ceilo(ceilo_obj, output_file, site_meta['name'], keep_uuid, uuid)
| 21,506
|
def classify(mapper: object,
files: list or dict,
samples: list = None,
fmt: str = None,
demux: bool = None,
trimsub: str = None,
tree: dict = None,
rankdic: dict = None,
namedic: dict = None,
root: str = None,
ranks: str = None,
rank2dir: dict = None,
outzip: str = None,
uniq: bool = False,
major: int = None,
above: bool = False,
subok: bool = False,
sizes: dict = None,
unasgd: bool = False,
stratmap: dict = None,
chunk: int = None,
cache: int = 1024,
zippers: dict = None,
outcov_dir: str = None) -> dict:
"""Core of the classification workflow.
Parameters
----------
mapper : object
Mapping module (Plain or Ordinal).
files : list or dict
Paths to input alignment files, if multiplexed, or dictionary of file
paths to sample IDs, if per-sample.
samples : list of str, optional
Sample ID list to include.
fmt : str, optional
Format of input alignment file. Options:
- 'b6o': BLAST tabular format.
- 'sam': SAM format.
- 'map': Simple map of query <tab> subject.
If None, program will automatically infer from file content.
demux : bool, optional
        Whether to perform demultiplexing.
trimsub : str, optional
Trim subject IDs at the last given delimiter.
tree : dict, optional
Taxonomic tree.
rankdic : dict, optional
Rank dictionary.
namedic : dict, optional
Taxon name dictionary.
root : str, optional
Root identifier.
    ranks : list of str, optional
List of ranks at each of which sequences are to be classified. Can also
be "none" to omit classification (simply report subject IDs) or "free"
to perform free-rank classification (LCA of subjects regardless of rank
will be reported).
    rank2dir : dict, optional
Write classification map per rank to directory.
outzip : str, optional
Output read map compression method (gz, bz2, xz or None).
uniq : bool, optional
Assignment must be unique. Otherwise, report all possible assignments
and normalize counts (for none- and fixed-rank assignments).
major : int, optional
In given-rank classification, perform majority-rule assignment based on
this percentage threshold. Range: [51, 99].
above : bool, optional
Allow assigning to a classification unit higher than given rank.
subok : bool, optional
In free-rank classification, allow assigning sequences to their direct
subjects instead of higher classification units, if applicable.
sizes : dict, optional
Subject size dictionary.
unasgd : bool, optional
Report unassigned sequences.
stratmap : dict, optional
Map of sample ID to stratification file.
chunk : int, optional
Number of lines per chunk to read from alignment file.
cache : int, optional
LRU cache size for classification results at each rank.
zippers : dict, optional
External compression programs.
    outcov_dir : str, optional
        Write subject coverage maps to this directory.
Returns
-------
dict of dict
Per-rank profiles generated from classification.
Notes
-----
Subject(s) of each query are sorted and converted into a tuple, which is
hashable, a property necessary for subsequent assignment result caching.
"""
data = {x: {} for x in ranks}
# assigners for each rank
assigners = {}
# assignment parameters
kwargs = {'assigners': assigners, 'cache': cache, 'tree': tree, 'rankdic':
rankdic, 'namedic': namedic, 'root': root, 'uniq': uniq,
'major': major and major / 100, 'above': above, 'subok': subok,
'sizes': sizes, 'unasgd': unasgd, 'rank2dir': rank2dir,
'outzip': outzip if outzip != 'none' else None}
# (optional) subject coverage data
covers = {} if outcov_dir else None
# current sample Id
csample = False
# parse input alignment file(s) and generate profile(s)
for fp in sorted(files):
click.echo(f'Parsing alignment file {basename(fp)} ', nl=False)
# read alignment file into query-to-subject(s) map
with readzip(fp, zippers) as fh:
# query and progress counters
nqry, nstep = 0, -1
# parse alignment file by chunk
for qryque, subque in mapper(fh, fmt=fmt, n=chunk):
nqry += len(qryque)
# (optional) demultiplex and generate per-sample maps
rmaps = demultiplex(qryque, subque, samples) if demux else {
files[fp] if files else None: (qryque, subque)}
# (optional) calculate subject coverage
if outcov_dir:
parse_ranges(rmaps, covers)
# assign reads at each rank
for sample, (qryque, subque) in rmaps.items():
# (optional) strip suffixes from subject Ids
subque = deque(map(tuple, map(sorted, strip_suffix(
subque, trimsub) if trimsub else subque)))
# (optional) read strata of current sample into cache
if stratmap and sample != csample:
kwargs['strata'] = read_strata(
stratmap[sample], zippers)
csample = sample
# call assignment workflow for each rank
for rank in ranks:
assign_readmap(
qryque, subque, data, rank, sample, **kwargs)
# show progress
istep = nqry // 1000000 - nstep
if istep:
click.echo('.' * istep, nl=False)
nstep += istep
click.echo(' Done.')
click.echo(f' Number of sequences classified: {nqry}.')
# write coverage maps
if outcov_dir:
click.echo('Calculating per sample coverage...', nl=False)
write_coverage(calc_coverage(covers), outcov_dir)
click.echo(' Done.')
click.echo('Classification completed.')
return data
| 21,507
|
def apply():
"""Run terraform apply. Raises an exception if the Terraform is invalid."""
# Validate and format the terraform files.
os.chdir(TERRAFORM_DIR)
subprocess.check_call(['terraform', 'validate'])
subprocess.check_call(['terraform', 'fmt'])
# Setup the backend if needed and reload modules.
subprocess.check_call(['terraform', 'init'])
# Apply changes.
subprocess.check_call(['terraform', 'apply'])
# A second apply is unfortunately necessary to update the Lambda aliases.
print('\nRe-applying to update Lambda aliases...')
subprocess.check_call(
['terraform', 'apply', '-refresh=false'] + LAMBDA_ALIASES_TERRAFORM_TARGETS)
| 21,508
|
def test_traverse_nonexistent_fk():
"""Comparing with a reverse FK traversal that does not exist for the model."""
user = UserFactory(profile=None)
profile = ProfileFactory(user=UserFactory(profile=None))
user_has_profile = R(profile=profile)
user_has_no_profile = R(profile=None)
# filter() tests
users_with_profile = user_has_profile.filter(user, User.objects.all())
assert user not in users_with_profile
assert profile.user in users_with_profile
users_with_no_profile = user_has_no_profile.filter(user, User.objects.all())
assert user in users_with_no_profile
assert profile.user not in users_with_no_profile
# check() tests
assert not user_has_profile.check(user, user)
assert user_has_profile.check(user, profile.user)
assert user_has_no_profile.check(user, user)
assert not user_has_no_profile.check(user, profile.user)
| 21,509
|
def get_better_loci(filename, cutoff):
"""
Returns a subset of loci such that each locus includes at least "cutoff"
different species.
:param filename:
:param cutoff:
:return:
"""
f = open(filename)
content = f.read()
f.close()
    # Escape the trailing '|': left unescaped, it lets the pattern match the empty string,
    # which makes re.split() break the content at every character on Python 3.7+.
    loci = re.split(r'//.*\|', content)
better_loci = []
for locus in loci:
found_species = set()
for line in locus.strip().split("\n"):
if line == "":
continue
(individual, sequence) = line[1:].split()
found_species.add(individual.split("_")[-1])
if len(found_species) >= cutoff:
better_loci.append(locus)
return better_loci
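# Hypothetical usage sketch; the file layout is inferred from the parser above
# (">individual_species sequence" lines, loci separated by "//...|" lines).
demo_loci = (
    ">ind1_sppA ACGT\n"
    ">ind2_sppB ACGA\n"
    "//              |\n"
    ">ind3_sppA ACGT\n"
    "//              |\n"
)
with open("demo.loci", "w") as out:
    out.write(demo_loci)
print(len(get_better_loci("demo.loci", cutoff=2)))  # 1: only the first locus has two species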
| 21,510
|
def paginate_data(data_list, page=1, per_page=10):
    """Split the data into pages and return the requested page plus pagination metadata."""
pages = int(math.ceil(len(data_list) / per_page))
page = int(page)
per_page = int(per_page)
    has_next = pages > page
    has_prev = 1 < page <= pages
items = data_list[(page-1)*per_page : page*per_page]
return {
"item_list": items,
"page": page,
"total": len(data_list),
"pages": pages,
"has_next": has_next,
"next_num": page + 1 if has_next else None,
"per_page": per_page,
"has_prev": has_prev,
"prev_num": page - 1 if has_prev else None
}
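# Quick sanity check of the pagination helper above (math must be imported in this module):
second_page = paginate_data(list(range(25)), page=2, per_page=10)
# second_page["item_list"] == [10, ..., 19]; second_page["pages"] == 3;
# both second_page["has_next"] and second_page["has_prev"] are True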
| 21,511
|
def get_all_element_frequencies(sequences):
"""
UC Computes the frequencies of each element across a collection of sequences.
"""
    # Minimal implementation consistent with the docstring: tally every element
    # (character, token, etc.) across all of the sequences.
    from collections import Counter
    from itertools import chain
    return Counter(chain.from_iterable(sequences))
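# Usage sketch for the Counter-based implementation above:
assert get_all_element_frequencies(["abca", "abd"]) == {"a": 3, "b": 2, "c": 1, "d": 1}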
| 21,512
|
def writec(s,font,color,text,border=1):
"""write centered text to a surface with a black border
<pre>writec(s,font,color,text,border=1)</pre>
"""
w,h = font.size(text)
x = (s.get_width()-w)/2
y = (s.get_height()-h)/2
write(s,font,(x,y),color,text,border)
| 21,513
|
def setup(bot: Bot) -> None:
"""Sync cog load."""
bot.add_cog(Sync(bot))
log.info("Cog loaded: Sync")
| 21,514
|
def quat_to_rotmat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: size = [B, 4] 4 <===>(w, x, y, z)
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = quat
norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w * x, w * y, w * z
xy, xz, yz = x * y, x * z, y * z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
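# Quick check (assumes torch is imported, as the function above requires): the identity
# quaternion (w=1, x=y=z=0) maps to the 3x3 identity rotation matrix.
identity_quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
print(quat_to_rotmat(identity_quat))  # tensor([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]])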
| 21,515
|
def has_multimethods(cls):
""" Declare class as one that have multimethods."""
for name, obj in cls.__dict__.items():
if isinstance(obj, MethodDispatcher):
obj.proceed_unbound_rules(cls)
return cls
| 21,516
|
def elastic_depth(f, time, method="DP2", lam=0.0, parallel=True):
"""
calculates the elastic depth between functions in matrix f
:param f: matrix of size MxN (M time points for N functions)
:param time: vector of size M describing the sample points
    :param method: method to apply optimization (default="DP2"); options are "DP", "DP2", "RBFGS"
    :param lam: controls the elasticity (default = 0.0)
    :param parallel: compute the pairwise distance matrix in parallel (default = True)
    :rtype: numpy ndarray
:return amp: amplitude depth
:return phase: phase depth
"""
obs, fns = f.shape
amp_dist = zeros((fns,fns))
phs_dist = zeros((fns,fns))
if parallel:
out = Parallel(n_jobs=-1)(delayed(distmat)(f, f[:, n], time, n, method) for n in range(fns))
for i in range(0, fns):
amp_dist[i, :] = out[i][0]
phs_dist[i, :] = out[i][1]
else:
for i in range(0, fns):
amp_dist[i, :], phs_dist[i, :] = distmat(f, f[:, i], time, i, method)
amp_dist = amp_dist + amp_dist.T
phs_dist = phs_dist + phs_dist.T
amp = 1 / (1 + median(amp_dist,axis=0))
phase = 1 / (1 + median(phs_dist,axis=0))
phase = ((2+pi)/pi) * (phase - 2/(2+pi))
return amp, phase
| 21,517
|
def run_coro_thread(func: callable, *args, **kwargs) -> Any:
"""
Run a Python AsyncIO coroutine function within a new event loop using a thread, and return the result / raise any exceptions
    as if it were run normally within an AsyncIO function.
    .. Caution:: If you want to run a coroutine within a thread from an AsyncIO function/method, then you should
use :func:`.run_coro_thread_async` instead, which uses :func:`asyncio.sleep` while waiting for a result/exception
to be transmitted via a queue.
This allows you to run and wait for multiple coroutine threads simultaneously, as there's no synchronous blocking
wait - unlike this function.
This will usually allow you to run coroutines from a synchronous function without running into the dreaded "Event loop is already
running" error - since the coroutine will be ran inside of a thread with it's own dedicated event loop.
**Example Usage**::
>>> async def example_func(lorem: int, ipsum: int):
... if lorem > 100: raise AttributeError("lorem is greater than 100!")
... return f"example: {lorem + ipsum}"
>>> run_coro_thread(example_func, 10, 20)
example: 30
>>> run_coro_thread(example_func, 3, ipsum=6)
example: 9
>>> run_coro_thread(example_func, lorem=40, ipsum=1)
example: 41
>>> run_coro_thread(example_func, 120, 50)
File "", line 2, in example_func
if lorem > 100: raise AttributeError("lorem is greater than 100!")
AttributeError: lorem is greater than 100!
Creates a new :class:`threading.Thread` with the target :func:`.coro_thread_func` (via :func:`.run_coro_thread_base`), passing
the coroutine ``func`` along with the passed positional ``args`` and keyword ``kwargs``, which creates a new event loop, and
then runs ``func`` within that thread event loop.
Uses the private :class:`queue.Queue` threading queue :attr:`._coro_thread_queue` to safely relay back to the calling thread -
either the result from the coroutine, or an exception if one was raised while trying to run the coroutine.
:param callable func: A reference to the ``async def`` coroutine function that you want to run
:param args: Positional arguments to pass-through to the coroutine function
:param kwargs: Keyword arguments to pass-through to the coroutine function
:return Any coro_res: The result returned from the coroutine ``func``
"""
t_co = run_coro_thread_base(func, *args, **kwargs, _output_queue=_coro_thread_queue)
t_co.join()
res = _coro_thread_queue.get(block=True, timeout=10)
if isinstance(res, (Exception, BaseException)):
raise res
return res
| 21,518
|
def test_choice_retries_on_failure(mock_input):
"""
Tests the function will continue to retry until a valid option
has been entered
"""
answer = choice('Choose: ', choices=CHOICES)
assert answer == 'Major'
| 21,519
|
def get_all_ports(entity):
"""
Recursively descends through the entity hierarchy and collects all ports
    defined within the given entity or any of its children.
Parameters
----------
entity : Entity
The root from which to start collecting.
Returns
-------
list of Port
A list of ports within the entity or its children.
"""
return [p for e in get_all_entities(entity) for p in get_ports(e)]
| 21,520
|
def phase_plane_curves(hstar, hustar, state, g=1., wave_family='both', y_axis='u', ax=None,
plot_unphysical=False):
"""
    Plot the curves of points in the h-u or h-hu phase plane that can be
connected to (hstar,hustar).
state = 'qleft' or 'qright' indicates whether the specified state is ql or qr.
wave_family = 1, 2, or 'both' indicates whether 1-waves or 2-waves should be plotted.
Colors in the plots indicate whether the states can be connected via a shock or rarefaction.
"""
if ax is None:
fig, ax = plt.subplots()
h = np.linspace(0, hstar, 200)
if wave_family in [1,'both']:
if state == 'qleft' or plot_unphysical:
u = integral_curve(h, hstar, hustar, 1, g, y_axis=y_axis)
ax.plot(h,u,'b', label='1-rarefactions')
if state == 'qright' or plot_unphysical:
u = hugoniot_locus(h, hstar, hustar, 1, g, y_axis=y_axis)
ax.plot(h,u,'--r', label='1-shocks')
if wave_family in [2,'both']:
if state == 'qleft' or plot_unphysical:
u = hugoniot_locus(h, hstar, hustar, 2, g, y_axis=y_axis)
ax.plot(h,u,'--r', label='2-shocks')
if state == 'qright' or plot_unphysical:
u = integral_curve(h, hstar, hustar, 2, g, y_axis=y_axis)
ax.plot(h,u,'b', label='2-rarefactions')
h = np.linspace(hstar, 3, 200)
if wave_family in [1,'both']:
if state == 'qright' or plot_unphysical:
u = integral_curve(h, hstar, hustar, 1, g, y_axis=y_axis)
ax.plot(h,u,'--b', label='1-rarefactions')
if state == 'qleft' or plot_unphysical:
u = hugoniot_locus(h, hstar, hustar, 1, g, y_axis=y_axis)
ax.plot(h,u,'r', label='1-shocks')
if wave_family in [2,'both']:
if state == 'qright' or plot_unphysical:
u = hugoniot_locus(h, hstar, hustar, 2, g, y_axis=y_axis)
ax.plot(h,u,'r', label='2-shocks')
if state == 'qleft' or plot_unphysical:
u = integral_curve(h, hstar, hustar, 2, g, y_axis=y_axis)
ax.plot(h,u,'--b', label='2-rarefactions')
# plot and label the point (hstar, hustar)
ax.set_xlabel('Depth (h)')
if y_axis == 'u':
ustar = hustar/hstar
ax.set_ylabel('Velocity (u)')
else:
ustar = hustar # Fake it
ax.set_ylabel('Momentum (hu)')
ax.plot([hstar],[ustar],'ko',markersize=5)
ax.text(hstar + 0.1, ustar - 0.2, state, fontsize=13)
| 21,521
|
def rinex_sopac(station, year, month, day):
"""
author: kristine larson
inputs: station name, year, month, day
    picks up a Hatanaka-compressed RINEX file from SOPAC and converts it to an observation (o) file
hatanaka exe hardwired for my machine
"""
exedir = os.environ['EXE']
crnxpath = hatanaka_version()
doy,cdoy,cyyyy,cyy = ymd2doy(year,month,day)
sopac = 'ftp://garner.ucsd.edu'
oname,fname = rinex_name(station, year, month, day)
# compressed version??
file1 = fname + '.Z'
path1 = '/pub/rinex/' + cyyyy + '/' + cdoy + '/'
url1 = sopac + path1 + file1
#file2 = oname + '.Z'
#path2 = '/pub/rinex/' + cyyyy + '/' + cdoy + '/'
#url2 = sopac + path2 + file2
try:
wget.download(url1,file1)
subprocess.call(['uncompress', file1])
subprocess.call([crnxpath, fname])
subprocess.call(['rm', '-f',fname])
print('successful Hatanaka download from SOPAC ')
except:
print('not able to download from SOPAC',file1)
subprocess.call(['rm', '-f',file1])
subprocess.call(['rm', '-f',fname])
| 21,522
|
def deploy(**kwargs):
"""Deploy a PR into a remote server via Fabric"""
return apply_pr(**kwargs)
| 21,523
|
def word_list2tensor(word_list, dictionary):
"""
args
word_list: [batch_size, seq_len, token_id]
dictionary: Dictionary
return
source, target [batch_size, seq_len, token_id]
"""
word_list_padded = add_word_padding(word_list, dictionary)
batch = torch.LongTensor(word_list_padded)
return batch
| 21,524
|
def canonicalize_monotonicity(monotonicity, allow_decreasing=True):
"""Converts string constants representing monotonicity into integers.
Args:
monotonicity: The monotonicities hyperparameter of a `tfl.layers` Layer
(e.g. `tfl.layers.PWLCalibration`).
allow_decreasing: If decreasing monotonicity is considered a valid
monotonicity.
Returns:
monotonicity represented as -1, 0, 1, or None.
Raises:
ValueError: If monotonicity is not in the set
{-1, 0, 1, 'decreasing', 'none', 'increasing'} and allow_decreasing is
True.
ValueError: If monotonicity is not in the set {0, 1, 'none', 'increasing'}
and allow_decreasing is False.
"""
if monotonicity is None:
return None
if monotonicity in [-1, 0, 1]:
if not allow_decreasing and monotonicity == -1:
raise ValueError(
"'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
"Given: {}".format(monotonicity))
return monotonicity
elif isinstance(monotonicity, six.string_types):
if monotonicity.lower() == "decreasing":
if not allow_decreasing:
raise ValueError(
"'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
"Given: {}".format(monotonicity))
return -1
if monotonicity.lower() == "none":
return 0
if monotonicity.lower() == "increasing":
return 1
raise ValueError("'monotonicities' must be from: [-1, 0, 1, 'decreasing', "
"'none', 'increasing']. Given: {}".format(monotonicity))
| 21,525
|
def killIfHasMore(sprite, partner, game, resource, limit=1):
""" If 'sprite' has more than a limit of the resource type given, it dies. """
if sprite.resources[resource] >= limit:
killSprite(sprite, partner, game)
| 21,526
|
def test_pipeline_split_shared_parameter_with_micro_batch_interleaved_stage1_opt_shard():
"""
Feature: test PipelineSplitSharedParameter with MicroBatchInterleaved in auto parallel.
Description: net with MicroBatchInterleaved in semi auto parallel.
Expectation: success.
"""
context.set_auto_parallel_context(device_num=32, global_rank=16, pipeline_stages=2, enable_parallel_optimizer=True)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
data = Tensor(np.ones([32, 64]), dtype=ms.float32)
label = Tensor(np.ones([64, 64]), dtype=ms.float32)
strategy1 = ((16, 1), (1, 1))
strategy2 = ((8, 1), (1, 1))
micro_batch_interleaved = 2
net = PipelineCell(MicroBatchInterleaved(PipelineSplitSharedParam(strategy1, strategy2),
micro_batch_interleaved), 4)
params = net.trainable_params()
dataset = DatasetLenet(data, label, 4)
optimizer = nn.Lamb(params, learning_rate=0.02)
model = Model(net, optimizer=optimizer)
model.train(2, dataset, dataset_sink_mode=False)
| 21,527
|
def add_gdp(df, gdp, input_type="raw", drop=True):
"""Adds the `GDP` to the dataset. Assuming that both passed dataframes have a column named `country`.
Parameters
----------
df : pd.DataFrame
        Training or test dataframe including the `country` column.
gdp : pd.DataFrame
Mapping between `country` and `GDP`
input_type : {"raw", "aggregated"}
Whether the operation should run on the raw, or the aggregated dataset.
drop : bool
        Whether the old country columns should be dropped.
Returns
-------
pd.DataFrame
The passed `df` with a new column corresponding to the mapped GDP.
"""
def stringify(maybe_string):
# Handles Unicode country names like "Côte d’Ivoire" , "Réunion" etc, as well as countries only existing
# in one of the two dataframes.
try:
return str(maybe_string)
except UnicodeEncodeError:
return "Unknown"
if input_type == "aggregated":
country_cols = [col for col in df.columns if col.startswith("country") and col != "country"]
def inverse_ohe(row):
for c in country_cols:
if row[c] == 1:
return c.split("_")[1]
df["country"] = df.apply(inverse_ohe, axis=1)
if drop:
df = df.drop(country_cols, axis=1)
elif input_type != "raw":
msg = "Only {} and {} are supported. \n" + \
"\tThe former assumes the original form where only the JSON has been flattened.\n" + \
"\tThe latter assumes that OHE has already occured on top."
raise ValueError(msg)
df["country"] = df["country"].fillna("Unknown").apply(stringify)
result = df.merge(gdp, on="country", how='left')
if drop:
result.drop("country", axis=1, inplace=True)
return result
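# A minimal sketch of the "raw" path, using hypothetical data and GDP figures
# (assumes pandas is imported as pd, which the function above already requires):
toy_df = pd.DataFrame({"country": ["France", None], "target": [1.0, 2.0]})
toy_gdp = pd.DataFrame({"country": ["France", "Unknown"], "GDP": [2.9e12, float("nan")]})
print(add_gdp(toy_df, toy_gdp))  # keeps `target`, adds `GDP`, drops `country`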
| 21,528
|
def get_options(cmd_args):
""" Argument Parser. """
parser = argparse.ArgumentParser(
prog='activitygen.py', usage='%(prog)s -c configuration.json',
description='SUMO Activity-Based Mobility Generator')
parser.add_argument(
'-c', type=str, dest='config', required=True,
help='JSON configuration file.')
parser.add_argument(
'--profiling', dest='profiling', action='store_true',
help='Enable Python3 cProfile feature.')
parser.add_argument(
'--no-profiling', dest='profiling', action='store_false',
help='Disable Python3 cProfile feature.')
parser.set_defaults(profiling=False)
return parser.parse_args(cmd_args)
| 21,529
|
def validate_partition_manifests(manifests):
"""
Check the correctness of the manifests list
(no conflicts, no missing elements, etc.)
:param manifests: List of the partition manifests
"""
for manifest in manifests:
assert isinstance(manifest, Manifest)
partitions_names = {}
partitions_ids = {}
rot_service_ids = {}
rot_service_names = {}
rot_service_signals = {}
irq_signals = {}
irq_numbers = {}
all_extern_sids = set()
spe_contained_manifests = []
for manifest in manifests:
# Make sure the partition names are unique.
if manifest.name in partitions_names:
raise ValueError(
'Partition name {} is not unique, '
'found in both {} and {}.'.format(
manifest.name,
partitions_names[manifest.name],
manifest.file
)
)
partitions_names[manifest.name] = manifest.file
# Make sure the partition ID's are unique.
if manifest.id in partitions_ids:
raise ValueError(
'Partition id {} is not unique, '
'found in both {} and {}.'.format(
manifest.id,
partitions_ids[manifest.id],
manifest.file
)
)
partitions_ids[manifest.id] = manifest.file
        is_nspe_callable = False
# Make sure all the Root of Trust Service IDs and signals are unique.
for rot_service in manifest.rot_services:
if rot_service.name in rot_service_names:
raise ValueError(
'Root of Trust Service name {} is found '
'in both {} and {}.'.format(
rot_service.name,
rot_service_names[rot_service.name],
manifest.file
)
)
rot_service_names[rot_service.name] = manifest.file
if rot_service.signal in rot_service_signals:
raise ValueError(
'Root of Trust Service signal {} is found '
'in both {} and {}.'.format(
rot_service.signal,
rot_service_signals[rot_service.signal],
manifest.file
)
)
rot_service_signals[rot_service.signal] = manifest.file
if rot_service.numeric_id in rot_service_ids:
raise ValueError(
'Root of Trust Service identifier {} is found '
'in both {} and {}.'.format(
rot_service.numeric_id,
rot_service_ids[rot_service.numeric_id],
manifest.file
)
)
rot_service_ids[rot_service.numeric_id] = manifest.file
            is_nspe_callable |= rot_service.nspe_callable
        if not is_nspe_callable:
spe_contained_manifests.append(manifest)
# Make sure all the IRQ signals and line-numbers are unique.
for irq in manifest.irqs:
if irq.signal in irq_signals:
raise ValueError(
'IRQ signal {} is found in both {} and {}.'.format(
irq.signal,
irq_signals[irq.signal],
manifest.file
)
)
irq_signals[irq.signal] = manifest.file
if irq.line_num in irq_numbers:
raise ValueError(
'IRQ line number {} is found in both {} and {}.'.format(
irq.line_num,
irq_numbers[irq.line_num],
manifest.file
)
)
irq_numbers[irq.line_num] = manifest.file
all_extern_sids.update(manifest.extern_sids)
# Check that all the external SIDs can be found.
declared_sids = set(rot_service_names.keys())
for manifest in manifests:
extern_sids = set(manifest.extern_sids)
if not extern_sids.issubset(declared_sids):
missing_sids = extern_sids.difference(declared_sids)
raise ValueError(
"External SID(s) {} required by {} can't be found in "
"any partition manifest.".format(
', '.join(missing_sids), manifest.file)
)
if check_circular_call_dependencies(manifests):
raise ValueError(
"Detected a circular call dependency between the partitions.")
for manifest in spe_contained_manifests:
rot_services = set([service.name for service in manifest.rot_services])
if not rot_services.intersection(all_extern_sids) and len(
manifest.irqs) == 0:
raise ValueError(
'Partition {} (defined by {}) is not accessible from NSPE '
'and not referenced by any other partition.'.format(
manifest.name,
manifest.file
)
)
| 21,530
|
def partida_7():
"""partida_7"""
check50.run("python3 volleyball.py").stdin("A\nA\nA\nB\nA\nB\nA\nA\nB\nA\nA", prompt=False).stdout("EMPIEZA\nSACA A\nGANA A\nA 1 B 0\nSACA A\nGANA A\nA 2 B 0\nSACA A\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA A\nA 3 B 0\nSACA A\nGANA B\nA 3 B 0\nSACA B\nGANA A\nA 3 B 0\nSACA A\nGANA A\nA 4 B 0\nSACA A\nGANA B\nA 4 B 0\nSACA B\nGANA A\nA 4 B 0\nSACA A\nGANA A\nA 5 B 0\nFINAL", regex=False).exit(0)
| 21,531
|
def test_is_team_guid__false():
""" Test that an invalid team GUID is recognized.
"""
assert not is_team_guid("not a guid")
| 21,532
|
def no_background_patches(threshold=0.4, percentile=99.9):
"""Returns a patch filter to be used by :func:`create_patches` to determine for each image pair which patches
are eligible for sampling. The purpose is to only sample patches from "interesting" regions of the raw image that
actually contain a substantial amount of non-background signal. To that end, a maximum filter is applied to the target image
to find the largest values in a region.
Parameters
----------
threshold : float, optional
Scalar threshold between 0 and 1 that will be multiplied with the (outlier-robust)
maximum of the image (see `percentile` below) to denote a lower bound.
Only patches with a maximum value above this lower bound are eligible to be sampled.
percentile : float, optional
        Percentile value to denote the (outlier-robust) maximum of an image, i.e. it should be close to 100.
Returns
-------
function
Function that takes an image pair `(y,x)` and the patch size as arguments and
returns a binary mask of the same size as the image (to denote the locations
eligible for sampling for :func:`create_patches`). At least one pixel of the
binary mask must be ``True``, otherwise there are no patches to sample.
Raises
------
ValueError
Illegal arguments.
"""
(np.isscalar(percentile) and 0 <= percentile <= 100) or _raise(ValueError())
(np.isscalar(threshold) and 0 <= threshold <= 1) or _raise(ValueError())
from scipy.ndimage.filters import maximum_filter
def _filter(datas, patch_size, dtype=np.float32):
image = datas[0]
if dtype is not None:
image = image.astype(dtype)
# make max filter patch_size smaller to avoid only few non-bg pixel close to image border
patch_size = [(p//2 if p>1 else p) for p in patch_size]
filtered = maximum_filter(image, patch_size, mode='constant')
return filtered > threshold * np.percentile(image,percentile)
return _filter
| 21,533
|
def start_thread():
"""Start new thread with or without first comment."""
subject = request.form.get('subject') or ''
comment = request.form.get('comment') or ''
if not subject:
return error('start_thread:subject')
storage.start_thread(g.username, subject, comment)
flash('New Thread Started: {0}'.format(subject), 'success')
return to_threads()
| 21,534
|
def copyrotateimage(srcpath, dstpath, rotate=False, deletesource=False):
"""
Copying with rotation: Copies a TIFF image from srcpath to dstpath,
rotating the image by 180 degrees if specified.
"""
    # Handles special case where source path and destination path are the same
    if srcpath == dstpath:
        if not rotate:
            # Then there's nothing to do
            return
        else:
            # Move file to temporary location before continuing
            srcpath = srcpath + str(uuid.uuid4())
            shutil.move(dstpath, srcpath)
            deletesource = True
    if not rotate:
        shutil.copy(srcpath, dstpath, follow_symlinks=True)
    else:
        driver = gdal.GetDriverByName('GTiff')  # Raster driver for the GeoTIFF format
        tilefile = gdal.Open(srcpath)  # Open the file holding the map tile
        copyfile = driver.CreateCopy(dstpath, tilefile, strict=0)  # Create a copy, though not an exact 1:1 one
        numbands = copyfile.RasterCount  # Number of colour bands (1 = greyscale, 3 = RGB)
        for bandnum in range(1, numbands + 1):
            banddata = tilefile.GetRasterBand(bandnum).ReadAsArray()
            banddata = np.fliplr(np.flipud(banddata))  # 180 deg rotation
            copyfile.GetRasterBand(bandnum).WriteArray(banddata)
        copyfile.FlushCache()
        copyfile = None
        tilefile = None
    if deletesource:
        os.remove(srcpath)
| 21,535
|
def main_sim_multi(cor = 0.75, rs = 0.5):
"""
multitask simulated data
"""
dic1, rel1, turk1, dic2, rel2, turk2 = simulate_multitask(cor)
lc1 = crowd_model.labels_collection(turk1, rel1)
lc2 = crowd_model.labels_collection(turk2, rel2)
for rs in [0.1, 0.2, 0.3, 0.4, 0.5]:
res = main_multitask([lc1, lc2], [dic1, dic2], rs)
    import pickle
    # pickle needs a binary-mode file handle on Python 3
    with open('simult_' + str(cor) + '.pkl', 'wb') as f:
        pickle.dump(res, f)
| 21,536
|
def get_package_data(name, package=None):
"""Retrieve metadata information for the given package name"""
if not package:
package = models.Package(name=name)
releases = {}
else:
releases = package.get_all_releases()
client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
versions = client.package_releases(package.name, True)
# package_releases() method is case-sensitive, if nothing found
# then we search for it
# XXX: Ask pypi to make it case-insensitive?
if not versions:
for item in client.search({'name': name}):
if name.lower() == item['name'].lower():
package.name = name = item['name']
break
else:
logger.info("No packages found matching %r", name)
return
# Retry retrieving the versions with the new/correct name
versions = client.package_releases(package.name, True)
# Save the package if it is new
if not package.pk:
package.save()
for version in versions:
release, files = releases.get(version, (None, {}))
if not release:
release = models.Release(package=package, version=version)
release.save()
data = client.release_data(package.name, release.version)
release_form = forms.PypiReleaseDataForm(data, instance=release)
if release_form.is_valid():
release_form.save()
release_files = client.package_urls(package.name, release.version)
for info in release_files:
release_file = files.get(info['filename'])
if not release_file:
release_file = models.ReleaseFile(
release=release, filename=info['filename'])
release_file.python_version = info['python_version']
release_file.filetype = info['packagetype']
release_file.url = info['url']
release_file.size = info['size']
release_file.md5_digest = info['md5_digest']
release_file.save()
package.update_timestamp = now()
package.save()
return package
| 21,537
|
def _extract_xbstream(
input_stream, working_dir, xbstream_binary=XBSTREAM_BINARY
):
"""
Extract xbstream stream in directory
:param input_stream: The stream in xbstream format
:param working_dir: directory
:param xbstream_binary: Path to xbstream
:return: True if extracted successfully
"""
try:
cmd = [xbstream_binary, "-x"]
LOG.debug("Running %s", " ".join(cmd))
LOG.debug("Working directory: %s", working_dir)
LOG.debug("Xbstream binary: %s", xbstream_binary)
proc = Popen(
cmd, stdin=input_stream, stdout=PIPE, stderr=PIPE, cwd=working_dir
)
cout, cerr = proc.communicate()
ret = proc.returncode
if ret:
LOG.error("%s exited with code %d", " ".join(cmd), ret)
if cout:
LOG.error("STDOUT: %s", cout)
if cerr:
LOG.error("STDERR: %s", cerr)
return ret == 0
except OSError as err:
raise TwinDBBackupError("Failed to extract xbstream: %s" % err) from err
| 21,538
|
def info(*k, **kw):
"""
Wraps logging.info
"""
log.info(*k, **kw)
| 21,539
|
def describe_default_cluster_parameters(ParameterGroupFamily=None, MaxRecords=None, Marker=None):
"""
Returns a list of parameter settings for the specified parameter group family.
For more information about parameters and parameter groups, go to Amazon Redshift Parameter Groups in the Amazon Redshift Cluster Management Guide .
See also: AWS API Documentation
:example: response = client.describe_default_cluster_parameters(
ParameterGroupFamily='string',
MaxRecords=123,
Marker='string'
)
:type ParameterGroupFamily: string
:param ParameterGroupFamily: [REQUIRED]
The name of the cluster parameter group family.
:type MaxRecords: integer
:param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.
Default: 100
Constraints: minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeDefaultClusterParameters request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
:rtype: dict
:return: {
'DefaultClusterParameters': {
'ParameterGroupFamily': 'string',
'Marker': 'string',
'Parameters': [
{
'ParameterName': 'string',
'ParameterValue': 'string',
'Description': 'string',
'Source': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'ApplyType': 'static'|'dynamic',
'IsModifiable': True|False,
'MinimumEngineVersion': 'string'
},
]
}
}
"""
pass
| 21,540
|
def execute_experiment_two_files_bin(result: dict,
experiment_id: str,
compression_method: str,
nsnps: int,
nsamples: int,
N: int = 1) -> None:
"""Executes experiment for file types with two files (map, sample)
Warning: this function uses variables defined outside its scope
Args:
result (dict): Dictionary for experiment. Values will be assigned
to this dictionary "by reference".
"""
nsnps_id = nsnps_ids[nsnps]
nsamples_id = nsamples_ids[nsamples]
print("Starting Experiment " + experiment_id + " (" + nsnps_id +
" SNPs, " + nsamples_id + " individuals) with N = " + str(N) +
"; Compression method: " + compression_method)
# get filenames
f_ext: dict = exps[experiment_id]['file_extensions']
mfname = str(data_dir + 'out_' + nsnps_id + '_' + nsamples_id +
f_ext['map'])
pfname = str(data_dir + 'out_' + nsnps_id + '_' + nsamples_id +
f_ext['ped'])
ifname = str(data_dir + 'out_' + nsnps_id + '_' + nsamples_id +
f_ext['ids'])
# Setting up result dictionary
result['fsize'] = [] # file size
result['dbsize'] = [] # doc size in db
result['time'] = [] # insertion time
result['summarize'] = [] # summarization example and time
result['individuals_of_snps'] = []
result['delete_individual'] = []
# * Performing experiment N times and storing results
for i in range(N):
print("i: " + str(i))
print("Resetting database...")
reset_db(compression_method=compression_method)
print("Database reset operation successful.")
print("Generating input files...")
t_map: float = 0.0
t_sample: float = 0.0
file_size: float = 0.0
# * Generating input files
# If less than 10000 samples, generate in one file
# Else, generate blocks of up to 10000 samples
n_blocks: int = int(np.ceil(nsamples / 10000))
remaining_samples: int = nsamples
start_sample: int = 1
# Map file
generate_random_file(filename=mfname,
file_type=f_ext['map'],
verbose=True,
n=nsnps)
# start_from_id=start_map)
# Importing map file
with open(mfname, 'rb') as mf:
t_tmp: float = time.time()
snpdb.insert_file(mf,
map_name=experiment_id + '_' + nsnps_id + '_' +
nsamples_id,
file_type=f_ext['map'])
t_tmp = time.time() - t_tmp
t_map += t_tmp
        for block in range(n_blocks):
            print("Block: " + str(block))
nsamples_block = int(np.minimum(remaining_samples, 10000.0))
# Samples file
generate_random_file(filename=pfname,
file_type=f_ext['ped'],
verbose=True,
n=nsamples_block,
map_size=nsnps,
start_from_id=start_sample)
# Id map file
generate_random_file(filename=ifname,
file_type='.ids',
verbose=True,
n=nsamples_block,
first_sample_id=start_sample)
start_sample += nsamples_block
remaining_samples -= nsamples_block
# Importing sample file
id_map: dict = {}
# Linking samples to individuals in the database
if ifname is not None:
with open(ifname, "r") as f:
for line in f:
(sample, individual) = line.split()
id_map[sample] = individual
with open(pfname, 'rb') as pf:
t_tmp = time.time()
snpdb.insert_file(pf,
map_name=experiment_id + '_' + nsnps_id +
'_' + nsamples_id,
file_type=f_ext['ped'])
t_tmp = time.time() - t_tmp
t_sample += t_tmp
with open(ifname, 'rb') as iff:
t_tmp = time.time()
snpdb.insert_file(iff,
map_name=experiment_id + '_' + nsnps_id +
'_' + nsamples_id,
file_type='.ids')
t_tmp = time.time() - t_tmp
t_sample += t_tmp
# Validating Statistics
snpdb._db.command("validate", "fs.chunks", full=True)
snpdb._db.command("validate", "fs.files", full=True)
file_size = snpdb._db.command("collstats", "fs.chunks")["storageSize"]
# file_size = snpdb._db["fs.files"].find_one()["length"]
t: float = t_map + t_sample
print("Imported file file\tTime: " + str(round(t, 3)) + "s\tSize:" +
str(round(file_size / 1024**2, 2)) + "MB")
# Appending generated file sizes
result['fsize'].append(
float(os.stat(mfname).st_size) + float(os.stat(pfname).st_size) +
float(os.stat(ifname).st_size))
# Appending stored document sizes from MongoDB
result['dbsize'].append(file_size)
# Appending insertion times
result['time'].append(t_map + t_sample)
# Writing partial results to file
with open(results_fname, 'w') as f:
json.dump(results,
f,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=1,
sort_keys=True)
| 21,541
|
def load_version(pkg_dir, pkg_name):
"""Load version from variable __version__ in file __init__.py with a regular expression"""
try:
filepath_init = path.join(pkg_dir, pkg_name, '__init__.py')
file_content = read_file(filepath_init)
re_for_version = re.compile(r'''__version__\s+=\s+['"](.*)['"]''')
match = re_for_version.search(file_content)
version = match.group(1)
return version
except Exception:
raise ValueError('Version could not be read from variable __version__ in file __init__.py')
| 21,542
|
def feedforward(
inputs,
input_dim,
hidden_dim,
output_dim,
num_hidden_layers,
hidden_activation=None,
output_activation=None):
"""
Creates a dense feedforward network with num_hidden_layers layers where each layer
has hidden_dim number of units except for the last layer which has output_dim number of units.
Arguments:
        inputs: Tensor input.
        input_dim: The number of units of the input, i.e. the size of its last dimension.
hidden_dim: The number of units in each hidden layer.
output_dim: The number of units in the output layer.
num_hidden_layers: The number of hidden layers.
hidden_activation: The activation function of hidden layers.
Set it to None to use a linear activation.
output_activation: The activation function of the output layer.
Set it to None to use a linear activation.
Returns:
Output tensor.
"""
prev_input_dim = input_dim
prev_output = inputs
for i in range(0, num_hidden_layers):
with tf.variable_scope("dense" + str(i)):
w_n = tf.get_variable("w_" + str(i), [prev_input_dim, hidden_dim], initializer=tf.initializers.random_normal(0, 1))
b_n = tf.get_variable("b_" + str(i), [hidden_dim], initializer=tf.initializers.random_normal(0, 1))
prev_input_dim = hidden_dim
            z_n = tf.matmul(prev_output, w_n) + b_n
            # A hidden_activation of None means a linear activation, as documented above.
            prev_output = hidden_activation(z_n) if hidden_activation is not None else z_n
with tf.variable_scope("dense_output"):
return tf.layers.dense(prev_output, output_dim, activation=output_activation)
| 21,543
|
def test_gradient_accumulation_multi_batch(tmpdir, explicit_loops):
"""
from _base: increase batches per step and number of steps
"""
for graph_runner in [run_mm_graph, run_complex_graph]:
np.random.seed(1234)
label_array = np.random.randint(0, hidden_size, batch_size)
accl_initial_proto, accl_proto_filename, accl_anchor_arrays = run_mm_graph(
sgd_optimizer,
label_array=label_array,
accum_factor=4,
enable_accum=True,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "accl5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
no_accl_initial_proto, no_accl_proto_filename, no_accl_anchor_arrays = run_mm_graph(
sgd_optimizer,
label_array=label_array,
accum_factor=1,
enable_accum=False,
batches_per_step=5,
number_of_steps=3,
final_proto_filename=os.path.join(tmpdir, "noAccl5batches3steps"),
enable_multi_ipu=False,
full_anchorage=False,
explicit_loops=explicit_loops)
check_models(accl_initial_proto, accl_proto_filename,
no_accl_proto_filename)
| 21,544
|
def _test():
"""
>>> solve("axyb", "abyxb")
axb
"""
global chr
import doctest
def chr(x): return x
doctest.testmod()
| 21,545
|
def thumbnail_image(url, size=(64, 64), format='.png'):
""" Convert image to a specific format """
im = Image.open(urllib.request.urlopen(url))
# filename is last part of URL minus extension + '.format'
pieces = url.split('/')
filename = ''.join((pieces[-2], '_', pieces[-1].split('.')[0], '_thumb', format))
im.thumbnail(size, Image.ANTIALIAS)
im.save(filename)
print('Saved', filename)
| 21,546
|
def _basic_rebuild_chain(target: database.Target) -> RebuildChain:
"""
Get a rebuild chain based purely on 'rebuild info' from Jam.
"""
chain: RebuildChain = [(target, None)]
current: Optional[database.Target] = target
assert current is not None
while True:
reason = current.rebuild_reason
current = current.rebuild_reason_target
if current is None:
break
else:
chain.append((current, reason))
return chain
| 21,547
|
def test_md013_good_indented_code_block():
"""
Test to make sure this rule does not trigger with a document that
contains a long line within an indented code block, but with the
code_block value turned off.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md013/good_indented_code_block.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
| 21,548
|
def ravel_group_params(parameters_group):
"""Take a dict(group -> {k->p}) and return a dict('group:k'-> p)
"""
return {f'{group_name}:{k}': p
for group_name, group_params in parameters_group.items()
for k, p in group_params.items()}
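# Example of the flattening performed above:
grouped = {"encoder": {"w": 1, "b": 2}, "decoder": {"w": 3}}
assert ravel_group_params(grouped) == {"encoder:w": 1, "encoder:b": 2, "decoder:w": 3}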
| 21,549
|
def dir_manager(
path: ty.Optional[ty.Union[pathlib.Path, str]] = None, cleanup=None
) -> ty.Generator[pathlib.Path, None, None]:
"""A context manager to deal with a directory, default to a self-destruct temp one."""
if path is None:
d_path = pathlib.Path(tempfile.mkdtemp())
if cleanup is None:
cleanup = True
else:
d_path = pathlib.Path(path).resolve()
d_path.mkdir(parents=True, exist_ok=True)
if cleanup is None:
cleanup = False
elif cleanup:
if d_path.glob("*"):
raise ValueError(f"{d_path} is not empty.")
try:
yield d_path
finally:
if cleanup:
shutil.rmtree(d_path)
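# Usage sketch, assuming the generator above is wrapped with contextlib.contextmanager
# elsewhere in the module (the decorator is not shown here):
#
#     with dir_manager() as tmp_dir:                      # self-destructing temp directory
#         (tmp_dir / "scratch.txt").write_text("hello")
#     with dir_manager("output", cleanup=False) as out_dir:
#         ...                                             # "output" is created and kept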
| 21,550
|
def decode_orders(game, power_name, dest_unit_value, factors):
""" Decode orders from computed factors
:param game: An instance of `diplomacy.Game`
:param power_name: The name of the power we are playing
:param dest_unit_value: A dict with unit as key, and unit value as value
:param factors: An instance of `Factors`
:return: A list of orders
:type factors: Factors
:type game: diplomacy.Game
"""
phase_type = game.get_current_phase()[-1]
# Movement phase
if phase_type == 'M':
return generate_movement_orders(game, power_name, dest_unit_value, factors)
    # Retreat phase
if phase_type == 'R':
return generate_retreat_orders(game, power_name, dest_unit_value)
# Adjustment
if phase_type == 'A':
power = game.get_power(power_name)
nb_builds = len(power.centers) - len(power.units)
# Building
if nb_builds >= 0:
return generate_build_orders(game, power_name, dest_unit_value)
# Disbanding
return generate_disband_orders(game, power_name, dest_unit_value)
# Otherwise, invalid phase_type
LOGGER.error('Invalid phase type. Got %s. Expected M, R, A', phase_type)
return []
| 21,551
|
def _convert_to_type(se, allow_any=False, allow_implicit_tuple=False):
""" Converts an S-Expression representing a type, like (Vec Float) or (Tuple Float (Vec Float)),
into a Type object, e.g. Type.Tensor(1,Type.Float) or Type.Tuple(Type.Float, Type.Tensor(1,Type.Float)).
If allow_implicit_tuple is true, also converts a list of types into a Tuple, e.g.
(Float (Vec Float)) becomes Type.Tuple(Type.Float, Type.Tensor(1,Type.Float)), i.e. as if
the S-Expression began with an extra "Tuple".
"""
while isinstance(se, list) and len(se)==1:
se=se[0] # Discard ((pointless)) brackets
if isinstance(se, sexpdata.Symbol):
if se.value() == "Any" and allow_any: return None
return Type(se.value())
if isinstance(se, list) and len(se)>0:
if isinstance(se[0], sexpdata.Symbol):
sym = se[0].value()
if sym == "Tensor" and len(se) == 3:
assert se[1] == 1, "Only 1D 'Tensor's ('Vec's) supported"
return Type.Tensor(1, _convert_to_type(se[2]))
children = [_convert_to_type(s) for s in se[1:]]
if sym == "Vec" and len(se)==2:
return Type.Tensor(1, utils.single_elem(children))
if sym == "Tuple":
return Type.Tuple(*children)
# Fall through in case it's a list of types with allow_implicit_tuple.
if allow_implicit_tuple:
return Type.Tuple(*[_convert_to_type(s) for s in se])
raise ValueError("Did not know how to parse type {}".format(se))
| 21,552
|
def index():
"""Returns a 200, that's about it!!!!!!!"""
return 'Wow!!!!!'
| 21,553
|
def file_sort_key(file):
"""Calculate the sort key for ``file``.
:param file: The file to calculate the sort key for
:type file: :class:`~digi_edit.models.file.File`
:return: The sort key
:rtype: ``tuple``
"""
path = file.attributes['filename'].split(os.path.sep)
path_len = len(path)
key = []
for idx, element in enumerate(path):
if idx < path_len - 1:
key.append((1, element))
else:
key.append((0, element))
return tuple(key)
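
# Usage sketch with a minimal stand-in for the File model (assumption: only the
# 'filename' attribute is needed by the key function).
import os

class _FakeFile:
    def __init__(self, filename):
        self.attributes = {'filename': filename}

files = [_FakeFile(os.path.join('docs', 'b.txt')), _FakeFile('docs'), _FakeFile('a.txt')]
print([f.attributes['filename'] for f in sorted(files, key=file_sort_key)])
# -> ['a.txt', 'docs', 'docs/b.txt']: terminal entries sort ahead of paths that
#    continue deeper through the same directory name.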
| 21,554
|
def merge_tables(pulse_data, trial_data, merge_keys=TRIAL_GROUPER):
"""Add trial-wise information to the pulse-wise table."""
pulse_data = pulse_data.merge(trial_data, on=merge_keys)
add_kernel_data(pulse_data)
return pulse_data
| 21,555
|
def writeFFDFile(fileName, nBlocks, nx, ny, nz, points):
"""
Take in a set of points and write the plot 3dFile
"""
f = open(fileName, "w")
f.write("%d\n" % nBlocks)
for i in range(nBlocks):
f.write("%d %d %d " % (nx[i], ny[i], nz[i]))
# end
f.write("\n")
for block in range(nBlocks):
for k in range(nz[block]):
for j in range(ny[block]):
for i in range(nx[block]):
f.write("%f " % points[block][i, j, k, 0])
# end
# end
# end
f.write("\n")
for k in range(nz[block]):
for j in range(ny[block]):
for i in range(nx[block]):
f.write("%f " % points[block][i, j, k, 1])
# end
# end
# end
f.write("\n")
for k in range(nz[block]):
for j in range(ny[block]):
for i in range(nx[block]):
f.write("%f " % points[block][i, j, k, 2])
# end
# end
# end
# end
f.close()
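
# Usage sketch (assumption: points are indexed as points[block][i, j, k, xyz],
# matching the loops above). Writes a single 2x2x2 FFD block in plot3d format.
import numpy as np

pts = np.arange(2 * 2 * 2 * 3, dtype=float).reshape(2, 2, 2, 3)
writeFFDFile("ffd_single_block.xyz", 1, [2], [2], [2], [pts])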
| 21,556
|
def eos_deriv(beta, g):
""" compute d E_os(beta)/d beta from polynomial expression"""
x = np.tan(beta/2.0)
y = g[4] + x * g[3] + x*x * g[2] + x*x*x*g[1] + x*x*x*x*g[0]
y = y / ((1.0 + x*x)*(1.0 + x*x)*(1.0 + x*x))
return y
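
# Usage sketch. The coefficient vector g is indexed g[0]..g[4] (highest order
# first in the polynomial above); the values below are arbitrary placeholders.
import numpy as np

g = np.array([0.1, -0.2, 0.3, 0.05, 1.0])  # assumption: illustrative coefficients only
print(eos_deriv(np.pi / 3, g))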
| 21,557
|
def optimizeAngle(angle):
"""
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
    we shorten the number to one in the range [-90, 270[.
"""
# First, we put the new angle in the range ]-360, 360[.
# The modulo operator yields results with the sign of the
# divisor, so for negative dividends, we preserve the sign
# of the angle.
if angle < 0:
angle %= -360
else:
angle %= 360
# 720 degrees is unnecessary, as 360 covers all angles.
# As "-x" is shorter than "35x" and "-xxx" one character
# longer than positive angles <= 260, we constrain angle
# range to [-90, 270[ (or, equally valid: ]-100, 260]).
if angle >= 270:
angle -= 360
elif angle < -90:
angle += 360
return angle
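
# Quick usage sketch: a few rotations and their shortened equivalents.
for a in (450, 350, 270, -110):
    print(a, "->", optimizeAngle(a))
# 450 -> 90, 350 -> -10, 270 -> -90, -110 -> 250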
| 21,558
|
def process_one_name(stove_name):
"""
Translates a single PokerStove-style name of holecards into an
expanded list of pokertools-style names.
For example:
"AKs" -> ["Ac Kc", "Ad Kd", "Ah Kh", "As Ks"]
"66" -> ["6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d"]
"""
if len(stove_name) == 3:
rank1, rank2, suit_mark = stove_name
if suit_mark == "s":
return [
"{}{} {}{}".format(rank1, suit, rank2, suit)
for suit in SUITS
]
elif suit_mark == "o":
return [
"{}{} {}{}".format(rank1, suit1, rank2, suit2)
for suit1, suit2 in SUIT_PERMUATIONS
]
else:
raise TokeniserError("incorrect suit_mark in stove_name: {}".format(stove_name))
else:
rank1, rank2 = stove_name
if rank1 == rank2:
return [
"{}{} {}{}".format(rank1, suit1, rank2, suit2)
for suit1, suit2 in SUIT_COMBINATIONS
]
else:
raise TokeniserError("rank1 != rank2 in stove_name: {}".format(stove_name))
| 21,559
|
def job_dispatch(results, job_id, batches):
"""
Process the job batches one at a time
When there is more than one batch to process, a chord is used to delay the
execution of remaining batches.
"""
batch = batches.pop(0)
info('dispatching job_id: {0}, batch: {1}, results: {2}'.format(job_id, batch, results))
tasks = [job_worker.subtask((job_id, task_num)) for task_num in batch]
# when there are other batches to process, use a chord to delay the
# execution of remaining tasks, otherwise, finish off with a TaskSet
if batches:
info('still have batches, chording {0}'.format(batches))
callback = job_dispatch.subtask((job_id, batches))
return chord(tasks)(callback)
else:
info('only batch, calling TaskSet')
return TaskSet(tasks=tasks).apply_async()
| 21,560
|
def get_temperature():
"""
Serves temperature data from the database, in a simple html format
"""
logger = logging.getLogger("logger")
#sqlite handler
sql_handler = SQLiteHandler()
logger.addHandler(sql_handler)
logger.setLevel(logging.INFO)
con = sqlite3.connect(db)
cur = con.cursor()
cur.execute("select * from temperatures")
rows = cur.fetchall()
cur.close()
logger.info("Temperatures data was requested.")
return render_template("temp.html", rows=rows)
| 21,561
|
def command_result_processor_category_empty(command_category):
"""
Command result message processor if a command category is empty.
Parameters
----------
command_category : ``CommandLineCommandCategory``
Respective command category.
Returns
-------
message : `str`
"""
command_full_name = ''.join(command_category._trace_back_name())
message_parts = []
message_parts.append('Command category: ')
    message_parts.append(repr(command_full_name))
    message_parts.append(' has no direct command, nor any sub commands registered.\n')
return ''.join(message_parts)
| 21,562
|
def _parse_path(**kw):
"""
Parse leaflet `Path` options.
http://leafletjs.com/reference-1.2.0.html#path
"""
color = kw.pop('color', '#3388ff')
return {
'stroke': kw.pop('stroke', True),
'color': color,
'weight': kw.pop('weight', 3),
'opacity': kw.pop('opacity', 1.0),
'lineCap': kw.pop('line_cap', 'round'),
'lineJoin': kw.pop('line_join', 'round'),
'dashArray': kw.pop('dash_array', None),
'dashOffset': kw.pop('dash_offset', None),
'fill': kw.pop('fill', False),
'fillColor': kw.pop('fill_color', color),
'fillOpacity': kw.pop('fill_opacity', 0.2),
'fillRule': kw.pop('fill_rule', 'evenodd'),
'bubblingMouseEvents': kw.pop('bubbling_mouse_events', True),
}
| 21,563
|
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
| 21,564
|
def create_app(config_name: str) -> Flask:
"""Create the Flask application
Args:
config_name (str): Config name mapping to Config Class
Returns:
[Flask]: Flask Application
"""
from app.config import config_by_name
from app.models import User, Iris
from app.controllers import user_api, iris_api, default
# Create the app
app = Flask(__name__)
# Log the current config name being used and setup app with the config
app.logger: Logger
app.logger.debug(f"CONFIG NAME: {config_name}")
config = config_by_name[config_name]
app.config.from_object(config)
# Initialize the database
db.init_app(app)
# Initialize Rest+ API
api.init_app(app)
api.add_namespace(user_api, path="/user")
api.add_namespace(iris_api, path="/iris")
# Initialize the flask-praetorian instance for the app
guard.init_app(app, User)
return app
| 21,565
|
def load_module():
"""This function loads the module and returns any errors that occur in the process."""
proc = subprocess.Popen(["pactl", "load-module", "module-suspend-on-idle"], stderr=subprocess.PIPE)
stderr = proc.communicate()[1].decode("UTF-8")
return stderr
| 21,566
|
def time_struct_2_datetime(
time_struct: Optional[time.struct_time],
) -> Optional[datetime]:
"""Convert struct_time to datetime.
Args:
time_struct (Optional[time.struct_time]): A time struct to convert.
Returns:
Optional[datetime]: A converted value.
"""
return (
datetime.fromtimestamp(time.mktime(time_struct))
if time_struct is not None
else None
)
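
# Usage sketch, assuming the snippet's module-level imports (time, datetime,
# typing.Optional) are present as in its home module.
import time

print(time_struct_2_datetime(time.localtime()))  # current time as a datetime
print(time_struct_2_datetime(None))              # None passes straight through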
| 21,567
|
def _parse_input():
"""
A function for handling terminal commands.
:return: The path to the experiment configuration file.
"""
parser = argparse.ArgumentParser(description='Performs CNN analysis according to the input config.')
parser.add_argument('-i', '--experiments_file', default='experiments_config.json', type=str,
help='A path to the experiments config file.')
args = parser.parse_args()
experiments_config_path = args.experiments_file
return experiments_config_path
| 21,568
|
def independent_interdomain_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, full_output_cov=False,
q_sqrt=None, white=False):
"""
The inducing outputs live in the g-space (R^L).
Interdomain conditional calculation.
:param Kmn: M x L x N x P
:param Kmm: L x M x M
:param Knn: N x P or N x N or P x N x N or N x P x N x P
:param f: data matrix, M x L
:param q_sqrt: L x M x M or M x L
:param full_cov: calculate covariance between inputs
:param full_output_cov: calculate covariance between outputs
:param white: use whitened representation
:return:
- mean: N x P
- variance: N x P, N x P x P, P x N x N, N x P x N x P
"""
logger.debug("independent_interdomain_conditional")
M, L, N, P = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)]
Lm = tf.cholesky(Kmm) # L x M x M
# Compute the projection matrix A
Kmn = tf.reshape(tf.transpose(Kmn, (1, 0, 2, 3)), (L, M, N * P))
A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # L x M x M * L x M x NP -> L x M x NP
Ar = tf.reshape(A, (L, M, N, P))
# compute the covariance due to the conditioning
if full_cov and full_output_cov:
fvar = Knn - tf.tensordot(Ar, Ar, [[0, 1], [0, 1]]) # N x P x N x P
elif full_cov and not full_output_cov:
At = tf.reshape(tf.transpose(Ar), (P, N, M * L)) # P x N x ML
fvar = Knn - tf.matmul(At, At, transpose_b=True) # P x N x N
elif not full_cov and full_output_cov:
At = tf.reshape(tf.transpose(Ar, [2, 3, 1, 0]), (N, P, M * L)) # N x P x ML
fvar = Knn - tf.matmul(At, At, transpose_b=True) # N x P x P
elif not full_cov and not full_output_cov:
fvar = Knn - tf.reshape(tf.reduce_sum(tf.square(A), [0, 1]), (N, P)) # Knn: N x P
# another backsubstitution in the unwhitened case
if not white:
A = tf.matrix_triangular_solve(Lm, Ar) # L x M x M * L x M x NP -> L x M x NP
Ar = tf.reshape(A, (L, M, N, P))
fmean = tf.tensordot(Ar, f, [[1, 0], [0, 1]]) # N x P
if q_sqrt is not None:
if q_sqrt.shape.ndims == 3:
Lf = tf.matrix_band_part(q_sqrt, -1, 0) # L x M x M
LTA = tf.matmul(Lf, A, transpose_a=True) # L x M x M * L x M x NP -> L x M x NP
else: # q_sqrt M x L
LTA = (A * tf.transpose(q_sqrt)[..., None]) # L x M x NP
if full_cov and full_output_cov:
LTAr = tf.reshape(LTA, (L * M, N * P))
fvar = fvar + tf.reshape(tf.matmul(LTAr, LTAr, transpose_a=True), (N, P, N, P))
elif full_cov and not full_output_cov:
LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [2, 0, 1]) # P x LM x N
fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # P x N x N
elif not full_cov and full_output_cov:
LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [1, 0, 2]) # N x LM x P
fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # N x P x P
elif not full_cov and not full_output_cov:
fvar = fvar + tf.reshape(tf.reduce_sum(tf.square(LTA), (0, 1)), (N, P))
return fmean, fvar
| 21,569
|
def we_are_buying(account_from, account_to):
"""
Are we buying? (not buying == selling)
"""
buy = False
sell = False
for value in TRADING_ACCOUNTS:
if (value.lower() in account_from):
buy = True
sell = False
elif (value.lower() in account_to):
buy = False
sell = True
return buy
| 21,570
|
def register(param, file_src, file_dest, file_mat, file_out, im_mask=None):
"""
Register two images by estimating slice-wise Tx and Ty transformations, which are regularized along Z. This function
uses ANTs' isct_antsSliceRegularizedRegistration.
:param param:
:param file_src:
:param file_dest:
:param file_mat:
:param file_out:
:param im_mask: Image of mask, could be 2D or 3D
:return:
"""
# TODO: deal with mask
# initialization
failed_transfo = 0 # by default, failed matrix is 0 (i.e., no failure)
do_registration = True
# get metric radius (if MeanSquares, CC) or nb bins (if MI)
if param.metric == 'MI':
metric_radius = '16'
else:
metric_radius = '4'
file_out_concat = file_out
kw = dict()
im_data = Image(file_src) # TODO: pass argument to use antsReg instead of opening Image each time
# register file_src to file_dest
if param.todo == 'estimate' or param.todo == 'estimate_and_apply':
# If orientation is sagittal, use antsRegistration in 2D mode
# Note: the parameter --restrict-deformation is irrelevant with affine transfo
if im_data.orientation[2] in 'LR':
cmd = ['isct_antsRegistration',
'-d', '2',
'--transform', 'Affine[%s]' %param.gradStep,
'--metric', param.metric + '[' + file_dest + ',' + file_src + ',1,' + metric_radius + ',Regular,' + param.sampling + ']',
'--convergence', param.iter,
'--shrink-factors', '1',
'--smoothing-sigmas', param.smooth,
'--verbose', '1',
'--output', '[' + file_mat + ',' + file_out_concat + ']']
cmd += sct.get_interpolation('isct_antsRegistration', param.interp)
if im_mask is not None:
# if user specified a mask, make sure there are non-null voxels in the image before running the registration
if np.count_nonzero(im_mask.data):
cmd += ['--masks', im_mask.absolutepath]
else:
# Mask only contains zeros. Copying the image instead of estimating registration.
sct.copy(file_src, file_out_concat, verbose=0)
do_registration = False
# TODO: create affine mat file with identity, in case used by -g 2
# 3D mode
else:
cmd = ['isct_antsSliceRegularizedRegistration',
'--polydegree', param.poly,
'--transform', 'Translation[%s]' %param.gradStep,
'--metric', param.metric + '[' + file_dest + ',' + file_src + ',1,' + metric_radius + ',Regular,' + param.sampling + ']',
'--iterations', param.iter,
'--shrinkFactors', '1',
'--smoothingSigmas', param.smooth,
'--verbose', '1',
'--output', '[' + file_mat + ',' + file_out_concat + ']']
cmd += sct.get_interpolation('isct_antsSliceRegularizedRegistration', param.interp)
if im_mask is not None:
cmd += ['--mask', im_mask.absolutepath]
# run command
if do_registration:
kw.update(dict(is_sct_binary=True))
env = dict()
env.update(os.environ)
env = kw.get("env", env)
# reducing the number of CPU used for moco (see issue #201)
env["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "1"
status, output = sct.run(cmd, verbose=0, **kw)
elif param.todo == 'apply':
sct_apply_transfo.main(args=['-i', file_src,
'-d', file_dest,
'-w', file_mat + 'Warp.nii.gz',
'-o', file_out_concat,
'-x', param.interp,
'-v', '0'])
# check if output file exists
if not os.path.isfile(file_out_concat):
# sct.printv(output, verbose, 'error')
sct.printv('WARNING in ' + os.path.basename(__file__) + ': No output. Maybe related to improper calculation of '
'mutual information. Either the mask you provided is '
'too small, or the subject moved a lot. If you see too '
                   'many messages like this, try with a bigger mask. '
                   'Using previous transformation for this volume (if it '
'exists).', param.verbose, 'warning')
failed_transfo = 1
# TODO: if sagittal, copy header (because ANTs screws it) and add singleton in 3rd dimension (for z-concatenation)
if im_data.orientation[2] in 'LR' and do_registration:
im_out = Image(file_out_concat)
im_out.header = im_data.header
im_out.data = np.expand_dims(im_out.data, 2)
im_out.save(file_out, verbose=0)
# return status of failure
return failed_transfo
| 21,571
|
def parse_geoname_table_file(fpath, delimiter='\t'):
"""
Parse the table given in a file
:param fpath: string - path to the file
:param delimiter: string - delimiter between columns in the file
:returns: list of dict
"""
if not os.path.isfile(fpath):
fstr = "path is not a file: {}".format(fpath)
raise GlobeIndexerError(fstr)
full_fpath = os.path.realpath(fpath)
rows = list()
with open(full_fpath, encoding='utf-8') as fin:
reader = csv.DictReader(fin, fieldnames=GEONAME_TABLE_HEADERS,
delimiter=delimiter, quoting=csv.QUOTE_NONE)
for line in reader:
rows.append(line)
return rows
| 21,572
|
def test_welcome_page_view_integration_test(client, settings):
"""Assert that anonymous client can access WelcomePageView and that
welcome.html used as a template"""
settings.STATICFILES_STORAGE = (
"django.contrib.staticfiles.storage.StaticFilesStorage"
)
response = client.get("")
assert response.status_code == 200
assert "welcome.html" in (t.name for t in response.templates)
| 21,573
|
def wait_for_compute_jobs(nevermined, account, jobs):
"""Monitor and wait for compute jobs to finish.
Args:
nevermined (:py:class:`nevermined_sdk_py.Nevermined`): A nevermined instance.
account (:py:class:`contracts_lib_py.account.Account`): Account that published
the compute jobs.
jobs (:obj:`list` of :obj:`tuple`): A list of tuples with each tuple
containing (service_agreement_id, compute_job_id).
Returns:
:obj:`list` of :obj:`str`: Returns a list of dids produced by the jobs
Raises:
ValueError: If any of the jobs fail
"""
failed = False
dids = set()
while True:
finished = 0
for i, (sa_id, job_id) in enumerate(jobs):
status = nevermined.assets.compute_status(sa_id, job_id, account)
print(f"{job_id}: {status['status']}")
if status["status"] == "Failed":
failed = True
if status["status"] == "Succeeded":
finished += 1
dids.add(status["did"])
if failed:
for i, (sa_id, job_id) in enumerate(jobs):
logs = nevermined.assets.compute_logs(sa_id, job_id, account)
for line in logs:
print(f"[{line['podName']}]: {line['content']}")
raise ValueError("Some jobs failed")
if finished == len(jobs):
break
# move up 4 lines
print("\u001B[4A")
time.sleep(5)
return list(dids)
| 21,574
|
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier()
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
| 21,575
|
def plot4halovtimeradinterp(x_func,v_func,snaps,zs,title,x_label,y_label,radii):
"""
Plots a 2x2 grid of profile data vs time for 4 most massive halos.
.. deprecated:: 0.0.0
plot4halovtimeradinterp uses the outdated data format of a list of
Snapshot instances.
This results in inaccurate tracking of halos through time.
Instead, plotting should be done manually, with data obtained via the
ahfhalotools.objects.Cluster object
Parameters
----------
x_func : lambda
lambda that should take list of redshifts (floats) and return array
representing x values.
e.g:
lambda zs : analysis.tfromz(z)
v_func : lambda
lambda that should take Halo object and return array representing
profile data in question as a *function of radius*
snaps : list of Snapshot instances
zs : list of floats
List of redshifts that correspond to snapshots in snaps
title : str
Title of plot
x_label : str
x axis label of plot
y_label : str
y axis label of plot
radii : list of floats
List of radii to interpolate at
"""
fig,ax = plt.subplots(2,2,figsize=(9,6))
fig.suptitle(title,wrap=True)
for i in range(4):
#row and column indexes
row = i//2
col = i%2
#calculate expected timescale from halo at z=0
halo = snaps[0].halos[i]
sigV = halo.sigV * 1000 #km/s to m/s
Rvir = halo.Rvir * 3.086e+19 / (WMAP9.H(0).value/100) #kpc/h to m
timescale = Rvir/sigV / (60**2 * 24 * 365.26 * 1e9)
ax[row,col].set_title("Halo {hid}, expected timescale {t:.3f}Gyr".format(hid=i+1,t=timescale))
#ax[row,col].set_xscale("log")
ax[row,col].set_yscale("log")
if row == 1: ax[row,col].set_xlabel(x_label)
if col == 0: ax[row,col].set_ylabel(y_label)
for radius in radii:
x = x_func(zs)
y = analysis.getAsFuncOfTimeAtRadius(snaps,i,v_func,radius)
ax[row,col].plot(x,y, label="r = {0}".format(radius))
#ax[row,col].set_xlim(0,5)
#ax[row,col].set_ylim(0,300000000)
ax[row,col].legend()
plt.tight_layout()
plt.show()
| 21,576
|
def assertInStdout(proc_output,
expected_output,
process,
cmd_args=None,
*,
output_filter=None,
strict_proc_matching=True,
strip_ansi_escape_sequences=True):
"""
Assert that 'output' was found in the standard out of a process.
:param proc_output: The process output captured by launch_test. This is usually injected
into test cases as self._proc_output
:type proc_output: An launch_testing.IoHandler
:param expected_output: The output to search for
:type expected_output: string or regex pattern or a list of the aforementioned types
:param process: The process whose output will be searched
:type process: A string (search by process name) or a launch.actions.ExecuteProcess object
:param cmd_args: Optional. If 'process' is a string, cmd_args will be used to disambiguate
processes with the same name. Pass launch_testing.asserts.NO_CMD_ARGS to match a proc without
command arguments
:type cmd_args: string
:param output_filter: Optional. A function to filter output before attempting any assertion.
:type output_filter: callable
:param strict_proc_matching: Optional (default True), If proc is a string and the combination
of proc and cmd_args matches multiple processes, then strict_proc_matching=True will raise
an error.
:type strict_proc_matching: bool
:param strip_ansi_escape_sequences: If True (default), strip ansi escape
sequences from actual output before comparing with the output filter or
expected output.
:type strip_ansi_escape_sequences: bool
"""
resolved_procs = resolveProcesses(
info_obj=proc_output,
process=process,
cmd_args=cmd_args,
strict_proc_matching=strict_proc_matching
)
if output_filter is not None:
if not callable(output_filter):
raise ValueError('output_filter is not callable')
output_match = build_text_match(expected_output)
for proc in resolved_procs: # Nominally just one matching proc
full_output = ''.join(
output.text.decode() for output in proc_output[proc] if output.from_stdout
)
if strip_ansi_escape_sequences:
full_output = remove_ansi_escape_sequences(full_output)
if output_filter is not None:
full_output = output_filter(full_output)
if output_match(full_output) is not None:
break
else:
names = ', '.join(sorted(p.process_details['name'] for p in resolved_procs))
assert False, "Did not find '{}' in output for any of the matching processes: {}".format(
expected_output, names
)
| 21,577
|
def _delete_pool_member(members):
"""Deletes pool members"""
ServerPoolMember.objects.filter(id__in=members).delete()
| 21,578
|
def test_btrial():
"""Test module btrial.py by downloading
btrial.csv and testing shape of
extracted data has 45 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = btrial(test_path)
    try:
        assert x_train.shape == (45, 3)
    except Exception:
        shutil.rmtree(test_path)
        raise
| 21,579
|
def f1_score_loss(predicted_probs: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
"""
Computes a loss function based on F1 scores (harmonic mean of precision an recall).
Args:
predicted_probs: A [B, L] tensor of predicted probabilities
labels: A [B, 1] tensor of expected labels
Returns:
A tensor of sample-wise losses
"""
# Apply a sharpened sigmoid function to approximate the threshold
thresholded_predictions = predicted_probs - ONE_HALF
level_predictions = 1.0 / (1.0 + tf.exp(BETA * thresholded_predictions)) # [B, L]
# predictions = tf.reduce_prod(level_predictions, axis=-1, keepdims=True) # [B, 1]
predictions = tf.exp(tf.reduce_sum(tf.log(level_predictions), axis=-1, keepdims=True)) # [B, 1]
# Compute the (approximate) F1 score
f1_score = 2 * tf.reduce_sum(predictions * labels) / (tf.reduce_sum(predictions) + tf.reduce_sum(labels))
return 1.0 - f1_score
| 21,580
|
def get_alarm_historys_logic(starttime, endtime, page, limit):
"""
    Handle a GET request for historical alarm records.
    :return: resp, status
        resp: response data in JSON format
        status: HTTP status code
"""
data = {'alarm_total': 0, "alarms": []}
status = ''
message = ''
resp = {"status": status, "data": data, "message": message}
alarm_set = SfoAlarmLogMethod.group_by_alarm_device(page=int(page),
limit=int(limit),
starttime=starttime,
endtime=endtime)
if alarm_set:
data['alarm_total'] = alarm_set.total
for alarm in alarm_set.items:
sfo_alarm_logs = SfoAlarmLogMethod.query_by_alarm_device(alarm.alarm_device, starttime, endtime)
if len(sfo_alarm_logs) > 0:
                critical_len = [x for x in sfo_alarm_logs if x.alarm_level == 'critical']
                warn_len = [x for x in sfo_alarm_logs if x.alarm_level == 'warning']
sfo_cluster_node = SfoClusterNodesMethod.query_host_by_host_name(alarm.hostname)
alarm_info = {"alarm": sfo_alarm_logs[0],
"total": len(sfo_alarm_logs),
"warning_total": len(warn_len),
"critical_total": len(critical_len)}
if sfo_cluster_node and sfo_cluster_node.cluster_name:
alarm_info.update({"cluster_name": sfo_cluster_node.cluster_name})
alarm_info.update({"ip": sfo_cluster_node.node_inet_ip})
data['alarms'].append(alarm_info)
status = 200
message = 'OK'
else:
status = 404
message = 'Not Found Record'
resp.update({"status": status, "data": data, "message": message})
return resp, status
| 21,581
|
def showCities():
"""
Shows all cities in the database
"""
if 'access_token' not in login_session:
return redirect(url_for('showLogin'))
cities = session.query(City).order_by(City.id)
return render_template('cities.html', cities=cities)
| 21,582
|
def symb_to_num(symbolic):
"""
Convert symbolic permission notation to numeric notation.
"""
if len(symbolic) == 9:
group = (symbolic[:-6], symbolic[3:-3], symbolic[6:])
try:
numeric = notation[group[0]] + notation[group[1]] + notation[group[2]]
        except KeyError:
numeric = "Invalid Symbolic Representation!"
else:
numeric = "Symbolic input should be of lengh 9!"
return numeric
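
# Usage sketch. `notation` is a module-level table in the original code; a
# plausible rwx-triplet-to-octal-digit mapping is assumed here.
notation = {
    "rwx": "7", "rw-": "6", "r-x": "5", "r--": "4",
    "-wx": "3", "-w-": "2", "--x": "1", "---": "0",
}  # assumption
print(symb_to_num("rwxr-xr--"))  # '754'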
| 21,583
|
def round_vector(v, fraction):
""" ベクトルの各要素をそれぞれ round する
Args:
v (list[float, float, float]):
Returns:
list[float, float, float]:
"""
v = [round(x, fraction) for x in v]
return v
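
# Usage sketch:
print(round_vector([0.12345, 2.71828, -1.41421], 3))  # [0.123, 2.718, -1.414]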
| 21,584
|
def accept(model):
"""Return True if more than 20% of the validation data is being
correctly classified. Used to avoid including nets which haven't
learnt anything in the ensemble.
"""
accuracy = 0
    for data, target in validation_data[:(500 // 100)]:
if use_gpu:
data, target = Variable(data.cuda(), volatile=True), Variable(target.cuda())
else:
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
pred = output.data.max(1, keepdim=True)[1]
accuracy += pred.eq(target.data.view_as(pred)).cpu().sum()
if accuracy < 100: return False
else: return True
| 21,585
|
def parse_tcp_packet(tcp_packet):
"""read tcp data.http only build on tcp, so we do not need to support other protocols."""
tcp_base_header_len = 20
# tcp header
tcp_header = tcp_packet[0:tcp_base_header_len]
source_port, dest_port, seq, ack_seq, t_f, flags = struct.unpack(b'!HHIIBB6x', tcp_header)
# real tcp header len
tcp_header_len = ((t_f >> 4) & 0xF) * 4
# skip extension headers
if tcp_header_len > tcp_base_header_len:
pass
# body
body = tcp_packet[tcp_header_len:]
return source_port, dest_port, flags, seq, ack_seq, body
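
# Usage sketch: build a minimal 20-byte TCP header with struct and parse it back.
import struct

header = struct.pack(b'!HHIIBB6x', 54321, 80, 1000, 0, 5 << 4, 0x18)  # offset 5 words, PSH|ACK
packet = header + b'GET / HTTP/1.1\r\n'
src, dst, flags, seq, ack, body = parse_tcp_packet(packet)
print(src, dst, hex(flags), body)  # 54321 80 0x18 b'GET / HTTP/1.1\r\n'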
| 21,586
|
def control_browser(cef_handle, queue):
""" Loop thread to send javascript calls to cef
"""
while not cef_handle.HasDocument():
time.sleep(2)
cef_handle.ExecuteFunction('window.setWidth', 0)
while True:
operation = queue.get()
cef_handle.ExecuteFunction(operation[0], operation[1])
| 21,587
|
def find_balanced(text, start=0, start_sep='(', end_sep=')'):
""" Finds balanced ``start_sep`` with ``end_sep`` assuming
that ``start`` is pointing to ``start_sep`` in ``text``.
"""
if start >= len(text) or start_sep != text[start]:
return start
balanced = 1
pos = start + 1
while pos < len(text):
token = text[pos]
pos += 1
if token == end_sep:
if balanced == 1:
return pos
balanced -= 1
elif token == start_sep:
balanced += 1
return start
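
# Usage sketch: the returned index points just past the matching closing bracket.
text = "f(a, (b + c)) + d"
end = find_balanced(text, start=1)
print(text[1:end])  # '(a, (b + c))'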
| 21,588
|
def main():
"""
This script depicts caffeine n times and writes the runtime into a file.
- without augmentations with fingerprint picking
- with augmentations with fingerprint picking
- without augmentations without fingerprint picking
- with augmentations without fingerprint picking
    for n in [100*2**n for n in range(0, 11)]
"""
smiles = "CN1C=NC2=C1C(=O)N(C(=O)N2C)C"
with RandomDepictor() as depictor:
for number in [100*2**n for n in range(0, 11)]:
with open('time_report.txt', 'a') as time_report:
tmp_dir = "tmp"
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
# Depict SMILES n times without augmentation with FP picking
start = time.time()
depictor.batch_depict_save_with_fingerprints([smiles],
number,
tmp_dir,
[f"{num}.png" for num in range(number)],
aug_proportion=0,
processes=1)
end = time.time()
time_report.write('{}\t'.format(number))
time_report.write('{}\t'.format(end-start))
shutil.rmtree(tmp_dir, ignore_errors=True)
# Depict SMILES n times with augmentation with FP picking
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
start = time.time()
depictor.batch_depict_save_with_fingerprints([smiles],
number,
tmp_dir,
[f"{num}.png" for num in range(number)],
aug_proportion=1,
processes=1)
end = time.time()
time_report.write('{}\t'.format(end-start))
shutil.rmtree(tmp_dir, ignore_errors=True)
# Depict SMILES n times without augmentation without FP picking
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
start = time.time()
depictor.batch_depict_save([smiles], number, tmp_dir, False, ['caffeine'], (299, 299), processes=1)
end = time.time()
time_report.write('{}\t'.format(end-start))
shutil.rmtree(tmp_dir, ignore_errors=True)
# Depict SMILES n times with augmentation without FP picking
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
start = time.time()
depictor.batch_depict_save([smiles], number, tmp_dir, True, ['caffeine'], (299, 299), processes=1)
end = time.time()
time_report.write('{}\t'.format(end-start))
time_report.write('\n')
shutil.rmtree(tmp_dir, ignore_errors=True)
return
| 21,589
|
def write_json(druid, metadata):
"""Outputs the JSON data file for the roll specified by DRUID."""
output_path = Path(f"output/json/{druid}.json")
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as _fh:
json.dump(metadata, _fh)
| 21,590
|
def test_rk38():
"""Test "corrected" 3rd-order Runge-Kutta"""
rk38 = chk_ode(ints.rk38)
ref = np.array([2,2.7846719015333337,4.141594947022453,6.619134913159302,11.435455703714204])
assert np.allclose(rk38,ref)
| 21,591
|
def get_jhu_counts():
"""
Get latest case count .csv from JHU.
Return aggregated counts by country as Series.
"""
now = datetime.datetime.now().strftime("%m-%d-%Y")
url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{now}.csv"
req = requests.head(url)
while req.status_code != 200:
print("Got status " + str(req.status_code) + " for '" + url + "'")
date = datetime.datetime.now() - datetime.timedelta(days=1)
now = date.strftime("%m-%d-%Y")
url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{now}.csv"
req = requests.head(url)
req = requests.get(url)
jhu_df = pd.read_csv(io.StringIO(req.text))
print(f"Retrieved JHU case counts from {now}.")
jhu_counts = jhu_df['Confirmed'].groupby(
jhu_df['Country_Region']).sum().reset_index()
jhu_counts['Country_Region'] = jhu_counts['Country_Region'].apply(
lambda x: re.sub(r'[^a-zA-Z ]', '', x))
jhu_counts['Country_Region'] = jhu_counts['Country_Region'].apply(
lambda x: _COUNTRY_MAP[x] if x in _COUNTRY_MAP.keys() else x)
jhu_counts = jhu_counts.set_index('Country_Region')
jhu_counts = pd.Series(jhu_counts.values.flatten(), index=jhu_counts.index)
return jhu_counts
| 21,592
|
def set_debug(boolean):
"""Enable/Disable OpenGL debugging - specifically, this turns on/off calling of glGetError after every call."""
screen.debug = boolean
if boolean:
oglError.ErrorChecker.registerChecker(None)
else:
oglError.ErrorChecker.registerChecker(lambda:None)
| 21,593
|
def sectionsToMarkdown(root):
"""
Converts a list of Demisto JSON tables to markdown string of tables
:type root: ``dict`` or ``list``
:param root: The JSON table - List of dictionaries with the same keys or a single dictionary (required)
:return: A string representation of the markdown table
:rtype: ``str``
"""
mdResult = ''
if isinstance(root, dict):
for section in root:
data = root[section]
if isinstance(data, dict):
data = [data]
data = [{k: formatCell(row[k]) for k in row} for row in data]
mdResult += tblToMd(section, data)
return mdResult
| 21,594
|
def rename_files(source_dir, file_type, rfam_acc):
"""
:param source_dir:
:param file_type:
:return:
"""
if not os.path.exists(source_dir):
raise IOError
if file_type == "SEED":
seed_file_loc = os.path.join(source_dir, rfam_acc)
if not os.path.exists(seed_file_loc):
sys.exit("File does not exist %s" % seed_file_loc)
new_name = os.path.join(source_dir, rfam_acc+'.seed')
os.rename(seed_file_loc, new_name)
if not os.path.exists(new_name):
sys.exit("%s SEED cound not be renamed" % rfam_acc)
| 21,595
|
def VonMisesFisher_sample(phi0, theta0, sigma0, size=None):
""" Draw a sample from the Von-Mises Fisher distribution.
Parameters
----------
phi0, theta0 : float or array-like
Spherical-polar coordinates of the center of the distribution.
sigma0 : float
Width of the distribution.
size : int, tuple, array-like
number of samples to draw.
Returns
-------
phi, theta : float or array_like
Spherical-polar coordinates of sample from distribution.
"""
n0 = cartesian_from_polar(phi0, theta0)
M = rotation_matrix([0, 0, 1], n0)
x = numpy.random.uniform(size=size)
phi = numpy.random.uniform(size=size) * 2*numpy.pi
theta = numpy.arccos(1 + sigma0**2 *
numpy.log(1 + (numpy.exp(-2/sigma0**2)-1) * x))
n = cartesian_from_polar(phi, theta)
x = M.dot(n)
phi, theta = polar_from_cartesian(x)
return phi, theta
| 21,596
|
async def async_init_flow(
hass: HomeAssistantType,
handler: str = DOMAIN,
context: Optional[Dict] = None,
data: Any = None,
) -> Any:
"""Set up mock Roku integration flow."""
with patch(
"homeassistant.components.roku.config_flow.Roku.device_info",
new=MockDeviceInfo,
):
return await hass.config_entries.flow.async_init(
handler=handler, context=context, data=data
)
| 21,597
|
def _get_filtered_topics(topics, include, exclude):
"""
Filter the topics.
:param topics: Topics to filter
:param include: Topics to include if != None
    :param exclude: Topics to exclude if != None and include == None
:return: filtered topics
"""
logging.debug("Filtering topics (include=%s, exclude=%s) ...", include, exclude)
return [t for t in include if t in topics] if include is not None else \
[t for t in topics if t not in exclude] if exclude is not None else topics
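
# Usage sketch (import logging here since the helper logs at debug level):
import logging

topics = ["/imu", "/odom", "/tf"]
print(_get_filtered_topics(topics, include=["/tf", "/gps"], exclude=None))  # ['/tf']
print(_get_filtered_topics(topics, include=None, exclude=["/imu"]))         # ['/odom', '/tf']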
| 21,598
|
def test_reg_file_bad_type(tmp_path):
""" Test that RegistryFile raises error about bad file type """
bad_path = tmp_path / "my_folder"
bad_path.mkdir()
with pytest.raises(PrologueError) as excinfo:
RegistryFile(bad_path)
assert f"Path provided is not a file {bad_path}" in str(excinfo.value)
| 21,599
|