| content | id |
|---|---|
def insert_singletons(words, singletons, p=0.5):
"""
Replace singletons by the unknown word with a probability p.
"""
new_words = []
for word in words:
if word in singletons and np.random.uniform() < p:
new_words.append(0)
else:
new_words.append(word)
return new_words
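# Hedged usage sketch (not part of the original snippet): assumes the
# insert_singletons() function above, with integer word IDs and 0 reserved
# for the unknown-word token.
import numpy as np

words = [5, 17, 42, 17, 99]   # token IDs of one sentence
singletons = {42, 99}         # IDs that occurred only once in training
np.random.seed(0)             # make the stochastic replacement repeatable
print(insert_singletons(words, singletons, p=0.5))
# each singleton is independently replaced by 0 with probability p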
| 24,500
|
def test_bake_with_apostrophe_and_run_tests(cookies):
"""Ensure that a `full_name` with apostrophes does not break setup.py"""
with bake_in_temp_dir(
cookies,
extra_context={'full_name': "O'connor"}
) as result:
assert result.project.isdir()
        assert run_inside_dir('python setup.py test', str(result.project)) == 0
| 24,501
|
def talk(text, is_yelling=False, trim=False, verbose=True):
"""
    Transform and optionally print text.
    is_yelling - capitalizes the text
    trim - strips whitespace from both ends
    verbose - prints the transformed text to screen
    Returns the transformed text.
"""
if trim:
text = text.strip()
if is_yelling:
text = text.upper()
if verbose:
print(text) # printing is considered a side effect inside a function
return text
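# Hedged usage sketch: exercises the flags of talk() defined above.
shout = talk("  hello there  ", is_yelling=True, trim=True, verbose=False)
print(shout)  # -> HELLO THERE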
| 24,502
|
def getNorthPoleAngle(target, position, C, B, camera):
"""
Get angle north pole of target makes with image y-axis, in radians.
"""
# get target spin axis
# the last row of the matrix is the north pole vector, *per spice docs*
# seems correct, as it's nearly 0,0,1
Bz = B[2]
    print('Bz=north pole spin axis', Bz)
# get target radius, km
nvalues, radii = spice.bodvrd(target, 'RADII', 3)
targetRadiusEquator = (radii[0] + radii[1]) / 2
targetRadiusPoles = radii[2]
targetRadius = sum(radii) / 3
# flatteningCoefficient = (targetRadiusEquator - targetRadiusPoles) / targetRadiusEquator
# print 'target radius in km', targetRadius
# get north pole location
positionNP = position + targetRadius * Bz
    print('positionNP=north pole in world coords', positionNP)
# get target position in camera space
c = np.dot(C, position)
cNP = np.dot(C, positionNP)
    print('c=position in camera space', c)
    print('cNP=north pole in camera space', cNP)
# get camera fov and focal length
fovDegrees = config.cameraFOVs[camera] # 0.424 or 3.169 deg
fovRadians = fovDegrees * math.pi / 180
f = 1.0 / math.tan(fovRadians/2) # focal length (relative to screen halfwidth of 1.0)
    print('f=focal length', f)
# get camera-to-screen matrix S
cz = c[2]
fz = f/cz
# print 'fz=f/cz',fz
S = np.array([[fz,0,0],[0,fz,0]])
# get screen coordinate (-1 to 1, -1 to 1)
s = np.dot(S, c)
sNP = np.dot(S, cNP)
# ie sx=cx*f/cz; sy=cy*f/cz
    print('s=screen space (-1 to 1)', s)
    print('sNP=screen space north pole (-1 to 1)', sNP)
# get angle between north pole and image y-axis
npDelta = sNP-s
npRadians = math.atan(npDelta[0]/npDelta[1])
npAngle = npRadians * 180/math.pi
    print('npAngle', npAngle)
return npRadians
| 24,503
|
def cgr(bundle,
source_node,
contact_graph,
route_list,
contact_list,
current_time,
limbo,
hot_spots=None):
"""Core routing function of SCGR implementation.
    Enqueues a bundle (packet) into a contact queue based on CGR.
Args:
bundle (Packet): The Packet object that should be routed.
source_node (string): The originating node (i.e. the node where the
routing decision is performed)
contact_graph (dict): The contact graph object that provides the
topological information.
        route_list (list): Cache list to store already found routes in between
            routing decisions (whenever possible).
contact_list (list): List of :mod:`Contact` objects that will be used
for enqueuing the packet.
current_time (int): The time of the routing decision (in ms).
limbo (list): A node-based list where unroutable packets are enqueued
(and in the best case considered again at a later point)
        hot_spots (list): The list of hot spots in the network. Required to
            prevent a frequent cause of loops. Defaults to None.
"""
# Reset the list of the excluded nodes
excluded_nodes = []
# Check if the bundle prevents returning it to the sender node, if that is
# the case, add sender to list of excluded nodes
if not bundle.return_to_sender and bundle.hops:
# Add all hot_spots to the excluded nodes list if the current node
# is a hotspot and the packet came from a hot spot. This helps
# to prevent loops.
if (hot_spots is not None and source_node in hot_spots
and bundle.hops[-1] in hot_spots):
excluded_nodes.extend(hot_spots)
else:
excluded_nodes.append(bundle.hops[-1])
# If the bundle is critical, replicate and forward it to all feasible
# proximate neighbor nodes (i.e. flood it)
# FIXME The critical bundle forwarding is experimental and cannot be
# assumed to be correct yet. It needs more testing.
if bundle.is_critical:
for contact, route in find_critical_bundle_neighbors(
contact_graph, source_node, bundle.end_node, excluded_nodes,
current_time):
# Create a deep copy of the packet object to create real duplicates
bundle_copy = copy.deepcopy(bundle)
# Don't set route characteristics for critical bundles
# Enqueue to queue
contact_list[contact].enqueue_packet(bundle_copy, route, None)
del bundle
# Return as all possible enqueueing operations already took place
return
best_route = identify_best_feasible_route(
source_node,
bundle,
contact_graph,
route_list,
excluded_nodes,
current_time,
contact_list,
ignore_capacity=True)
first_alternative_route = identify_best_feasible_route(
source_node,
bundle,
contact_graph,
route_list,
excluded_nodes,
current_time,
contact_list,
alternative_route=True)
# Identify the neighbor with the best feasible route to the destination
neighbor = identify_best_feasible_route(source_node, bundle, contact_graph,
route_list, excluded_nodes,
current_time, contact_list)
# Check if feasible next hop has been found, if so enqueue the packet for
# the contact
if neighbor:
# Enqueue
contact_list[neighbor.contact].enqueue_packet(bundle, neighbor.route,
best_route.route)
bundle.add_planned_route(neighbor.route)
if first_alternative_route:
bundle.add_alternative_route(first_alternative_route.route)
else:
bundle.add_alternative_route(None)
# If no feasible next hop could be found, enqueue the bundle to the limbo
# (and maybe try later again)
else:
limbo.append(bundle)
# returns nothing
| 24,504
|
def test_work_order_with_no_indata(setup_config):
""" Testing work order request with no indata """
# input file name
request = 'work_order_tests/input/work_order_with_no_indata.json'
work_order_response, generic_params = (work_order_request_params
(setup_config, request))
err_cd, work_order_get_result_response = (work_order_get_result_params
(work_order_response[:6],
generic_params))
assert (verify_work_order_signature(work_order_get_result_response,
generic_params[0])
is TestStep.SUCCESS.value)
assert (decrypt_work_order_response(work_order_get_result_response,
work_order_response[3],
work_order_response[4])[0]
is TestStep.SUCCESS.value)
| 24,505
|
def ray_casting(polygon, ray_line):
""" checks number of intersection a ray makes with polygon
parameters: Polygon, ray (line)
output: number of intersection
"""
vertex_num = polygon.get_count()
ray_casting_result = [False] * vertex_num
    # count vertices that are colinear with and intersect the ray
vertex_colinear_intersect_with_ray = 0
cursor = polygon.head
for index in range(vertex_num):
edge = LineSegment(cursor.data, cursor.next.data)
ray_casting_result[index] = does_lines_intersect(edge, ray_line)
cursor = cursor.next
        # added to check whether the vertex is colinear with the ray
if is_vertex_colinear(ray_line, cursor.data) and ray_casting_result[index]:
vertex_colinear_intersect_with_ray = vertex_colinear_intersect_with_ray + 1
# print(ray_casting_result)
# print(vertex_colinear_intersect_with_ray)
    # adjusted for colinear vertices
return ray_casting_result.count(True) - vertex_colinear_intersect_with_ray
| 24,506
|
def main_install(args, config=None):
"""
Main function for the 'install' command.
"""
if not config:
# Load configuration file
config = autokernel.config.load_config(args.autokernel_config)
# Use correct umask when installing
saved_umask = os.umask(config.install.umask.value)
# Mount
new_mounts = []
for i in config.install.mount:
if not os.access(i, os.R_OK):
log.die("Permission denied on accessing '{}'. Aborting.".format(i))
if not os.path.ismount(i):
log.info("Mounting {}".format(i))
new_mounts.append(i)
try:
subprocess.run(['mount', '--', i], check=True)
except subprocess.CalledProcessError as e:
log.die("Could not mount '{}', mount returned code {}. Aborting.".format(i, e.returncode))
# Check mounts
for i in config.install.mount + config.install.assert_mounted:
if not os.access(i, os.R_OK):
log.die("Permission denied on accessing '{}'. Aborting.".format(i))
if not os.path.ismount(i):
log.die("'{}' is not mounted. Aborting.".format(i))
# Execute pre hook
execute_command(args, 'install.hooks.pre', config.install.hooks.pre, replace_common_vars)
kernel_version = autokernel.kconfig.get_kernel_version(args.kernel_dir)
target_dir = replace_common_vars(args, config.install.target_dir)
# Config output is "{KERNEL_DIR}/.config"
config_output = os.path.join(args.kernel_dir, '.config.autokernel')
# Initramfs basename "initramfs-{KERNEL_VERSION}.cpio"
    # The .cpio suffix is crucial, as the kernel makefile requires it to detect initramfs archives
initramfs_basename = 'initramfs-{}.cpio'.format(kernel_version)
# Initramfs output is "{KERNEL_DIR}/initramfs-{KERNEL_VERSION}.cpio"
initramfs_output = os.path.join(args.kernel_dir, initramfs_basename)
# bzImage output
bzimage_output = os.path.join(args.kernel_dir, 'arch', autokernel.kconfig.get_uname_arch(), 'boot/bzImage')
def _purge_old(path):
keep_old = config.install.keep_old.value
# Disable purging on negative count
if keep_old < 0:
return
        # Disable purging for non-versioned paths
        if '{KERNEL_VERSION}' not in path:
return
tokens = path.split('{KERNEL_VERSION}')
if len(tokens) > 2:
log.warn("Cannot purge path with more than one {{KERNEL_VERSION}} token: '{}'".format(path))
return
re_semver = re.compile(r'^[\d\.]+\d')
def _version_sorter(i):
suffix = i[len(tokens[0]):]
basename = suffix.split('/')[0]
st = os.stat(i)
try:
time_create = st.st_birthtime
except AttributeError:
time_create = st.st_mtime
semver = re_semver.match(basename).group()
val = autokernel.config.semver_to_int(semver)
return val, time_create
escaped_kv = re.escape('{KERNEL_VERSION}')
        # matches from {KERNEL_VERSION} until first / exclusive in a regex-escaped path
match_basename = re.compile(re.escape(escaped_kv) + r"(.+?(?=\\\/|$)).*$")
# derive regex to check if a valid semver is contained and prefix and suffix are given
re_match_valid_paths = re.compile('^' + match_basename.sub(lambda m: r'[0-9]+(\.[0-9]+(\.[0-9]+)?)?(-[^\/]*)?' + m.group(1) + r'.*$', re.escape(path)))
# matches from {KERNEL_VERSION} until first / exclusive in a normal path
re_replace_wildcard = re.compile(escaped_kv + r"[^\/]*")
# replace {KERNEL_VERSION}-* component with *
wildcard_path = re_replace_wildcard.sub('*', glob.escape(path))
# sort out paths that don't contain valid semvers
valid_globbed = [i for i in glob.glob(wildcard_path) if re_match_valid_paths.match(i)]
for i in sorted(valid_globbed, key=_version_sorter)[:-(keep_old + 1)]:
# For security, we will not call rmtree on a path that doesn't end with a slash,
            # or if the realpath has less than two slash characters in it.
# Otherwise we only call unlink
if i[-1] == '/' and os.path.realpath(i).count('/') >= 2:
try:
shutil.rmtree(i)
except OSError as e:
log.warn("Could not remove {}: {}".format(i, str(e)))
else:
try:
os.unlink(i)
except IOError as e:
log.warn("Could not remove {}: {}".format(i, str(e)))
def _move_to_old(path):
re_old_suffix = re.compile(r'^.*\.old(\.\d+)?\/*$')
dst = path + '.old'
highest_num = -1
for i in glob.glob(glob.escape(dst) + '*'):
m = re_old_suffix.match(i)
old_num = int((m.group(1) or '.0')[1:]) if m else 0
if highest_num < old_num:
highest_num = old_num
if highest_num >= 0:
dst += ".{:d}".format(highest_num + 1)
shutil.move(path, dst)
def _install(name, src, target_var):
# If the target is disabled, return.
if not target_var:
return
        # Figure out the destination, and move the existing file if necessary
dst = os.path.join(target_dir, replace_common_vars(args, target_var))
if os.path.exists(dst):
_move_to_old(dst)
# Create directory if it doesn't exist
Path(os.path.dirname(dst)).mkdir(parents=True, exist_ok=True)
log.info("Installing {:<11s} {}".format(name + ':', dst))
# Install target file
shutil.copyfile(src, dst)
# Purge old files
_purge_old(os.path.join(target_dir, str(target_var)))
# Move target_dir, if it is dynamic
if '{KERNEL_VERSION}' in str(config.install.target_dir) and os.path.exists(target_dir):
_move_to_old(os.path.realpath(target_dir))
# Load symbols from Kconfig
kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
sym_modules = kconfig.syms['MODULES']
# Install modules
if config.install.modules_prefix and sym_modules.str_value != 'n':
modules_prefix = str(config.install.modules_prefix)
modules_prefix_with_lib = os.path.join(modules_prefix, "lib/modules")
modules_dir = os.path.join(modules_prefix_with_lib, kernel_version)
if os.path.exists(modules_dir):
_move_to_old(os.path.realpath(modules_dir))
log.info("Installing modules: {}".format(modules_prefix_with_lib))
install_modules(args, prefix=modules_prefix)
_purge_old(modules_prefix_with_lib + "/{KERNEL_VERSION}/")
# Install targets
_install('bzimage', bzimage_output, config.install.target_kernel)
_install('config', config_output, config.install.target_config)
if config.initramfs.enabled:
_install('initramfs', initramfs_output, config.install.target_initramfs)
# Purge old target_dirs (will only be done if it is dynamic)
_purge_old(str(config.install.target_dir) + '/')
# Execute post hook
execute_command(args, 'install.hooks.post', config.install.hooks.post, replace_common_vars)
# Undo what we have mounted
for i in reversed(new_mounts):
log.info("Unmounting {}".format(i))
try:
subprocess.run(['umount', '--', i], check=True)
except subprocess.CalledProcessError as e:
log.warn("Could not umount '{}' (returned {})".format(i, e.returncode))
# Restore old umask
os.umask(saved_umask)
| 24,507
|
def kmeans_clustering_missing(reduced_components, output_path,
n_clusters=2, max_iter=10):
"""
Performs a K-means clustering with missing data.
:param reduced_components: reduced components matrix
:type reduced_components: np.ndarray
:param output_path: path to output directory
:type output_path: str
:param n_clusters: number of clusters
:type n_clusters: int
:param max_iter: maximum iterations for convergence
:type max_iter: int
:return: clustered array, centroids of clusters, filled matrix
:rtype: np.ndarray, list, np.ndarray
"""
logging.basicConfig(filename=os.path.join(output_path,
'clustering_FC_states_missing.log'),
level=logging.INFO)
# Initialize missing values to their column means
missing = ~np.isfinite(reduced_components)
mu = np.nanmean(reduced_components, axis=0)
X_filled = np.where(missing, mu, reduced_components)
for i in tqdm(range(max_iter)):
if i > 0:
# k means with previous centroids
cls = KMeans(n_clusters, init=prev_centroids)
else:
# do multiple random initializations in parallel
cls = KMeans(n_clusters, n_jobs=-1)
# perform clustering on the filled-in data
labels = cls.fit_predict(X_filled)
centroids = cls.cluster_centers_
# fill in the missing values based on their cluster centroids
X_filled[missing] = centroids[labels][missing]
# when the labels have stopped changing then we have converged
if i > 0 and np.all(labels == prev_labels):
break
prev_labels = labels
prev_centroids = cls.cluster_centers_
# perform the silhouette analysis as a metric for the clustering model
silhouette_avg = silhouette_score(X_filled, cls.labels_,
sample_size=300)
logging.info('For n_clusters = {}, the average silhouette score is: {}'
.format(n_clusters, silhouette_avg))
logging.info('For n_clusters = {}, the cluster centers are: {} and the '
'sum of squared distances of samples to their closest '
'cluster center are: {}'.format(n_clusters, centroids,
cls.inertia_))
np.savez(os.path.join(output_path, 'clustered_matrix'), labels)
return labels, centroids, X_filled
| 24,508
|
def test_macc_chardis():
"""tests that the macc_chardis function gives the
right output depending on the string in the Md column
in a dataframe."""
test_df = pd.DataFrame({'ColX': [0, 1, 2],
'Md': ['D', 'C', 'Something else']})
test_row1 = test_df.iloc[0]
test_row2 = test_df.iloc[1]
test_row3 = test_df.iloc[2]
assert macc_chardis(test_row1) == -1
assert macc_chardis(test_row2) == 1
assert macc_chardis(test_row3) == 1
return
| 24,509
|
def ErrorWrapper(err, resource_name):
"""Wraps http errors to handle resources names with more than 4 '/'s.
Args:
err: An apitools.base.py.exceptions.HttpError.
resource_name: The requested resource name.
Returns:
A googlecloudsdk.api_lib.util.exceptions.HttpException.
"""
exc = exceptions.HttpException(err)
if exc.payload.status_code == 404:
# status_code specific error message
exc.error_format = ('{{api_name}}: {resource_name} not found.').format(
resource_name=resource_name)
else:
# override default error message
exc.error_format = ('Unknown error. Status code {status_code}.')
return exc
| 24,510
|
def contexts_list_cli(configuration: Configuration, ctx: click.Context) -> None:
"""Print all available contexts."""
if len(configuration.contexts_repository) == 0:
click.echo("No contexts were found.")
ctx.exit(1)
for context in configuration.contexts_repository:
click.echo(f"{name_style(context.name)} - {context.help}")
| 24,511
|
def test_species_no_spc_nmsc():
"""If there is no scientific name, the string representation of a
species object is just the common name.
"""
attrs = {"spc_nmco": "Salvelinus Sp.", "spc": "086", "tagged": True}
species = Species(
spc_nmco=attrs.get("spc_nmco"),
spc_nmsc=attrs.get("spc_nmsc"),
spc=attrs.get("spc"),
tagged=attrs.get("tagged"),
)
should_be = "{spc_nmco}"
assert str(species) == should_be.format(**attrs)
| 24,512
|
def get_str_cmd(cmd_lst):
"""Returns a string with the command to execute"""
params = []
for param in cmd_lst:
if len(param) > 12:
params.append('"{p}"'.format(p=param))
else:
params.append(param)
return ' '.join(params)
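# Hedged usage sketch: arguments longer than 12 characters get quoted.
print(get_str_cmd(['grep', '-r', 'a string with spaces', '/tmp']))
# -> grep -r "a string with spaces" /tmp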
| 24,513
|
def guarded_call(name, function, message=None):
"""
Run a function once by creating a guard file on first run
"""
GUARD_DIRECTORY.mkdir(parents=True, exist_ok=True)
guard_file = GUARD_DIRECTORY / name
    if not guard_file.exists():
        if message is None:
            message = "Running {}".format(name)
        print(message)
        function()
        guard_file.touch()
| 24,514
|
def calculate_score(arr):
"""Inside calculate_score() check for a blackjack (a hand with only 2 cards: ace + 10) and return 0 instead of the actual score. 0 will represent a blackjack in our game.
It check for an 11 (ace). If the score is already over 21, remove the 11 and replace it with a 1"""
if sum(arr) == 21 and len(arr) == 2:
return 0 # represents blackjack
if sum(arr) > 21 and 11 in arr:
arr.remove(11)
arr.append(1)
return sum(arr)
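# Hedged usage sketch: 0 encodes a blackjack; an ace (11) is demoted to 1 when
# the hand would otherwise bust.
print(calculate_score([11, 10]))    # -> 0  (ace + ten with exactly two cards)
print(calculate_score([11, 9, 5]))  # -> 15 (the 11 is swapped for a 1)
print(calculate_score([10, 8, 5]))  # -> 23 (no ace to demote, hand is bust)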
| 24,515
|
def tt_true(alpha):
"""Is the propositional sentence alpha a tautology? (alpha will be
coerced to an expr.)
>>> tt_true(expr("(P >> Q) <=> (~P | Q)"))
True
"""
return tt_entails(TRUE, expr(alpha))
| 24,516
|
def test_stix_semantics_timestamp_coconstraint(
generator_stix_semantics, num_trials, gen_op, python_op
):
"""
Test value co-constraint satisfaction in the object generator.
"""
for _ in range(num_trials):
value = generator_stix_semantics.generate_from_spec({
"type": "object",
"value-coconstraints": [
"ts1 {} ts2".format(gen_op)
],
"properties": {
"ts1": {
"type": "string",
"semantics": "stix-timestamp"
},
"ts2": {
"type": "string",
"semantics": "stix-timestamp"
}
}
})
ts1_dt = datetime.datetime.strptime(value["ts1"], _TIMESTAMP_FORMAT)
ts2_dt = datetime.datetime.strptime(value["ts2"], _TIMESTAMP_FORMAT)
assert python_op(ts1_dt, ts2_dt)
| 24,517
|
def contrast_jwst_ana_num(matdir, matrix_mode="analytical", rms=1. * u.nm, im_pastis=False, plotting=False):
"""
Calculate the contrast for an RMS WFE with image PASTIS, matrix PASTIS
:param matdir: data directory to use for matrix and calibration coefficients from
    :param matrix_mode: use 'analytical' or 'numerical' matrix
:param rms: RMS wavefront error in pupil to calculate contrast for; in NANOMETERS
:param im_pastis: default False, whether to also calculate contrast from image PASTIS
:param plotting: default False, whether to save E2E and PASTIS DH PSFs; works only if im_pastis=True
:return:
"""
from e2e_simulators import webbpsf_imaging as webbim
log.warning("THIS ONLY WORKS FOR PISTON FOR NOW")
# Keep track of time
start_time = time.time() # runtime currently is around 12 min
# Parameters
dataDir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), matdir)
which_tel = CONFIG_PASTIS.get('telescope', 'name')
nb_seg = CONFIG_PASTIS.getint(which_tel, 'nb_subapertures')
filter = CONFIG_PASTIS.get(which_tel, 'filter_name')
fpm = CONFIG_PASTIS.get(which_tel, 'focal_plane_mask') # focal plane mask
lyot_stop = CONFIG_PASTIS.get(which_tel, 'pupil_plane_stop') # Lyot stop
inner_wa = CONFIG_PASTIS.getint(which_tel, 'IWA')
outer_wa = CONFIG_PASTIS.getint(which_tel, 'OWA')
tel_size_px = CONFIG_PASTIS.getint('numerical', 'tel_size_px')
sampling = CONFIG_PASTIS.getfloat(which_tel, 'sampling')
#real_samp = sampling * tel_size_px / im_size
zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike')
zern_mode = util.ZernikeMode(zern_number)
zern_max = CONFIG_PASTIS.getint('zernikes', 'max_zern')
# Import PASTIS matrix
matrix_pastis = None
if matrix_mode == 'numerical':
filename = 'PASTISmatrix_num_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
matrix_pastis = fits.getdata(os.path.join(dataDir, 'matrix_numerical', filename + '.fits'))
elif matrix_mode == 'analytical':
filename = 'PASTISmatrix_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
matrix_pastis = fits.getdata(os.path.join(dataDir, 'matrix_analytical', filename + '.fits'))
# Create random aberration coefficients
aber = np.random.random([nb_seg]) # piston values in input units
#log.info(f'PISTON ABERRATIONS: {aber}')
# Normalize to the RMS value I want
rms_init = util.rms(aber)
aber *= rms.value / rms_init
calc_rms = util.rms(aber) * u.nm
aber *= u.nm # making sure the aberration has the correct units
log.info(f"Calculated RMS: {calc_rms}")
# Remove global piston
aber -= np.mean(aber)
# Make equivalent aberration array that goes into the WebbPSF function
Aber_WSS = np.zeros([nb_seg, zern_max])
Aber_WSS[:,0] = aber.to(u.m).value # index "0" works because we're using piston currently; convert to meters
### BASELINE PSF - NO ABERRATIONS, NO CORONAGRAPH
log.info('Generating baseline PSF from E2E - no coronagraph, no aberrations')
psf_perfect = webbim.nircam_nocoro(filter, np.zeros_like(Aber_WSS))
normp = np.max(psf_perfect)
psf_perfect = psf_perfect / normp
### WEBBPSF
log.info('Generating E2E coro contrast')
start_webb = time.time()
# Set up NIRCam and coronagraph, get PSF
psf_webbpsf = webbim.nircam_coro(filter, fpm, lyot_stop, Aber_WSS)
psf_webbpsf = psf_webbpsf / normp
# Create dark hole
dh_area = util.create_dark_hole(psf_webbpsf, inner_wa, outer_wa, sampling)
# Get the mean contrast from the WebbPSF coronagraph
webb_dh_psf = psf_webbpsf * dh_area
contrast_webbpsf = np.mean(webb_dh_psf[np.where(webb_dh_psf != 0)])
end_webb = time.time()
#TODO: save plots of phase on segmented pupil
# Load in baseline contrast
contrastname = 'base-contrast_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
coro_floor = float(np.loadtxt(os.path.join(dataDir, 'calibration', contrastname+'.txt')))
### IMAGE PASTIS
contrast_am = np.nan
if im_pastis:
log.info('Generating contrast from image-PASTIS')
start_impastis = time.time()
# Create calibrated image from analytical model
psf_am, full_psf = impastis.analytical_model(zern_number, aber, cali=True)
# Get the mean contrast from image PASTIS
contrast_am = np.mean(psf_am[np.where(psf_am != 0)]) + coro_floor
end_impastis = time.time()
### MATRIX PASTIS
log.info('Generating contrast from matrix-PASTIS')
start_matrixpastis = time.time()
# Get mean contrast from matrix PASTIS
contrast_matrix = util.pastis_contrast(aber, matrix_pastis) + coro_floor # calculating contrast with PASTIS matrix model
end_matrixpastis = time.time()
ratio = None
if im_pastis:
ratio = contrast_am / contrast_matrix
# Outputs
log.info('\n--- CONTRASTS: ---')
log.info(f'Mean contrast from E2E: {contrast_webbpsf}')
log.info(f'Mean contrast with image PASTIS: {contrast_am}')
log.info(f'Contrast from matrix PASTIS: {contrast_matrix}')
log.info(f'Ratio image PASTIS / matrix PASTIS: {ratio}')
log.info('\n--- RUNTIMES: ---')
log.info(f'E2E: {end_webb-start_webb}sec = {(end_webb-start_webb)/60}min')
if im_pastis:
log.info(f'Image PASTIS: {end_impastis-start_impastis}sec = {(end_impastis-start_impastis)/60}min')
log.info(f'Matrix PASTIS: {end_matrixpastis-start_matrixpastis}sec = {(end_matrixpastis-start_matrixpastis)/60}min')
end_time = time.time()
runtime = end_time - start_time
log.info(f'Runtime for contrast_calculation_simple.py: {runtime} sec = {runtime/60} min')
# Save the PSFs
if im_pastis:
if plotting:
# As fits files
util.write_fits(util.zoom_cen(webb_dh_psf, psf_am.shape[0]/2), os.path.join(dataDir, 'results',
'dh_images_'+matrix_mode, '{:.2e}'.format(rms.value)+str(rms.unit)+'RMS_e2e.fits'))
util.write_fits(psf_am, os.path.join(dataDir, 'results', 'dh_images_'+matrix_mode,
'{:.2e}'.format(rms.value)+str(rms.unit)+'RMS_am.fits'))
# As PDF plot
plt.clf()
plt.figure()
plt.suptitle('{:.2e}'.format(rms.value) + str(rms.unit) + " RMS")
plt.subplot(1, 2, 1)
plt.title("E2E")
plt.imshow(util.zoom_cen(webb_dh_psf, psf_am.shape[0]/2), norm=LogNorm())
plt.colorbar()
plt.subplot(1, 2, 2)
plt.title("PASTIS image")
plt.imshow(psf_am, norm=LogNorm())
plt.colorbar()
plt.savefig(os.path.join(dataDir, 'results', 'dh_images_'+matrix_mode,
'{:.2e}'.format(rms.value)+'DH_PSFs.pdf'))
#TODO: check image rotation, I think there is a 90 degree difference in them for the JWST simulations
return contrast_webbpsf, contrast_am, contrast_matrix
| 24,518
|
def show_config_data_by_section(data:configparser.ConfigParser, section:str):
"""Print a section's data by section name
Args:
data (configparser.ConfigParser): Data
section (str): Section name
"""
if not _check_data_section_ok(data, section):
return None
val = data[section]
print("[{}]".format(section))
for k, v in val.items():
print("{} = {}".format(k, v))
print()
| 24,519
|
def psk_key_get(identity, hint):
"""return PSK string (in hex format without heading 0x) if given identity
and hint pair is allowed to connect else return False or None
"""
| 24,520
|
def train_save_tfidf(filein, target):
"""input is a bow corpus saved as a tfidf file. The output is
a saved tfidf corpus"""
try:
corpus = corpora.MmCorpus(filein)
except:
raise NameError('HRMMPH. The file does not seem to exist. Create a file'+
'first by running the "train_save_dictionary_corpus" function.')
tfidf = models.TfidfModel(corpus)
tfidf.save(f'nlp_training_data/{target}_tfidf_model.tfidf')
tfidf_corpus = tfidf[corpus]
return tfidf_corpus
| 24,521
|
def ScrewTrajectoryList(Xstart, Xend, Tf, N, method, gripper_state, traj_list):
""" Modified from the modern_robotics library ScrewTrajectory
Computes a trajectory as a list of SE(3) matrices with a gripper value and
converts into a list of lists
Args:
Xstart : The initial end-effector configuration
Xend : The final end-effector configuration
Tf : Total time of the motion in seconds from rest to rest
N : The number of points N > 1 in the discrete representation of the trajectory
method : The time-scaling method
gripper_state : The gripper open (0) and closed (1) value
Returns:
traj_list : list of rotations, positions, and gripper state
"""
N = int(N)
timegap = Tf / (N - 1.0)
traj = [[None]] * N
for i in range(N):
if method == 3:
s = CubicTimeScaling(Tf, timegap * i)
else:
s = QuinticTimeScaling(Tf, timegap * i)
traj[i] = np.dot(Xstart, MatrixExp6(MatrixLog6(np.dot(TransInv(Xstart), Xend)) * s))
traj = np.asarray(traj)
for i in range(N):
r11 = traj[i][0][0]
r12 = traj[i][0][1]
r13 = traj[i][0][2]
r21 = traj[i][1][0]
r22 = traj[i][1][1]
r23 = traj[i][1][2]
r31 = traj[i][2][0]
r32 = traj[i][2][1]
r33 = traj[i][2][2]
px = traj[i][0][3]
py = traj[i][1][3]
pz = traj[i][2][3]
traj_list.append([r11, r12, r13, r21, r22, r23, r31, r32, r33, px, py, pz, gripper_state])
return traj_list
| 24,522
|
def calculate_phase(time, period):
"""Calculates phase based on period.
Parameters
----------
    time : array_like
        Observation times.
    period : float
        Orbital period, in the same units as `time`.
Returns
-------
    array_like
        Orbital phase of the object orbiting the star, in the range [0, 1).
"""
return (time % period) / period
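# Hedged usage sketch: works element-wise on numpy arrays as well as scalars.
import numpy as np

times = np.array([0.0, 1.3, 2.6, 3.9])  # observation times (e.g. days)
period = 2.6                            # orbital period in the same units
print(calculate_phase(times, period))   # -> approximately [0.  0.5 0.  0.5]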
| 24,523
|
def content_disposition(disposition, filename):
"""
    Generates a content disposition header given a
*disposition* and a *filename*. The filename needs
to be the base name of the path, i.e. instead of
``~/file.txt`` you need to pass in ``file.txt``.
The filename is automatically quoted.
"""
yield 'Content-Disposition'
yield '%s; filename="%s"' % (disposition, quote(filename))
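# Hedged usage sketch: the generator yields the header name and its value.
# quote() above is assumed to come from urllib.parse.
from urllib.parse import quote

name, value = content_disposition('attachment', 'report 2021.txt')
print('{}: {}'.format(name, value))
# -> Content-Disposition: attachment; filename="report%202021.txt"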
| 24,524
|
def get_sbappname(filepath):
""" Given a file path, find an acceptable name on the BL filesystem """
filename = os.path.split(filepath)[1]
filename = filename.split('.')[0]
return re.sub(r'[:*?"<>|]', "", filename)[:24]
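# Hedged usage sketch: strips the extension, drops characters the BL
# filesystem rejects and truncates the result to 24 characters.
# os and re are the imports assumed by get_sbappname() above.
import os
import re

print(get_sbappname('/home/user/my*app?:v2.final.py'))  # -> myappv2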
| 24,525
|
def perspective_transform(img):
"""
Do a perspective transform over an image.
Points are hardcoded and depend on the camera and it's positioning
:param img:
:return:
"""
pts1 = np.float32([[250, 686], [1040, 680], [740, 490], [523, 492]])
pts2 = np.float32([[295, 724], [980, 724], [988, 164], [297, 150]])
M = cv2.getPerspectiveTransform(pts1, pts2)
transformed_image = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))
return transformed_image
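# Hedged usage sketch: the source/destination points above are hardcoded for a
# specific camera, so any frame from that setup works; the filenames here are
# hypothetical. cv2 and numpy are the imports assumed by the function above.
import cv2
import numpy as np

frame = cv2.imread('road_frame.jpg')            # hypothetical input frame
birds_eye = perspective_transform(frame)
cv2.imwrite('road_frame_warped.jpg', birds_eye)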
| 24,526
|
def main(output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'
files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']
for filename in files:
r = requests.get(baseurl+filename, stream=True)
        if r.status_code == 200:
with open(output_filepath+"/"+filename, "wb") as f:
f.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
| 24,527
|
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 400)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 400)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 400)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
| 24,528
|
def setup(rank: int, world_size: int, dist_url: str):
"""Setting-up method to be called in the distributed function
Borrowed from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html
Parameters
----------
rank : int
process int
world_size : int
        number of processes (of the process group)
    dist_url: str
        the url+port of the master machine, such as "tcp://127.0.0.1:12345"
"""
dist.init_process_group(
"nccl", rank=rank, world_size=world_size,
init_method=dist_url)
| 24,529
|
def value_iteration(P, nS, nA, gamma=0.9, tol=1e-3):
"""
Learn value function and policy by using value iteration method for a given
gamma and environment.
Parameters:
----------
P, nS, nA, gamma:
defined at beginning of file
tol: float
Terminate value iteration when
max |value(s) - prev_value(s)| < tol
Returns:
----------
value: np.ndarray[nS]
policy: np.ndarray[nS]
"""
value = np.zeros(nS) # value function initialized
policy = np.zeros(nS, dtype=int) # policy initialized
while True: # until convergence or finite horizon overflow
new_value = np.zeros(nS)
for state in range(nS): # for each state
            best_Q_value = -float("inf")  # we are looking for the best action in terms of Q-value
for action in range(nA): # for each action
p = P[state][action] # {(probability, nextstate, reward, terminal),...}[state,action]
reward = sum([i[0]*i[2] for i in p]) # expected reward for this state-action
Q_value = reward + gamma*(sum([i[0]*value[i[1]] for i in p])) # expected reward + gamma * expected value for this state-action
if Q_value > best_Q_value:
new_value[state] = Q_value # max_a Q for this state
policy[state] = action # argmax_a Q for this state
best_Q_value = Q_value
if np.max(np.abs(new_value - value)) < tol: # convergence
value = new_value
break
value = new_value
return value, policy
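# Hedged usage sketch on a toy two-state MDP (not from the original source):
# action 1 moves from state 0 to state 1 for a reward of 1, everything else
# stays put with reward 0. P[state][action] = [(prob, next_state, reward, done)].
import numpy as np

P = {
    0: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 1, 1.0, False)]},
    1: {0: [(1.0, 1, 0.0, True)],  1: [(1.0, 1, 0.0, True)]},
}
value, policy = value_iteration(P, nS=2, nA=2, gamma=0.9, tol=1e-6)
print(value)   # -> [1. 0.] : only state 0 has a rewarding move left
print(policy)  # -> [1 0]   : move out of state 0, stay in state 1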
| 24,530
|
def get_clients():
"""
Determine if the current user has a connected client.
"""
return jsonify(g.user.user_id in clients)
| 24,531
|
def get_split_indices(word, curr_tokens, include_joiner_token, joiner):
"""Gets indices for valid substrings of word, for iterations > 0.
For iterations > 0, rather than considering every possible substring, we only
want to consider starting points corresponding to the start of wordpieces in
the current vocabulary.
Args:
word: string we want to split into substrings
curr_tokens: string to int dict of tokens in vocab (from previous iteration)
include_joiner_token: bool whether to include joiner token
joiner: string used to indicate suffixes
Returns:
list of ints containing valid starting indices for word
"""
indices = []
start = 0
while start < len(word):
end = len(word)
while end > start:
subtoken = word[start:end]
# Subtoken includes the joiner token.
if include_joiner_token and start > 0:
subtoken = joiner + subtoken
# If subtoken is part of vocab, 'end' is a valid start index.
if subtoken in curr_tokens:
indices.append(end)
break
end -= 1
if end == start:
return None
start = end
return indices
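# Hedged usage sketch: a toy vocabulary from a previous WordPiece iteration,
# assuming "##" as the suffix joiner (BERT-style).
curr_tokens = {'un': 0, '##aff': 1, '##able': 2}
print(get_split_indices('unaffable', curr_tokens,
                        include_joiner_token=True, joiner='##'))
# -> [2, 5, 9] : valid split points yielding "un", "##aff", "##able"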
| 24,532
|
def main():
"""Main function to list, read and submit images recording the time spent
"""
images = os.listdir(path=IMAGE_PATH)
time_inference = []
for image_name in images:
with open(os.path.join(IMAGE_PATH, image_name), "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
payload = {"image": encoded_string.decode(), "metadata": {}}
req_post = requests.post(
url=API_POST_URL,
data=json.dumps(payload),
headers={
"Api-Key": API_KEY,
"accept": "application/json",
"Content-Type": "application/json",
},
)
start_clock = time.time()
if req_post.status_code == 201:
response = req_post.json()
inferred_image_with_box, diff = monitor_inference(
inference_id=response["id"], start_clock=start_clock
) # Blocking call
save(inferred_image_with_box, os.path.join(OUTPUT_PATH + image_name))
time_inference.append(diff)
print("{}\t{}".format(image_name, diff))
else:
print("{}\tError {}".format(image_name, req_post.status_code))
print("Mean\t\t {}".format(sum(time_inference) / float(len(time_inference))))
| 24,533
|
def perspective(
vlist: list[list[Number,
Number,
Number]],
rotvec: list[list[float, float],
list[float, float],
list[float, float]],
dispvec: list[Number,
Number,
Number],
d: float) -> tuple:
"""Projects 3D points to 2D and
apply rotation and translation
vectors
Args:
vlist : list of 3D vertices
rotvec : 3D rotation vector
dispvec: 3D translation vector
d : Distance of observer
from the screen
Returns:
tuple (list, list)
"""
projvlist = []
rotvlist = []
((sroll, croll),
(spitch, cpitch),
(syaw, cyaw)) = rotvec
for p in vlist:
(px, py, pz) = p
x1 = -cyaw * px - syaw * pz
y1 = croll * py - sroll * x1
z1 = -syaw * px + cyaw * pz
x = croll * x1 + sroll * py
y = spitch * z1 + cpitch * y1
z = cpitch * z1 - spitch * y1
x += dispvec[0]
y += dispvec[1]
z += dispvec[2]
rotvlist.append([x, y, z])
projvlist.append([-d * x / z,
-d * y / z])
return (rotvlist, projvlist)
| 24,534
|
def test_gcp_iam_project_role_permission_remove_command_exception(client):
"""
Remove permissions from custom project role.
Given:
- User has provided valid credentials.
When:
- gcp-iam-project-role-permission-remove called.
- User has provided invalid command arguments.
Then:
- Ensure command raise exception.
"""
client.gcp_iam_project_role_get_request = Mock(return_value={})
mock_response = load_mock_response('role/project_role_get.json')
client.gcp_iam_project_role_update_request = Mock(return_value=mock_response)
role_name = "projects/xsoar-project-5/roles/test_xsoar_role"
permissions = "aiplatform.artifacts.get,aiplatform.artifacts.list"
command_args = dict(role_name=role_name, permissions=permissions)
with pytest.raises(Exception):
GCP_IAM.gcp_iam_project_role_permission_remove_command(client, command_args)
| 24,535
|
def unroll_func_obs(obs):
"""Returns flattened list of FunctionalObservable instances
It inspect recursively the observable content of the argument to yield
all nested FunctionalObservable instances. They are ordered from lower to
deeper layers in nested-ness. If you need to compute f(g(h(x))), where
x is a raw Observable, the generator yields h, g, and f lastly, so that
evaluation can be performed in direct order.
Parameters
----------
obs : :class:`FunctionalObservable` instance
the observable to inspect
Yields
-------
:class:`FunctionalObservable` instance
The generator yields funcObs instance in appropriate order (from lower
to higher level in nested-ness).
"""
if isinstance(obs, FunctionalObservable):
for item in obs.observables:
for elem in unroll_func_obs(item):
yield elem
yield obs
    elif isinstance(obs, collections.abc.Iterable):
for item in obs:
for elem in unroll_func_obs(item):
yield elem
| 24,536
|
def _check_shebang(filename, disallow_executable):
"""Return 0 if the filename's executable bit is consistent with the
presence of a shebang line and the shebang line is in the whitelist of
acceptable shebang lines, and 1 otherwise.
If the string "# noqa: shebang" is present in the file, then this check
will be ignored.
"""
with open(filename, mode='r', encoding='utf8') as file:
content = file.read()
if "# noqa: shebang" in content:
# Ignore.
return 0
is_executable = os.access(filename, os.X_OK)
if is_executable and disallow_executable:
print("ERROR: {} is executable, but should not be".format(filename))
print("note: fix via chmod a-x '{}'".format(filename))
return 1
lines = content.splitlines()
assert len(lines) > 0, f"Empty file? {filename}"
shebang = lines[0]
has_shebang = shebang.startswith("#!")
if is_executable and not has_shebang:
print("ERROR: {} is executable but lacks a shebang".format(filename))
print("note: fix via chmod a-x '{}'".format(filename))
return 1
if has_shebang and not is_executable:
print("ERROR: {} has a shebang but is not executable".format(filename))
print("note: fix by removing the first line of the file")
return 1
shebang_whitelist = {
"bash": "#!/bin/bash",
"python": "#!/usr/bin/env python3",
}
if has_shebang and shebang not in list(shebang_whitelist.values()):
print(("ERROR: shebang '{}' in the file '{}' is not in the shebang "
"whitelist").format(shebang, filename))
for hint, replacement_shebang in shebang_whitelist.items():
if hint in shebang:
print(("note: fix by replacing the shebang with "
"'{}'").format(replacement_shebang))
return 1
return 0
| 24,537
|
def test_inspector_adult_easy_py_pipeline_without_inspections():
"""
Tests whether the .py version of the inspector works
"""
inspector_result = PipelineInspector\
.on_pipeline_from_py_file(ADULT_SIMPLE_PY)\
.execute()
extracted_dag = inspector_result.dag
expected_dag = get_expected_dag_adult_easy(ADULT_SIMPLE_PY)
compare(networkx.to_dict_of_dicts(extracted_dag), networkx.to_dict_of_dicts(expected_dag))
| 24,538
|
def actor_is_contact(api_user, nick, potential_contact):
"""Determine if one is a contact.
PARAMETERS:
potential_contact - stalkee.
RETURNS: boolean
"""
nick = clean.user(nick)
potential_contact = clean.user(potential_contact)
key_name = Relation.key_from(relation='contact',
owner=nick,
target=potential_contact)
rel_ref = Relation.get_by_key_name(key_name)
    return bool(rel_ref)
| 24,539
|
def compute_mem(w, n_ring=1, spectrum='nonzero', tol=1e-10):
"""Compute Moran eigenvectors map.
Parameters
----------
w : BSPolyData, ndarray or sparse matrix, shape = (n_vertices, n_vertices)
Spatial weight matrix or surface. If surface, the weight matrix is
built based on the inverse geodesic distance between each vertex
and the vertices in its `n_ring`.
n_ring : int, optional
Neighborhood size to build the weight matrix. Only used if user
provides a surface mesh. Default is 1.
spectrum : {'all', 'nonzero'}, optional
Eigenvalues/vectors to select. If 'all', recover all eigenvectors
except the smallest one. Otherwise, select all except non-zero
eigenvectors. Default is 'nonzero'.
tol : float, optional
Minimum value for an eigenvalue to be considered non-zero.
Default is 1e-10.
Returns
-------
    mem : 2D ndarray, shape (n_vertices, n_components)
        Eigenvectors of the weight matrix, ordered by descending eigenvalue.
        ``n_components = n_vertices - 1`` if ``spectrum == 'all'`` and
        ``n_components = n_vertices - n_zero`` if ``spectrum == 'nonzero'``,
        where `n_zero` is the number of zero eigenvalues.
    ev : 1D ndarray, shape (n_components,)
        Eigenvalues in descending order.
See Also
--------
:func:`.moran_randomization`
:class:`.MoranRandomization`
References
----------
* Wagner H.H. and Dray S. (2015). Generating spatially constrained
null models for irregularly spaced data using Moran spectral
randomization methods. Methods in Ecology and Evolution, 6(10):1169-78.
"""
if spectrum not in ['all', 'nonzero']:
raise ValueError("Unknown autocor '{0}'.".format(spectrum))
# If surface is provided instead of affinity
if not (isinstance(w, np.ndarray) or ssp.issparse(w)):
w = me.get_ring_distance(w, n_ring=n_ring, metric='geodesic')
w.data **= -1 # inverse of distance
# w /= np.nansum(w, axis=1, keepdims=True) # normalize rows
if not is_symmetric(w):
w = make_symmetric(w, check=False, sparse_format='coo')
# Doubly centering weight matrix
if ssp.issparse(w):
m = w.mean(axis=0).A
wc = w.mean() - m - m.T
if not ssp.isspmatrix_coo(w):
w_format = w.format
w = w.tocoo(copy=False)
row, col = w.row, w.col
w = getattr(w, 'to' + w_format)(copy=False)
else:
row, col = w.row, w.col
wc[row, col] += w.data
else:
m = w.mean(axis=0, keepdims=True)
wc = w.mean() - m - m.T
wc += w
# when using float64, eigh is unstable for sparse matrices
ev, mem = np.linalg.eigh(wc.astype(np.float32))
ev, mem = ev[::-1], mem[:, ::-1]
# Remove zero eigen-value/vector
ev_abs = np.abs(ev)
mask_zero = ev_abs < tol
n_zero = np.count_nonzero(mask_zero)
if n_zero == 0:
raise ValueError('Weight matrix has no zero eigenvalue.')
# Multiple zero eigenvalues
if spectrum == 'all':
if n_zero > 1:
n = w.shape[0]
memz = np.hstack([mem[:, mask_zero], np.ones((n, 1))])
q, _ = np.linalg.qr(memz)
mem[:, mask_zero] = q[:, :-1]
idx_zero = mask_zero.argmax()
else:
idx_zero = ev_abs.argmin()
ev[idx_zero:-1] = ev[idx_zero+1:]
mem[:, idx_zero:-1] = mem[:, idx_zero + 1:]
ev = ev[:-1]
mem = mem[:, :-1]
else: # only nonzero
mask_nonzero = ~mask_zero
ev = ev[mask_nonzero]
mem = mem[:, mask_nonzero]
return mem, ev
| 24,540
|
def printAb(A, b):
""" printout the matrix A and vector b in a pretty fashion. We
don't use the numpy print here, because we want to make them
side by side"""
N = len(b)
openT = "/"
closeT = "\\"
openB = "\\"
closeB = "/"
# numbers take 6 positions + 2 spaces
aFmt = " %6.3f "
space = 8*" "
line = "|" + N*aFmt + "|" + space + "|" + aFmt + "|"
top = openT + N*space + closeT + space + openT + space + closeT
bottom = openB + N*space + closeB + space + openB + space + closeB + "\n"
    print(top)
    for i in range(N):
        out = tuple(A[i, :]) + (b[i],)
        print(line % out)
    print(bottom)
| 24,541
|
def cli(ctx, search):
"""Output quotes."""
ctx.obj = Context(search)
result = ctx.obj.quote.random(search=search)
click.echo(result)
| 24,542
|
def gbsShowLayer(mapLayer):
""" Show layer by map object name
"""
if not mapLayer or mapLayer.getLayer() is None:
return
layerId = mapLayer.getLayer().id()
iface.mapLegend.showLayer(layerId)
| 24,543
|
def test_correct_digit_required(min_v, dummy_form, dummy_field):
"""
It should pass for the string with correct count of required digit.
"""
dummy_field.data = "asdqwe872536"
validator = digit_required(min_v)
validator(dummy_form, dummy_field)
| 24,544
|
def eval_BenchmarkModel(x, a, y, model, loss):
"""
Given a dataset (x, a, y) along with predictions,
loss function name
evaluate the following:
- average loss on the dataset
- DP disp
"""
pred = model(x) # apply model to get predictions
n = len(y)
if loss == "square":
err = mean_squared_error(y, pred) # mean square loss
elif loss == "absolute":
err = mean_absolute_error(y, pred) # mean absolute loss
## functions from sklearn.metrics library.
## The strange thing is that in the evaluate_FairModel function, the author uses his own function.
elif loss == "logistic": # assuming probabilistic predictions
# take the probability of the positive class
pred = pd.DataFrame(pred).iloc[:, 1]
err = log_loss(y, pred, eps=1e-15, normalize=True)
else:
raise Exception('Loss not supported: ', str(loss))
disp = pred2_disp(pred, a, y, loss)
    ## this function seems incomplete
    ## because I cannot find the definition of the function argument quantization.
loss_vec = loss_vec2(pred, y, loss)
## Isn't this equal to the error part???
loss_mean, loss_std = norm.fit(loss_vec)
evaluation = {}
evaluation['pred'] = pred
evaluation['average_loss'] = err
evaluation['DP_disp'] = disp['DP']
evaluation['disp_std'] = KS_confbdd(n, alpha=0.05)
evaluation['loss_std'] = loss_std / np.sqrt(n)
return evaluation
| 24,545
|
def parse_sgf_game(s):
"""Read a single SGF game from a string, returning the parse tree.
s -- 8-bit string
Returns a Coarse_game_tree.
Applies the rules for FF[4].
Raises ValueError if can't parse the string.
If a property appears more than once in a node (which is not permitted by
the spec), treats it the same as a single property with multiple values.
Identifies the start of the SGF content by looking for '(;' (with possible
whitespace between); ignores everything preceding that. Ignores everything
following the first game.
"""
game_tree, _ = _parse_sgf_game(s, 0)
if game_tree is None:
raise ValueError("no SGF data found")
return game_tree
| 24,546
|
def mesh_checker(mesh,Dict):
"""Give a mesh and a Dict loaded from HDF5 to compare"""
# print((mesh.elements - Dict['elements']).max())
# print(mesh.elements.dtype, Dict['elements'].dtype)
print("Checking higher order mesh generators results")
if entity_checker(mesh.elements,Dict['elements']):
print(tick, "mesh elements match")
else:
print(cross, "mesh elements do not match")
exit()
if entity_checker(mesh.points,Dict['points']):
print(tick, "mesh points match")
else:
print(cross, "mesh points do not match")
exit()
if entity_checker(mesh.edges,Dict['edges']):
print(tick, "mesh edges match")
else:
print(cross, "mesh edges do not match")
exit()
if mesh.element_type == "tet" or mesh.element_type == "hex":
if entity_checker(mesh.faces,Dict['faces']):
print(tick, "mesh faces match")
else:
print(cross, "mesh faces do not match")
exit()
| 24,547
|
def retrieve(
framework,
region,
version=None,
py_version=None,
instance_type=None,
accelerator_type=None,
image_scope=None,
container_version=None,
distribution=None,
base_framework_version=None,
):
"""Retrieves the ECR URI for the Docker image matching the given arguments.
Args:
framework (str): The name of the framework or algorithm.
region (str): The AWS region.
version (str): The framework or algorithm version. This is required if there is
more than one supported version for the given framework or algorithm.
py_version (str): The Python version. This is required if there is
more than one supported Python version for the given framework version.
instance_type (str): The SageMaker instance type. For supported types, see
https://aws.amazon.com/sagemaker/pricing/instance-types. This is required if
there are different images for different processor types.
accelerator_type (str): Elastic Inference accelerator type. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html.
image_scope (str): The image type, i.e. what it is used for.
Valid values: "training", "inference", "eia". If ``accelerator_type`` is set,
``image_scope`` is ignored.
container_version (str): the version of docker image
distribution (dict): A dictionary with information on how to run distributed training
(default: None).
Returns:
str: the ECR URI for the corresponding SageMaker Docker image.
Raises:
ValueError: If the combination of arguments specified is not supported.
"""
config = _config_for_framework_and_scope(framework, image_scope, accelerator_type)
original_version = version
version = _validate_version_and_set_if_needed(version, config, framework)
version_config = config["versions"][_version_for_config(version, config)]
if framework == HUGGING_FACE_FRAMEWORK:
if version_config.get("version_aliases"):
full_base_framework_version = version_config["version_aliases"].get(
base_framework_version, base_framework_version
)
_validate_arg(full_base_framework_version, list(version_config.keys()), "base framework")
version_config = version_config.get(full_base_framework_version)
py_version = _validate_py_version_and_set_if_needed(py_version, version_config, framework)
version_config = version_config.get(py_version) or version_config
registry = _registry_from_region(region, version_config["registries"])
hostname = utils._botocore_resolver().construct_endpoint("ecr", region)["hostname"]
repo = version_config["repository"]
processor = _processor(
instance_type, config.get("processors") or version_config.get("processors")
)
if framework == HUGGING_FACE_FRAMEWORK:
pt_or_tf_version = (
re.compile("^(pytorch|tensorflow)(.*)$").match(base_framework_version).group(2)
)
tag_prefix = f"{pt_or_tf_version}-transformers{original_version}"
else:
tag_prefix = version_config.get("tag_prefix", version)
tag = _format_tag(
tag_prefix,
processor,
py_version,
container_version,
)
if _should_auto_select_container_version(instance_type, distribution):
container_versions = {
"tensorflow-2.3-gpu-py37": "cu110-ubuntu18.04-v3",
"tensorflow-2.3.1-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-2.3.2-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-1.15-gpu-py37": "cu110-ubuntu18.04-v8",
"tensorflow-1.15.4-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-1.15.5-gpu-py37": "cu110-ubuntu18.04",
"mxnet-1.8-gpu-py37": "cu110-ubuntu16.04-v1",
"mxnet-1.8.0-gpu-py37": "cu110-ubuntu16.04",
"pytorch-1.6-gpu-py36": "cu110-ubuntu18.04-v3",
"pytorch-1.6.0-gpu-py36": "cu110-ubuntu18.04",
"pytorch-1.6-gpu-py3": "cu110-ubuntu18.04-v3",
"pytorch-1.6.0-gpu-py3": "cu110-ubuntu18.04",
}
key = "-".join([framework, tag])
if key in container_versions:
tag = "-".join([tag, container_versions[key]])
if tag:
repo += ":{}".format(tag)
return ECR_URI_TEMPLATE.format(registry=registry, hostname=hostname, repository=repo)
| 24,548
|
def deconstruct_DMC(G, alpha, beta):
"""Deconstruct a DMC graph over a single step."""
# reverse complementation
if G.has_edge(alpha, beta):
G.remove_edge(alpha, beta)
w = 1
else:
w = 0
# reverse mutation
alpha_neighbors = set(G.neighbors(alpha))
beta_neighbors = set(G.neighbors(beta))
x = len(alpha_neighbors & beta_neighbors)
y = len(alpha_neighbors | beta_neighbors)
for neighbor in alpha_neighbors:
G.add_edge(beta, neighbor)
# reverse duplication
G.remove_node(alpha)
return (w, x, y)
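# Hedged usage sketch: reverses one duplication-mutation-complementation step
# on a small networkx graph, assuming node 2 (beta) was duplicated from node 1.
import networkx as nx

G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4)])
w, x, y = deconstruct_DMC(G, alpha=1, beta=2)
print(w, x, y)            # -> 1 1 2 (edge removed, shared/total neighbours)
print(sorted(G.edges()))  # -> [(2, 3), (2, 4)] : alpha folded back into beta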
| 24,549
|
def write_input_xml(filename):
"""
Write the XML file of this sample model
"""
model_input = create_sample_input_model_with_spherical_cv()
model_input.serialize(filename)
return
| 24,550
|
def execute(args):
""" Execute a specified command """
subprocess.run(args)
| 24,551
|
def new_followers_view(request):
"""
View to show new followers.
:param request:
:return:
"""
current_author = request.user.user
followers_new = FollowRequest.objects.all().filter(friend=current_author).filter(acknowledged=False)
for follow in followers_new:
follow.acknowledged = True
follow.save()
request.context['followers_new'] = followers_new
return render(request, 'authors/follower_request.html', request.context)
| 24,552
|
def dump_sphmap(stream, snake):
""" Ad-hoc sphere mapping dump format:
First Line: [# Vertices, Original Sphere Radius, Original Sphere Center (XYZ)]
Others: [Shape (distance),
Sphere Coords (XYZ),
Sphere Coords (Phi, Theta),
Surface Coords (XYZ)]
"""
dump_data = zip(snake.travel,
snake.starting_points,
snake.spherical_starting_points,
snake.vertices)
num_vertices = len(snake.vertices)
radius = snake.contour.radius
cx, cy, cz = snake.contour.center
print("{0}\t{1}\t{2}\t{3}\t{4}".format(num_vertices, radius, cx, cy, cz), file=stream)
for idx, vertex_data in enumerate(dump_data):
travel, points, spherical_points, on_surf = vertex_data
line = []
line.append(travel)
line.extend(points)
line.extend(spherical_points)
line.extend(on_surf)
format = ("{:.4f}\t" * len(line)).strip()
print(format.format(*line), file=stream)
| 24,553
|
def inv(a, p):
"""Inverse of a in :math:`{mathbb Z}_p`
:param a,p: non-negative integers
:complexity: O(log a + log p)
"""
return bezout(a, p)[0] % p
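# Hedged usage sketch: assumes bezout(a, b) is the extended-Euclid helper from
# the same library, returning (u, v) with u*a + v*b == gcd(a, b).
print(inv(3, 7))          # -> 5, because 3 * 5 = 15 = 1 (mod 7)
print(3 * inv(3, 7) % 7)  # -> 1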
| 24,554
|
def test_raw_html_634bb():
"""
Test case 634bb: variation of 634 in block quote
"""
# Arrange
source_markdown = """> <a /><b2
> data="foo" ><c>"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[para(1,3):\n ]",
"[raw-html(1,3):a /]",
'[raw-html(1,9):b2\n\a \a\x03\adata="foo" ]',
"[raw-html(2,17):c]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p><a /><b2
data="foo" ><c></p>
</blockquote>"""
# Act & Assert
act_and_assert(
source_markdown, expected_gfm, expected_tokens, disable_consistency_checks=True
)
| 24,555
|
def get_iSUN(location=None):
"""
Loads or downloads and caches the iSUN dataset.
@type location: string, defaults to `None`
@param location: If and where to cache the dataset. The dataset
will be stored in the subdirectory `iSUN` of
location and read from there, if already present.
@return: Training stimuli, validation stimuli, testing stimuli, training fixation trains, validation fixation trains
.. seealso::
P. Xu, K. A. Ehinger, Y. Zhang, A. Finkelstein, S. R. Kulkarni, and J. Xiao.: TurkerGaze: Crowdsourcing Saliency with Webcam based Eye Tracking
http://lsun.cs.princeton.edu/
http://vision.princeton.edu/projects/2014/iSUN/
"""
if location:
location = os.path.join(location, 'iSUN')
if os.path.exists(location):
stimuli_training = _load(os.path.join(location, 'stimuli_training.hdf5'))
stimuli_validation = _load(os.path.join(location, 'stimuli_validation.hdf5'))
stimuli_testing = _load(os.path.join(location, 'stimuli_testing.hdf5'))
fixations_training = _load(os.path.join(location, 'fixations_training.hdf5'))
fixations_validation = _load(os.path.join(location, 'fixations_validation.hdf5'))
return stimuli_training, stimuli_validation, stimuli_testing, fixations_training, fixations_validation
os.makedirs(location)
with TemporaryDirectory(cleanup=True) as temp_dir:
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/training.mat',
os.path.join(temp_dir, 'training.mat'),
'5a8b15134b17c7a3f69b087845db1363')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/validation.mat',
os.path.join(temp_dir, 'validation.mat'),
'f68e9b011576e48d2460b883854fd86c')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/testing.mat',
os.path.join(temp_dir, 'testing.mat'),
'be008ef0330467dcb9c9cd9cc96a8546')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/fixation.zip',
os.path.join(temp_dir, 'fixation.zip'),
'aadc15784e1b0023cda4536335b7839c')
download_and_check('http://lsun.cs.princeton.edu/challenge/2015/eyetracking/data/image.zip',
os.path.join(temp_dir, 'image.zip'),
'0a3af01c5307f1d44f5dd309f71ea963')
# Stimuli
print('Creating stimuli')
f = zipfile.ZipFile(os.path.join(temp_dir, 'image.zip'))
namelist = f.namelist()
namelist = filter_files(namelist, ['.DS_Store'])
f.extractall(temp_dir, namelist)
def get_stimuli_names(name):
data_file = os.path.join(temp_dir, '{}.mat'.format(name))
data = loadmat(data_file)[name]
stimuli_names = [d[0] for d in data['image'][:, 0]]
stimuli_names = ['{}.jpg'.format(n) for n in stimuli_names]
return stimuli_names
stimulis = []
stimuli_src_location = os.path.join(temp_dir, 'images')
for name in ['training', 'validation', 'testing']:
print("Creating {} stimuli".format(name))
stimuli_target_location = os.path.join(location, 'stimuli_{}'.format(name)) if location else None
images = get_stimuli_names(name)
stimulis.append(create_stimuli(stimuli_src_location, images, stimuli_target_location))
# FixationTrains
print('Creating fixations')
def get_fixations(name):
data_file = os.path.join(temp_dir,'{}.mat'.format(name))
data = loadmat(data_file)[name]
gaze = data['gaze'][:, 0]
ns = []
train_xs = []
train_ys = []
train_ts = []
train_subjects = []
for n in range(len(gaze)):
fixation_trains = gaze[n]['fixation'][0, :]
for train in fixation_trains:
xs = train[:, 0]
ys = train[:, 1]
ns.append(n)
train_xs.append(xs)
train_ys.append(ys)
train_ts.append(range(len(xs)))
train_subjects.append(0)
fixations = FixationTrains.from_fixation_trains(train_xs, train_ys, train_ts, ns, train_subjects)
return fixations
fixations = []
for name in ['training', 'validation']:
print("Creating {} fixations".format(name))
fixations.append(get_fixations(name))
if location:
stimulis[0].to_hdf5(os.path.join(location, 'stimuli_training.hdf5'))
stimulis[1].to_hdf5(os.path.join(location, 'stimuli_validation.hdf5'))
            stimulis[2].to_hdf5(os.path.join(location, 'stimuli_testing.hdf5'))
fixations[0].to_hdf5(os.path.join(location, 'fixations_training.hdf5'))
fixations[1].to_hdf5(os.path.join(location, 'fixations_validation.hdf5'))
return stimulis + fixations
| 24,556
|
def metadata_columns(request, metadata_column_headers):
"""Make a metadata column header and column value dictionary."""
template = 'val{}'
columns = {}
for header in metadata_column_headers:
columns[header] = []
for i in range(0, request.param):
columns[header].append(template.format(i))
return columns
| 24,557
|
def test_differentiable_sgd():
"""Test second order derivative after taking optimization step."""
policy = torch.nn.Linear(10, 10, bias=False)
lr = 0.01
diff_sgd = DifferentiableSGD(policy, lr=lr)
named_theta = dict(policy.named_parameters())
theta = list(named_theta.values())[0]
meta_loss = torch.sum(theta**2)
meta_loss.backward(create_graph=True)
diff_sgd.step()
theta_prime = list(policy.parameters())[0]
loss = torch.sum(theta_prime**2)
update_module_params(policy, named_theta)
diff_sgd.zero_grad()
loss.backward()
result = theta.grad
dtheta_prime = 1 - 2 * lr # dtheta_prime/dtheta
dloss = 2 * theta_prime # dloss/dtheta_prime
expected_result = dloss * dtheta_prime # dloss/dtheta
assert torch.allclose(result, expected_result)
| 24,558
|
def all_done_tasks_for_person(person, client=default):
"""
Returns:
list: Tasks that are done for given person (only for open projects).
"""
person = normalize_model_parameter(person)
return raw.fetch_all("persons/%s/done-tasks" % person["id"], client=client)
| 24,559
|
def main(params):
"""
PyRate merge main function. Assembles product tiles in to
single geotiff files
"""
# setup paths
rows, cols = params["rows"], params["cols"]
mpiops.run_once(_merge_stack, rows, cols, params)
mpiops.run_once(_create_png_from_tif, params[cf.OUT_DIR])
if params[cf.TIME_SERIES_CAL]:
_merge_timeseries(rows, cols, params)
# mpiops.run_once(_delete_tsincr_files, params)
| 24,560
|
def transform_cfg_to_wcnf(cfg: CFG) -> CFG:
"""
Transform given cfg into Weakened Normal Chomsky Form (WNCF)
Parameters
----------
cfg: CFG
CFG object to transform to WNCF
Returns
-------
wncf: CFG
CFG in Weakened Normal Chomsky Form (WNCF)
"""
wncf = (
cfg.remove_useless_symbols()
.eliminate_unit_productions()
.remove_useless_symbols()
)
new_productions = wncf._get_productions_with_only_single_terminals()
new_productions = wncf._decompose_productions(new_productions)
return CFG(start_symbol=wncf.start_symbol, productions=new_productions)
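# Hedged usage sketch: this assumes the CFG class above is pyformlang.cfg.CFG
# (whose API matches the private helpers used in transform_cfg_to_wcnf); the
# toy grammar below is purely illustrative.
if __name__ == "__main__":
    from pyformlang.cfg import CFG

    toy_cfg = CFG.from_text("S -> a S b | a b")
    wncf = transform_cfg_to_wcnf(toy_cfg)
    # Every production now has at most two variables or a single terminal on the right.
    for production in wncf.productions:
        print(production)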
| 24,561
|
def add_def() -> bool:
""" Retrieves the definition from the user and
enters it into the database.
"""
logger.info("Start <add_def>")
fields = ["what", "def_body", "subject"]
fields_dict = {"what": '', "def_body": '', "subject": ''}
for fi in fields:
phrase = PHRASES[fi]
fields_dict[fi] = input(colored(phrase[0], "green"))
if not fields_dict[fi]:
cprint(phrase[1], "green")
fields_dict[fi] = input(colored(phrase[2], "green"))
if not fields_dict[fi]:
cprint("Mm-m, no - some help?..")
return False
lecture = input(colored("Lecture #", "green"))
    if lecture and not lecture.isdigit():
        cprint("Number of lecture must be integer, did you know that?",
               color="yellow")
        lecture = input(colored("Lecture #", "green"))
        if lecture and not lecture.isdigit():
            cprint("Mm-m, no - some help?..")
            return False
# what = what.lower()
lecture = int(lecture) if lecture else -1
result = [
fields_dict["what"], fields_dict["def_body"], fields_dict["subject"],
lecture
]
result[2] = result[2].capitalize()
logger.info(f"Get what=<{result[0]}>")
logger.debug(f"Get def_body=<{result[1]}>")
logger.debug(f"Get subject=<{result[2]}>")
logger.debug(f"Get lecture=<{result[3]}>")
data_base.add_def(*result)
cprint(f"All done! New definition of '{result[0]}' has been saved",
color="green")
return True
| 24,562
|
def test_anonymize_files_bad_input_missing(tmpdir):
"""Test anonymize_files with non-existent input."""
filename = "test.txt"
input_file = tmpdir.join(filename)
output_file = tmpdir.mkdir("out").join(filename)
with pytest.raises(ValueError, match="Input does not exist"):
anonymize_files(
str(input_file),
str(output_file),
True,
True,
salt=_SALT,
sensitive_words=_SENSITIVE_WORDS,
)
| 24,563
|
def _cd_step(examples: List[Example]):
"""
CD step to save all beam examples/tests/katas and their outputs on the GCS
"""
cd_helper = CDHelper()
cd_helper.store_examples(examples)
| 24,564
|
def insert_scope_name(urls):
"""
given a tuple of URLs for webpy with '%s' as a placeholder for
SCOPE_NAME_REGEXP, return a finalised tuple of URLs that will work for all
SCOPE_NAME_REGEXPs in all schemas
"""
regexps = get_scope_name_regexps()
result = []
for i in range(0, len(urls), 2):
if "%s" in urls[i]:
# add a copy for each unique SCOPE_NAME_REGEXP
for scope_name_regexp in regexps:
result.append(urls[i] % scope_name_regexp)
result.append(urls[i + 1])
else:
# pass through unmodified
result.append(urls[i])
result.append(urls[i + 1])
return tuple(result)
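# Illustrative sketch of the expansion, assuming get_scope_name_regexps()
# returns e.g. ["[a-z]+", "[a-z_]+"]; the URL patterns and handler names below
# are hypothetical.
if __name__ == "__main__":
    urls = (
        "/scope/%s/info", "ScopeInfoHandler",   # expanded once per regexp
        "/static/health", "HealthHandler",      # passed through unmodified
    )
    print(insert_scope_name(urls))
    # Under the assumption above:
    # ("/scope/[a-z]+/info", "ScopeInfoHandler",
    #  "/scope/[a-z_]+/info", "ScopeInfoHandler",
    #  "/static/health", "HealthHandler")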
| 24,565
|
def test_read_config_file(mock_config_file):
"""Try to read a configuration file."""
from cdparacord import config
config_file = mock_config_file
# Setup our expectations
var_name = 'editor'
expected_value = 'probably-not-a-real-editor'
# Write them to the file
with open(config_file, 'w') as f:
yaml.safe_dump({var_name: expected_value}, f)
c = config.Config()
# We should get the value in the file
assert c.get(var_name) == expected_value
| 24,566
|
def test_get_worst_match(name, capacity, pref_names):
""" Check that a hospital can return its worst match. """
hospital = Hospital(name, capacity)
others = [Resident(other) for other in pref_names]
hospital.matching = [others[0]]
assert hospital.get_worst_match() == others[0]
hospital.matching = others
assert hospital.get_worst_match() == others[-1]
| 24,567
|
def start_server() -> None:
"""Start the server.
If this was started from the command line the path of the config file may
be passed as the first parameter. Otherwise we take the default config file
from the package.
"""
filename = (
sys.argv[1] if len(sys.argv) == 2 else
pkg_resources.resource_filename(__name__, "config.yml")
)
config = load_config(filename)
initialise_logging(config.get('logging'))
app = create_application(config)
start_http_server(app, config['app'])
| 24,568
|
def fit_cochrane_orcutt(ts, regressors, maxIter=10, sc=None):
"""
    Fit a linear regression model with AR(1) errors. For references on the Cochrane-Orcutt model:
See [[https://onlinecourses.science.psu.edu/stat501/node/357]]
See : Applied Linear Statistical Models - Fifth Edition - Michael H. Kutner , page 492
The method assumes the time series to have the following model
Y_t = B.X_t + e_t
e_t = rho*e_t-1+w_t
    e_t has an autoregressive structure, where w_t is iid ~ N(0, sigma^2)
Outline of the method :
1) OLS Regression for Y (timeseries) over regressors (X)
    2) Apply an autocorrelation test (Durbin-Watson test) over the residuals, to test whether e_t still
    has an autoregressive structure
    3) if the test fails, stop; else update the coefficients (B's) accordingly and go back to step 1)
Parameters
----------
ts:
Vector of size N for time series data to create the model for as a Numpy array
regressors:
Matrix N X K for the timed values for K regressors over N time points as a Numpy array
maxIter:
        maximum number of iterations in the iterative Cochrane-Orcutt estimation
Returns instance of class [[RegressionARIMAModel]]
"""
    assert sc is not None, "Missing SparkContext"
    jvm = sc._jvm
jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitCochraneOrcutt(
_nparray2breezevector(sc, ts), _nparray2breezematrix(sc, regressors), maxIter
)
return RegressionARIMAModel(jmodel=jmodel, sc=sc)
| 24,569
|
def get_nodeweight(obj):
"""
utility function that returns a
node class and it's weight
can be used for statistics
to get some stats when NO Advanced Nodes are available
"""
k = obj.__class__.__name__
if k in ('Text',):
return k, len(obj.caption)
elif k == 'ImageLink' and obj.isInline():
return 'InlineImageLink', 1
return k, 1
| 24,570
|
def CMYtoRGB(C, M, Y):
""" convert CMY to RGB color
:param C: C value (0;1)
:param M: M value (0;1)
:param Y: Y value (0;1)
:return: RGB tuple (0;255) """
RGB = [(1.0 - i) * 255.0 for i in (C, M, Y)]
return tuple(RGB)
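# Quick worked example (the function is pure, no external dependencies): full
# cyan in CMY corresponds to green+blue at full intensity in RGB.
if __name__ == "__main__":
    print(CMYtoRGB(1.0, 0.0, 0.0))  # -> (0.0, 255.0, 255.0)
    print(CMYtoRGB(0.0, 0.0, 0.0))  # -> (255.0, 255.0, 255.0), i.e. white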
| 24,571
|
def update_local_artella_root():
"""
Updates the environment variable that stores the Artella Local Path
    NOTE: This is done by the Artella plugin when it is loaded, so we should not do it manually again
"""
metadata = get_metadata()
if metadata:
metadata.update_local_root()
return True
return False
| 24,572
|
def app_tests(enable_migrations, tags, verbosity):
"""Gets the TestRunner and runs the tests"""
# prepare the actual test environment
setup(enable_migrations, verbosity)
# reuse Django's DiscoverRunner
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
tags=tags,
)
failures = test_runner.run_tests(['.'])
return failures
| 24,573
|
def quicksort(numbers, low, high):
"""Python implementation of quicksort."""
if low < high:
pivot = _partition(numbers, low, high)
quicksort(numbers, low, pivot)
quicksort(numbers, pivot + 1, high)
return numbers
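# The snippet above relies on a _partition helper that is not shown. Because the
# recursion is quicksort(numbers, low, pivot) / quicksort(numbers, pivot + 1, high),
# it only terminates with a Hoare-style partition, so here is a minimal sketch
# under that assumption (not necessarily the original implementation):
def _partition(numbers, low, high):
    """Hoare partition: returns j such that numbers[low..j] <= pivot <= numbers[j+1..high]."""
    pivot = numbers[low]
    i, j = low - 1, high + 1
    while True:
        # advance i until an element >= pivot is found
        i += 1
        while numbers[i] < pivot:
            i += 1
        # retreat j until an element <= pivot is found
        j -= 1
        while numbers[j] > pivot:
            j -= 1
        if i >= j:
            return j
        numbers[i], numbers[j] = numbers[j], numbers[i]


if __name__ == "__main__":
    # high is the inclusive index of the last element
    print(quicksort([5, 2, 9, 1, 5, 6], 0, 5))  # -> [1, 2, 5, 5, 6, 9]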
| 24,574
|
def __save_dataset(dataset: set, output_csv_path: str):
"""
Function to save the set of dataset in the csv path
Arguments:
        dataset {set} -- The set of tuples making up the triplet dataset
output_csv_path {str} -- The output csv path to save
"""
dataset_list = list(dataset)
dataset_df = pd.DataFrame(data = dataset_list, columns = ["Sample1", "Sample2", "Label"])
# save it
dataset_df.to_csv(output_csv_path, index = False)
| 24,575
|
def bsput_delta(k, t, *, x0=1., r=0., q=0., sigma=1.):
"""
bsput_delta(k, t, *, x0=1., r=0., q=0., sigma=1.)
Black-Scholes put option delta.
See Also
--------
bscall
"""
r, q = np.asarray(r), np.asarray(q)
d1, d2 = bsd1d2(k, t, x0=x0, r=r, q=q, sigma=sigma)
return -exp(-q*t) * scipy.stats.norm.cdf(-d1)
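# Illustrative call (assumes the module's bsd1d2 helper and exp, used above, are
# available): an at-the-money put with one year to expiry, 20% volatility and
# zero rates has d1 = 0.1, so the delta is -N(-0.1), roughly -0.46.
if __name__ == "__main__":
    print(bsput_delta(1.0, 1.0, x0=1.0, r=0.0, q=0.0, sigma=0.2))  # ~ -0.46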
| 24,576
|
def regex_filter(patterns: Sequence[Regex], negate: bool = False, **kwargs) -> SigMapper:
"""Filter out the signals that do not match regex patterns (or do match if negate=True)."""
patterns = list(map(re.compile, patterns))
def filt(sigs):
def map_sig(sig):
return _regex_map(sig, patterns,
on_match = lambda s, p: (s if not negate else None),
on_no_match = lambda s: (None if not negate else s),
**kwargs)
return list(filter(None, map(map_sig, sigs)))
return filt
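# Hedged usage sketch: _regex_map (not shown here) is assumed to run
# pattern.search over plain signal-name strings and dispatch to
# on_match/on_no_match accordingly; the signal names below are hypothetical.
if __name__ == "__main__":
    keep_clocks = regex_filter([r"^clk", r"^rst"])
    print(keep_clocks(["clk_main", "rst_n", "data_bus"]))  # expected: ["clk_main", "rst_n"]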
| 24,577
|
def cpncc(img, vertices_lst, tri):
"""cython version for PNCC render: original paper"""
h, w = img.shape[:2]
c = 3
pnccs_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
pncc_img = crender_colors(vertices, tri, pncc_code, h, w, c)
pnccs_img[pncc_img > 0] = pncc_img[pncc_img > 0]
pnccs_img = pnccs_img.squeeze() * 255
return pnccs_img
| 24,578
|
def run_net(X, y, batch_size, dnn, data_layer_name, label_layer_name,
loss_layer, accuracy_layer, accuracy_sink, is_train):
"""Runs dnn on given data"""
start = time.time()
total_loss = 0.
run_iter = dnn.learn if is_train else dnn.run
math_engine = dnn.math_engine
accuracy_layer.reset = True # Reset previous statistics
for X_batch, y_batch in irnn_data_iterator(X, y, batch_size, math_engine):
run_iter({data_layer_name: X_batch, label_layer_name: y_batch})
        total_loss += loss_layer.last_loss * y_batch.batch_width
accuracy_layer.reset = False # Don't reset statistics within one epoch
avg_loss = total_loss / y.shape[0]
acc = accuracy_sink.get_blob().asarray()[0]
run_time = time.time() - start
return avg_loss, acc, run_time
| 24,579
|
def json_safe(arg: Any):
"""
    Checks whether arg can be JSON serialized; if so, returns arg as is,
    otherwise returns None.
"""
try:
json.dumps(arg)
return arg
    except (TypeError, ValueError):
return None
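# Small demonstration: plain containers pass through unchanged, while
# non-serializable objects (here a set) come back as None.
if __name__ == "__main__":
    print(json_safe({"a": 1, "b": [1, 2, 3]}))  # -> {'a': 1, 'b': [1, 2, 3]}
    print(json_safe({1, 2, 3}))                 # -> None (sets are not JSON serializable)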
| 24,580
|
def _key_iv_check(key_iv):
"""
    Validate the key or initialization vector (IV).
"""
    # key / IV
if key_iv is None or not isinstance(key_iv, string_types):
raise TypeError('Parameter key or iv:{} not a basestring'.format(key_iv))
if isinstance(key_iv, text_type):
key_iv = key_iv.encode(encoding=E_FMT)
if len(key_iv) > BLOCK_BYTE:
raise ValueError('Parameter key or iv:{} byte greater than {}'.format(key_iv.decode(E_FMT),
BLOCK_BYTE))
return key_iv
| 24,581
|
def decomposeArbitraryLength(number):
"""
    Returns the decomposition of a number into powers of two (as a list of exponents).
Examples
--------
number 42 : 32 + 8 + 2
powers : 5, 3, 1
"""
if number < 1:
raise WaveletException("Number should be greater than 1")
tempArray = list()
current = number
position = 0
while current >= 1.:
power = getExponent(current)
tempArray.append(power)
current = current - scalb(1., power)
position += 1
return tempArray[:position]
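# Worked example mirroring the docstring (relies on the module's
# getExponent/scalb helpers): 42 = 32 + 8 + 2, so the returned powers are 5, 3, 1.
if __name__ == "__main__":
    print(decomposeArbitraryLength(42))  # -> [5, 3, 1]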
| 24,582
|
def test_outfile_verbose() -> None:
""" outfile + verbose """
outfile = random_string()
if os.path.isfile(outfile):
os.remove(outfile)
try:
flag = '-v' if random.choice([0, 1]) else '--verbose'
rv, out = getstatusoutput(f'{RUN} {flag} -o {outfile} LSU {LSU}')
assert rv == 0
assert out.splitlines() == [
' 1: ./tests/inputs/lsu.fq',
f'Done, checked 4, wrote 2 to "{outfile}".'
]
assert os.path.isfile(outfile)
expected = open(LSU + '.upper.out').read().rstrip()
assert open(outfile).read().rstrip() == expected
finally:
if os.path.isfile(outfile):
os.remove(outfile)
| 24,583
|
def ax_draw_macd2(axes, ref, kdata, n1=12, n2=26, n3=9):
"""绘制MACD
:param axes: 指定的坐标轴
:param KData kdata: KData
:param int n1: 指标 MACD 的参数1
:param int n2: 指标 MACD 的参数2
:param int n3: 指标 MACD 的参数3
"""
macd = MACD(CLOSE(kdata), n1, n2, n3)
bmacd, fmacd, smacd = macd.getResult(0), macd.getResult(1), macd.getResult(2)
text = 'MACD(%s,%s,%s) DIF:%.2f, DEA:%.2f, BAR:%.2f'%(n1,n2,n3,fmacd[-1],smacd[-1],bmacd[-1])
#axes.text(0.01,0.97, text, horizontalalignment='left', verticalalignment='top', transform=axes.transAxes)
total = len(kdata)
x = [i for i in range(total)]
y = bmacd
y1,y2,y3 = [y[0]],[y[0]],[y[0]]
for i in range(1, total):
if ref[i]-ref[i-1]>0 and y[i]-y[i-1]>0:
y2.append(y[i])
y1.append('-')
y3.append('-')
elif ref[i]-ref[i-1]<0 and y[i]-y[i-1]<0:
y3.append(y[i])
y1.append('-')
y2.append('-')
else:
y1.append(y[i])
y2.append('-')
y3.append('-')
style = gcf().get_style(axes)
bar = Bar(subtitle=text, title_pos='10%', title_top='8%')
bar.add('1', x, y1, is_stack=True, is_legend_show=False, **style)
bar.add('2', x, y2, is_stack=True, is_legend_show=False, **style)
bar.add('3', x, y3, is_stack=True, is_legend_show=False, **style)
axes.add(bar)
fmacd.plot(axes=axes, line_type='dotted')
smacd.plot(axes=axes)
gcf().add_axis(axes)
return gcf()
| 24,584
|
def write_basis(basis, filename):
"""
Writes the given basis to the given file.
:param pd.DataFrame basis
:param str filename
"""
logging.info('Writing basis to {}'.format(filename))
basis.to_csv(filename)
| 24,585
|
def scipy_bfgs(
criterion_and_derivative,
x,
*,
convergence_absolute_gradient_tolerance=CONVERGENCE_ABSOLUTE_GRADIENT_TOLERANCE,
stopping_max_iterations=STOPPING_MAX_ITERATIONS,
norm=np.inf,
):
"""Minimize a scalar function of one or more variables using the BFGS algorithm.
For details see :ref:`list_of_scipy_algorithms`.
"""
options = {
"gtol": convergence_absolute_gradient_tolerance,
"maxiter": stopping_max_iterations,
"norm": norm,
}
res = scipy.optimize.minimize(
fun=criterion_and_derivative,
x0=x,
method="BFGS",
jac=True,
options=options,
)
return process_scipy_result(res)
| 24,586
|
def assign_to_coders_backend(sample,
limit_to_unassigned,
shuffle_pieces_before_assigning,
assign_each_piece_n_times,
max_assignments_per_piece,
coders, max_pieces_per_coder,
creation_time, creator):
"""Assignment to coders currently uses the following algorithm:
#. Get a list of all pieces in the sample.
#. If "shuffle pieces before assigning" is checked, shuffle the list of pieces
#. Make a numbering of "target coders" for this assignment, determine a
coder whose "turn" it is.
#. For each piece in the list of pieces, do the following:
#. If "limit to unassigned" is checked, and the piece is assigned to
someone, continue to the next piece.
#. Find how often this piece has already been assigned as
``n_piece_assignments``.
#. Determine number of new assignments *n* for this piece as::
n = min(
max_assignments_per_piece-n_piece_assignments,
assign_each_piece_n_times))
#. Do the following *n* times:
#. Try to assign the piece to the coder whose 'turn' it is.
#. If that coder already has this article assigned, go
round-robin among coders until someone does not have the article
assigned to them.
#. If no-one is found, skip this piece.
#. Advance the "turn", taking into account ``pieces_per_coder``.
If all coders have reached their ``pieces_per_coder`` (in this
assignment round), stop.
"""
log_lines = []
coder_idx_to_count = {}
num_coders = len(coders)
pieces = sample.pieces.all()
if shuffle_pieces_before_assigning:
pieces = list(pieces)
from random import shuffle
shuffle(pieces)
quit_flag = False
coder_idx = 0
for piece in pieces:
n_piece_assignments = CodingAssignment.objects.filter(
sample=sample, piece=piece).count()
if (limit_to_unassigned and n_piece_assignments):
log_lines.append("%s already assigned to someone, skipping."
% six.text_type(piece))
continue
assign_times = assign_each_piece_n_times
if max_assignments_per_piece is not None:
            max_assign_times = max(
                0,
                max_assignments_per_piece
                - n_piece_assignments)
assign_times = min(
max_assign_times,
assign_times)
if assign_times == 0:
log_lines.append("Piece '%s' has reached max assignment count, skipping."
% six.text_type(piece))
continue
for i_assignment in range(assign_times):
local_coder_idx = coder_idx
assignment_tries = 0
# was this piece already assigned to this coder? (if so, try next)
# Note that, in its desperation, this may assign a few more items
# to a coder than are technically allowed by their limit.
while (
CodingAssignment.objects.filter(
sample=sample, piece=piece,
coder=coders[local_coder_idx]).count()
and assignment_tries < num_coders):
local_coder_idx = (local_coder_idx + 1) % num_coders
assignment_tries += 1
if assignment_tries >= num_coders:
log_lines.append("Piece '%s' already assigned "
"to all coders, skipping." % six.text_type(piece))
break
assmt = CodingAssignment()
assmt.coder = coders[local_coder_idx]
assmt.piece = piece
assmt.sample = sample
assmt.state = assignment_states.not_started
assmt.latest_state_time = creation_time
assmt.creation_time = creation_time
assmt.creator = creator
assmt.save()
coder_idx_to_count[local_coder_idx] = \
coder_idx_to_count.get(local_coder_idx, 0) + 1
# {{{ advance coder turn
find_coder_tries = 0
while find_coder_tries < num_coders:
coder_idx = (coder_idx + 1) % num_coders
if (
max_pieces_per_coder is None
or coder_idx_to_count.get(coder_idx, 0)
< max_pieces_per_coder):
break
find_coder_tries += 1
if find_coder_tries >= num_coders:
log_lines.append("All coders have reached their item limit, "
"stopping.")
quit_flag = True
break
# }}}
if quit_flag:
break
for coder_idx, coder in enumerate(coders):
log_lines.append("%s: %d new items assigned"
% (coder, coder_idx_to_count.get(coder_idx, 0)))
return log_lines
| 24,587
|
def mask_rcnn_heads_add_mask_rcnn_losses(model, blob_mask):
"""Add Mask R-CNN specific losses."""
loss_mask = model.net.SigmoidCrossEntropyLoss(
[blob_mask, 'masks_int32'],
'loss_mask',
scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_mask])
model.AddLosses('loss_mask')
return loss_gradients
| 24,588
|
def _set_actor(user, sender, instance, signal_duid, **kwargs):
"""Signal receiver with extra 'user' and 'signal_duid' kwargs.
This function becomes a valid signal receiver when it is curried with the actor and a dispatch id.
"""
try:
auditlog = threadlocal.auditlog
except AttributeError:
pass
else:
if signal_duid != auditlog["signal_duid"]:
return
auth_user_model = get_user_model()
if (
sender == LogEntry
and isinstance(user, auth_user_model)
and instance.actor is None
):
instance.actor = user
instance.remote_addr = auditlog["remote_addr"]
instance.additional_request_data = auditlog["additional_request_data"]
| 24,589
|
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
# XXX: The following does not strictly adhere to RFCs in that empty
# names and values are legal (the former will only appear once and will
# be overwritten if multiple occurrences are present). This is
# mostly to deal with backwards compatibility.
for ii, param in enumerate(ns_header.split(';')):
param = param.strip()
key, sep, val = param.partition('=')
key = key.strip()
if not key:
if ii == 0:
break
else:
continue
# allow for a distinction between present and empty and missing
# altogether
val = val.strip() if sep else None
if ii != 0:
lc = key.lower()
if lc in known_attrs:
key = lc
if key == "version":
# This is an RFC 2109 cookie.
if val is not None:
val = strip_quotes(val)
version_set = True
elif key == "expires":
# convert expires date to seconds since epoch
if val is not None:
val = http2time(strip_quotes(val)) # None if invalid
pairs.append((key, val))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result
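# Example with a Netscape-style Set-Cookie header (assumes the module's
# http2time/strip_quotes helpers referenced above are available). Note the
# unquoted comma inside the expires attribute that motivates this ad-hoc parser.
if __name__ == "__main__":
    headers = ['sid=abc123; expires=Wed, 09 Jun 2021 10:18:14 GMT; path=/; secure']
    for pairs in parse_ns_headers(headers):
        print(pairs)
    # -> [('sid', 'abc123'), ('expires', <seconds since the epoch>), ('path', '/'),
    #     ('secure', None), ('version', '0')]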
| 24,590
|
def test_extra_010a():
"""
List item with weird progression.
"""
# Arrange
source_markdown = """* First Item
* Second Item
* Third Item
"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):First Item:]",
"[end-para:::True]",
"[li(2,2):3: :]",
"[para(2,4):: ]",
"[text(2,4):Second Item:]",
"[end-para:::True]",
"[li(3,3):4: :]",
"[para(3,5):]",
"[text(3,5):Third Item:]",
"[end-para:::True]",
"[BLANK(4,1):]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>First Item</li>
<li>Second Item</li>
<li>Third Item</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 24,591
|
def add_newrepo_subparser(subparser):
"""
Function to add a subparser for newrepo repository.
"""
### Base args: should be in every
parser_newrepo = subparser.add_parser('newrepo', help="download from newrepo")
parser_newrepo.add_argument('newrepoid', type=str, nargs='+',
help="One or more newrepo project or sample identifiers (EXAMPLE####)")
parser_newrepo.add_argument('-o', dest="outdir", type=str, default="",
help="directory in which to save output. created if it doesn't exist")
parser_newrepo.add_argument('-r',dest="retries", type=int, default=0,
help="number of times to retry download")
parser_newrepo.add_argument('-t',dest="threads", type=int, default=1,
help="threads to use (for pigz)")
parser_newrepo.add_argument('-f', dest="force", action="store_true",
help = "force re-download of files")
parser_newrepo.add_argument('-l', dest="list", action="store_true",
help="list (but do not download) samples to be grabbed")
### OPTIONAL: Use if metadata are available
parser_newrepo.add_argument('-m', dest="metadata", action="store_true",
help="save metadata")
### Add any repository-specific arguments here
| 24,592
|
def deploy(remote='origin', reload=False):
"""
Deploy the latest app to S3 and, if configured, to our servers.
"""
require('settings', provided_by=[production, staging, electron])
render.render_all()
if env.settings == 'electron':
if not os.path.exists('electron'):
os.makedirs('electron')
local('npm run-script pack')
if env.settings == 'fileserver':
local('rsync -vr www/ %s@%s:%s/%s' % (
app_config.FILE_SERVER_USER,
app_config.FILE_SERVER,
app_config.FILE_SERVER_PATH,
app_config.PROJECT_SLUG
))
if env.settings == 'production' or env.settings == 'staging':
flat.deploy_folder(
app_config.S3_BUCKET,
'www',
app_config.PROJECT_SLUG,
headers={
'Cache-Control': 'max-age=%i' % app_config.DEFAULT_MAX_AGE
},
ignore=['www/img/*', 'www/live-data/*']
)
flat.deploy_folder(
app_config.S3_BUCKET,
'www/img',
'%s/img' % app_config.PROJECT_SLUG,
headers={
'Cache-Control': 'max-age=%i' % app_config.ASSETS_MAX_AGE
}
)
| 24,593
|
def main(verbose: str, mt_file: str, target: str) -> None: # pylint: disable=W0613
"""
Move training data to zip model artifact
\f
:param verbose: more extensive logging
:param mt_file: json/yaml file with a mode training resource
:param target: directory where result model will be saved
"""
k8s_trainer = parse_model_training_entity(mt_file)
parameters: Dict[str, str] = k8s_trainer.model_training.spec.hyper_parameters
if not os.path.exists(target):
os.makedirs(target)
copyfile(parameters[INPUT_FILE_LOCATION], join(target, parameters[TARGET_FILE_LOCATION]))
copytree(parameters[INPUT_MODEL_LOCATION], join(target, parameters[TARGET_MODEL_LOCATION]))
click.echo("Files were copied!")
| 24,594
|
def test_conditional_retry_policy_default_parameters():
"""
Ensures that the conditional retry policy has not been implemented.
"""
try:
RequestsStampede.policy.retry.ConditionalRetryPolicy()
except NotImplementedError as e:
assert isinstance(e, NotImplementedError)
else:
assert False
| 24,595
|
def run_experiment(ctx, experiment_name: str):
"""run a single experiment."""
kwargs = {
"flag": "run_experiment",
"experiment_name": experiment_name,
}
_run_nerblackbox_main(ctx.obj, kwargs)
| 24,596
|
def vim_instance_api_get_instances(connection, msg):
"""
Handle Get-Instances API request
"""
DLOG.verbose("Get instance, all=%s." % msg.get_all)
instance_table = tables.tables_get_instance_table()
for instance in instance_table.values():
response = rpc.APIResponseGetInstance()
response.uuid = instance.uuid
response.name = instance.name
response.admin_state = instance.admin_state
response.oper_state = instance.oper_state
response.avail_status = instance.avail_status
response.action = instance.action
response.host_name = instance.host_name
response.instance_type_original_name \
= instance.instance_type_original_name
response.image_uuid = instance.image_uuid
response.vcpus = instance.vcpus
response.memory_mb = instance.memory_mb
response.disk_gb = instance.disk_gb
response.ephemeral_gb = instance.ephemeral_gb
response.swap_gb = instance.swap_gb
response.auto_recovery = instance.auto_recovery
response.live_migration_timeout \
= instance.max_live_migrate_wait_in_secs
response.live_migration_max_downtime \
= instance.max_live_migration_downtime_in_ms
if instance.host_name is not None:
host_table = tables.tables_get_host_table()
host = host_table.get(instance.host_name, None)
if host is not None:
response.host_uuid = host.uuid
connection.send(response.serialize())
DLOG.verbose("Sent response=%s" % response)
connection.close()
| 24,597
|
def same_kind_right_null(a: DataType, _: Null) -> bool:
"""Return whether `a` is nullable."""
return a.nullable
| 24,598
|
def unary_col(op, v):
"""
interpretor for executing unary operator expressions on columnars
"""
if op == "+":
return v
if op == "-":
return compute.subtract(0.0, v)
if op.lower() == "not":
return compute.invert(v)
raise Exception("unary op not implemented")
| 24,599
|