content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
import subprocess
import sys
import getpass
def ssh_cmd(ssh_cfg, command):
    """Build the argv list for running *command* on a remote host over SSH.

    Args:
        ssh_cfg: Mapping with at least a 'host' key and optionally 'port'
            (int or str).
        command: The remote command string to execute.

    Returns:
        list[str]: argv suitable for subprocess, e.g.
        ``['ssh', '-p', '22', 'user@host', 'uptime']``.

    Raises:
        Exception: On Windows when the SSH_BINARY env var is not set.
    """
    try:
        binary = os.environ['SSH_BINARY']
    except KeyError:
        if os.name != 'nt':
            # Locate ssh on the PATH; pass an argv list so no shell is spawned.
            binary = subprocess.check_output(
                ['which', 'ssh']).decode(sys.stdout.encoding).strip()
        else:
            raise Exception('SSH binary not provided.')
    cmd = [binary]
    if ssh_cfg.get('port'):
        # The port may be supplied as an int; argv entries must be strings.
        cmd.extend(['-p', str(ssh_cfg['port'])])
    cmd.append('{}@{}'.format(getpass.getuser(), ssh_cfg['host']))
    cmd.append(command)
    return cmd
import json
def test_sensor_query(cbcsdk_mock):
    """Test the sensor kit query.

    Builds a SensorKit query from one keyword-specified kit type plus one
    prebuilt SensorKit object, verifies the multipart POST payload sent to
    the sensor download endpoint, and checks both returned kit records.
    """
    def validate_post(url, param_table, **kwargs):
        # Invoked by the mock when the POST fires; asserts on the multipart
        # fields before returning the canned response.
        assert kwargs['configParams'] == 'SampleConfParams'
        r = json.loads(kwargs['sensor_url_request'])
        assert r == {'sensor_types': [{'device_type': 'LINUX', 'architecture': '64', 'type': 'SUSE',
                                       'version': '1.2.3.4'},
                                      {'device_type': 'MAC', 'architecture': '64', 'type': 'MAC',
                                       'version': '5.6.7.8'}],
                     'expires_at': '2021-04-01T23:39:52Z'}
        return GET_SENSOR_INFO_RESP
    cbcsdk_mock.mock_request("POST_MULTIPART", "/lcm/v1/orgs/test/sensor/_download", validate_post)
    api = cbcsdk_mock.api
    query = api.select(SensorKit)
    query.add_sensor_kit_type(device_type='LINUX', architecture='64', sensor_type='SUSE', version='1.2.3.4')
    skit = SensorKit.from_type(api, 'MAC', '64', 'MAC', '5.6.7.8')
    query.add_sensor_kit_type(skit).expires('2021-04-01T23:39:52Z').config_params('SampleConfParams')
    # Two kit types were added, so the query should count and yield two kits.
    assert query._count() == 2
    result = list(query)
    assert len(result) == 2
    assert result[0].sensor_type == {'device_type': 'LINUX', 'architecture': '64', 'type': 'SUSE', 'version': '1.2.3.4'}
    assert result[0].sensor_url == "https://SensorURL1"
    assert result[0].sensor_config_url == "https://SensorConfigURL1"
    assert result[0].error_code == "NoErr1"
    assert result[0].message == "Message1"
    assert result[1].sensor_type == {'device_type': 'MAC', 'architecture': '64', 'type': 'MAC', 'version': '5.6.7.8'}
    assert result[1].sensor_url == "https://SensorURL2"
    assert result[1].sensor_config_url == "https://SensorConfigURL2"
    assert result[1].error_code == "NoErr2"
    assert result[1].message == "Message2"
import os
def ExtractParametersBoundaries(Basin):
    """Extract upper/lower HBV parameter bounds for a catchment.

    Samples the packaged min/max parameter rasters with zonal statistics
    over the basin polygon, then plots the basin outline on top of the last
    raster read.

    Parameters
    ----------
    Basin : [Geodataframe]
        geodataframe of the catchment polygon; make sure the geodataframe
        contains one row only, if not merge all the polygons in the
        shapefile first.

    Returns
    -------
    UB : [list]
        list of the upper bound of the parameters.
    LB : [list]
        list of the lower bound of the parameters.

    Notes
    -----
    The parameters are
    ["tt", "sfcf","cfmax","cwh","cfr","fc","beta",
     "lp","k0","k1","k2","uzl","perc", "maxbas"]
    """
    parameters_path = os.path.dirname(Hapi.__file__) + "/Parameters"
    param_list = ["tt", "sfcf", "cfmax", "cwh", "cfr", "fc", "beta",  # "rfcf","e_corr",
                  "lp", "k0", "k1", "k2", "uzl", "perc", "maxbas"]  # ,"c_flux"
    # Reproject the basin to the rasters' CRS (taken from the first raster)
    # before computing zonal statistics.
    raster = rasterio.open(parameters_path + "/max/" + param_list[0] + "-Max.tif")
    basin = Basin.to_crs(crs=raster.crs)
    # Upper bounds: zonal max of each parameter's "max" raster.
    UB = []
    for name in param_list:
        raster = rasterio.open(parameters_path + "/max/" + name + "-Max.tif")
        UB.append(zonal_stats(basin, raster.read(1), affine=raster.transform,
                              stats=['max'])[0]['max'])
    # Lower bounds: zonal min of each parameter's "min" raster.
    LB = []
    for name in param_list:
        raster = rasterio.open(parameters_path + "/min/" + name + "-Min.tif")
        LB.append(zonal_stats(basin, raster.read(1), affine=raster.transform,
                              stats=['min'])[0]['min'])
    # Plot the basin over the last parameter raster that was read.
    ax = show((raster, 1), with_bounds=True)
    basin.plot(facecolor='None', edgecolor='blue', linewidth=2, ax=ax)
    # ax.set_xbound([Basin.bounds.loc[0,'minx']-10,Basin.bounds.loc[0,'maxx']+10])
    # ax.set_ybound([Basin.bounds.loc[0,'miny']-1, Basin.bounds.loc[0,'maxy']+1])
    return UB, LB
def encoder_apply_one_shift(prev_layer, weights, biases, act_type, name='E', num_encoder_weights=1):
    """Apply an encoder to data for only one time step (shift).

    Arguments:
        prev_layer -- input for a particular time step (shift)
        weights -- dictionary of weights
        biases -- dictionary of biases
        act_type -- string for activation type for nonlinear layers
            (i.e. sigmoid, relu, or elu); any other value leaves the hidden
            layers linear
        name -- string for prefix on weight matrices (default 'E' for encoder)
        num_encoder_weights -- number of weight matrices (layers) in encoder network (default 1)

    Returns:
        final -- output of encoder network applied to input prev_layer (a particular time step / shift)

    Side effects:
        None
    """
    # Hidden layers: affine transform followed by the chosen nonlinearity.
    # A plain range() suffices for the counter (np.arange allocated an array).
    for i in range(num_encoder_weights - 1):
        prev_layer = tf.matmul(prev_layer, weights['W%s%d' % (name, i + 1)]) + biases['b%s%d' % (name, i + 1)]
        if act_type == 'sigmoid':
            prev_layer = tf.sigmoid(prev_layer)
        elif act_type == 'relu':
            prev_layer = tf.nn.relu(prev_layer)
        elif act_type == 'elu':
            prev_layer = tf.nn.elu(prev_layer)
    # Apply the last layer without any nonlinearity.
    final = tf.matmul(prev_layer, weights['W%s%d' % (name, num_encoder_weights)]) + biases[
        'b%s%d' % (name, num_encoder_weights)]
    return final
import uuid
def db_entry_generate_id():
    """Generate a fresh identifier for a new database entry.

    Returns a 32-character lowercase hex string derived from a random
    UUID4 (the ``hex`` attribute is exactly the dashless lowercase form).
    """
    return uuid.uuid4().hex
import os
def get_manager_rest_service_host():
    """Return the host on which the manager REST service is running.

    The host name is taken from the environment variable named by
    ``constants.REST_HOST_KEY``; a missing variable raises KeyError.
    """
    rest_host = os.environ[constants.REST_HOST_KEY]
    return rest_host
def angle_boxplus(a, v):
    """Box-plus for angles: add increment ``v`` to angle ``a`` in radians.

    The sum is passed through ``angle_unwrap`` so the result stays in the
    wrapped angle range.
    """
    total = a + v
    return angle_unwrap(total)
def color_gradient_threshold(img, s_thresh=[(170, 255),(170, 255)], sx_thresh=(20, 100)):
    """
    Apply a color threshold and a gradient threshold to the given image.
    Args:
        img: apply thresholds to this image (BGR, per the cv2.COLOR_BGR2* calls)
        s_thresh: Color threshold. NOTE(review): this parameter is currently
            ignored -- it is reassigned below to the hard-coded ranges
            (125, 180) for the HLS S channel and (220, 255) for the LAB B
            channel before it is ever read.
        sx_thresh: Gradient threshold (apply to x gradient on L channel of HLS)
    Returns:
        new image (uint8, 0/255 per channel) with thresholds applied:
        green channel = gradient mask, red channel = combined color mask
    """
    img = np.copy(img)
    # Convert to HLS color space and separate the channels.
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    # H = hls[:, :, 0]
    L = hls[:, :, 1]
    S = hls[:, :, 2]
    # Convert to LAB color space and separate the channels.
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    # L = lab[:, :, 0]
    # A = lab[:, :, 1]
    B = lab[:, :, 2]
    # Apply Sobel x (take the derivative on the x axis) to the HLS L channel.
    sobelx = cv2.Sobel(L, cv2.CV_64F, 1, 0)
    # Absolute x derivative to accentuate lines away from horizontal.
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Appply gradient threshold.
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) &
             (scaled_sobel <= sx_thresh[1])] = 1
    # Apply color channel threshold.
    # NOTE(review): from here on s_thresh is overwritten, shadowing the
    # caller-supplied parameter (see docstring).
    s_thresh = (125, 180)
    S = S * (255 / np.max(S))  # normalize
    S_thresh = np.zeros_like(S)
    S_thresh[(S > s_thresh[0]) & (S <= s_thresh[1])] = 1
    s_thresh = (220, 255)
    B = B * (255 / np.max(B))  # normalize
    B_thresh = np.zeros_like(B)
    B_thresh[(B > s_thresh[0]) & (B <= s_thresh[1])] = 1
    # Combine HLS S and Lab B channel thresholds (logical OR of the masks).
    sb_binary = np.zeros_like(S_thresh)
    sb_binary[(S_thresh == 1) | (B_thresh == 1)] = 1
    # Stack each channel and return.
    #                          B                        G         R
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, sb_binary))
    color_binary *= 255  # Convert from [0, 1] back to [0, 255]
    return np.uint8(color_binary)
def toeplitz(c, r=None):
    """
    Construct a Toeplitz matrix.
    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.
    Parameters
    ----------
    c : array_like
        First column of the matrix; flattened to 1-D regardless of shape.
    r : array_like
        First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
        in this case, if c[0] is real, the result is a Hermitian matrix.
        r[0] is ignored; the first row of the returned matrix is
        ``[c[0], r[1:]]``. Flattened to 1-D regardless of shape.
    Returns
    -------
    A : (len(c), len(r)) ndarray
        The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
    See also
    --------
    circulant : circulant matrix
    hankel : Hankel matrix
    Examples
    --------
    >>> from scipy.linalg import toeplitz
    >>> toeplitz([1,2,3], [1,4,5,6])
    array([[1, 4, 5, 6],
           [2, 1, 4, 5],
           [3, 2, 1, 4]])
    >>> toeplitz([1.0, 2+3j, 4-1j])
    array([[ 1.+0.j,  2.-3.j,  4.+1.j],
           [ 2.+3.j,  1.+0.j,  2.-3.j],
           [ 4.-1.j,  2.+3.j,  1.+0.j]])
    """
    col = np.asarray(c).ravel()
    row = col.conjugate() if r is None else np.asarray(r).ravel()
    # Pool every distinct diagonal value into one 1-D buffer: the reversed
    # tail of the first row, followed by the full first column.
    diag_vals = np.concatenate((row[-1:0:-1], col))
    # Broadcasting a column of row-offsets against a reversed row of
    # column-offsets yields a 2-D index grid; fancy-indexing diag_vals with
    # it lays the values out in Toeplitz form.
    row_idx = np.arange(len(col)).reshape(-1, 1)
    col_idx = np.arange(len(row) - 1, -1, -1).reshape(1, -1)
    return diag_vals[row_idx + col_idx]
def MQWS(settings, T):
    """
    Generates a surface density profile as the per method used in Mayer, Quinn,
    Wadsley, and Stadel 2004
    ** ARGUMENTS **
    NOTE: if units are not supplied, assumed units are AU, Msol
    settings : IC settings
        settings like those contained in an IC object (see ICgen_settings.py)
    T : callable
        A function to calculate temperature as a function of radius
    ** RETURNS **
    r : SimArray
        Radii at which sigma is calculated
    sigma : SimArray
        Surface density profile as a function of R
    """
    # Q calculation parameters: gravitational and Boltzmann constants in
    # pynbody's named-unit form.
    G = SimArray([1.0],'G')
    kB = SimArray([1.0],'k')
    # Load in settings
    n_points = settings.sigma.n_points
    rin = settings.sigma.rin
    rout = settings.sigma.rout
    rmax = settings.sigma.rmax
    Qmin = settings.sigma.Qmin
    m = settings.physical.m
    Mstar = settings.physical.M
    #m_disk = settings.sigma.m_disk
    # Coerce the radial settings to AU if they were given unitless.
    rin = match_units(pynbody.units.au, rin)[1]
    rout = match_units(pynbody.units.au, rout)[1]
    #m_disk = match_units(pynbody.units.Msol, m_disk)[1]
    # Default outer grid edge: 2.5x the density cutoff radius.
    if rmax is None:
        rmax = 2.5 * rout
    else:
        rmax = match_units(pynbody.units.au, rmax)[1]
    r = np.linspace(0, rmax, n_points)
    # NOTE(review): r[0] == 0, so the 1/r factors below produce inf/nan at
    # the origin; those entries are zeroed out at the end of the function.
    a = (rin/r).in_units('1')
    b = (r/rout).in_units('1')
    # Density profile with inner and outer exponential cutoffs.
    sigma = (np.exp(-a**2 - b**2)/r) * Mstar.units/r.units
    # Calculate Q (Toomre stability parameter)
    Q = np.sqrt(Mstar*kB*T(r)/(G*m*r**3))/(np.pi*sigma)
    Q.convert_units('1')
    # Rescale sigma so the minimum Q over the disk equals the requested Qmin.
    sigma *= np.nanmin(Q)/Qmin
    # Remove all nans
    sigma[np.isnan(sigma)] = 0.0
    return r, sigma
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):
    """
    Predict next state (prior) using the Kalman filter state propagation
    equations.
    Parameters
    ----------
    x : numpy.array
        State estimate vector
    P : numpy.array
        Covariance matrix
    F : numpy.array()
        State Transition matrix
    Q : numpy.array, Optional
        Process noise matrix
    u : numpy.array, Optional, default 0.
        Control vector. If non-zero, it is multiplied by B
        to create the control input into the system.
    B : numpy.array, optional, default 1.
        Control transition matrix.
    alpha : float, Optional, default=1.0
        Fading memory setting. 1.0 gives the normal Kalman filter, and
        values slightly larger than 1.0 (such as 1.02) give a fading
        memory effect - previous measurements have less influence on the
        filter's estimates. This formulation of the Fading memory filter
        (there are many) is due to Dan Simon
    Returns
    -------
    x : numpy.array
        Prior state estimate vector
    P : numpy.array
        Prior covariance matrix
    """
    # Promote a scalar transition "matrix" so .T below is valid.
    if np.isscalar(F):
        F = np.array(F)
    # Use np.dot explicitly rather than a bare `dot` name so the function is
    # self-contained.
    x = np.dot(F, x) + np.dot(B, u)          # x' = Fx + Bu
    P = (alpha * alpha) * np.dot(np.dot(F, P), F.T) + Q   # P' = a^2 FPF' + Q
    return x, P
from typing import Dict
def footer_processor(request: HttpRequest) -> Dict[str, str]:
    """Expose the footer 'email me' message to the context of all templates.

    The footer is included everywhere, so this context processor makes the
    message globally available; an empty dict is returned when no
    KlanadTranslations row exists yet.
    """
    translations = KlanadTranslations.objects.all()
    try:
        return {"footer_email_me": translations[0].footer_email_me}
    except IndexError:
        return {}
def genome_2_validator(genome_2):
    """
    Conducts various test to ensure the stability of the Genome 2.0

    Args:
        genome_2: mapping whose keys are gene strings.

    Returns:
        int: the number of genes that failed the length requirement
        (0 means the genome passed).
    """
    standard_gene_length = 27

    def structure_test_gene_lengths():
        """
        Check length requirements for each gene
        """
        gene_anomalies = 0
        for key in genome_2:
            if len(key) != standard_gene_length:
                print("Warning! Key did not meet length requirement:", key)
                gene_anomalies += 1
        if gene_anomalies == 0:
            print("\nGene length verification...... PASSED!")
        else:
            print("\nGene length verification...... Failed! ", gene_anomalies, " anomalies detected")
        return gene_anomalies

    # Bug fix: the check above was defined but never invoked, so the
    # validator always returned None without validating anything.
    return structure_test_gene_lengths()
import os
def product_info_from_tree(path):
    """Extract product information from a directory.

    Arguments:
        path (str): path to a directory expected to contain ``product.txt``

    Returns:
        The parsed product info as produced by ``_get_product_info``.

    Raises:
        CommandExecutionError: when ``product.txt`` does not exist in *path*.
    """
    log.debug('Reading product version from %r', path)
    product_txt = os.path.join(path, 'product.txt')
    if not os.path.isfile(product_txt):
        raise CommandExecutionError(
            'Path {} has no "product.txt"'.format(path))
    # Reuse the already-computed path instead of re-joining it.
    with salt.utils.files.fopen(product_txt) as fd:
        return _get_product_info(fd.read())
def to_poly(group):
    """Convert set of fire events to polygons.

    Args:
        group: DataFrame-like whose rows carry tile coordinates 'H', 'V' and
            pixel indices 'i', 'j' (consumed by corners_to_poly; the
            sinusoidal CRS below suggests the MODIS grid -- confirm).

    Returns:
        GeoDataFrame containing a single dissolved polygon in WGS84
        (EPSG:4326).
    """
    # create geometries from events
    geometries = []
    for _, row in group.iterrows():
        geometry = corners_to_poly(row['H'], row['V'], row['i'], row['j'])
        geometries.append(geometry)
    # convert to single polygon
    vt_poly = gpd.GeoDataFrame(
        crs='+proj=sinu +R=6371007.181 +nadgrids=@null +wktext',
        geometry=geometries,
    )
    # dissolve polygons: a constant key merges every geometry into one row
    vt_poly['dissolvefield'] = 1
    vt_poly = vt_poly.dissolve('dissolvefield')
    # reproject to WGS84
    vt_poly = vt_poly.to_crs('epsg:4326')
    return vt_poly
def get_user():
    """
    Get the current logged in user to Jupyter
    :return: (str) login name taken from JUPYTERHUB_USER, falling back to USER
    """
    return env_vars.get('JUPYTERHUB_USER') or env_vars.get('USER')
import logging
import time
def get_top_articles(update=False):
    """
    Retrieve 10 most recent wiki articles from the datastore or from memcache
    :param update: when this is specified, articles are retrived from the datastore
    :return: a list of 10 most recent articles
    """
    # use caching to avoid running unnecessary DB queries at each page load
    key = 'top_ten'
    articles = memcache.get(key)
    logging.warn('MEMCACHE | Wiki articles %s' % str(articles))
    # Refresh from the datastore on a cache miss, an empty cached list, or an
    # explicit update request.
    if (articles is None) or (len(articles) == 0) or update:
        # necessary artificial delay when a new article has just been persisted to the datastore
        # (datastore writes are eventually consistent -- TODO confirm)
        if update:
            time.sleep(2)
        articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')
        # Materialize the query before caching so memcache stores the rows.
        articles = list(articles)
        memcache.set(key, articles)
        logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))
    return articles
def bucket(x, bucket_size):
"""'Pixel bucket' a numpy array.
By 'pixel bucket', I mean, replace groups of N consecutive pixels in
the array with a single pixel which is the sum of the N replaced
pixels. See: http://stackoverflow.com/q/36269508/513688
"""
for b in bucket_size: assert float(b).is_integer()
bucket_size = [int(b) for b in bucket_size]
x = np.ascontiguousarray(x)
new_shape = np.concatenate((np.array(x.shape) // bucket_size, bucket_size))
old_strides = np.array(x.strides)
new_strides = np.concatenate((old_strides * bucket_size, old_strides))
axis = tuple(range(x.ndim, 2*x.ndim))
return np.lib.stride_tricks.as_strided(x, new_shape, new_strides).sum(axis) | 8ff3eda1876b48a8bdd4fbfe6b740ed7e3498c51 | 26,317 |
def create_msg(q1, q2, q3):
    """ Converts the given configuration into a string of bytes
    understood by the robot arm.

    Parameters:
        q1: The joint angle for the first (waist) axis.
        q2: The joint angle for the second (shoulder) axis.
        q3: The joint angle for the third (wrist) axis.
    Returns:
        The string of bytes, e.g. b'1,2,3\\n' (values integer-formatted).
    """
    line = '%d,%d,%d\n' % (q1, q2, q3)
    return line.encode()
def get_config():
    """Provide the global configuration object.

    Lazily creates a single ComplianceConfig instance on first call and
    returns the same instance on subsequent calls. The backing name
    ``__config`` is expected to be initialised to None at module level
    (not visible in this chunk).
    """
    global __config
    if __config is None:
        __config = ComplianceConfig()
    return __config
import array
def size(x: "array.Array") -> "array.Array":
    """Takes a tensor as input and outputs a int64 scalar that equals to the total
    number of elements of the input tensor.

    Note that len(x) is more efficient (and should give the same result).
    The difference is that this `size` free function adds the `Size` node to the
    `ONNX` graph, which could improve runtime if the output is used in subsequent
    operations. It will also know the tensor size at runtime (which may not be
    known when the graph is declared, i.e. when using len(x)).

    NOTE(review): the "array" in the annotations refers to this project's
    array module (graph tensors), not the stdlib `array` -- confirm.

    Args:
        x (array.Array): Input tensor
    Returns:
        array.Array: Size of the input tensor
    """
    @allowed_types(all_types)
    def size_helper(x: "array.Array"):
        # Emit the ONNX "Size" node; the result is a rank-0 int64 tensor,
        # hence the dynamic shape and int64 dtype set below.
        result = nary_operator("Size", x)
        result._dims = DynamicShape()
        result._dtype = np.int64
        return result
    return size_helper(x)
def linspace(start, stop, length):
    """
    Create a pdarray of linearly spaced points in a closed interval.
    Parameters
    ----------
    start : scalar
        Start of interval (inclusive)
    stop : scalar
        End of interval (inclusive)
    length : int
        Number of points
    Returns
    -------
    pdarray, float64
        Array of evenly spaced points along the interval
    Raises
    ------
    TypeError
        If any argument is not a scalar, or if length is not an int64
    See Also
    --------
    arange
    Examples
    --------
    >>> ak.linspace(0, 1, 5)
    array([0, 0.25, 0.5, 0.75, 1])
    """
    if not all((np.isscalar(start), np.isscalar(stop), np.isscalar(length))):
        raise TypeError("all arguments must be scalars")
    # Format each scalar with the server's expected wire format for its dtype.
    starttype = resolve_scalar_dtype(start)
    startstr = NUMBER_FORMAT_STRINGS[starttype].format(start)
    stoptype = resolve_scalar_dtype(stop)
    stopstr = NUMBER_FORMAT_STRINGS[stoptype].format(stop)
    lentype = resolve_scalar_dtype(length)
    if lentype != 'int64':
        raise TypeError("Length must be int64")
    lenstr = NUMBER_FORMAT_STRINGS[lentype].format(length)
    # Ask the server to build the array and wrap its reply in a pdarray handle.
    repMsg = generic_msg("linspace {} {} {}".format(startstr, stopstr, lenstr))
    return create_pdarray(repMsg)
def _load_corpus_as_dataframe(path):
    """
    Load the tweets corpus from the JSON file in 'path' into a DataFrame.

    :param path: path to the JSON corpus file
    :return: DataFrame with the columns
        Id | Tweet | Username | Date | Hashtags | Likes | Retweets | Url | Language
    """
    json_data = load_json_file(path)
    tweets_df = _load_tweets_as_dataframe(json_data)
    # Mutates tweets_df in place (presumably adds the Hashtags/Url columns
    # selected below -- confirm against its definition).
    _clean_hashtags_and_urls(tweets_df)
    # Rename columns to obtain: Tweet | Username | Date | Hashtags | Likes | Retweets | Url | Language
    corpus = tweets_df.rename(
        columns={"id": "Id", "full_text": "Tweet", "screen_name": "Username", "created_at": "Date",
                 "favorite_count": "Likes",
                 "retweet_count": "Retweets", "lang": "Language"})
    # select only interesting columns
    filter_columns = ["Id", "Tweet", "Username", "Date", "Hashtags", "Likes", "Retweets", "Url", "Language"]
    corpus = corpus[filter_columns]
    return corpus
def UNTL_to_encodedUNTL(subject):
    """Normalize a UNTL subject heading to be used in SOLR."""
    normalized = normalize_UNTL(subject)
    # Order matters: spaces become underscores first, then the resulting
    # '_-_' separators collapse into '/'.
    return normalized.replace(' ', '_').replace('_-_', '/')
def parse_cigar(cigarlist, ope):
    """ for a specific operation (mismach, match, insertion, deletion... see above)
    return occurences and index in the alignment

    Returns a list of [length, offset] pairs, where offset is the alignment
    position at which the matching operation starts.
    """
    # Operations that consume alignment columns (match, insertion, deletion,
    # sequence match, sequence mismatch).
    consuming_ops = (0, 1, 2, 7, 8)
    offset = 0
    hits = []
    for operation, length in cigarlist:
        if operation == ope:
            hits.append([length, offset])
        if operation in consuming_ops:
            offset += length
    return hits
def _decode_token_compact(token):
    """
    Decode a compact-serialized JWT
    Returns {'header': ..., 'payload': ..., 'signature': ...}
    """
    # _unpack_token_compact splits the dotted form and base64url-decodes the
    # parts; the raw signature bytes are re-encoded for the returned dict.
    header, payload, raw_signature, signing_input = _unpack_token_compact(token)
    token = {
        "header": header,
        "payload": payload,
        "signature": base64url_encode(raw_signature)
    }
    return token
def _top_N_str(m, col, count_col, N):
"""
Example
-------
>>> df = pd.DataFrame({'catvar':["a","b","b","c"], "numvar":[10,1,100,3]})
>>> _top_N_str(df, col = 'catvar', count_col ='numvar', N=2)
'b (88.6%), a (8.8%)'
"""
gby = m.groupby(col)[count_col].agg(np.sum)
gby = 100 * gby / gby.sum()
gby = gby.sort_values(ascending=False)
out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx,v in gby.iteritems()][:N])
return out | d80e5f7822d400e88594a96c9e1866ede7d9843e | 26,326 |
def insert_box(part, box, retries=10):
    """Adds a box / connector to a part using boolean union. Operating under the assumption
    that adding a connector MUST INCREASE the number of vertices of the resulting part.

    :param part: part to add connector to
    :type part: trimesh.Trimesh
    :param box: connector to add to part
    :type box: trimesh.Trimesh (usually a primitive like Box)
    :param retries: number of times to retry before raising an error, checking to see if number of
        vertices increases. Default = 10
    :type retries: int
    :return: The part with the connector added
    :rtype: trimesh.Trimesh
    :raises Exception: if no retry produces a union with more vertices.
    """
    # Repair the mesh first; boolean ops on degenerate meshes can fail silently.
    utils.trimesh_repair(part)
    # Boolean unions can fail non-deterministically, so retry and accept the
    # result only when the vertex count grew (per the docstring's assumption).
    for t in range(retries):
        new_part_slot = part.union(box)
        if len(new_part_slot.vertices) > len(part.vertices):
            return new_part_slot
    raise Exception("Couldn't insert slot")
import argparse
def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, required=True)
    parser.add_argument('--spacy_model', type=str, default='en_core_web_sm')
    # The boolean switches all share the same store_true shape.
    for flag in ('--omit_answers', '--include_stopwords', '--lowercase'):
        parser.add_argument(flag, action='store_true')
    parser.add_argument('--k', type=int, default=5)
    return parser.parse_args()
def pnorm(x, p):
    """
    Returns the L_p norm of vector 'x'.
    :param x: The vector (any iterable of numbers).
    :param p: The order of the norm.
    :return: The L_p norm of the vector.
    """
    # sum() over a generator replaces the manual accumulator loop; an empty
    # vector yields 0 ** (1/p) == 0.0, matching the original behavior.
    return sum(abs(component) ** p for component in x) ** (1 / p)
import os
def get_arr(logdir_multiseed, acc_thresh_dict=None, agg_mode='median'):
    """
    Reads a set of evaluation log files for multiple seeds and computes
    the aggregated metrics with error bounds. Also computes the CL metrics
    with error bounds.
    Args:
        logdir_multiseed (str): Path to the parent log folder containing folders for different seeds
        acc_thresh_dict (dict): Thresholds used for computing CL metrics
            (required when agg_mode is not None)
        agg_mode (str): Should be either 'mean' or 'median'; when None, no
            aggregation is performed and only the raw per-datapoint arrays
            are returned
    Returns:
        When agg_mode is None: (mid_metric_arr, mid_log_metric_arr)
        Otherwise: (metric_arr, log_metric_arr, cl_metric_arr, seeds,
        description, cl_sequence), where each *_arr is a dict with
        'mid'/'high'/'low' entries holding the central value and error bounds.
    """
    # Fetch the log files for the different seeds
    eval_files = get_eval_files(logdir_multiseed)
    # Create a merged JSON file from the different
    # JSON log files for the different seeds
    eval_dict = None
    for eval_file in eval_files:
        eval_dict_single = read_dict(eval_file)
        if eval_dict is None:
            eval_dict = deepcopy(eval_dict_single)
        else:
            eval_dict = merge(eval_dict, eval_dict_single)
    args = eval_dict['args']
    data = eval_dict['data']
    local_home = os.path.expanduser('~')
    # Count the number of seeds
    if isinstance(args['seed'], int):
        seeds = args['seed']
        num_seeds = 1
    else:
        seeds = args['seed'].split(',')
        num_seeds = len(seeds)
    description = args['description']
    # Rebase the sequence-file path onto the local home directory by dropping
    # its first path components (presumably the original user's home -- confirm).
    num_tasks, cl_sequence = count_lines(os.path.join(local_home, '/'.join(args['seq_file'].split('/')[3:])))
    # How many metrics are measured?
    metrics = data['metric_errors']['train_task_0']['eval_task_0'].keys()
    # How many data points per metric?
    num_data_points = len(data['metric_errors']['train_task_0']['eval_task_0']['swept'])
    # Create a numpy array for each metric, which are stored in a dict
    # For storing the trajectory metrics
    mid_metric_arr = dict()
    high_err_metric_arr = dict()
    low_err_metric_arr = dict()
    # For storing the log_10 of the trajectory metrics
    mid_log_metric_arr = dict()
    high_err_log_metric_arr = dict()
    low_err_log_metric_arr = dict()
    # For storing the accuracy of the trajectories
    acc_arr = dict()
    # Initialize the arrays
    for metric in metrics:
        if agg_mode is not None:
            mid_metric_arr[metric] = np.zeros((num_tasks, num_tasks))
            high_err_metric_arr[metric] = np.zeros((num_tasks, num_tasks))
            low_err_metric_arr[metric] = np.zeros((num_tasks, num_tasks))
            mid_log_metric_arr[metric] = np.zeros((num_tasks, num_tasks))
            high_err_log_metric_arr[metric] = np.zeros((num_tasks, num_tasks))
            low_err_log_metric_arr[metric] = np.zeros((num_tasks, num_tasks))
            acc_arr[metric] = np.zeros((num_tasks, num_tasks, num_seeds))
        else:
            mid_metric_arr[metric] = np.zeros((num_tasks, num_tasks, num_data_points))
            mid_log_metric_arr[metric] = np.zeros((num_tasks, num_tasks, num_data_points))
    # Fill the arrays: rows index the task trained on, columns the task
    # evaluated on (only tasks seen so far, hence range(train_index+1)).
    for task_name in cl_sequence:
        train_index = cl_sequence.index(task_name)
        task_results = data['metric_errors'][f'train_task_{train_index}']
        for eval_index in range(train_index+1):
            eval_results = task_results[f'eval_task_{eval_index}']
            for metric_name, metric_errors in eval_results.items():
                if agg_mode == 'mean':
                    mid_metric = np.mean(metric_errors)
                    high_err_metric = mid_metric + np.std(metric_errors)
                    low_err_metric = mid_metric - np.std(metric_errors)
                    mid_log_metric = np.mean(np.log10(metric_errors))
                    high_err_log_metric = mid_log_metric + np.std(np.log10(metric_errors))
                    low_err_log_metric = mid_log_metric - np.std(np.log10(metric_errors))
                elif agg_mode == 'median':
                    mid_metric = np.median(metric_errors)
                    high_err_metric = np.percentile(metric_errors, 75)
                    low_err_metric = np.percentile(metric_errors, 25)
                    mid_log_metric = np.median(np.log10(metric_errors))
                    high_err_log_metric = np.percentile(np.log10(metric_errors), 75)
                    low_err_log_metric = np.percentile(np.log10(metric_errors), 25)
                elif agg_mode is None:
                    # Do not compute aggregate error metrics, return the raw data
                    mid_metric = metric_errors
                    high_err_metric = None
                    low_err_metric = None
                    mid_log_metric = np.log10(metric_errors)
                    high_err_log_metric = None
                    low_err_log_metric = None
                else:
                    raise NotImplementedError(f'Unknown agg_mode: {agg_mode}')
                # `acc` will be an array of length `num_seeds`
                if agg_mode is not None:
                    acc = accuracy(metric_errors, threshold=acc_thresh_dict[metric_name], num_seeds=num_seeds)
                    acc_arr[metric_name][train_index, eval_index] = acc
                    mid_metric_arr[metric_name][train_index, eval_index] = mid_metric
                    high_err_metric_arr[metric_name][train_index, eval_index] = high_err_metric
                    low_err_metric_arr[metric_name][train_index, eval_index] = low_err_metric
                    mid_log_metric_arr[metric_name][train_index, eval_index] = mid_log_metric
                    high_err_log_metric_arr[metric_name][train_index, eval_index] = high_err_log_metric
                    low_err_log_metric_arr[metric_name][train_index, eval_index] = low_err_log_metric
                else:
                    mid_metric_arr[metric_name][train_index, eval_index] = np.array(mid_metric)
                    mid_log_metric_arr[metric_name][train_index, eval_index] = np.array(mid_log_metric)
    if agg_mode is None:
        # If we do not want aggregate metrics
        return mid_metric_arr, mid_log_metric_arr
    else:
        # If we want aggrgate metrics
        metric_arr = {'mid': mid_metric_arr,
                      'high': high_err_metric_arr,
                      'low': low_err_metric_arr}
        log_metric_arr = {'mid': mid_log_metric_arr,
                          'high': high_err_log_metric_arr,
                          'low': low_err_log_metric_arr}
        # Calculate the CL metrics, aggregated per metric over the seeds
        mid_cl_metric_arr = dict()
        high_cl_metric_arr = dict()
        low_cl_metric_arr = dict()
        for metric_name in metrics:
            acc_list, bwt_list, bwt_plus_list, rem_list = list(), list(), list(), list()
            for seed_idx in range(num_seeds):
                arr = acc_arr[metric_name][:,:,seed_idx]
                acc, bwt, bwt_plus, rem = get_cl_metrics(arr)
                acc_list.append(acc)
                bwt_list.append(bwt)
                bwt_plus_list.append(bwt_plus)
                rem_list.append(rem)
            if agg_mode == 'mean':
                mid_acc = np.mean(acc_list)
                high_acc = mid_acc + np.std(acc_list)
                low_acc = mid_acc - np.std(acc_list)
                mid_bwt = np.mean(bwt_list)
                high_bwt = mid_bwt + np.std(bwt_list)
                low_bwt = mid_bwt - np.std(bwt_list)
                mid_bwt_plus = np.mean(bwt_plus_list)
                high_bwt_plus = mid_bwt_plus + np.std(bwt_plus_list)
                low_bwt_plus = mid_bwt_plus - np.std(bwt_plus_list)
                mid_rem = np.mean(rem_list)
                high_rem = mid_rem + np.std(rem_list)
                low_rem = mid_rem - np.std(rem_list)
            elif agg_mode == 'median':
                mid_acc = np.median(acc_list)
                high_acc = np.percentile(acc_list, 75)
                low_acc = np.percentile(acc_list, 25)
                mid_bwt = np.median(bwt_list)
                high_bwt = np.percentile(bwt_list, 75)
                low_bwt = np.percentile(bwt_list, 25)
                mid_bwt_plus = np.median(bwt_plus_list)
                high_bwt_plus = np.percentile(bwt_plus_list, 75)
                low_bwt_plus = np.percentile(bwt_plus_list, 25)
                mid_rem = np.median(rem_list)
                high_rem = np.percentile(rem_list, 75)
                low_rem = np.percentile(rem_list, 25)
            else:
                raise NotImplementedError(f'Unknown agg_mode: {agg_mode}')
            mid_cl_metric_arr[metric_name] = {'acc': mid_acc,'bwt': mid_bwt, 'bwt_plus': mid_bwt_plus, 'rem': mid_rem}
            high_cl_metric_arr[metric_name] = {'acc': high_acc,'bwt': high_bwt, 'bwt_plus': high_bwt_plus, 'rem': high_rem}
            low_cl_metric_arr[metric_name] = {'acc': low_acc,'bwt': low_bwt, 'bwt_plus': low_bwt_plus, 'rem': low_rem}
        cl_metric_arr = {'mid': mid_cl_metric_arr,
                         'high': high_cl_metric_arr,
                         'low': low_cl_metric_arr}
        return metric_arr, log_metric_arr, cl_metric_arr, seeds, description, cl_sequence
def _kneighborsclassifier(*, train, test, x_predict=None, metrics, n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None, **kwargs):
    """Fit a KNeighborsClassifier and score it with the requested metric.

    Args:
        train: (X, y) training data.
        test: (X, y) held-out data used for scoring.
        x_predict: optional feature matrix to predict on after scoring.
        metrics: scoring metric name: 'accuracy', 'f1', or 'jaccard'.
        (remaining keyword arguments are forwarded to KNeighborsClassifier)

    Returns:
        (model_name, score, predictions) -- predictions is None when
        x_predict is not given.

    Raises:
        ValueError: for an unrecognized `metrics` value.

    For more info visit :
    https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier
    """
    model = KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm, leaf_size=leaf_size, p=p, metric=metric,
                                 metric_params=metric_params, n_jobs=n_jobs, **kwargs)
    model.fit(train[0], train[1])
    model_name = 'KNeighbors Classifier'
    y_hat = model.predict(test[0])
    # Bug fix: the original non-elif chain left `accuracy` undefined
    # (NameError) for an unknown metric; fail fast with a clear error instead.
    if metrics == 'accuracy':
        accuracy = accuracy_score(test[1], y_hat)
    elif metrics == 'f1':
        accuracy = f1_score(test[1], y_hat)
    elif metrics == 'jaccard':
        accuracy = jaccard_score(test[1], y_hat)
    else:
        raise ValueError('Unknown metrics value: {!r}'.format(metrics))
    if x_predict is None:
        return (model_name, accuracy, None)
    y_predict = model.predict(x_predict)
    return (model_name, accuracy, y_predict)
def overlap(batch_x, n_context=296, n_input=39):
    """
    Due to the requirement of static shapes(see fix_batch_size()),
    we need to stack the dynamic data to form a static input shape.
    Using the n_context of 296 (1 second of mfcc)

    :param batch_x: 2-D tensor of MFCC frames, presumably [time, n_input]
        -- TODO confirm against callers
    :param n_context: window width in frames
    :param n_input: number of MFCC features per frame
    :return: tensor reshaped to [n_windows, n_input]
    """
    window_width = n_context
    num_channels = n_input
    # conv1d expects a batch dimension; add one of size 1.
    batch_x = tf.expand_dims(batch_x, axis=0)
    # Create a constant convolution filter using an identity matrix, so that the
    # convolution returns patches of the input tensor as is, and we can create
    # overlapping windows over the MFCCs.
    eye_filter = tf.constant(
        np.eye(window_width * num_channels).reshape(
            window_width, num_channels, window_width * num_channels
        ),
        tf.float32,
    )
    # Create overlapping windows
    batch_x = tf.nn.conv1d(batch_x, eye_filter, stride=1, padding="SAME")
    # Remove dummy depth dimension and reshape into
    # [n_windows, n_input]
    batch_x = tf.reshape(batch_x, [-1, num_channels])
    return batch_x
def eval_on_train_data_input_fn(training_dir, hyperparameters):
    """
    Build an input_fn that evaluates on the training CSV.

    :param training_dir: The directory where the training CSV is located
    :param hyperparameters: A parameter set of the form
        {'batch_size': ..., 'num_epochs': ..., 'data_downsize': ...}
        (accepted for interface compatibility; evaluation constants are used)
    :return: A numpy_input_fn for the run
    """
    eval_params = {
        'batch_size': EVAL_BATCH_SIZE,
        'num_epochs': EVAL_EPOCHS,
        'data_downsize': DATA_DOWNSIZE,
        'shuffle': EVAL_SHUFFLE,
    }
    return _input_fn(training_dir, 'ml_sort_train.csv', eval_params)
import json
def load_config(config_file):
    """
    Load a configuration file.

    :param config_file: path to a UTF-8 encoded JSON configuration file
    :return: the parsed configuration object
    """
    with open(config_file, encoding='UTF-8') as handle:
        return json.load(handle)
def decode(var, encoding):
    """
    Return `var` as unicode text, decoding it first when necessary.
    """
    if not PY2:
        # Python 3: str is already unicode.
        return str(var)
    if isinstance(var, unicode):
        return var
    if isinstance(var, str) and encoding:
        return var.decode(encoding)
    return unicode(var)
from typing import List
import os
def read_annotation_files(annotation_files_directory: str, audio_files_directory: str,
                          max_audio_files: float = np.inf, exclude_classes: List[str] = None) -> List[AudioFile]:
    """
    Reads annotation files in a directory specified.

    :param annotation_files_directory: the directory of the annotation files
    :param audio_files_directory: path to directory audio files are stored in
    :param max_audio_files: maximum number of audio files to be loaded
        (defaults to no limit; annotated float because the default is np.inf)
    :param exclude_classes: list of classes to remove from audio files
    :return: list of files
    """
    all_files: List[AudioFile] = []
    # Sort for a deterministic processing order across platforms.
    filenames = sorted(os.listdir(annotation_files_directory))
    for index, file in enumerate(filenames):
        # Stop early once the requested number of audio files is loaded.
        if len(all_files) >= max_audio_files:
            break
        log("Reading annotation file {} ({})...".format(index + 1, file))
        filepath = os.path.join(annotation_files_directory, file)
        read_annotation_file(filepath, audio_files_directory, all_files, max_audio_files, exclude_classes)
    return all_files
def vehicle_emoji(veh):
    """Maps a vehicle type id to an emoji

    :param veh: vehicle type id
    :return: vehicle type emoji
    """
    # Known vehicle type ids; anything else falls back to the bus emoji.
    emoji_by_type = {
        2: u"\U0001F68B",
        6: u"\U0001f687",
        7: u"\U000026F4",
        12: u"\U0001F686",
    }
    return emoji_by_type.get(veh, u"\U0001F68C")
import functools
def np_function(func=None, output_dtypes=None):
    """Decorator that allow a numpy function to be used in Eager and Graph modes.

    Similar to `tf.py_func` and `tf.py_function` but it doesn't require defining
    the inputs or the dtypes of the outputs a priori.

    In Eager mode it would convert the tf.Tensors to np.arrays before passing to
    `func` and then convert back the outputs from np.arrays to tf.Tensors.

    In Graph mode it would create different tf.py_function for each combination
    of dtype of the inputs and cache them for reuse.

    NOTE: In Graph mode: if `output_dtypes` is not provided then `func` would
    be called with `np.ones()` to infer the output dtypes, and therefore `func`
    should be stateless.

    ```python
    Instead of doing:

    def sum(x):
      return np.sum(x)
    inputs = tf.constant([3, 4])
    outputs = tf.py_function(sum, inputs, Tout=[tf.int64])

    inputs = tf.constant([3., 4.])
    outputs = tf.py_function(sum, inputs, Tout=[tf.float32])

    Do:
    @eager_utils.np_function
    def sum(x):
      return np.sum(x)

    inputs = tf.constant([3, 4])
    outputs = sum(inputs)  # Infers that Tout is tf.int64

    inputs = tf.constant([3., 4.])
    outputs = sum(inputs)  # Infers that Tout is tf.float32

    # Output dtype is always float32 for valid input dtypes.
    @eager_utils.np_function(output_dtypes=np.float32)
    def mean(x):
      return np.mean(x)

    # Output dtype depends on the input dtype.
    @eager_utils.np_function(output_dtypes=lambda x: (x, x))
    def repeat(x):
      return x, x

    with context.graph_mode():
      outputs = sum(tf.constant([3, 4]))
      outputs2 = sum(tf.constant([3., 4.]))
      sess.run(outputs)  # np.array(7)
      sess.run(outputs2)  # np.array(7.)

    with context.eager_mode():
      inputs = tf.constant([3, 4])
      outputs = sum(tf.constant([3, 4]))  # tf.Tensor([7])
      outputs = sum(tf.constant([3., 4.]))  # tf.Tensor([7.])
    ```
    Args:
      func: A numpy function, that takes numpy arrays as inputs and return numpy
        arrays as outputs.
      output_dtypes: Optional list of dtypes or a function that maps input dtypes
        to output dtypes. Examples: output_dtypes=[tf.float32],
        output_dtypes=lambda x: x (outputs have the same dtype as inputs).
        If it is not provided in Graph mode the `func` would be called to infer
        the output dtypes.
    Returns:
      A wrapped function that can be used with TF code.
    """
    def decorated(func):
        """Decorated func."""
        # Cache mapping input-dtype tuples -> inferred output dtype structure,
        # so the output signature is computed at most once per combination.
        dtype_map = {}
        def wrapper(*args, **kwargs):
            """Wrapper to add nested input and outputs support."""
            func_with_kwargs = functools.partial(func, **kwargs)
            def func_flat_outputs(*args):
                # tf.py_function requires a flat list of outputs.
                return tf.nest.flatten(func_with_kwargs(*args))
            def compute_output_dtypes(*args):
                """Calls the func to compute output dtypes."""
                result = func(*args, **kwargs)
                return tf.nest.map_structure(lambda x: x.dtype, result)
            if tf.executing_eagerly():
                # Eager mode: tensors -> numpy, call func, then numpy -> tensors.
                result = func_with_kwargs(
                    *tf.nest.map_structure(lambda x: x.numpy(), args))
                convert = lambda x: x if x is None else tf.convert_to_tensor(value=x)
                return tf.nest.map_structure(convert, result)
            else:
                input_dtypes = tuple([x.dtype for x in tf.nest.flatten(args)])
                if input_dtypes not in dtype_map:
                    if output_dtypes is None:
                        # Infer output dtypes by calling func on dummy np.ones()
                        # inputs — this is why func must be stateless here.
                        dummy_args = tf.nest.map_structure(
                            lambda x: np.ones(x.shape, x.dtype.as_numpy_dtype), args)
                        dtype_map[input_dtypes] = compute_output_dtypes(*dummy_args)
                    elif isinstance(output_dtypes, (list, tuple)):
                        # output_dtypes define the output dtypes.
                        dtype_map[input_dtypes] = output_dtypes
                    else:
                        try:
                            # See if output_dtypes define the output dtype directly.
                            tf.as_dtype(output_dtypes)
                            dtype_map[input_dtypes] = output_dtypes
                        except TypeError:
                            if callable(output_dtypes):
                                # output_dtypes is mapping from input_dtypes to output_dtypes.
                                dtype_map[input_dtypes] = output_dtypes(*input_dtypes)
                            else:
                                raise ValueError(
                                    'output_dtypes not a list of dtypes or a callable.')
                flat_output_dtypes = tf.nest.flatten(dtype_map[input_dtypes])
                flat_outputs = tf.py_function(func_flat_outputs,
                                              inp=args,
                                              Tout=flat_output_dtypes)
                # Restore the original (possibly nested) output structure.
                return tf.nest.pack_sequence_as(dtype_map[input_dtypes], flat_outputs)
        return tf_decorator.make_decorator(func, wrapper)
    # This code path is for the `foo = np_function(foo, ...)` use case
    if func is not None:
        return decorated(func)
    # This code path is for the decorator
    # @np_function(...)
    # def foo(...):
    return decorated
from typing import List
from typing import Dict
def constituency_parse(doc: List[str]) -> List[Dict]:
    """
    Run the constituency-parse predictor over every sentence of a document.

    parameter: List[str] for each doc
    return: List[Dict] for each doc
    """
    predictor = get_con_predictor()
    results = [predictor.predict(sentence=sent) for sent in doc]
    # One parse result per input sentence.
    assert len(results) == len(doc)
    return results
import os
def ifFileExists(filePath):
    """
    Checks if the file exists; returns True/False

    filePath    File Path
    """
    exists_as_file = os.path.isfile(filePath)
    return exists_as_file
def extract_results(filename):
    """ Extract intensity data from a FLIMfit results file.
    Converts any fraction data (e.g. beta, gamma) to contributions

    Required arguments:
    filename - the name of the file to load

    Returns:
    X - stacked intensity contributions, one entry per image key
    groups - parameter-name groups, one per decay component
    mask - per-image boolean flags for pixels whose I_0 was not finite
    """
    file = h5py.File(filename, 'r')
    results = file['results']
    keys = sorted_nicely(results.keys())
    params = sorted_nicely(results['image 1'].keys())
    # Build one group per component g: its 'I_0' plus any 'gamma'/'beta'
    # fraction parameters whose names start with that component's prefix.
    groups = []
    g = 1
    while(param(g, 'I_0') in params):
        group = [param(g, 'I_0')]
        name_search = [param(g, 'gamma'), param(g, 'beta')]
        for name in name_search:
            # Only extend once: stop looking after fractions were found.
            if len(group) == 1:
                group = group + [x for x in params if x.startswith(name)]
        groups.append(group)
        g = g + 1
    print(groups)
    X = []
    mask = []
    for k in keys:
        A = []
        m = np.array([False])
        for group in groups:
            I_0 = results[k][group[0]]
            # Flag pixels where the component intensity is not finite.
            m = m | ~np.isfinite(I_0)
            if len(group) == 1:
                A.append(I_0)
            else:
                # Convert fractions (beta/gamma) to absolute contributions
                # by multiplying with the component intensity I_0.
                for i in range(1, len(group)):
                    A.append(results[k][group[i]][()] * I_0)
        A = np.stack(A, axis=-1)
        # Zero-out NaNs so downstream numerics do not propagate them.
        A[np.isnan(A)] = 0
        X.append(A)
        mask.append(m)
    X = np.stack(X)
    mask = np.stack(mask)
    return X, groups, mask
async def bundle_status(args: Namespace) -> ExitCode:
    """Query the status of a Bundle in the LTA DB.

    Fetches /Bundles/{uuid} via the LTA REST client in args.di and either
    dumps the raw JSON (when --json was given) or pretty-prints core fields.
    """
    response = await args.di["lta_rc"].request("GET", f"/Bundles/{args.uuid}")
    if args.json:
        print_dict_as_pretty_json(response)
    else:
        # display information about the core fields
        print(f"Bundle {args.uuid}")
        print(f"    Priority: {display_time(response['work_priority_timestamp'])}")
        print(f"    Status: {response['status']} ({display_time(response['update_timestamp'])})")
        if response['status'] == "quarantined":
            print(f"        Reason: {response['reason']}")
        print(f"    Claimed: {response['claimed']}")
        if response['claimed']:
            print(f"        Claimant: {response['claimant']} ({display_time(response['claim_timestamp'])})")
        print(f"    TransferRequest: {response['request']}")
        print(f"    Source: {response['source']} -> Dest: {response['dest']}")
        print(f"    Path: {response['path']}")
        if 'files' in response:
            print(f"    Files: {len(response['files'])}")
        else:
            print("    Files: Not Listed")
        # display additional information if available
        if 'bundle_path' in response:
            print(f"    Bundle File: {response['bundle_path']}")
        if 'size' in response:
            print(f"    Size: {response['size']}")
        if 'checksum' in response:
            print("    Checksum")
            print(f"        adler32: {response['checksum']['adler32']}")
            print(f"        sha512: {response['checksum']['sha512']}")
        # display the contents of the bundle, if requested
        if args.contents:
            # NOTE(review): contents listing appears unimplemented — the flag
            # only prints a placeholder; confirm whether this is intentional.
            print("    Contents: Not Listed")
    return EXIT_OK
def determine_step_size(mode, i, threshold=20):
    """
    A helper function that determines the next action to take based on the designated mode.

    Parameters
    ----------
    mode (int)
        Determines which option to choose.
    i (int)
        the current step number.
    threshold (float)
        The upper end of our control.

    Returns
    -------
    decision (float)
        The value to push/pull the cart by, positive values push to the right.
    """
    if mode == 1:
        # Do nothing.
        return 0
    if mode == 2:
        # Random push within [-threshold, threshold].
        return np.random.uniform(low=-threshold, high=threshold)
    if mode == 3:
        # Alternate full-strength pushes: left on even steps, right on odd.
        return threshold if i % 2 else -threshold
    if mode == 4:
        # Ask the operator for a value interactively.
        prompt = "Enter a float value from -{} to {}:\n".format(threshold, threshold)
        return float(input(prompt))
def scroll_down(driver):
    """
    Simulate scrolling to the bottom of the current webpage.

    :param driver: webdriver
    :type driver: webdriver
    :return: webdriver
    """
    # Selenium supports executing JavaScript in the current window/frame:
    # read the document height, then jump straight to it.
    page_height = driver.execute_script("return document.body.scrollHeight")
    driver.execute_script("window.scrollTo(0, {});".format(page_height))
    return driver
from datetime import datetime
def processing():
    """Renders the khan projects page."""
    current_year = datetime.now().year
    return render_template(
        'stem/tech/processing/gettingStarted.html',
        title="Processing - Getting Started",
        year=current_year,
    )
def conv_unit(input_tensor, nb_filters, mp=False, dropout=0.1):
    """
    Build one zero-pad -> 3x3 conv -> relu -> batch-norm unit, optionally
    followed by 3x3/stride-2 max pooling, always followed by dropout.
    """
    out = ZeroPadding2D()(input_tensor)
    out = Conv2D(nb_filters, (3, 3))(out)
    out = relu()(out)
    out = BatchNormalization(axis=3, momentum=0.66)(out)
    if mp:
        out = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(out)
    out = Dropout(dropout)(out)
    return out
import torch
def combine_vectors(x, y):
    """
    Concatenate two batches of vectors along the feature dimension.

    Parameters:
        x: (n_samples, ?) the first vector.
           In this assignment, this will be the noise vector of shape
           (n_samples, z_dim), but the second dimension's size is not assumed.
        y: (n_samples, ?) the second vector.
           In this assignment this will be the one-hot class vector of shape
           (n_samples, n_classes), but the code does not assume this.
    """
    return torch.cat((x, y), dim=1)
def expandMacros(context, template, outputFile, outputEncoding="utf-8"):
    """
    Expand a template containing METAL macros while leaving all TAL and
    METAL commands in place.

    This makes editing a template that uses METAL macros easier, because
    the result of the macro can be seen immediately. The macros referred to
    by the template must be present in `context` so their contents can be
    referenced. `outputEncoding` determines the encoding of the returned
    string containing the expanded macro.
    """
    interpreter = MacroExpansionInterpreter()
    interpreter.initialise(context, outputFile)
    return template.expand(
        context, outputFile, outputEncoding, interpreter=interpreter)
def graph_papers(path="papers.csv"):
    """
    Spit out the connections between people by papers
    """
    data = defaultdict(dict)
    paper_key = u'Paper'
    rows = read_csv(path, key=paper_key)
    # Group the CSV rows by paper; every pair of co-authors on a paper is
    # linked in both directions.
    for _, coauthors in groupby(rows, itemgetter(paper_key)):
        for left, right in combinations(coauthors, 2):
            for row, counterpart in ((left, right), (right, left)):
                uid = dotify([row[u'Name']])
                if uid not in data:
                    data[uid] = {
                        'name': uid,
                        'imports': [],
                    }
                data[uid]['imports'].append(dotify([counterpart[u'Name']]))
    return data
def tariterator1(fileobj, check_sorted=False, keys=base_plus_ext, decode=True):
    """Alternative (new) implementation of tariterator."""
    # Pipeline: raw tar entries -> samples grouped by key -> decoded samples.
    return decoder(decode=decode)(group_by_keys(keys=keys)(tardata(fileobj)))
def juego_nuevo():
    """Ask the player for the board size, the word count and the words, then
    build and run a new word-search puzzle.

    Returns whatever procesar_juego() produces for the newly built board.
    """
    show_title("Crear sopa de NxN letras")
    # Board dimension (rows == columns), between 10 and 20.
    nxn = pedir_entero("Ingrese un numero entero de la cantidad de\nfilas y columnas que desea (Entre 10 y 20):\n", 10, 20)
    # Number of words to place, at most half the board dimension.
    n_palabras = pedir_entero("Ingrese un numero entero de la cantidad de\npalabas que deasea agregar (Entre 0 y %d):\n" % (nxn/2), 0, (nxn/2))
    palabras = []
    palabra_min_caracteres = 3
    palabra_repetida = False
    while len(palabras) < n_palabras:
        if palabra_repetida:
            # Tell the player the previous word was a duplicate.
            show_msg("Ingreso una palabra repetida")
            palabra_repetida = False
        # Ask for a word that satisfies the length requirements.
        palabra = pedir_palabra("[%d|%d]Ingrese una palabra entre %d y %d caracteres: " % (len(palabras)+1, n_palabras, palabra_min_caracteres, (nxn/2)), palabra_min_caracteres, (nxn/2))
        if palabra in palabras:
            palabra_repetida = True
        else:
            palabras.append(palabra)
    # Build the empty board, place the words, then fill remaining cells.
    matrix = crear_matrix(nxn)
    matrix, posiciones, salteadas = procesar_palabras(matrix, nxn, palabras)
    matrix = completar_matrix(matrix, nxn)
    return procesar_juego(matrix, nxn, n_palabras, salteadas, posiciones)
def mk_sd_graph(pvalmat, thresh=0.05):
    """
    Make a graph with edges as significant differences between treatments.
    """
    digraph = DiGraph()
    for node in range(len(pvalmat)):
        digraph.add_node(node)
    for idx_a, idx_b, b_bigger, p_val in iter_all_pairs_cmp(pvalmat):
        # Only significant comparisons become edges.
        if p_val > thresh:
            continue
        source, target = (idx_a, idx_b) if b_bigger else (idx_b, idx_a)
        digraph.add_edge(source, target)
    return digraph
def body2hor(body_coords, theta, phi, psi):
    """Transforms the vector coordinates in body frame of reference to local
    horizon frame of reference.

    Parameters
    ----------
    body_coords : array_like
        3 dimensional vector with (x,y,z) coordinates in body axes.
    theta : float
        Pitch (or elevation) angle (rad).
    phi : float
        Bank angle (rad).
    psi : float
        Yaw (or azimuth) angle (rad)

    Returns
    -------
    hor_coords : array_like
        3 dimensional vector with (x,y,z) coordinates in local horizon axes.

    See Also
    --------
    `hor2body` function.

    Notes
    -----
    See [1] or [2] for frame of reference definition.
    To avoid ambiguities, angle ranges are limited to:
    * -pi/2 <= theta <= pi/2
    * -pi <= phi <= pi
    * 0 <= psi <= 2*pi

    References
    ----------
    .. [1] B. Etkin, "Dynamics of Atmospheric Flight," Courier Corporation,
        pp. 104-120, 2012.
    .. [2] Gómez Tierno, M.A. et al, "Mecánica del Vuelo," Garceta, pp. 1-12,
        2012
    """
    # check_theta_phi_psi_range(theta, phi, psi)
    cth, sth = cos(theta), sin(theta)
    cph, sph = cos(phi), sin(phi)
    cps, sps = cos(psi), sin(psi)
    # Transformation matrix from body to local horizon
    Lhb = np.array([
        [cth * cps, sph * sth * cps - cph * sps, cph * sth * cps + sph * sps],
        [cth * sps, sph * sth * sps + cph * cps, cph * sth * sps - sph * cps],
        [-sth, sph * cth, cph * cth],
    ])
    return np.squeeze(Lhb).dot(body_coords)
def negloglikelihoodZTNB(args, x):
    """Negative log likelihood for zero truncated negative binomial."""
    a, m = args
    # Truncation renormalizes the NB likelihood by P(X > 0) = 1 - P(X = 0).
    p_nonzero = 1 - NegBinom(a, m).pmf(0)
    return len(x) * np.log(p_nonzero) + negloglikelihoodNB(args, x)
def combine(m1, m2):
    """
    Returns transform that combines two other transforms.
    """
    combined = np.dot(m1, m2)
    return combined
import torch
def _acg_bound(nsim, k1, k2, lam, mtop = 1000):
    # John T Kent, Asaad M Ganeiber, and Kanti V Mardia.
    # A new unified approach for the simulation of a wide class of directional distributions.
    # Journal of Computational and Graphical Statistics, 27(2):291-301, 2018.
    """
    Sampling approach used in Kent et al. (2018)

    Samples the cartesian coordinates from bivariate ACG: x, y
    Acceptance criterion:
        - Sample values v, from uniform between 0 and 1
        - If v < fg, accept x, y
    Convert x, y to angles phi using atan2,
    we have now simulated the bessel density.

    Args:
        nsim: number of angles to draw.
        k1, k2, lam: density parameters.
        mtop: maximum number of rejection-sampling rounds before giving up.

    Returns:
        1-D tensor of nsim sampled angles (radians).
    """
    ntry = 0; nleft = nsim; mloop = 0
    # Eigenvalues of the bounding ACG; shift so the minimum is zero.
    eig = torch.tensor([0., 0.5 * (k1 - lam**2/k2)]); eigmin = 0
    if eig[1] < 0:
        eigmin = eig[1]; eig = eig - eigmin
    # bfind / log_im are module-level helpers defined elsewhere in this file.
    q = 2; b0 = bfind(eig)
    phi = 1 + 2*eig/b0; den = log_im(0, k2)
    values = torch.empty(nsim, 2); accepted = 0
    while nleft > 0 and mloop < mtop:
        # Draw nleft candidate points from the (axis-aligned) ACG proposal.
        x = Normal(0., 1.).sample((nleft*q,)).reshape(nleft, q) * torch.ones(nleft, 1) * torch.tensor( (1/phi).sqrt()).reshape(1, q)
        r = (x*x).sum(-1).sqrt()
        # Dividing a vector by its norm, gives the unit vector
        # So the ACG samples unit vectors?
        x = x / (r.reshape(nleft, 1) * torch.ones(1, q))
        u = ((x*x) * torch.ones(nleft, 1) * torch.tensor(eig).reshape(1, q)).sum(-1)
        v = Uniform(0, 1).sample((nleft, ))
        # eq 7.3 + eq 4.2
        logf = (k1*(x[:,0] - 1) + eigmin) + (log_im(0, torch.sqrt(k2**2 + lam**2 * x[:,1]**2 )) - den )
        # eq 3.4
        loggi = 0.5 * (q - b0) + q/2 * ((1+2*u/b0).log() + (b0/q).log())
        logfg = logf + loggi
        # Accept candidates whose uniform draw falls below the f/g ratio.
        ind = (v < logfg.exp())
        nacc = ind.sum(); nleft = nleft - nacc; mloop = mloop + 1; ntry=ntry+nleft
        if nacc > 0:
            # Append the newly accepted unit vectors to the output buffer.
            start = accepted
            accepted += x[ind].shape[0]
            values[start:accepted,:] = x[ind,:]
    print("Sampling efficiency:", (nsim - nleft.item())/ntry.item())
    # Convert the accepted unit vectors (x, y) to angles.
    return torch.atan2(values[:,1], values[:,0])
import os
import shutil
def anonymise_eeg(
    original_file: str,
    destination_file: str,
    field_name: str = '',
    field_surname: str = '',
    field_birthdate: str = '',
    field_sex: str = '',
    field_folder: str = '',
    field_centre: str = '',
    field_comment: str = ''
):
    """Anonymise an .eeg file.

    Each `field_*` value is written into its fixed byte range of the header;
    pass None to leave a field untouched. With the defaults (empty strings),
    every field is blanked.

    Args:
        original_file: path to the original file.
        destination_file: path to affect the anonymisation.
        field_name: patient name.
        field_surname: patient surname.
        field_birthdate: birthdate.
        field_sex: sex.
        field_folder: folder name.
        field_centre: centre name.
        field_comment: comment.
    """
    # Copy the original content
    content = extract_header(original_file)
    # Anonymise: overwrite each header field at its fixed byte offsets.
    # NOTE(review): offsets assumed to match this acquisition format's header
    # layout — confirm against the format specification.
    if field_name is not None:
        change_field(content, 314, 364, field_name.encode('ascii'))
    if field_surname is not None:
        change_field(content, 364, 394, field_surname.encode('ascii'))
    if field_birthdate is not None:
        change_field(content, 394, 404, field_birthdate.encode('ascii'))
    if field_sex is not None:
        change_field(content, 404, 405, field_sex.encode('ascii'))
    if field_folder is not None:
        change_field(content, 405, 425, field_folder.encode('ascii'))
    if field_centre is not None:
        change_field(content, 425, 464, field_centre.encode('ascii'))
    if field_comment is not None:
        change_field(content, 464, 720, field_comment.encode('ascii'))
    ensure_path(path=os.path.dirname(destination_file))
    # Normalize every header element to a bytes object (single ints become
    # one-byte bytes) so it can be written back uniformly below.
    content = (
        char if isinstance(char, bytes) else bytes([char]) for char in content
    )
    if not os.path.isfile(destination_file):
        # Copy via a temporary '.part' file and rename, so an interrupted run
        # never leaves a partially-copied destination behind.
        shutil.copyfile(original_file, destination_file + '.part')
        os.rename(destination_file + '.part', destination_file)
    # Overwrite the header region of the destination in place.
    with open(destination_file, 'rb+') as file_:
        file_.seek(0)
        for char in content:
            file_.write(char if isinstance(char, bytes) else bytes([char]))
    return True
def melspecgrams_to_specgrams(logmelmag2 = None, mel_p = None, mel_downscale=1):
    """Converts melspecgrams to specgrams.

    Args:
        logmelmag2: log magnitudes, shape [freq, time], mel-scaled
            frequencies (or None to skip).
        mel_p: instantaneous frequencies, shape [freq, time], mel-scaled
            frequencies (or None to skip).
        mel_downscale: downscale factor for the mel-to-linear matrix.

    Returns:
        (logmag, p): log magnitudes and instantaneous frequencies on the
        linear frequency scale, shape [freq, time]; entries are None when
        the corresponding input was None.
    """
    mel2l = _mel_to_linear_matrix(mel_downscale)
    logmag = None
    p = None
    if logmelmag2 is not None:
        # [freq, time] -> [1, time, freq] so tensordot maps mel -> linear bins.
        melmag2 = np.array([logmelmag2.T])
        linmag2 = np.tensordot(np.exp(melmag2), mel2l, 1)
        logmag = (0.5 * np.log(linmag2 + 1e-6))[0].T
    if mel_p is not None:
        mel_phase = np.array([mel_p.T])
        # Integrate instantaneous frequency to a phase angle, map to linear
        # bins, then differentiate back to instantaneous frequency.
        mel_phase_angle = np.cumsum(mel_phase * np.pi, axis=1)
        phase_angle = np.tensordot(mel_phase_angle, mel2l, 1)
        p = instantaneous_frequency(phase_angle, time_axis=1)[0].T
    return logmag, p
from scipy.stats.mstats import gmean
import numpy as np
def _expression_values(gene_symbols, genes):
    """Return the expression value for each gene symbol; missing genes count as 0.0."""
    values = []
    for gene in gene_symbols:
        try:
            values.append(genes[gene])
        except KeyError:
            values.append(0.0)
    return values


def ligandScore(ligand, genes):
    """Calculate the ligand score for a given ligand and gene expression set.

    Args:
        ligand: record with `ligand_type` ("peptide" or "molecule") and the
            gene-list fields used below; gene lists are stored as stringified
            Python lists and parsed with eval().
        genes: mapping from gene symbol to expression value.

    Returns:
        A non-negative score (0.0 when no promoting genes are expressed).
    """
    # NOTE: eval() on the stored gene-list strings is only safe for a trusted
    # ligand database; do not feed untrusted data through this function.
    if ligand.ligand_type == "peptide" and isinstance(ligand.preprogene, str):
        preprogenes = eval(ligand.preprogene)
        if isinstance(preprogenes, list):
            # Many ortholog genes may map to one original gene and not all
            # have to be expressed, so take the maximum.
            ligand_genes = _expression_values(preprogenes, genes)
            try:
                return max(ligand_genes)
            except ValueError:
                print(f"something is wrong with the list {ligand_genes}")
                return 0.0
        # Bug fix: a stringified preprogene that is not a list previously fell
        # off the end of the function and returned None implicitly; report
        # "no expression" explicitly instead.
        return 0.0
    elif ligand.ligand_type == "molecule":
        # Synthesis genes must all be present: geometric mean.
        if isinstance(ligand.synthesis, str):
            synthesis = gmean(_expression_values(eval(ligand.synthesis), genes))
        else:
            # No specific genes are needed for synthesis.
            synthesis = np.nan
        # Only one vesicle transporter is needed: maximum.
        if isinstance(ligand.transport, str):
            transport = max(_expression_values(eval(ligand.transport), genes))
        else:
            transport = np.nan
        # Only one reuptake gene is needed: maximum.
        if isinstance(ligand.reuptake, str):
            reuptake = max(_expression_values(eval(ligand.reuptake), genes))
        else:
            reuptake = np.nan
        # Any excluding gene diverts expression to other ligands: maximum.
        if isinstance(ligand.excluded, str):
            excluded = max(_expression_values(eval(ligand.excluded), genes))
        else:
            excluded = 0
        # Geometric mean of the promoting factors that apply to this ligand
        # (nan entries mean "no genes required" and are skipped).
        promoting = [x for x in (synthesis, transport, reuptake) if str(x) != "nan"]
        promoting_factor = gmean(promoting)
        if str(promoting_factor) == "nan":
            # Capture cases where no promoting genes were present at all.
            print(f"no promoting genes detected for {ligand.ligand}")
            return 0.0
        # Correct ligand expression by the exclusion factor; clamp at zero.
        ligand_score = promoting_factor - excluded
        return ligand_score if ligand_score > 0 else 0.0
    else:
        print("Big error! ligand type is not defined!")
        return 0.0
def eval(x):
    """Evaluates the value of a variable.

    # Arguments
        x: A variable.

    # Returns
        A Numpy array.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
        >>> K.eval(kvar)
        array([[ 1.,  2.],
               [ 3.,  4.]], dtype=float32)
    ```
    """
    if isinstance(x, KerasSymbol):
        if x.tensor is not None:
            if x.name in x.get_bind_values() and _MODEL is not None:
                # Ensure the model's trained weights are reflected in the
                # bound values before reading the tensor.
                _MODEL._sync_weights()
            ret = x.eval().asnumpy()
        else:
            # No bound tensor: evaluate by running a forward pass.
            ret = _forward_pass(x)[0].asnumpy()
        # If the Tensor shape is (1, ) and does not have attribute "_is_vector", then, it is considered to be scalar.
        # Return the value.
        if ret.shape == (1,) and not hasattr(x, '_is_vector'):
            ret = ret[0]
        return ret
    elif isinstance(x, mx.nd.NDArray):
        return x.asnumpy()
    else:
        # Anything else (e.g. plain numbers/arrays) is returned unchanged.
        return x
from typing import Optional
def triple_in_shape(expr: ShExJ.shapeExpr, label: ShExJ.tripleExprLabel, cntxt: Context) \
        -> Optional[ShExJ.tripleExpr]:
    """ Search for the label in a shape expression """
    if isinstance(expr, (ShExJ.ShapeOr, ShExJ.ShapeAnd)):
        # Return the first hit among the members, if any.
        for member in expr.shapeExprs:
            found = triple_in_shape(member, label, cntxt)
            if found is not None:
                return found
        return None
    if isinstance(expr, ShExJ.ShapeNot):
        return triple_in_shape(expr.shapeExpr, label, cntxt)
    if isinstance(expr, ShExJ.shapeExprLabel):
        # Dereference the label and continue the search in its target.
        referenced = reference_of(expr, cntxt)
        if referenced is not None:
            return triple_in_shape(referenced, label, cntxt)
        return None
    return None
def poly_learning_rate(base_lr, curr_iter, max_iter, power=0.9):
    """poly learning rate policy: base_lr * (1 - iter/max_iter) ** power"""
    decay = (1 - float(curr_iter) / max_iter) ** power
    return base_lr * decay
def parent_path(xpath):
    """
    Removes the last element in an xpath, effectively yielding the xpath to the parent element

    :param xpath: An xpath with at least one '/'
    """
    last_separator = xpath.rfind('/')
    return xpath[:last_separator]
from datetime import datetime
def change_project_description(project_id):
    """For backwards compatibility: Change the description of a project.

    Reads the new description from the current request, stores it on the
    project and stamps `lastUpdated` with the current UTC time.
    """
    description = read_request()
    assert isinstance(description, (str,))
    orig = get_project(project_id)
    orig.description = description
    # Bug fix: with `from datetime import datetime` in scope, the previous
    # `datetime.datetime.utcnow()` raised AttributeError — `datetime` is the
    # class itself, so call utcnow() on it directly.
    orig.lastUpdated = datetime.utcnow()
    orig.save()
    return JsonResponse(orig)
import unittest
def run_all(examples_main_path):
    """
    Helper function to run all the test cases

    :arg: examples_main_path: the path to main examples directory
    """
    # test cases to run
    test_cases = [TestExample1, TestExample2, TestExample3, TestExample4,
                  TestExample5s1, TestExample5s2, TestExample6, TestExample7,
                  TestExample8, TestExample9]
    # Load every test method of every case, passing the examples path
    # through to the case constructor.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in test_cases:
        for method_name in loader.getTestCaseNames(case):
            suite.addTest(case(methodName=method_name,
                               examples_path=examples_main_path))
    # Run the suite; return a shell-style exit code (0 success, 1 failure).
    outcome = unittest.TextTestRunner().run(suite)
    return int(not outcome.wasSuccessful())
def get_queue(queue):
    """
    :param queue: Queue Name or Queue ID or Queue Redis Key or Queue Instance
    :return: Queue instance
    """
    if isinstance(queue, Queue):
        return queue
    if isinstance(queue, str):
        # Accept either a full redis key or a bare queue name.
        prefix = Queue.redis_queue_namespace_prefix
        key = queue if queue.startswith(prefix) else prefix + queue
        return Queue.from_queue_key(key)
    raise TypeError('{0} is not of class {1} or {2}'.format(queue, str, Queue))
def ptttl_to_samples(ptttl_data, amplitude=0.5, wavetype=SINE_WAVE):
    """
    Convert a PTTTLData object to a list of audio samples.

    :param PTTTLData ptttl_data: PTTTL/RTTTL source text
    :param float amplitude: Output signal amplitude, between 0.0 and 1.0.
    :param int wavetype: Waveform type for output signal. Must be one of\
        tones.SINE_WAVE, tones.SQUARE_WAVE, tones.TRIANGLE_WAVE, or tones.SAWTOOTH_WAVE.
    :return: list of audio samples
    :rtype: tones.tone.Samples
    """
    parsed = PTTTLParser().parse(ptttl_data)
    return _generate_samples(parsed, amplitude, wavetype)
from typing import Union
from pathlib import Path
from typing import Tuple
import numpy
import pandas
def read_output_ascii(
    path: Union[Path, str]
) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    """Read an output file (raw ASCII format)

    Args:
        path (str): path to the file

    Return:
        list: The contents of the output file as four
        :py:class:`numpy.ndarray` instances: simulation times, then the
        norm, energy and maximum SPF overlap of the wave function at all
        times.
    """
    columns = ("time", "norm", "energy", "overlap")
    frame = pandas.read_csv(str(path), sep=r"\s+", names=list(columns))
    return tuple(frame[name].values for name in columns)
def AliasPrefix(funcname):
    """Return the prefix of the function the named function is an alias of."""
    # The alias record is the first entry registered for this function name.
    return __aliases[funcname][0].prefix
import logging
import subprocess
def run_command(*cmd_args, **kargs):
    """
    Shell runner helper

    Works as subprocess.run except `check` defaults to True and stdout is
    suppressed unless the logging level is DEBUG (or the caller requested
    stdout/capture_output explicitly). Returns None when `dry_run` is set.
    """
    logging.debug("Run command: " + " ".join(map(str, cmd_args)))
    if dry_run:
        return None
    kargs.setdefault("check", True)
    debug_enabled = logging.getLogger().getEffectiveLevel() == logging.DEBUG
    capturing = kargs.get("capture_output", False)
    if not debug_enabled and "stdout" not in kargs and not capturing:
        # Silence stdout unless the caller asked for it or we are debugging.
        kargs["stdout"] = subprocess.DEVNULL
    return subprocess.run(cmd_args, **kargs)
from typing import Optional
from typing import Tuple
from typing import Callable
def connect(
    sender: QWidget,
    signal: str,
    receiver: QObject,
    slot: str,
    caller: Optional[FormDBWidget] = None,
) -> Optional[Tuple[pyqtSignal, Callable]]:
    """Connect signal to slot for QSA.

    Resolves the textual ``signal``/``slot`` specs to a concrete pyqtSignal
    and callable via ``solve_connection``, wraps the callable with
    ``slot_done`` and connects the pair with a queued, unique connection.

    Args:
        sender: widget emitting the signal.
        signal: signal spec string, e.g. ``'clicked()'``.
        receiver: object that owns the slot.
        slot: slot spec string, e.g. ``'iface.buscarContacto()'``.
        caller: optional form object, used here only for trace logging.

    Returns:
        The ``(signal, wrapped_slot)`` pair actually connected, or ``None``
        when the spec could not be resolved or the connection failed.
    """
    # Parameters example:
    # caller: <clientes.FormInternalObj object at 0x7f78b5c230f0>
    # sender: <pineboolib.qt3_widgets.qpushbutton.QPushButton object at 0x7f78b4de1af0>
    # signal: 'clicked()'
    # receiver: <clientes.FormInternalObj object at 0x7f78b5c230f0>
    # slot: 'iface.buscarContacto()'
    if caller is not None:
        logger.trace("* * * Connect:: %s %s %s %s %s", caller, sender, signal, receiver, slot)
    else:
        logger.trace("? ? ? Connect:: %s %s %s %s", sender, signal, receiver, slot)
    signal_slot = solve_connection(sender, signal, receiver, slot)
    if not signal_slot:
        return None
    # http://pyqt.sourceforge.net/Docs/PyQt4/qt.html#ConnectionType-enum
    # Queued: the slot runs in the receiver's event loop. Unique: Qt rejects
    # a duplicate connect() of the same signal/slot pair.
    conntype = Qt.QueuedConnection | Qt.UniqueConnection
    new_signal, new_slot = signal_slot
    # if caller:
    #     for sl in caller._formconnections:
    #         if sl[0].signal == signal_slot[0].signal and sl[1].__name__ == signal_slot[1].__name__:
    #             return False
    try:
        # slot_done wraps the resolved slot (presumably for bookkeeping of
        # the connection — TODO confirm against slot_done's definition).
        slot_done_fn: Callable = slot_done(new_slot, new_signal, sender, caller)
        # MyPy/PyQt5-Stubs misses connect(type=param)
        new_signal.connect(slot_done_fn, type=conntype)  # type: ignore
    except Exception:
        logger.warning("ERROR Connecting: %s %s %s %s", sender, signal, receiver, slot)
        return None
    signal_slot = new_signal, slot_done_fn
    return signal_slot | 2ebeca355e721c5fad5ec6aac24a59587e4e86bd | 26,371
import torch
def get_detection_input(batch_size=1):
    """
    Sample input for detection models, usable for tracing or testing.

    Returns a tuple of (images, image labels, boxes, box labels), each with
    ``batch_size`` entries: random 3x224x224 images, zero image labels, a
    fixed [1, 1, 200, 200] box per image, and box labels of one.
    """
    images = torch.rand(batch_size, 3, 224, 224)
    image_labels = torch.full((batch_size,), 0).long()
    boxes = torch.Tensor([1, 1, 200, 200]).repeat((batch_size, 1))
    box_labels = torch.full((batch_size,), 1).long()
    return images, image_labels, boxes, box_labels
def build_tables(ch_groups, buffer_size, init_obj=None):
    """ build tables and associated I/O info for the channel groups.
    Parameters
    ----------
    ch_groups : dict
        channel-group definitions; each group's dict may hold a 'ch_list'
        of channel ids and receives a 'table' entry as a side effect.
    buffer_size : int
        buffer length used to size each lh5.Table.
    init_obj : object with initialize_lh5_table() function
        optional initializer invoked once per group's freshly built table.
    Returns
    -------
    ch_to_tbls : dict or Table
        A channel-indexed dictionary of tables for quick look-up, or if passed a
        dummy group (no group name), return the one table made.
    """
    ch_to_tbls = {}
    # set up a table for each group
    for group_name, group_info in ch_groups.items():
        tbl = lh5.Table(buffer_size)
        if init_obj is not None:
            channel = None # for dummy ch_group
            # Note: all ch in ch_list will be written to the same table. So it
            # should suffice to initialize for the first channel in the list
            if 'ch_list' in group_info: channel = group_info['ch_list'][0]
            init_obj.initialize_lh5_table(tbl, channel)
        group_info['table'] = tbl
        if group_name == '':
            # a dummy (unnamed) group must be the only group: return its
            # single table directly instead of a channel-indexed dict
            if len(ch_groups) != 1:
                print("Error: got dummy group (no name) in non-dummy ch_groups")
                return None
            return tbl
        # cache the table to a ch-indexed dict for quick look-up
        for ch in group_info['ch_list']: ch_to_tbls[ch] = tbl
    return ch_to_tbls | 964bc6a817688eb8426976cec2b0053f43c6ed79 | 26,373
def segmentspan(revlog, revs):
    """Return the byte span covered by a segment of revisions.

    ``revs`` is a sorted array of revision numbers; the span runs from the
    start offset of the first revision to the end offset of the last one.

    >>> revlog = _testrevlog([
    ...  5, #0
    ...  10, #1
    ...  12, #2
    ...  12, #3 (empty)
    ...  17, #4
    ... ])
    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
    17
    >>> segmentspan(revlog, [0, 4])
    17
    >>> segmentspan(revlog, [3, 4])
    5
    >>> segmentspan(revlog, [1, 2, 3,])
    7
    >>> segmentspan(revlog, [1, 3])
    7
    """
    if not revs:
        return 0
    first, last = revs[0], revs[-1]
    return revlog.end(last) - revlog.start(first)
def is_stateful(change, stateful_resources):
    """Boolean check: does the current change reference a stateful resource?"""
    resource_type = change['ResourceType']
    return resource_type in stateful_resources
import pwd
import os
def get_osusername():
    """Return the username owning the current process.

    Raises:
        OSError: on platforms without the ``pwd`` module (e.g. Windows).
    """
    if pwd is None:
        raise OSError("get_username cannot be called on Windows")
    # pw_name is field 0 of the passwd record for the real uid.
    return pwd.getpwuid(os.getuid()).pw_name
def get_session_from_webdriver(driver: WebDriver, registry: Registry) -> RedisSession:
    """Fetch the pyramid_redis_sessions session matching a Selenium driver.

    Reads the raw ``session`` cookie from the given WebDriver, verifies and
    deserializes it with the configured session secret, and loads the
    matching :class:`RedisSession` from Redis. Useful in functional tests
    for injecting data into the session the test browser is using::

        session = get_session_from_webdriver(browser.driver, init.config.registry)
        session["referral"] = {
            "ref": slug,
            "referrer": "http://example.com"
        }
        session.to_redis()

    :param driver: The active WebDriver (usually ``browser.driver``)
    :param registry: The Pyramid registry (usually ``init.config.registry``)
    """

    class _RegistryOnlyRequest:
        """Minimal request stand-in; pyramid_redis_sessions only needs .registry."""

        def __init__(self, registry):
            self.registry = registry

    secret = registry.settings["redis.sessions.secret"]
    raw_cookie = driver.get_cookie("session")["value"]
    session_id = signed_deserialize(raw_cookie, secret)
    # Use pyramid_redis_sessions to obtain a connection to the Redis database
    redis = get_default_connection(_RegistryOnlyRequest(registry))
    return RedisSession(redis, session_id, new=False, new_session=None)
import sys
def parse_argv():
    """Retrieve fields from sys.argv.

    With exactly one argument, treat it as the main file: read its source
    and return ``(prog, {file: code}, file, code, None, None, None, None)``.
    Otherwise return ``sys.argv`` unchanged.
    """
    if len(sys.argv) != 2:
        # TODO: Finish handling arguments intelligently
        return sys.argv
    prog, main_file = sys.argv
    with open(main_file) as handle:
        main_code = handle.read()
    return prog, {main_file: main_code}, main_file, main_code, None, None, None, None
from typing import Tuple
def paper() -> Tuple[str]:
    """
    Use my paper figure style.

    Applies the seaborn "paper" context, a "ticks" style showing only the
    left and bottom spines, frameless legends, and the "deep" palette.
    Mutates global matplotlib/seaborn state as a side effect.

    Returns
    -------
    Tuple[str]
        Colors in the color palette.
    """
    # NOTE(review): sns.color_palette("deep") returns a list of RGB tuples,
    # so the Tuple[str] return annotation looks wrong — confirm and fix.
    sns.set_context("paper")
    # Show only the left/bottom spines, drawn in black, with visible ticks.
    style = { "axes.spines.bottom": True,
              "axes.spines.left": True,
              "axes.spines.right": False,
              "axes.spines.top": False,
              "axes.edgecolor": "0",
              "xtick.bottom": True,
              "ytick.left": True}
    plt.rcParams["legend.frameon"] = False
    palette = sns.color_palette("deep")
    sns.set_palette(palette)
    sns.set_style("ticks", rc=style)
    return palette | 6e53247c666db62be1d5bf5ad5d77288af277d2d | 26,379
import difflib
def _get_diff_text(old, new):
"""
Returns the diff of two text blobs.
"""
diff = difflib.unified_diff(old.splitlines(1), new.splitlines(1))
return "".join([x.replace("\r", "") for x in diff]) | bd8a3d49ccf7b6c18e6cd617e6ad2ad8324de1cc | 26,380 |
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.time import Time
import astropy.units as u
from astropy.coordinates import SkyCoord
from shapely.geometry import Polygon
from descartes import PolygonPatch
from astroquery.alma import Alma
import os
def alma_query(tab, make_figure=True, xsize=5, nlabel=3, min_size=4, cmap='jet_r', rerun_query=True, query_kwargs=None):
    """
    Query the ALMA archive around the HST data footprint.

    Parameters
    ----------
    tab : table with meta keys XMIN/XMAX/YMIN/YMAX/BOXRA/BOXDEC/NAME and a
        'footprint' column of polygon strings.
    make_figure : bool
        Draw and save a '{NAME}_alma.png' coverage figure.
    xsize : float
        Figure width in inches.
    nlabel : int
        Number of axis labels to draw (0 disables).
    min_size : float
        Minimum query radius, arcmin.
    cmap : str
        Colormap used to color ALMA bands.
    rerun_query : bool
        Re-query the archive even when '{NAME}_alma.ecsv' already exists.
    query_kwargs : dict or None
        Extra keyword arguments for Alma.query_region; defaults to
        {'public': False, 'science': False, 'get_html_version': True}.

    Returns
    -------
    res : table of ALMA archive results (also written to '{NAME}_alma.ecsv'
        when non-empty).
    fig : matplotlib Figure or None.
    """
    # Avoid a mutable default argument; the historical defaults are kept.
    if query_kwargs is None:
        query_kwargs = {'public': False, 'science': False, 'get_html_version': True}
    NOW = Time.now().iso
    meta = tab.meta
    xr = (meta['XMIN'], meta['XMAX'])
    yr = (meta['YMIN'], meta['YMAX'])
    ra, dec = meta['BOXRA'], meta['BOXDEC']
    cosd = np.cos(dec/180*np.pi)
    # Box extent in arcmin (RA scaled by cos(dec))
    dx = (xr[1]-xr[0])*cosd*60
    dy = (yr[1]-yr[0])*60
    box_width = np.maximum(dx, dy)
    query_size = np.maximum(min_size, box_width/2)
    coo = SkyCoord(ra, dec, unit='deg')
    alma_file = '{0}_alma.ecsv'.format(tab.meta['NAME'])
    # 'or' instead of bitwise '|' for boolean logic
    if (not os.path.exists(alma_file)) or rerun_query:
        res = Alma.query_region(coo, query_size*u.arcmin, **query_kwargs)
        res.meta['TQUERY'] = (NOW, 'Timestamp of query execution')
        res.meta['RA'] = (ra, 'Query center, RA')
        res.meta['DEC'] = (dec, 'Query center, Dec')
        res.meta['R'] = (query_size, 'Query radius, arcmin')
        res.meta['N'] = len(res)
        res.meta['NAME'] = tab.meta['NAME']
        if len(res) > 0:
            res['field_root'] = res.meta['NAME'].lower()
            res.write(alma_file, overwrite=True, format='ascii.ecsv')
    else:
        res = Table.read(alma_file, format='ascii.ecsv')
    if make_figure and (len(res) > 0):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        #ax.set_xlim(xr)
        #ax.set_ylim(yr)
        # Grow by factor of 2
        #expand = 2
        expand = np.maximum(min_size*2/np.minimum(dx, dy), 1)
        ax.set_xlim(ra+dx/60/2*expand/cosd, ra-dx/60/2*expand/cosd)
        ax.set_ylim(dec-dy/60/2*expand, dec+dy/60/2*expand)
        ax.scatter(ra, dec, marker='+', color='k')
        # HST patch: union of all HST footprint polygons
        p_hst = None
        for fph in tab['footprint']:
            for p in query.parse_polygons(fph):
                p_j = Polygon(p).buffer(0.001)
                if p_hst is None:
                    p_hst = p_j
                else:
                    p_hst = p_hst.union(p_j)
        ax.add_patch(PolygonPatch(p_hst, ec='k', fc='None',
                                  alpha=0.8, label='HST'))
        band_labels = []
        so = np.argsort(res['Band'])
        is_public = res['Release date'] < NOW
        for i in so:
            fpstr = res['Footprint'][i]
            fps = query.parse_polygons(fpstr)
            is_mosaic = res['Mosaic'][i] in ['mosaic']
            try:
                color = plt.cm.get_cmap(cmap)(int(res['Band'][i])/10)
            except Exception:
                # Unparseable band: fall back to red (was a bare except)
                color = 'r'
            # Dashed outline marks proprietary (not yet public) data
            if is_public[i]:
                linestyle='-'
            else:
                linestyle='--'
            for j, fp in enumerate(fps):
                fp_j = Polygon(fp).buffer(0.1/3600.)
                # Label each band only once in the legend
                if res['Band'][i] in band_labels:
                    label = None
                else:
                    band_labels.append(res['Band'][i])
                    label = 'Band {0}'.format(res['Band'][i])
                ax.add_patch(PolygonPatch(fp_j, ec=color, fc='None',
                                          alpha=0.8, label=label,
                                          linestyle=linestyle))
                if is_mosaic and is_public[i]:
                    ax.add_patch(PolygonPatch(fp_j, ec=color, fc=color,
                                              alpha=0.1+0.1*is_public[i]))
        ax.grid()
        ax.set_title('{0} ALMA'.format(res.meta['NAME']))
        #ax.set_xlim(ax.get_xlim()[::-1])
        ax.set_aspect(1/cosd)
        ax.legend(ncol=1, fontsize=6, loc='upper right')
        fig.set_size_inches(xsize, xsize*np.clip(dy/dx, 0.2, 5))
        if nlabel > 0:
            draw_axis_labels(ax=ax, nlabel=nlabel)
        ax.text(0.03, 0.03, NOW, fontsize=5, transform=ax.transAxes, ha='left', va='bottom')
        fig.tight_layout(pad=0.2)
        fig.savefig('{0}_alma.png'.format(meta['NAME']))
    else:
        fig = None
    return res, fig
def GetStatus(operation):
    """Returns string status for given operation.

    Args:
        operation: A messages.Operation instance.

    Returns:
        The status of the operation in string form.
    """
    # Guard clauses instead of if/elif/else: pending until done, then
    # error wins over completion.
    if not operation.done:
        return Status.PENDING.name
    if operation.error:
        return Status.ERROR.name
    return Status.COMPLETED.name
import argparse
from datetime import datetime
def get_args(args):
    """Parse the script arguments.

    Args:
        args: sequence of CLI tokens (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with ``date`` (datetime parsed from YYYY-MM-DD,
        or None when omitted) and ``channel`` (list of channel names).
    """
    # Fixed typo in the user-visible description ("Feteches" -> "Fetches").
    description = "tvtid - Fetches the tv schedule from client.dk"
    arg = argparse.ArgumentParser(description=description)
    arg.add_argument(
        "-d",
        "--date",
        metavar="datetime",
        type=lambda s: datetime.strptime(s, "%Y-%m-%d"),
        help="The date of which you want to get the schedule",
    )
    arg.add_argument("channel", nargs="+", type=str, help="The channel")
    return arg.parse_args(args)
def ft2m(ft):
    """
    Converts feet to meters.

    Args:
        ft: Length in feet, or None.

    Returns:
        The length in meters (ft * 0.3048), or None when *ft* is None.
    """
    # 'is None' instead of '== None': identity check avoids __eq__ surprises.
    if ft is None:
        return None
    return ft * 0.3048
def show_colors(*, nhues=17, minsat=10, unknown='User', include=None, ignore=None):
    """
    Generate tables of the registered color names. Adapted from
    `this example <https://matplotlib.org/examples/color/named_colors.html>`__.
    Parameters
    ----------
    nhues : int, optional
        The number of breaks between hues for grouping "like colors" in the
        color table.
    minsat : float, optional
        The threshold saturation, between ``0`` and ``100``, for designating
        "gray colors" in the color table.
    unknown : str, default: 'User'
        Category name for color names that are unknown to proplot.
        Set this to ``False`` to hide unknown color names.
    include : str or sequence of str, default: None
        Category names to be shown in the table. Use this to limit
        the table to a subset of categories. Valid categories are
        %(demos.colors)s.
    ignore : str or sequence of str, default: 'CSS4'
        Used only if `include` was not passed. Category names to be removed
        from the colormap table.
    Returns
    -------
    proplot.figure.Figure
        The figure.
    proplot.gridspec.SubplotGrid
        The subplot grid.
    """
    # Tables of known colors to be plotted
    colordict = {}
    if ignore is None:
        ignore = 'css4'
    if isinstance(include, str):
        include = (include.lower(),)
    if isinstance(ignore, str):
        ignore = (ignore.lower(),)
    if include is None:
        include = COLORS_TABLE.keys()
    # Normalize to a real set before subtracting: the previous in-place
    # 'include -= set(...)' only worked when include was a dict_keys view;
    # a user-supplied tuple or list of categories raised TypeError here.
    include = set(include) - set(map(str.lower, ignore))
    for cat in sorted(include):
        if cat not in COLORS_TABLE:
            raise ValueError(
                f'Invalid categories {include!r}. Options are: '
                + ', '.join(map(repr, COLORS_TABLE)) + '.'
            )
        colordict[cat] = list(COLORS_TABLE[cat])  # copy the names
    # Add "unknown" colors
    if unknown:
        unknown_colors = [
            color for color in map(repr, pcolors._color_database)
            if 'xkcd:' not in color and 'tableau:' not in color
            and not any(color in list_ for list_ in COLORS_TABLE)
        ]
        if unknown_colors:
            colordict[unknown] = unknown_colors
    # Divide colors into columns and rows
    # For base and open colors, tables are already organized into like
    # colors, so just reshape them into grids. For other colors, we group
    # them by hue in descending order of luminance.
    namess = {}
    for cat in sorted(include):
        if cat == 'base':
            names = np.asarray(colordict[cat])
            ncols, nrows = len(names), 1
        elif cat == 'opencolor':
            names = np.asarray(colordict[cat])
            ncols, nrows = 7, 20
        else:
            hclpairs = [(name, to_xyz(name, 'hcl')) for name in colordict[cat]]
            hclpairs = [
                sorted(
                    [
                        pair for pair in hclpairs
                        if _filter_colors(pair[1], ihue, nhues, minsat)
                    ],
                    key=lambda x: x[1][2]  # sort by luminance
                )
                for ihue in range(nhues)
            ]
            names = np.array([name for ipairs in hclpairs for name, _ in ipairs])
            ncols, nrows = 4, len(names) // 4 + 1
        names.resize((ncols, nrows))  # fill empty slots with empty string
        namess[cat] = names
    # Draw figures for different groups of colors
    # NOTE: Aspect ratios should be number of columns divided by number
    # of rows, times the aspect ratio of the slot for each swatch-name
    # pair, which we set to 5.
    shape = tuple(namess.values())[0].shape  # sample *first* group
    figwidth = 6.5
    refaspect = (figwidth * 72) / (10 * shape[1])  # points
    maxcols = max(names.shape[0] for names in namess.values())
    hratios = tuple(names.shape[1] for names in namess.values())
    fig, axs = ui.subplots(
        figwidth=figwidth,
        refaspect=refaspect,
        nrows=len(include),
        hratios=hratios,
    )
    title_dict = {
        'css4': 'CSS4 colors',
        'base': 'Base colors',
        'opencolor': 'Open color',
        'xkcd': 'XKCD colors',
    }
    for ax, (cat, names) in zip(axs, namess.items()):
        # Format axes
        ax.format(
            title=title_dict.get(cat, cat),
            titleweight='bold',
            xlim=(0, maxcols - 1),
            ylim=(0, names.shape[1]),
            grid=False, yloc='neither', xloc='neither',
            alpha=0,
        )
        # Draw swatches as lines
        lw = 8  # best to just use trial and error
        swatch = 0.45  # percent of column reserved for swatch
        ncols, nrows = names.shape
        for col, inames in enumerate(names):
            for row, name in enumerate(inames):
                if not name:
                    continue
                y = nrows - row - 1  # start at top
                x1 = col * (maxcols - 1) / ncols  # e.g. idx 3 --> idx 7
                x2 = x1 + swatch  # portion of column
                xtext = x1 + 1.1 * swatch
                ax.text(
                    xtext, y, name, ha='left', va='center',
                    transform='data', clip_on=False,
                )
                ax.plot(
                    [x1, x2], [y, y],
                    color=name, lw=lw,
                    solid_capstyle='butt',  # do not stick out
                    clip_on=False,
                )
    return fig, axs
def get_all_lights(scene, include_light_filters=True):
    """Return a list of all lights in the scene, including
    mesh lights
    Args:
        scene (byp.types.Scene) - scene file to look for lights
        include_light_filters (bool) - whether or not light filters should be included in the list
    Returns:
        (list) - list of all lights
    """
    lights = list()
    for ob in scene.objects:
        if ob.type == 'LIGHT':
            # Only consider Blender lights that carry RenderMan settings.
            if hasattr(ob.data, 'renderman'):
                if include_light_filters:
                    lights.append(ob)
                elif ob.data.renderman.renderman_light_role == 'RMAN_LIGHT':
                    # Exclude light filters: keep only true light roles.
                    lights.append(ob)
        else:
            # Non-LIGHT objects can still act as mesh lights through their
            # active material's RenderMan node tree.
            mat = getattr(ob, 'active_material', None)
            if not mat:
                continue
            output = shadergraph_utils.is_renderman_nodetree(mat)
            if not output:
                continue
            if len(output.inputs) > 1:
                # NOTE(review): inputs[1] appears to be the light socket of
                # the RenderMan output node — confirm against node definition.
                socket = output.inputs[1]
                if socket.is_linked:
                    node = socket.links[0].from_node
                    if node.bl_label == 'PxrMeshLight':
                        lights.append(ob)
    return lights | 4570f36bdfbef287f38a250cddcdc7f8c8d8665d | 26,386
def get_df(path):
    """Load the raw dataframe from JSON data and add a 'rate' column.

    The rate is records per second, derived from the 'ms_per_record' column.
    """
    with open(path) as reader:
        frame = pd.DataFrame(load(reader))
    frame['rate'] = 1e3 / frame['ms_per_record']
    return frame
def _format_distribution_details(details, color=False):
"""Format distribution details for printing later."""
def _y_v(value):
"""Print value in distribution details."""
if color:
return colored.yellow(value)
else:
return value
# Maps keys in configuration to a pretty-printable name.
distro_pretty_print_map = {
"distro": lambda v: """Distribution Name: """ + _y_v(v),
"release": lambda v: """Release: """ + _y_v(v),
"arch": lambda v: """Architecture: """ + _y_v(Alias.universal(v)),
"pkgsys": lambda v: """Package System: """ + _y_v(v.__name__),
}
return "\n".join([
" - " + distro_pretty_print_map[key](value)
for key, value in details.items()
if key in distro_pretty_print_map
]) + "\n" | ccfa7d9b35b17ba9889f5012d1ae5aa1612d33b1 | 26,388 |
async def async_get_relation_id(application_name, remote_application_name,
                                model_name=None,
                                remote_interface_name=None):
    """
    Get relation id of relation from model.
    :param model_name: Name of model to operate on
    :type model_name: str
    :param application_name: Name of application on this side of relation
    :type application_name: str
    :param remote_application_name: Name of application on other side of
                                    relation
    :type remote_application_name: str
    :param remote_interface_name: Name of interface on remote end of relation
    :type remote_interface_name: Optional(str)
    :returns: Relation id of the first matching relation, or None
    :rtype: any
    """
    # Build the remote endpoint spec once: "app" or "app:interface".
    spec = '{}'.format(remote_application_name)
    if remote_interface_name is not None:
        spec += ':{}'.format(remote_interface_name)
    async with run_in_model(model_name) as model:
        for relation in model.applications[application_name].relations:
            if relation.matches(spec):
                return relation.id
from operator import or_
def get_timeseries_references(session_id, search_value, length, offset, column, order):
    """
    Gets a filtered list of timeseries references.
    This function will generate a filtered list of timeseries references belonging to a session
    given a search value. The length, offset, and order of the list can also be specified.

    Args:
        session_id: id whose TimeSeriesCatalog rows are listed.
        search_value: substring matched case-insensitively against several
            text columns; '' disables filtering.
        length: page size (row limit) for pagination.
        offset: row offset for pagination.
        column: index into the sortable column list below (as a string/int).
        order: 'asc' or 'desc'; any other value sorts by timeseries_id asc.

    Returns:
        Tuple of (full row count, filtered row count, selected row count,
        list of paginated query result rows).
    """
    Session = app.get_persistent_store_database(
        "hydroshare_timeseries_manager",
        as_sessionmaker=True
    )
    session = Session()
    # Order of this list must match the client-side column indexing.
    sortable_columns = [
        "status",
        "site_name",
        "site_code",
        "latitude",
        "longitude",
        "variable_name",
        "variable_code",
        "sample_medium",
        "begin_date",
        "end_date",
        "value_count",
        "method_link",
        "method_description",
        "network_name",
        "url",
        "service_type",
        "ref_type",
        "return_type"
    ]
    full_query = session.\
        query(
            TimeSeriesCatalog.status,
            # NOTE(review): status is selected twice — looks intentional to
            # back two client-side table columns, but confirm.
            TimeSeriesCatalog.status,
            TimeSeriesCatalog.site_name,
            TimeSeriesCatalog.site_code,
            TimeSeriesCatalog.latitude,
            TimeSeriesCatalog.longitude,
            TimeSeriesCatalog.variable_name,
            TimeSeriesCatalog.variable_code,
            TimeSeriesCatalog.sample_medium,
            TimeSeriesCatalog.begin_date,
            TimeSeriesCatalog.end_date,
            TimeSeriesCatalog.value_count,
            TimeSeriesCatalog.method_link,
            TimeSeriesCatalog.method_description,
            TimeSeriesCatalog.network_name,
            TimeSeriesCatalog.url,
            TimeSeriesCatalog.service_type,
            TimeSeriesCatalog.ref_type,
            TimeSeriesCatalog.return_type,
            TimeSeriesCatalog.timeseries_id,
            TimeSeriesCatalog.selected
        ).filter(
            TimeSeriesCatalog.session_id == session_id
        )
    if search_value != "":
        # Case-insensitive substring match across the searchable columns.
        filtered_query = full_query.filter(
            or_(
                TimeSeriesCatalog.status.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.site_name.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.site_code.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.variable_name.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.variable_code.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.sample_medium.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.network_name.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.service_type.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.ref_type.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.return_type.ilike(f"%{search_value}%"),
            )
        )
    else:
        filtered_query = full_query
    if order == "asc":
        ordered_query = filtered_query.order_by(
            asc(getattr(TimeSeriesCatalog, sortable_columns[int(column)]))
        )
    elif order == "desc":
        ordered_query = filtered_query.order_by(
            desc(getattr(TimeSeriesCatalog, sortable_columns[int(column)]))
        )
    else:
        ordered_query = filtered_query.order_by(
            asc(TimeSeriesCatalog.timeseries_id)
        )
    paginated_query = ordered_query.offset(offset).limit(length)
    # '== True' is required here: it builds a SQL expression, not a Python bool.
    selected_query = full_query.filter(
        TimeSeriesCatalog.selected == True
    )
    full_query_count = full_query.count()
    filtered_query_count = filtered_query.count()
    selected_query_count = selected_query.count()
    query_results = paginated_query.all()
    # Close the session and dispose of its engine to release DB connections.
    engine = session.get_bind()
    session.close()
    engine.dispose()
    return full_query_count, filtered_query_count, selected_query_count, query_results | 67011d7d1956259c383cd2722ae4035c28e6a5f3 | 26,390
import os
import re
def get_current_version():
    """Return the current version string from evaluations/__init__.py."""
    package_init = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "evaluations", "__init__.py"
    )
    with open(package_init, 'r') as handle:
        contents = handle.read()
    # Extract the value of the __version__ assignment (single or double quotes).
    match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', contents, re.M)
    return match.group(1)
def mxprv_from_bip39_mnemonic(
    mnemonic: Mnemonic, passphrase: str = "", network: str = "mainnet"
) -> bytes:
    """Return the BIP32 root master extended private key for a BIP39 mnemonic.

    Derives the binary seed from the mnemonic/passphrase and wraps it with
    the network's xprv version bytes.
    """
    binary_seed = bip39.seed_from_mnemonic(mnemonic, passphrase)
    return rootxprv_from_seed(binary_seed, NETWORKS[network].bip32_prv)
import os
import codecs
import sys
import platform
def setup_ebook_home(args, conf):
    """
    Setup user's ebook home, config being set with this order of precedence:
     - CLI params
     - ENV vars
     - saved values in ogre config
     - automatically created in $HOME

    Args:
        args: parsed CLI arguments (may lack an ebook_home attribute).
        conf: dict-like saved ogre configuration.

    Returns:
        (found, path) tuple: found is False when the home had to be derived
        or created automatically rather than supplied by the user/config.
    """
    ebook_home = None
    # 1) load CLI parameters (if available)
    try:
        ebook_home = args.ebook_home
    except AttributeError:
        pass
    # 2) load ENV vars
    if ebook_home is None:
        ebook_home = os.environ.get('OGRE_HOME')
    # 3) load settings from saved config
    if ebook_home is None or len(ebook_home) == 0:
        ebook_home = conf.get('ebook_home', None)
    if type(ebook_home) is str:
        # decode ebook_home to unicode according to local fs encoding,
        # os.walk/os.listdir then does all further charset conversion for us
        # NOTE(review): codecs.decode of a str with a fs codec is a Python 2
        # idiom; on Python 3 this raises TypeError — confirm target version.
        ebook_home = codecs.decode(ebook_home, sys.getfilesystemencoding())
    # handle no ebook home :(
    if ebook_home is None:
        ebook_home_found = False
        # get the user's HOME directory
        home_dir = os.path.expanduser('~')
        # setup ebook home cross-platform
        if platform.system() == 'Darwin':
            ebook_home = os.path.join(home_dir, 'Documents/ogre-ebooks')
        else:
            ebook_home = os.path.join(home_dir, 'ogre-ebooks')
        # create OGRE ebook_home for the user :)
        if not os.path.exists(ebook_home):
            if not os.path.exists(os.path.join(home_dir, 'Documents')):
                os.mkdir(os.path.join(home_dir, 'Documents'))
            os.mkdir(ebook_home)
            # prntr is a module-level printer object defined elsewhere.
            prntr.info('Decrypted ebooks will be put into {}'.format(ebook_home))
    else:
        ebook_home_found = True
    return ebook_home_found, ebook_home | 1d18e120304bb2b21df7252c6ea8c4e09fdf6314 | 26,393
def translate_text(
    text: str, source_language: str, target_language: str
) -> str:
    """Translates text into the target language.

    Languages are specified with ISO 639-1 compliant codes; see
    https://www.w3schools.com/tags/ref_language_codes.asp for details.
    If *text* contains html tags, Cloud Translate only translates content
    between tags, leaving the tags themselves untouched.

    Args:
        text: str. The text to be translated.
        source_language: str. An allowlisted language code.
        target_language: str. An allowlisted language code.

    Raises:
        ValueError. Invalid source language code.
        ValueError. Invalid target language code.

    Returns:
        str. The translated text (returned unchanged when source and target
        languages are the same).
    """
    if source_language not in LANGUAGE_CODE_ALLOWLIST:
        raise ValueError('Invalid source language code: %s' % source_language)
    if target_language not in LANGUAGE_CODE_ALLOWLIST:
        raise ValueError('Invalid target language code: %s' % target_language)
    # No remote call needed when nothing would change.
    if source_language == target_language:
        return text
    translation = CLIENT.translate(
        text,
        target_language=target_language,
        source_language=source_language,
    )
    # Narrow the client stub's broad return type for mypy.
    assert isinstance(translation, dict)
    return translation['translatedText']
def evt_cache_staged_t(ticket):
    """Create an EvtCacheStaged event from the given ticket."""
    event = _get_proto(ticket, fc_keys=['bfid'])
    event['cache']['en'] = _set_cache_en(ticket)
    return EvtCacheStaged(event)
from typing import Dict
def _get_setup_keywords(pkg_data: dict, keywords: dict) -> Dict:
"""Gather all setuptools.setup() keyword args."""
options_keywords = dict(
packages=list(pkg_data),
package_data={pkg: list(files)
for pkg, files in pkg_data.items()},
)
keywords['options'].update(options_keywords)
return keywords | 34f2d52c484fc4e49ccaca574639929756cfa4dc | 26,396 |
import six
def flatten(x):
    """flatten(sequence) -> list

    Return a single, flat list which contains all elements retrieved from
    the sequence and all recursively contained sub-sequences (iterables).
    Strings are treated as atoms, not as iterables of characters.

    Examples:
    >>> flatten([1, 2, [3, 4], (5, 6)])
    [1, 2, 3, 4, 5, 6]
    >>> flatten([[[1, 2, 3], (42, None)], [4, 5], [6], 7, (8, 9, 10)])
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
    """
    result = []
    for el in x:
        # Recurse into any iterable except strings. six.string_types is
        # just (str,) on Python 3, so the six dependency was unnecessary.
        if hasattr(el, "__iter__") and not isinstance(el, str):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
from clawpack.visclaw import colormaps, geoplot
from numpy import linspace
from clawpack.visclaw.data import ClawPlotData
from clawpack.visclaw import gaugetools
import pylab
import pylab
from numpy import ma
from numpy import ma
import pylab
import pylab
from pylab import plot, xticks, floor, xlabel
def setplot(plotdata=None):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
if plotdata is None:
plotdata = ClawPlotData()
plotdata.clearfigures() # clear any old figures,axes,items data
plotdata.format = 'binary'
# To plot gauge locations on pcolor or contour plot, use this as
# an afteraxis function:
def addgauges(current_data):
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=True)
#-----------------------------------------
# Figure for surface
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface', figno=0)
plotfigure.kwargs = {'figsize': (8,5)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
def fixup(current_data):
addgauges(current_data)
t = current_data.t
t = t / 3600. # hours
pylab.title('Surface at %4.2f hours' % t, fontsize=20)
pylab.grid(True)
#pylab.xticks(fontsize=15)
#pylab.yticks(fontsize=15)
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = colormaps.red_white_blue #geoplot.tsunami_colormap
plotitem.pcolor_cmin = sea_level - 0.1
plotitem.pcolor_cmax = sea_level + 0.1
plotitem.add_colorbar = False
#plotitem.colorbar_shrink = 0.5
plotitem.colorbar_shrink = 1.0
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
plotaxes.xlimits = [-230,-115]
plotaxes.ylimits = [0,65]
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = linspace(-3000,-3000,1)
plotitem.amr_contour_colors = ['y'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':2}
plotitem.amr_contour_show = [1,0,0]
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
#-----------------------------------------
# Figure for adjoint
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Adjoint ', figno=20)
plotfigure.kwargs = {'figsize': (8,5)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('adjoint')
plotaxes.scaled = True
plotaxes.title = 'Adjoint flag'
def fixup(current_data):
addgauges(current_data)
t = current_data.t
t = t / 3600. # hours
pylab.title('Adjoint flag at %4.2f hours' % t, fontsize=20)
pylab.grid(True)
plotaxes.afteraxes = fixup
adj_flag_tol = 0.000001
def masked_inner_product(current_data):
q = current_data.q
soln = ma.masked_where(q[4,:,:] < adj_flag_tol, q[4,:,:])
return soln
def masked_regions(current_data):
q = current_data.q
soln = ma.masked_where(q[4,:,:] < 1e9, q[4,:,:])
return soln
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = 4 #masked_inner_product
plotitem.pcolor_cmap = colormaps.white_red
plotitem.pcolor_cmin = 0.5*adj_flag_tol
plotitem.pcolor_cmax = 6*adj_flag_tol
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_data_show = [1,1,0,0,0,0,0]
plotitem.patchedges_show = 0
#plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = masked_regions
#plotitem.pcolor_cmap = colormaps.white_blue
#plotitem.pcolor_cmin = 9e9
#plotitem.pcolor_cmax = 1.1e10
#plotitem.add_colorbar = False
#plotitem.amr_celledges_show = [0]
#plotitem.amr_data_show = [1,1,0,0]
#plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
plotaxes.xlimits = [-230,-115]
plotaxes.ylimits = [0,65]
#-----------------------------------------
# Figure for levels
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Grid patches', figno=10)
plotfigure.kwargs = {'figsize': (8,5)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Grid patches'
plotaxes.scaled = True
def aa_patches(current_data):
pylab.ticklabel_format(format='plain',useOffset=False)
pylab.xticks([180, 200, 220, 240], rotation=20, fontsize = 28)
pylab.yticks(fontsize = 28)
t = current_data.t
t = t / 3600. # hours
pylab.title('Grids patches at %4.2f hours' % t, fontsize=20)
a = pylab.gca()
a.set_aspect(1./pylab.cos(41.75*pylab.pi/180.))
pylab.grid(True)
def fixup(current_data):
    """Afteraxes hook: overlay gauge locations, then set title and grid."""
    addgauges(current_data)
    hours = current_data.t / 3600.  # frame time in hours
    pylab.title('Grids patches at %4.2f hours' % hours, fontsize=20)
    pylab.grid(True)
# Water
# 2d_patch item: show AMR patch rectangles colored by refinement level.
plotitem = plotaxes.new_plotitem(plot_type='2d_patch')
# One background color / edge color per AMR level.
plotitem.amr_patch_bgcolor = [[1,1,1], [0.8,0.8,0.8], [0.8,1,0.8], [1,.7,.7],[0.6,0.6,1]]
plotitem.amr_patchedges_color = ['k','k','g','r','b']
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0,1,1,1,1]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0]
plotaxes.afteraxes = fixup
# Full-domain extents (longitude/latitude, degrees).
plotaxes.xlimits = [-230,-115]
plotaxes.ylimits = [0,65]
#-----------------------------------------
# Zoom
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Crescent City', figno=1)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
# Color scale centered on sea_level (defined earlier in this file).
plotitem.pcolor_cmin = sea_level - 0.1
plotitem.pcolor_cmax = sea_level + 0.1
plotitem.add_colorbar = True
#plotitem.colorbar_shrink = 0.5
plotitem.colorbar_shrink = 1.0
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
######## Limits below encompass Crescent City
plotaxes.xlimits = [-127,-123.5]
plotaxes.ylimits = [40.5,44.5]
#-----------------------------------------
# Zoom2
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Crescent City Zoomed', figno=2)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = sea_level - 0.1
plotitem.pcolor_cmax = sea_level + 0.1
plotitem.add_colorbar = True
#plotitem.colorbar_shrink = 0.5
plotitem.colorbar_shrink = 1.0
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
######## Limits below encompass Crescent City zoomed area
plotaxes.xlimits = [-124.235,-124.143]
plotaxes.ylimits = [41.716,41.783]
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
# One figure per gauge; cleared between gauges.
plotfigure = plotdata.new_plotfigure(name='Surface at gauges', figno=300, \
                type='each_gauge')
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
# Time window in seconds (9.5 to 15 hours).
plotaxes.xlimits = [9.5*3600, 15*3600]
plotaxes.ylimits = [-3,3]
plotaxes.title = 'Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
# Plot topo as green curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
# Topo curve currently hidden.
plotitem.show = False
def gaugetopo(current_data):
    """Return topography at the gauge, computed as eta - h (q[3] - q[0])."""
    q = current_data.q
    return q[3, :] - q[0, :]
# Use the gaugetopo callable above as the plotted variable.
plotitem.plot_var = gaugetopo
plotitem.plotstyle = 'g-'
def add_zeroline(current_data):
    """Afteraxes hook for gauge plots: draw a zero line and hour ticks.

    Draws a black reference line at eta = 0 and relabels the x-axis
    in whole hours (ticks start at hour 9 to match the axes xlimits).
    """
    t = current_data.t
    plot(t, 0*t, 'k')
    # One tick per hour, extending one hour past the final time.
    n = int(floor(t.max()/3600.) + 2)
    xticks([3600*i for i in range(9,n)], ['%i' % i for i in range(9,n)])
    xlabel('time (hours)')
plotaxes.afteraxes = add_zeroline
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # make multiple frame png's at once
return plotdata
from typing import Dict
import json
def load_spider_tables(filenames: str) -> Dict[str, Schema]:
    """Load database schemas from a comma-separated list of JSON files.

    Args:
        filenames: Comma-separated paths to JSON table files.

    Returns:
        Dict of schemas merged from every file, as produced by
        process_dbs (keys presumably database ids — confirm at caller).
    """
    schemas = {}
    for path in filenames.split(","):
        with open(path) as table_file:
            schemas.update(process_dbs(json.load(table_file)))
    return schemas
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.